From e6bed8cccd23020d313318b7d5b0f9f45a8a2172 Mon Sep 17 00:00:00 2001 From: <> Date: Tue, 25 Feb 2025 15:28:11 +0000 Subject: [PATCH] Deployed 56ba949 with MkDocs version: 1.6.1 --- .nojekyll | 0 404.html | 2196 ++++++ access/index.html | 2264 ++++++ access/mfa/index.html | 2449 ++++++ access/ssh/index.html | 2604 +++++++ access/vscode/index.html | 2531 +++++++ access/web/index.html | 2313 ++++++ accounts/index.html | 2338 ++++++ accounts/ump/index.html | 2376 ++++++ accounts/waldur/index.html | 2405 ++++++ alps/hardware/index.html | 2545 +++++++ alps/index.html | 2266 ++++++ alps/platforms/index.html | 2272 ++++++ alps/storage/index.html | 2257 ++++++ alps/vclusters/index.html | 2337 ++++++ assets/cscs-logo.png | Bin 0 -> 10001 bytes assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.f1b6f286.min.js | 16 + assets/javascripts/bundle.f1b6f286.min.js.map | 7 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.el.min.js | 1 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.he.min.js | 1 + assets/javascripts/lunr/min/lunr.hi.min.js | 1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.hy.min.js | 1 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.kn.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + assets/javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.te.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.f8cc74c7.min.js | 42 + .../workers/search.f8cc74c7.min.js.map | 7 + assets/stylesheets/main.8608ea7d.min.css | 1 + assets/stylesheets/main.8608ea7d.min.css.map | 1 + assets/stylesheets/palette.06af60db.min.css | 1 + .../stylesheets/palette.06af60db.min.css.map | 1 + build-install/containers/index.html | 2421 ++++++ build-install/cpe/index.html | 2259 ++++++ build-install/index.html | 2290 ++++++ build-install/pip/index.html | 2248 ++++++ build-install/uenv/index.html | 2640 +++++++ images/access/mfa-otp-prompt.png | Bin 0 -> 12358 bytes images/access/mfa-web-login.png | Bin 0 -> 30759 bytes images/access/ump.png | Bin 0 -> 61870 bytes .../f7t-api-subscriptions-management.png | Bin 0 -> 69529 bytes images/firecrest/f7t-api-subscriptions.png | Bin 0 -> 137832 bytes images/firecrest/f7t-apis.png | Bin 0 -> 112750 bytes images/firecrest/f7t-existing-keys.png | Bin 0 -> 48724 bytes images/firecrest/f7t-generate-keys.png | Bin 0 -> 62498 bytes 
images/firecrest/f7t-keys-overview.png | Bin 0 -> 90318 bytes images/firecrest/f7t-keys.png | Bin 0 -> 43252 bytes images/storage/cyberduck.png | Bin 0 -> 112431 bytes images/storage/globus_endpoint_login.png | Bin 0 -> 155448 bytes images/storage/globus_login.png | Bin 0 -> 83365 bytes index.html | 2391 ++++++ platforms/cwp/index.html | 2240 ++++++ platforms/hpcp/index.html | 2240 ++++++ platforms/mlp/index.html | 2323 ++++++ policies/code-of-conduct/index.html | 2366 ++++++ policies/index.html | 2288 ++++++ policies/regulations/index.html | 2257 ++++++ policies/slack/index.html | 2296 ++++++ policies/support/index.html | 2499 ++++++ search/search_index.json | 1 + sitemap.xml | 231 + sitemap.xml.gz | Bin 0 -> 559 bytes software/index.html | 2237 ++++++ software/prgenv/index.html | 2242 ++++++ software/prgenv/linalg/index.html | 2252 ++++++ software/prgenv/prgenv-gnu/index.html | 2252 ++++++ software/prgenv/prgenv-nvfortran/index.html | 2252 ++++++ software/sciapps/cp2k/index.html | 2252 ++++++ software/sciapps/gromacs/index.html | 2252 ++++++ software/sciapps/index.html | 2253 ++++++ software/sciapps/lammps/index.html | 2252 ++++++ software/sciapps/namd/index.html | 2684 +++++++ software/sciapps/quantumespresso/index.html | 2252 ++++++ software/sciapps/vasp/index.html | 2252 ++++++ software/tools/index.html | 2242 ++++++ software/tools/linaro/index.html | 2817 +++++++ storage/filesystems/index.html | 2707 +++++++ storage/index.html | 2263 ++++++ storage/longterm/index.html | 2779 +++++++ storage/object/index.html | 2617 +++++++ storage/transfer/index.html | 2437 ++++++ stylesheets/extra.css | 113 + tools/cicd/index.html | 3198 ++++++++ tools/container-engine/index.html | 3708 +++++++++ tools/firecrest/index.html | 2725 +++++++ tools/index.html | 2250 ++++++ tools/slurm/index.html | 2520 +++++++ tools/uenv/index.html | 3214 ++++++++ vclusters/bristen/index.html | 2250 ++++++ vclusters/clariden/index.html | 2503 ++++++ vclusters/daint/index.html | 2246 ++++++ vclusters/eiger/index.html | 2246 ++++++ vclusters/santis/index.html | 2246 ++++++ 120 files changed, 147933 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 access/index.html create mode 100644 access/mfa/index.html create mode 100644 access/ssh/index.html create mode 100644 access/vscode/index.html create mode 100644 access/web/index.html create mode 100644 accounts/index.html create mode 100644 accounts/ump/index.html create mode 100644 accounts/waldur/index.html create mode 100644 alps/hardware/index.html create mode 100644 alps/index.html create mode 100644 alps/platforms/index.html create mode 100644 alps/storage/index.html create mode 100644 alps/vclusters/index.html create mode 100644 assets/cscs-logo.png create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.f1b6f286.min.js create mode 100644 assets/javascripts/bundle.f1b6f286.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.el.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.he.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create 
mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.f8cc74c7.min.js create mode 100644 assets/javascripts/workers/search.f8cc74c7.min.js.map create mode 100644 assets/stylesheets/main.8608ea7d.min.css create mode 100644 assets/stylesheets/main.8608ea7d.min.css.map create mode 100644 assets/stylesheets/palette.06af60db.min.css create mode 100644 assets/stylesheets/palette.06af60db.min.css.map create mode 100644 build-install/containers/index.html create mode 100644 build-install/cpe/index.html create mode 100644 build-install/index.html create mode 100644 build-install/pip/index.html create mode 100644 build-install/uenv/index.html create mode 100644 images/access/mfa-otp-prompt.png create mode 100644 images/access/mfa-web-login.png create mode 100644 images/access/ump.png create mode 100644 images/firecrest/f7t-api-subscriptions-management.png create mode 100644 images/firecrest/f7t-api-subscriptions.png create mode 100644 images/firecrest/f7t-apis.png create mode 100644 images/firecrest/f7t-existing-keys.png create mode 100644 images/firecrest/f7t-generate-keys.png create mode 100644 images/firecrest/f7t-keys-overview.png create mode 100644 images/firecrest/f7t-keys.png create mode 100644 images/storage/cyberduck.png create mode 100644 images/storage/globus_endpoint_login.png create mode 100644 images/storage/globus_login.png create mode 100644 index.html create mode 100644 platforms/cwp/index.html create mode 100644 platforms/hpcp/index.html create mode 100644 platforms/mlp/index.html create mode 100644 policies/code-of-conduct/index.html create mode 100644 policies/index.html create mode 100644 policies/regulations/index.html create mode 100644 policies/slack/index.html create mode 100644 policies/support/index.html create mode 100644 search/search_index.json create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz create mode 100644 software/index.html create mode 100644 software/prgenv/index.html create mode 100644 software/prgenv/linalg/index.html create mode 100644 software/prgenv/prgenv-gnu/index.html create mode 100644 
software/prgenv/prgenv-nvfortran/index.html create mode 100644 software/sciapps/cp2k/index.html create mode 100644 software/sciapps/gromacs/index.html create mode 100644 software/sciapps/index.html create mode 100644 software/sciapps/lammps/index.html create mode 100644 software/sciapps/namd/index.html create mode 100644 software/sciapps/quantumespresso/index.html create mode 100644 software/sciapps/vasp/index.html create mode 100644 software/tools/index.html create mode 100644 software/tools/linaro/index.html create mode 100644 storage/filesystems/index.html create mode 100644 storage/index.html create mode 100644 storage/longterm/index.html create mode 100644 storage/object/index.html create mode 100644 storage/transfer/index.html create mode 100644 stylesheets/extra.css create mode 100644 tools/cicd/index.html create mode 100644 tools/container-engine/index.html create mode 100644 tools/firecrest/index.html create mode 100644 tools/index.html create mode 100644 tools/slurm/index.html create mode 100644 tools/uenv/index.html create mode 100644 vclusters/bristen/index.html create mode 100644 vclusters/clariden/index.html create mode 100644 vclusters/daint/index.html create mode 100644 vclusters/eiger/index.html create mode 100644 vclusters/santis/index.html diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..0b59602 --- /dev/null +++ b/404.html @@ -0,0 +1,2196 @@ + + + + + + + + + + + + + + + + + + + CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/access/index.html b/access/index.html new file mode 100644 index 0000000..f231d04 --- /dev/null +++ b/access/index.html @@ -0,0 +1,2264 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Connecting to Alps - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + +

Connecting to Alps

+

This documentation guides users through the process of accessing CSCS systems and services.

+
+

Before accessing CSCS, you need to have an account at CSCS, and be part of a project that has been allocated resources. +More information on how to get an account is available in accounts and projects.

+
+
+
    +
  • +

    Multi-Factor Authentication

    +

    Before signing in to CSCS web portals or using SSH, all users have to set up multi-factor authentication (MFA)

    +

    MFA

    +
  • +
  • +

    Web Services

    +

    How to sign in to CSCS web portals and services using the Single Sign-On gate and MFA

    +

    Accessing CSCS web services

    +
  • +
  • +

    SSH Access

    +

    Logging into Clusters on Alps

    +

    SSH

    +
  • +
  • +

    VSCode

    +

    How to connect the VSCode IDE on your laptop to Alps

    +

    SSH

    +
  • +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/access/mfa/index.html b/access/mfa/index.html new file mode 100644 index 0000000..f50f712 --- /dev/null +++ b/access/mfa/index.html @@ -0,0 +1,2449 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Multi Factor Authentification (MFA) - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + +

+

Multi-Factor Authentication

+

To access CSCS services and systems, users are required to authenticate using multi-factor authentication (MFA). +MFA is implemented as two-factor authentication, where one factor is the login and password pair ("the thing you know") and the other factor is a device that generates one-time passwords (OTPs, "the thing you have"). +In this way, security is significantly improved compared to single-factor (password-only) authentication.

+

The MFA workflow uses a time-based one-time password (OTP) to verify identity. +An OTP is a six-digit number that changes every 30 seconds. +OTPs are generated using a tool installed on a device other than the one used to access CSCS services and infrastructure. +We recommend using a smartphone with an application such as Google Authenticator to obtain the OTPs.

+

+

Getting Started

+

When you first log in to any of the CSCS web applications such as UMP, Jupyter, etc., you will be asked to register your device.

+

First, you will be asked to provide a code that you received by email. +After this validation step, you will need to scan a QR code with your mobile phone using an application such as Google Authenticator. +Lastly, you will need to enter the OTP from the authenticator application to complete the registration of your device. +From then on, two-factor authentication will be required to access CSCS services and systems. +A more detailed explanation of the registration process is provided in the next section.

+
+

Warning

+

It is not possible to log in to CSCS systems using SSH without registering a device and creating certified SSH keys. +See below for details on generating certified SSH keys.

+
+

Authenticator Application

+

CSCS supports authenticators that follow an open standard called TOTP. +The recommended way to access such an authenticator is to install an application on your mobile phone. +Google Authenticator and FreeOTP have been tested successfully; however, if you are using a different mobile application for OTPs, feel free to continue using it, provided it supports the TOTP standard.
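Any tool that implements the TOTP standard can serve as an authenticator. As an illustration only, if you have the oath-toolkit installed on a trusted machine, you can generate a code from a base32 seed on the command line (the seed below is a placeholder, not a real CSCS secret):
oathtool --totp --base32 "JBSWY3DPEHPK3PXP"
+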

+

You can download Google Authenticator for your phone:

+ +

+

Configure the Authenticator

+

Before starting, ensure that the following prerequisites are satisfied:

+
    +
  1. You have an invitation email from CSCS for MFA enrollment
      +
    • a notification email will be sent at least one week before we send the invitation email.
    • +
    +
  2. +
  3. You have installed an OTP Authenticator app on your mobile device (see above).
  4. +
+
+

Note

+

If you try to access any of our web applications without setting up MFA, you will be redirected to enroll for MFA.

+
+
+

Warning

+

If you try to SSH to CSCS systems without setting up MFA, you will get a permission denied error, for example: +

> ssh ela.cscs.ch
+bobsmith@ela.cscs.ch: Permission denied (publickey).
+Connection closed by UNKNOWN port 65535
+

+
+

Steps:

+
    +
  1. Access any of the CSCS web applications, such as account.cscs.ch or Jupyter, in a new browser session, which will redirect you to the CSCS login page.
  2. +
  3. Log in with your username and password.
  4. +
  5. You will be asked to enter a code that the CSCS authentication system sent to you by email. + After successful validation of the code, you will be redirected to the next page, which presents a QR code.
  6. +
  7. Scan the QR code with the authenticator app installed on your mobile device. + After scanning the QR code, the authenticator app will start generating a new six-digit OTP every 30 seconds.
  8. +
  9. To complete the OTP registration process, enter the six-digit OTP from the authenticator app at the bottom of the same QR code page. Optionally, you can enter the name of the device on which you imported the OTP seed.
  10. +
  11. On successful registration, you will be logged in to the CSCS web application that you accessed in step 1.
  12. +
+
+

Todo

+

do we need the images from KB?

+
+

Resetting the Authenticator

+

If you lose access to your mobile device or authenticator app, you can reset your OTP by following the self-service process below.

+
    +
  1. Access any CSCS web application, such as account.cscs.ch, which redirects you to the CSCS login page.
  2. +
  3. From the login screen, click the "Reset OTP" link below the "LOG IN" button
  4. +
  5. Enter your username and password.
  6. +
  7. On successful validation of your credentials, you will receive an email with a reset-credentials link; click on the link in the email.
  8. +
  9. The steps are the same as for the first time you configured the authenticator.
  10. +
+
+

Warning

+

When replacing your smartphone, remember to sync the authenticator app to the new device before resetting the old one. +Otherwise, you will have to follow this reset process.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/access/ssh/index.html b/access/ssh/index.html new file mode 100644 index 0000000..b9ac287 --- /dev/null +++ b/access/ssh/index.html @@ -0,0 +1,2604 @@ + + + + + + + + + + + + + + + + + + + + + + + + + SSH - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + +

+

Using SSH

+

Before accessing CSCS clusters using SSH, first ensure that you have created a user account that is part of a project with access to the cluster, and that you have multi-factor authentication configured.

+

+

Generating Keys with SSHService

+

It is not possible to authenticate with a username/password, nor with user-created SSH keys. +Instead, it is necessary to use a certified SSH key created with the CSCS SSHService.

+
+

Note

+

Keys are valid for 24 hours, after which a new key must be generated.

+
+
+

Warning

+

The number of certified SSH keys is limited to five per day. +Once you have reached this number, you will not be able to generate new keys until at least one of these keys expires or is revoked.

+
+

There are two methods for generating SSH keys with the SSHService: the SSHService web app, or a command-line script.

+

Getting keys via the command line

+

On Linux and macOS, the SSH keys can be generated and automatically installed using a command-line script. +The script is provided in both pure Bash and Python versions. +For the Python version, Python 3 is required, together with the packages listed in the requirements.txt provided with the scripts.

+
+

Note

+

We recommend using a virtual environment for Python.

+
+

If this is the first time you are using the service, download the SSHService CLI scripts from the CSCS GitHub repository:

+
git clone https://github.com/eth-cscs/sshservice-cli
+cd sshservice-cli
+
+
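If you have cloned the repository before, you can refresh the scripts instead of cloning again (assuming you have no local changes):
cd sshservice-cli
+git pull
+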

The next step is to use either the bash or python scripts:

+
+
+
+

Run the bash script in the sshservice-cli path:

+
./cscs-keygen.sh
+
+
+
+

The first time you use the script, you can set up a python virtual environment with the dependencies installed:

+
python3 -m venv mfa
+source mfa/bin/activate
+pip install -r requirements.txt
+
+

Thereafter, activate the venv before using the script:

+
source mfa/bin/activate
+python cscs-keygen.py
+
+
+
+
+

For both approaches, follow the on-screen instructions, which require you to enter your username, password, and the six-digit OTP from the authenticator app on your phone. +The script generates the key pair (cscs-key and cscs-key-cert.pub) in your ~/.ssh path:

+
> ls ~/.ssh/cscs-key*
+/home/bobsmith/.ssh/cscs-key  /home/bobsmith/.ssh/cscs-key-cert.pub
+
+
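Because the certificate is only valid for 24 hours, it can be useful to check exactly when it expires. This uses standard OpenSSH tooling, independent of the CSCS scripts; look for the "Valid:" line in the output:
ssh-keygen -L -f ~/.ssh/cscs-key-cert.pub
+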

Getting keys via the web app

+

Access the SSHService web application at sshservice.cscs.ch.

+
    +
  1. Sign in with username, password and OTP
  2. +
  3. Select "Signed key" on the left tab and click on "Get a signed key"
  4. +
  5. On the next page a key pair is generated and ready to be downloaded. Download or copy/paste both keys.
  6. +
+

Once generated, the keys need to be copied from where your browser downloaded them to your ~/.ssh path, for example: +

mv /download/location/cscs-key-cert.pub ~/.ssh/cscs-key-cert.pub
+mv /download/location/cscs-key ~/.ssh/cscs-key
+chmod 0600 ~/.ssh/cscs-key
+

+

Adding a password to the key

+

Once the key has been generated using either the CLI or the web interface above, it is strongly recommended that you add a password to the generated key using the ssh-keygen tool.

+
ssh-keygen -f ~/.ssh/cscs-key -p
+
+

Logging In

+

To ensure secure access, CSCS requires users to connect through the designated jump host Ela (ela.cscs.ch) before accessing any cluster.

+

Before trying to log into your target cluster, you can first check that the SSH key generated above can be used to access Ela: +

ssh -i ~/.ssh/cscs-key ela.cscs.ch
+

+

To log into a target system at CSCS, you need to perform some additional setup to handle forwarding of SSH keys generated using the SSHService. +There are two alternatives detailed below.

+

+

Adding Ela as a jump host in SSH Configuration

+

This approach configures Ela as a jump host and creates aliases for the systems that you want to access in ~/.ssh/config on your laptop or PC. +The benefit of this approach is that once the ~/.ssh/config file has been configured, no additional steps are required between creating a new key using MFA, and logging in.

+

Below is an example ~/.ssh/config file that facilitates directly logging into the Daint, Santis and Clariden clusters using ela.cscs.ch as a Jump host:

+
Host ela
+    HostName ela.cscs.ch
+    User cscsusername
+    IdentityFile ~/.ssh/cscs-key
+
+Host daint
+    HostName daint.alps.cscs.ch
+    User cscsusername
+    ProxyJump ela
+    IdentityFile ~/.ssh/cscs-key
+    IdentitiesOnly yes
+
+Host santis
+    HostName santis.alps.cscs.ch
+    ProxyJump ela
+    User cscsusername
+    IdentityFile ~/.ssh/cscs-key
+    IdentitiesOnly yes
+
+Host clariden
+    HostName clariden.alps.cscs.ch
+    ProxyJump ela
+    User cscsusername
+    IdentityFile ~/.ssh/cscs-key
+    IdentitiesOnly yes
+
+
+

❗ Replace cscsusername with your CSCS username in the file above.

+
+

After saving this file, you can log directly into daint.alps.cscs.ch from your local system using the alias daint:

+
ssh daint
+
+
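The same alias also works with file-transfer tools that run over SSH, such as scp and rsync. For example, to copy a local file to your home directory on Daint (results.tar.gz is just an illustrative file name):
scp ./results.tar.gz daint:
+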

+

Using SSH Agent

+

Alternatively, the SSH authentication agent can be configured to manage the keys.

+

Each time a new key is generated using the SSHService, add the key to the SSH agent: +

ssh-add -t 1d ~/.ssh/cscs-key
+

+
+Could not open a connection to your authentication agent +

If you see this error message, the ssh agent is not running. +You can start it with the following command: +

eval $(ssh-agent)
+

+
+

Once the key has been configured, log into Ela using the -A flag, and then jump to the target system: +

# log in to ela.cscs.ch
+ssh -A cscsusername@ela.cscs.ch
+
+# then jump to a cluster
+ssh daint.alps.cscs.ch
+
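If you prefer not to maintain an SSH configuration file, OpenSSH's -J option performs the jump in a single command. Assuming the certified key is loaded in the agent as shown above, something like the following should work (replace cscsusername with your CSCS username):
ssh -J cscsusername@ela.cscs.ch cscsusername@daint.alps.cscs.ch
+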

+

Frequently encountered issues

+
+Too many authentication failures +

You may have too many keys in your SSH agent. +Remove unused keys from the agent, or flush them all with the following command: +

ssh-add -D
+
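Before flushing everything, you can list the keys currently held by the agent and remove only the ones you no longer need:
ssh-add -l
+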

+
+
+Permission denied +

This might indicate that the key has expired.

+
+
+Could not open a connection to your authentication agent +

If you see this error when adding keys to the ssh-agent, make sure the agent is running; if not, start it with the following command: +

eval $(ssh-agent)
+

+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/access/vscode/index.html b/access/vscode/index.html new file mode 100644 index 0000000..f52fede --- /dev/null +++ b/access/vscode/index.html @@ -0,0 +1,2531 @@ + + + + + + + + + + + + + + + + + + + + + + + + + VSCode - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + +

+

Connecting with VSCode

+

Visual Studio Code provides flexible support for remote development. +VSCode's remote tunnel feature starts a server on a remote system, and connects the editor to this server. +There are two ways to set up the connection:

+
    +
  • using the code CLI: the most flexible method if using containers or uenv.
  • +
  • using the VSCode interface: VSCode will connect to the system, then download and start the server
  • +
+

The main challenge with using VSCode is that the most convenient method for starting a remote session is to start a remote tunnel from the VS Code GUI. +This approach starts a session in the standard login environment on that node; however, this won't work if you want to develop in a container, in a uenv, or on a compute node.

+

Flexible method: remote server

+

The most flexible method for connecting VSCode is to log in to the Alps system, set up your environment (start a container or uenv, start a session on a compute node), and start the remote server inside that pre-configured environment.

+
+

Note

+

This approach requires that you have a GitHub account, and that the GitHub account is configured with your VS Code editor.

+
+

The first step is to download the VS Code CLI tool code, which CSCS provides for easy download. +There are two executables: one for systems with x86 CPUs and one for ARM CPUs.

+
+
+
+
wget https://jfrog.svc.cscs.ch/artifactory/uenv-sources/vscode/vscode_cli_alpine_arm64_cli.tar.gz
+tar -xf vscode_cli_alpine_arm64_cli.tar.gz
+
+
+
+
wget https://jfrog.svc.cscs.ch/artifactory/uenv-sources/vscode/vscode_cli_alpine_x64_cli.tar.gz
+tar -xf vscode_cli_alpine_x64_cli.tar.gz
+
+
+
+
+

Alternatively, download the CLI tool from the VS Code site -- take care to select the x86 or Arm64 version that matches the target system.

+

After downloading, copy the code executable to a location in your PATH, so that it is available for future sessions.

+
+guidance on where to put architecture-specific executables +

The home directory can be shared by multiple clusters that might have different micro-architectures, so it is important to separate executables for x86 and aarch64 (ARM) targets.

+

In ~/.bashrc, add the following line (you will need to log in again for this to take effect): +

export PATH=$HOME/.local/$(uname -m)/bin:$PATH
+
+The uname -m command will print aarch64 or x86_64, according to the microarchitecture of the node it is run on.

+

Then create the path, and copy the code executable to the architecture-specific path: +

mkdir -p $HOME/.local/$(uname -m)/bin
+cp ./code $HOME/.local/$(uname -m)/bin
+
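After logging in again, you can check that the code executable is picked up from your PATH; the VS Code CLI prints its version with:
code --version
+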

+
+

To set up a remote server on the target system, +run the code executable that you downloaded with the tunnel argument. +You will be asked to choose whether to log in with a Microsoft or GitHub account (we have tested with GitHub):

+
> code tunnel --name=$CLUSTER_NAME-tunnel
+...
+? How would you like to log in to Visual Studio Code? ›
+  Microsoft Account
+❯ GitHub Account
+
+
+

Tip

+

Give the tunnel a unique name using the --name flag, which will later be listed on the VSCode UI.

+
+

You will be requested to go to github.com/login/device and enter an 8-digit code. +Once you have finished registering the service with GitHub, in VSCode on your PC/laptop open the "remote explorer" pane on the left hand side of the main window, and the connection will be visible under REMOTES (TUNNELS/SSH) -> Tunnels.

+
+

first time setting up a remote service

+

If this is the first time you have followed this procedure, you may have to sign in to GitHub in VSCode. +Click on the Remote Explorer button on the left hand side, and then find the following option:

+
REMOTES(TUNNELS/SSH)
+ Tunnels
+    Sign in to tunnels registered with GitHub
+
+

If you have not signed in to GitHub with VS Code editor, you will be redirected to the browser to sign in.

+

After signing in and authorizing VSCode, the open tunnel should be visible under REMOTES (TUNNELS/SSH) -> Tunnels.

+
+

Using with uenv

+

To use a uenv with VSCode, the uenv must be started before calling code tunnel. +Log into the target system and start the uenv, then start the remote server, for example: +

# log into daint (this could be any other Alps cluster)
+ssh daint
+# start a uenv session on the login node
+uenv start --view=default prgenv-gnu/24.11:v1
+# then start the tunnel
+code tunnel --name=$CLUSTER_NAME-tunnel
+

+

Alternatively, you can execute code tunnel directly in the environment: +

ssh daint
+uenv run --view=default prgenv-gnu/24.11:v1 -- code tunnel --name=$CLUSTER_NAME-tunnel
+

+

Once the tunnel is configured, you can access it from VSCode.

+
+

Warning

+

If you plan to do any intensive work, such as repeated compilation of large projects or running Python code in Jupyter, please see the guide to running on a compute node below. +Running intensive workloads on login nodes, which are resources shared between all users, is against the CSCS fair usage of shared resources policy.

+
+

Using with containers

+
+

Todo

+

write a guide

+
+

Running on a compute node

+

If you plan to do computation using VSCode, you should first allocate resources on a compute node and set up your environment there.

+
+

directly create the tunnel using srun

+

You can directly execute the code tunnel command using srun: +

ssh daint
+srun --uenv=prgenv-gnu/24.11:v1 --view=default -t120 -n1 --pty code tunnel --name=$CLUSTER_NAME-tunnel
+

+
    +
  • --uenv and --view set up the uenv
  • +
  • -t120 requests a 2 hour (120 minute) reservation
  • +
  • -n1 requests a single rank - only one rank/process is required for VSCode
  • +
  • --pty allows forwarding of terminal I/O, required to sign in to GitHub
  • +
+

Once the job allocation is granted, you will be prompted to log into GitHub, the same as starting a session on the login node. +If you don't want to use a uenv, the command is even simpler: +

ssh daint
+srun -t120 -n1 --pty code tunnel --name=$CLUSTER_NAME-tunnel
+

+
+
+

log into a node before starting

+

It is also possible to log into a compute node before executing the code tunnel command, if that suits your workflow: +

# log into daint
+ssh daint
+
+# start an interactive shell session
+srun -t120 -n1 --pty bash
+
+# set up the environment before starting the tunnel
+uenv start prgenv-gnu/24.11:v1 --view=default
+code tunnel --name=$CLUSTER_NAME-tunnel
+

+
    +
  • -t120 requests a 2 hour (120 minute) reservation
  • +
  • -n1 requests a single rank - only one rank/process is required for VSCode
  • +
  • --pty allows forwarding of terminal I/O, for bash to work interactively
  • +
+
+

Connecting via VSCode UI

+
+

Warning

+

This approach is not recommended, because while it may be easier to connect via the VS Code UI, it is much more difficult to configure the connection so that you can use uenv, containers or compute nodes.

+
+
+

Todo

+

Write the guide

+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/access/web/index.html b/access/web/index.html new file mode 100644 index 0000000..2fe6dc7 --- /dev/null +++ b/access/web/index.html @@ -0,0 +1,2313 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Web Services - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + +

+

Accessing CSCS Web Portals

+

Most services at CSCS are connected to the CSCS Single Sign-On gate. +This gives users the comfort of not having to sign in separately to each individual service connected to this gate, and increases security. +Furthermore, the Single Sign-On gate allows users to recover forgotten passwords and authenticate using a third-party account. The login page looks like this:

+

+

Using MFA to access web-based services

+

After you have completed the MFA setup, you will be asked to enter your login/password and the OTP to access all web-based services.

+

Enter username and password.

+

mfa-login

+

Then you will be prompted to enter the 6-digit code obtained from your device.

+

mfa-login

+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/accounts/index.html b/accounts/index.html new file mode 100644 index 0000000..c29eb98 --- /dev/null +++ b/accounts/index.html @@ -0,0 +1,2338 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Index - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + +

+

Getting and Managing Accounts

+

Users at CSCS have one account that can be used to access all services and systems at CSCS. +To get an account, you must be invited by a member of CSCS project administration or by the principal investigator (PI) of a current project at CSCS.

+
+

Getting a project at CSCS for PIs

+

In order to get an account at CSCS, or to request access for the members of your team, you first need to get a project at CSCS. +CSCS issues calls for proposals that are announced via the CSCS website and by e-mail. +More information about upcoming calls is available on the CSCS website.

+
+

New PIs who have successfully applied for a preparatory project will receive an invitation from CSCS to get an account at CSCS. +PIs can then invite members of their groups to join their project.

+
+

Info

+

It is possible for users to be part of multiple projects by being invited separately by the PI of each project.

+
+
+

Note

+

Accounts are bound to projects, and accounts will be closed with the project unless the account is also part of another open project.

+
+

Tools for managing accounts and projects

+

The tool used to manage projects and accounts depends on the platform on which the project was granted:

+ +
+

Note

+

The portal.cscs.ch site will be used to manage all projects in the future.

+
+

Signing up for a new account

+

New users who do not already have an account at CSCS, including PIs, need to provide the following information before CSCS can open their account:

+
    +
  • a scanned copy of your passport or a recognised ID card.
      +
    • this will be deleted by CSCS immediately after the account has been created.
    • +
    +
  • +
  • an institutional email address (Gmail, Hotmail, etc. will not be accepted)
  • +
  • correct information (title, name, etc.)
  • +
+

New accounts are usually opened within 48 hours.

+

Using different accounts

+

To use a different account, log out of the Single Sign-On gate: open the tool used to manage your project (account.cscs.ch or portal.cscs.ch) and select "Log out of CSCS" from the upper-right profile icon.

+

Signing in with a third-party account

+

All users at CSCS need to go through the standard registration process and get a CSCS account. In addition, they can also link their CSCS account to an external account, e.g. the one from their home institution. +In this case, they can sign in to CSCS services using their home institution credentials instead of the CSCS username/password. +This linking happens only during the Single Sign-On procedure described above; from that point on, and until the user logs out, the user identifier presented to all CSCS services is the CSCS username, not the external one. +The number of external institutions that are allowed to link their accounts is limited and is displayed on the login page.

+

Linking an external account can be done in the Profile section (upper-right corner) of your account page at the tool used to manage your project, account.cscs.ch or portal.cscs.ch.

+

Regulations and Policies

+

Please note that as soon as you receive and accept an invitation to get an account at CSCS, you agree to the CSCS/ETHZ regulations.

+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/accounts/ump/index.html b/accounts/ump/index.html new file mode 100644 index 0000000..458cd31 --- /dev/null +++ b/accounts/ump/index.html @@ -0,0 +1,2376 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Account and Resources Management Tool - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + +

+

Account and Resources Management Tool

+

The Swiss National Supercomputing Centre (CSCS) offers a web-based tool for users to manage their accounts and projects at account.cscs.ch.

+

With this tool, users can:

+
    +
  • Access their profile, manage institutional details, or reset their password.
  • +
  • List the projects they belong to, including closed ones.
  • +
  • Check details on each project, quotas, and current utilization.
  • +
  • Get an overview of where their files are stored at CSCS (including home directories, scratch, etc.).
  • +
+

For group leaders (or PIs), the tool allows:

+
    +
  • Managing user membership and access control.
  • +
  • Inviting users to their projects via email. Existing users can accept immediately, while new users will receive instructions to create an account and join the project.
  • +
  • Removing users from their projects.
  • +
  • Selecting which users can access a system (and submit jobs) and which ones can only access project data.
  • +
  • Defining one or more deputies to perform such tasks.
    +Note: The responsibility for what happens within the project still lies with the group leader or PI.
  • +
+

A short guideline on how to perform these tasks is provided below.

+

Usage

+

The tool is designed to be intuitive and comprises the following main areas:

+
    +
  • A) Account selector: For users with multiple accounts (e.g., service accounts).
  • +
  • B) Profile management: To view and edit the account's institutional details and change the password.
  • +
  • C) Project membership: To show the selected project in detail.
  • +
  • D) Storage: Where users can see where they have stored their files (home, scratch, and project areas).
  • +
  • E) Main view
  • +
+

Screenshot

+

Membership Management (for Group Leaders and Deputies Only)

+

To invite users to a selected project, group leaders or their deputies need to:

+
    +
  1. Select the project on the left menu.
  2. +
  3. Click the "Members" tab.
  4. +
  5. Scroll down to the "Users" (or "Deputies" to manage deputies) section.
  6. +
  7. Use the "+" (plus) button on the right of the section and enter the given and family names and email address of the invitee.
    + The invitee will receive instructions on how to join the project. The group leader will get a confirmation on whether the invitee has accepted or rejected the invitation.
    + If the invitee does not have an account, they will also receive instructions on how to create one, which needs to be verified by CSCS administration staff.
  8. +
+

To remove users from a selected project, group leaders or their deputies need to:

+
    +
  1. Repeat steps 1 to 3 above.
  2. +
  3. Use the icon with the three horizontal lines (see screenshot below) that is on the right of the user and select "Remove user."
  4. +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/accounts/waldur/index.html b/accounts/waldur/index.html new file mode 100644 index 0000000..c7762e5 --- /dev/null +++ b/accounts/waldur/index.html @@ -0,0 +1,2405 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Project and Resources Management Tool - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + +

+

The Project and Resources Management Tool

+

CSCS account managers, PIs, and deputy PIs can invite users to their respective projects by following the steps below on CSCS's new project management portal.

+
+

Info

+

The new project management portal is currently only used by the Machine Learning Platform. +All other platforms use the old user management portal.

+
+

Log in to the portal

+

Navigate to the project management portal at portal.cscs.ch.

+

Select the Organisation

+

After logging in to the portal, choose the organization in which the project was created.

+
+

Todo

+

screenshot

+
+

In this example, the project is hosted by the CSCS organization and is named csstaff_n. From the organization dashboard, navigate to Projects and click on the csstaff_n project.

+
+

Todo

+

screenshot

+
+

Invite users

+

From the project dashboard, navigate to Team -> Invitations

+
+

Todo

+

screenshot

+
+
+

Info

+

Using both the web interface and bulk invitation, the following roles can be assigned in the tool:

+
    +
  • Project administrator: PI
  • +
  • Project manager: deputy PI
  • +
  • Project member: team member
  • +
+
+
+
+
+

To invite a user, click on the "Invite Users" button on the right hand side of the tab.

+
+

Todo

+

screenshot

+
+
+

Todo

+

screenshot

+
+
+
+

It is also possible to bulk invite users by preparing a CSV file and uploading it in this step.

+
Email,Role,Project
+CragAlvarado@example.com,Project member,prj02
+Andrease@example.com,Project member,prj02
+JoannWaters@example.com,Project administrator,prj02
+DonnaSchwartz@example.com,Project manager,prj02
+
+
+
+
+
+

Note

+

An email will be sent to the invited user:

+
    +
  • users who already have CSCS accounts should click on the link in the email they received, and authenticate against CSCS KeyCloak with username, password, and OTP to accept the invitation.
  • +
  • new users should follow the procedure to create a CSCS account.
  • +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/alps/hardware/index.html b/alps/hardware/index.html new file mode 100644 index 0000000..a4ceead --- /dev/null +++ b/alps/hardware/index.html @@ -0,0 +1,2545 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Hardware - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + +

+

Alps Hardware

+

Alps is an HPE Cray EX3000 system: a liquid-cooled, blade-based, high-density system.

+
+

Todo

+

this is a skeleton - all of the details need to be filled in

+
+

Alps Cabinets

+

The basic building block of the system is a liquid-cooled cabinet. +A single cabinet can accommodate up to 64 compute blade slots within 8 compute chassis. The cabinet is not configured with any cooling fans: +all cooling needs for the cabinet are provided by direct liquid cooling and the CDU. +This approach provides greater efficiency for rack-level cooling, decreases the power costs associated with cooling (no blowers), and utilizes a single water source per CDU. One cabinet supports the following:

+
    +
  • 8 compute chassis
  • +
  • 4 power shelves with a maximum of 6 rectifiers per shelf (24 total 12.5 kW or 15 kW rectifiers per cabinet)
  • +
  • 4 PDUs (1 per power shelf)
  • +
  • 3 power input whips (3-phase)
  • +
  • Maximum of 64 quad-blade compute blades
  • +
  • Maximum of 64 Slingshot switch blades
  • +
+

Alps High Speed Network

+
+

Todo

+

information about the network.

+
    +
  • Details about SlingShot 11.
      +
    • how many NICS per node
    • +
    • raw feeds and speeds
    • +
    +
  • +
  • Some OSU benchmark results.
  • +
  • GPU-aware communication
  • +
  • slingshot is not infiniband - there is no NVSwitch
  • +
+
+

Alps Nodes

+

Alps was installed in phases, starting with the installation of 1,024 AMD Rome dual-socket CPU nodes in 2020, through to the main installation of 2,688 Grace-Hopper nodes in 2024.

+

There are currently four node types in Alps, with another becoming available in 2025:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
type            blades   nodes   CPU sockets   GPU devices
NVIDIA GH200    1344     2688    10,752        10,752
AMD Rome        256      1024    2,048         --
NVIDIA A100     72       144     144           576
AMD MI250x      12       24      24            96
AMD MI300A      64       128     512           512
+

+

NVIDIA GH200 GPU Nodes

+

Perry Peak

+

+

AMD Rome CPU Nodes

+

EX425

+

+

NVIDIA A100 GPU Nodes

+

Grizzly Peak

+

+

AMD MI250x GPU Nodes

+

Bard Peak

+

+

AMD MI300A GPU Nodes

+

Parry Peak

+
+

coming soon

+

H1 2025

+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/alps/index.html b/alps/index.html new file mode 100644 index 0000000..2f67183 --- /dev/null +++ b/alps/index.html @@ -0,0 +1,2266 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Index - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + +

+

Alps Infrastructure

+

Alps is a general-purpose compute and data Research Infrastructure (RI) open to the broad community of researchers in Switzerland and the rest of the world. +Alps provides a high-impact, challenging, and innovative RI that allows Switzerland to advance science and impact society.

+

Alps enables the creation of versatile clusters (vClusters) that can be tailored to the specific needs of users while maintaining confidentiality. +For example, one vCluster is dedicated to MeteoSwiss' numerical weather forecasts, another to the User Lab, and another to Machine Learning and Artificial Intelligence.

+

A key feature of Alps is multi-tenancy, where a tenant is an organization, typically a research institution, that deploys, operates, or manages its platform on the Alps infrastructure. +Tenants have privileged access to resource nodes, enabling them to deploy their own services and resource configurations. +Additionally, network segregation ensures secure and isolated communication, with the option to connect to the tenant's private network.

+
+
    +
  • +

    Platforms

    +

    Alps Platforms

    +
  • +
  • +

    Clusters

    +

    The resources on Alps are partitioned and configured into versatile software-defined clusters (vClusters).

    +

    Alps vClusters

    +
  • +
  • +

    Hardware

    +

    Learn about the node types and networking infrastructure in Alps.

    +

    Alps Hardware

    +
  • +
  • +

    Storage

    +

    Learn about the file systems attached to Alps.

    +

    Alps Storage

    +
  • +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/alps/platforms/index.html b/alps/platforms/index.html new file mode 100644 index 0000000..0cacf8a --- /dev/null +++ b/alps/platforms/index.html @@ -0,0 +1,2272 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Platforms - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + +

+

Platforms on Alps

+

A platform represents a set of scientific services along with compute and data resources hosted on the Alps research infrastructure, provided to a specific scientific community. +Each platform addresses particular research needs and domains, such as climate and weather modeling, machine learning, or high-performance computing applications. +A platform can consist of one or multiple clusters, and its services can be managed either by CSCS or by the scientific community itself, including access control, usage policies, and support.

+
+
    +
  • +

    Machine Learning Platform

    +

    The Machine Learning Platform (MLP) hosts ML and AI researchers.

    +

    MLP

    +
  • +
  • +

    HPC Platform

    +
    +

    Todo

    +
    +

    HPCP

    +
  • +
  • +

    Climate and Weather Platform

    +
    +

    Todo

    +
    +

    CWP

    +
  • +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/alps/storage/index.html b/alps/storage/index.html new file mode 100644 index 0000000..7a76b67 --- /dev/null +++ b/alps/storage/index.html @@ -0,0 +1,2257 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Storage - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + +

+

Alps Storage

+
+

Todo

+

Document the main storage hardware attached to Alps:

+
    +
  • capstor
  • +
  • iopstor
  • +
  • vast
  • +
+

The focus of these docs would be the basic details.

+

The mounts, and how they are used for SCRATCH, STORE, PROJECT, HOME would be in the storage docs

+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/alps/vclusters/index.html b/alps/vclusters/index.html new file mode 100644 index 0000000..0b43b4d --- /dev/null +++ b/alps/vclusters/index.html @@ -0,0 +1,2337 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Clusters - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + +

+

Alps Clusters

+

A vCluster (versatile software-defined cluster) is a logical partition of the supercomputing resources where platform services are deployed. It serves as a dedicated environment supporting a specific platform. The composition of resources and services for each vCluster is defined in a configuration file used by an automated pipeline for deployment. Once deployed by CSCS, the vCluster becomes immutable.

+

Clusters on Alps

+

Clusters on Alps are provided as part of different platforms.

+
+
    +
  • +

    Machine Learning Platform

    +

    Clariden is the main Grace-Hopper cluster

    +

    Clariden

    +

    Bristen is a small system with NVIDIA A100 nodes, used for todo

    +

    Bristen

    +
  • +
+
+
+
    +
  • +

    HPC Platform

    +

    Daint is the main Grace-Hopper cluster for GPU workloads

    +

    Daint

    +

    Eiger is a large AMD-CPU cluster for CPU workloads

    +

    Eiger

    +
  • +
+
+
+
    +
  • +

    Climate and Weather Platform

    +

    Santis is a Grace-Hopper cluster for climate and weather simulation

    +

    Santis

    +
  • +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
\ No newline at end of file
diff --git a/assets/cscs-logo.png b/assets/cscs-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..ea4d681e957e80896984d6f90cfddba3a17927f1
GIT binary patch
literal 10001

diff --git a/assets/javascripts/bundle.f1b6f286.min.js b/assets/javascripts/bundle.f1b6f286.min.js
new file mode 100644
index 0000000..5082675
--- /dev/null
+++ b/assets/javascripts/bundle.f1b6f286.min.js
t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ni.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function jt(e){return e.type===1}function dr(e){return e.type===3}function ai(e,t){let r=yn(e);return O(I(location.protocol!=="file:"),ze("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function si(e){var l;let{selectedVersionSitemap:t,selectedVersionBaseURL:r,currentLocation:o,currentBaseURL:n}=e,i=(l=Xr(n))==null?void 0:l.pathname;if(i===void 0)return;let a=ss(o.pathname,i);if(a===void 0)return;let s=ps(t.keys());if(!t.has(s))return;let p=Xr(a,s);if(!p||!t.has(p.href))return;let c=Xr(a,r);if(c)return c.hash=o.hash,c.search=o.search,c}function Xr(e,t){try{return new URL(e,t)}catch(r){return}}function ss(e,t){if(e.startsWith(t))return e.slice(t.length)}function cs(e,t){let r=Math.min(e.length,t.length),o;for(o=0;oS)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>h(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),re(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?S:(i.preventDefault(),I(new URL(p)))}}return S}),v(i=>ur(i).pipe(m(a=>{var s;return(s=si({selectedVersionSitemap:a,selectedVersionBaseURL:i,currentLocation:ye(),currentBaseURL:t.base}))!=null?s:i})))))).subscribe(n=>lt(n,!0)),z([r,o]).subscribe(([n,i])=>{R(".md-header__topic").appendChild(Cn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var s;let i=new URL(t.base),a=__md_get("__outdated",sessionStorage,i);if(a===null){a=!0;let p=((s=t.version)==null?void 0:s.default)||"latest";Array.isArray(p)||(p=[p]);e:for(let c of p)for(let l of n.aliases.concat(n.version))if(new RegExp(c,"i").test(l)){a=!1;break e}__md_set("__outdated",a,sessionStorage,i)}if(a)for(let p of ae("outdated"))p.hidden=!1})}function ls(e,{worker$:t}){let{searchParams:r}=ye();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),ze("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=ye();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=O(t.pipe(Ae(jt)),h(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function pi(e,{worker$:t}){let r=new g,o=r.pipe(Z(),ie(!0));z([t.pipe(Ae(jt)),r],(i,a)=>a).pipe(te("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(te("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),h(e.form,"reset").pipe(W(o)).subscribe(()=>e.focus());let n=R("header [for=__search]");return h(n,"click").subscribe(()=>e.focus()),ls(e,{worker$:t}).pipe(w(i=>r.next(i)),_(()=>r.complete()),m(i=>$({ref:e},i)),G(1))}function li(e,{worker$:t,query$:r}){let o=new g,n=on(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=R(":scope > :first-child",e),s=R(":scope > :last-child",e);ze("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(re(r),Wr(t.pipe(Ae(jt)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 
0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(w(()=>s.innerHTML=""),v(({items:l})=>O(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Be(4),Dr(n),v(([f])=>f)))),m(Mn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(ne(l=>{let f=fe("details",l);return typeof f=="undefined"?S:h(f,"toggle").pipe(W(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(w(l=>o.next(l)),_(()=>o.complete()),m(l=>$({ref:e},l)))}function ms(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=ye();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function mi(e,t){let r=new g,o=r.pipe(Z(),ie(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),h(e,"click").pipe(W(o)).subscribe(n=>n.preventDefault()),ms(e,t).pipe(w(n=>r.next(n)),_(()=>r.complete()),m(n=>$({ref:e},n)))}function fi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=O(h(n,"keydown"),h(n,"focus")).pipe(ve(se),m(()=>n.value),K());return o.pipe(He(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(w(s=>o.next(s)),_(()=>o.complete()),m(()=>({ref:e})))}function ui(e,{index$:t,keyboard$:r}){let o=xe();try{let n=ai(o.search,t),i=Se("search-query",e),a=Se("search-result",e);h(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Ie();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of P(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,d])=>d-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...P(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Ie()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=pi(i,{worker$:n});return O(s,li(a,{worker$:n,query$:s})).pipe(Re(...ae("search-share",e).map(p=>mi(p,{query$:s})),...ae("search-suggest",e).map(p=>fi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ye}}function di(e,{index$:t,location$:r}){return z([t,r.pipe(Q(ye()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>ii(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function fs(e,{viewport$:t,main$:r}){let 
o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Zr(e,o){var n=o,{header$:t}=n,r=so(n,["header$"]);let i=R(".md-sidebar__scrollwrap",e),{y:a}=De(i);return C(()=>{let s=new g,p=s.pipe(Z(),ie(!0)),c=s.pipe(Me(0,me));return c.pipe(re(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of P(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2})}}}),ue(P("label[tabindex]",e)).pipe(ne(l=>h(l,"click").pipe(ve(se),m(()=>l),W(p)))).subscribe(l=>{let f=R(`[id="${l.htmlFor}"]`);R(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),fs(e,r).pipe(w(l=>s.next(l)),_(()=>s.complete()),m(l=>$({ref:e},l)))})}function hi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return st(je(`${r}/releases/latest`).pipe(de(()=>S),m(o=>({version:o.tag_name})),Ve({})),je(r).pipe(de(()=>S),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Ve({}))).pipe(m(([o,n])=>$($({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return je(r).pipe(m(o=>({repositories:o.public_repos})),Ve({}))}}function bi(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return st(je(`${r}/releases/permalink/latest`).pipe(de(()=>S),m(({tag_name:o})=>({version:o})),Ve({})),je(r).pipe(de(()=>S),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Ve({}))).pipe(m(([o,n])=>$($({},o),n)))}function vi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return hi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return bi(r,o)}return S}var us;function ds(e){return us||(us=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return S}return vi(e.href).pipe(w(o=>__md_set("__source",o,sessionStorage)))}).pipe(de(()=>S),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function gi(e){let t=R(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(_n(o)),t.classList.add("md-source__repository--active")}),ds(e).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function hs(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),te("hidden"))}function yi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):hs(e,t)).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function bs(e,{viewport$:t,header$:r}){let o=new Map,n=P(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(te("height"),m(({height:s})=>{let p=Se("main"),c=R(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(te("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let 
d=f.offsetParent;for(;d;d=d.offsetParent)u+=d.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),He(i),v(([p,c])=>t.pipe(Fr(([l,f],{offset:{y:u},size:d})=>{let y=u+d.height>=Math.floor(s.height);for(;f.length;){let[,L]=f[0];if(L-c=u&&!y)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Be(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(Z(),ie(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=O(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),He(o.pipe(ve(se))),re(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(W(a),te("offset"),_e(250),Ce(1),W(n.pipe(Ce(1))),ct({delay:250}),re(i)).subscribe(([,{prev:s}])=>{let p=ye(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),bs(e,{viewport$:t,header$:r}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))})}function vs(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Be(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),W(o.pipe(Ce(1))),ie(!0),ct({delay:250}),m(a=>({hidden:a})))}function Ei(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(Z(),ie(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(W(a),te("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),h(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),vs(e,{viewport$:t,main$:o,target$:n}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))}function wi({document$:e,viewport$:t}){e.pipe(v(()=>P(".md-ellipsis")),ne(r=>tt(r).pipe(W(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?mt(n,{viewport$:t}).pipe(W(e.pipe(Ce(1))),_(()=>n.removeAttribute("title"))):S})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>P(".md-status")),ne(r=>mt(r,{viewport$:t}))).subscribe()}function Ti({document$:e,tablet$:t}){e.pipe(v(()=>P(".md-toggle--indeterminate")),w(r=>{r.indeterminate=!0,r.checked=!1}),ne(r=>h(r,"change").pipe(Vr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),re(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function gs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Si({document$:e}){e.pipe(v(()=>P("[data-md-scrollfix]")),w(t=>t.removeAttribute("data-md-scrollfix")),b(gs),ne(t=>h(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function 
Oi({viewport$:e,tablet$:t}){z([ze("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),re(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ys(){return location.protocol==="file:"?Tt(`${new URL("search/search_index.js",eo.base)}`).pipe(m(()=>__index),G(1)):je(new URL("search/search_index.json",eo.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Go(),Ut=sn(),Lt=ln(Ut),to=an(),Oe=gn(),hr=Pt("(min-width: 960px)"),Mi=Pt("(min-width: 1220px)"),_i=mn(),eo=xe(),Ai=document.forms.namedItem("search")?ys():Ye,ro=new g;Zn({alert$:ro});var oo=new g;B("navigation.instant")&&oi({location$:Ut,viewport$:Oe,progress$:oo}).subscribe(ot);var Li;((Li=eo.version)==null?void 0:Li.provider)==="mike"&&ci({document$:ot});O(Ut,Lt).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});to.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&<(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&<(r);break;case"Enter":let o=Ie();o instanceof HTMLLabelElement&&o.click()}});wi({viewport$:Oe,document$:ot});Ti({document$:ot,tablet$:hr});Si({document$:ot});Oi({viewport$:Oe,tablet$:hr});var 
rt=Kn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Gn(e,{viewport$:Oe,header$:rt})),G(1)),xs=O(...ae("consent").map(e=>En(e,{target$:Lt})),...ae("dialog").map(e=>qn(e,{alert$:ro})),...ae("palette").map(e=>Jn(e)),...ae("progress").map(e=>Xn(e,{progress$:oo})),...ae("search").map(e=>ui(e,{index$:Ai,keyboard$:to})),...ae("source").map(e=>gi(e))),Es=C(()=>O(...ae("announce").map(e=>xn(e)),...ae("content").map(e=>zn(e,{viewport$:Oe,target$:Lt,print$:_i})),...ae("content").map(e=>B("search.highlight")?di(e,{index$:Ai,location$:Ut}):S),...ae("header").map(e=>Yn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("header-title").map(e=>Bn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Mi,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>yi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>xi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Lt})),...ae("top").map(e=>Ei(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Lt})))),Ci=ot.pipe(v(()=>Es),Re(xs),G(1));Ci.subscribe();window.document$=ot;window.location$=Ut;window.target$=Lt;window.keyboard$=to;window.viewport$=Oe;window.tablet$=hr;window.screen$=Mi;window.print$=_i;window.alert$=ro;window.progress$=oo;window.component$=Ci;})(); +//# sourceMappingURL=bundle.f1b6f286.min.js.map + diff --git a/assets/javascripts/bundle.f1b6f286.min.js.map b/assets/javascripts/bundle.f1b6f286.min.js.map new file mode 100644 index 0000000..2644bf1 --- /dev/null +++ b/assets/javascripts/bundle.f1b6f286.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/escape-html/index.js", "node_modules/clipboard/dist/clipboard.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/tslib/tslib.es6.mjs", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", 
"node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", 
"node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", 
"src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/findurl/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": 
["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n 
document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? 
html + str.substring(lastIndex, index)\n : html;\n}\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*\n * Copyright (c) 2016-2025 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF 
ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/******************************************************************************\nCopyright (c) Microsoft Corporation.\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\nAND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\nPERFORMANCE OF THIS SOFTWARE.\n***************************************************************************** */\n/* global Reflect, Promise, SuppressedError, Symbol, Iterator */\n\nvar extendStatics = function(d, b) {\n extendStatics = Object.setPrototypeOf ||\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\n return extendStatics(d, b);\n};\n\nexport function __extends(d, b) {\n if (typeof b !== \"function\" && b !== null)\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\n extendStatics(d, b);\n function __() { this.constructor = d; }\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\n}\n\nexport var __assign = function() {\n __assign = Object.assign || function __assign(t) {\n for (var s, i = 1, n = arguments.length; i < n; i++) {\n s = arguments[i];\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\n }\n return t;\n }\n return __assign.apply(this, arguments);\n}\n\nexport function __rest(s, e) {\n var t = {};\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\n t[p] = s[p];\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\n t[p[i]] = s[p[i]];\n }\n return t;\n}\n\nexport function __decorate(decorators, target, key, desc) {\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\n return c > 3 && r && Object.defineProperty(target, key, r), r;\n}\n\nexport function __param(paramIndex, decorator) {\n return function (target, key) { decorator(target, key, paramIndex); }\n}\n\nexport function __esDecorate(ctor, descriptorIn, decorators, contextIn, initializers, extraInitializers) {\n function accept(f) { if (f !== void 0 && typeof f !== \"function\") throw new TypeError(\"Function expected\"); return f; }\n var kind = contextIn.kind, key = kind === \"getter\" ? \"get\" : kind === \"setter\" ? \"set\" : \"value\";\n var target = !descriptorIn && ctor ? contextIn[\"static\"] ? ctor : ctor.prototype : null;\n var descriptor = descriptorIn || (target ? Object.getOwnPropertyDescriptor(target, contextIn.name) : {});\n var _, done = false;\n for (var i = decorators.length - 1; i >= 0; i--) {\n var context = {};\n for (var p in contextIn) context[p] = p === \"access\" ? {} : contextIn[p];\n for (var p in contextIn.access) context.access[p] = contextIn.access[p];\n context.addInitializer = function (f) { if (done) throw new TypeError(\"Cannot add initializers after decoration has completed\"); extraInitializers.push(accept(f || null)); };\n var result = (0, decorators[i])(kind === \"accessor\" ? { get: descriptor.get, set: descriptor.set } : descriptor[key], context);\n if (kind === \"accessor\") {\n if (result === void 0) continue;\n if (result === null || typeof result !== \"object\") throw new TypeError(\"Object expected\");\n if (_ = accept(result.get)) descriptor.get = _;\n if (_ = accept(result.set)) descriptor.set = _;\n if (_ = accept(result.init)) initializers.unshift(_);\n }\n else if (_ = accept(result)) {\n if (kind === \"field\") initializers.unshift(_);\n else descriptor[key] = _;\n }\n }\n if (target) Object.defineProperty(target, contextIn.name, descriptor);\n done = true;\n};\n\nexport function __runInitializers(thisArg, initializers, value) {\n var useValue = arguments.length > 2;\n for (var i = 0; i < initializers.length; i++) {\n value = useValue ? initializers[i].call(thisArg, value) : initializers[i].call(thisArg);\n }\n return useValue ? value : void 0;\n};\n\nexport function __propKey(x) {\n return typeof x === \"symbol\" ? 
x : \"\".concat(x);\n};\n\nexport function __setFunctionName(f, name, prefix) {\n if (typeof name === \"symbol\") name = name.description ? \"[\".concat(name.description, \"]\") : \"\";\n return Object.defineProperty(f, \"name\", { configurable: true, value: prefix ? \"\".concat(prefix, \" \", name) : name });\n};\n\nexport function __metadata(metadataKey, metadataValue) {\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\n}\n\nexport function __awaiter(thisArg, _arguments, P, generator) {\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\n return new (P || (P = Promise))(function (resolve, reject) {\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\n step((generator = generator.apply(thisArg, _arguments || [])).next());\n });\n}\n\nexport function __generator(thisArg, body) {\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === \"function\" ? Iterator : Object).prototype);\n return g.next = verb(0), g[\"throw\"] = verb(1), g[\"return\"] = verb(2), typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\n function verb(n) { return function (v) { return step([n, v]); }; }\n function step(op) {\n if (f) throw new TypeError(\"Generator is already executing.\");\n while (g && (g = 0, op[0] && (_ = 0)), _) try {\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\n if (y = 0, t) op = [op[0] & 2, t.value];\n switch (op[0]) {\n case 0: case 1: t = op; break;\n case 4: _.label++; return { value: op[1], done: false };\n case 5: _.label++; y = op[1]; op = [0]; continue;\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\n default:\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\n if (t[2]) _.ops.pop();\n _.trys.pop(); continue;\n }\n op = body.call(thisArg, _);\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\n }\n}\n\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n var desc = Object.getOwnPropertyDescriptor(m, k);\n if (!desc || (\"get\" in desc ? 
!m.__esModule : desc.writable || desc.configurable)) {\n desc = { enumerable: true, get: function() { return m[k]; } };\n }\n Object.defineProperty(o, k2, desc);\n}) : (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n o[k2] = m[k];\n});\n\nexport function __exportStar(m, o) {\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\n}\n\nexport function __values(o) {\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\n if (m) return m.call(o);\n if (o && typeof o.length === \"number\") return {\n next: function () {\n if (o && i >= o.length) o = void 0;\n return { value: o && o[i++], done: !o };\n }\n };\n throw new TypeError(s ? \"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\n}\n\nexport function __read(o, n) {\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\n if (!m) return o;\n var i = m.call(o), r, ar = [], e;\n try {\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\n }\n catch (error) { e = { error: error }; }\n finally {\n try {\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\n }\n finally { if (e) throw e.error; }\n }\n return ar;\n}\n\n/** @deprecated */\nexport function __spread() {\n for (var ar = [], i = 0; i < arguments.length; i++)\n ar = ar.concat(__read(arguments[i]));\n return ar;\n}\n\n/** @deprecated */\nexport function __spreadArrays() {\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\n r[k] = a[j];\n return r;\n}\n\nexport function __spreadArray(to, from, pack) {\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\n if (ar || !(i in from)) {\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\n ar[i] = from[i];\n }\n }\n return to.concat(ar || Array.prototype.slice.call(from));\n}\n\nexport function __await(v) {\n return this instanceof __await ? (this.v = v, this) : new __await(v);\n}\n\nexport function __asyncGenerator(thisArg, _arguments, generator) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\n return i = Object.create((typeof AsyncIterator === \"function\" ? AsyncIterator : Object).prototype), verb(\"next\"), verb(\"throw\"), verb(\"return\", awaitReturn), i[Symbol.asyncIterator] = function () { return this; }, i;\n function awaitReturn(f) { return function (v) { return Promise.resolve(v).then(f, reject); }; }\n function verb(n, f) { if (g[n]) { i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; if (f) i[n] = f(i[n]); } }\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\n function fulfill(value) { resume(\"next\", value); }\n function reject(value) { resume(\"throw\", value); }\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\n}\n\nexport function __asyncDelegator(o) {\n var i, p;\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? 
{ value: __await(o[n](v)), done: false } : f ? f(v) : v; } : f; }\n}\n\nexport function __asyncValues(o) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var m = o[Symbol.asyncIterator], i;\n return m ? m.call(o) : (o = typeof __values === \"function\" ? __values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\n}\n\nexport function __makeTemplateObject(cooked, raw) {\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\n return cooked;\n};\n\nvar __setModuleDefault = Object.create ? (function(o, v) {\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\n}) : function(o, v) {\n o[\"default\"] = v;\n};\n\nexport function __importStar(mod) {\n if (mod && mod.__esModule) return mod;\n var result = {};\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\n __setModuleDefault(result, mod);\n return result;\n}\n\nexport function __importDefault(mod) {\n return (mod && mod.__esModule) ? mod : { default: mod };\n}\n\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\n}\n\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\n}\n\nexport function __classPrivateFieldIn(state, receiver) {\n if (receiver === null || (typeof receiver !== \"object\" && typeof receiver !== \"function\")) throw new TypeError(\"Cannot use 'in' operator on non-object\");\n return typeof state === \"function\" ? 
receiver === state : state.has(receiver);\n}\n\nexport function __addDisposableResource(env, value, async) {\n if (value !== null && value !== void 0) {\n if (typeof value !== \"object\" && typeof value !== \"function\") throw new TypeError(\"Object expected.\");\n var dispose, inner;\n if (async) {\n if (!Symbol.asyncDispose) throw new TypeError(\"Symbol.asyncDispose is not defined.\");\n dispose = value[Symbol.asyncDispose];\n }\n if (dispose === void 0) {\n if (!Symbol.dispose) throw new TypeError(\"Symbol.dispose is not defined.\");\n dispose = value[Symbol.dispose];\n if (async) inner = dispose;\n }\n if (typeof dispose !== \"function\") throw new TypeError(\"Object not disposable.\");\n if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };\n env.stack.push({ value: value, dispose: dispose, async: async });\n }\n else if (async) {\n env.stack.push({ async: true });\n }\n return value;\n}\n\nvar _SuppressedError = typeof SuppressedError === \"function\" ? SuppressedError : function (error, suppressed, message) {\n var e = new Error(message);\n return e.name = \"SuppressedError\", e.error = error, e.suppressed = suppressed, e;\n};\n\nexport function __disposeResources(env) {\n function fail(e) {\n env.error = env.hasError ? new _SuppressedError(e, env.error, \"An error was suppressed during disposal.\") : e;\n env.hasError = true;\n }\n var r, s = 0;\n function next() {\n while (r = env.stack.pop()) {\n try {\n if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);\n if (r.dispose) {\n var result = r.dispose.call(r.value);\n if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });\n }\n else s |= 1;\n }\n catch (e) {\n fail(e);\n }\n }\n if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();\n if (env.hasError) throw env.error;\n }\n return next();\n}\n\nexport default {\n __extends,\n __assign,\n __rest,\n __decorate,\n __param,\n __metadata,\n __awaiter,\n __generator,\n __createBinding,\n __exportStar,\n __values,\n __read,\n __spread,\n __spreadArrays,\n __spreadArray,\n __await,\n __asyncGenerator,\n __asyncDelegator,\n __asyncValues,\n __makeTemplateObject,\n __importStar,\n __importDefault,\n __classPrivateFieldGet,\n __classPrivateFieldSet,\n __classPrivateFieldIn,\n __addDisposableResource,\n __disposeResources,\n};\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? 
args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an + +

The RESTful API HowTo provides some usage examples for the LTS API with curl and Python.

+

The Web portal: create a data collection page provides usage examples for the creation of a data collection in the LTS web portal.

+

The Web portal: define and upload objects page provides usage examples for definition and the upload of data collection objects in the LTS web portal.

+

The Licensing guide provides information about the data licenses available in LTS.

+

The Landing page page provides information about the LTS landing page.

+ + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/storage/object/index.html b/storage/object/index.html new file mode 100644 index 0000000..b775077 --- /dev/null +++ b/storage/object/index.html @@ -0,0 +1,2617 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Object Storage - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + +

Object Storage

+
+

Note

+

This page is currently incomplete and it is being updated following recent developments.

+
+

S3

+

CSCS offers a public cloud object storage service, based on the Ceph Object Gateway. The service can be accessed with S3-compatible clients.

+

General Information

+
    +
  • Endpoint: https://rgw.cscs.ch
  • +
  • URL: path-style in the format https://rgw.cscs.ch/%(bucket)s/key-name
  • +
  • Publicly accessible object links: https://rgw.cscs.ch/<tenant>:<bucket-name>/key-name
      +
    • after setting a proper bucket policy
    • +
    +
  • +
+

Usage Examples

+

AWS CLI

+

Configuration

+

The first step is to configure the profile:

+
> aws configure --profile naret-testuser
+AWS Access Key ID [None]: [REDACTED]
+AWS Secret Access Key [None]: [REDACTED]
+Default region name [None]: cscs-zonegroup
+Default output format [None]:
+
+

Then, settings such as the default endpoint and path-style addressing can be placed in the configuration file (typically ~/.aws/config):

+
[profile naret-testuser]
+endpoint_url = https://rgw.cscs.ch
+region = cscs-zonegroup
+s3 =
+    addressing_style = path
+
+
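As a quick check that the profile, endpoint, and path-style settings are picked up, the buckets of the tenant can be listed; a minimal sketch, assuming a bucket (here the test-bucket used below) already exists:

> aws --profile=naret-testuser s3 ls
> aws --profile=naret-testuser s3 ls s3://test-bucket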

Creating a pre-signed URL

+
> aws --profile=naret-testuser s3 presign s3://test-bucket/file.txt --expires-in 300
+
+https://rgw.cscs.ch/test-bucket/file.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=IA6AOCNMKPDXQ0YNA3DP%2F20241209%2Fcscs-zonegroup%2Fs3%2Faws4_request&X-Amz-Date=20241209T080748Z&X-Amz-Expires=300&X-Amz-SignedHeaders=host&X-Amz-Signature=f2e2adb457f6fd43401124e4ea2650fba528e614ab661f9c05e2fa2e77691b5d
+
+

Notice that the tenant part is missing from the URL: this is because S3 doesn't natively deal with multitenancy. +The correct object is retrieved based on the access key. +A more thorough explanation can be found in the RGW documentation.

+
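The generated URL can be used by anyone, without S3 credentials, until it expires. A minimal sketch of downloading the object with curl; the query string below is a placeholder for the full signed URL returned by the presign command:

> curl -o file.txt "https://rgw.cscs.ch/test-bucket/file.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&..."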

Making a bucket's contents anonymously accessible from the Internet

+

First, a bucket policy needs to be written:

+
> cat test-public-bucket-anon-from-internet.json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": "*",
+      "Action": "s3:GetObject",
+      "Resource": [
+        "arn:aws:s3:::test-public-bucket/*",
+        "arn:aws:s3:::test-public-bucket"
+      ]
+    }
+  ]
+}
+
+

Then, it can be applied to the bucket:

+
> aws --profile=naret-testuser s3api put-bucket-policy \
+      --bucket test-public-bucket --policy \
+      file://test-public-bucket-anon-from-internet.json
+
+

At this point, the objects in test-public-bucket are anonymously accessible via direct links of the form https://rgw.cscs.ch/<tenant>:test-public-bucket/key-name.
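For example, a download with curl no longer needs any credentials; a sketch assuming an object named file.txt was uploaded to the bucket (replace <tenant> with the actual tenant name):

> curl -O "https://rgw.cscs.ch/<tenant>:test-public-bucket/file.txt"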

+

s3cmd

+

The s3cmd client can be configured interactively:

+
> s3cmd --configure
+
+Enter new values or accept defaults in brackets with Enter.
+Refer to user manual for detailed description of all options.
+
+Access key and Secret key are your identifiers for Amazon S3. Leave them empty for using the env variables.
+Access Key: [REDACTED]
+Secret Key: [REDACTED]
+Default Region [US]: cscs-zonegroup
+
+Use "s3.amazonaws.com" for S3 Endpoint and not modify it to the target Amazon S3.
+S3 Endpoint [s3.amazonaws.com]: rgw.cscs.ch
+
+Use "%(bucket)s.s3.amazonaws.com" to the target Amazon S3. "%(bucket)s" and "%(location)s" vars can be used
+if the target S3 system supports dns based buckets.
+DNS-style bucket+hostname:port template for accessing a bucket [%(bucket)s.s3.amazonaws.com]: rgw.cscs.ch/%(bucket)s
+
+Encryption password is used to protect your files from reading
+by unauthorized persons while in transfer to S3
+Encryption password:
+Path to GPG program:
+
+When using secure HTTPS protocol all communication with Amazon S3
+servers is protected from 3rd party eavesdropping. This method is
+slower than plain HTTP, and can only be proxied with Python 2.7 or newer
+Use HTTPS protocol [Yes]: Yes
+
+On some networks all internet access must go through a HTTP proxy.
+Try setting it here if you can't connect to S3 directly
+HTTP Proxy server name:
+
+New settings:
+  Access Key: [REDACTED]
+  Secret Key: [REDACTED]
+  Default Region: cscs-zonegroup
+  S3 Endpoint: rgw.cscs.ch
+  DNS-style bucket+hostname:port template for accessing a bucket: rgw.cscs.ch/%(bucket)s
+  Encryption password:
+  Path to GPG program: None
+  Use HTTPS protocol: True
+  HTTP Proxy server name:
+  HTTP Proxy server port: 0
+
+

And then confirm.

+

IMPORTANT: The configuration is not complete yet.

+
> s3cmd ls s3://test-bucket
+ERROR: S3 error: 403 (SignatureDoesNotMatch)
+
+

To fix this, it is necessary to edit the .s3cfg file, normally located in the user's home directory, and change the signature_v2 setting to true.

+
~ > cat .s3cfg | grep signature_v2
+signature_v2 = True
+
+> s3cmd ls s3://test-bucket
+2024-12-09 08:05           15  s3://test-bucket/file.txt
+
+
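With the configuration working, data can be moved with the usual s3cmd commands; a minimal sketch with placeholder file names:

> s3cmd put local-file.txt s3://test-bucket/local-file.txt
> s3cmd get s3://test-bucket/local-file.txt downloaded-copy.txt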

Cyberduck

+

Configuration

+

In order to be able to connect to the S3 endpoint using Cyberduck, a profile supporting path-style requests must be downloaded from here.

+

cyberduck

+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/storage/transfer/index.html b/storage/transfer/index.html new file mode 100644 index 0000000..56ec64d --- /dev/null +++ b/storage/transfer/index.html @@ -0,0 +1,2437 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Data Transfer - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + +

Data Transfer

+

External Transfer

+

CSCS currently offers the CSCS Globus online endpoint for uploading data to and downloading data from CSCS.

+

The recommended way to transfer data externally is via the CSCS Globus online endpoint:

+
    +
  1. Follow the official get started documentation to login
      +
    • in case you don't have an organisation account, you can just use the option "Sign in with Google" +globus login
    • +
    +
  2. +
  3. Use the file manager to search for an endpoint by typing "CSCS"
      +
    • Please make sure that the login page belongs to the cscs.ch domain (shown in the URL)
    • +
    • The CSCS endpoint requires authentication, therefore use your CSCS credentials to log in +globus login
    • +
    +
  4. +
  5. Once logged in, you can transfer data to and from CSCS.
      +
    • if you want to transfer the data to another endpoint, just search for it and transfer the data
    • +
    • if you want to download the data to your local system, you will need the Globus Connect Personal client: the client will turn your local system into an endpoint, so you will be able to select it and transfer the data.
    • +
    +
  6. +
+

For more information about Globus Connect Personal, please read the official Frequently Asked Questions.

+

Currently Globus provides the following mount points at CSCS:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mount Point            | Description
/scratch/snx3000       | old Daint scratch area
/store                 | old Daint store area
/project               | old Daint project area
/users                 | old Daint home directory
/scratch/shared        | old Scratch-Shared area (old MeteoSwiss clusters)
/iopsstor/scratch/cscs | Mounted on Clariden
/capstor/scratch/cscs  | New Alps Daint scratch area
/capstor/store/cscs    | New Alps Daint store area
/capstor/users/cscs    | Home directory for Bristen/Scopi/Errigal
/vast/users/cscs       | New Alps vclusters home directory (Alps Daint and others)
+

Internal Transfer

+

The Slurm queue xfer is available on Piz Daint (Cray XC) and daint.alps to handle data transfers between internal CSCS file systems. +The queue has been created to transfer files and folders from /users, /project, /store or /capstor/store to the /capstor/scratch and /scratch file systems (stage-in) and vice versa (stage-out). +Currently the following commands are available on the clusters supporting the xfer queue:

+
cp
+mv
+rm
+rsync
+
+

You can adjust the Slurm batch script below to transfer your input data to $SCRATCH, setting the variable command to the Unix command that you intend to use, chosen from the list given above:

+
#!/bin/bash -l
+#
+#SBATCH --time=02:00:00
+#SBATCH --ntasks=1
+#SBATCH --partition=xfer
+
+command="rsync -av"
+echo -e "$SLURM_JOB_NAME started on $(date):\n $command $1 $2\n"
+srun -n $SLURM_NTASKS $command $1 $2
+echo -e "$SLURM_JOB_NAME finished on $(date)\n"
+
+if [ -n "$3" ]; then
+  # unset memory constraint enabled on xfer partition
+  unset SLURM_MEM_PER_CPU
+  # submit job with dependency
+  sbatch --dependency=afterok:$SLURM_JOB_ID $3
+fi
+
+

The template Slurm batch script above requires at least two command line arguments, which are the source and the destination files (or folders) to be copied. +The stage script may take as third command line argument the name of the production Slurm batch script to be submitted after the stage job: the Slurm dependency flag --dependency=afterok:$SLURM_JOB_ID ensures that the production job can begin execution only after the stage job has successfully executed (i.e. ran to completion with an exit code of zero).

+

You can submit the stage job with a meaningful job name as below:

+
# stage-in and production jobs
+$ sbatch --job-name=stage_in stage.sbatch \
+         ${PROJECT}/<source> ${SCRATCH}/<destination> \
+         production.sbatch
+
+
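To follow the stage job (and any job submitted with a dependency on it), standard Slurm commands can be used; a small sketch, with <jobid> as a placeholder:

# jobs of the current user in the xfer partition
$ squeue --user $USER --partition xfer
# show the dependency information of a specific job
$ scontrol show job <jobid> | grep -i dependency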

The Slurm flag --job-name will set the name of the stage job, which will be printed in the Slurm output file: the latter is by default the file slurm-${SLURM_JOB_ID}.out, unless you set a specific name for output and error using the Slurm flags -e/--error and/or -o/--output (e.g. -o %j.out -e %j.err, where the Slurm symbol %j will be replaced by $SLURM_JOB_ID). +
+
The stage script will also submit the Slurm batch script production.sbatch given as the third command line argument. +The production script can in turn submit a stage job to transfer the results back. E.g.:

+
# stage-out
+sbatch --dependency=afterok:${SLURM_JOB_ID} --job-name=stage_out \
+       stage.sbatch ${SCRATCH}/<source> ${PROJECT}/<destination>
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/stylesheets/extra.css b/stylesheets/extra.css new file mode 100644 index 0000000..b333a92 --- /dev/null +++ b/stylesheets/extra.css @@ -0,0 +1,113 @@ +:root { + --md-admonition-icon--alps: url('data:image/svg+xml;charset=utf-8,'); + --base-border-radius: 0.2rem; + --base-border-width: 0.05rem; +} +.md-typeset .admonition.alps, +.md-typeset details.alps { + border-color: rgb(255, 51, 51); +} +.md-typeset .alps > .admonition-title, +.md-typeset .alps > summary { + background-color: rgba(255, 51, 51, 0.1); +} +.md-typeset .alps > .admonition-title::before, +.md-typeset .alps > summary::before { + background-color: rgb(255, 51, 51); + -webkit-mask-image: var(--md-admonition-icon--alps); + mask-image: var(--md-admonition-icon--alps); +} +.md-typeset .admonition.change, +.md-typeset details.change { + border-color: rgb(43, 155, 70); +} +.md-typeset .change > .admonition-title, +.md-typeset .change > summary { + background-color: rgba(43, 155, 70, 0.1); +} +.md-typeset .change > .admonition-title::before, +.md-typeset .change > summary::before { + background-color: rgb(43, 155, 70); + -webkit-mask-image: var(--md-admonition-icon--alps); + mask-image: var(--md-admonition-icon--alps); +} + +/* todo admonition */ +.md-typeset .admonition.todo, +.md-typeset details.todo { + border-color: rgb(255, 0, 0); +} +.md-typeset .todo > .admonition-title, +.md-typeset .todo > summary { + background-color: rgba(255, 0, 0, 0.1); +} +.md-typeset .todo > .admonition-title::before, +.md-typeset .todo > summary::before { + background-color: rgb(255, 0, 0); + -webkit-mask-image: var(--md-admonition-icon--alps); + mask-image: var(--md-admonition-icon--alps); +} + +.md-nav__item .md-nav__link--active { + font-weight: bold; +} + +/* Light mode */ +[data-md-color-scheme="default"] .md-typeset .grid.cards > ul > li { + border-radius: var(--base-border-radius); + border-width: var(--base-border-width); + border-color: #121417; /* Darker border for contrast */ + background-color: #ffffff; /* Ensures light background */ + color: #121417; /* Dark text for readability */ +} + +[data-md-color-scheme="default"] .md-typeset .grid.cards > ul > li:hover { + box-shadow: 0 0 0.5rem #010945; /* Dark blue shadow on hover */ +} + +/* Dark mode */ +[data-md-color-scheme="slate"] .md-typeset .grid.cards > ul > li { + border-radius: var(--base-border-radius); + border-width: var(--base-border-width); + border-color: #bbbbbb; /* Lighter border for better contrast in dark mode */ + background-color: #1a1a1a; /* Darker background to blend well */ + color: #e0e0e0; /* Lighter text for readability */ +} + +[data-md-color-scheme="slate"] .md-typeset .grid.cards > ul > li:hover { + box-shadow: 0 0 0.5rem #79aaff; /* Softer, lighter blue glow on hover */ +} + +/* Table */ +.md-typeset table:not([class]) { + border-radius: var(--base-border-radius); + border-width: var(--base-border-width); + border-color: #121417; +} + +/* code hilighting */ + + +/* light mode: pale yellow background, solid black foreground */ +[data-md-color-scheme="default"] { + --md-code-bg-color: #fdfdfd; + --md-code-fg-color: #000000; +} + +/* Dark mode */ +[data-md-color-scheme="slate"] { + --md-code-bg-color: #1e1e1e; + --md-code-fg-color: #ffffff; +} + +/* Light mode */ +[data-md-color-scheme="default"] .md-typeset pre { + border: 2px solid #b4b4b4; + border-radius: 2px; /* slight rounding to corners */ +} + +/* Dark mode */ +[data-md-color-scheme="slate"] .md-typeset pre { + border: 2px solid #3f4013; + 
border-radius: 2px; +} diff --git a/tools/cicd/index.html b/tools/cicd/index.html new file mode 100644 index 0000000..45fe51b --- /dev/null +++ b/tools/cicd/index.html @@ -0,0 +1,3198 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CI/CD - CSCS Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + +

+

Continuous Integration / Continuous Deployment (CI/CD)

+

+

Introduction to containerized CI/CD

+

Containerized CI/CD allows you to build containers and run them at scale on CSCS systems. The basic idea is that you provide a Dockerfile with build instructions and run the newly created container. Most of the boilerplate work is taken care of by the CI implementation, so that you can concentrate on providing build instructions and testing. The information you need to configure your repository is provided to you by the CI side.

+

We support any git provider that supports webhooks. This includes GitHub, GitLab and Bitbucket. A typical pipeline consists of at least one build job and one test job. The build job makes sure that a new container with your most recent code changes is built. The test step uses the new container as part of an MPI job; e.g., it can run your tests on multiple nodes with GPU support.

+

Building your software inside a container requires a Dockerfile and a name for the container in the registry where the container will be stored. Testing your software then requires the commands that must be executed to run the tests. No explicit container spawning is required (and also not possible). Your test jobs need to specify the number of nodes and tasks required for the test and the test commands.

+

Here is an example of a full helloworld project.

+

It is also helpful to consult the GitLab CI yaml reference documentation and the predefined pipeline variables reference.

+

+

Tutorial Hello World

+

In this example we are using the containerized hello world repository. This is a sample Hello World CMake project. The application only echoes Hello from $HOSTNAME, but this should demonstrate the idea of how to run a program on multiple nodes. The pipeline instructions are inside the file ci/cscs.yml. Let's walk through the pipeline bit by bit. +

include:
+  - remote: 'https://gitlab.com/cscs-ci/recipes/-/raw/master/templates/v2/.ci-ext.yml'
+

+

This block includes a yaml file which contains definitions with default values to build and run containers. Have a look inside this file to see available building blocks. +

stages:
+  - build
+  - test
+
+Here we define two different stages, named build and test. The names can be chosen freely. +
variables:
+  PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/helloworld:$CI_COMMIT_SHORT_SHA
+

+

This block defines variables that will apply to all jobs. See CI variables.

+
build_job:
+  stage: build
+  extends: .container-builder-cscs-zen2
+  variables:
+    DOCKERFILE: ci/docker/Dockerfile.build
+
+

This adds a job named build_job to the stage build. This runner expects a Dockerfile as input, which is specified in the variable DOCKERFILE. The resulting container name is specified with the variable PERSIST_IMAGE_NAME; since it has already been defined above, it does not need to be mentioned again in the variables block. There is further documentation of this runner at gitlab-runner-k8s-container-builder.

+
+

Todo

+

link to runner specs

+
+
test_job:
+  stage: test
+  extends: .container-runner-eiger-zen2
+  image: $PERSIST_IMAGE_NAME
+  script:
+    - /opt/helloworld/bin/hello
+  variables:
+    SLURM_JOB_NUM_NODES: 2
+    SLURM_NTASKS: 2
+
+

This block defines a test job. The job will be executed by the container-runner-eiger-zen2.

+
+

Todo

+

link to runner

+
+

This runner will pull the image on the cluster Eiger and run the commands as specified in the script tag. In this example we are requesting 2 nodes with 1 task on each node, i.e. 2 tasks total. All Slurm environment variables are supported. The commands will be running inside the container specified by the image tag.

+

+

CI at CSCS

+

Enable CI for your project

+

While the procedure to enable CSCS CI for your repository consists of only a few steps outlined below, many of them require features in GitHub, GitLab or Bitbucket. The links in the text contain additional steps which may be needed. +Some of those documents are non-trivial, especially if you do not have considerable background in the repository features. Plan sufficient time for the setup and contact a GitHub/GitLab/Bitbucket professional, if needed.

+
    +
  1. +

    Register your project with CSCS: The first step to use containerized CI/CD is to register your Git repository with CSCS. Please open a Service Desk ticket for this step. Once your project has been registered you will be provided with a webhook-secret.

    +
  2. +
  3. +

    Set up CI: Head to the CI overview page, login with your CSCS credentials, and go to the newly registered project.

    +
  4. +
  5. +

    Add FirecREST tokens: Expand the Admin config, and follow the guide (click on the small black triangle next to Firecrest Consumer Key). Enter all fields for FirecREST, i.e.,

    +
      +
    • Consumer Key
    • +
    • Consumer Secret
    • +
    • default Slurm account for job submission (what you normally provide in the --account/-A flag to Slurm) +If you don't already know how to obtain FirecREST credentials, you can find more information on How to create FirecREST clients on the Developer Portal
    • +
    +
  6. +
+
+

Todo

+

replace link to mkdocs firecrest docs

+
+
    +
  1. +

    (Optional) Private project: If your Git repository is a private repository make sure to check the Private repository box and follow the instructions to add an SSH key to your Git repository.

    +
  2. +
  3. +

    Add notification token: On the setup page you will also find the field Notification token. The token is live tested, and you will see a green checkmark when the token is valid and can be used by the CI. It is mandatory to add a token so that your Git repository will be notified about the status of the build jobs. You cannot save anything as long as the notification token is invalid. (Click on the small triangle to get further instructions)

    +
  4. +
  5. +

    Add webhook: On the setup page you will find the Setup webhook details button. If you click on it you will see all the entries which have to be added to a new webhook in your Git repository. Follow the link given there to your repository, and add the webhook with the given entries.

    +
  6. +
  7. +

    Default trusted users and default CI-enabled branches: Provide the default list of trusted users and CI-enabled branches. The global configuration will apply to all pipelines that do not overwrite it explicitly.

    +
  8. +
  9. +

    Pipeline default: Your first pipeline has the name default. Click on Pipeline default to see the pipeline setup details. The name can be chosen freely but it cannot contain whitespaces (a short descriptive name). Update the entry point, trusted users and CI-enabled branches.

    +
  10. +
  11. +

    Submit your changes

    +
  12. +
  13. +

    (Optional) Add other pipelines: Add other pipelines with a different entry point if you need more pipelines.

    +
  14. +
  15. +

    Add entry point yaml files to Git repository: Commit the yaml entry point files to your repository. You should get notifications about the build status in your repository if everything is correct. See the Hello World Tutorial for a simple yaml-file.

    +
  16. +
+

Clarifications and pitfalls to the above-mentioned steps

+
+

Info

+

This section exemplifies on GitHub, but similar settings are available on GitLab and Bitbucket

+
+

The notification token setup step is crucial, because it is the primary channel for receiving initial feedback on any errors. +You will not be able to save any changes on the CI setup page as long as the notification token is invalid. The token is checked live to verify that it can actually be used to send notifications.

+

Notification tokens on GitHub can be set up using a classic token or a fine-grained token. We discourage the use of fine-grained tokens. Fine-grained tokens are unsupported and come with many pitfalls. They can work, but must be enabled at the organization level by an admin, and must be created in the correct organization. +You must choose the correct resource owner, i.e., the organization that the project belongs to. If the organization is not listed, then it has disabled fine-grained tokens at the organization level; this can only be enabled globally on an organization by an admin. As for the repository, you can restrict the token to only the repository that you want to notify, or to all repositories. Even if you choose "All repositories", it is still restricted to the organization and does not grant access to any repository outside of the resource owner.

+

Another crucial step is the correct webhook setup. The repository provider (GitHub, GitLab, Bitbucket) gives you the ability to see what happened when a webhook event was sent. If the webhook was not set up correctly, you will receive an HTTP error for the webhook events. The error message can be found in the webhook event response. As an example, here is how you would find it on GitHub: Settings > Webhooks > Edit button of the webhook > Recent Deliveries tab > Choose a webhook event from the list > Response tab > Check for potential error message.

+

A typical error is accepting GitHub's defaults for new webhooks, where only push events are sent. If you forget to select Send me everything, some events will not trigger pipelines. Double-check your webhook settings.

+

+

Understanding when CI is triggered

+

+

Push events

+
    +
  • Every pipeline can define its own list of CI-enabled branches
  • +
  • If a pipeline does not define a list of CI-enabled branches, the global list will be used
  • +
  • If you push changes to a branch every pipeline that has this branch in its list of CI-enabled branches will be triggered
  • +
  • If the global list and all pipelines have an empty list of CI-enabled branches, then CI will never be triggered on push events
  • +
+

+

Pull requests (Merge requests)

+
    +
  • For simplicity we use PR to mean Pull Request, although some providers call it a Merge request. It is the same thing.
  • +
  • Every pipeline can define its own list of trusted users.
  • +
  • If a pipeline does not define a list of trusted users, the global list will be used.
  • +
  • If a PR is opened/edited and targets a CI-enabled branch, and the source branch is not from a fork, then all pipelines will be started that have the target branch in its list of CI-enabled branches.
  • +
  • If a PR is opened/edited and targets a CI-enabled branch, but the source branch is from a fork, then a pipeline will be automatically started if and only if the fork is from a user in the pipeline's trusted user list and the target branch is in the pipeline's CI-enabled branches.
  • +
+

+

cscs-ci run comment

+
    +
  • You have an open PR
  • +
  • You want to trigger a specific pipeline
  • +
  • Write a comment inside the PR with the text +
    cscs-ci run PIPELINE_NAME_1,PIPELINE_NAME_2
    +
  • +
  • Special case: You have only one pipeline, then you can skip the pipeline names and write only the comment cscs-ci run
  • +
  • The pipeline will only be triggered, if the commenting user is in the pipeline's trusted users list.
  • +
  • Only the first line of the comment will be evaluated, i.e. you can add context from line 2 onwards.
  • +
  • The target branch is ignored, i.e. you can test a pipeline even if the target branch is not in the pipeline's CI-enabled branches.
  • +
  • Advanced cscs-ci run command is possible to inject variables into the pipeline (exposed as environment variables)
      +
    • Triggering a pipeline with additional variables +
      cscs-ci run PIPELINE_NAME;MY_VARIABLE=some_value;ANOTHER_VAR=other_value
      +
      + This will trigger the pipeline PIPELINE_NAME, and in your jobs there will be the environment variables MY_VARIABLE and ANOTHER_VAR available.
    • +
    • Disallowed characters for PIPELINE_NAME, variable name and variable value are the characters ,;= (comma, semicolon, equal), because they serve as separators of the different components.
    • +
    +
  • +
+

+

API call triggering

+
    +
  • It is possible to trigger a pipeline via an API call
  • +
  • Create a file named data.yaml, with the content +
    ref: main
    +pipeline: pipeline_name
    +variables:
    +  MY_VARIABLE: some_value
    +  ANOTHER_VAR: other_value
    +
    +Send a POST request to the middleware +
    curl -X POST -u 'repository_id:webhook_secret' --data-binary @data.yaml https://cicd-ext-mw.cscs.ch/ci/pipeline/trigger
    +
  • +
  • replace repository_id and webhook_secret with your repository id and the webhook secret.
  • +
+

Understanding the underlying workflow

+

Typical users do not need to know the underlying workflow behind the scenes, so you can stop reading here. However, it might put the above-mentioned steps into perspective. It can also give you background for troubleshooting if and when something in the procedure does not go as expected.

+

Workflow (exemplified on icon-exclaim)

+
    +
  1. (Prerequisite) icon-exclaim will have a webhook set up
  2. +
  3. You make some change in the icon-exclaim repository
  4. +
  5. GitHub sends a webhook event to cicd-ext-mw.cscs.ch  (CI middleware)
  6. +
  7. CI middleware fetches your repository from GitHub and pushes a "mirror" to GitLab
  8. +
  9. GitLab sees a change in the repository and starts a pipeline (i.e. it uses the CI yaml as entry point)
  10. +
  11. If the repository uses git submodules, GIT_SUBMODULE_STRATEGY: recursive has to be specified (see GitLab documentation)
  12. +
  13. The container-builder, which takes a Dockerfile as input (specified in the variable DOCKERFILE), executes something similar to docker build -f $DOCKERFILE ., where the build context is the whole (recursively) cloned repository (a rough local equivalent is sketched after this list)
  14. +
+
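If you want to reproduce locally what the container-builder does, a rough equivalent is the following; this is a sketch only, the repository URL and the image tag are placeholders, and the Dockerfile path matches the Hello World tutorial above:

git clone --recursive <your-repository-url>
cd <your-repository>
# build with the repository root as build context, like the container-builder does
docker build -f ci/docker/Dockerfile.build -t helloworld:local .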

CI variables

+

Many variables exist during a pipeline run; they are documented at GitLab's predefined variables. In addition to the CI variables available through GitLab, there are a few CSCS-specific pipeline variables (a quick way to inspect them inside a job is sketched after the table):

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Variable | Value | Additional information
CSCS_REGISTRY | jfrog.svc.cscs.ch | CSCS internal registry, the preferred registry to store your container images
CSCS_REGISTRY_PATH | jfrog.svc.cscs.ch/docker-ci-ext/ | The prefix path in the CSCS internal container image registry to which your pipeline has write access. Within this prefix, you can choose any directory structure. Images that are pushed to a path matching /public/ can be pulled by anybody within the CSCS network
CSCS_CI_MW_URL | https://cicd-ext-mw.cscs.ch/ci | The URL of the middleware, the orchestrator software
CSCS_CI_DEFAULT_SLURM_ACCOUNT | d123 | The project to which accounting goes. It is set up on the CI setup page in the Admin section. It can be overwritten via SLURM_ACCOUNT for individual jobs.
CSCS_CI_ORIG_CLONE_URL | https://github.com/my-org/my-project (public) or git@github.com:my-org/my-project (private) | Clone URL for git. This is needed for some implementation details of the gitlab-runner custom executor. This is the clone URL of the registered project, not the clone URL of the mirror project.
+
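A quick way to see which of these variables (and their values) are available inside a job is to print them in a script step; a minimal sketch:

# run inside the script section of a CI job
env | grep '^CSCS_'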

Containerized CI - best practices

+

Multi-architecture images

+

With the introduction of Grace-Hopper nodes, we now have aarch64 and x86_64 machines. This implies that container images should be built for the correct architecture. This can be achieved as in the following example +

include:
+  - remote: 'https://gitlab.com/cscs-ci/recipes/-/raw/master/templates/v2/.ci-ext.yml'
+
+stages:
+  - build
+  - make_multiarch
+  - run
+
+.build:
+  stage: build
+  variables:
+    DOCKERFILE: path/to/my_dockerfile
+    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/${ARCH}/my_image_name:${CI_COMMIT_SHORT_SHA}
+build aarch64:
+  extends: [.container-builder-cscs-gh200, .build]
+build x86_64:
+  extends: [.container-builder-cscs-zen2, .build]
+
+make multiarch:
+  extends: .make-multiarch-image
+  stage: make_multiarch
+  variables:
+    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/my_multiarch_image:${CI_COMMIT_SHORT_SHA}
+    PERSIST_IMAGE_NAME_AARCH64: $CSCS_REGISTRY_PATH/aarch64/my_image_name:${CI_COMMIT_SHORT_SHA}
+    PERSIST_IMAGE_NAME_X86_64: $CSCS_REGISTRY_PATH/x86_64/my_image_name:${CI_COMMIT_SHORT_SHA}
+
+.run:
+  stage: run
+  image: $CSCS_REGISTRY_PATH/my_multiarch_image:${CI_COMMIT_SHORT_SHA}
+  script:
+    - uname -a
+run aarch64:
+  extends: [.container-runner-daint-gh200, .run]
+run x86_64:
+  extends: [.container-runner-eiger-mc, .run]
+

+

We first create two container images with different names. Then we combine these two images under a single name that references both architectures. Finally, in the run step, we use the multi-architecture image, and the container runtime will pull the correct architecture.

+
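To verify that the combined image really contains both architectures, its manifest can be inspected; a sketch using the Docker CLI (podman provides an equivalent manifest inspect command), assuming you are logged in to the registry:

docker manifest inspect $CSCS_REGISTRY_PATH/my_multiarch_image:${CI_COMMIT_SHORT_SHA}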

It is not mandatory to combine the container images into a multi-architecture image, i.e. a CI setup which consistently uses the correct architecture-specific paths works as well. A multi-architecture image is convenient when you plan to distribute it to other users.

+

Dependency management

+

Problem

+

A common observation is that your software has many dependencies that are more or less static, i.e. they can change but do so very rarely. A common pattern for avoiding unnecessary rebuilds of base images is a multi-stage CI setup:

+
    +
  1. Build (rarely but manually) a base container with all static dependencies and push it to a public container registry
  2. +
  3. Use the base container and build the software container
  4. +
  5. Test the newly created software container
  6. +
  7. Deploy the software container
  8. +
+

This works fine but has the drawback that a manual step is needed whenever the dependencies change, e.g. when you want to upgrade to new versions of the dependencies. Another drawback is that the recipe of the base container can be kept outside of the repository, which makes it harder to reproduce results, especially when colleagues want to reproduce a build.

+

Solution

+

A common solution to this problem is a multi-stage setup. Your repository should have (at least) two Dockerfiles, let us call them Dockerfile.base and Dockerfile.

+
    +
  • Dockerfile.base: This Dockerfile contains the recipe to build your base container; it normally derives FROM a very basic container, e.g. docker.io/ubuntu:24.04 or the CSCS spack base containers. Let us call the container image that is built using this recipe BASE_IMG.
  • +
+
+

Todo

+

link to spack base containers

+
+
    +
  • Dockerfile: This Dockerfile contains the recipe to build your software-container. It must start with FROM $BASE_IMG.
  • +
+

The .container-builder-cscs-* blocks can be used to solve this problem. The runner supports the variable CSCS_REBUILD_POLICY, which by default is set to if-not-exists.

+

This means that the runner will check in the remote registry whether the container image specified in PERSIST_IMAGE_NAME exists. A new container image is built only if it does not exist yet. Note: In case you have one build job, PERSIST_IMAGE_NAME can be specified in the variables: field of this build job or as a global variable, like in the Hello World example. In case you have multiple build jobs and you specify the PERSIST_IMAGE_NAME variable per build job, you need to specify the exact name of the image to be used in the image field of the test job.

+

A CI YAML file would look in the simplest case like this:

+

ci/cscs.yml +

include:
+  - remote: 'https://gitlab.com/cscs-ci/recipes/-/raw/master/templates/v2/.ci-ext.yml'
+
+stages:
+  - build_base
+  - build
+  - test
+
+build base:
+  extends: .container-builder-cscs-zen2
+  stage: build_base
+  variables:
+    DOCKERFILE: ci/docker/Dockerfile.base
+    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/base/my_base_container:1.0
+    CSCS_REBUILD_POLICY: if-not-exists # default anyway, only here for verbosity
+
+build software:
+  extends: .container-builder-cscs-zen2
+  stage: build
+  variables:
+    DOCKERFILE: ci/docker/Dockerfile
+    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/software/my_software:$CI_COMMIT_SHORT_SHA
+    DOCKER_BUILD_ARGS: '["BASE_IMG=$CSCS_REGISTRY_PATH/base/my_base_container:1.0"]'
+
+test software single node:
+  extends: .container-runner-daint-gpu
+  image: $CSCS_REGISTRY_PATH/software/my_software:$CI_COMMIT_SHORT_SHA
+  script:
+    - ./test_suite_1.sh
+    - ./test_suite_2.sh
+  variables:
+    SLURM_JOB_NUM_NODES: 1
+
+test software multi:
+  extends: .container-runner-daint-gpu
+  image: $CSCS_REGISTRY_PATH/software/my_software:$CI_COMMIT_SHORT_SHA
+  script:
+    - ./test_suite_1.sh
+    - ./test_suite_2.sh
+  variables:
+    SLURM_JOB_NUM_NODES: 4
+

+

ci/docker/Dockerfile.base +

FROM docker.io/finkandreas/spack:0.19.2-cuda11.7.1-ubuntu22.04
+
+ARG NUM_PROCS
+
+RUN spack-install-helper daint-gpu \
+    petsc \
+    trilinos
+

+

ci/docker/Dockerfile +

ARG BASE_IMG
+FROM $BASE_IMG
+
+ARG NUM_PROCS
+
+RUN mkdir /build && cd /build && cmake /sourcecode && make -j$NUM_PROCS
+

+

A setup like this would run the very first time and build the container image $CSCS_REGISTRY_PATH/base/my_base_container:1.0, followed by the job that builds the container image $CSCS_REGISTRY_PATH/software/my_software:$CI_COMMIT_SHORT_SHA. The next time CI is triggered, .container-builder-cscs-zen2 checks in the remote registry whether the target tag (PERSIST_IMAGE_NAME) exists, and only builds a new container image if it does not exist yet. Since the tag for the job build base is static, i.e. it is the same for every run of CI, it is built the first time the pipeline runs, but not for subsequent runs. In contrast, the tag of the job build software changes with every CI run, since the variable CI_COMMIT_SHORT_SHA is different for every run.

+
Manual dependency update
+

At some point you will realise that you have to update some of the dependencies. You can use a manual update process for your base-container, where you ensure that you update all the necessary image tags. In our example, this means updating in ci/cscs.yml all occurrences of $CSCS_REGISTRY_PATH/base/my_base_container:1.0 to $CSCS_REGISTRY_PATH/base/my_base_container:2.0 (or any other versioning scheme; all that matters is that the full name changes). Of course something in Dockerfile.base should change too, otherwise you are building the same artifact under a different name.
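A minimal sketch of such a bump in ci/cscs.yml, based on the example pipeline above (only the affected lines are shown; job definitions are otherwise unchanged):

build base:
  variables:
    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/base/my_base_container:2.0        # was :1.0

build software:
  variables:
    DOCKER_BUILD_ARGS: '["BASE_IMG=$CSCS_REGISTRY_PATH/base/my_base_container:2.0"]'   # was :1.0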

+
Dynamic dependency update
+

While manually updating image tags works, it is error-prone. Take for example the situation where you update the tag in build base but forget to change it in build software. Your pipeline would still run, because the old dependency of build software still exists, and since the inconsistency produces no explicit error it is hard to spot.

+

Therefore, container images can also be named dynamically. The idea is the same: we first build a base-container, and use this base-container to build our software-container.

+

The build base and build software jobs would look similar to this: +

build base:
+  extends: .container-builder-cscs-zen2
+  stage: build_base
+  before_script:
+    - DOCKER_TAG=`cat ci/docker/Dockerfile.base | sha256sum - | head -c 16`
+    - export PERSIST_IMAGE_NAME=$CSCS_REGISTRY_PATH/base/my_base_image:$DOCKER_TAG
+    - echo "BASE_IMAGE=$PERSIST_IMAGE_NAME" > build.env
+  artifacts:
+    reports:
+      dotenv: build.env
+  variables:
+    DOCKERFILE: ci/docker/Dockerfile.base # overwrite with the real path of the Dockerfile
+
+build software:
+  extends: .container-builder-cscs-zen2
+  stage: build
+  variables:
+    DOCKERFILE: ci/docker/Dockerfile
+    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/software/my_software:$CI_COMMIT_SHORT_SHA
+    DOCKER_BUILD_ARGS: '["BASE_IMG=$BASE_IMAGE"]'
+

+

Let us walk through the changes in the build base job:

+
    +
  • DOCKER_TAG is computed at runtime from the sha256sum of Dockerfile.base, i.e. it changes whenever the content of Dockerfile.base changes (we keep only the first 16 characters, which is random enough to guarantee a unique name).
  • +
  • We export PERSIST_IMAGE_NAME to the dynamic name containing DOCKER_TAG.
  • +
  • We write the dynamic name to the file build.env.
  • +
  • We tell the CI system to keep build.env as an artifact (see the documentation of dotenv artifacts).
  • +
+

Note: For public projects, the dotenv artifacts of a specific job are available at https://gitlab.com/cscs-ci/ci-testing/webhook-ci/mirrors/<project_id>/<pipeline_id>/-/jobs/<job_id>/artifacts/download?file_type=dotenv.

+

Now let us look at the changes in the build software job:

+
    +
  • DOCKER_BUILD_ARGS now uses $BASE_IMAGE. This variable exists because the information was transferred from build base to this job via a dotenv artifact.
  • +
+

In this example the names BASE_IMG and BASE_IMAGE are deliberately different, to clarify where each variable is set and used; feel free to use the same name for both. The default behaviour is to import all artifacts from all previous jobs; if you want only specific artifacts in a job, have a look at dependencies.
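As a sketch, restricting build software to the artifacts of build base could look like the following (dependencies is a standard GitLab CI keyword, not something specific to the CSCS templates):

build software:
  extends: .container-builder-cscs-zen2
  stage: build
  dependencies: ['build base']   # only import artifacts (the dotenv file) from this job
  variables:
    DOCKERFILE: ci/docker/Dockerfile
    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/software/my_software:$CI_COMMIT_SHORT_SHA
    DOCKER_BUILD_ARGS: '["BASE_IMG=$BASE_IMAGE"]'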

+

There is also a building block in the templates, named .dynamic-image-name, which you can use to get rid of most of the boilerplate. It is important to note that this building block exports the dynamic name under the hardcoded name BASE_IMAGE in the dotenv file. The jobs would look something like this: +

build base:
+  extends: [.container-builder-cscs-zen2, .dynamic-image-name]
+  stage: build_base
+  variables:
+    DOCKERFILE: ci/docker/Dockerfile.base
+    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/base/my_base_image
+    WATCH_FILECHANGES: 'ci/docker/Dockerfile.base'
+
+build software:
+  extends: .container-builder-cscs-zen2
+  stage: build
+  variables:
+    DOCKERFILE: ci/docker/Dockerfile
+    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/software/my_software:$CI_COMMIT_SHORT_SHA
+    DOCKER_BUILD_ARGS: '["BASE_IMG=$BASE_IMAGE"]'
+

+

build base additionally uses the building block .dynamic-image-name, while build software is unchanged. Have a look at the definition of the block .dynamic-image-name in the file .ci-ext.yml for further notes.

+

Examples

+

For working examples, see these two YAML files (and check the respective Dockerfiles referenced in their build jobs):

+ +

Example projects

+

Here are a couple of projects which use this CI setup. Please have a look there for more advanced usage:

+
    +
  • dcomex-framework: entry point is ci/prototype.yml
  • +
  • utopia: two pipelines, with entry points ci/cscs/mc/gitlab-daint.yml and ci/cscs/gpu/gitlab-daint.yml
  • +
  • mars: two pipelines, with entry points ci/gitlab/cscs/gpu/gitlab-daint.yml and ci/gitlab/cscs/mc/gitlab-daint.yml
  • +
  • sparse_accumulation: entry point is ci/pipeline.yml
  • +
  • gt4py: entry point is ci/cscs-ci.yml
  • +
  • SIRIUS: entry point is ci/cscs-daint.yml
  • +
  • sphericart: entry point is ci/pipeline.yml
  • +

+

Container Engine

+

The Container Engine (CE) toolset is designed to enable computing jobs to seamlessly run inside Linux application containers, thus providing support for containerized user environments.

+

Concept

+

Containers effectively encapsulate a software stack; however, to be useful in HPC computing environments, they often require the customization of bind mounts, environment variables, working directories, hooks, plugins, etc. To simplify this process, the Container Engine (CE) toolset supports the specification of user environments through Environment Definition Files.

+

An Environment Definition File (EDF) is a text file in the TOML format that declaratively and prescriptively represents the creation of a computing environment based on a container image. Users can create their own custom environments and share, edit, or build upon already existing environments.

+

The Container Engine (CE) toolset leverages its tight integration with the Slurm workload manager to parse EDFs directly from the command line or batch script and instantiate containerized user environments seamlessly and transparently.

+

Through the EDF, container use cases can be abstracted to the point where end users perform their workflows as if they were operating natively on the computing system.

+

Key Benefits

  • Freedom: Containers give users full control of the user space; users can decide what to install without involving a sysadmin.
  • Reproducibility: Workloads consistently run in the same environment, ensuring uniformity across experimental runs.
  • Portability: The self-contained nature of containers simplifies deployment across architecture-compatible HPC systems.
  • Seamless access to HPC resources: The CE facilitates native access to specialized HPC resources like GPUs, interconnects, and other system-specific tools crucial for performance.

+

Quick Start

+

Let's set up a containerized Ubuntu 24.04 environment using a scratch folder as the working directory.

+

Example EDF

+
image = "library/ubuntu:24.04"
+mounts = ["/capstor/scratch/cscs/${USER}:/capstor/scratch/cscs/${USER}"]
+workdir = "/capstor/scratch/cscs/${USER}"
+
+

Note: Ensure that your ${USER} environment variable is defined with your actual username.

+

Save this file as ubuntu.toml in the $HOME/.edf directory (the default location for EDF files). A more detailed explanation of each EDF entry can be found in the EDF reference.

+

Running the environment

+

Use Slurm on the cluster login node to start the Ubuntu environment that was just defined, as follows:

+
$ srun --environment=ubuntu --pty bash
+
+

Since the ubuntu.toml file is located in the EDF search path, the filename can be passed to the option without the file extension.

+

Example Output

+
[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun --environment=ubuntu --pty bash   # Step 1
+
+<username>@<node name>:/capstor/scratch/cscs/<username>$ pwd                        # Step 2
+/capstor/scratch/cscs/<username>
+
+<username>@<node name>:/capstor/scratch/cscs/<username>$ cat /etc/os-release        # Step 3
+PRETTY_NAME="Ubuntu 24.04 LTS"
+NAME="Ubuntu"
+VERSION_ID="24.04"
+VERSION="24.04 LTS (Noble Numbat)"
+VERSION_CODENAME=noble
+ID=ubuntu
+ID_LIKE=debian
+HOME_URL="https://www.ubuntu.com/"
+SUPPORT_URL="https://help.ubuntu.com/"
+BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
+PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
+UBUNTU_CODENAME=noble
+LOGO=ubuntu-logo
+
+<username>@<node name>:/capstor/scratch/cscs/<username>$ exit                       # Step 4
+[<vcluster>][<username>@<vcluster>-ln001 ~]$  
+
+

The above terminal snippet demonstrates how to launch a containerized environment using Slurm with the --environment option, where we highlight:

+

Step 1. Start an interactive shell session within the Ubuntu 24.04 container deployed on a compute node using srun --environment=ubuntu --pty bash.
Step 2. Confirm that the working directory inside the container (pwd) is set to the user's scratch folder, as specified in the EDF.
Step 3. Show the OS release of the container (cat /etc/os-release), based on Ubuntu 24.04 LTS.
Step 4. Exit the container (exit), returning to the login node.

+

Note that the image pull and the container start happen automatically, streamlining the usage of the CE.

+

Running containerized environments

+

A job is run in a containerized environment by passing the --environment option to the srun or salloc Slurm commands. The option takes a file path to the EDF describing the environment in which the job should be executed, for example:

+
[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun --environment=$SCRATCH/edf/debian.toml cat /etc/os-release
+PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
+NAME="Debian GNU/Linux"
+VERSION_ID="12"
+VERSION="12 (bookworm)"
+VERSION_CODENAME=bookworm
+ID=debian
+HOME_URL="https://www.debian.org/"
+SUPPORT_URL="https://www.debian.org/support"
+BUG_REPORT_URL="https://bugs.debian.org/"
+
`--environment` can be a relative path from the current working directory (i.e., where the Slurm command is entered). A relative path should be prepended with `./`. For example:
+
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ ls
+debian.toml
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun --environment=./debian.toml cat /etc/os-release
+PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
+NAME="Debian GNU/Linux"
+VERSION_ID="12"
+VERSION="12 (bookworm)"
+VERSION_CODENAME=bookworm
+ID=debian
+HOME_URL="https://www.debian.org/"
+SUPPORT_URL="https://www.debian.org/support"
+BUG_REPORT_URL="https://bugs.debian.org/"
+
+

If a file is located in the EDF search path, the argument to the command line option can be just the environment name, that is the name of the file without the .toml extension, for example:

+
[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun --environment=debian cat /etc/os-release
+PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
+NAME="Debian GNU/Linux"
+VERSION_ID="12"
+VERSION="12 (bookworm)"
+VERSION_CODENAME=bookworm
+ID=debian
+HOME_URL="https://www.debian.org/"
+SUPPORT_URL="https://www.debian.org/support"
+BUG_REPORT_URL="https://bugs.debian.org/"
+
+

Use from batch scripts

+

In principle, the --environment option can also be used within batch scripts as an #SBATCH option. It is important to note that in such a case, all the contents of the script are executed within the containerized environment. The CE toolset gives access to the Slurm workload manager within containers via the Slurm hook (see the Container Hooks section; the hook is controlled by the ENROOT_SLURM_HOOK environment variable and activated by default on most vClusters). Calls to Slurm commands within the batch script (for example srun or scontrol) will only work if this hook is enabled.

+

For the time being, if the script needs to invoke Slurm commands, the recommended approach is to use --environment as part of those commands, for example when launching job steps:

+
[<vcluster>][<username>@<vcluster>-ln001 ~]$ cat example.sbatch 
+#!/bin/bash -l
+#SBATCH --job-name=edf-example
+#SBATCH --time=0:01:00
+#SBATCH --nodes=2
+#SBATCH --ntasks-per-node=1
+#SBATCH --partition=<vcluster>
+#SBATCH --output=slurm-%x.out
+ 
+# Run job step
+srun --environment=debian cat /etc/os-release
+
+

The EDF search path

+

By default, the EDFs for each user are looked up in $HOME/.edf. The search path for EDFs can be controlled through the EDF_PATH environment variable. EDF_PATH must be a colon-separated list of absolute paths to directories where the CE looks for TOML files, similar to the PATH and LD_LIBRARY_PATH variables. If a file is located in the search path, its name can be used in --environment options without the .toml extension, for example:

+
[<vcluster>][<username>@<vcluster>-ln001 ~]$ ls -l ~/.edf
+total 8
+-rw-r--r-- 1 <username> csstaff  27 Sep  6 15:19 debian.toml
+
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ ls -l ~/example-project/
+total 4
+-rw-r-----+ 1 <username> csstaff 28 Oct 26 17:44 fedora-env.toml
+
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ export EDF_PATH=$HOME/example-project/
+
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun --environment=fedora-env cat /etc/os-release
+NAME="Fedora Linux"
+VERSION="40 (Container Image)"
+ID=fedora
+VERSION_ID=40
+VERSION_CODENAME=""
+PLATFORM_ID="platform:f40"
+PRETTY_NAME="Fedora Linux 40 (Container Image)"
+[...]
+
+

Image Management

+

Image cache

+
+

INFO: The image caching functionality is only available on the Bristen vCluster as technical preview.

+
+

By default, images defined in the EDF as remote registry references (e.g. a Docker reference) are automatically pulled and locally cached. In later usage, a cached image is preferred over pulling the image again.

+

An image cache is automatically created at .edf_imagestore in the user's scratch folder (i.e., $SCRATCH/.edf_imagestore), under which cached images are stored in a subfolder corresponding to their CPU architecture (e.g., x86 or aarch64). Users should regularly remove unused cached images to limit the cache size.

+

Should users want to re-pull a cached image, they have to remove the corresponding image from the cache.

+

To choose an alternative image store path (e.g., to use a directory owned by a group rather than by an individual user), users can specify the image cache path explicitly by defining the environment variable EDF_IMAGESTORE. EDF_IMAGESTORE must be an absolute path to an existing folder.
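As an illustrative shell sketch of cache management (the image filename and the shared directory path are placeholders, and the .sqsh suffix is an assumption about the cached format):

# list cached images for the current architecture
ls $SCRATCH/.edf_imagestore/aarch64

# remove a cached image to force a re-pull at the next container start
rm $SCRATCH/.edf_imagestore/aarch64/<image>.sqsh

# use a shared, group-owned cache directory instead of the per-user default
export EDF_IMAGESTORE=/path/to/shared/edf_imagestore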

+
+

NOTE: If the CE cannot create a directory for the image cache, it operates in cache-free mode, meaning that it pulls an ephemeral image before every container launch and discards it upon termination.

+
+

Pulling images manually

+

To work with images from the NGC Catalog, please also refer to the next section on third-party and private registries.

+

To bypass any caching behavior, users can manually pull an image and directly plug it into their EDF. To do so, users may execute enroot import docker://[REGISTRY#]IMAGE[:TAG] to pull container images from OCI registries to the current directory.

+

For example, the command below pulls an nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04 image.

+
$ enroot import docker://nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04
+
+
+Image import w/ full output + +
[<vcluster>][<username>@<vcluster>-ln001 <username>]$ srun enroot import docker://nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04
+[INFO] Querying registry for permission grant
+[INFO] Authenticating with user: <anonymous>
+[INFO] Authentication succeeded
+[INFO] Fetching image manifest list
+[INFO] Fetching image manifest
+[INFO] Downloading 13 missing layers...
+[INFO] Extracting image layers...
+[INFO] Converting whiteouts...
+[INFO] Creating squashfs filesystem...
+Parallel mksquashfs: Using 64 processors
+Creating 4.0 filesystem on /scratch/aistor/<username>/nvidia+cuda+11.8.0-cudnn8-devel-ubuntu22.04.sqsh, block size 131072.
+
+Exportable Squashfs 4.0 filesystem, zstd compressed, data block size 131072
+    uncompressed data, compressed metadata, compressed fragments,
+    compressed xattrs, compressed ids
+    duplicates are removed
+Filesystem size 9492185.87 Kbytes (9269.71 Mbytes)
+    98.93% of uncompressed filesystem size (9594893.12 Kbytes)
+Inode table size 128688 bytes (125.67 Kbytes)
+    17.47% of uncompressed inode table size (736832 bytes)
+Directory table size 132328 bytes (129.23 Kbytes)
+    46.42% of uncompressed directory table size (285091 bytes)
+Number of duplicate files found 1069
+Number of inodes 13010
+Number of files 10610
+Number of fragments 896
+Number of symbolic links  846
+Number of device nodes 0
+Number of fifo nodes 0
+Number of socket nodes 0
+Number of directories 1554
+Number of ids (unique uids + gids) 1
+Number of uids 1
+    root (0)
+Number of gids 1
+    root (0)
+
+
+ +

After the import is complete, images are available in Squashfs format in the current directory and can be used in EDFs, for example:

+
[<vcluster>][<username>@<vcluster>-ln001 <username>]$ ls -l *.sqsh
+-rw-r--r-- 1 <username> csstaff 9720037376 Sep 11 14:46 nvidia+cuda+11.8.0-cudnn8-devel-ubuntu22.04.sqsh
+
+[<vcluster>][<username>@<vcluster>-ln001 <username>]$ realpath nvidia+cuda+11.8.0-cudnn8-devel-ubuntu22.04.sqsh
+/capstor/scratch/cscs/<username>/nvidia+cuda+11.8.0-cudnn8-devel-ubuntu22.04.sqsh
+
+[<vcluster>][<username>@<vcluster>-ln001 <username>]$ cat $HOME/.edf/cudnn8.toml 
+image = "/capstor/scratch/cscs/<username>/nvidia+cuda+11.8.0-cudnn8-devel-ubuntu22.04.sqsh"
+
+
+

NOTE: It is recommended to save images in /capstor/scratch/cscs/<username> or its subdirectories before using them with the CE.

+
+

Third-party and private registries

+

Docker Hub is the default registry from which remote images are imported.

+

To use an image from a different registry, the corresponding registry URL has to be prepended to the image reference, using a hash character (#) as a separator. For example:

+
# Usage within an EDF
+[<vcluster>][<username>@<vcluster>-ln001 <username>]$ cat $HOME/.edf/nvhpc-23.7.toml
+image = "nvcr.io#nvidia/nvhpc:23.7-runtime-cuda11.8-ubuntu22.04"
+
+# Usage on the command line
+[<vcluster>][<username>@<vcluster>-ln001 <username>]$ srun enroot import docker://nvcr.io#nvidia/nvhpc:23.7-runtime-cuda11.8-ubuntu22.04
+
+

To import images from private repositories, access credentials should be configured by individual users in the $HOME/.config/enroot/.credentials file, following the netrc file format. +Using the enroot import documentation page as a reference, some examples could be:

+
# NVIDIA NGC catalog (both endpoints are required)
+machine nvcr.io login $oauthtoken password <token>
+machine authn.nvidia.com login $oauthtoken password <token>
+
+# DockerHub
+machine auth.docker.io login <login> password <password>
+
+# Google Container Registry with OAuth
+machine gcr.io login oauth2accesstoken password $(gcloud auth print-access-token)
+# Google Container Registry with JSON
+machine gcr.io login _json_key password $(jq -c '.' $GOOGLE_APPLICATION_CREDENTIALS | sed 's/ /\\u0020/g')
+
+# Amazon Elastic Container Registry
+machine 12345.dkr.ecr.eu-west-2.amazonaws.com login AWS password $(aws ecr get-login-password --region eu-west-2)
+
+# Azure Container Registry with ACR refresh token
+machine myregistry.azurecr.io login 00000000-0000-0000-0000-000000000000 password $(az acr login --name myregistry --expose-token --query accessToken  | tr -d '"')
+# Azure Container Registry with ACR admin user
+machine myregistry.azurecr.io login myregistry password $(az acr credential show --name myregistry --subscription mysub --query passwords[0].value | tr -d '"')
+
+# Github.com Container Registry (GITHUB_TOKEN needs read:packages scope)
+machine ghcr.io login <username> password <GITHUB_TOKEN>
+
+# GitLab Container Registry (GITLAB_TOKEN needs a scope with read access to the container registry)
+# GitLab instances often use different domains for the registry and the authentication service, respectively
+# Two separate credential entries are required in such cases, for example:
+# Gitlab.com
+machine registry.gitlab.com login <username> password <GITLAB TOKEN>
+machine gitlab.com login <username> password <GITLAB TOKEN>
+
+# ETH Zurich GitLab registry
+machine registry.ethz.ch login <username> password <GITLAB_TOKEN>
+machine gitlab.ethz.ch login <username> password <GITLAB_TOKEN>  
+
+

Annotations

+

Annotations define arbitrary metadata for containers in the form of key-value pairs. Within the EDF, annotations are designed to be similar in appearance and behavior to those defined by the OCI Runtime Specification. Annotation keys usually express a hierarchical namespace structure, with domains separated by "." (full stop) characters.

+

As annotations are often used to control hooks, their keys typically have a deep nesting level. For example, to execute the SSH hook described below, the annotation com.hooks.ssh.enabled must be set to the string true.

+

EDF files support setting annotations through the annotations table. This can be done in multiple ways in TOML: for example, both of the following usages are equivalent:

+
    +
  • +

    Case: nest levels in the TOML key. +

    [annotations]
    +com.hooks.ssh.enabled = "true"
    +

    +
  • +
  • +

    Case: nest levels in the TOML table name. +

    [annotations.com.hooks.ssh]
    +enabled = "true"
    +

    +
  • +
+

To avoid mistakes, notice a few key features of TOML:

+
    +
  • All property assignments belong to the section immediately preceding them (the statement in square brackets), which defines the table they refer to.
  • +
  • Tables, on the other hand, do not automatically belong to the tables declared before them; to nest tables, their name has to list their parents using the dot notations (so the previous example defines the table ssh inside hooks, which in turn is inside com, which is inside annotations).
  • +
  • An assignment can implicitly define subtables if the key you assign is a dotted list. As a reference, see the examples made earlier in this section, where assigning a string to the com.hooks.ssh.enabled attribute within the [annotations] table is exactly equivalent to assigning to the enabled attribute within the [annotations.com.hooks.ssh] subtable.
  • +
  • +

    Attributes can be added to a table only in one place in the TOML file. In other words, each table must be defined in a single square-bracket section. For example, Case 3 in the example below is invalid because the ssh table is defined twice, in both the [annotations] and the [annotations.com.hooks.ssh] sections. See the TOML format spec for more details.

    +
      +
    • +

      Case 1 (valid): +

      [annotations.com.hooks.ssh]
      +authorize_ssh_key = "/capstor/scratch/cscs/<username>/tests/edf/authorized_keys"
      +enabled = "true"
      +

      +
    • +
    • +

      Case 2 (valid): +

      [annotations]
      +com.hooks.ssh.authorize_ssh_key = "/capstor/scratch/cscs/<username>/tests/edf/authorized_keys"
      +com.hooks.ssh.enabled = "true"
      +

      +
    • +
    • +

      Case 3 (invalid): +

      [annotations]
      +com.hooks.ssh.authorize_ssh_key = "/capstor/scratch/cscs/<username>/tests/edf/authorized_keys"
      +
      +[annotations.com.hooks.ssh]
      +enabled = "true"
      +

      +
    • +
    +
  • +
+

Accessing native resources

+

NVIDIA GPUs

+

The Container Engine leverages components from the NVIDIA Container Toolkit to expose NVIDIA GPU devices inside containers. GPU device files are always mounted in containers, and the NVIDIA driver user-space components are mounted as long as the NVIDIA_VISIBLE_DEVICES environment variable is set and is neither empty nor void. NVIDIA_VISIBLE_DEVICES is already set in container images officially provided by NVIDIA to enable all GPUs available on the host system. Such images are frequently used to containerize CUDA applications, either directly or as a base for custom images, so in many cases no action is required to access GPUs. For example, on a cluster with 4 GH200 devices per compute node:

+
[<vcluster>][<username>@<vcluster>-ln001 ~]$ cat .edf/cuda12.5.1.toml 
+image = "nvidia/cuda:12.5.1-devel-ubuntu24.04"
+
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun --environment=cuda12.5.1 nvidia-smi
+Thu Oct 26 17:59:36 2023       
++------------------------------------------------------------------------------------+
+| NVIDIA-SMI 535.129.03          Driver Version: 535.129.03   CUDA Version: 12.5     |
+|--------------------------------------+----------------------+----------------------+
+| GPU  Name              Persistence-M | Bus-Id        Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp   Perf       Pwr:Usage/Cap |         Memory-Usage | GPU-Util  Compute M. |
+|                                      |                      |               MIG M. |
+|======================================+======================+======================|
+|   0  GH200 120GB                 On  | 00000009:01:00.0 Off |                    0 |
+| N/A   24C    P0           89W / 900W |     37MiB / 97871MiB |      0%   E. Process |
+|                                      |                      |             Disabled |
++--------------------------------------+----------------------+----------------------+
+|   1  GH200 120GB                 On  | 00000019:01:00.0 Off |                    0 |
+| N/A   24C    P0           87W / 900W |     37MiB / 97871MiB |      0%   E. Process |
+|                                      |                      |             Disabled |
++--------------------------------------+----------------------+----------------------+
+|   2  GH200 120GB                 On  | 00000029:01:00.0 Off |                    0 |
+| N/A   24C    P0           83W / 900W |     37MiB / 97871MiB |      0%   E. Process |
+|                                      |                      |             Disabled |
++--------------------------------------+----------------------+----------------------+
+|   3  GH200 120GB                 On  | 00000039:01:00.0 Off |                    0 |
+| N/A   24C    P0           85W / 900W |     37MiB / 97871MiB |      0%   E. Process |
+|                                      |                      |             Disabled |
++--------------------------------------+----------------------+----------------------+
+
++------------------------------------------------------------------------------------+
+| Processes:                                                                         |
+|  GPU   GI   CI        PID   Type   Process name                         GPU Memory |
+|        ID   ID                                                          Usage      |
+|====================================================================================|
+|  No running processes found                                                        |
++------------------------------------------------------------------------------------+
+
+

It is possible to use environment variables to control which capabilities of the NVIDIA driver are enabled inside containers. +Additionally, the NVIDIA Container Toolkit can enforce specific constraints for the container, for example, on versions of the CUDA runtime or driver, or on the architecture of the GPUs. +For the full details about using these features, please refer to the official documentation: Driver Capabilities, Constraints.
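As an illustrative sketch, one plausible way to narrow GPU visibility is to set the toolkit's variables in the EDF env table (the variable names come from the NVIDIA Container Toolkit; the values here are examples, and the exact interaction with the hook may depend on the system configuration):

image = "nvidia/cuda:12.5.1-devel-ubuntu24.04"

[env]
NVIDIA_VISIBLE_DEVICES = "0,1"                  # expose only GPUs 0 and 1 to the container
NVIDIA_DRIVER_CAPABILITIES = "compute,utility"  # enable only the compute and utility driver capabilities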

+

HPE Slingshot interconnect

+

The Container Engine provides a hook to allow containers relying on libfabric to leverage the HPE Slingshot 11 high-speed interconnect. This component is commonly referred to as the "CXI hook", taking its name from the CXI libfabric provider required to interface with Slingshot 11. The hook bind-mounts the custom host libfabric library into the container, along with all the required dependency libraries and devices. If a libfabric library is already present in the container filesystem (for example, provided by the image), it is replaced by its host counterpart; otherwise, the host libfabric is simply added to the container.

+
+

NOTE: Due to the nature of Slingshot and the mechanism implemented by the CXI hook, container applications need to use a communication library which supports libfabric in order to benefit from usage of the hook. +Libfabric support might have to be defined at compilation time (as is the case for some MPI implementations, like MPICH and OpenMPI) or could be dynamically available at runtime (as is the case with NCCL - see also this section for more details).

+
+

The hook is activated by setting the com.hooks.cxi.enabled annotation, which can be defined in the EDF, as shown in the following example:

+
# Without the CXI hook
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ cat $HOME/.edf/osu-mb.toml 
+image = "quay.io#madeeks/osu-mb:6.2-mpich4.1-ubuntu22.04-arm64"
+
+[annotations]
+com.hooks.cxi.enabled = "false"
+
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun -N2 --mpi=pmi2 --environment=osu-mb ./osu_bw
+# OSU MPI Bandwidth Test v6.2
+# Size      Bandwidth (MB/s)
+1                       0.22
+2                       0.40
+4                       0.90
+8                       1.82
+16                      3.41
+32                      6.81
+64                     13.18
+128                    26.74
+256                    11.95
+512                    38.06
+1024                   39.65
+2048                   83.22
+4096                  156.14
+8192                  143.08
+16384                  53.78
+32768                 106.77
+65536                  49.88
+131072                871.86
+262144                780.97
+524288                694.58
+1048576               831.02
+2097152              1363.30
+4194304              1279.54
+
+
+# With the CXI hook enabling access to the Slingshot high-speed network
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ cat .edf/osu-mb-cxi.toml 
+image = "quay.io#madeeks/osu-mb:6.2-mpich4.1-ubuntu22.04"
+
+[annotations]
+com.hooks.cxi.enabled = "true"
+
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun -N2 --mpi=pmi2 --environment=osu-mb-cxi ./osu_bw
+# OSU MPI Bandwidth Test v6.2
+# Size      Bandwidth (MB/s)
+1                       1.21
+2                       2.32
+4                       4.85
+8                       8.38
+16                     19.36
+32                     38.47
+64                     76.28
+128                   151.76
+256                   301.25
+512                   604.17
+1024                 1145.03
+2048                 2367.25
+4096                 4817.16
+8192                 8633.36
+16384               16971.18
+32768               18740.55
+65536               21978.65
+131072              22962.31
+262144              23436.78
+524288              23672.92
+1048576             23827.78
+2097152             23890.95
+4194304             23925.61
+
+
+

TIP: On several vClusters, the CXI hook for Slingshot connectivity is enabled implicitly by default or by other hooks. Therefore, entering the enabling annotation in the EDF is unnecessary in many cases.

+
+

Container Hooks

+

Container hooks let you customize container behavior to fit system-specific needs, making them especially valuable for High-Performance Computing.

+
    +
  • What they do: Hooks extend container runtime functionality by enabling custom actions during a container's lifecycle.
  • +
  • Use for HPC: HPC systems rely on specialized hardware and fine-tuned software, unlike generic containers. Hooks bridge this gap by allowing containers to access these system-specific resources or enable custom features.
  • +
+
+

INFO: This section outlines all hooks supported in production by the Container Engine. However, specific Alps vClusters may support only a subset or use custom configurations. For details about available features in individual vClusters, consult platform documentation or contact CSCS support.

+
+

AWS OFI NCCL Hook

+

The AWS OFI NCCL plugin is a software extension that allows the NCCL and RCCL libraries to use libfabric as a network provider and, through libfabric, to access the Slingshot high-speed interconnect.

+

The Container Engine includes a hook program to inject the AWS OFI NCCL plugin in containers; since the plugin must also be compatible with the GPU programming software stack being used, the com.hooks.aws_ofi_nccl.variant annotation is used to specify a plugin variant suitable for a given container image. +At the moment of writing, 4 plugin variants are configured: cuda11, cuda12 (to be used on NVIDIA GPU nodes), rocm5, and rocm6 (to be used on AMD GPU nodes alongside RCCL). +For example, the following EDF enables the hook and uses it to mount the plugin in a CUDA 11 image:

+
image = "nvcr.io#nvidia/pytorch:22.12-py3"
+mounts = ["/capstor/scratch/cscs/amadonna:/capstor/scratch/cscs/amadonna"]
+entrypoint = false
+
+[annotations]
+com.hooks.aws_ofi_nccl.enabled = "true"
+com.hooks.aws_ofi_nccl.variant = "cuda11"
+
+

The AWS OFI NCCL hook also takes care of the following aspects:

+
    +
  • It implicitly enables the CXI hook, therefore exposing the Slingshot interconnect to container applications. In other words, when enabling the AWS OFI NCCL hook, it's unnecessary to also enable the CXI hook separately in the EDF.
  • +
  • It sets environment variables to control the behavior of NCCL and the libfabric CXI provider for Slingshot. In particular, the NCCL_NET_PLUGIN variable (link) is set to force NCCL to load the specific network plugin mounted by the hook. This is useful because certain container images (for example, those from NGC repositories) might already ship with a default NCCL plugin. Other environment variables help prevent application stalls and improve performance when using GPUDirect for RDMA communication.
  • +
+

SSH Hook

+

The SSH hook runs a lightweight, statically-linked SSH server (a build of Dropbear) inside the container. It can be useful to add SSH connectivity to containers (for example, enabling remote debugging) without bundling an SSH server into the container image or creating ad-hoc image variants for such purposes.

+

The com.hooks.ssh.authorize_ssh_key annotation allows the authorization of a custom public SSH key for remote connections. The annotation value must be the absolute path to a text file containing the public key (just the public key without any extra signature/certificate). After the container starts, it is possible to get a remote shell inside the container by connecting with SSH to the listening port.

+

By default, the server started by the SSH hook listens to port 15263, but this setting can be controlled through the com.hooks.ssh.port annotation in the EDF.
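As a sketch combining the annotations discussed above (the port value is arbitrary and the key path follows the annotation names given in this section):

[annotations.com.hooks.ssh]
enabled = "true"
authorize_ssh_key = "/path/to/public_key"
port = "2222"    # overrides the default listening port 15263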

+
+

NOTE: To use the SSH hook, it is required to keep the container writable.

+
+

The following EDF file shows an example of enabling the SSH hook and authorizing a user-provided public key:

+
[<vcluster>][<username>@<vcluster>-ln001 ~]$ cat $HOME/.edf/ubuntu-ssh.toml
+image = "ubuntu:latest"
+writable = true
+
+[annotations.com.hooks.ssh]
+enabled = "true"
+authorize_ssh_key = "<public key file>"
+
+

Using the previous EDF, a container can be started as follows. Notice that the --pty option for the srun command is currently required in order for the hook to initialize properly:

+
[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun --environment=ubuntu-ssh --pty <command>
+
+

While the container is running, it is possible to connect to it from a remote host using a private key matching the public key authorized in the EDF annotation. For example, on a host where that private key is the default identity file, the following command could be used:

+
ssh -p 15263 <host-of-container>
+
+
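If that key is not the default identity on the connecting host, it can be passed explicitly (the key path is illustrative):

ssh -i ~/.ssh/container_key -p 15263 <host-of-container>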
+

INFO: In order to establish connections through the Visual Studio Code Remote - SSH extension, the scp program must be available within the container. This is required to transfer and set up the VS Code Server in the remote container.

+
+

NVIDIA CUDA MPS Hook

+

On several Alps vClusters, NVIDIA GPUs by default operate in "Exclusive process" mode, that is, the CUDA driver is configured to allow only one process at a time to use a given GPU. +For example, on a node with 4 GPUs, a maximum of 4 CUDA processes can run at the same time:

+
[<vcluster>][<username>@<vcluster>-ln001 ~]$ nvidia-smi -L
+GPU 0: GH200 120GB (UUID: GPU-...)
+GPU 1: GH200 120GB (UUID: GPU-...)
+GPU 2: GH200 120GB (UUID: GPU-...)
+GPU 3: GH200 120GB (UUID: GPU-...)
+
+# This EDF uses the CUDA vector addition sample from NVIDIA's NGC catalog
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ cat $HOME/.edf/vectoradd-cuda.toml
+image = "nvcr.io#nvidia/k8s/cuda-sample:vectoradd-cuda12.5.0-ubuntu22.04"
+
+# 4 processes run successfully
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun -t2 -N1 -n4 --environment=vectoradd-cuda /cuda-samples/vectorAdd | grep "Test PASSED"
+Test PASSED
+Test PASSED
+Test PASSED
+Test PASSED
+
+# More than 4 concurrent processes result in oversubscription errors
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun -t2 -N1 -n5 --environment=vectoradd-cuda /cuda-samples/vectorAdd | grep "Test PASSED"
+Failed to allocate device vector A (error code CUDA-capable device(s) is/are busy or unavailable)!
+srun: error: [...]
+[...]
+
+

In order to run multiple processes concurrently on the same GPU (one example could be running multiple MPI ranks on the same device), the NVIDIA CUDA Multi-Process Service (or MPS, for short) must be started on the compute node.

+

The Container Engine provides a hook to automatically manage the setup and removal of the NVIDIA CUDA MPS components within containers. The hook can be activated by setting the com.hooks.nvidia_cuda_mps.enabled annotation to the string true.

+
+

NOTE: To use the CUDA MPS hook, it is required to keep the container writable.

+
+

The following is an example of using the NVIDIA CUDA MPS hook:

+
[<vcluster>][<username>@<vcluster>-ln001 ~]$ cat $HOME/.edf/vectoradd-cuda-mps.toml
+image = "nvcr.io#nvidia/k8s/cuda-sample:vectoradd-cuda12.5.0-ubuntu22.04"
+writable = true
+
+[annotations]
+com.hooks.nvidia_cuda_mps.enabled = "true"
+
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun -t2 -N1 -n8 --environment=vectoradd-cuda-mps /cuda-samples/vectorAdd | grep "Test PASSED" | wc -l
+8
+
+
+

INFO: When using the NVIDIA CUDA MPS hook it is not necessary to use other wrappers or scripts to manage the Multi-Process Service, as is documented for native jobs on some vClusters.

+
+

EDF Reference

+

EDF files use the TOML format. For details about the data types used by the different parameters, please refer to the TOML spec webpage.

+

In the following, the default value is none (i.e., the empty value of the corresponding type) if not specified.

+

(ARRAY or STRING) base_environment

+

Ordered list of EDFs that this file inherits from. Parameters from listed environments are evaluated sequentially. Supports up to 10 levels of recursion.

+
+Notes + + * Parameters from the listed environments are evaluated sequentially, adding new entries or overwriting previous ones, before evaluating the parameters from the current EDF. In other words, the current EDF inherits the parameters from the EDFs listed in `base_environment`. When evaluating `mounts` or `env` parameters, values from downstream EDFs are appended to inherited values. + + * The individual EDF entries in the array follow the same search rules as the arguments of the `--environment` CLI option for Slurm; they can be either file paths or filenames without extension if the file is located in the [EDF search path](#edf-search-path). + + * This parameter can be a string if there is only one base environment. +
+ +
+Examples + + * Single environment inheritance: +
base_environment = "common_env"
+
+ + * Multiple environment inheritance: +
base_environment = ["common_env", "ml_pytorch_env1"]
+
+
+ +

(STRING) image

+

The container image to use. Can reference a remote Docker/OCI registry or a local Squashfs file as a filesystem path.

+
+Notes + + * The full format for remote references is `[USER@][REGISTRY#]IMAGE[:TAG]`. + * `[REGISTRY#]`: (optional) registry URL, followed by #. Default: Docker Hub. + * `IMAGE`: image name. + * `[:TAG]`: (optional) image tag name, preceded by :. + * The registry user can also be specified in the `$HOME/.config/enroot/.credentials` file. +
+ +
+Examples + + * Reference of Ubuntu image in the Docker Hub registry (default registry) +
image = "library/ubuntu:24.04"
+
+ + * Explicit reference of Ubuntu image in the Docker Hub registry +
image = "docker.io#library/ubuntu:24.04"
+
+ + * Reference to PyTorch image from NVIDIA Container Registry (nvcr.io) +
image = "nvcr.io#nvidia/pytorch:22.12-py3"
+
+ + * Image from third-party quay.io registry +
image = "quay.io#madeeks/osu-mb:6.2-mpich4.1-ubuntu22.04-arm64"
+
+ + * Reference to a manually pulled image stored in parallel FS +
image = "/path/to/image.squashfs"
+
+
+ +

(STRING) workdir

+

Initial working directory when the container starts. Default: inherited from image.

+
+Example + + * Workdir pointing to a user defined project path  +
workdir = "/home/user/projects"
+
+ + * Workdir pointing to the `/tmp` directory +
workdir = "/tmp"
+
+
+ +

(BOOL) entrypoint

+

If true, run the entrypoint from the container image. Default: true.

+
+Example + +
entrypoint = false
+
+
+ +

(BOOL) writable

+

If false, the container filesystem is read-only. Default: false.

+
+Example + +
writable = true
+
+
+ +

(ARRAY) mounts

+

List of bind mounts in the format SOURCE:DESTINATION[:FLAGS]. Flags are optional and can include ro, private, etc.

+
+Notes + + * Mount flags are separated with a plus symbol, for example: `ro+private`. + * Optional flags from docker format or OCI (need reference) +
+ +
+Example + + * Literal fixed mount map +
mounts = ["/capstor/scratch/cscs/amadonna:/capstor/scratch/cscs/amadonna"]
+
+ + * Mapping path with `env` variable expansion +
mounts = ["/capstor/scratch/cscs/${USER}:/capstor/scratch/cscs/${USER}"]
+
+ + * Mounting the scratch filesystem using a host environment variable +
mounts = ["${SCRATCH}:/scratch"]
+
+
+ +

(TABLE) env

+

Environment variables to set in the container. Null-string values will unset the variable. Default: inherited from the host and the image.

+
+Notes + + * By default, containers inherit environment variables from the container image and the host environment, with variables from the image taking precedence. + * The env table can be used to further customize the container environment by setting, modifying, or unsetting variables. + * Values of the table entries must be strings. If an entry has a null value, the variable corresponding to the entry key is unset in the container. +
+ +
+Example + + * Basic `env` block +
[env]
+MY_RUN = "production",
+DEBUG = "false"
+
+ + * Use of environment variable expansion +
[env]
+MY_NODE = "${VAR_FROM_HOST}",
+PATH = "${PATH}:/custom/bin", 
+DEBUG = "true"
+
+
+ +

(TABLE) annotations

+

OCI-like annotations for the container. For more details, refer to the Annotations section.

+
+Example + + * Disabling the CXI hook +
[annotations]
+com.hooks.cxi.enabled = "false"
+
+ + * Control of SSH hook parameters via annotation and variable expansion +
[annotations.com.hooks.ssh]
+authorize_ssh_key = "/capstor/scratch/cscs/${USER}/tests/edf/authorized_keys"
+enabled = "true"
+
+ + * Alternative example for usage of annotation with fixed path +
[annotations]
+com.hooks.ssh.authorize_ssh_key = "/path/to/authorized_keys"
+com.hooks.ssh.enabled = "true"
+
+
+ +
+

INFO: Environment variable expansion and relative paths expansion are only available on the Bristen vCluster as technical preview.

+
+

Environment Variable Expansion

+

Environment variable expansion allows for dynamic substitution of environment variable values within the EDF (Environment Definition File). This capability applies across all configuration parameters in the EDF, providing flexibility in defining container environments.

+
    +
  • Syntax. Use ${VAR} to reference an environment variable VAR. The variable's value is resolved from the combined environment, which includes variables defined in the host and in the container image, the latter taking precedence.
  • +
  • Scope. Variable expansion is supported across all EDF parameters. This includes EDF’s parameters like mounts, workdir, image, etc. For example, ${SCRATCH} can be used in mounts to reference a directory path.
  • +
  • Undefined Variables. Referencing an undefined variable results in an error. To safely handle undefined variables, you can use the syntax ${VAR:-}, which evaluates to an empty string if VAR is undefined.
  • +
  • Preventing Expansion. To prevent expansion, use double dollar signs $$. For example, $$${VAR} will render as the literal string ${VAR} (see the sketch after this list).
  • +
  • Limitations
      +
    • Variables defined within the [env] EDF table cannot reference other entries from [env] tables in the same or other EDF files (e.g. the ones entered as base environments). Therefore, only environment variables from the host or image can be referenced.
    • +
    +
  • +
  • Environment Variable Resolution Order. The environment variables are resolved based on the following order:
      +
    • TOML env: Variable values as defined in EDF’s env.
    • +
    • Container Image: Variables defined in the container image's environment take precedence.
    • +
    • Host Environment: Environment variables defined in the host system.
    • +
    +
  • +
+
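A small illustrative EDF combining these syntaxes (variable names are placeholders; recall that expansion is currently a technical preview on Bristen):

image = "library/ubuntu:24.04"
mounts = ["${SCRATCH}:/scratch"]   # ${SCRATCH} is expanded from the host environment
workdir = "/scratch"

[env]
OPTIONAL_FLAG = "${MY_FLAG:-}"     # empty string instead of an error if MY_FLAG is undefined
LITERAL = "$$${HOME}"              # double dollar prevents expansion: the value is the literal string ${HOME}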

Relative paths expansion

+

Relative filesystem paths can be used within EDF parameters, and will be expanded by the CE at runtime. The paths are interpreted as relative to the working directory of the process calling the CE, not to the location of the EDF file.
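For instance, with the following sketch of an EDF (paths are illustrative, and the feature is a technical preview), the relative source path is resolved against the directory from which srun is invoked, not against the EDF's location:

# $HOME/.edf/relpath.toml
image = "library/ubuntu:24.04"
mounts = ["./input:/input"]   # "./input" resolves relative to the working directory of the srun call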

+

Known Issues

+

Compatibility with Alpine Linux

+

Alpine Linux is incompatible with some hooks, causing errors when used with Slurm. For example,

+
[<vcluster>][<username>@<vcluster>-ln001 ~]$ cat alpine.toml
+image = "alpine: *19"
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun -lN1 --environment=alpine.toml echo "abc"
+0: slurmstepd: error: pyxis: container start failed with error code: 1
+0: slurmstepd: error: pyxis: printing enroot log file:
+0: slurmstepd: error: pyxis:     [ERROR] Failed to refresh the dynamic linker cache
+0: slurmstepd: error: pyxis:     [ERROR] /etc/enroot/hooks.d/87-slurm.sh exited with return code 1
+0: slurmstepd: error: pyxis: couldn't start container
+0: slurmstepd: error: spank: required plugin spank_pyxis.so: task_init() failed with rc=-1
+0: slurmstepd: error: Failed to invoke spank plugin stack
+
+

This is because some hooks (e.g., the Slurm and CXI hooks) leverage ldconfig (from glibc) when bind-mounting host libraries inside containers; since Alpine Linux provides an alternative ldconfig (from musl libc), the hooks do not work as intended. As a workaround, users may disable the problematic hooks. For example,

+
[<vcluster>][<username>@<vcluster>-ln001 ~]$ cat alpine_workaround.toml
+image = "alpine: *19"
+[annotations]
+com.hooks.slurm.enabled = "false"
+com.hooks.cxi.enabled = "false"
+[<vcluster>][<username>@<vcluster>-ln001 ~]$ srun -lN1 --environment=alpine_workaround.toml echo "abc"
+abc
+
+

Notice the section [annotations] disabling Slurm and CXI hooks.


FirecREST

+

FirecREST is a RESTful API for managing High-Performance Computing resources, developed at CSCS. Scientific platform developers can integrate FirecREST into web-enabled portals and applications, allowing them to securely access authenticated and authorized CSCS services such as job submission and data transfer on HPC systems.

+

Users can make HTTP requests to perform the following operations:

+
    +
  • basic system utilities like ls, mkdir, mv, chmod, chown, among others
  • +
  • actions against the Slurm workload manager (submit, query, and cancel jobs of the user)
  • +
  • internal (between CSCS systems) and external (to/from CSCS systems) data transfers
  • +
+

For a full feature set, have a look at the latest FirecREST API specification deployed at CSCS.

+

Please refer to the FirecREST documentation for further details.

+

FirecREST Deployment on Alps

+

FirecREST is available for all three major Alps platforms, with a different API endpoint for each platform.

| Platform     | API Endpoint                          | Clusters          |
|--------------|---------------------------------------|-------------------|
| HPC Platform | https://api.cscs.ch/hpc/firecrest/v1  | Daint, Eiger      |
| ML Platform  | https://api.cscs.ch/ml/firecrest/v1   | Bristen, Clariden |
| CW Platform  | https://api.cscs.ch/cw/firecrest/v1   | Santis            |
+

Developer Portal: Creating FirecREST clients

+

The Developer Portal is an application based on the WSO2 API Manager that allows CSCS users to manage subscriptions to APIs at CSCS (such as FirecREST).

+

Start by navigating to developer.cscs.ch, then sign in by clicking the "SIGN-IN" button in the top right-hand corner of the page.

+

Once logged in, you will see a list of APIs that are available to your user.

+

Creating an Application

+

Click on the "Applications" button at the top of the screen to manage your Applications.

+

FirecREST Main Page

+

To create a new application, click on the "ADD NEW APPLICATION" button at the top of the Applications page, and complete the mandatory fields (marked with *). +Make sure to give the application a unique name and select the number of requests per minute. +When finished, click on the "Save" button.

+
+

Note

+

To subscribe to an API you need at least one application; the DefaultApplication can be used for this purpose.

+
+
+

Note

+

The quota of requests per minute will be shared by all subscribers to the Application over all APIs.

+
+

Configuring Production Keys

+

Once the Application is created, create the Production Keys (Client ID and Client Secret) by clicking on "Production Keys"

+

FirecREST production keys

+

There are two ways of configuring production keys:

+
+
+
+

This approach can be used if you have already generated keys for FirecREST.

+
    +
  • click on the "Provide Existing OAuth Keys" button
  • +
  • enter the Consumer Key (Client ID) and Consumer Secret (Client Secret)
  • +
  • click the "Provide" button to confirm
  • +
+

FirecREST existing keys

+
+
+

Use this if this is your first FirecREST application, or if you wish to create new keys.

+
    +
  • click on the "Generate Keys" button at the bottom of the page
  • +
+

FirecREST existing keys

+
+
+
+

Once the keys are generated, you will see the pair "Consumer Key" and "Consumer Secret".

+

FirecREST keys

+
+

Warning

+

Store this pair of credentials securely; they are the access keys to your resources at CSCS.

+
+

Subscribe to an API

+

Once you have set up your Application, it is time to subscribe it to an API.

+

To do so:

+
    +
  • (8a) click on the "Subscriptions" option on the left panel
  • +
  • (8b) click the "Subscribe APIS" button
  • +
  • (8c) choose the API you want to subscribe to by clicking the "Subscribe" button
  • +
+

FirecREST subscriptions

+

Back on the Subscription Management page, you can review your active subscriptions and APIs that your Application has access to.

+

FirecREST subscription management

+

To use your Application to access FirecREST, follow the API documentation.

+

Getting Started

+

Using the Python Interface

+

One way to get started is by using pyFirecREST, a Python package with a collection of wrappers for the different functionalities of FirecREST. +This package simplifies the usage of FirecREST by making multiple requests in the background for more complex workflows as well as by refreshing the access token before it expires.

+
+

Try FirecREST using pyFirecREST

+
import firecrest as fc
+
+
+client_id = "<client_id>"
+client_secret = "<client_secret>"
+token_uri = "https://auth.cscs.ch/auth/realms/firecrest-clients/protocol/openid-connect/token"
+
+# Setup the client for the specific account
+# For instance, for the Alps HPC Platform system Daint:
+
+client = fc.Firecrest(
+    firecrest_url="https://api.cscs.ch/hpc/firecrest/v1",
+    authorization=fc.ClientCredentialsAuth(client_id, client_secret, token_uri)
+)
+
+print(client.all_systems())
+# Output: (one dictionary per system)
+# [{
+#      'system': 'daint'
+#      'status': 'available',
+#      'description': 'System ready',
+#  }]
+
+print(client.list_files('daint', '/capstor/scratch/cscs/<username>'))
+# Example output: (one dictionary per file)
+# [{
+#      'name': 'file.txt',
+#      'user': 'username'
+#      'last_modified': '2024-04-28T12:03:33',
+#      'permissions': 'rw-r--r--',
+#      'size': '2021',
+#      'type': '-',
+#      'group': 'project',
+#      'link_target': '',
+#  },
+#  {
+#       'name': 'test-dir',
+#       'user': 'username'
+#       'last_modified': '2024-04-20T11:22:41',
+#       'permissions': 'rwxr-xr-x',
+#       'size': '4096',
+#       'type': 'd',
+#       'group': 'project',
+#       'link_target': '',
+#  }]
+
+
+

The tutorial is written for a generic instance of FirecREST, but if you have a valid account at CSCS you can test it directly with your resource allocation on the exposed systems.

+

CSCS Developer Portal

+

A client application that makes requests to FirecREST does not use the user's credentials directly for authentication, but an access token instead. The access token is a signed JSON Web Token (JWT) which contains expiry information. Behind the API, all commands launched by the client will use the account of the user that registered the client, inheriting their access rights. You can manage your client application on the CSCS Developer Portal.

+

Every client will have a client ID (Consumer Key) and a secret (Consumer Secret) that will be used to get a short-lived access token with an HTTP request.

+
+

curl call to fetch the access token

+
curl -s -X POST https://auth.cscs.ch/auth/realms/firecrest-clients/protocol/openid-connect/token \
+     --data "grant_type=client_credentials" \
+     --data "client_id=<your_client>" \
+     --data "client_secret=<your_secret>"
+
+
+
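The returned JSON contains an access_token field. A rough sketch of using it in a subsequent API call (the /status/systems endpoint and the use of jq are assumptions for illustration):

TOKEN=$(curl -s -X POST https://auth.cscs.ch/auth/realms/firecrest-clients/protocol/openid-connect/token \
     --data "grant_type=client_credentials" \
     --data "client_id=<your_client>" \
     --data "client_secret=<your_secret>" | jq -r '.access_token')

curl -s -H "Authorization: Bearer $TOKEN" "https://api.cscs.ch/hpc/firecrest/v1/status/systems"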

Downloading Large Files

+

A staging area is used for external transfers and downloading/uploading a file from/to a CSCS filesystem.

+

Please follow the steps below to download a file:

+
    +
  1. Request FirecREST to move the file to the staging area: a download link will be provided.
  2. The file will remain in the staging area for 7 days, or until the link is invalidated with a request to the /storage/xfer-external/invalidate endpoint or through the corresponding pyFirecREST method.
  3. The staging area is shared by all users, so the link should be invalidated as soon as the download has completed.
+

You can see the full process in this tutorial.

+

To avoid issues for other users, we may be forced to delete files from the staging area sooner than 7 days when large files are moved there and the link is not invalidated after the download; we will contact the user in this case.

+

When uploading files through the staging area, you don't need to invalidate the link. FirecREST will do it automatically as soon as it transfers the file to the filesystem of CSCS.

+

There is also a constraint on the size of a single file that can be transferred externally to or from our systems via FirecREST: 5 GB.

+

If you wish to transfer data larger than the limit mentioned above, you can check the compress and extract endpoints, or follow this example on how to split large files and download/upload them using FirecREST.
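For illustration, the splitting can be done with standard tools before the transfer and the pieces reassembled afterwards (file names below are placeholders):

# cut a large archive into 4 GB chunks, each below the 5 GB limit
split -b 4G results.tar results.tar.part_

# transfer each chunk through FirecREST, then reassemble on the other side
cat results.tar.part_* > results.tar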

+

The limits on the time and size of files that can be downloaded/uploaded via FirecREST might change if needed.

+
+

checking the current values in the parameters endpoint

+
>>> print(json.dumps(client.parameters(), indent = 2))
+{
+  (...)
+
+  "storage": [
+    {
+      "description": "Type of object storage, like `swift`, `s3v2` or `s3v4`.",
+      "name": "OBJECT_STORAGE",
+      "unit": "",
+      "value": "s3v4"
+    },
+    {
+      "description": "Expiration time for temp URLs.",
+      "name": "STORAGE_TEMPURL_EXP_TIME",
+      "unit": "seconds",
+      "value": "604800"  ## <-------- 7 days
+    },
+    {
+      "description": "Maximum file size for temp URLs.",
+      "name": "STORAGE_MAX_FILE_SIZE",
+      "unit": "MB",
+      "value": "5120"   ## <--------- 5 GB
+    }
+  (...)
+}
+
+
+

Job Submission to the Workload Manager through FirecREST

+

FirecREST provides an abstraction for job submission, using the SLURM scheduler of the vCluster as the backend (in the case of CSCS).

+

When submitting a job via the different endpoints, you should pass the -l option to the /bin/bash command in the batch script.

+

This option ensures that the submitted job uses the same environment as your login shell, sourcing the system-wide profile (/etc/profile) and your own profile (files like ~/.bash_profile, ~/.bash_login, or ~/.profile).
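A minimal sketch of such a batch script, with the -l option on the interpreter line (job name and resource values are placeholders):

#!/bin/bash -l
#SBATCH --job-name=firecrest-job
#SBATCH --nodes=1
#SBATCH --time=00:10:00

# the login environment (/etc/profile, ~/.bash_profile, ...) is sourced because of -l
srun <application>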

+

Further Information

\ No newline at end of file
diff --git a/tools/index.html b/tools/index.html
new file mode 100644
index 0000000..8f3d51a
--- /dev/null
+++ b/tools/index.html

Tools

+
+

Todo

+

Documentation for tools used on the vClusters, including:

+
    +
  • slurm
  • uenv
  • container engine
  • debuggers and profilers
+

We document the tools and their interfaces here, but this section does not contain all of the documentation for each tool.

+
    +
  • e.g. documentation on how to build software using uenv is in another section.
  • e.g. documentation on how to use Podman to build containers is in another section.
+
\ No newline at end of file
diff --git a/tools/slurm/index.html b/tools/slurm/index.html
new file mode 100644
index 0000000..f05933c
--- /dev/null
+++ b/tools/slurm/index.html

+

SLURM

+

CSCS uses SLURM as its workload manager to efficiently schedule and manage jobs on Alps vClusters. +SLURM is an open-source, highly scalable job scheduler that allocates computing resources, queues user jobs, and optimizes workload distribution across the cluster. It supports advanced scheduling policies, job dependencies, resource reservations, and accounting, making it well-suited for high-performance computing environments.

+

Accounting

+
+

Todo

+

document --account, --constraint and other generic flags.
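As a rough illustration in the meantime (the account and constraint values are placeholders, not CSCS-specific recommendations):

# charge a job to a specific project and select a node type via a constraint
srun --account=<project> --constraint=<feature> --nodes=1 <application>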

+
+

Partitions

+

At CSCS, SLURM is configured to accommodate the diverse range of node types available in our HPC clusters. These nodes vary in architecture, including CPU-only nodes and nodes equipped with different types of GPUs. Because of this heterogeneity, SLURM must be tailored to ensure efficient resource allocation, job scheduling, and workload management specific to each node type.

+

Each type of node has different resource constraints and capabilities, which SLURM takes into account when scheduling jobs. For example, CPU-only nodes may have configurations optimized for multi-threaded CPU workloads, while GPU nodes require additional parameters to allocate GPU resources efficiently. SLURM ensures that user jobs request and receive the appropriate resources while preventing conflicts or inefficient utilization.

+

The following sections will provide detailed guidance on how to use SLURM to request and manage CPU cores, memory, and GPUs in jobs. These instructions will help users optimize their workload execution and ensure efficient use of CSCS computing resources.
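Because the exact configuration differs between vClusters, it can also help to inspect it directly with standard SLURM commands (partition and node names vary per system):

# summary of the partitions and node counts on the current cluster
sinfo --summarize

# limits and defaults of each partition
scontrol show partition

# resources (CPUs, memory, generic resources such as GPUs) of a single node
scontrol show node <nodename>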

+

+

NVIDIA GH200 GPU Nodes

+

The GH200 nodes on Alps have four GPUs per node, and SLURM job submissions must be configured appropriately to best make use of the resources. +Applications that can saturate the GPUs with a single process per GPU should generally prefer this mode. +Configuring SLURM jobs to use a single GPU per rank is also the most straightforward setup. +Some applications perform badly with a single rank per GPU, and require use of NVIDIA's Multi-Process Service (MPS) to oversubscribe GPUs with multiple ranks per GPU.

+

The best SLURM configuration is application- and workload-specific, so it is worth testing which works best in your particular case. +See Scientific Applications for information about recommended application-specific SLURM configurations.

+
+

Warning

+

The GH200 nodes have their GPUs configured in "default" compute mode. +The "default" mode is used to avoid issues with certain containers. +Unlike "exclusive process" mode, "default" mode allows multiple processes to submit work to a single GPU simultaneously. +This also means that different ranks on the same node can inadvertently use the same GPU, leading to suboptimal performance or unused GPUs rather than job failures.

+

Some applications benefit from using multiple ranks per GPU. However, MPS should be used in these cases.

+

If you are unsure about which GPU is being used by a particular rank, print the CUDA_VISIBLE_DEVICES variable, along with e.g. the SLURM_LOCALID, SLURM_PROCID, and SLURM_NODEID variables, in your job script. +If the variable is unset or empty, all GPUs are visible to the rank, and the rank will in most cases only use the first GPU.
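A quick way to do this from the command line, as a sketch (the flags match the one-rank-per-GPU configuration described below):

# print the GPU visible to each rank together with its SLURM placement
srun --ntasks-per-node=4 --gpus-per-task=1 bash -c \
    'echo "node $SLURM_NODEID rank $SLURM_PROCID local $SLURM_LOCALID: CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-unset}"'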

+
+

+

One rank per GPU

+

Configuring SLURM to use one GH200 GPU per rank is most easily done using the --ntasks-per-node=4 and --gpus-per-task=1 SLURM flags. +For advanced users: using --gpus-per-task is equivalent to setting CUDA_VISIBLE_DEVICES to SLURM_LOCALID, assuming the job uses four ranks per node. +The examples below launch jobs on two nodes with four ranks per node using sbatch and srun:

+
#!/bin/bash
+#SBATCH --job-name=gh200-single-rank-per-gpu
+#SBATCH --nodes=2
+#SBATCH --ntasks-per-node=4
+#SBATCH --gpus-per-task=1
+
+srun <application>
+
+
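For an interactive test, the same configuration can also be passed directly to srun without a batch script:

srun --nodes=2 --ntasks-per-node=4 --gpus-per-task=1 <application>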

Omitting the --gpus-per-task results in CUDA_VISIBLE_DEVICES being unset, which will lead to most applications using the first GPU on all ranks.

+

+

Multiple ranks per GPU

+

Using multiple ranks per GPU can improve performance, e.g. for applications that don't generate enough work for a GPU with a single rank, or that scale badly to all 72 cores of the Grace CPU. +In these cases SLURM jobs must be configured to assign multiple ranks to a single GPU. +This is best done using NVIDIA's Multi-Process Service (MPS). +To use MPS, launch your application using the following wrapper script, which starts MPS from one rank per node and assigns GPUs to ranks according to the CPU mask of each rank, ensuring the closest GPU is used:

+
#!/bin/bash
+# Example mps-wrapper.sh usage:
+# > srun [srun args] mps-wrapper.sh [cmd] [cmd args]
+
+# Only this path is supported by MPS
+export CUDA_MPS_PIPE_DIRECTORY=/tmp/nvidia-mps
+export CUDA_MPS_LOG_DIRECTORY=/tmp/nvidia-log-$(id -un)
+
+# Launch MPS from a single rank per node
+if [[ $SLURM_LOCALID -eq 0 ]]; then
+    CUDA_VISIBLE_DEVICES=0,1,2,3 nvidia-cuda-mps-control -d
+fi
+
+# Set CUDA device
+numa_nodes=$(hwloc-calc --physical --intersect NUMAnode $(hwloc-bind --get --taskset))
+export CUDA_VISIBLE_DEVICES=$numa_nodes
+
+# Wait for MPS to start
+sleep 1
+
+# Run the command
+numactl --membind=$numa_nodes "$@"
+result=$?
+
+# Quit MPS control daemon before exiting
+if [[ $SLURM_LOCALID -eq 0 ]]; then
+    echo quit | nvidia-cuda-mps-control
+fi
+
+exit $result
+
+

Save the above script as mps-wrapper.sh and make it executable with chmod +x mps-wrapper.sh. +If the mps-wrapper.sh script is in the current working directory, you can then launch jobs using MPS for example as follows:

+
#!/bin/bash
+#SBATCH --job-name=gh200-multiple-ranks-per-gpu
+#SBATCH --nodes=2
+#SBATCH --ntasks-per-node=32
+#SBATCH --cpus-per-task=8
+
+srun ./mps-wrapper.sh <application>
+
+

Note that in the example job above:

+
    +
  • --gpus-per-node is not set at all; the mps-wrapper.sh script ensures that the right GPU is visible for each rank using CUDA_VISIBLE_DEVICES
  • --ntasks-per-node is set to 32; this results in 8 ranks per GPU
  • --cpus-per-task is set to 8; this ensures that threads are not allowed to migrate across the whole GH200 node
+

The configuration that is optimal for your application may be different.

+

+

AMD CPU

+
+

Todo

+

document how slurm is configured on AMD CPU nodes (e.g. eiger)

+
\ No newline at end of file
diff --git a/tools/uenv/index.html b/tools/uenv/index.html
new file mode 100644
index 0000000..6e56285
--- /dev/null
+++ b/tools/uenv/index.html

+

uenv

+

Uenv are user environments that provide scientific applications, libraries and tools. +This page explains how to find, download and use uenv from the command line, and how to enable them in SLURM jobs.

+

Uenv are typically application-specific, domain-specific or tool-specific - each uenv contains only what is required for the application or tools that it provides.

+

Each uenv is packaged in a single file (in the SquashFS file format) that stores a compressed directory tree containing all of the software, tools and other information (such as modules) required to provide a rich environment.

+

Each environment contains a software stack comprising compilers, libraries, tools and scientific applications, built using Spack.

+
+

Warning

+

This documentation is for the new uenv2 implementation of uenv, which is not yet installed on Alps.

+
+

Getting started

+

After logging into an Alps cluster, you can quickly check the availability of uenv with the following commands:

+
> uenv status
+there is no uenv loaded
+> uenv --version
+7.0.0
+
+

uenv Labels

+

Uenv are referred to using labels, where a label has the following form name/version:tag@system%uarch, for example prgenv-gnu/24.11:v2@todi%gh200.

+

name

+

The name of the uenv; in this case prgenv-gnu.

+

version

+

The version of the uenv. The format of version depends on the specific uenv. +Often they use the yy.mm format, though they may also use the version of the software being packaged. +For example the namd/3.0.1 uenv packages version 3.0.1 of the popular NAMD simulation tool.

+

tag

+

Used to differentiate between releases of a versioned uenv. Some examples of tags include:

+
    +
  • rc1, rc2: release candidates.
  • v1: a first release, typically made after some release candidates.
  • v2: a second release, which might fix issues in the first release.
+

system

+

The name of the Alps cluster for which the uenv was built.

+

+

uarch

+

The node type (microarchitecture) that the uenv is built for:

| uarch | CPU                              | GPU                | comment            |
|-------|----------------------------------|--------------------|--------------------|
| gh200 | 4 72-core NVIDIA Grace (aarch64) | 4 NVIDIA H100 GPUs |                    |
| zen2  | 2 64-core AMD Rome (zen2)        | -                  | used in Eiger      |
| a100  | 1 64-core AMD Milan (zen3)       | 4 NVIDIA A100 GPUs |                    |
| mi200 | 1 64-core AMD Milan (zen3)       | 4 AMD MI250x GPUs  |                    |
| zen3  | 2 64-core AMD Milan (zen3)       | -                  | only in MCH system |
+

Using labels

+

The uenv command line has a flexible interface for filtering uenv by providing only part of the full label:

+
# search for all uenv on the current system that have the name prgenv-gnu
+uenv image find prgenv-gnu
+
+# search for all uenv with version 24.11
+uenv image find /24.11
+
+# search for all uenv with tag v1
+uenv image find :v1
+
+# search for a specific version
+uenv image find prgenv-gnu/24.11:v1
+
+

By default, the uenv command filters results to show only uenv that were built for the current cluster. +The name of the current cluster is always available via the CLUSTER_NAME environment variable.

+
# log into the eiger vCluster
+ssh eiger
+
+# this command will search for all prgenv-gnu uenv on _eiger_
+uenv image find prgenv-gnu
+
+# use @ to search on a specific system, e.g. on daint:
+uenv image find prgenv-gnu@daint
+
+# this can be used to search for all uenv on daint:
+uenv image find @daint
+
+# the '*' is a wildcard meaning "all systems"
+# this will show all images on all systems
+# NOTE: the * character must be quoted in single quotes
+uenv image find @'*'
+
+# search for all images on Alps that were built for gh200 nodes.
+uenv image find @'*'%gh200
+
+
+

Note

+

The wild card * used for "all systems" must always be escaped in single quotes: @'*'.

+
+

Finding uenv

+

Uenv for programming environments, tools and applications are provided by CSCS on each Alps system.

+
+

Info

+

The same uenv are not installed on every system; instead, each system provides the uenv that are supported for the users of that platform.

+
+

The available uenv images are stored in a registry, which can be queried using the uenv image find command:

+
+

uenv image find

+
> uenv image find
+uenv                       arch  system  id                size(MB)  date
+cp2k/2024.1:v1             zen2  eiger   2a56f1df31a4c196   2,693    2024-07-01
+cp2k/2024.2:v1             zen2  eiger   f83e95328d654c0f   2,739    2024-08-23
+cp2k/2024.3:v1             zen2  eiger   7c7369b64b5fabe5   2,740    2024-09-18
+editors/24.7:rc1           zen2  eiger   e5fb284962908eed   1,030    2024-07-18
+editors/24.7:v2            zen2  eiger   4f0f2770616135b1   1,062    2024-09-04
+julia/24.9:v1              zen2  eiger   0ff97a74dfcaa44e     539    2024-11-09
+linalg/24.11:rc1           zen2  eiger   b69f4664bf0cd1c4     770    2024-11-20
+linalg/24.11:v1            zen2  eiger   c11f6c85028abf5b     776    2024-12-03
+linalg-complex/24.11:v1    zen2  eiger   846a04b4713d469b     792    2024-12-03
+linaro-forge/24.0.2:v1     zen2  eiger   65734ce35494a5f5     313    2024-07-18
+linaro-forge/24.1:v1       zen2  eiger   b65d7c85adfb317a     344    2024-11-27
+netcdf-tools/2024:v1       zen2  eiger   e7e508c34cf40ccd   3,706    2024-11-14
+prgenv-gnu/24.11:rc4       zen2  eiger   811469b00f030493     570    2024-11-21
+prgenv-gnu/24.11:v1        zen2  eiger   0b6ab5fc4907bb38     572    2024-11-27
+prgenv-gnu/24.7:v1         zen2  eiger   7f68f4c8099de257     478    2024-07-01
+quantumespresso/v7.3.1:v1  zen2  eiger   61d1f21881a65578     864    2024-11-08
+
+
+

The output above lists the uenv images available on eiger, which include scientific applications (cp2k, quantumespresso), programming environments (prgenv-gnu), libraries (linalg, linalg-complex) and tools (editors, julia, linaro-forge, netcdf-tools).

+

Downloading uenv

+

To use a uenv, it first has to be pulled from the registry to local storage where you can access it. +For example, to use the prgenv-gnu uenv, use the uenv image pull command:

+
+

uenv image pull

+
# The following commands have the same effect
+
+# method 1: pull using the name of the uenv
+> uenv image pull prgenv-gnu/24.2:v1
+
+# method 2: pull using the id of the image
+> uenv image pull 3ea1945046d884ee
+
+
+
+

Note

+

In order to pull images, a local repository for storing the images must first exist; if it has not been created yet, you will receive an error message. +To create a repo in the default location, use the following command:

+
uenv image repo
> uenv repo create
+
+
+

Some images can be large, over 10 GB, and they can take a while to download from the registry.

+

To view all uenv that have been pulled and are ready to use, use the uenv image ls command:

+
+

listing downloaded uenv

+
> uenv image ls
+uenv                           arch   system  id                size(MB)  date
+editors/24.7:v2                gh200  daint   e7b0d930df729da5   1,270    2024-09-04
+gromacs/2024:v1                gh200  daint   b58e6406810279d5   3,658    2024-09-12
+julia/24.9:v1                  gh200  daint   7a4269abfdadc046   3,939    2024-11-09
+linalg/24.11:v1                gh200  daint   e1640cf6aafdca01   4,461    2024-12-03
+linaro-forge/23.1.2:v1         gh200  daint   fd67b726a90318d6     341    2024-08-26
+namd/3.0:v3                    gh200  daint   49bc65c6905eb5da   4,028    2024-12-12
+netcdf-tools/2024:v1           gh200  daint   2a799e99a12b7c13   1,260    2024-09-04
+prgenv-gnu/24.11:v1            gh200  daint   b81fd6ba25e88782   4,191    2024-11-27
+prgenv-gnu/24.7:v3             gh200  daint   b50ca0d101456970   3,859    2024-08-23
+prgenv-nvfortran/24.11:v1      gh200  daint   d2afc254383cef20   8,703    2025-01-30
+
+
+

Accessing restricted software

+

By default, uenv can be pulled by all users on a system, with no restrictions.

+

Some uenv are not available to all users. For example, the vasp images are only available to users who have a VASP license, and who are added to the vasp6 group once they have provided CSCS with a copy of their license.

+

To be able to pull such images, you first need to configure the access token for that specific software. This step only needs to be performed once; you will only need to repeat it if the token changes, or if you need to use a different token for another uenv.

+
+

using a token to access VASP

+
uenv image pull \
+    --token=/capstor/scratch/cscs/bcumming/tokens/vasp6 \
+    --username=vasp6 \
+    vasp/v6.4.3:v1
+
+
+
+

Note

+

As of March 2025, the only restricted software is VASP.

+
+

+

Starting a uenv session

+

The uenv start command will start a new shell with one or more uenv images mounted. +This is very useful for interactive sessions, for example if you want to work in the terminal to compile an application, or to set up a Python virtual environment.

+
+

start an interactive shell to compile an application

+

Here we want to compile an MPI + CUDA application "affinity".

+
# start the prgenv-gnu uenv, which provides MPI, cuda and CMake
+# use the "default" view, which will load all of the software in the uenv
+> uenv start prgenv-gnu/24.11:v1 --view=default
+
+# clone the software and set up the build directory
+> git clone https://github.com/bcumming/affinity.git
+> mkdir -p affinity/build
+> cd affinity/build/
+
+# configure the build with CMake, then call make to build
+# mpicc, mpic++ and cmake are all provided by the uenv
+> CXX=mpic++ CC=mpicc cmake ..
+> make -j
+
+# run the affinity executable on two nodes - note how the uenv is
+# automatically loaded by slurm on the compute nodes, because CUDA and MPI from
+# the uenv are required to run.
+> srun -n2 -N2 ./affinity.cuda
+GPU affinity test for 2 MPI ranks
+rank      0 @ nid005636
+ cores   : 0-287
+ gpu   0 : GPU-13a62579-bf3c-fb6b-667f-f2c588f4667b
+ gpu   1 : GPU-74968c03-7401-9013-0590-8445b3623208
+ gpu   2 : GPU-dfbd9ec1-a4b7-4a8d-603e-ebcc360f55a3
+ gpu   3 : GPU-6a44522d-bf84-9864-decf-6d3e85078442
+rank      1 @ nid006322
+ cores   : 0-287
+ gpu   0 : GPU-6d96b1d5-69e9-7bd4-f59a-a37ec1f5da1c
+ gpu   1 : GPU-c0508d69-a357-934e-87a0-be04adf4eee9
+ gpu   2 : GPU-02a7fd85-ff41-1d81-d010-d7a85f6134d8
+ gpu   3 : GPU-e07d996e-4d67-c9f4-cf75-81cfd45a1ae1
+
+# finish the uenv session
+> exit
+
+
+
+

which shell is used

+

uenv start starts a new shell, and by default it will use the default shell for the user. +You can see the default shell by looking at the $SHELL environment variable. +If you want to force a different shell: +

SHELL=`which zsh` uenv start ...
+

+
+
+

attention C Shell / tcsh users

+

uenv is tested extensively with bash (the default shell) and zsh. C shell is not tested thoroughly, and we won't make significant changes to uenv to maintain support for C shell.

+

If you are one of the handful of users using tcsh (C shell) and you want to use uenv, we strongly recommend creating a request at the CSCS service desk to change your default shell to either bash or zsh.

+
+

The basic syntax of uenv start is uenv start image where image is the uenv to start. +The image can be a label, the hash/id of the uenv, or a file:

+
+

uenv start

+
# start the image using the name of the uenv
+> uenv start netcdf-tools/2024:v1
+
+# or use the unique id of the uenv
+> uenv start 499c886f2947538e
+
+# or provide the path to a squashfs file
+> uenv start $SCRATCH/my-uenv/gromacs.squashfs
+
+
+
+what does 'uenv start' actually do? +

A uenv is a SquashFS image: a compressed file that contains a directory tree. +The directory tree of a uenv contains all of the software provided by the uenv, along with useful metadata. +When you run uenv start (or uenv run, or use the --uenv flag with SLURM) the SquashFS file is mounted at the mount location for the uenv, which is most often /user-environment.

+
# log into daint
+> ssh daint.alps.cscs.ch
+
+# /user-environment is empty
+> ls -l /user-environment
+total 0
+
+# start a uenv
+> uenv start prgenv-nvfortran/24.11:v1
+
+# the uenv software is now available
+> ls /user-environment/
+bin  config  env  linux-sles15-neoverse_v2  meta  modules  repo
+
+# findmnt verifies that a squashfs image has been mounted
+> findmnt /user-environment
+TARGET            SOURCE      FSTYPE   OPTIONS
+/user-environment /dev/loop25 squashfs ro,nosuid,nodev,relatime,errors=continue
+
+# end the session and verify that the uenv is no longer mounted
+> exit
+> ls -l /user-environment
+total 0
+
+

Loading an environment has no impact on other users or on other terminal sessions that you have open on the same node: the mounted environment is only visible in your terminal. +This means that multiple users on a login node can mount their own environment at the same mount point, without interfering with one another.

+
+

Views

+

Running uenv start $label on its own will create a shell with the software mounted at /user-environment or /user-tools; however, no changes are made to environment variables like $PATH.

+

Uenv images provide views, which set environment variables that load the software into your environment. +Views are loaded using the --view flag of uenv start (and also of uenv run and the SLURM plugin, documented below).

+
+

loading views

+
# activate the view named default in prgenv-gnu
+> uenv start --view=default prgenv-gnu/24.11:v1
+
+# activate both the spack and modules views in prgenv-gnu using
+# a comma-separated list of view names
+> uenv start --view=spack,modules prgenv-gnu/24.11:v1
+
+# when starting multiple uenv, you can disambiguate using uenvname:viewname
+> uenv start --view=prgenv-gnu:default,editors:ed prgenv-gnu/24.11:v1,editors
+
+
+

Modules

+

Most uenv provide modules, which can be accessed using the module command. +By default, the modules are not activated when a uenv is started, and need to be explicitly activated using the modules view.

+
+

using the module view

+
> uenv start prgenv-gnu/24.11:v1 --view=modules
+> module avail
+---------------------------- /user-environment/modules ----------------------------
+   aws-ofi-nccl/git.v1.9.2-aws_1.9.2    lua/5.4.6
+   boost/1.86.0                         lz4/1.10.0
+   cmake/3.30.5                         meson/1.5.1
+   cray-mpich/8.1.30                    nccl-tests/2.13.6
+   cuda/12.6.2                          nccl/2.22.3-1
+   fftw/3.3.10                          netlib-scalapack/2.2.0
+   fmt/11.0.2                           ninja/1.12.1
+   gcc/13.3.0                           openblas/0.3.28
+   gsl/2.8                              osu-micro-benchmarks/5.9
+   hdf5/1.14.5                          papi/7.1.0
+   kokkos-kernels/4.4.01                python/3.12.5
+   kokkos-tools/develop                 superlu/5.3.0
+   kokkos/4.4.01                        zlib-ng/2.2.1
+   libtree/3.1.1
+> module load cuda gcc cmake
+> nvcc --version
+nvcc: NVIDIA (R) Cuda compiler driver
+Cuda compilation tools, release 12.6, V12.6.77
+> gcc --version
+gcc (Spack GCC) 13.3.0
+> cmake --version
+cmake version 3.30.5
+
+
+

Spack

+

uenv images provide a full upstream Spack configuration to facilitate building your own software with Spack, using the packages installed inside the uenv as dependencies. +No view needs to be loaded to use Spack; however, all uenv provide a spack view that sets environment variables with useful information, such as the location of the Spack configuration and the version of Spack that was used to build the uenv. +For more information, see our guide on building software with Spack and uenv.
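As a discovery sketch (the exact variable names differ between uenv versions, so inspect them rather than relying on a fixed list):

# start a uenv with the spack view loaded and list the Spack-related variables it sets
uenv start --view=spack prgenv-gnu/24.11:v1
env | grep -i spack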

+

+

Running a uenv

+

The uenv run command can be used to run an application or script in a uenv environment, and return control to the calling shell when the command has finished running.

+
+how is uenv run different from uenv start? +

uenv start sets up the uenv environment, then starts an interactive shell in that environment. +When you are finished, you can type exit to finish the session.

+

uenv run is more generic: instead of starting an interactive shell in the environment, it runs the given executable and arguments in that environment. +The following commands are equivalent:

+
# start a new bash shell in prgenv-gnu
+uenv start prgenv-gnu/24.11
+# start a new bash shell in prgenv-gnu
+uenv run prgenv-gnu/24.11 -- bash
+
+
+
+

running cmake

+

Call cmake to configure a build with the default view loaded +

# run a command
+> uenv run prgenv-gnu/24.11:v1 --view=default -- cmake -DUSE_GPU=cuda ..
+

+
+
+

running an application executable

+

Run the GROMACS executable from inside the gromacs uenv. +

# run an executable:
+> uenv run --view=gromacs gromacs/2024:v1 -- gmx_mpi
+

+
+
+

running applications with different environments

+

uenv run is useful for running multiple applications or scripts in a pipeline or workflow, where each application has separate requirements. +In this example the pre- and post-processing stages use prgenv-gnu, while the simulation stage uses the gromacs uenv. +

# run multiple applications, one after the other, that have different requirements
+> uenv run --view=default prgenv-gnu/24.11:v1 -- ./pre-processing-script.sh
+> uenv run --view=gromacs gromacs/2024:v1 -- gmx_mpi $gromacs_args
+> uenv run --view=default prgenv-gnu/24.11:v1 -- ./post-processing-script.sh
+

+
+

Building uenv

+

CSCS provides a build service for uenv that takes as its input a uenv recipe, and builds the uenv using the same pipeline used to build the officially supported uenv.

+

The uenv build command takes two arguments:

+
    +
  • recipe: the path to the recipe.
    • A uenv recipe is a description of the software to build in the uenv. See the stackinator documentation for more information.
  • label: the label to attach, of the form name/version@system%uarch, where:
    • name is the name, e.g. prgenv-gnu, gromacs, vistools.
    • version is a version string, e.g. 24.11, v1.2, 2025-rc2.
    • system is the CSCS cluster to build on (e.g. daint, santis, clariden, eiger).
    • uarch is the micro-architecture.
+
+

building a uenv

+

Call the uenv build command with the recipe path and the label:

uenv build $SCRATCH/recipes/myapp myapp/v3@daint%gh200
+

+

The image will be built on daint. +The build tool gives you a URL to a status page that shows the progress of the build. +After a successful build, the uenv can be pulled: +

uenv image pull service::myapp/v3:1669479716
+

+

Note that the image is given a unique numeric tag, which you can find on the status page for the build.

+
+
+

Info

+

To use an existing uenv recipe as the starting point for a custom recipe, start the uenv with uenv start and copy the contents of the meta/recipe path in the mounted image (this is the recipe that was used to build the uenv).

+
+

All uenv built by uenv build are pushed into the service namespace, where they can be accessed by all users logged in to CSCS. +This makes it easy to share your uenv with other users, by giving them the name, version and tag of the image.

+
+

Warning

+

If, for whatever reason, your uenv can't be made publicly available, do not use the build service.

+
+
+

search user-built uenv

+

To view all of the uenv on daint that have been built by the service: +

uenv image find service::@daint
+

+
+

+

SLURM integration

+

The environment to load can be provided directly to SLURM via three arguments:

+
    +
  • --uenv: a comma-separated list of uenv to mount
  • --view: a comma-separated list of views to load
  • --repo: the path of an alternative uenv repository (if not set, the default repo in $SCRATCH/.uenv-images is used)
+

For example, the flags can be used with srun: +

# mount the uenv prgenv-gnu with the view named default
+> srun --uenv=prgenv-gnu/24.7:v3 --view=default ...
+
+# mount an image at an explicit location (/user-tools)
+> srun --uenv=$IMAGES/myenv.squashfs:/user-tools ...
+
+# mount multiple images: use a comma to separate the different options
+> srun --uenv=prgenv-gnu/24.7:v3,editors/24.7:v2 --view=default,editors:modules ...
+

+

The commands can also be used in sbatch scripts to have fine-grained control:

+
+

sbatch script for uenv

+

It is possible to provide a uenv that is loaded inside the script, and will be loaded by default by all srun commands that do not override it with their own --uenv parameters. +

#!/bin/bash
+
+#SBATCH --uenv=editors/24.7:v2
+#SBATCH --view=editors:ed
+#SBATCH --ntasks=4
+#SBATCH --nodes=1
+#SBATCH --output=out-%j.out
+#SBATCH --error=out-%j.out
+
+echo "==== test in script ===="
+# the fd command is provided by the ed view
+# use it to inspect the meta data in the mounted image
+fd . /user-tools/meta/recipe
+
+echo "==== test in srun ===="
+# use srun to launch the parallel job
+srun -n4 bash -c 'echo $SLURM_PROCID on $(hostname): $(which emacs)'
+
+echo "==== alternative mount ===="
+srun -n4 --uenv=prgenv-gnu --view=prgenv-gnu:default bash -c 'echo $SLURM_PROCID on $(hostname): $(which mpicc)'
+
+

+

The sbatch job above would generate output like the following: +

==== test in script ====
+/user-tools/meta/recipe/compilers.yaml
+/user-tools/meta/recipe/config.yaml
+/user-tools/meta/recipe/environments.yaml
+/user-tools/meta/recipe/modules.yaml
+==== test in srun ====
+1 on nid007144: /user-tools/env/ed/bin/emacs
+3 on nid007144: /user-tools/env/ed/bin/emacs
+0 on nid007144: /user-tools/env/ed/bin/emacs
+2 on nid007144: /user-tools/env/ed/bin/emacs
+==== alternative mount ====
+0 on nid007144: /user-environment/env/default/bin/mpicc
+1 on nid007144: /user-environment/env/default/bin/mpicc
+2 on nid007144: /user-environment/env/default/bin/mpicc
+3 on nid007144: /user-environment/env/default/bin/mpicc
+

+
+

In the example above, the #SBATCH --uenv and #SBATCH --view parameters in the preamble of the sbatch script set the default uenv to editors with the view ed.

+
    +
  • editors is mounted and the view set in the script (the "test in script" part)
  • editors is also mounted in the first call to srun (which does not provide a --uenv flag)
+

It is possible to override the default uenv by passing different --uenv and --view flags to an srun call inside the script, as is done in the second srun call.

+
    +
  • Note how the second call has access to mpicc, provided by prgenv-gnu.
+

+

Installing the uenv tool

+

The command line tool can be installed from source, if you are working on a cluster that does not have uenv installed, or if you need to test a new version.

+
+

Note

+

uenv is installed already on CSCS clusters, so installation is not required.

+

Only follow these steps if you are advised to test a new version (e.g. if it has a fix for an issue that you are encountering).

+
+
+

manually installing uenv in the terminal

+
git clone https://github.com/eth-cscs/uenv2.git
+cd uenv2
+
+# run the installation script.
+# this will install uenv2 in $HOME/.local/$(uname -m)/
+./install-alps-local.sh
+
+# update bashrc
+echo "export PATH=\$HOME/.local/\$(uname -m)/bin:\$PATH" >> $HOME/.bashrc
+echo "unset -f uenv" >> $HOME/.bashrc
+
+
+
+

Warning

+

Before uenv can be used, you need to log out and back in again, then type which uenv to verify that uenv has been installed in your $HOME path.

+
\ No newline at end of file
diff --git a/vclusters/bristen/index.html b/vclusters/bristen/index.html
new file mode 100644
index 0000000..e7efb9b
--- /dev/null
+++ b/vclusters/bristen/index.html

+

Bristen

+
+

Todo

+

Use the clariden page as a template.

+
\ No newline at end of file
diff --git a/vclusters/clariden/index.html b/vclusters/clariden/index.html
new file mode 100644
index 0000000..ac30e8e
--- /dev/null
+++ b/vclusters/clariden/index.html

+

Clariden

+
+

Todo

+

Introduction

+

This page is a cut and paste of some of Todi's old documentation, which we can turn into a template.

+
+

Cluster Details

+
+

Todo

+

a standardised table with information about

+
    +
  • number and type of nodes
+

and any special notes

+
+

Logging into Clariden

+
+

Todo

+

how to log in, i.e. ssh clariden.cscs.ch via ela.cscs.ch

+

provide the snippet to add to your ~/.ssh/config, and link to where we document this (docs not currently available)
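A hedged sketch of such a snippet, assuming access via the ela.cscs.ch jump host; the username and identity file are placeholders that depend on your own SSH key setup:

# hypothetical ~/.ssh/config entry
Host ela
    HostName ela.cscs.ch
    User <cscs_username>
    IdentityFile ~/.ssh/<your_key>

Host clariden
    HostName clariden.cscs.ch
    User <cscs_username>
    ProxyJump ela
    IdentityFile ~/.ssh/<your_key>

With this in place, ssh clariden connects through ela.cscs.ch.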

+
+

Software and services

+
+

Todo

+

information about CSCS services/tools available

+
    +
  • container engine
  • uenv
  • CPE
  • ... etc
+
+

Running Jobs on Clariden

+

Clariden uses SLURM as the workload manager, which is used to launch and monitor distributed workloads, such as training runs.

+

See detailed instructions on how to run jobs on the Grace-Hopper nodes.

+

Storage

+
+

Todo

+

describe the file systems that are attached, and where.

+

This is where $SCRATCH, $PROJECT etc are defined for this cluster.

+

Refer to the specific file systems that these map onto (capstor, iopstor, waldur), and link to the storage docs for these.

+

Also discuss any specific storage policies. You might want to discuss storage policies for MLp one level up, in the MLp docs.

+
+
    +
  • attached storage and policies
+

Calendar and key events

+

The system is updated every Tuesday, between 9 am and 12 pm. +...

+
+

Todo

+

notifications

+

a calendar widget would be useful, particularly if we can have a central calendar, and a way to filter events for specific instances

+
+

Change log

+
+

special text boxes for updates

+

they can be opened and closed.

+
+
+

2024-10-15 reservation daint available again

+

The reservation daint is available again exclusively for Daint users that need to run their benchmarks for submitting their proposals, in addition to the debug partition and free nodes. +Please add the Slurm option --reservation=daint to your batch script if you want to use it.

+
+
+2024-10-07 New compute node image deployed +

New compute node image deployed to fix the issue with GPU-aware MPI.

+

The maximum job time limit has been decreased from 12 hours to 6 hours.

+
+
+2024-09-18 Daint users +

In order to complete the preparatory work necessary to deliver Alps in production, as of September 18, 2024 the vCluster Daint on Alps will no longer be accessible until further notice: early access will still be granted on Tödi using the Slurm reservation option --reservation=daint.

+
+

Known issues

+

TODO: list of known issues, including links to the known issues page.

\ No newline at end of file
diff --git a/vclusters/daint/index.html b/vclusters/daint/index.html
new file mode 100644
index 0000000..adaf997
--- /dev/null
+++ b/vclusters/daint/index.html

+

Daint

\ No newline at end of file
diff --git a/vclusters/eiger/index.html b/vclusters/eiger/index.html
new file mode 100644
index 0000000..32a1dc8
--- /dev/null
+++ b/vclusters/eiger/index.html

+

Eiger

\ No newline at end of file
diff --git a/vclusters/santis/index.html b/vclusters/santis/index.html
new file mode 100644
index 0000000..ab1351b
--- /dev/null
+++ b/vclusters/santis/index.html

+

Santis

\ No newline at end of file