diff --git a/README.md b/README.md
index f3708003..61d4f4ce 100644
--- a/README.md
+++ b/README.md
@@ -49,7 +49,7 @@ The following is a list of the latest updates to OG-MARL:

 ✅ We have implemented our **first set of JAX-based systems in OG-MARL**. Our JAX systems use [Flashbax](https://github.com/instadeepai/flashbax) as the replay buffer backend. Flashbax buffers are completly jit-able, which means that our JAX systems have fully intergrated and jitted training and data sampling.

-✅ We have **intergrated [MARL-eval](https://github.com/instadeepai/marl-eval/tree/main)** into OG-MARL to standardise and simplify the reporting of experimental results.
+✅ We have **integrated [MARL-eval](https://github.com/instadeepai/marl-eval/tree/main)** into OG-MARL to standardise and simplify the reporting of experimental results.

 ## Need for Speed 🏎️
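The README paragraph above rests on the idea that a jit-able replay buffer lets data sampling and the gradient update be compiled into a single step. The following is a generic JAX sketch of that pattern only: the buffer layout, loss and all names below are illustrative assumptions, not OG-MARL's or Flashbax's actual API.

```python
import jax
import jax.numpy as jnp


def sample(buffer, key, batch_size=32):
    # Uniformly sample a batch of transitions from a pre-loaded offline buffer.
    idx = jax.random.randint(key, (batch_size,), 0, buffer["obs"].shape[0])
    return {k: v[idx] for k, v in buffer.items()}


def loss_fn(params, batch):
    # Stand-in regression loss on the sampled batch (illustrative only).
    preds = batch["obs"] @ params
    return jnp.mean((preds - batch["returns"]) ** 2)


@jax.jit
def train_step(params, buffer, key):
    # Sampling and the SGD update are traced together into one compiled step.
    batch = sample(buffer, key)
    grads = jax.grad(loss_fn)(params, batch)
    return jax.tree_util.tree_map(lambda p, g: p - 1e-3 * g, params, grads)


# Toy offline buffer of 1000 transitions with 8-dim observations.
key = jax.random.PRNGKey(0)
buffer = {"obs": jnp.ones((1000, 8)), "returns": jnp.zeros((1000,))}
params = jnp.zeros(8)
params = train_step(params, buffer, key)
```

Because `sample` is called inside the jitted `train_step`, the batch gather and the parameter update compile into one XLA program, which is the kind of fusion the README paragraph is pointing at.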

diff --git a/docs/datasets.md b/docs/datasets.md
new file mode 100644
index 00000000..ad790a8f
--- /dev/null
+++ b/docs/datasets.md
@@ -0,0 +1,64 @@
+[64 added lines: an HTML table that links each environment (SMAC V1, SMAC V2, Flatland, PettingZoo, MaMuJoCo, Voltage Control, City Learn, MPE) to its dataset downloads and website page]
diff --git a/docs/html_from_dict.py b/docs/html_from_dict.py
new file mode 100644
index 00000000..26f8832b
--- /dev/null
+++ b/docs/html_from_dict.py
@@ -0,0 +1,63 @@
+DATASET_URLS = {
+    "smac_v1": {
+        "3m": "https://tinyurl.com/3m-dataset",
+        "8m": "https://tinyurl.com/8m-dataset",
+        "5m_vs_6m": "https://tinyurl.com/5m-vs-6m-dataset",
+        "2s3z": "https://tinyurl.com/2s3z-dataset",
+        "3s5z_vs_3s6z": "https://tinyurl.com/3s5z-vs-3s6z-dataset3",
+        "2c_vs_64zg": "https://tinyurl.com/2c-vs-64zg-dataset",
+        "27m_vs_30m": "https://tinyurl.com/27m-vs-30m-dataset"
+    },
+    "smac_v2": {
+        "terran_5_vs_5": "https://tinyurl.com/terran-5-vs-5-dataset",
+        "zerg_5_vs_5": "https://tinyurl.com/zerg-5-vs-5-dataset",
+        "terran_10_vs_10": "https://tinyurl.com/terran-10-vs-10-dataset"
+    },
+    "flatland": {
+        "3_trains": "https://tinyurl.com/3trains-dataset",
+        "5_trains": "https://tinyurl.com/5trains-dataset"
+    },
+    "pettingzoo": {
+        "pursuit": "https://tinyurl.com/pursuit-dataset",
+        "pistonball":"https://tinyurl.com/pistonball-dataset",
+        "coop_pong":"https://tinyurl.com/coop-pong-dataset",
+        "kaz":"https://tinyurl.com/kaz-dataset"
+    },
+    "mamujoco": {
+        "2_halfcheetah": "https://tinyurl.com/2halfcheetah-dataset",
+        "2_ant": "https://tinyurl.com/2ant-dataset",
+        "4_ant": "https://tinyurl.com/4ant-dataset"
+    },
+    "voltage_control": {
+        "case33_3min_final": "https://tinyurl.com/case33-3min-final-dataset",
+    },
+    "city_learn": {
+        "2022_all_phases":"https://tinyurl.com/2022-all-phases-dataset"
+    },
+    "mpe": {
+        "simple_adversary":"https://tinyurl.com/simple-adversary-dataset"
+    }
+}
+
+env_names = ["SMAC V1","SMAC V2","Flatland","PettingZoo","MaMuJoCo","Voltage Control","City Learn","MPE"]
+
+lines = ["\n",
+         "\n",
+         "\n",
+         "\n",
+         "Website Page\n",
+         "\n",
+         "\n",
+         "Datasets\n"
+    ]
+for i,env in enumerate(list(DATASET_URLS.keys())):
+    lines.extend([""+env_names[i]+"\n","\n","\n"])
+
+lines.extend(["\n", "\n"])
+
+f = open("test.txt","w")
+f.writelines(lines)
+f.close()
\ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
index 8d7d31ed..54ef6a81 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -36,6 +36,10 @@ theme:
         icon: material/weather-night
         name: Switch to light mode

+nav:
+  - Home: 'index.md'
+  - Datasets: 'datasets.md'
+
 markdown_extensions:
   - attr_list
   - md_in_html
diff --git a/og_marl/offline_dataset.py b/og_marl/offline_dataset.py
index 8b636fad..97cf0e98 100644
--- a/og_marl/offline_dataset.py
+++ b/og_marl/offline_dataset.py
@@ -122,7 +122,7 @@ def __getattr__(self, name):
         "8m": "https://tinyurl.com/8m-dataset",
         "5m_vs_6m": "https://tinyurl.com/5m-vs-6m-dataset",
         "2s3z": "https://tinyurl.com/2s3z-dataset",
-        "3s5z_vs_3s6z": "ttps://tinyurl.com/3s5z-vs-3s6z-dataset3",
+        "3s5z_vs_3s6z": "https://tinyurl.com/3s5z-vs-3s6z-dataset3",
         "2c_vs_64zg": "https://tinyurl.com/2c-vs-64zg-dataset",
         "27m_vs_30m": "https://tinyurl.com/27m-vs-30m-dataset"
     },
@@ -135,24 +135,36 @@ def __getattr__(self, name):
         "3_trains": "https://tinyurl.com/3trains-dataset",
         "5_trains": "https://tinyurl.com/5trains-dataset"
     },
+    "pettingzoo": {
+        "pursuit": "https://tinyurl.com/pursuit-dataset",
+        "pistonball":"https://tinyurl.com/pistonball-dataset",
+        "coop_pong":"https://tinyurl.com/coop-pong-dataset",
+        "kaz":"https://tinyurl.com/kaz-dataset"
+    },
     "mamujoco": {
-        "2_halfcheetah": "",
-        "2_ant": "",
-        "4_ant": ""
+        "2_halfcheetah": "https://tinyurl.com/2halfcheetah-dataset",
+        "2_ant": "https://tinyurl.com/2ant-dataset",
+        "4_ant": "https://tinyurl.com/4ant-dataset"
     },
     "voltage_control": {
         "case33_3min_final": "https://tinyurl.com/case33-3min-final-dataset",
+    },
+    "city_learn": {
+        "2022_all_phases":"https://tinyurl.com/2022-all-phases-dataset"
+    },
+    "mpe": {
+        "simple_adversary":"https://tinyurl.com/simple-adversary-dataset"
     }
 }

 def download_and_unzip_dataset(env_name, scenario_name, dataset_base_dir="./datasets"):
     dataset_download_url = DATASET_URLS[env_name][scenario_name]

     os.makedirs(f'{dataset_base_dir}/tmp/', exist_ok=True)
-    os.makedirs(f'{dataset_base_dir}/{env_name}/{scenario_name}', exist_ok=True)
+    os.makedirs(f'{dataset_base_dir}/{env_name}', exist_ok=True)

     zip_file_path = f'{dataset_base_dir}/tmp/tmp_dataset.zip'

-    extraction_path = f'{dataset_base_dir}/{env_name}/{scenario_name}'
+    extraction_path = f'{dataset_base_dir}/{env_name}/'

     response = requests.get(dataset_download_url, stream=True)

     total_length = response.headers.get('content-length')
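With the URL fix and the new PettingZoo, CityLearn and MPE entries above, the existing `download_and_unzip_dataset` helper covers the added datasets as well. A minimal usage sketch, assuming the import path implied by `og_marl/offline_dataset.py`:

```python
from og_marl.offline_dataset import download_and_unzip_dataset

# Fetches https://tinyurl.com/pursuit-dataset and, per the updated
# extraction_path above, unzips it into ./datasets/pettingzoo/.
download_and_unzip_dataset("pettingzoo", "pursuit", dataset_base_dir="./datasets")
```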