From b8b016ebad20b9a699ea0a44af38fae4322e2a16 Mon Sep 17 00:00:00 2001 From: GnomeBlue Date: Sun, 6 Sep 2020 11:44:52 +0200 Subject: [PATCH] Initial release --- .gitignore | 3 + LICENSE | 21 + README.md | 49 ++ RefreshEnv.cmd | 66 +++ logger/appearance.cfg | 29 ++ logger/logger.cfg | 9 + logger/logger.py | 99 +++++ logger/rlbot.cfg | 73 +++ logger/run.bat | 9 + logger/run.py | 41 ++ requirements.txt | 12 + rlbot.cfg | 73 +++ run-gui.bat | 11 + run.bat | 11 + run.py | 40 ++ src/appearance.cfg | 49 ++ src/bot.cfg | 29 ++ src/drone_reference.py | 10 + src/gosling/objects.py | 420 ++++++++++++++++++ src/gosling/routines.py | 404 +++++++++++++++++ src/gosling/tools.py | 80 ++++ src/gosling/utils.py | 157 +++++++ src/main.py | 88 ++++ src/physics/control/__init__.py | 4 + src/physics/control/base.py | 17 + src/physics/control/steer.py | 22 + src/physics/control/throttle.py | 1 + src/physics/math/__init__.py | 4 + src/physics/math/matrix3.py | 54 +++ src/physics/math/vector3.py | 207 +++++++++ src/physics/simulations/__init__.py | 4 + src/physics/simulations/ball.py | 78 ++++ src/physics/simulations/base_sim.py | 8 + src/physics/tests/math_test.py | 235 ++++++++++ src/scenario/__init__.py | 10 + src/scenario/ball_through_center.py | 92 ++++ src/scenario/base_scenario.py | 43 ++ src/scenario/default_scenario.py | 30 ++ src/scenario/dribble_easy.py | 81 ++++ src/scenario/dribble_hard.py | 95 ++++ src/scenario/intercept.py | 78 ++++ src/scenario/shoot_ball_at_goal.py | 95 ++++ src/scenario/side_shot.py | 79 ++++ src/settings/default.ini | 16 + src/settings/dribble_easy_test.ini | 16 + src/settings/dribble_hard_test.ini | 16 + src/settings/intercept_ball.ini | 17 + src/settings/keeper_test.ini | 16 + src/settings/kickoff_captain.ini | 16 + src/settings/kickoff_demo.ini | 16 + src/settings/only_demo.ini | 16 + src/settings/only_prepare.ini | 16 + src/settings/only_shadow.ini | 16 + src/settings/side_shot_test.ini | 16 + src/setup.py | 28 ++ src/strategy/__init__.py | 6 + src/strategy/base_ccp.py | 70 +++ src/strategy/captains/__init__.py | 13 + src/strategy/captains/all_drone_demo.py | 34 ++ src/strategy/captains/all_drone_dribble.py | 36 ++ src/strategy/captains/all_drone_get_boost.py | 34 ++ src/strategy/captains/all_drone_intercept.py | 35 ++ src/strategy/captains/all_drone_kickoff.py | 35 ++ src/strategy/captains/all_drone_shadow.py | 43 ++ src/strategy/captains/attack.py | 233 ++++++++++ src/strategy/captains/defense.py | 196 ++++++++ src/strategy/captains/kickoff.py | 83 ++++ src/strategy/captains/kickoff_demolition.py | 49 ++ src/strategy/coaches/__init__.py | 13 + src/strategy/coaches/kickoff_demo_coach.py | 24 + src/strategy/coaches/mr_cover.py | 26 ++ src/strategy/coaches/mr_demo.py | 24 + src/strategy/coaches/mr_dribble.py | 24 + src/strategy/coaches/mr_get_boost.py | 24 + src/strategy/coaches/mr_intercept.py | 24 + src/strategy/coaches/mr_kickoff.py | 24 + src/strategy/coaches/mr_prepare.py | 28 ++ src/strategy/coaches/mr_shadow.py | 24 + src/strategy/coaches/triple_rotations.py | 104 +++++ src/strategy/drone.py | 113 +++++ src/strategy/players/__init__.py | 13 + src/strategy/players/demolition.py | 27 ++ src/strategy/players/gosling_style/cover.py | 75 ++++ src/strategy/players/gosling_style/dribble.py | 97 ++++ .../players/gosling_style/get_boost.py | 85 ++++ .../players/gosling_style/intercept.py | 193 ++++++++ src/strategy/players/gosling_style/keeper.py | 59 +++ src/strategy/players/gosling_style/kickoff.py | 29 ++ src/strategy/players/gosling_style/prepare.py | 74 +++ 
.../players/gosling_style/shadowing.py | 102 +++++ src/strategy/players/gosling_style/wait.py | 70 +++ src/strategy/utils/__init__.py | 3 + src/strategy/utils/gosling_wrapper.py | 60 +++ src/world/__init__.py | 3 + src/world/components/__init__.py | 8 + src/world/components/ball.py | 35 ++ src/world/components/car.py | 50 +++ src/world/components/game.py | 42 ++ src/world/components/team.py | 31 ++ src/world/components/world_boost_pad.py | 32 ++ src/world/components/world_component.py | 9 + src/world/world.py | 58 +++ 102 files changed, 5699 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 README.md create mode 100644 RefreshEnv.cmd create mode 100644 logger/appearance.cfg create mode 100644 logger/logger.cfg create mode 100644 logger/logger.py create mode 100644 logger/rlbot.cfg create mode 100644 logger/run.bat create mode 100644 logger/run.py create mode 100644 requirements.txt create mode 100644 rlbot.cfg create mode 100644 run-gui.bat create mode 100644 run.bat create mode 100644 run.py create mode 100644 src/appearance.cfg create mode 100644 src/bot.cfg create mode 100644 src/drone_reference.py create mode 100644 src/gosling/objects.py create mode 100644 src/gosling/routines.py create mode 100644 src/gosling/tools.py create mode 100644 src/gosling/utils.py create mode 100644 src/main.py create mode 100644 src/physics/control/__init__.py create mode 100644 src/physics/control/base.py create mode 100644 src/physics/control/steer.py create mode 100644 src/physics/control/throttle.py create mode 100644 src/physics/math/__init__.py create mode 100644 src/physics/math/matrix3.py create mode 100644 src/physics/math/vector3.py create mode 100644 src/physics/simulations/__init__.py create mode 100644 src/physics/simulations/ball.py create mode 100644 src/physics/simulations/base_sim.py create mode 100644 src/physics/tests/math_test.py create mode 100644 src/scenario/__init__.py create mode 100644 src/scenario/ball_through_center.py create mode 100644 src/scenario/base_scenario.py create mode 100644 src/scenario/default_scenario.py create mode 100644 src/scenario/dribble_easy.py create mode 100644 src/scenario/dribble_hard.py create mode 100644 src/scenario/intercept.py create mode 100644 src/scenario/shoot_ball_at_goal.py create mode 100644 src/scenario/side_shot.py create mode 100644 src/settings/default.ini create mode 100644 src/settings/dribble_easy_test.ini create mode 100644 src/settings/dribble_hard_test.ini create mode 100644 src/settings/intercept_ball.ini create mode 100644 src/settings/keeper_test.ini create mode 100644 src/settings/kickoff_captain.ini create mode 100644 src/settings/kickoff_demo.ini create mode 100644 src/settings/only_demo.ini create mode 100644 src/settings/only_prepare.ini create mode 100644 src/settings/only_shadow.ini create mode 100644 src/settings/side_shot_test.ini create mode 100644 src/setup.py create mode 100644 src/strategy/__init__.py create mode 100644 src/strategy/base_ccp.py create mode 100644 src/strategy/captains/__init__.py create mode 100644 src/strategy/captains/all_drone_demo.py create mode 100644 src/strategy/captains/all_drone_dribble.py create mode 100644 src/strategy/captains/all_drone_get_boost.py create mode 100644 src/strategy/captains/all_drone_intercept.py create mode 100644 src/strategy/captains/all_drone_kickoff.py create mode 100644 src/strategy/captains/all_drone_shadow.py create mode 100644 src/strategy/captains/attack.py create mode 100644 src/strategy/captains/defense.py create mode 
100644 src/strategy/captains/kickoff.py create mode 100644 src/strategy/captains/kickoff_demolition.py create mode 100644 src/strategy/coaches/__init__.py create mode 100644 src/strategy/coaches/kickoff_demo_coach.py create mode 100644 src/strategy/coaches/mr_cover.py create mode 100644 src/strategy/coaches/mr_demo.py create mode 100644 src/strategy/coaches/mr_dribble.py create mode 100644 src/strategy/coaches/mr_get_boost.py create mode 100644 src/strategy/coaches/mr_intercept.py create mode 100644 src/strategy/coaches/mr_kickoff.py create mode 100644 src/strategy/coaches/mr_prepare.py create mode 100644 src/strategy/coaches/mr_shadow.py create mode 100644 src/strategy/coaches/triple_rotations.py create mode 100644 src/strategy/drone.py create mode 100644 src/strategy/players/__init__.py create mode 100644 src/strategy/players/demolition.py create mode 100644 src/strategy/players/gosling_style/cover.py create mode 100644 src/strategy/players/gosling_style/dribble.py create mode 100644 src/strategy/players/gosling_style/get_boost.py create mode 100644 src/strategy/players/gosling_style/intercept.py create mode 100644 src/strategy/players/gosling_style/keeper.py create mode 100644 src/strategy/players/gosling_style/kickoff.py create mode 100644 src/strategy/players/gosling_style/prepare.py create mode 100644 src/strategy/players/gosling_style/shadowing.py create mode 100644 src/strategy/players/gosling_style/wait.py create mode 100644 src/strategy/utils/__init__.py create mode 100644 src/strategy/utils/gosling_wrapper.py create mode 100644 src/world/__init__.py create mode 100644 src/world/components/__init__.py create mode 100644 src/world/components/ball.py create mode 100644 src/world/components/car.py create mode 100644 src/world/components/game.py create mode 100644 src/world/components/team.py create mode 100644 src/world/components/world_boost_pad.py create mode 100644 src/world/components/world_component.py create mode 100644 src/world/world.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..478a0d6 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +.idea/ +venv/ +__pycache__/ \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..ab60297 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/README.md b/README.md new file mode 100644 index 0000000..cc89322 --- /dev/null +++ b/README.md @@ -0,0 +1,49 @@ +# RocketNoodles +RocketNoodles is a Rocket League bot designed and built by [Serpentine](https://serpentineai.nl). + +## Quick Start +The easiest way to start a Python bot is demonstrated here: +https://youtu.be/YJ69QZ-EX7k + +It shows you how to: +- Install the RLBot GUI +- Use it to create a new bot + +The original example repository can be found at +[the RLBot Python example repository](https://github.com/RLBot/RLBotPythonExample) on GitHub. + +### Changing the bot + +- Bot behaviour is controlled by `src/main.py` +- Bot appearance is controlled by `src/appearance.cfg` + +See https://github.com/RLBot/RLBotPythonExample/wiki for documentation and tutorials. + +## Repository Structure +``` +. +|-- src # Contains main.py to run bot +| |-- gosling # Library for agent behaviour and control +| |-- physics # Models and predictions of ingame physics +| |-- scenario # Ingame test scenarios +| |-- settings # Agent configuration files loaded on bot startup +| |-- strategy # System for agent strategy, coordination and tactics +| |-- world # World model, agent representation of the game +| +|-- logger # Tools and utilities +``` +Some of these directories are explained in more detail below. + +### Gosling +[GoslingUtils](https://github.com/ddthj/GoslingUtils) is a library that executes agent behaviour +and provides basic controllers for doing so. **We rely on Gosling in our strategy code but are +working to phase out the library in the future.** Phasing it out will give us greater flexibility +in the design of our bot and a cleaner repository. + +### Strategy +Strategic reasoning and coordination are done here. The system consists of three parts: +Coaches, Captains and Players. They do the following: +- _Coaches_: Highest level in strategy. Determine which strategy will be executed by the captain. +Examples could be: play offensively or defensively, use trick shots, or add the keeper to the offense. +- _Captains_: Coordinate individual agents. The captain assigns a task to each active agent. +- _Players_: Behaviour of an individual agent. \ No newline at end of file diff --git a/RefreshEnv.cmd b/RefreshEnv.cmd new file mode 100644 index 0000000..567fd78 --- /dev/null +++ b/RefreshEnv.cmd @@ -0,0 +1,66 @@ +@echo off +:: This file is taken from chocolatey: +:: https://github.com/chocolatey/choco/blob/master/src/chocolatey.resources/redirects/RefreshEnv.cmd +:: +:: RefreshEnv.cmd +:: +:: Batch file to read environment variables from registry and +:: set session variables to these values. +:: +:: With this batch file, there should be no need to reload command +:: environment every time you want environment changes to propagate + +::echo "RefreshEnv.cmd only works from cmd.exe, please install the Chocolatey Profile to take advantage of refreshenv from PowerShell" +echo | set /p dummy="Refreshing environment variables from registry for cmd.exe. Please wait..."
+ +goto main + +:: Set one environment variable from registry key +:SetFromReg + "%WinDir%\System32\Reg" QUERY "%~1" /v "%~2" > "%TEMP%\_envset.tmp" 2>NUL + for /f "usebackq skip=2 tokens=2,*" %%A IN ("%TEMP%\_envset.tmp") do ( + echo/set "%~3=%%B" + ) + goto :EOF + +:: Get a list of environment variables from registry +:GetRegEnv + "%WinDir%\System32\Reg" QUERY "%~1" > "%TEMP%\_envget.tmp" + for /f "usebackq skip=2" %%A IN ("%TEMP%\_envget.tmp") do ( + if /I not "%%~A"=="Path" ( + call :SetFromReg "%~1" "%%~A" "%%~A" + ) + ) + goto :EOF + +:main + echo/@echo off >"%TEMP%\_env.cmd" + + :: Slowly generating final file + call :GetRegEnv "HKLM\System\CurrentControlSet\Control\Session Manager\Environment" >> "%TEMP%\_env.cmd" + call :GetRegEnv "HKCU\Environment">>"%TEMP%\_env.cmd" >> "%TEMP%\_env.cmd" + + :: Special handling for PATH - mix both User and System + call :SetFromReg "HKLM\System\CurrentControlSet\Control\Session Manager\Environment" Path Path_HKLM >> "%TEMP%\_env.cmd" + call :SetFromReg "HKCU\Environment" Path Path_HKCU >> "%TEMP%\_env.cmd" + + :: Caution: do not insert space-chars before >> redirection sign + echo/set "Path=%%Path_HKLM%%;%%Path_HKCU%%" >> "%TEMP%\_env.cmd" + + :: Cleanup + del /f /q "%TEMP%\_envset.tmp" 2>nul + del /f /q "%TEMP%\_envget.tmp" 2>nul + + :: capture user / architecture + SET "OriginalUserName=%USERNAME%" + SET "OriginalArchitecture=%PROCESSOR_ARCHITECTURE%" + + :: Set these variables + call "%TEMP%\_env.cmd" + + :: reset user / architecture + SET "USERNAME=%OriginalUserName%" + SET "PROCESSOR_ARCHITECTURE=%OriginalArchitecture%" + + echo | set /p dummy="Finished." + echo . \ No newline at end of file diff --git a/logger/appearance.cfg b/logger/appearance.cfg new file mode 100644 index 0000000..911990d --- /dev/null +++ b/logger/appearance.cfg @@ -0,0 +1,29 @@ +[Bot Loadout] +team_color_id = 27 +custom_color_id = 75 +car_id = 23 +decal_id = 307 +wheels_id = 1656 +boost_id = 0 +antenna_id = 287 +hat_id = 0 +paint_finish_id = 1978 +custom_finish_id = 1978 +engine_audio_id = 0 +trails_id = 0 +goal_explosion_id = 1971 + +[Bot Loadout Orange] +team_color_id = 1 +custom_color_id = 1 +car_id = 23 +decal_id = 0 +wheels_id = 818 +boost_id = 0 +antenna_id = 287 +hat_id = 0 +paint_finish_id = 266 +custom_finish_id = 266 +engine_audio_id = 0 +trails_id = 0 +goal_explosion_id = 1971 \ No newline at end of file diff --git a/logger/logger.cfg b/logger/logger.cfg new file mode 100644 index 0000000..e0b4ec2 --- /dev/null +++ b/logger/logger.cfg @@ -0,0 +1,9 @@ +[Locations] +# Path to loadout config. Can use relative path from here. +looks_config = ./appearance.cfg + +# Path to python file. Can use relative path from here. 
+python_file = ./logger.py + +# Name of the bot in-game +name = logger diff --git a/logger/logger.py b/logger/logger.py new file mode 100644 index 0000000..06610b0 --- /dev/null +++ b/logger/logger.py @@ -0,0 +1,99 @@ +import math +import json + +from rlbot.agents.base_agent import BaseAgent, SimpleControllerState +from rlbot.utils.structures.game_data_struct import GameTickPacket + +from RLUtilities.Simulation import Input +from RLUtilities.controller_input import controller + + +class PythonExample(BaseAgent): + + def __init__(self, name, team, index): + self.count = 0 + self.recording = False + self.start_time = 0 + self.outfile = None + self.frame_info = [] + + def get_output(self, packet: GameTickPacket) -> SimpleControllerState: + + if controller.L1 == 1: + + if self.recording == False: + self.frame_info = [] + self.recording = True + self.start_time = packet.game_info.seconds_elapsed + + self.frame_info.append({ + "time": packet.game_info.seconds_elapsed - self.start_time, + "ball": { + "location": [ + packet.game_ball.physics.location.x, + packet.game_ball.physics.location.y, + packet.game_ball.physics.location.z + ], + "velocity": [ + packet.game_ball.physics.velocity.x, + packet.game_ball.physics.velocity.y, + packet.game_ball.physics.velocity.z + ], + "rotator": [ + packet.game_ball.physics.rotation.pitch, + packet.game_ball.physics.rotation.yaw, + packet.game_ball.physics.rotation.roll + ], + "angular_velocity": [ + packet.game_ball.physics.angular_velocity.x, + packet.game_ball.physics.angular_velocity.y, + packet.game_ball.physics.angular_velocity.z + ] + }, + "car": { + "location": [ + packet.game_cars[0].physics.location.x, + packet.game_cars[0].physics.location.y, + packet.game_cars[0].physics.location.z + ], + "velocity": [ + packet.game_cars[0].physics.velocity.x, + packet.game_cars[0].physics.velocity.y, + packet.game_cars[0].physics.velocity.z + ], + "rotator": [ + packet.game_cars[0].physics.rotation.pitch, + packet.game_cars[0].physics.rotation.yaw, + packet.game_cars[0].physics.rotation.roll + ], + "angular_velocity": [ + packet.game_cars[0].physics.angular_velocity.x, + packet.game_cars[0].physics.angular_velocity.y, + packet.game_cars[0].physics.angular_velocity.z + ], + "boost": packet.game_cars[0].boost, + "jumped": packet.game_cars[0].jumped, + "double_jumped": packet.game_cars[0].double_jumped, + "is_supersonic": packet.game_cars[0].is_super_sonic, + "has_wheel_contact": packet.game_cars[0].has_wheel_contact + }, + "input": { + "throttle": controller.throttle, + "steer": controller.steer, + "pitch": controller.pitch, + "yaw": controller.yaw, + "roll": controller.roll, + "jump": controller.jump, + "boost": controller.boost, + "handbrake": controller.handbrake + } + }) + + else: + if self.recording == True: + with open(f"data_{self.count}.json", "w") as outfile: + json.dump(self.frame_info, outfile) + self.recording = False + self.count += 1 + + return controller.get_output() diff --git a/logger/rlbot.cfg b/logger/rlbot.cfg new file mode 100644 index 0000000..d3b01c8 --- /dev/null +++ b/logger/rlbot.cfg @@ -0,0 +1,73 @@ +[RLBot Configuration] +# Visit https://github.com/RLBot/RLBot/wiki/Config-File-Documentation to see what you can put here. + +[Team Configuration] +# Visit https://github.com/RLBot/RLBot/wiki/Config-File-Documentation to see what you can put here. + +[Match Configuration] +# Number of bots/players which will be spawned. We support up to max 10. 
+num_participants = 1 +game_mode = Soccer +game_map = Mannfield + +[Mutator Configuration] +# Visit https://github.com/RLBot/RLBot/wiki/Config-File-Documentation to see what you can put here. + +[Participant Configuration] +# Put the name of your bot config file here. Only num_participants config files will be read! +# Everything needs a config, even players and default bots. We still set loadouts and names from config! +participant_config_0 = logger.cfg +participant_config_1 = logger.cfg +participant_config_2 = logger.cfg +participant_config_3 = logger.cfg +participant_config_4 = logger.cfg +participant_config_5 = logger.cfg +participant_config_6 = logger.cfg +participant_config_7 = logger.cfg +participant_config_8 = logger.cfg +participant_config_9 = logger.cfg + +# team 0 shoots on positive goal, team 1 shoots on negative goal +participant_team_0 = 0 +participant_team_1 = 1 +participant_team_2 = 0 +participant_team_3 = 1 +participant_team_4 = 0 +participant_team_5 = 1 +participant_team_6 = 0 +participant_team_7 = 1 +participant_team_8 = 0 +participant_team_9 = 1 + +# Accepted values are "human", "rlbot", "psyonix", and "party_member_bot" +# You can have up to 4 local players and they must be activated in game or it will crash. +# If no player is specified you will be spawned in as spectator! +# human - not controlled by the framework +# rlbot - controlled by the framework +# psyonix - default bots (skill level can be changed with participant_bot_skill +# party_member_bot - controlled by the framework but the game detects it as a human +participant_type_0 = party_member_bot +participant_type_1 = rlbot +participant_type_2 = rlbot +participant_type_3 = rlbot +participant_type_4 = rlbot +participant_type_5 = rlbot +participant_type_6 = rlbot +participant_type_7 = rlbot +participant_type_8 = rlbot +participant_type_9 = rlbot + + +# If participant is a bot and not RLBot controlled, this value will be used to set bot skill. +# 0.0 is Rookie, 0.5 is pro, 1.0 is all-star. You can set values in-between as well. +# Please leave a value here even if it isn't used :) +participant_bot_skill_0 = 1.0 +participant_bot_skill_1 = 1.0 +participant_bot_skill_2 = 1.0 +participant_bot_skill_3 = 1.0 +participant_bot_skill_4 = 1.0 +participant_bot_skill_5 = 1.0 +participant_bot_skill_6 = 1.0 +participant_bot_skill_7 = 1.0 +participant_bot_skill_8 = 1.0 +participant_bot_skill_9 = 1.0 diff --git a/logger/run.bat b/logger/run.bat new file mode 100644 index 0000000..047f068 --- /dev/null +++ b/logger/run.bat @@ -0,0 +1,9 @@ +@echo off + +@rem Change the working directory to the location of this file so that relative paths will work +cd /D "%~dp0" + +@rem Make sure the environment variables are up-to-date. This is useful if the user installed python a moment ago. 
+call ./RefreshEnv.cmd + +python run.py diff --git a/logger/run.py b/logger/run.py new file mode 100644 index 0000000..834ba84 --- /dev/null +++ b/logger/run.py @@ -0,0 +1,41 @@ +# https://stackoverflow.com/a/51704613 +try: + from pip import main as pipmain +except ImportError: + from pip._internal import main as pipmain + + +# https://stackoverflow.com/a/24773951 +def install_and_import(package): + import importlib + + try: + importlib.import_module(package) + except ImportError: + pipmain(['install', package]) + finally: + globals()[package] = importlib.import_module(package) + + +if __name__ == '__main__': + install_and_import('rlbot') + from rlbot.utils import public_utils + + if public_utils.is_safe_to_upgrade(): + pipmain(['install', '-r', 'requirements.txt', '--upgrade', '--upgrade-strategy=eager']) + + try: + import sys + + if len(sys.argv) > 1 and sys.argv[1] == 'gui': + from rlbot.gui.qt_root import RLBotQTGui + + RLBotQTGui.main() + else: + from rlbot import runner + + runner.main() + except Exception as e: + print("Encountered exception: ", e) + print("Press enter to close.") + input() diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..8ce80ed --- /dev/null +++ b/requirements.txt @@ -0,0 +1,12 @@ +rlutilities~=0.0.13 +# Include everything the framework requires +# You will automatically get updates for all versions starting with "1.". +rlbot==1.* + +# This will cause pip to auto-upgrade and stop scaring people with warning messages +pip~=20.1.1 + +dataclasses~=0.6 +configparser~=5.0.0 +numpy~=1.19.1 +setuptools~=47.3.1 \ No newline at end of file diff --git a/rlbot.cfg b/rlbot.cfg new file mode 100644 index 0000000..7b7051a --- /dev/null +++ b/rlbot.cfg @@ -0,0 +1,73 @@ +[RLBot Configuration] +# Visit https://github.com/RLBot/RLBot/wiki/Config-File-Documentation to see what you can put here. + +[Team Configuration] +# Visit https://github.com/RLBot/RLBot/wiki/Config-File-Documentation to see what you can put here. + +[Match Configuration] +# Number of bots/players which will be spawned. We support up to max 64. +num_participants = 2 +game_mode = Soccer +game_map = Mannfield + +[Mutator Configuration] +# Visit https://github.com/RLBot/RLBot/wiki/Config-File-Documentation to see what you can put here. + +[Participant Configuration] +# Put the name of your bot config file here. Only num_participants config files will be read! +# Everything needs a config, even players and default bots. We still set loadouts and names from config! +participant_config_0 = src/bot.cfg +participant_config_1 = src/bot.cfg +participant_config_2 = src/bot.cfg +participant_config_3 = src/bot.cfg +participant_config_4 = src/bot.cfg +participant_config_5 = src/bot.cfg +participant_config_6 = src/bot.cfg +participant_config_7 = src/bot.cfg +participant_config_8 = src/bot.cfg +participant_config_9 = src/bot.cfg + +# team 0 shoots on positive goal, team 1 shoots on negative goal +participant_team_0 = 0 +participant_team_1 = 1 +participant_team_2 = 0 +participant_team_3 = 1 +participant_team_4 = 0 +participant_team_5 = 1 +participant_team_6 = 0 +participant_team_7 = 1 +participant_team_8 = 0 +participant_team_9 = 1 + +# Accepted values are "human", "rlbot", "psyonix", and "party_member_bot" +# You can have up to 4 local players and they must be activated in game or it will crash. +# If no player is specified you will be spawned in as spectator! 
+# human - not controlled by the framework +# rlbot - controlled by the framework +# psyonix - default bots (skill level can be changed with participant_bot_skill +# party_member_bot - controlled by the framework but the game detects it as a human +participant_type_0 = rlbot +participant_type_1 = rlbot +participant_type_2 = rlbot +participant_type_3 = rlbot +participant_type_4 = rlbot +participant_type_5 = rlbot +participant_type_6 = rlbot +participant_type_7 = rlbot +participant_type_8 = rlbot +participant_type_9 = rlbot + + +# If participant is a bot and not RLBot controlled, this value will be used to set bot skill. +# 0.0 is Rookie, 0.5 is pro, 1.0 is all-star. You can set values in-between as well. +# Please leave a value here even if it isn't used :) +participant_bot_skill_0 = 1.0 +participant_bot_skill_1 = 1.0 +participant_bot_skill_2 = 1.0 +participant_bot_skill_3 = 1.0 +participant_bot_skill_4 = 1.0 +participant_bot_skill_5 = 1.0 +participant_bot_skill_6 = 1.0 +participant_bot_skill_7 = 1.0 +participant_bot_skill_8 = 1.0 +participant_bot_skill_9 = 1.0 diff --git a/run-gui.bat b/run-gui.bat new file mode 100644 index 0000000..a1c00d9 --- /dev/null +++ b/run-gui.bat @@ -0,0 +1,11 @@ +@echo off + +@rem Change the working directory to the location of this file so that relative paths will work +cd /D "%~dp0" + +@rem Make sure the environment variables are up-to-date. This is useful if the user installed python a moment ago. +call ./RefreshEnv.cmd + +python run.py gui + +pause diff --git a/run.bat b/run.bat new file mode 100644 index 0000000..b3318d9 --- /dev/null +++ b/run.bat @@ -0,0 +1,11 @@ +@echo off + +@rem Change the working directory to the location of this file so that relative paths will work +cd /D "%~dp0" + +@rem Make sure the environment variables are up-to-date. This is useful if the user installed python a moment ago. 
+call ./RefreshEnv.cmd + +python run.py + +pause diff --git a/run.py b/run.py new file mode 100644 index 0000000..e8a45a3 --- /dev/null +++ b/run.py @@ -0,0 +1,40 @@ +import subprocess +import sys + +DEFAULT_LOGGER = 'rlbot' + +if __name__ == '__main__': + + try: + from rlbot.utils import public_utils, logging_utils + + logger = logging_utils.get_logger(DEFAULT_LOGGER) + if not public_utils.have_internet(): + logger.log(logging_utils.logging_level, + 'Skipping upgrade check for now since it looks like you have no internet') + elif public_utils.is_safe_to_upgrade(): + subprocess.call([sys.executable, "-m", "pip", "install", '-r', 'requirements.txt', '--upgrade', + '--upgrade-strategy=eager']) + + # https://stackoverflow.com/a/44401013 + rlbots = [module for module in sys.modules if module.startswith('rlbot')] + for rlbot_module in rlbots: + sys.modules.pop(rlbot_module) + + except ImportError: + subprocess.call( + [sys.executable, "-m", "pip", "install", '-r', 'requirements.txt', '--upgrade', '--upgrade-strategy=eager']) + + try: + if len(sys.argv) > 1 and sys.argv[1] == 'gui': + from rlbot.gui.qt_root import RLBotQTGui + + RLBotQTGui.main() + else: + from rlbot import runner + + runner.main() + except Exception as e: + print("Encountered exception: ", e) + print("Press enter to close.") + input() diff --git a/src/appearance.cfg b/src/appearance.cfg new file mode 100644 index 0000000..b89ed16 --- /dev/null +++ b/src/appearance.cfg @@ -0,0 +1,49 @@ +[Bot Loadout] +team_color_id = 60 +custom_color_id = 0 +car_id = 23 +decal_id = 0 +wheels_id = 1565 +boost_id = 35 +antenna_id = 0 +hat_id = 0 +paint_finish_id = 1681 +custom_finish_id = 1681 +engine_audio_id = 0 +trails_id = 3220 +goal_explosion_id = 3018 + +[Bot Loadout Orange] +team_color_id = 3 +custom_color_id = 0 +car_id = 23 +decal_id = 0 +wheels_id = 1565 +boost_id = 35 +antenna_id = 0 +hat_id = 0 +paint_finish_id = 1681 +custom_finish_id = 1681 +engine_audio_id = 0 +trails_id = 3220 +goal_explosion_id = 3018 + +[Bot Paint Blue] +car_paint_id = 12 +decal_paint_id = 0 +wheels_paint_id = 7 +boost_paint_id = 7 +antenna_paint_id = 0 +hat_paint_id = 0 +trails_paint_id = 2 +goal_explosion_paint_id = 0 + +[Bot Paint Orange] +car_paint_id = 12 +decal_paint_id = 0 +wheels_paint_id = 14 +boost_paint_id = 14 +antenna_paint_id = 0 +hat_paint_id = 0 +trails_paint_id = 14 +goal_explosion_paint_id = 0 diff --git a/src/bot.cfg b/src/bot.cfg new file mode 100644 index 0000000..4b1bc7b --- /dev/null +++ b/src/bot.cfg @@ -0,0 +1,29 @@ +[Locations] +# Path to loadout config. Can use relative path from here. +looks_config = ./appearance.cfg + +# Path to python file. Can use relative path from here. +python_file = ./drone_reference.py + +# Name of the bot in-game +name = RocketNoodles + +# The maximum number of ticks per second that your bot wishes to receive. 
+maximum_tick_rate_preference = 60 + +[Details] +# These values are optional but useful metadata for helper programs +# Name of the bot's creator/developer +developer = Serpentine + +# Short description of the bot +description = RocketNoodles bot by Serpentine + +# Fun fact about the bot +fun_fact = It is not a real noodle but a Snake + +# Link to the repository +github = https://gitlab.serpentineai.nl/competitions/rocket-league/rocketnoodles + +# Programming language +language = python diff --git a/src/drone_reference.py b/src/drone_reference.py new file mode 100644 index 0000000..bb056c6 --- /dev/null +++ b/src/drone_reference.py @@ -0,0 +1,10 @@ +from pathlib import Path +from rlbot.agents.hivemind.drone_agent import DroneAgent + + +class RocketNoodlesDroneAgent(DroneAgent): + """Drone agent class used by the PythonHivemind class.""" + + hive_path = str(Path(__file__).parent / 'main.py') + hive_key = 'noodle-hive' + hive_name = 'NoodleHive' diff --git a/src/gosling/objects.py b/src/gosling/objects.py new file mode 100644 index 0000000..8d4b87a --- /dev/null +++ b/src/gosling/objects.py @@ -0,0 +1,420 @@ +import math +import rlbot.utils.structures.game_data_struct as game_data_struct +from rlbot.agents.base_agent import BaseAgent, SimpleControllerState + + +# This file holds all of the objects used in gosling utils +# Includes custom vector and matrix objects + +class GoslingAgent(BaseAgent): + # This is the main object of Gosling Utils. It holds/updates information about the game and runs routines + # All utils rely on information being structured and accessed the same way as configured in this class + def initialize_agent(self): + # A list of cars for both teammates and opponents + self.friends = [] + self.foes = [] + # This holds the car_object for our agent + self.me = car_object(self.index) + + self.ball = ball_object() + self.game = game_object() + # A list of boosts + self.boosts = [] + # goals + self.friend_goal = goal_object(self.team) + self.foe_goal = goal_object(not self.team) + # A list that acts as the routines stack + self.stack = [] + # Game time + self.time = 0.0 + # Whether or not GoslingAgent has run its get_ready() function + self.ready = False + # the controller that is returned to the framework after every tick + self.controller = SimpleControllerState() + # a flag that tells us when kickoff is happening + self.kickoff_flag = False + + def get_ready(self, packet): + # Preps all of the objects that will be updated during play + field_info = self.get_field_info() + for i in range(field_info.num_boosts): + boost = field_info.boost_pads[i] + self.boosts.append(boost_object(i, boost.location, boost.is_full_boost)) + self.refresh_player_lists(packet) + self.ball.update(packet) + self.ready = True + + def refresh_player_lists(self, packet): + # makes new friend/foe lists + # Useful to keep separate from get_ready because humans can join/leave a match + self.friends = [car_object(i, packet) for i in range(packet.num_cars) if + packet.game_cars[i].team == self.team and i != self.index] + self.foes = [car_object(i, packet) for i in range(packet.num_cars) if packet.game_cars[i].team != self.team] + + def push(self, routine): + # Shorthand for adding a routine to the stack + self.stack.append(routine) + + def pop(self): + # Shorthand for removing a routine from the stack, returns the routine + return self.stack.pop() + + def line(self, start, end, color=None): + color = color if color != None else [255, 255, 255] + self.renderer.draw_line_3d(start, end,
self.renderer.create_color(255, *color)) + + def debug_stack(self): + # Draws the stack on the screen + white = self.renderer.white() + for i in range(len(self.stack) - 1, -1, -1): + text = self.stack[i].__class__.__name__ + self.renderer.draw_string_2d(10, 50 + (50 * (len(self.stack) - i)), 3, 3, text, white) + + def clear(self): + # Shorthand for clearing the stack of all routines + self.stack = [] + + def preprocess(self, packet): + # Calling the update functions for all of the objects + if packet.num_cars != len(self.friends) + len(self.foes) + 1: self.refresh_player_lists(packet) + for car in self.friends: car.update(packet) + for car in self.foes: car.update(packet) + for pad in self.boosts: pad.update(packet) + self.ball.update(packet) + self.me.update(packet) + self.game.update(packet) + self.time = packet.game_info.seconds_elapsed + # When a new kickoff begins we empty the stack + if self.kickoff_flag == False and packet.game_info.is_round_active and packet.game_info.is_kickoff_pause: + self.stack = [] + # Tells us when to go for kickoff + self.kickoff_flag = packet.game_info.is_round_active and packet.game_info.is_kickoff_pause + + def get_output(self, packet): + # Reset controller + self.controller.__init__() + # Get ready, then preprocess + if not self.ready: + self.get_ready(packet) + self.preprocess(packet) + + self.renderer.begin_rendering() + # Run our strategy code + self.run() + # run the routine on the end of the stack + if len(self.stack) > 0: + self.stack[-1].run(self) + self.renderer.end_rendering() + # send our updated controller back to rlbot + return self.controller + + def run(self): + # override this with your strategy code + pass + + +class car_object: + # The carObject, and kin, convert the gametickpacket in something a little friendlier to use, + # and are updated by GoslingAgent as the game runs + def __init__(self, index, packet=None): + self.location = Vector3(0, 0, 0) + self.orientation = Matrix3(0, 0, 0) + self.velocity = Vector3(0, 0, 0) + self.angular_velocity = [0, 0, 0] + self.demolished = False + self.airborne = False + self.supersonic = False + self.jumped = False + self.doublejumped = False + self.boost = 0 + self.index = index + if packet != None: + self.update(packet) + + def local(self, value): + # Shorthand for self.orientation.dot(value) + return self.orientation.dot(value) + + def update(self, packet): + car = packet.game_cars[self.index] + self.location.data = [car.physics.location.x, car.physics.location.y, car.physics.location.z] + self.velocity.data = [car.physics.velocity.x, car.physics.velocity.y, car.physics.velocity.z] + self.orientation = Matrix3(car.physics.rotation.pitch, car.physics.rotation.yaw, car.physics.rotation.roll) + self.angular_velocity = self.orientation.dot( + [car.physics.angular_velocity.x, car.physics.angular_velocity.y, car.physics.angular_velocity.z]).data + self.demolished = car.is_demolished + self.airborne = not car.has_wheel_contact + self.supersonic = car.is_super_sonic + self.jumped = car.jumped + self.doublejumped = car.double_jumped + self.boost = car.boost + + @property + def forward(self): + # A vector pointing forwards relative to the cars orientation. Its magnitude is 1 + return self.orientation.forward + + @property + def left(self): + # A vector pointing left relative to the cars orientation. Its magnitude is 1 + return self.orientation.left + + @property + def up(self): + # A vector pointing up relative to the cars orientation. 
Its magnitude is 1 + return self.orientation.up + + +class ball_object: + def __init__(self): + self.location = Vector3(0, 0, 0) + self.velocity = Vector3(0, 0, 0) + self.latest_touched_time = 0 + self.latest_touched_team = 0 + + def update(self, packet): + ball = packet.game_ball + self.location.data = [ball.physics.location.x, ball.physics.location.y, ball.physics.location.z] + self.velocity.data = [ball.physics.velocity.x, ball.physics.velocity.y, ball.physics.velocity.z] + self.latest_touched_time = ball.latest_touch.time_seconds + self.latest_touched_team = ball.latest_touch.team + + +class boost_object: + def __init__(self, index, location, large): + self.index = index + self.location = Vector3(location.x, location.y, location.z) + self.active = True + self.large = large + + def update(self, packet): + self.active = packet.game_boosts[self.index].is_active + + +class goal_object: + # This is a simple object that creates/holds goalpost locations for a given team (for soccer on standard maps only) + def __init__(self, team): + team = 1 if team == 1 else -1 + self.location = Vector3(0, team * 5100, 320) # center of goal line + # Posts are closer to x=750, but this allows the bot to be a little more accurate + self.left_post = Vector3(team * 850, team * 5100, 320) + self.right_post = Vector3(-team * 850, team * 5100, 320) + + +class game_object: + # This object holds information about the current match + def __init__(self): + self.time = 0 + self.time_remaining = 0 + self.overtime = False + self.round_active = False + self.kickoff = False + self.match_ended = False + + def update(self, packet): + game = packet.game_info + self.time = game.seconds_elapsed + self.time_remaining = game.game_time_remaining + self.overtime = game.is_overtime + self.round_active = game.is_round_active + self.kickoff = game.is_kickoff_pause + self.match_ended = game.is_match_ended + + +class Matrix3: + # The Matrix3's sole purpose is to convert roll, pitch, and yaw data from the gametickpaket into an orientation matrix + # An orientation matrix contains 3 Vector3's + # Matrix3[0] is the "forward" direction of a given car + # Matrix3[1] is the "left" direction of a given car + # Matrix3[2] is the "up" direction of a given car + # If you have a distance between the car and some object, ie ball.location - car.location, + # you can convert that to local coordinates by dotting it with this matrix + # ie: local_ball_location = Matrix3.dot(ball.location - car.location) + def __init__(self, pitch, yaw, roll): + CP = math.cos(pitch) + SP = math.sin(pitch) + CY = math.cos(yaw) + SY = math.sin(yaw) + CR = math.cos(roll) + SR = math.sin(roll) + # List of 3 vectors, each descriping the direction of an axis: Forward, Left, and Up + self.data = [ + Vector3(CP * CY, CP * SY, SP), + Vector3(CY * SP * SR - CR * SY, SY * SP * SR + CR * CY, -CP * SR), + Vector3(-CR * CY * SP - SR * SY, -CR * SY * SP + SR * CY, CP * CR)] + self.forward, self.left, self.up = self.data + + def __getitem__(self, key): + return self.data[key] + + def dot(self, vector): + return Vector3(self.forward.dot(vector), self.left.dot(vector), self.up.dot(vector)) + + +class Vector3: + # This is the backbone of Gosling Utils. 
The Vector3 makes it easy to store positions, velocities, etc and perform vector math + # A Vector3 can be created with: + # - Anything that has a __getitem__ (lists, tuples, Vector3's, etc) + # - 3 numbers + # - A gametickpacket vector + def __init__(self, *args): + if hasattr(args[0], "__getitem__"): + self.data = list(args[0]) + elif isinstance(args[0], game_data_struct.Vector3): + self.data = [args[0].x, args[0].y, args[0].z] + elif isinstance(args[0], game_data_struct.Rotator): + self.data = [args[0].pitch, args[0].yaw, args[0].roll] + elif len(args) == 3: + self.data = list(args) + else: + raise TypeError("Vector3 unable to accept %s" % (args)) + + # Property functions allow you to use `Vector3.x` vs `Vector3[0]` + @property + def x(self): + return self.data[0] + + @x.setter + def x(self, value): + self.data[0] = value + + @property + def y(self): + return self.data[1] + + @y.setter + def y(self, value): + self.data[1] = value + + @property + def z(self): + return self.data[2] + + @z.setter + def z(self, value): + self.data[2] = value + + def __getitem__(self, key): + # To access a single value in a Vector3, treat it like a list + # ie: to get the first (x) value use: Vector3[0] + # The same works for setting values + return self.data[key] + + def __setitem__(self, key, value): + self.data[key] = value + + def __str__(self): + # Vector3's can be printed to console + return str(self.data) + + __repr__ = __str__ + + def __eq__(self, value): + # Vector3's can be compared with: + # - Another Vector3, in which case True will be returned if they have the same values + # - A list, in which case True will be returned if they have the same values + # - A single value, in which case True will be returned if the Vector's length matches the value + if isinstance(value, Vector3): + return self.data == value.data + elif isinstance(value, list): + return self.data == value + else: + return self.magnitude() == value + + # Vector3's support most operators (+-*/) + # If using an operator with another Vector3, each dimension will be independent + # ie x+x, y+y, z+z + # If using an operator with only a value, each dimension will be affected by that value + # ie x+v, y+v, z+v + def __add__(self, value): + if isinstance(value, Vector3): + return Vector3(self[0] + value[0], self[1] + value[1], self[2] + value[2]) + return Vector3(self[0] + value, self[1] + value, self[2] + value) + + __radd__ = __add__ + + def __sub__(self, value): + if isinstance(value, Vector3): + return Vector3(self[0] - value[0], self[1] - value[1], self[2] - value[2]) + return Vector3(self[0] - value, self[1] - value, self[2] - value) + + __rsub__ = __sub__ + + def __neg__(self): + return Vector3(-self[0], -self[1], -self[2]) + + def __mul__(self, value): + if isinstance(value, Vector3): + return Vector3(self[0] * value[0], self[1] * value[1], self[2] * value[2]) + return Vector3(self[0] * value, self[1] * value, self[2] * value) + + __rmul__ = __mul__ + + def __truediv__(self, value): + if isinstance(value, Vector3): + return Vector3(self[0] / value[0], self[1] / value[1], self[2] / value[2]) + return Vector3(self[0] / value, self[1] / value, self[2] / value) + + def __rtruediv__(self, value): + if isinstance(value, Vector3): + return Vector3(value[0] / self[0], value[1] / self[1], value[2] / self[2]) + raise TypeError("unsupported rtruediv operands") + + def magnitude(self): + # Magnitude() returns the length of the vector + return math.sqrt((self[0] * self[0]) + (self[1] * self[1]) + (self[2] * self[2])) + + def normalize(self, 
return_magnitude=False): + # Normalize() returns a Vector3 that shares the same direction but has a length of 1.0 + # Normalize(True) can also be used if you'd like the length of this Vector3 (used for optimization) + magnitude = self.magnitude() + if magnitude != 0: + if return_magnitude: + return Vector3(self[0] / magnitude, self[1] / magnitude, self[2] / magnitude), magnitude + return Vector3(self[0] / magnitude, self[1] / magnitude, self[2] / magnitude) + if return_magnitude: + return Vector3(0, 0, 0), 0 + return Vector3(0, 0, 0) + + # Linear algebra functions + def dot(self, value): + return self[0] * value[0] + self[1] * value[1] + self[2] * value[2] + + def cross(self, value): + return Vector3((self[1] * value[2]) - (self[2] * value[1]), (self[2] * value[0]) - (self[0] * value[2]), + (self[0] * value[1]) - (self[1] * value[0])) + + def flatten(self): + # Sets Z (Vector3[2]) to 0 + return Vector3(self[0], self[1], 0) + + def render(self): + # Returns a list with the x and y values, to be used with pygame + return [self[0], self[1]] + + def copy(self): + # Returns a copy of this Vector3 + return Vector3(self.data[:]) + + def angle(self, value): + # Returns the angle between this Vector3 and another Vector3 + return math.acos(round(self.flatten().normalize().dot(value.flatten().normalize()), 4)) + + def rotate(self, angle): + # Rotates this Vector3 by the given angle in radians + # Note that this is only 2D, in the x and y axis + return Vector3((math.cos(angle) * self[0]) - (math.sin(angle) * self[1]), + (math.sin(angle) * self[0]) + (math.cos(angle) * self[1]), self[2]) + + def clamp(self, start, end): + # Similar to integer clamping, Vector3's clamp() forces the Vector3's direction between a start and end Vector3 + # Such that Start < Vector3 < End in terms of clockwise rotation + # Note that this is only 2D, in the x and y axis + s = self.normalize() + right = s.dot(end.cross((0, 0, -1))) < 0 + left = s.dot(start.cross((0, 0, -1))) > 0 + if (right and left) if end.dot(start.cross((0, 0, -1))) > 0 else (right or left): + return self + if start.dot(s) < end.dot(s): + return end + return start diff --git a/src/gosling/routines.py b/src/gosling/routines.py new file mode 100644 index 0000000..651d298 --- /dev/null +++ b/src/gosling/routines.py @@ -0,0 +1,404 @@ +from gosling.utils import * + + +# This file holds all of the mechanical tasks, called "routines", that the bot can do + +class atba(): + # An example routine that just drives towards the ball at max speed + def run(self, agent): + relative_target = agent.ball.location - agent.me.location + local_target = agent.me.local(relative_target) + defaultPD(agent, local_target) + defaultThrottle(agent, 2300) + + +class aerial_shot(): + # Very similar to jump_shot(), but instead designed to hit targets above 300uu + # ***This routine is a WIP*** It does not currently hit the ball very hard, nor does it like to be accurate above 600uu or so + def __init__(self, ball_location, intercept_time, shot_vector, ratio): + self.ball_location = ball_location + self.intercept_time = intercept_time + # The direction we intend to hit the ball in + self.shot_vector = shot_vector + # The point we hit the ball at + self.intercept = self.ball_location - (self.shot_vector * 110) + # dictates when (how late) we jump, much later than in jump_shot because we can take advantage of a double jump + self.jump_threshold = 600 + # what time we began our jump at + self.jump_time = 0 + # If we need a second jump we have to let go of the jump button for 3 frames, this 
counts how many frames we have let go for + self.counter = 0 + + def run(self, agent): + raw_time_remaining = self.intercept_time - agent.time + # Capping raw_time_remaining above 0 to prevent division problems + time_remaining = cap(raw_time_remaining, 0.01, 10.0) + + car_to_ball = self.ball_location - agent.me.location + # whether we are to the left or right of the shot vector + side_of_shot = sign(self.shot_vector.cross((0, 0, 1)).dot(car_to_ball)) + + car_to_intercept = self.intercept - agent.me.location + car_to_intercept_perp = car_to_intercept.cross((0, 0, side_of_shot)) # perpendicular + distance_remaining = car_to_intercept.flatten().magnitude() + + speed_required = distance_remaining / time_remaining + # When still on the ground we pretend gravity doesn't exist, for better or worse + acceleration_required = backsolve(self.intercept, agent.me, time_remaining, 0 if self.jump_time == 0 else 325) + local_acceleration_required = agent.me.local(acceleration_required) + + # The adjustment causes the car to circle around the dodge point in an effort to line up with the shot vector + # The adjustment slowly decreases to 0 as the bot nears the time to jump + adjustment = car_to_intercept.angle(self.shot_vector) * distance_remaining / 1.57 # size of adjustment + adjustment *= (cap(self.jump_threshold - (acceleration_required[2]), 0.0, + self.jump_threshold) / self.jump_threshold) # factoring in how close to jump we are + # we don't adjust the final target if we are already jumping + final_target = self.intercept + ((car_to_intercept_perp.normalize() * adjustment) if self.jump_time == 0 else 0) + + # Some extra adjustment to the final target to ensure it's inside the field and we don't try to dirve through any goalposts to reach it + if abs(agent.me.location[1]) > 5150: final_target[0] = cap(final_target[0], -750, 750) + + local_final_target = agent.me.local(final_target - agent.me.location) + + # drawing debug lines to show the dodge point and final target (which differs due to the adjustment) + # agent.line(agent.me.location, self.intercept) + # agent.line(self.intercept - Vector3(0, 0, 100), self.intercept + Vector3(0, 0, 100), [255, 0, 0]) + # agent.line(final_target - Vector3(0, 0, 100), final_target + Vector3(0, 0, 100), [0, 255, 0]) + + angles = defaultPD(agent, local_final_target) + + if self.jump_time == 0: + defaultThrottle(agent, speed_required) + agent.controller.boost = False if abs(angles[1]) > 0.3 or agent.me.airborne else agent.controller.boost + agent.controller.handbrake = True if abs(angles[1]) > 2.3 else agent.controller.handbrake + if acceleration_required[2] > self.jump_threshold: + # Switch into the jump when the upward acceleration required reaches our threshold, hopefully we have aligned already... 
+ self.jump_time = agent.time + else: + time_since_jump = agent.time - self.jump_time + + # While airborne we boost if we're within 30 degrees of our local acceleration requirement + if agent.me.airborne and local_acceleration_required.magnitude() * time_remaining > 100: + angles = defaultPD(agent, local_acceleration_required) + if abs(angles[0]) + abs(angles[1]) < 0.5: + agent.controller.boost = True + if self.counter == 0 and (time_since_jump <= 0.2 and local_acceleration_required[2] > 0): + # hold the jump button up to 0.2 seconds to get the most acceleration from the first jump + agent.controller.jump = True + elif time_since_jump > 0.2 and self.counter < 3: + # Release the jump button for 3 ticks + agent.controller.jump = False + self.counter += 1 + elif local_acceleration_required[2] > 300 and self.counter == 3: + # the acceleration from the second jump is instant, so we only do it for 1 frame + agent.controller.jump = True + agent.controller.pitch = 0 + agent.controller.yaw = 0 + agent.controller.roll = 0 + self.counter += 1 + + if raw_time_remaining < -0.25 or not shot_valid(agent, self): + agent.pop() + agent.push(recovery()) + + +class flip(): + # Flip takes a vector in local coordinates and flips/dodges in that direction + # cancel causes the flip to cancel halfway through, which can be used to half-flip + def __init__(self, vector, cancel=False): + self.vector = vector.normalize() + self.pitch = abs(self.vector[0]) * -sign(self.vector[0]) + self.yaw = abs(self.vector[1]) * sign(self.vector[1]) + self.cancel = cancel + # the time the jump began + self.time = -1 + # keeps track of the frames the jump button has been released + self.counter = 0 + + def run(self, agent): + if self.time == -1: + elapsed = 0 + self.time = agent.time + else: + elapsed = agent.time - self.time + if elapsed < 0.15: + agent.controller.jump = True + elif elapsed >= 0.15 and self.counter < 3: + agent.controller.jump = False + self.counter += 1 + elif elapsed < 0.3 or (not self.cancel and elapsed < 0.9): + agent.controller.jump = True + agent.controller.pitch = self.pitch + agent.controller.yaw = self.yaw + else: + agent.pop() + agent.push(recovery()) + + +class goto(): + # Drives towards a designated (stationary) target + # Optional vector controls where the car should be pointing upon reaching the target + # TODO - slow down if target is inside our turn radius + def __init__(self, target, vector=None, direction=1): + self.target = target + self.vector = vector + self.direction = direction + + def run(self, agent): + car_to_target = self.target - agent.me.location + distance_remaining = car_to_target.flatten().magnitude() + + # agent.line(self.target - Vector3(0, 0, 500), self.target + Vector3(0, 0, 500), [255, 0, 255]) + + if self.vector != None: + # See commends for adjustment in jump_shot or aerial for explanation + side_of_vector = sign(self.vector.cross((0, 0, 1)).dot(car_to_target)) + car_to_target_perp = car_to_target.cross((0, 0, side_of_vector)).normalize() + adjustment = car_to_target.angle(self.vector) * distance_remaining / 3.14 + final_target = self.target + (car_to_target_perp * adjustment) + else: + final_target = self.target + + # Some adjustment to the final target to ensure it's inside the field and we don't try to dirve through any goalposts to reach it + if abs(agent.me.location[1]) > 5150: final_target[0] = cap(final_target[0], -750, 750) + + local_target = agent.me.local(final_target - agent.me.location) + + angles = defaultPD(agent, local_target, self.direction) + 
defaultThrottle(agent, 2300, self.direction) + + agent.controller.boost = False + agent.controller.handbrake = True if abs(angles[1]) > 2.3 else agent.controller.handbrake + + velocity = 1 + agent.me.velocity.magnitude() + if distance_remaining < 350: + agent.pop() + elif abs(angles[1]) < 0.05 and velocity > 600 and velocity < 2150 and distance_remaining / velocity > 2.0: + agent.push(flip(local_target)) + elif abs(angles[1]) > 2.8 and velocity < 200: + agent.push(flip(local_target, True)) + elif agent.me.airborne: + agent.push(recovery(self.target)) + + +class goto_boost(): + # very similar to goto() but designed for grabbing boost + # if a target is provided the bot will try to be facing the target as it passes over the boost + def __init__(self, boost, target=None): + self.boost = boost + self.target = target + + def run(self, agent): + car_to_boost = self.boost.location - agent.me.location + distance_remaining = car_to_boost.flatten().magnitude() + + # agent.line(self.boost.location - Vector3(0, 0, 500), self.boost.location + Vector3(0, 0, 500), [0, 255, 0]) + + if self.target != None: + vector = (self.target - self.boost.location).normalize() + side_of_vector = sign(vector.cross((0, 0, 1)).dot(car_to_boost)) + car_to_boost_perp = car_to_boost.cross((0, 0, side_of_vector)).normalize() + adjustment = car_to_boost.angle(vector) * distance_remaining / 3.14 + final_target = self.boost.location + (car_to_boost_perp * adjustment) + car_to_target = (self.target - agent.me.location).magnitude() + else: + adjustment = 9999 + car_to_target = 0 + final_target = self.boost.location.copy() + + # Some adjustment to the final target to ensure it's inside the field and we don't try to dirve through any goalposts to reach it + if abs(agent.me.location[1]) > 5150: final_target[0] = cap(final_target[0], -750, 750) + + local_target = agent.me.local(final_target - agent.me.location) + + angles = defaultPD(agent, local_target) + defaultThrottle(agent, 2300) + + agent.controller.boost = self.boost.large if abs(angles[1]) < 0.3 else False + agent.controller.handbrake = True if abs(angles[1]) > 2.3 else agent.controller.handbrake + + velocity = 1 + agent.me.velocity.magnitude() + if self.boost.active == False or agent.me.boost >= 99.0 or distance_remaining < 350: + agent.pop() + elif agent.me.airborne: + agent.push(recovery(self.target)) + elif abs(angles[1]) < 0.05 and velocity > 600 and velocity < 2150 and ( + distance_remaining / velocity > 2.0 or (adjustment < 90 and car_to_target / velocity > 2.0)): + agent.push(flip(local_target)) + + +class jump_shot(): + # Hits a target point at a target time towards a target direction + # Target must be no higher than 300uu unless you're feeling lucky + # TODO - speed + def __init__(self, ball_location, intercept_time, shot_vector, ratio, direction=1, speed=2300): + self.ball_location = ball_location + self.intercept_time = intercept_time + # The direction we intend to hit the ball in + self.shot_vector = shot_vector + # The point we dodge at + # 173 is the 93uu ball radius + a bit more to account for the car's hitbox + self.dodge_point = self.ball_location - (self.shot_vector * 173) + # Ratio is how aligned the car is. Low ratios (<0.5) aren't likely to be hit properly + self.ratio = ratio + # whether the car should attempt this backwards + self.direction = direction + # Intercept speed not implemented + self.speed_desired = speed + # controls how soon car will jump based on acceleration required. 
max 584 + # bigger = later, which allows more time to align with shot vector + # smaller = sooner + self.jump_threshold = 400 + # Flags for what part of the routine we are in + self.jumping = False + self.dodging = False + self.counter = 0 + + def run(self, agent): + raw_time_remaining = self.intercept_time - agent.time + # Capping raw_time_remaining above 0 to prevent division problems + time_remaining = cap(raw_time_remaining, 0.001, 10.0) + car_to_ball = self.ball_location - agent.me.location + # whether we are to the left or right of the shot vector + side_of_shot = sign(self.shot_vector.cross((0, 0, 1)).dot(car_to_ball)) + + car_to_dodge_point = self.dodge_point - agent.me.location + car_to_dodge_perp = car_to_dodge_point.cross((0, 0, side_of_shot)) # perpendicular + distance_remaining = car_to_dodge_point.magnitude() + + speed_required = distance_remaining / time_remaining + acceleration_required = backsolve(self.dodge_point, agent.me, time_remaining, 0 if not self.jumping else 650) + local_acceleration_required = agent.me.local(acceleration_required) + + # The adjustment causes the car to circle around the dodge point in an effort to line up with the shot vector + # The adjustment slowly decreases to 0 as the bot nears the time to jump + adjustment = car_to_dodge_point.angle(self.shot_vector) * distance_remaining / 2.0 # size of adjustment + adjustment *= (cap(self.jump_threshold - (acceleration_required[2]), 0.0, + self.jump_threshold) / self.jump_threshold) # factoring in how close to jump we are + # we don't adjust the final target if we are already jumping + final_target = self.dodge_point + ( + (car_to_dodge_perp.normalize() * adjustment) if not self.jumping else 0) + Vector3(0, 0, 50) + # Ensuring our target isn't too close to the sides of the field, where our car would get messed up by the radius of the curves + + # Some adjustment to the final target to ensure it's inside the field and we don't try to dirve through any goalposts to reach it + if abs(agent.me.location[1]) > 5150: final_target[0] = cap(final_target[0], -750, 750) + + local_final_target = agent.me.local(final_target - agent.me.location) + + # drawing debug lines to show the dodge point and final target (which differs due to the adjustment) + # agent.line(agent.me.location, self.dodge_point) + # agent.line(self.dodge_point - Vector3(0, 0, 100), self.dodge_point + Vector3(0, 0, 100), [255, 0, 0]) + # agent.line(final_target - Vector3(0, 0, 100), final_target + Vector3(0, 0, 100), [0, 255, 0]) + + # Calling our drive utils to get us going towards the final target + angles = defaultPD(agent, local_final_target, self.direction) + defaultThrottle(agent, speed_required, self.direction) + + # agent.line(agent.me.location, agent.me.location + (self.shot_vector * 200), [255, 255, 255]) + + agent.controller.boost = False if abs(angles[1]) > 0.3 or agent.me.airborne else agent.controller.boost + agent.controller.handbrake = True if abs( + angles[1]) > 2.3 and self.direction == 1 else agent.controller.handbrake + + if not self.jumping: + if raw_time_remaining <= 0.0 or (speed_required - 2300) * time_remaining > 45 or not shot_valid(agent, + self): + agent.pop() + if agent.me.airborne: + agent.push(recovery()) + elif local_acceleration_required[2] > self.jump_threshold and local_acceleration_required[ + 2] > local_acceleration_required.flatten().magnitude(): + # Switch into the jump when the upward acceleration required reaches our threshold, and our lateral acceleration is negligible + self.jumping = True + else: + if 
(raw_time_remaining > 0.2 and not shot_valid(agent, self, 150)) or raw_time_remaining <= -0.9 or ( + not agent.me.airborne and self.counter > 0): + agent.pop() + agent.push(recovery()) + elif self.counter == 0 and local_acceleration_required[2] > 0.0 and raw_time_remaining > 0.083: + # Initial jump to get airborne + we hold the jump button for extra power as required + agent.controller.jump = True + elif self.counter < 3: + # make sure we aren't jumping for at least 3 frames + agent.controller.jump = False + self.counter += 1 + elif raw_time_remaining <= 0.1 and raw_time_remaining > -0.9: + # dodge in the direction of the shot_vector + agent.controller.jump = True + if not self.dodging: + vector = agent.me.local(self.shot_vector) + self.p = abs(vector[0]) * -sign(vector[0]) + self.y = abs(vector[1]) * sign(vector[1]) * self.direction + self.dodging = True + # simulating a deadzone so that the dodge is more natural + agent.controller.pitch = self.p if abs(self.p) > 0.2 else 0 + agent.controller.yaw = self.y if abs(self.y) > 0.3 else 0 + + +class kickoff(): + # A simple 1v1 kickoff that just drives up behind the ball and dodges + # misses the boost on the slight-offcenter kickoffs haha + def run(self, agent): + target = agent.ball.location + Vector3(0, 200 * side(agent.team), 0) + local_target = agent.me.local(target - agent.me.location) + defaultPD(agent, local_target) + defaultThrottle(agent, 2300) + if local_target.magnitude() < 650: + agent.pop() + # flip towards opponent goal + agent.push(flip(agent.me.local(agent.foe_goal.location - agent.me.location))) + + +class recovery(): + # Point towards our velocity vector and land upright, unless we aren't moving very fast + # A vector can be provided to control where the car points when it lands + def __init__(self, target=None): + self.target = target + + def run(self, agent): + if self.target != None: + local_target = agent.me.local((self.target - agent.me.location).flatten()) + else: + local_target = agent.me.local(agent.me.velocity.flatten()) + + defaultPD(agent, local_target) + agent.controller.throttle = 1 + if not agent.me.airborne: + agent.pop() + + +class short_shot(): + # This routine drives towards the ball and attempts to hit it towards a given target + # It does not require ball prediction and kinda guesses at where the ball will be on its own + def __init__(self, target): + self.target = target + + def run(self, agent): + car_to_ball, distance = (agent.ball.location - agent.me.location).normalize(True) + ball_to_target = (self.target - agent.ball.location).normalize() + + relative_velocity = car_to_ball.dot(agent.me.velocity - agent.ball.velocity) + if relative_velocity != 0.0: + eta = cap(distance / cap(relative_velocity, 400, 2300), 0.0, 1.5) + else: + eta = 1.5 + + # If we are approaching the ball from the wrong side the car will try to only hit the very edge of the ball + left_vector = car_to_ball.cross((0, 0, 1)) + right_vector = car_to_ball.cross((0, 0, -1)) + target_vector = -ball_to_target.clamp(left_vector, right_vector) + final_target = agent.ball.location + (target_vector * (distance / 2)) + + # Some adjustment to the final target to ensure we don't try to dirve through any goalposts to reach it + if abs(agent.me.location[1]) > 5150: final_target[0] = cap(final_target[0], -750, 750) + + # agent.line(final_target - Vector3(0, 0, 100), final_target + Vector3(0, 0, 100), [255, 255, 255]) + + angles = defaultPD(agent, agent.me.local(final_target - agent.me.location)) + defaultThrottle(agent, 2300 if distance > 1600 else 
2300 - cap(1600 * abs(angles[1]), 0, 2050)) + agent.controller.boost = False if agent.me.airborne or abs(angles[1]) > 0.3 else agent.controller.boost + agent.controller.handbrake = True if abs(angles[1]) > 2.3 else agent.controller.handbrake + + if abs(angles[1]) < 0.05 and (eta < 0.45 or distance < 150): + agent.pop() + agent.push(flip(agent.me.local(car_to_ball))) diff --git a/src/gosling/tools.py b/src/gosling/tools.py new file mode 100644 index 0000000..d039abc --- /dev/null +++ b/src/gosling/tools.py @@ -0,0 +1,80 @@ +from gosling.routines import * + + +# This file is for strategic tools + +def find_hits(agent, targets): + # find_hits takes a dict of (left,right) target pairs and finds routines that could hit the ball between those target pairs + # find_hits is only meant for routines that require a defined intercept time/place in the future + # find_hits should not be called more than once in a given tick, as it has the potential to use an entire tick to calculate + + # Example Useage: + # targets = {"goal":(opponent_left_post,opponent_right_post), "anywhere_but_my_net":(my_right_post,my_left_post)} + # hits = find_hits(agent,targets) + # print(hits) + # >{"goal":[a ton of jump and aerial routines,in order from soonest to latest], "anywhere_but_my_net":[more routines and stuff]} + hits = {name: [] for name in targets} + struct = agent.get_ball_prediction_struct() + + # Begin looking at slices 0.25s into the future + # The number of slices + i = 15 + while i < struct.num_slices: + # Gather some data about the slice + intercept_time = struct.slices[i].game_seconds + time_remaining = intercept_time - agent.time + if time_remaining > 0: + ball_location = Vector3(struct.slices[i].physics.location) + ball_velocity = Vector3(struct.slices[i].physics.velocity).magnitude() + + if abs(ball_location[1]) > 5250: + break # abandon search if ball is scored at/after this point + + # determine the next slice we will look at, based on ball velocity (slower ball needs fewer slices) + i += 15 - cap(int(ball_velocity // 150), 0, 13) + + car_to_ball = ball_location - agent.me.location + # Adding a True to a vector's normalize will have it also return the magnitude of the vector + direction, distance = car_to_ball.normalize(True) + + # How far the car must turn in order to face the ball, for forward and reverse + forward_angle = direction.angle(agent.me.forward) + backward_angle = math.pi - forward_angle + + # Accounting for the average time it takes to turn and face the ball + # Backward is slightly longer as typically the car is moving forward and takes time to slow down + forward_time = time_remaining - (forward_angle * 0.318) + backward_time = time_remaining - (backward_angle * 0.418) + + # If the car only had to drive in a straight line, we ensure it has enough time to reach the ball (a few assumptions are made) + forward_flag = forward_time > 0.0 and (distance * 1.05 / forward_time) < ( + 2290 if agent.me.boost > distance / 100 else 1400) + backward_flag = distance < 1500 and backward_time > 0.0 and (distance * 1.05 / backward_time) < 1200 + + # Provided everything checks out, we begin to look at the target pairs + if forward_flag or backward_flag: + for pair in targets: + # First we correct the target coordinates to account for the ball's radius + # If swapped == True, the shot isn't possible because the ball wouldn't fit between the targets + left, right, swapped = post_correction(ball_location, targets[pair][0], targets[pair][1]) + if not swapped: + # Now we find the easiest direction to hit the 
ball in order to land it between the target points + left_vector = (left - ball_location).normalize() + right_vector = (right - ball_location).normalize() + best_shot_vector = direction.clamp(left_vector, right_vector) + + # Check to make sure our approach is inside the field + if in_field(ball_location - (200 * best_shot_vector), 1): + # The slope represents how close the car is to the chosen vector, higher = better + # A slope of 1.0 would mean the car is 45 degrees off + slope = find_slope(best_shot_vector, car_to_ball) + if forward_flag: + if ball_location[2] <= 300 and slope > 0.0: + hits[pair].append(jump_shot(ball_location, intercept_time, best_shot_vector, slope)) + if ball_location[2] > 300 and ball_location[2] < 600 and slope > 1.0 and ( + ball_location[2] - 250) * 0.14 > agent.me.boost: + hits[pair].append( + aerial_shot(ball_location, intercept_time, best_shot_vector, slope)) + elif backward_flag and ball_location[2] <= 280 and slope > 0.25: + hits[pair].append(jump_shot(ball_location, intercept_time, best_shot_vector, slope, -1)) + return hits diff --git a/src/gosling/utils.py b/src/gosling/utils.py new file mode 100644 index 0000000..9a08c30 --- /dev/null +++ b/src/gosling/utils.py @@ -0,0 +1,157 @@ +import math +from gosling.objects import Vector3 +from physics.simulations.ball import * + + +# This file is for small utilities for math and movement + +def backsolve(target, car, time, gravity=650): + # Finds the acceleration required for a car to reach a target in a specific amount of time + d = target - car.location + dvx = ((d[0] / time) - car.velocity[0]) / time + dvy = ((d[1] / time) - car.velocity[1]) / time + dvz = (((d[2] / time) - car.velocity[2]) / time) + (gravity * time) + return Vector3(dvx, dvy, dvz) + + +def cap(x, low, high): + # caps/clamps a number between a low and high value + if x < low: + return low + elif x > high: + return high + return x + + +def defaultPD(agent, local_target, direction=1.0): + # points the car towards a given local target. 
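Stepping back briefly to the backsolve helper defined near the top of utils.py above: a throwaway numeric check that mirrors its arithmetic for a stationary car 1000uu short of the target with one second to spare. The SimpleNamespace stand-in and the numbers are illustrative, not from the patch.

from types import SimpleNamespace

# Stand-in for the car object backsolve() expects: only .location and .velocity are used.
car = SimpleNamespace(location=(0.0, 0.0, 17.0), velocity=(0.0, 0.0, 0.0))
target = (0.0, 1000.0, 317.0)
time = 1.0

d = tuple(t - c for t, c in zip(target, car.location))
dvx = ((d[0] / time) - car.velocity[0]) / time
dvy = ((d[1] / time) - car.velocity[1]) / time
dvz = (((d[2] / time) - car.velocity[2]) / time) + (650 * time)  # default gravity term from backsolve()
print(dvx, dvy, dvz)  # -> 0.0 1000.0 950.0 : accelerate 1000uu/s^2 towards +y and 950 upwards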
+ # Direction can be changed to allow the car to steer towards a target while driving backwards + local_target *= direction + up = agent.me.local(Vector3(0, 0, 1)) # where "up" is in local coordinates + target_angles = [ + math.atan2(local_target[2], local_target[0]), # angle required to pitch towards target + math.atan2(local_target[1], local_target[0]), # angle required to yaw towards target + math.atan2(up[1], up[2])] # angle required to roll upright + # Once we have the angles we need to rotate, we feed them into PD loops to determing the controller inputs + agent.controller.steer = steerPD(target_angles[1], 0) * direction + agent.controller.pitch = steerPD(target_angles[0], agent.me.angular_velocity[1] / 4) + agent.controller.yaw = steerPD(target_angles[1], -agent.me.angular_velocity[2] / 4) + agent.controller.roll = steerPD(target_angles[2], agent.me.angular_velocity[0] / 2) + # Returns the angles, which can be useful for other purposes + return target_angles + + +def defaultThrottle(agent, target_speed, direction=1.0, use_boost: bool = True): + # accelerates the car to a desired speed using throttle and boost + car_speed = agent.me.local(agent.me.velocity)[0] + t = (target_speed * direction) - car_speed + agent.controller.throttle = cap((t ** 2) * sign(t) / 1000, -1.0, 1.0) + if use_boost: + agent.controller.boost = True if t > 150 and car_speed < 2275 and agent.controller.throttle == 1.0 else False + return car_speed + + +def in_field(point, radius): + # determines if a point is inside the standard soccer field + point = Vector3(abs(point[0]), abs(point[1]), abs(point[2])) + if point[0] > 4080 - radius: + return False + elif point[1] > 5900 - radius: + return False + elif point[0] > 880 - radius and point[1] > 5105 - radius: + return False + elif point[0] > 2650 and point[1] > -point[0] + 8025 - radius: + return False + return True + + +def find_slope(shot_vector, car_to_target): + # Finds the slope of your car's position relative to the shot vector (shot vector is y axis) + # 10 = you are on the axis and the ball is between you and the direction to shoot in + # -10 = you are on the wrong side + # 1.0 = you're about 45 degrees offcenter + d = shot_vector.dot(car_to_target) + e = abs(shot_vector.cross((0, 0, 1)).dot(car_to_target)) + return cap(d / e if e != 0 else 10 * sign(d), -3.0, 3.0) + + +def post_correction(ball_location, left_target, right_target): + # this function returns target locations that are corrected to account for the ball's radius + # If the left and right post swap sides, a goal cannot be scored + ball_radius = 120 # We purposly make this a bit larger so that our shots have a higher chance of success + goal_line_perp = (right_target - left_target).cross((0, 0, 1)) + left = left_target + ((left_target - ball_location).normalize().cross((0, 0, -1)) * ball_radius) + right = right_target + ((right_target - ball_location).normalize().cross((0, 0, 1)) * ball_radius) + left = left_target if (left - left_target).dot(goal_line_perp) > 0.0 else left + right = right_target if (right - right_target).dot(goal_line_perp) > 0.0 else right + swapped = True if (left - ball_location).normalize().cross((0, 0, 1)).dot( + (right - ball_location).normalize()) > -0.1 else False + return left, right, swapped + + +def quadratic(a, b, c): + # Returns the two roots of a quadratic + inside = math.sqrt((b * b) - (4 * a * c)) + if a != 0: + return (-b + inside) / (2 * a), (-b - inside) / (2 * a) + else: + return -1, -1 + + +def shot_valid(agent, shot, threshold=45): + # Returns True if the 
ball is still where the shot anticipates it to be + # First finds the two closest slices in the ball prediction to shot's intercept_time + # threshold controls the tolerance we allow the ball to be off by + slices = Ball.get_ball_prediction().slices + soonest = 0 + latest = len(slices) - 1 + while len(slices[soonest:latest + 1]) > 2: + midpoint = (soonest + latest) // 2 + if slices[midpoint].game_seconds > shot.intercept_time: + latest = midpoint + else: + soonest = midpoint + # preparing to interpolate between the selected slices + dt = slices[latest].game_seconds - slices[soonest].game_seconds + time_from_soonest = shot.intercept_time - slices[soonest].game_seconds + slopes = (Vector3(slices[latest].physics.location) - Vector3(slices[soonest].physics.location)) * (1 / dt) + # Determining exactly where the ball will be at the given shot's intercept_time + predicted_ball_location = Vector3(slices[soonest].physics.location) + (slopes * time_from_soonest) + # Comparing predicted location with where the shot expects the ball to be + return (shot.ball_location - predicted_ball_location).magnitude() < threshold + + +def side(x): + # returns -1 for blue team and 1 for orange team + if x == 0: + return -1 + return 1 + + +def sign(x): + # returns the sign of a number, -1, 0, +1 + if x < 0.0: + return -1 + elif x > 0.0: + return 1 + else: + return 0.0 + + +def steerPD(angle, rate): + # A Proportional-Derivative control loop used for defaultPD + return cap(((35 * (angle + rate)) ** 3) / 10, -1.0, 1.0) + + +def lerp(a, b, t): + # Linearly interpolate from a to b using t + # For instance, when t == 0, a is returned, and when t == 1, b is returned + # Works for both numbers and Vector3s + return (b - a) * t + a + + +def invlerp(a, b, v): + # Inverse linear interpolation from a to b with value v + # For instance, it returns 0 if v == a, and returns 1 if v == b, and returns 0.5 if v is exactly between a and b + # Works for both numbers and Vector3s + return (v - a) / (b - a) diff --git a/src/main.py b/src/main.py new file mode 100644 index 0000000..91b0f0b --- /dev/null +++ b/src/main.py @@ -0,0 +1,88 @@ +import configparser +import os +import pathlib +from strategy.drone import Drone +from physics.simulations.base_sim import BaseSim +from rlbot.agents.hivemind.python_hivemind import PythonHivemind +from rlbot.utils.structures.game_data_struct import GameTickPacket +from rlbot.utils.structures.bot_input_struct import PlayerInput +from scenario.base_scenario import BaseScenario +from strategy.base_ccp import SharedInfo, BaseCoach +from typing import Dict +from world import World + +# For dynamic importing - They are required! +from strategy.coaches import * +from scenario import * + +CONFIG_FILE = os.path.join(pathlib.Path(__file__).parent.absolute(), 'settings', 'default.ini') + + +class TheHivemind(PythonHivemind, SharedInfo): + """"The Rocket League entry point for the hivemind bot system.""" + + def __init__(self, agent_metadata_queue, quit_event, options): + + # Read the config file + config = configparser.ConfigParser() + config.read(CONFIG_FILE) + self.config = config + + # Check if testing mode is enabled + self.test = config['general']['mode'] == "TEST" + if self.test: + self.scenario: BaseScenario = None + + self.coach: BaseCoach = None + + super().__init__(agent_metadata_queue, quit_event, options) + + def initialize_hive(self, packet: GameTickPacket) -> None: + """"Initialization of the Hivemind. Set the Selector here. 
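Before moving on to main.py, a quick usage sketch for the lerp/invlerp helpers that close out utils.py above. The two functions are re-declared inline so the snippet runs standalone; the boost-to-throttle mapping is just an invented example.

def lerp(a, b, t):
    # Linearly interpolate from a to b using t (same formula as gosling/utils.py above)
    return (b - a) * t + a

def invlerp(a, b, v):
    # Inverse of lerp: where does v sit between a and b, as a fraction?
    return (v - a) / (b - a)

# Map a boost amount (0..100) onto an arbitrary 0.4..1.0 scale and back again.
boost = 25
scale = lerp(0.4, 1.0, boost / 100)    # 0.55
back = invlerp(0.4, 1.0, scale) * 100  # 25.0
print(scale, back)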
+ + :param packet: GameTickPacket instance containing information about the environment. + :type packet: GameTickPacket + """ + + # Setting information that is shared throughout the STP model and the simulations + our_team_index = packet.game_cars[next(iter(self.drone_indices))].team # Your team + SharedInfo.world = World(packet, self.get_field_info()) + SharedInfo.drones = [Drone(f"Drone {i}", our_team_index, i) for i in self.drone_indices] + BaseSim.world = SharedInfo.world + BaseSim.agent = self + + # Reading selector dynamically from cfg file + selector = globals()[self.config['strategy']['coach']] + self.coach = selector() + + # Reading scenario dynamically from cfg file + if self.test: + scenario = globals()[self.config['test']['scenario']] + self.scenario = scenario(packet) + self.set_game_state(game_state=self.scenario.reset()) + + self.logger.info('Noodle hivemind initialized') + + def get_outputs(self, packet: GameTickPacket) -> Dict[int, PlayerInput]: + """This is called once every game step. + + :param packet: object containing information about the environment. + :type packet: GameTickPacket + :return: Dictionary containing this team its drone indices as keys, and PlayerInput objects as values. + :rtype packet: Dict[int, PlayerInput] + """ + if self.test: + new_game_state = self.scenario.reset_upon_condition(packet) + if new_game_state: + self.set_game_state(game_state=new_game_state) + self.coach = globals()[self.config['strategy']['coach']]() + return {drone.index: drone.get_player_input() for drone in self.drones} + + # Updates the world model with the current packet + self.world.update_obs(packet, self.get_field_info()) + + # Perform all steps in our STP model + self.coach.step() + + # Return the inputs for each drone + return {drone.index: drone.get_player_input() for drone in self.drones} diff --git a/src/physics/control/__init__.py b/src/physics/control/__init__.py new file mode 100644 index 0000000..4c21013 --- /dev/null +++ b/src/physics/control/__init__.py @@ -0,0 +1,4 @@ +from physics.control.base import BaseController +from physics.control.steer import PointPD + +__all__ = ["BaseController", "PointPD"] diff --git a/src/physics/control/base.py b/src/physics/control/base.py new file mode 100644 index 0000000..7d18e7c --- /dev/null +++ b/src/physics/control/base.py @@ -0,0 +1,17 @@ +from abc import ABC, abstractmethod +from strategy.drone import Drone + + +class BaseController(ABC): + """"Base template for controllers. + + :param drone: The drone agent + :type drone: Drone + """ + + def __init__(self, drone: Drone): + self.drone = drone + + @abstractmethod + def get_controls(self, *args): + """"Sets controls on a drone to what you are controlling.""" diff --git a/src/physics/control/steer.py b/src/physics/control/steer.py new file mode 100644 index 0000000..ff44c9b --- /dev/null +++ b/src/physics/control/steer.py @@ -0,0 +1,22 @@ +from physics.control import * +from physics.math import Orientation3, Vec3 + + +class PointPD(BaseController): + """"For controlling the position and orientation of a drone. + + :param drone: The drone agent + :type drone: Drone + """ + + def get_controls(self, orientation: Orientation3, target: Orientation3, direction: float = 1): + """"Controls a drone to a position and orientation. + + :param orientation: The current orientation of the drone. + :type orientation: Orientation3 + :param target: The target orientation for this drone. + :type target: Orientation3 + :param direction: Whether the car is moving forward or backwards. 
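An aside on configuration, relating back to TheHivemind above: the settings/*.ini files themselves are not shown in this excerpt, but from the keys main.py reads, a config of roughly this shape is expected. Section and key names come from main.py; the coach and scenario values are assumptions that would have to match class names importable from strategy.coaches and scenario.

import configparser

# Hypothetical contents mirroring the keys main.py reads; the real settings/*.ini files
# are not part of this excerpt, so the values here are illustrative only.
config = configparser.ConfigParser()
config.read_string("""
[general]
mode = TEST

[strategy]
coach = MrIntercept

[test]
scenario = Intercept
""")

assert config['general']['mode'] == "TEST"  # enables scenario-driven testing in main.py
print(config['strategy']['coach'])          # looked up via globals() in initialize_hive
print(config['test']['scenario'])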
+ :type direction: float + """ + # TODO: WIP. On hold - Not part of Milestone 1 diff --git a/src/physics/control/throttle.py b/src/physics/control/throttle.py new file mode 100644 index 0000000..8582324 --- /dev/null +++ b/src/physics/control/throttle.py @@ -0,0 +1 @@ +# TODO: WIP. On hold - Not part of Milestone 1 diff --git a/src/physics/math/__init__.py b/src/physics/math/__init__.py new file mode 100644 index 0000000..0bb002d --- /dev/null +++ b/src/physics/math/__init__.py @@ -0,0 +1,4 @@ +from physics.math.matrix3 import Orientation3 +from physics.math.vector3 import Vec3 + +__all__ = ["Orientation3", "Vec3"] diff --git a/src/physics/math/matrix3.py b/src/physics/math/matrix3.py new file mode 100644 index 0000000..1f6c10d --- /dev/null +++ b/src/physics/math/matrix3.py @@ -0,0 +1,54 @@ +import math +from physics.math.vector3 import Vec3 + + +class Orientation3: + """The Orientation3's sole purpose is to convert roll, pitch, and yaw data from the gametickpaket into an orientation matrix + An orientation matrix contains 3 Vector3s + - Matrix3[0] is the "forward" direction of a given car + - Matrix3[1] is the "left" direction of a given car + - Matrix3[2] is the "up" direction of a given car + If you have a distance between the car and some object, ie ball.location - car.location, + you can convert that to local coordinates by dotting it with this matrix + ie: local_ball_location = Orientation3 * (ball.location - car.location) + + :param : Rotation around the Y axis (left / transverse) + :type pitch: float + :param yaw: Rotation around the Z axis (upward / vertical) + :type yaw: float + :param roll: Rotation around the X axis (forward / longitudinal) + :type roll: float + """ + + def __init__(self, pitch: float, yaw: float, roll: float): + cp = math.cos(pitch) + sp = math.sin(pitch) + cy = math.cos(yaw) + sy = math.sin(yaw) + cr = math.cos(roll) + sr = math.sin(roll) + + # List of 3 vectors, each descriping the direction of an axis: Forward, Left, and Up + self.data = [ + Vec3(cp * cy, cp * sy, sp), + Vec3(cy * sp * sr - cr * sy, sy * sp * sr + cr * cy, -cp * sr), + Vec3(-cr * cy * sp - sr * sy, -cr * sy * sp + sr * cy, cp * cr)] + self.forward, self.left, self.up = self.data + + def __getitem__(self, key: int) -> Vec3: + """Retrieves either the forward / left / up vector of this orientation matrix.""" + return self.data[key] + + def __mul__(self, other: Vec3) -> Vec3: + """Performs default matrix multiplications.""" + if isinstance(other, Vec3): + return Vec3(self.forward * other, self.left * other, self.up * other) + + def __str__(self) -> str: + """Returns a 4 decimal accurate string representation of this orientation matrix.""" + dec = 4 + return f"{str(round(self.data[0][0], dec)).rjust(dec + 3)}, {str(round(self.data[0][1], dec)).rjust(dec + 3)}," \ + f" {str(round(self.data[0][2], dec)).rjust(dec + 3)}\n{str(round(self.data[1][0], dec)).rjust(dec + 3)}, " \ + f"{str(round(self.data[1][1], dec)).rjust(dec + 3)}, {str(round(self.data[1][2], dec)).rjust(dec + 3)}\n" \ + f"{str(round(self.data[2][0], dec)).rjust(dec + 3)}, {str(round(self.data[2][1], dec)).rjust(dec + 3)}, " \ + f"{str(round(self.data[2][2], dec)).rjust(dec + 3)}\n" diff --git a/src/physics/math/vector3.py b/src/physics/math/vector3.py new file mode 100644 index 0000000..83632c4 --- /dev/null +++ b/src/physics/math/vector3.py @@ -0,0 +1,207 @@ +from __future__ import annotations +import math +from gosling.objects import Vector3 as GoslingsVector3 +from rlbot.utils.game_state_util import Vector3 +from typing import 
Union, Tuple, List + +TUPLE_VEC = Tuple[Union[float, int], Union[float, int], Union[float, int]] +LIST_VEC = List[Union[float, int]] + + +class Vec3(Vector3): + + @staticmethod + def from_other_vec( + other: Union[Vec3, Vector3, TUPLE_VEC, LIST_VEC]): + """"Creates a Vec3 from a different Vector container for compatibility reasons.""" + if isinstance(other, list) or isinstance(other, tuple) or isinstance(other, Vec3): + return Vec3(other[0], other[1], other[2]) + else: + return Vec3(other.x, other.y, other.z) + + def magnitude(self) -> float: + """Magnitude() returns the length of the vector. + + :return: The magnitude of this vector. + :rtype: float""" + return math.sqrt((self.x * self.x) + (self.y * self.y) + (self.z * self.z)) + + def normalize(self, return_magnitude: bool = False) -> Union[Vec3, Tuple[Vec3, float]]: + """Normalize() returns a Vector3 that shares the same direction but has a length of 1.0 + Normalize(True) can also be used if you'd like the length of this Vector3 (used for optimization). + + :param return_magnitude: Whether you want to return the magnitude as well. + :type return_magnitude: bool + :return: Either a normalized vector or a tuple with a normalized vector and its original magnitude. + :rtype: Vec3 + """ + magnitude = self.magnitude() + if magnitude != 0: + if return_magnitude: + return Vec3(self.x / magnitude, self.y / magnitude, self.z / magnitude), magnitude + return Vec3(self.x / magnitude, self.y / magnitude, self.z / magnitude) + if return_magnitude: + return Vec3(0, 0, 0), 0 + return Vec3(0, 0, 0) + + def cross(self, vector: Union[Vec3, Vector3, TUPLE_VEC, LIST_VEC]) -> Vec3: + """Cross product between this vector and a given vector. + + :param vector: The other vector + :type vector: Vec3 + :return: A vector perpendicular to this vector and the given vector. + :rtype: Vec3 + """ + if isinstance(vector, Vector3): + return Vec3((self.y * vector.z) - (self.z * vector.y), (self.z * vector.x) - (self.x * vector.z), + (self.x * vector.y) - (self.y * vector.x)) + else: + return Vec3((self.y * vector[2]) - (self.z * vector[1]), (self.z * vector[0]) - (self.x * vector[2]), + (self.x * vector[1]) - (self.y * vector[0])) + + def flatten(self) -> Vec3: + """Sets Z (Vector3[2]) to 0. + + :return: A vector with Z=0. + :rtype: Vec3""" + return Vec3(self.x, self.y, 0) + + def angle_2d(self, vector: Union[Vec3, Vector3, TUPLE_VEC, LIST_VEC]) -> float: + """Returns the angle between this Vector3 and another Vector in 2D. + + :param vector: Vec3 + :type vector: Vec3 + :return: Angle between this vector and the given vector in 2D. + :rtype: float""" + if not isinstance(vector, Vec3): + vector = Vec3.from_other_vec(vector) + + return math.acos( + (self.flatten() * vector.flatten()) / (self.flatten().magnitude() * vector.flatten().magnitude())) + + def clamp(self, start: Union[Vec3, Vector3, TUPLE_VEC, LIST_VEC], + end: Union[Vec3, Vector3, TUPLE_VEC, LIST_VEC]) -> Vec3: + """Similar to integer clamping, Vector3's clamp() forces the Vector3's direction between a start and end Vector3 + Such that Start < Vector3 < End in terms of clockwise rotation. Note that this is only 2D, in the x and y axis. + + :param start: The minimal rotation in terms of clockwise rotation from start to end in 2d + :type start: Vec3 + :param end: The maximal rotation in terms of clockwise rotation from start to end in 2d + :type end: Vec3 + :return: The resulting vector clamped between the given angles. 
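To make the local-coordinate convention from Orientation3 and Vec3 above concrete, a small usage sketch. It assumes the src/ package from this patch and the rlbot dependency are importable; the numbers are arbitrary.

from physics.math import Orientation3, Vec3

ball_location = Vec3(0, 1000, 93)
car_location = Vec3(0, 0, 17)
car_orientation = Orientation3(pitch=0.0, yaw=1.57, roll=0.0)  # facing roughly +y

# World-space offset to the ball, then the same offset in the car's local frame:
to_ball = ball_location - car_location
local_ball = car_orientation * to_ball  # forward / left / up components, as described above

print(to_ball)     # roughly X: 0, Y: 1000, Z: 76
print(local_ball)  # roughly X: 1000 (ahead), Y: ~1 (no sideways offset), Z: 76 (above)
print(to_ball * to_ball)                # dot product with itself, i.e. the squared magnitude
print(to_ball.normalize().magnitude())  # 1.0 up to float precision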
+ :rtype: Vec3 + """ + if not isinstance(start, Vec3): + start = Vec3.from_other_vec(start) + + if not isinstance(end, Vec3): + end = Vec3.from_other_vec(end) + + s = self.normalize() + right = s * (end.cross(Vec3(0, 0, -1))) < 0 + left = s * (start.cross(Vec3(0, 0, -1))) > 0 + if (right and left) if end * (start.cross(Vec3(0, 0, -1))) > 0 else (right or left): + return self + if start * s < end * s: + return end + return start + + # def angle_3d(self, vector) -> float: + # """ Returns the angle between this Vector3 and another Vector3 """ + # return math.acos((self * vector) / (self.magnitude() * vector.magnitude())) + + # def render(self): + # """ Returns a list with the x and y values, to be used with pygame """ + # return [self.x, self.y] + + # def copy(self): + # """ Returns a copy of this Vector3 """ + # return Vec3(self.data[:]) + + # def rotate_2d(self, angle): + # """Rotates this Vector3 by the given angle in radians + # Note that this is only 2D, in the x and y axis""" + # return Vec3((math.cos(angle) * self.x) - (math.sin(angle) * self.y), + # (math.sin(angle) * self.x) + (math.cos(angle) * self.y), self.z) + + # def element_wise_division(self, vector): + # if isinstance(vector, Vec3) or len(vector) > 0: + # return Vec3(self[0] / vector[0], self[1] / vector[1], self[2] / vector[2]) + # raise TypeError("This cannot be done on a vector!") + # + # def element_wise_multiplication(self, vector): + # if isinstance(vector, Vec3) or len(vector) > 0: + # return Vec3(self[0] * vector[0], self[1] * vector[1], self[2] * vector[2]) + # raise TypeError("This cannot be done on a vector!") + + def __getitem__(self, item: int) -> float: + if item == 0: + return self.x + elif item == 1: + return self.y + else: + return self.z + + def __setitem__(self, key: int, value: float): + if key == 0: + self.x = value + elif key == 1: + self.y = value + else: + self.z = value + + def __str__(self) -> str: + return f"X: {self.x}, Y: {self.y}, Z:{self.z}" + + def __repr__(self) -> str: + return f"X: {self.x}, Y: {self.y}, Z:{self.z}" + + def __eq__(self, other: Union[Vec3, Vector3, TUPLE_VEC, LIST_VEC]) -> bool: + if isinstance(other, Vector3): + return (self.x == other.x) * (self.y == other.y) * (self.z == other.z) + else: + return (self.x == other[0]) * (self.y == other[1]) * (self.z == other[2]) + + def __add__(self, other: Union[float, int, Vec3, Vector3, TUPLE_VEC, LIST_VEC]) -> Vec3: + if isinstance(other, int) or isinstance(other, float): + return Vec3(self.x + other, self.y + other, self.z + other) + elif isinstance(other, Vector3): + return Vec3(self.x + other.x, self.y + other.y, self.z + other.z) + else: + return Vec3(self.x + other[0], self.y + other[1], self.z + other[2]) + + __radd__ = __add__ + + def __sub__(self, value: Union[float, int, Vec3, Vector3, TUPLE_VEC, LIST_VEC]) -> Vec3: + if isinstance(value, int) or isinstance(value, float): + return Vec3(self[0] - value, self[1] - value, self[2] - value) + if isinstance(value, Vec3): + return Vec3(self[0] - value[0], self[1] - value[1], self[2] - value[2]) + if isinstance(value, Vector3): + return Vec3(self[0] - value.x, self[1] - value.y, self[2] - value.z) + + __rsub__ = __sub__ + + def __neg__(self) -> Vec3: + return Vec3(-self[0], -self[1], -self[2]) + + def __mul__(self, value: Union[float, Vec3, Vector3]) -> Union[float, Vec3]: + if isinstance(value, int) or isinstance(value, float): + return Vec3(self[0] * value, self[1] * value, self[2] * value) + if isinstance(value, Vec3): + return self[0] * value[0] + self[1] * value[1] + self[2] * 
value[2] + if isinstance(value, Vector3): + return self[0] * value.x + self[1] * value.y + self[2] * value.z + + __rmul__ = __mul__ + + def __truediv__(self, value: float) -> Vec3: + if isinstance(value, int) or isinstance(value, float): + return Vec3(self[0] / value, self[1] / value, self[2] / value) + raise TypeError("This cannot be done on a vector!") + + def __rtruediv__(self, value): + raise TypeError("This cannot be done on a vector!") + + def __len__(self) -> int: + return 3 diff --git a/src/physics/simulations/__init__.py b/src/physics/simulations/__init__.py new file mode 100644 index 0000000..18e3ac0 --- /dev/null +++ b/src/physics/simulations/__init__.py @@ -0,0 +1,4 @@ +from physics.simulations.ball import Ball +from physics.simulations.base_sim import BaseSim + +__all__ = ["Ball", "BaseSim"] diff --git a/src/physics/simulations/ball.py b/src/physics/simulations/ball.py new file mode 100644 index 0000000..e90c3a0 --- /dev/null +++ b/src/physics/simulations/ball.py @@ -0,0 +1,78 @@ +from physics.simulations.base_sim import BaseSim +from rlbot.utils.structures.ball_prediction_struct import Slice, BallPrediction +from typing import Callable, Optional + +# field length(5120) + ball radius(93) = 5213 however that results in false positives +GOAL_THRESHOLD = 5235 + +# We will jump this number of frames when looking for a moment where the ball is inside the goal. +# Big number for efficiency, but not so big that the ball could go in and then back out during that +# time span. Unit is the number of frames in the ball prediction, and the prediction is at 60 frames per second. +GOAL_SEARCH_INCREMENT = 20 + + +class Ball(BaseSim): + """Ball simulation class. """ + + @staticmethod + def get_ball_prediction() -> BallPrediction: + """"Gets the BallPrediction object from Rlbot. + + :return: The BallPrediction object by Rlbot. + :rtype: BallPrediction""" + return Ball.agent.get_ball_prediction_struct() + + @staticmethod + def find_slice_at_time(game_time: float) -> Optional[Slice]: + """This will find the future position of the ball at the specified time. The returned + Slice object will also include the ball's velocity, etc. + + :param game_time: The time for which the Slice will be returned. + :type game_time: float + :return: Information about the ball in a future timestamp. + :rtype: Slice, optional + """ + ball_prediction = Ball.get_ball_prediction() + + start_time = ball_prediction.slices[0].game_seconds + approx_index = int((game_time - start_time) * 60) # We know that there are 60 slices per second. + if 0 <= approx_index < ball_prediction.num_slices: + return ball_prediction.slices[approx_index] + return None + + @staticmethod + def predict_future_goal() -> Optional[Slice]: + """Analyzes the ball prediction to see if the ball will enter one of the goals. Only works on standard arenas. + Will return the first ball slice which appears to be inside the goal, or None if it does not enter a goal. + + :return: Information about the ball in a future timestamp. + :rtype: Slice, optional + """ + return Ball.find_matching_slice(0, lambda s: abs(s.physics.location.y) >= GOAL_THRESHOLD, + search_increment=20) + + @staticmethod + def find_matching_slice(start_index: int, predicate: Callable[[Slice], bool], + search_increment: int = 1) -> Optional[Slice]: + """Tries to find the first slice in the ball prediction which satisfies the given predicate. For example, + you could find the first slice below a certain height. 
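Picking up the example the docstring above suggests (the first slice below a certain height), a hedged sketch of how find_matching_slice might be called once BaseSim.agent has been wired up by the hivemind. The wrapper function is illustrative and not part of the patch.

from physics.simulations.ball import Ball

# Hypothetical usage from code that runs after TheHivemind.initialize_hive has set
# BaseSim.agent, e.g. inside a coach's step():
def time_until_ball_is_low(current_time: float, height: float = 200.0):
    """Return seconds until the ball first dips below `height`, or None if it never does."""
    low_slice = Ball.find_matching_slice(0, lambda s: s.physics.location.z < height,
                                         search_increment=10)
    if low_slice is None:
        return None
    return low_slice.game_seconds - current_time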
Will skip ahead through the packet by search_increment + for better efficiency, then backtrack to find the exact first slice. + + :param start_index: At how many steps in the future the algorithm starts checking. + :type start_index: float + :param predicate: A predicate for which a slice must satisfy the boolean. + :type predicate: Callable[[Slice], bool] + :param search_increment: The increment at which the future predictions are checked for the given predicate. + :type search_increment: int + :return: Information about the ball in a future timestamp. + :rtype: Slice, optional + """ + ball_prediction = Ball.get_ball_prediction() + + for coarse_index in range(start_index, ball_prediction.num_slices, search_increment): + if predicate(ball_prediction.slices[coarse_index]): + for j in range(max(start_index, coarse_index - search_increment), coarse_index): + ball_slice = ball_prediction.slices[j] + if predicate(ball_slice): + return ball_slice + return None diff --git a/src/physics/simulations/base_sim.py b/src/physics/simulations/base_sim.py new file mode 100644 index 0000000..209df9d --- /dev/null +++ b/src/physics/simulations/base_sim.py @@ -0,0 +1,8 @@ +from abc import ABC + + +class BaseSim(ABC): + """"Template for object simulations.""" + + world = None # For retrieving information about the world + agent = None # For agent only functions provided by RLBot diff --git a/src/physics/tests/math_test.py b/src/physics/tests/math_test.py new file mode 100644 index 0000000..b20396e --- /dev/null +++ b/src/physics/tests/math_test.py @@ -0,0 +1,235 @@ +import math +import unittest +from physics.math.vector3 import Vec3 +from physics.math.matrix3 import Orientation3 + + +class Vec3Tests(unittest.TestCase): + """"Unittests for the Vec3 class.""" + + """"Magic methods""" + + def test_constructor(self): + Vec3(10, 15, 20) + + def test_get_item(self): + vec = Vec3(10, 15, 20) + + self.assertEqual(vec[0], 10) + self.assertEqual(vec[1], 15) + self.assertEqual(vec[2], 20) + + def test_set_item(self): + vec = Vec3(10, 15, 20) + vec[0] = 5 + vec[1] = 10 + vec[2] = 0 + + self.assertEqual(vec[0], 5) + self.assertEqual(vec[1], 10) + self.assertEqual(vec[2], 0) + + def test_str(self): + vec = Vec3(10, 15, 20) + self.assertEqual(str(vec), "X: 10, Y: 15, Z:20") + + def test_repr(self): + vec = Vec3(10, 15, 20) + self.assertEqual(str(vec), "X: 10, Y: 15, Z:20") + + def test_eq(self): + vec_a = Vec3(10, 15, 20) + vec_b = Vec3(10, 15, 20) + vec_other = Vec3(10, 15, 25) + self.assertEqual(vec_a, vec_b) + self.assertNotEqual(vec_a, vec_other) + self.assertNotEqual(vec_other, vec_b) + + def test_add(self): + vec_a = Vec3(5, 5, 20) + vec_b = Vec3(10, 15, 5) + vec_other = Vec3(15, 20, 25) + + self.assertEqual(vec_a + vec_b, vec_other) + self.assertEqual(vec_a + [10, 15, 5], vec_other) + self.assertEqual(vec_a + 10, Vec3(15, 15, 30)) + self.assertNotEqual(vec_a + 10, vec_other) + + def test_radd(self): + vec_a = Vec3(5, 5, 20) + vec_b = Vec3(10, 15, 5) + vec_other = Vec3(15, 20, 25) + + self.assertEqual(vec_b + vec_a, vec_other) + self.assertEqual([10, 15, 5] + vec_a, vec_other) + self.assertEqual(10 + vec_a, Vec3(15, 15, 30)) + self.assertNotEqual(10 + vec_a, vec_other) + + def test_sub(self): + vec_a = Vec3(5, 5, 20) + vec_b = Vec3(10, 15, 5) + + self.assertEqual(vec_a - vec_b, Vec3(-5, -10, 15)) + + def test_neg(self): + vec_a = Vec3(5, 5, 20) + self.assertEqual(-vec_a, Vec3(-5, -5, -20)) + self.assertNotEqual(vec_a, Vec3(-5, -5, -20)) + + def test_mul(self): + vec_a = Vec3(5, 5, 20) + vec_b = Vec3(10, 15, 5) + 
self.assertEqual(vec_a * vec_b, 225) + self.assertEqual(vec_a * 5, Vec3(25, 25, 100)) + + mainvector = Vec3(10, 10, 10) + vec1 = Vec3(1, 1, 1) + vec2 = Vec3(0.5, 1, 1) + vec3 = Vec3(-2, 2, 2) + vec4 = Vec3(0, 1, 0) + self.assertEqual(mainvector * vec1, 30) + self.assertEqual(mainvector * vec2, 25) + self.assertEqual(mainvector * vec3, 20) + self.assertEqual(mainvector * vec4, 10) + + def test_rmul(self): + vec_a = Vec3(5, 5, 20) + vec_b = Vec3(10, 15, 5) + self.assertEqual(vec_b * vec_a, 225) + self.assertEqual(5 * vec_a, Vec3(25, 25, 100)) + + mainvector = Vec3(10, 10, 10) + vec1 = Vec3(1, 1, 1) + vec2 = Vec3(0.5, 1, 1) + vec3 = Vec3(-2, 2, 2) + vec4 = Vec3(0, 1, 0) + self.assertEqual(vec1 * mainvector, 30) + self.assertEqual(vec2 * mainvector, 25) + self.assertEqual(vec3 * mainvector, 20) + self.assertEqual(vec4 * mainvector, 10) + + def test_truediv(self): + # TODO: + pass + + def test_rtruediv(self): + # TODO: + pass + + def test_len(self): + vec3 = Vec3(0, 1, 2) + self.assertEqual(len(vec3), 3) + + """Functions""" + + def test_magnitude(self): + # TODO + vector = Vec3(10, 15, 20).magnitude() + self.assertEqual(vector, math.sqrt(725)) + self.assertEqual(Vec3(1, 1, 0.5).magnitude(), 1.5) + + def test_normalize(self): + vector = Vec3(1, 1, 1).normalize() + ans = 1 / math.sqrt(3) + self.assertAlmostEqual(vector.normalize()[0], ans, 5) + self.assertAlmostEqual(vector.normalize()[1], ans, 5) + self.assertAlmostEqual(vector.normalize()[2], ans, 5) + + def test_cross(self): + vec1 = Vec3(10, 15, 20) + vec2 = Vec3(1, 1, 1) + vec3 = Vec3(2, -5, 10) + self.assertEqual(vec1.cross(vec2), Vec3(-5, 10, -5)) + self.assertEqual(vec1.cross(vec3), Vec3(250, -60, -80)) + + def test_flatten(self): + vec1 = Vec3(1, 1, 1) + self.assertEqual(vec1.flatten(), Vec3(1, 1, 0)) + + def test_angle_2d(self): + vec1 = Vec3(1, 0, 0) + vec2 = Vec3(1, 1, 0) + vec3 = Vec3(1, -1, 0) + self.assertAlmostEqual(vec1.angle_2d(vec2), 0.7854, 3) + self.assertAlmostEqual(vec2.angle_2d(vec3), 1.5708, 3) + + def test_rotate_2d(self): + # TODO + pass + + def test_clamp(self): + # TODO + pass + + def test_element_wise_division(self): + # TODO: + pass + + def test_element_wise_multiplication(self): + # TODO: + pass + + def test_angle_3d(self): + # TODO: + pass + + def test_copy(self): + # TODO: + pass + + def test_render(self): + # TODO: + pass + + +class Orientation3Tests(unittest.TestCase): + """"Unittests for the Vec3 class.""" + + """"Magic methods""" + + def test_constructor(self): + Orientation3(30, 60, 45) + + def test_get_item(self): + ori = Orientation3(1, 0.4, 0.2) + + vec_0 = ori[0] + vec_1 = ori[1] + vec_2 = ori[2] + + self.assertAlmostEqual(vec_0[0], 0.4977, 3) + self.assertAlmostEqual(vec_0[1], 0.2104, 3) + self.assertAlmostEqual(vec_0[2], 0.8415, 3) + self.assertAlmostEqual(vec_1[0], -0.2277, 3) + self.assertAlmostEqual(vec_1[1], 0.9678, 3) + self.assertAlmostEqual(vec_1[2], -0.1073, 3) + self.assertAlmostEqual(vec_2[0], -0.837, 3) + self.assertAlmostEqual(vec_2[1], -0.1382, 3) + self.assertAlmostEqual(vec_2[2], 0.5295, 3) + + def test_mul(self): + ori = Orientation3(1, 0, 0) + + vec = Vec3(1, 0, 0) + + res = ori * vec + self.assertAlmostEqual(res[0], 0.5403, 3) + self.assertAlmostEqual(res[1], 0, 3) + self.assertAlmostEqual(res[2], -0.8415, 3) + + ori2 = Orientation3(1, 0.4, 0.2) + vec2 = Vec3(7.65, 2.64, 3.50) + + res2 = ori2 * vec2 + + self.assertAlmostEqual(res2[0], 7.3081, 3) + self.assertAlmostEqual(res2[1], 0.4375, 3) + self.assertAlmostEqual(res2[2], -4.9146, 3) + + def test_str(self): + ori = Orientation3(1, 
0.4, 0.2) + str(ori) + + +if __name__ == '__main__': + unittest.main() diff --git a/src/scenario/__init__.py b/src/scenario/__init__.py new file mode 100644 index 0000000..5850306 --- /dev/null +++ b/src/scenario/__init__.py @@ -0,0 +1,10 @@ +from scenario.default_scenario import DefaultScenario +from scenario.shoot_ball_at_goal import ShootBallAtGoal +from scenario.dribble_easy import DribbleEasy +from scenario.dribble_hard import DribbleHard +from scenario.intercept import Intercept +from scenario.side_shot import SideShot +from scenario.ball_through_center import BallThroughCenter + +__all__ = ["DefaultScenario", "ShootBallAtGoal", "DribbleEasy", "DribbleHard", "SideShot", "Intercept", + "BallThroughCenter"] diff --git a/src/scenario/ball_through_center.py b/src/scenario/ball_through_center.py new file mode 100644 index 0000000..2fae4de --- /dev/null +++ b/src/scenario/ball_through_center.py @@ -0,0 +1,92 @@ +from numpy import pi, cos, sin +from numpy.random import uniform, seed +from physics.math import Vec3 +from rlbot.utils.structures.game_data_struct import GameTickPacket +from rlbot.utils.game_state_util import * +from scenario.base_scenario import BaseScenario +from typing import Optional + + +class Constants: + timeout = 4.0 # Time after which the scenario resets + radia_ball = 92.75 # Radius of the ball + y_limit_before_reset = 5000 + + # Angle at which the ball will be shot to the center of the field + ball_min_angle = -0.5 * pi * 0.8 # 20 percent to the right of down + ball_max_angle = (1.0 + 0.5 * 0.8) * pi # 20 percent to the left of down + + # Ball initialization distance from center of the field + ball_min_distance = 2000 + ball_max_distance = 4000 + + # Ball speed + ball_min_speed = 500 + ball_max_speed = 2000 + + # Car initialization + location_car = Vec3(0, -2000, 0) + velocity_car = Vec3(0, -100, 0) + rotation_car = Rotator(0, 1.57, 0) # Half pi rad + + +class BallThroughCenter(BaseScenario): + """ For testing interception capabilities of the bot. Passes the ball through the center of the field under + different angles. The scenario ends when a timer is reached. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket + """ + + def __init__(self, packet: GameTickPacket): + super().__init__(packet) + seed(0) + + def reset_upon_condition(self, packet: GameTickPacket) -> Optional[GameState]: + """This is called every step and can be used to modify the state of the game when a condition is met. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket + :return: The state of the game if the scenario reset condition was met, otherwise none + :rtype: GameState, optional + """ + self._update_timer(packet) + + if packet.game_ball.physics.location.y > Constants.y_limit_before_reset or self.timer > Constants.timeout: + return self.reset() + else: + return None + + def reset(self) -> GameState: + """Reinitialise this scenario when called. Is called upon initialization and when the reset conditions are met. 
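A quick numeric sanity check of the polar placement computed in the reset body below: a random angle, distance and speed are converted to a ground position and a velocity pointing back at the centre of the field. The fixed numbers here are illustrative.

import math

# An angle of -pi/2 ("down" the field towards negative y) at distance 3000 with speed 1500
# should give a ball below the centre, moving straight back up towards it.
angle, distance, speed = -0.5 * math.pi, 3000.0, 1500.0

x, y = distance * math.cos(angle), distance * math.sin(angle)
vx, vy = speed * -math.cos(angle), speed * -math.sin(angle)

print(round(x), round(y))    # 0 -3000 : spawned 3000uu towards -y
print(round(vx), round(vy))  # 0 1500  : velocity pointing back at the centre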
+ + :return: The freshly initialized game state for this scenario + :rtype: GameState, optional + """ + self.timer = 0.0 + + # Ball random angle from center, with velocity towards center + angle_ball = uniform(Constants.ball_min_angle, Constants.ball_max_angle) + distance = uniform(Constants.ball_min_distance, Constants.ball_max_distance) + speed_ball = uniform(Constants.ball_min_speed, Constants.ball_max_speed) + # print(f'distance: {distance}, Angle: {angle_ball}, Speed: {speed_ball}') + + # Determine x and y position and velocity from generated random angle + x_ball, y_ball = distance * cos(angle_ball), distance * sin(angle_ball) + x_vel_ball, y_vel_ball = speed_ball * -cos(angle_ball), speed_ball * -sin(angle_ball) + + ball_state = BallState(physics=Physics( + location=Vec3(x_ball, y_ball, Constants.radia_ball), # Always on the ground + velocity=Vec3(x_vel_ball, y_vel_ball, 0), + rotation=Rotator(0, 0, 0), + angular_velocity=Vec3(0, 0, 0) + )) + + car_state = CarState(physics=Physics( + location=Constants.location_car, + velocity=Constants.velocity_car, + rotation=Constants.rotation_car, + angular_velocity=Vector3(0, 0, 0)), + boost_amount=100) + + return GameState(cars={0: car_state}, ball=ball_state) diff --git a/src/scenario/base_scenario.py b/src/scenario/base_scenario.py new file mode 100644 index 0000000..3d7d58d --- /dev/null +++ b/src/scenario/base_scenario.py @@ -0,0 +1,43 @@ +from abc import ABC, abstractmethod +from rlbot.utils.game_state_util import GameState +from rlbot.utils.structures.game_data_struct import GameTickPacket +from typing import Optional + + +class BaseScenario(ABC): + """Simulates in game scenarios. Used for testing. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket""" + + def __init__(self, packet: GameTickPacket): + self.timer = 0.0 + self.prev_time = packet.game_info.seconds_elapsed + self.delta_time = 0.0 + + @abstractmethod + def reset_upon_condition(self, packet: GameTickPacket) -> Optional[GameState]: + """This is called every step and can be used to modify the state of the game when a condition is met. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket + :return: The state of the game if the scenario reset condition was met, otherwise none. + :rtype: GameState, optional + """ + + def _update_timer(self, packet: GameTickPacket): + """Tracks the time for time based scenario events. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket""" + self.delta_time = packet.game_info.seconds_elapsed - self.prev_time + self.timer += self.delta_time + self.prev_time = packet.game_info.seconds_elapsed + + @abstractmethod + def reset(self) -> GameState: + """Reinitialise this scenario when called. Is called upon initialization and when the reset conditions are met. + + :return: The freshly initialized game state for this scenario + :rtype: GameState, optional + """ diff --git a/src/scenario/default_scenario.py b/src/scenario/default_scenario.py new file mode 100644 index 0000000..bf0e407 --- /dev/null +++ b/src/scenario/default_scenario.py @@ -0,0 +1,30 @@ +from rlbot.utils.game_state_util import GameState +from rlbot.utils.structures.game_data_struct import GameTickPacket +from typing import Optional +from scenario.base_scenario import BaseScenario + + +class DefaultScenario(BaseScenario): + """For simulating in game scenarios. This scenario does not alter the game in any way. 
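To make the BaseScenario contract above concrete, a hypothetical minimal subclass following the same pattern as the scenarios in this patch. It is not part of the patch, and to be selectable by name it would also need to be exported from scenario/__init__.py.

from rlbot.utils.game_state_util import GameState, BallState, Physics, Vector3, Rotator
from rlbot.utils.structures.game_data_struct import GameTickPacket
from scenario.base_scenario import BaseScenario
from typing import Optional


class BallOnCenterSpot(BaseScenario):
    """Hypothetical example: drop the ball above the centre spot and re-drop it every five seconds."""

    TIMEOUT = 5.0

    def reset_upon_condition(self, packet: GameTickPacket) -> Optional[GameState]:
        self._update_timer(packet)
        if self.timer > self.TIMEOUT:
            return self.reset()
        return None

    def reset(self) -> GameState:
        self.timer = 0.0
        return GameState(ball=BallState(physics=Physics(
            location=Vector3(0, 0, 500),
            velocity=Vector3(0, 0, 0),
            rotation=Rotator(0, 0, 0),
            angular_velocity=Vector3(0, 0, 0))))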
+ + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket""" + + def __init__(self, packet: GameTickPacket): + super().__init__(packet) + + def reset_upon_condition(self, packet: GameTickPacket) -> Optional[GameState]: + """This is called every step and can be used to modify the state of the game when a condition is met. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket + :return: The state of the game if the scenario reset condition was met, otherwise none. + :rtype: GameState, optional + """ + + def reset(self) -> GameState: + """Reinitialise this scenario when called. Is called upon initialization and when the reset conditions are met. + + :return: The freshly initialized game state for this scenario + :rtype: GameState, optional + """ diff --git a/src/scenario/dribble_easy.py b/src/scenario/dribble_easy.py new file mode 100644 index 0000000..bfbd1ac --- /dev/null +++ b/src/scenario/dribble_easy.py @@ -0,0 +1,81 @@ +from numpy import random +from physics.math import Vec3 +from rlbot.utils.game_state_util import * +from rlbot.utils.structures.game_data_struct import GameTickPacket +from scenario.base_scenario import BaseScenario +from typing import Optional + + +class Constants: + timeout = 8.0 + goal_location = Vec3(0, 5250, 600) + y_limit_before_reset = 5000 + + car_rotation = Rotator(0, 1.57, 0) + car_loc_x_range = [-2000, 2000] + car_loc_y_range = [-2100, -300] + car_loc_z = 17.02 + + # Fraction between car and goal location to place the ball on + goal_car_vec_to_ball_start_location_scale_range = [0.75, 0.95] + + +class DribbleEasy(BaseScenario): + """ For testing dribbling. Spawns the car in front of the ball perfectly in line with the orange goal. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket""" + + def __init__(self, packet: GameTickPacket): + super().__init__(packet) + random.seed(0) + + def reset_upon_condition(self, packet: GameTickPacket) -> Optional[GameState]: + """This is called every step and can be used to modify the state of the game when a condition is met. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket + :return: The state of the game if the scenario reset condition was met, otherwise none. + :rtype: GameState, optional + """ + self._update_timer(packet) + if packet.game_ball.physics.location.y > Constants.y_limit_before_reset or self.timer > Constants.timeout: + return self.reset() + else: + return None + + def reset(self) -> GameState: + """Reinitialise this scenario when called. 
Is called upon initialization and when the reset conditions are met.ç + + :return: The freshly initialized game state for this scenario + :rtype: GameState, optional + """ + self.timer = 0.0 + + car_pos = Vec3(random.uniform(*Constants.car_loc_x_range), + random.uniform(*Constants.car_loc_y_range), + Constants.car_loc_z) + + # Creates a vec between the goal location and the car location, to init the ball + random_range = Constants.goal_car_vec_to_ball_start_location_scale_range + length_fraction = (random_range[0] + (random_range[1] - random_range[0]) * random.random()) + ball_pos = (car_pos - Constants.goal_location) * length_fraction + Constants.goal_location + + car_state = CarState(physics=Physics( + location=car_pos, + velocity=Vector3(0, -100, 0), + rotation=Constants.car_rotation, + angular_velocity=Vector3(0, 0, 0)), + boost_amount=100) + + ball_state = BallState(physics=Physics( + location=ball_pos + Vec3(0, 0, 40), + velocity=Vector3(0, 0, 5), + rotation=Rotator(0, 0, 0), + angular_velocity=Vector3(0, 0, 0) + )) + + return GameState( + cars={0: car_state}, + ball=ball_state + ) diff --git a/src/scenario/dribble_hard.py b/src/scenario/dribble_hard.py new file mode 100644 index 0000000..18a343a --- /dev/null +++ b/src/scenario/dribble_hard.py @@ -0,0 +1,95 @@ +from numpy import random +from physics.math import Vec3 +from rlbot.utils.game_state_util import * +from rlbot.utils.structures.game_data_struct import GameTickPacket +from scenario.base_scenario import BaseScenario +from typing import Optional + + +class Constants: + timeout = 8.0 + goal_location = Vec3(0, 5250, 600) + y_limit_before_reset = 5000 + + # Absolute positions for the ball + ball_loc_x_range = [-1500, 1500] + ball_loc_y_range = [-1500, 1500] + ball_loc_z_range = [40, 160] + + ball_vel_xyz_range = [-300, 300] + + # Ranges relative to some offset from the ball pos for increased randomness + car_loc_x_range = [-300, 300] + car_loc_y_range = [-300, 300] + car_loc_z_range = [40, 160] + + car_vel_xyz_range = [-300, 300] + + car_rotation = Rotator(0, 0, 1.57) + car_offset_from_goal_ball_vec = 1500 + + +class DribbleHard(BaseScenario): + """ For testing dribbling. Spawns the car around the ball but with random distortions to make the challenge harder. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket""" + + def __init__(self, packet: GameTickPacket): + super().__init__(packet) + random.seed(0) + + def reset_upon_condition(self, packet: GameTickPacket) -> Optional[GameState]: + """This is called every step and can be used to modify the state of the game when a condition is met. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket + :return: The state of the game if the scenario reset condition was met, otherwise none. + :rtype: GameState, optional + """ + self._update_timer(packet) + if packet.game_ball.physics.location.y > Constants.y_limit_before_reset or self.timer > Constants.timeout: + return self.reset() + else: + return None + + def reset(self) -> GameState: + """Reinitialise this scenario when called. Is called upon initialization and when the reset conditions are met. 
+ + :return: The freshly initialized game state for this scenario + :rtype: GameState, optional + """ + self.timer = 0.0 + + # Pick a ball location within defined bounds + ball_pos = Vec3(random.uniform(*Constants.ball_loc_x_range), + random.uniform(*Constants.ball_loc_y_range), + random.uniform(*Constants.ball_loc_z_range)) + + ball_state = BallState(physics=Physics( + location=ball_pos, + velocity=Vec3(random.uniform(*Constants.ball_vel_xyz_range), + random.uniform(*Constants.ball_vel_xyz_range), + random.uniform(*Constants.ball_vel_xyz_range)), + rotation=Rotator(0, 0, 0), + angular_velocity=Vector3(0, 0, 0), + )) + + # Place the car relative to the ball, but with an offset and added randomness to make it harder + car_pos = \ + ball_pos + \ + Vec3(random.uniform(*Constants.car_loc_x_range), + random.uniform(*Constants.car_loc_y_range), + random.uniform(*Constants.car_loc_z_range)) + \ + Constants.car_offset_from_goal_ball_vec * (ball_pos - Constants.goal_location).normalize() + + car_state = CarState(physics=Physics( + location=car_pos, + velocity=Vec3(random.uniform(*Constants.car_vel_xyz_range), + random.uniform(*Constants.car_vel_xyz_range), + random.uniform(*Constants.car_vel_xyz_range)), + rotation=Constants.car_rotation, + angular_velocity=Vector3(0, 0, 0)), + boost_amount=100) + + return GameState(cars={0: car_state}, ball=ball_state) diff --git a/src/scenario/intercept.py b/src/scenario/intercept.py new file mode 100644 index 0000000..2a3529e --- /dev/null +++ b/src/scenario/intercept.py @@ -0,0 +1,78 @@ +from numpy import random +from numpy.random import uniform, seed +from physics.math import Vec3 +from rlbot.utils.game_state_util import * +from rlbot.utils.structures.game_data_struct import GameTickPacket +from scenario.base_scenario import BaseScenario +from typing import Optional + + +class Constants: + timeout = 4.0 + goal_location = Vec3(0, -5250, 600) + + ball_loc_x_range = [-1500, 1500] + ball_loc_y_range = [-2000, 0] + ball_loc_z_range = [100, 500] + + ball_vel_x_range = [-500, 500] + ball_vel_y_range = [-1500, -2500] + ball_vel_z_range = [-500, -500] + + car_rotation = Rotator(0, 1.57, 0) + y_limit_before_reset = -5000 + + +class Intercept(BaseScenario): + """ For testing interception capabilities of the bot. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket""" + + def __init__(self, packet: GameTickPacket): + super().__init__(packet) + seed(0) + + def reset_upon_condition(self, packet: GameTickPacket) -> Optional[GameState]: + """This is called every step and can be used to modify the state of the game when a condition is met. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket + :return: The state of the game if the scenario reset condition was met, otherwise none. + :rtype: GameState, optional + """ + self._update_timer(packet) + + if packet.game_ball.physics.location.y < Constants.y_limit_before_reset or self.timer > Constants.timeout: + return self.reset() + else: + return None + + def reset(self) -> GameState: + """Reinitialise this scenario when called. Is called upon initialization and when the reset conditions are met. 
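The car is respawned on its own goal line (the flattened Constants.goal_location) and the ball is launched from a random position around midfield with a velocity aimed roughly at that goal, so the bot has to drive out and intercept the shot before it arrives.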
+ + :return: The freshly initialized game state for this scenario + :rtype: GameState, optional + """ + self.timer = 0.0 + + car_state = CarState(physics=Physics( + location=Constants.goal_location.flatten(), + velocity=Vec3(0, -100, 0), + rotation=Constants.car_rotation, + angular_velocity=Vector3(0, 0, 0)), + boost_amount=100) + + # Normalize the direction we want the ball to go to, so we can manually define the speed + ball_state = BallState(physics=Physics( + location=Vec3(uniform(*Constants.ball_loc_x_range), + uniform(*Constants.ball_loc_y_range), + uniform(*Constants.ball_loc_z_range)), + velocity=Vec3(uniform(*Constants.ball_vel_x_range), + uniform(*Constants.ball_vel_y_range), + uniform(*Constants.ball_vel_z_range)), + rotation=Rotator(0, 0, 0), + angular_velocity=Vec3(0, 0, 0) + )) + + return GameState(cars={0: car_state}, ball=ball_state) diff --git a/src/scenario/shoot_ball_at_goal.py b/src/scenario/shoot_ball_at_goal.py new file mode 100644 index 0000000..98fda05 --- /dev/null +++ b/src/scenario/shoot_ball_at_goal.py @@ -0,0 +1,95 @@ +import numpy as np +import random +from physics.math import Vec3 +from rlbot.utils.game_state_util import * +from rlbot.utils.structures.game_data_struct import GameTickPacket +from scenario.base_scenario import BaseScenario +from typing import Optional + + +class Constants: + timeout = 5.0 + y_limit_before_reset = -5000 + goal_location = Vec3(0, -5250, 600) + + ball_velocity = 2500 + shot_target_x_range = [-850, 850] + shot_target_z_range = [1000, 2500] + + car_location = Vector3(0, goal_location.y, 17.02) + car_rotation = Rotator(0, 1.57, 0) + + ball_loc_x_range = [-2500, 2500] + ball_loc_y = 0 + ball_loc_z = 500 + + +class ShootBallAtGoal(BaseScenario): + """ Shoots ball from center to goal. + + :param packet: Information about the current game state needed for the timer + :type packet: GameTickPacket + """ + + def __init__(self, packet: GameTickPacket): + super().__init__(packet) + + def reset_upon_condition(self, packet: GameTickPacket) -> Optional[GameState]: + """This is called every step and can be used to modify the state of the game when a condition is met. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket + :return: The state of the game if the scenario reset condition was met, otherwise none. + :rtype: GameState, optional + """ + self._update_timer(packet) + if packet.game_ball.physics.location.y < Constants.y_limit_before_reset or self.timer > Constants.timeout: + return self.reset() + else: + return None + + def reset(self) -> GameState: + """Reinitialise this scenario when called. Is called upon initialization and when the reset conditions are met. 
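The car is respawned on the goal line of the goal under fire, the ball is placed at a random spot on the halfway line, and the shot velocity is the normalised ball-to-target vector (towards a random point in the plane of that goal) scaled to Constants.ball_velocity; the numpy/Vector3 conversion helpers at the bottom of this file exist for exactly this calculation.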
+ + :return: The freshly initialized game state for this scenario + :rtype: GameState, optional + """ + self.timer = 0.0 + car_state = CarState(physics=Physics( + location=Constants.car_location, + velocity=Vector3(0, 100, 0), + rotation=Constants.car_rotation, + angular_velocity=Vector3(0, 0, 0) + ), jumped=False, double_jumped=False, boost_amount=100) + + ball_pos = np.array([random.uniform(*Constants.ball_loc_x_range), + Constants.ball_loc_y, + Constants.ball_loc_z]) + + goal_target = np.array([random.uniform(*Constants.shot_target_x_range), + Constants.goal_location.y, + random.uniform(*Constants.shot_target_z_range)]) + + # Normalize the direction we want the ball to go to, so we can manually define the speed + dir_vector = np.subtract(goal_target, ball_pos) + direction = dir_vector / np.linalg.norm(dir_vector) + ball_velocity = direction * Constants.ball_velocity + + ball_state = BallState(physics=Physics( + location=np_vec3_to_vector3(ball_pos), + velocity=np_vec3_to_vector3(ball_velocity), + rotation=Rotator(0, 0, 0), + angular_velocity=Vector3(0, 0, 0) + )) + + return GameState(cars={0: car_state}, ball=ball_state) + + +def np_vec3_to_vector3(direction): + """ Converts numpy array to Vector3. """ + return Vector3(direction[0], direction[1], direction[2]) + + +def vector3_to_np_vec3(direction): + """ Converts Vector3 to numpy array. """ + return np.array([direction.x, direction.y, direction.z]) diff --git a/src/scenario/side_shot.py b/src/scenario/side_shot.py new file mode 100644 index 0000000..4e147fd --- /dev/null +++ b/src/scenario/side_shot.py @@ -0,0 +1,79 @@ +from numpy import random +from physics.math import Vec3 +from rlbot.utils.game_state_util import * +from rlbot.utils.structures.game_data_struct import GameTickPacket +from scenario.base_scenario import BaseScenario +from typing import Optional + + +class Constants: + timeout = 8.0 + y_limit_before_reset = 5000 + + line_y_range = [-1200, 1200] + line_x_side_offset = 2000 + line_x_side_range = [-300, 300] + + ball_loc_y_offset = 1000 + + car_rotation = Rotator(0, 1.57, 0) + + car_height = 17.02 + ball_radius = 92.75 + + +class SideShot(BaseScenario): + """ General testing. Spawns the ball somewhat to the side, and the car 1000 units away in y direction. Requires + slight redirection to get a proper shot at the orange goal. Always off center. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket""" + + def __init__(self, packet: GameTickPacket): + super().__init__(packet) + random.seed(0) + + def reset_upon_condition(self, packet: GameTickPacket) -> Optional[GameState]: + """This is called every step and can be used to modify the state of the game when a condition is met. + + :param packet: Update packet with information about the current game state + :type packet: GameTickPacket + :return: The state of the game if the scenario reset condition was met, otherwise none. + :rtype: GameState, optional + """ + self._update_timer(packet) + + if packet.game_ball.physics.location.y > Constants.y_limit_before_reset or self.timer > Constants.timeout: + return self.reset() + else: + return None + + def reset(self) -> GameState: + """Reinitialise this scenario when called. Is called upon initialization and when the reset conditions are met. 
+ + :return: The freshly initialized game state for this scenario + :rtype: GameState, optional + """ + self.timer = 0.0 + + left_or_right = (random.random() > 0.5) * 2 - 1 + x_location = random.uniform(*Constants.line_x_side_range) + left_or_right * Constants.line_x_side_offset + + car_pos = Vec3(x_location, random.uniform(*Constants.line_y_range), Constants.car_height) + ball_pos = Vec3(car_pos.x, car_pos.y + Constants.ball_loc_y_offset, Constants.ball_radius) + + car_state = CarState(physics=Physics( + location=car_pos, + velocity=Vector3(0, -100, 0), + rotation=Constants.car_rotation, + angular_velocity=Vector3(0, 0, 0)), + boost_amount=100) + + ball_state = BallState(physics=Physics( + location=ball_pos, + velocity=Vector3(0, 0, 1), + rotation=Rotator(0, 0, 0), + angular_velocity=Vector3(0, 0, 0) + )) + + return GameState(cars={0: car_state}, ball=ball_state) diff --git a/src/settings/default.ini b/src/settings/default.ini new file mode 100644 index 0000000..91bfdec --- /dev/null +++ b/src/settings/default.ini @@ -0,0 +1,16 @@ +[general] # General settings + +# Bot mode: DEFAULT/TEST +mode = DEFAULT + + +[test] # This is only used when mode equals TEST + +# Scenario to initialize +scenario = DefaultScenario + + +[strategy] # Settings for the STP model + +# Class name for the used Selector +coach = TripleRotations diff --git a/src/settings/dribble_easy_test.ini b/src/settings/dribble_easy_test.ini new file mode 100644 index 0000000..b208c6f --- /dev/null +++ b/src/settings/dribble_easy_test.ini @@ -0,0 +1,16 @@ +[general] # General settings + +# Bot mode: DEFAULT/TEST +mode = TEST + + +[test] # This is only used when mode equals TEST + +# Scenario to initialize +scenario = DribbleEasy + + +[strategy] # Settings for the STP model + +# Class name for the used Selector +coach = MrDribble diff --git a/src/settings/dribble_hard_test.ini b/src/settings/dribble_hard_test.ini new file mode 100644 index 0000000..c8ad470 --- /dev/null +++ b/src/settings/dribble_hard_test.ini @@ -0,0 +1,16 @@ +[general] # General settings + +# Bot mode: DEFAULT/TEST +mode = TEST + + +[test] # This is only used when mode equals TEST + +# Scenario to initialize +scenario = DribbleHard + + +[strategy] # Settings for the STP model + +# Class name for the used Selector +coach = MrDribble diff --git a/src/settings/intercept_ball.ini b/src/settings/intercept_ball.ini new file mode 100644 index 0000000..74baa48 --- /dev/null +++ b/src/settings/intercept_ball.ini @@ -0,0 +1,17 @@ +[general] # General settings + +# Bot mode: DEFAULT/TEST +mode = TEST + + +[test] # This is only used when mode equals TEST + +# Scenario to initialize +scenario = Intercept + + +[strategy] # Settings for the STP model + +# Class name for the used Selector +coach = MrIntercept + diff --git a/src/settings/keeper_test.ini b/src/settings/keeper_test.ini new file mode 100644 index 0000000..087f154 --- /dev/null +++ b/src/settings/keeper_test.ini @@ -0,0 +1,16 @@ +[general] # General settings + +# Bot mode: DEFAULT/TEST +mode = DEFAULT + + +[test] # This is only used when mode equals TEST + +# Scenario to initialize +scenario = ShootBallAtGoal + + +[strategy] # Settings for the STP model + +# Class name for the used Selector +coach = MrShadow diff --git a/src/settings/kickoff_captain.ini b/src/settings/kickoff_captain.ini new file mode 100644 index 0000000..33b94b4 --- /dev/null +++ b/src/settings/kickoff_captain.ini @@ -0,0 +1,16 @@ +[general] # General settings + +# Bot mode: DEFAULT/TEST +mode = TEST + + +[test] # This is only used when mode 
equals TEST + +# Scenario to initialize +scenario = DefaultScenario + + +[strategy] # Settings for the STP model + +# Class name for the used Selector +coach = MrKickoff diff --git a/src/settings/kickoff_demo.ini b/src/settings/kickoff_demo.ini new file mode 100644 index 0000000..dc9c0a7 --- /dev/null +++ b/src/settings/kickoff_demo.ini @@ -0,0 +1,16 @@ +[general] # General settings + +# Bot mode: DEFAULT/TEST +mode = TEST + + +[test] # This is only used when mode equals TEST + +# Scenario to initialize +scenario = DefaultScenario + + +[strategy] # Settings for the STP model + +# Class name for the used Selector +coach = KickoffDemoCoach diff --git a/src/settings/only_demo.ini b/src/settings/only_demo.ini new file mode 100644 index 0000000..8bf4a04 --- /dev/null +++ b/src/settings/only_demo.ini @@ -0,0 +1,16 @@ +[general] # General settings + +# Bot mode: DEFAULT/TEST +mode = TEST + + +[test] # This is only used when mode equals TEST + +# Scenario to initialize +scenario = DefaultScenario + + +[strategy] # Settings for the STP model + +# Class name for the used Selector +coach = MrDemo diff --git a/src/settings/only_prepare.ini b/src/settings/only_prepare.ini new file mode 100644 index 0000000..3f01144 --- /dev/null +++ b/src/settings/only_prepare.ini @@ -0,0 +1,16 @@ +[general] # General settings + +# Bot mode: DEFAULT/TEST +mode = TEST + + +[test] # This is only used when mode equals TEST + +# Scenario to initialize +scenario = DefaultScenario + + +[strategy] # Settings for the STP model + +# Class name for the used Selector +coach = MrPrepare diff --git a/src/settings/only_shadow.ini b/src/settings/only_shadow.ini new file mode 100644 index 0000000..e4e1e7c --- /dev/null +++ b/src/settings/only_shadow.ini @@ -0,0 +1,16 @@ +[general] # General settings + +# Bot mode: DEFAULT/TEST +mode = TEST + + +[test] # This is only used when mode equals TEST + +# Scenario to initialize +scenario = DefaultScenario + + +[strategy] # Settings for the STP model + +# Class name for the used Selector +coach = MrShadow diff --git a/src/settings/side_shot_test.ini b/src/settings/side_shot_test.ini new file mode 100644 index 0000000..eafbcee --- /dev/null +++ b/src/settings/side_shot_test.ini @@ -0,0 +1,16 @@ +[general] # General settings + +# Bot mode: DEFAULT/TEST +mode = TEST + + +[test] # This is only used when mode equals TEST + +# Scenario to initialize +scenario = SideShot + + +[strategy] # Settings for the STP model + +# Class name for the used Selector +coach = MrDribble diff --git a/src/setup.py b/src/setup.py new file mode 100644 index 0000000..89e5a7e --- /dev/null +++ b/src/setup.py @@ -0,0 +1,28 @@ +import setuptools + +# with open('../README.md', 'r') as fh: +# long_description = fh.read() + +setuptools.setup( + name='RLUtilities', + version='1.0.2', + author='Sam Mish', + author_email='samuelpmish@gmail.com', + description='tools for controlling Rocket League bots', + # long_description=long_description, + # long_description_content_type='text/markdown', + url='https://github.com/samuelpmish/rlutilities', + packages=['rlutilities'], + package_data={ + 'rlutilities': [ + '*.pyd', + '__init__.py', + '*.pyi', + ] + }, + python_requires='==3.7.*', + classifiers=[ + 'Programming Language :: Python :: 3.7', + 'Operating System :: Microsoft :: Windows' + ] +) diff --git a/src/strategy/__init__.py b/src/strategy/__init__.py new file mode 100644 index 0000000..924ae79 --- /dev/null +++ b/src/strategy/__init__.py @@ -0,0 +1,6 @@ +from strategy.coaches import * +from strategy.captains import * +from 
strategy.players import * +from strategy.base_ccp import BaseCoach, BasePlayer, BaseCaptain, SharedInfo + +__all__ = ["BaseCoach", "BaseCaptain", "BasePlayer", "SharedInfo"] diff --git a/src/strategy/base_ccp.py b/src/strategy/base_ccp.py new file mode 100644 index 0000000..dcb18bd --- /dev/null +++ b/src/strategy/base_ccp.py @@ -0,0 +1,70 @@ +from abc import ABC, abstractmethod +from rlbot.agents.base_agent import SimpleControllerState +from typing import Tuple +from world.world import World + + +class SharedInfo: + """This class is used to share the world and drone info on all CCP levels.""" + world: World = None # World model + drones = [] + + +class BaseCCP(ABC, SharedInfo): + """"Abstract template for entities in the CCP model.""" + + @abstractmethod + def step(self) -> bool: + """Run step behaviour returns whether the current entity is finished. + + :return: Done flag. + :rtype: bool + """ + + +class BaseCoach(BaseCCP): + """"Abstract template for coach level strategy classes. This level controls what the global goal of the team is + and what play will be made by the captain level.""" + + @abstractmethod + def step(self) -> bool: + """Run step behaviour returns whether the current entity is finished. + + :return: Done flag. + :rtype: bool + """ + + +class BaseCaptain(BaseCCP): + """"Abstract template for captain level strategy classes. This level gives roles to individual cars.""" + + @abstractmethod + def step(self) -> bool: + """Run step behaviour returns whether the current entity is finished. + + :return: Done flag. + :rtype: bool + """ + + +class BasePlayer(BaseCCP): + """"Abstract template for player level strategy classes. Defines individual player moves.""" + + def __init__(self, drone): + self._own_drone_idx = drone.index + super().__init__() + + @property + def drone(self): + """"Returns a reference to your own car from the world model. + + :return: Car component of the world model for the car matching this player routine instance. """ + return self.world.allies[self._own_drone_idx] + + @abstractmethod + def step(self) -> Tuple[SimpleControllerState, bool]: + """Run step behaviour returns whether the current entity is finished. + + :return: Controller for the corresponding car and a flag indicating whether it is finished. 
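A minimal hypothetical player, only to illustrate this contract (DriveForward is not part of this patch)::

    class DriveForward(BasePlayer):
        def step(self):
            controls = SimpleControllerState()
            controls.throttle = 1.0
            # self.drone is the Car component from the world model (see the property above)
            done = self.drone.physics.location.y > 0  # consider the move finished once past the halfway line
            return controls, done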
+ :rtype: (SimpleControllerState, bool) + """ diff --git a/src/strategy/captains/__init__.py b/src/strategy/captains/__init__.py new file mode 100644 index 0000000..789e8fd --- /dev/null +++ b/src/strategy/captains/__init__.py @@ -0,0 +1,13 @@ +from strategy.captains.all_drone_kickoff import AllDroneKickoff +from strategy.captains.kickoff_demolition import KickoffDemolition +from strategy.captains.all_drone_demo import AllDroneDemo +from strategy.captains.all_drone_shadow import AllDroneShadow +from strategy.captains.all_drone_dribble import AllDroneDribble +from strategy.captains.all_drone_get_boost import AllDroneGetBoost +from strategy.captains.defense import Defense +from strategy.captains.all_drone_intercept import AllDroneIntercept +from strategy.captains.kickoff import KickoffCaptain +from strategy.captains.attack import Attack + +__all__ = ['AllDroneKickoff', 'KickoffDemolition', 'AllDroneDemo', 'AllDroneGetBoost', 'AllDroneShadow', 'Defense', + 'AllDroneDribble', 'KickoffCaptain', 'Attack', 'AllDroneIntercept'] diff --git a/src/strategy/captains/all_drone_demo.py b/src/strategy/captains/all_drone_demo.py new file mode 100644 index 0000000..c97516e --- /dev/null +++ b/src/strategy/captains/all_drone_demo.py @@ -0,0 +1,34 @@ +from strategy.base_ccp import BaseCaptain +from strategy.players import * + + +class Status: + RUNNING = 0 + + +class AllDroneDemo(BaseCaptain): + """Assigns the Demo Player to every drone in the team.""" + + def __init__(self): + self.state = Status.RUNNING + + # Initial role assignment! + for drone in self.drones: + drone.assign(Demolition(drone)) + + def step(self): + """Return current state containing status. + + :return: Current State. + :rtype: bool + """ + # Loop over all the drones in this team + for drone in self.drones: + done = drone.step() + + # If state returns true if the state is not pending anymore (fail or success). + if done: + drone.assign(Demolition(drone)) + + # This play never ends + return False diff --git a/src/strategy/captains/all_drone_dribble.py b/src/strategy/captains/all_drone_dribble.py new file mode 100644 index 0000000..ec27e6a --- /dev/null +++ b/src/strategy/captains/all_drone_dribble.py @@ -0,0 +1,36 @@ +from strategy.base_ccp import BaseCaptain +from strategy.players import * +from physics.math import Vec3 + + +class Status: + RUNNING = 0 + + +class AllDroneDribble(BaseCaptain): + """"Assigns the Dribble player to every drone in the team.""" + + def __init__(self): + self.state = Status.RUNNING + self.goal_target = Vec3(0, 5250, 350) + + # Initial role assignment! + for drone in self.drones: + drone.assign(Dribble(self.goal_target)) + + def step(self): + """Return current state containing status. + + :return: Current State. + :rtype: bool + """ + # Loop over all the drones in this team + for drone in self.drones: + done = drone.step() + + # If state returns true if the state is not pending anymore (fail or success). 
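# (drone.step() returns True once that drone's player stack is empty, i.e. the assigned routine has finished or
# failed, so the captain immediately hands out a fresh Dribble towards the same goal target.)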
+ if done: + drone.assign(Dribble(self.goal_target)) + + # This play never ends + return False diff --git a/src/strategy/captains/all_drone_get_boost.py b/src/strategy/captains/all_drone_get_boost.py new file mode 100644 index 0000000..3bfcfb4 --- /dev/null +++ b/src/strategy/captains/all_drone_get_boost.py @@ -0,0 +1,34 @@ +from strategy.base_ccp import BaseCaptain +from strategy.players import * + + +class Status: + RUNNING = 0 + + +class AllDroneGetBoost(BaseCaptain): + """"Assigns the GetBoost player to every drone in the team.""" + + def __init__(self): + self.state = Status.RUNNING + + # Initial role assignment! + for drone in self.drones: + drone.assign(GetBoost()) + + def step(self): + """Return current state containing status. + + :return: Current State. + :rtype: bool + """ + # Loop over all the drones in this team + for drone in self.drones: + done = drone.step() + + # If state returns true if the state is not pending anymore (fail or success). + if done: + drone.assign(GetBoost()) + + # This play never ends + return False diff --git a/src/strategy/captains/all_drone_intercept.py b/src/strategy/captains/all_drone_intercept.py new file mode 100644 index 0000000..b1e6507 --- /dev/null +++ b/src/strategy/captains/all_drone_intercept.py @@ -0,0 +1,35 @@ +from strategy.base_ccp import BaseCaptain +from strategy.players import * + + +class Status: + RUNNING = 0 + + +class AllDroneIntercept(BaseCaptain): + """"Assigns the Intercept Player to every drone in the team.""" + + def __init__(self): + self.state = Status.RUNNING + + # Initial role assignment! + for drone in self.drones: + drone.flush_actions() + drone.assign(Intercept()) + + def step(self): + """Return current state containing status. + + :return: Current State. True if play ends. + :rtype: bool + """ + # Loop over all the drones in this team + for drone in self.drones: + done = drone.step() + + # If state returns true if the state is not pending anymore (fail or success). + if done: + drone.assign(Intercept()) + + # This play never ends + return False diff --git a/src/strategy/captains/all_drone_kickoff.py b/src/strategy/captains/all_drone_kickoff.py new file mode 100644 index 0000000..7f8b6f6 --- /dev/null +++ b/src/strategy/captains/all_drone_kickoff.py @@ -0,0 +1,35 @@ +from strategy.base_ccp import BaseCaptain +from strategy.players import * + + +class Status: + RUNNING = 0 + + +class AllDroneKickoff(BaseCaptain): + """"Assigns the Kickoff Player to every drone in the team.""" + + def __init__(self): + self.state = Status.RUNNING + + # Initial role assignment! + for drone in self.drones: + drone.flush_actions() + drone.assign(KickoffGosling()) + + def step(self): + """Return current state containing status. + + :return: Current State. + :rtype: bool + """ + # Loop over all the drones in this team + for drone in self.drones: + done = drone.step() + + # If state returns true if the state is not pending anymore (fail or success). + if done: + return True + + # This play never ends + return False diff --git a/src/strategy/captains/all_drone_shadow.py b/src/strategy/captains/all_drone_shadow.py new file mode 100644 index 0000000..ec34ae7 --- /dev/null +++ b/src/strategy/captains/all_drone_shadow.py @@ -0,0 +1,43 @@ +from strategy.base_ccp import BaseCaptain +from strategy.players import * + + +class Status: + RUNNING = 0 + + +class AllDroneShadow(BaseCaptain): + """" This class assigns the play shadowing to all the bots in the current team. 
""" + + def __init__(self): + self.state = Status.RUNNING + # print("init all drone shadow") + + # Initial role assignment! + for drone in self.drones: + drone.assign(Shadowing()) + + self.toggle = 1 + + def step(self): + """Return current state containing status. + + :return: Current State. + :rtype: State + """ + # Loop over all the drones in this team + for drone in self.drones: + done = drone.step() + + # If we have reached the position we wanted to shadow to, we wait so we are ready for a shot on goal. + if done: + if self.toggle == 1: + # print("Wait") + drone.assign(Wait(seconds=5)) + else: + # print("Shadowing") + drone.assign(Shadowing()) + self.toggle *= -1 + + # This play never ends + return False diff --git a/src/strategy/captains/attack.py b/src/strategy/captains/attack.py new file mode 100644 index 0000000..d28f9cd --- /dev/null +++ b/src/strategy/captains/attack.py @@ -0,0 +1,233 @@ +from strategy.base_ccp import BaseCaptain +from gosling.utils import * +from physics.math import Vec3 +from strategy.drone import Drone +from strategy.players import * +import numpy as np + + +class DefenderState: + """" + In shadowing, the keeper goes to the goal and focuses on strategic positioning. + Once the correct position is reached we wait for a shot at goal + In keeping, the keeper attempts to prevent a goal + """ + GET_BOOST = 0 + COVER = 1 + COVER_NEAR = 2 + SHOOT = 3 + INACTIVE = 4 + + +class AttackerState: + """ + Gets boost in the mid area, then shoots the ball if it is on the enemy side. + If not on the enemy side -> Call prepare first to ensure positioning is correct. + After the optional Prepare step it shoots. + """ + PREPARE = 1 + SHOOT = 2 + COVER = 3 + + +class Attack(BaseCaptain): + """" + Assign the roles (tactics) to the bots in the current team when defending. + We have one bot who is the assigned keeper and two other bots who fetch boost. 
+ """ + LENGTH_MAP_ONE_SIDE = 5250 + BOOST_THRESHOLD = 30 + SHOOT_RANGE = 3000 + + ATTACKER_COVER_DIST_RATIO = 0.70 # Ratio at which the attacking drones cover + ATTACKER_CANCEL_CHECK_RANGE = 1500 # Range at which we consider to cancel our shot + ATTACKER_PREPARE_OFFSETS = [1500, 2500] # Prepare offest range + + DEFENDER_COVER_DIST_RATIO = 0.40 # Ratio at which the defending drone covers + DEFENDER_COVER_PREP_RATIO = 0.75 + DEFENDER_DRONE_PREPARE_X_WINDOW = 2400 # Max distance from y-centerline to consider preparing + DEFENDER_DRONE_SHOOT_X_WINDOW = 1400 # Max distance from y-centerline to consider shooting + + FACTOR_SCORE_HIGH_PENALTY = 1000 # The factor by which we multiply score (higher score is bad) + + def __init__(self): + self.team = self.drones[0].team + self.own_goal_location = Vec3(0, self.LENGTH_MAP_ONE_SIDE * side(self.team), 0) + self.other_goal_location = Vec3(0, self.LENGTH_MAP_ONE_SIDE * side(self.team) * -1, 0) + + # Clear the stacks + for drone in self.drones: + drone.flush_actions() + + drone_closest_to_ball = self._get_teammate_closest_to_ball(prefer_friendly_side=True) + self.attacker_drones = [drone_closest_to_ball] + self.attacker_states = {drone_closest_to_ball: AttackerState.PREPARE} + drone_closest_to_ball.assign(Prepare()) + + # Select keeper from remaining drones + remaining_drones = [drone for drone in self.drones if (drone != drone_closest_to_ball)] + if len(remaining_drones) > 0: + goal_dist = {(Vec3.from_other_vec(drone.car.physics.location) - self.own_goal_location).magnitude(): drone + for drone in remaining_drones} + self.defender_drone = goal_dist[min(goal_dist.keys())] + self.defender_state = DefenderState.COVER + self.defender_drone.assign(Cover(distance_ratio=self.DEFENDER_COVER_DIST_RATIO)) + else: + self.defender_drone = None + self.defender_state = DefenderState.INACTIVE + + # All other drones + remaining_drones = [drone for drone in remaining_drones if (drone != self.defender_drone)] + for idx, drone in enumerate(remaining_drones): + self.attacker_states[drone] = AttackerState.COVER + self.attacker_drones.append(drone) + drone.assign(Cover(distance_ratio=self.ATTACKER_COVER_DIST_RATIO)) + + offsets = np.linspace(*self.ATTACKER_PREPARE_OFFSETS, num=len(self.attacker_drones)) + self.attacker_offsets = {drone: offset for drone, offset in zip(self.attacker_drones, offsets)} + + def step(self): + """ + Return whether this captain is done or not + Assigns bots, to get boost and then go to the ball or shadow. 
+ Assigns one bot to keeper which, intercept the ball if there is a predicted goal + + :return: Done flag, true if finished + :rtype: bool + """ + if self.defender_drone is not None: + self._control_defender() + self._control_attacker_drones() + + def _control_defender(self): + """Sets the control for the keeper drone""" + # Controlling the keeper + done = self.defender_drone.step() + + centerline_dist = abs(self.world.ball.physics.location.x) + + # done and COVER -> SHOOT if opportunity else COVER + if done and self.defender_state == DefenderState.COVER: + self._defender_act_ball_near_center(centerline_dist) + + elif done and self.defender_state == DefenderState.COVER_NEAR: + # print("CPT ATTACK: DEFENDER - INTERCEPT") + self.defender_drone.assign(Intercept()) + self.defender_state = DefenderState.SHOOT + + elif self.defender_state == DefenderState.COVER_NEAR: + + if centerline_dist > self.DEFENDER_DRONE_PREPARE_X_WINDOW: + # print("CPT ATTACK: DEFENDER - Too far from center - Back to Cover") + self.defender_drone.flush_actions() + self.defender_drone.assign(Cover(distance_ratio=self.DEFENDER_COVER_DIST_RATIO)) + self.defender_state = DefenderState.COVER + + elif centerline_dist < self.DEFENDER_DRONE_SHOOT_X_WINDOW: + # print("CPT ATTACK: DEFENDER - Right in center -> Skip prepare! SHOOT!") + self.defender_drone.flush_actions() + self.defender_drone.assign(Intercept()) + self.defender_state = DefenderState.SHOOT + + # done and SHOOT -> COVER + elif self.defender_state == DefenderState.SHOOT: + if done: + # print("CPT ATTACK: DEFENDER - SHOT DONE - Returning") + self.defender_drone.assign(Cover(distance_ratio=self.DEFENDER_COVER_DIST_RATIO)) + self.defender_state = DefenderState.COVER + elif centerline_dist > self.DEFENDER_DRONE_PREPARE_X_WINDOW: + # print("CPT ATTACK: DEFENDER - Shot out of window") + self.defender_drone.flush_actions() + self.defender_drone.assign(Cover(distance_ratio=self.DEFENDER_COVER_DIST_RATIO)) + self.defender_state = DefenderState.COVER + + def _defender_act_ball_near_center(self, centerline_dist: float): + """ Controls the defender drone depending on how far the ball is from the center Y-axis """ + if centerline_dist < self.DEFENDER_DRONE_SHOOT_X_WINDOW: + # print("CPT ATTACK: DEFENDER - Right in center - COVER -> INTERCEPT") + self.defender_drone.assign(Intercept()) + self.defender_state = DefenderState.SHOOT + + elif centerline_dist < self.DEFENDER_DRONE_PREPARE_X_WINDOW: + # print("CPT ATTACK: DEFENDER - NEAR CENTER - START PREPARING") + self.defender_drone.assign(Cover(distance_ratio=self.DEFENDER_COVER_PREP_RATIO)) + self.defender_state = DefenderState.COVER_NEAR + else: + self.defender_drone.assign(Cover(distance_ratio=self.DEFENDER_COVER_DIST_RATIO)) + + def _control_attacker_drones(self): + """Sets the controls for all other drones""" + # All other drones get boost and try to intercept + for drone in self.attacker_drones: + drone_state = self.attacker_states[drone] + done = drone.step() + + if done: + self._select_state_attacker_drone(drone, drone_state) + else: + self._check_and_adjust_current_state_attacker_drone(drone, drone_state) + + def _select_state_attacker_drone(self, drone: Drone, drone_state: int): + """Switch to different state upon completion. 
Assumes that the current state is done""" + if drone_state == AttackerState.PREPARE: + if self._allow_shoot_state(): + self.attacker_states[drone] = AttackerState.SHOOT + drone.assign(Intercept()) + else: + self.attacker_states[drone] = AttackerState.PREPARE + drone.assign(Prepare(offset_in_ball_direction=self.attacker_offsets[drone])) + + elif drone_state == AttackerState.SHOOT: + self.attacker_states[drone] = AttackerState.COVER + drone.assign(Cover(distance_ratio=self.ATTACKER_COVER_DIST_RATIO)) + + elif drone_state == AttackerState.COVER: + self.attacker_states[drone] = AttackerState.PREPARE + drone.assign(Prepare(offset_in_ball_direction=self.attacker_offsets[drone])) + + def _check_and_adjust_current_state_attacker_drone(self, drone: Drone, drone_state: int): + """Switch to different state before completion of the task when particular conditions are met""" + if drone_state == AttackerState.SHOOT: + if self._detect_bad_shot(drone): + self.attacker_states[drone] = AttackerState.COVER + drone.flush_actions() + drone.assign(Cover(distance_ratio=self.ATTACKER_COVER_DIST_RATIO)) + + def _allow_shoot_state(self) -> bool: + """"Only allows shoot state when no other drone is shooting.""" + for d, state in self.attacker_states.items(): + if AttackerState.SHOOT == state: + return False + return True + + def _get_teammate_closest_to_ball(self, prefer_friendly_side: bool = False) -> Drone: + """ Select the drone closest to the ball to attack first drones on correct side have a higher preference """ + if prefer_friendly_side: + penalty_points = {} + for drone in self.drones: + ball_dist = self.world.calc_dist_to_ball(drone.car) + if drone.car.physics.location.y * side(self.team) > self.world.ball.physics.location.y * \ + side(self.team): + penalty_points[ball_dist * self.FACTOR_SCORE_HIGH_PENALTY] = drone + else: + penalty_points[ball_dist] = drone + drone_closest_to_ball = penalty_points[min(penalty_points.keys())] + else: + ball_dist = {self.world.calc_dist_to_ball(drone.car): drone for drone in + self.drones} + drone_closest_to_ball = ball_dist[min(ball_dist.keys())] + return drone_closest_to_ball + + def _detect_bad_shot(self, drone: Drone) -> bool: + """"Check if our shot is bad""" + # Can only be False when we are close to the ball + if (Vec3.from_other_vec(self.world.ball.physics.location) - + Vec3.from_other_vec(drone.car.physics.location)).magnitude() < self.ATTACKER_CANCEL_CHECK_RANGE: + + # Cancel if we expect to fire to our own goal + if (self.world.ball.physics.location.y - drone.car.physics.location.y) * side(self.team) > 0: + # print(f"Bad shot detected. Ball: {self.world.ball.physics.location.y} " + # f"Drone: {drone.car.physics.location.y}") + return True + + return False diff --git a/src/strategy/captains/defense.py b/src/strategy/captains/defense.py new file mode 100644 index 0000000..67552d9 --- /dev/null +++ b/src/strategy/captains/defense.py @@ -0,0 +1,196 @@ +from strategy.base_ccp import BaseCaptain, BaseCCP +from gosling.utils import * +from physics.math import Vec3 +from strategy.drone import Drone +from strategy.players import * + + +class InterceptorState: + """" + The interceptor drone drives back to collect a big boost on it's own + side, then it drives in front of the goal and finally drives towards the + ball to intercept it. + """ + COVER = 1 + INTERCEPT = 2 + + +class KeeperState: + """" + In shadowing, the keeper goes to the goal and focuses on strategic positioning. 
+ Once the correct position is reached we wait for a shot at goal + In keeping, the keeper attempts to prevent a goal + """ + SHADOWING = 0 + WAIT = 1 + KEEPING = 2 + + +class Defense(BaseCaptain): + """" + This class assigns the roles (tactics) to the bots in the current team when defending + We have one bot who is the assigned keeper and two other bots who fetch boost + """ + LENGTH_MAP_ONE_SIDE = 5250 # Length of map in Y direction + BOOST_THRESHOLD = 10 # Minimal boost before getting boost + START_KEEPER_CIRCLE_RANGE = 1800 # Circular area centered on goal in which the keeper starts keeping + START_KEEPER_SIDE_RANGE = 3000 # Rectangular area in the opposite corner of the keeper in which it starts keeping + KEEPER_SWITCH_TRESHOLD = 1000 # Minimal absolute X of the ball for the keeper to shadow in the other corner + + INTERCEPTOR_COVER_RATIO = 0.3 + INTERCEPTOR_CANCEL_CHECK_RANGE = 900 + + MAX_GOAL_TIME = 3 # We call keeper if they score in less than this seconds + MINIMUM_DIRECTION_RATIO = 0.4 # If lower than this we have a high change to have a chance to shoot backwards + JUMP_THRESHOLD = 300 # Higher than this we can't shoot the ball as the keepre + + def __init__(self): + # print("Captain Defense: Started") + self.team = self.drones[0].team + self.own_goal_location = Vec3(0, self.LENGTH_MAP_ONE_SIDE * side(self.team), 0) + + # Clear the stacks + for drone in self.drones: + drone.flush_actions() + + # Closest car to the goal becomes the keeper + goal_dist = {(Vec3.from_other_vec(car.physics.location) - self.own_goal_location).magnitude(): drone + for car, drone in zip(self.world.teams[self.team].cars, self.drones)} + self.keeper_drone = goal_dist[min(goal_dist.keys())] + self.keeper_state = KeeperState.SHADOWING + self.keeper_drone.assign(Shadowing()) + + # Other drones become interceptors + self.interceptor_drones = [drone for drone in self.drones if (drone != self.keeper_drone)] + self.interceptor_states = {drone: InterceptorState.COVER for drone in self.interceptor_drones} + [drone.assign(Cover(distance_ratio=self.INTERCEPTOR_COVER_RATIO)) for drone in self.interceptor_drones] + + def step(self) -> bool: + """ + Checks for each drone if it needs to get boost or can go intercept. + The keeper drone will go to the goal and wait. + + :return: Done flag, true if finished + :rtype: bool + """ + self._control_interceptors() + self._control_keeper() + return False + + def _control_interceptors(self): + """Updates the controls for all other drones""" + + for drone in self.interceptor_drones: + drone_state = self.interceptor_states[drone] + done = drone.step() + + if done: + self._select_state_interceptor(drone, drone_state) + else: + self._check_and_adjust_current_state_interceptor(drone, drone_state) + + def _select_state_interceptor(self, drone: Drone, drone_state: int): + """Switch to different state upon completion. 
Assumes that the current state is done""" + # COVER done -> INTERCEPT + if drone_state == InterceptorState.COVER: + if self._allow_intercept_state(): + self.interceptor_states[drone] = InterceptorState.INTERCEPT + drone.assign(Intercept()) + else: + self.interceptor_states[drone] = InterceptorState.COVER + drone.assign(Cover(distance_ratio=self.INTERCEPTOR_COVER_RATIO)) + + # INTERCEPT done -> COVER + if drone_state == InterceptorState.INTERCEPT: + self.interceptor_states[drone] = InterceptorState.COVER + drone.assign(Cover(distance_ratio=self.INTERCEPTOR_COVER_RATIO)) + + def _check_and_adjust_current_state_interceptor(self, drone: Drone, drone_state: int): + """If the interceptor can only make a bad shot, we give the interception a new state: cover""" + if drone_state == InterceptorState.INTERCEPT: + if self._detect_bad_shot(drone): + self.interceptor_states[drone] = InterceptorState.COVER + drone.flush_actions() + drone.assign(Cover(distance_ratio=self.INTERCEPTOR_COVER_RATIO)) + + def _allow_intercept_state(self) -> bool: + """"Only allows intercept state when no other drone is intercepting.""" + for d, state in self.interceptor_states.items(): + if InterceptorState.INTERCEPT == state: + return False + return True + + def _control_keeper(self): + """Sets the keeper controls""" + done = self.keeper_drone.step() + + # future goal predicted, ball near goal or ball in opposite corner -> go keeping + if self._keeper_start_check() and not self.keeper_state == KeeperState.KEEPING: + # print("Keeper state: Keeping") + self.keeper_state = KeeperState.KEEPING + self.keeper_drone.flush_actions() + self.keeper_drone.assign(Keeper()) + + # KEEPING done -> SHADOWING + elif done and self.keeper_state == KeeperState.KEEPING: + # print("Keeper state: Shadowing after keeping") + self.keeper_state = KeeperState.SHADOWING + self.keeper_drone.flush_actions() + self.keeper_drone.assign(Shadowing()) + + # SHADOWING done -> WAIT + elif done and self.keeper_state == KeeperState.SHADOWING: + # print("Keeper state: Wait after shadowing") + self.keeper_drone.assign(Wait(face_ball=True)) + self.keeper_state = KeeperState.WAIT + + # If the ball is on the other side that where you are waiting, and it is far away enough, switch sides. 
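# (Overall keeper cycle: SHADOWING -> WAIT -> SHADOWING again when the ball switches corners; KEEPING is entered
# from any state as soon as _keeper_start_check() sees a threat and falls back to SHADOWING once the save attempt
# is done.)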
+ # WAIT and ball on other side -> SHADOWING + elif self.keeper_state == KeeperState.WAIT and self._ball_is_on_opposite_side_of_keeper(): + # print("Keeper state: Switch to other side") + self.keeper_drone.flush_actions() + self.keeper_drone.assign(Shadowing()) + + def _ball_is_on_opposite_side_of_keeper(self) -> bool: + """Check if the ball is on the opposite side of the keeper based on the y-centerline of the field""" + return self.keeper_drone.car.physics.location.x * self.world.ball.physics.location.x > 0 and \ + abs(self.world.ball.physics.location.x) > self.KEEPER_SWITCH_TRESHOLD + + def _keeper_start_check(self) -> bool: + """"Checks the conditions for the keeper to start""" + ball_goal_dist = (Vec3.from_other_vec(self.world.ball.physics.location) - self.own_goal_location).magnitude() + + future_goal_predicted = self._future_goal_is_imminent() + in_goal_circle = ball_goal_dist < self.START_KEEPER_CIRCLE_RANGE + on_opposite_corner = self.keeper_drone.car.physics.location.x * self.world.ball.physics.location.x < 1 and \ + 0 < abs(self.world.ball.physics.location.x) < self.START_KEEPER_SIDE_RANGE and \ + abs(self.own_goal_location.y - self.world.ball.physics.location.y) < \ + self.START_KEEPER_CIRCLE_RANGE and self.world.ball.physics.location.z < self.JUMP_THRESHOLD + + return future_goal_predicted or in_goal_circle or on_opposite_corner + + def _future_goal_is_imminent(self) -> bool: + """Returns whether a future goal is imminent""" + return Ball.predict_future_goal() and Ball.predict_future_goal().physics.location.y * side(self.team) > 0 \ + and Ball.predict_future_goal().game_seconds - BaseCCP.world.game.seconds_elapsed < self.MAX_GOAL_TIME + + def _detect_bad_shot(self, drone: Drone) -> bool: + """"Check if our shot is bad""" + + # Can only be False when we are close to the ball + if self.world.calc_dist_to_ball(drone.car) < self.INTERCEPTOR_CANCEL_CHECK_RANGE: + + drone_to_ball = (Vec3.from_other_vec(self.world.ball.physics.location) - + Vec3.from_other_vec(drone.car.physics.location)).normalize() + + drone_to_goal = (Vec3.from_other_vec(self.own_goal_location) - + Vec3.from_other_vec(drone.car.physics.location)).normalize() + + # Cancel if we expect to fire to our own goal + if drone_to_ball * drone_to_goal > self.MINIMUM_DIRECTION_RATIO: + # print(f"Bad shot detected for drone {drone.index}") + # print(f"Bad shot detected. Ball: {self.world.ball.physics.location.y} " + # f"Drone: {drone.car.physics.location.y}") + return True + + return False diff --git a/src/strategy/captains/kickoff.py b/src/strategy/captains/kickoff.py new file mode 100644 index 0000000..fd00db7 --- /dev/null +++ b/src/strategy/captains/kickoff.py @@ -0,0 +1,83 @@ +from strategy.base_ccp import BaseCaptain +from strategy.players import * +from physics.math.vector3 import Vec3 + + +class Status: + RUNNING = 0 + + +class KickoffCaptain(BaseCaptain): + """" + This class assigns the roles (tactics) to the bots in the current team. + Which performs the kickoff. + + All drones are accessible with self.drones. 
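The drone closest to the ball takes the kickoff, the second closest covers behind it, and any third drone shadows; after first contact the captain either sends a following drone to push the ball on or ends the play, depending on how fast the ball is already moving.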
+ """ + + COVER_RATIO = 0.85 + BALL_MAX_VELOCITY = 300 # If the velocity of the ball is higher than this, we stop the kickoff routine + + def __init__(self): + self.state = Status.RUNNING + self.prev_action_was_kickoff = True + self.first_contact = False + self.second_drone_following = False + # print('Init kickoff') + + # assign tasks to players + # get distance to ball + dists_ball = [(self.world.calc_dist_to_ball(drone.car), i) for i, drone in enumerate(self.drones)] + dists_ball = sorted(dists_ball, key=lambda x: x[0]) # sort by distance + # print(dists_ball) + + # closest drone do kickoff + self.drones[dists_ball[0][1]].assign(KickoffGosling()) + self.drones[dists_ball[0][1]].kickoff_taker = True + + # second closest go behind kickoff-captain + if len(self.drones) > 1: + self.drones[dists_ball[1][1]].assign(Cover(distance_ratio=self.COVER_RATIO)) + + # third get boost + if len(self.drones) > 2: + self.drones[dists_ball[2][1]].assign(Shadowing()) + + def step(self) -> bool: + """ + Step in kickoff, check if we need to reassign any bots. + Return if the kickoff is over. + + :return: Done flag, true if finished + :rtype: bool + """ + # Loop over all the drones in this team + for drone in self.drones: + done = drone.step() + + if getattr(drone, 'following_kickoff', False) and self.first_contact: + velocity = Vec3.from_other_vec(self.world.ball.physics.velocity).magnitude() + if velocity < self.BALL_MAX_VELOCITY: + drone.assign(Dribble(self.world.ball.physics.location)) # shoot the ball + else: + # print('Kickoff ended 2') + return True + drone.following_kickoff = False + + # If state returns true if the state is not pending anymore (fail or success). + if done: + # check if the kickoff captain is done + if getattr(drone, 'kickoff_taker', False): + self.first_contact = True + if not self.second_drone_following: + # print('Kickoff ended 1') + return True # no drone following kickoff complete + else: + drone.assign(GetBoost()) # kickoff taker gets boost + continue + + # Next drone go to ball, third drone should drive to the ball + drone.assign(Dribble(self.world.ball.physics.location)) + + # This play never ends + return False diff --git a/src/strategy/captains/kickoff_demolition.py b/src/strategy/captains/kickoff_demolition.py new file mode 100644 index 0000000..ec9da7f --- /dev/null +++ b/src/strategy/captains/kickoff_demolition.py @@ -0,0 +1,49 @@ +from strategy.base_ccp import BaseCaptain +from strategy.players import * + + +class Status: + RUNNING = 0 + + +class KickoffDemolition(BaseCaptain): + """" + This class assigns the roles (tactics) to the bots in the current team. + + All drones are accessible with self.drones. + Set the tactic using Drone.set_tactic(TACTIC_INSTANCE). + """ + + def __init__(self): + self.state = Status.RUNNING + self.prev_action_was_kickoff = True + + # Initial role assignment! + for drone in self.drones: + drone.assign(KickoffGosling()) + + def step(self): + """Return current state containing status. + + :return: Current State. + :rtype: State + """ + # Loop over all the drones in this team + for drone in self.drones: + done = drone.step() + + # If state returns true if the state is not pending anymore (fail or success). 
+ if done: + + # Switch between kickoffs and demos + if self.prev_action_was_kickoff: + drone.assign(Demolition(drone)) + # print("Assigned Demo") + self.prev_action_was_kickoff = False + else: + drone.assign(KickoffGosling()) + # print("Assigned Kickoff") + self.prev_action_was_kickoff = True + + # This play never ends + return False diff --git a/src/strategy/coaches/__init__.py b/src/strategy/coaches/__init__.py new file mode 100644 index 0000000..1729c8a --- /dev/null +++ b/src/strategy/coaches/__init__.py @@ -0,0 +1,13 @@ +from strategy.coaches.mr_kickoff import MrKickoff +from strategy.coaches.kickoff_demo_coach import KickoffDemoCoach +from strategy.coaches.mr_demo import MrDemo +from strategy.coaches.mr_shadow import MrShadow +from strategy.coaches.mr_dribble import MrDribble +from strategy.coaches.mr_get_boost import MrGetBoost +from strategy.coaches.triple_rotations import TripleRotations +from strategy.coaches.mr_intercept import MrIntercept +from strategy.coaches.mr_prepare import MrPrepare +from strategy.coaches.mr_cover import MrCover + +__all__ = ['MrKickoff', 'KickoffDemoCoach', 'MrGetBoost', 'MrDemo', 'MrShadow', 'TripleRotations', 'MrDribble', + "MrIntercept", "MrPrepare", "MrCover"] diff --git a/src/strategy/coaches/kickoff_demo_coach.py b/src/strategy/coaches/kickoff_demo_coach.py new file mode 100644 index 0000000..67bd256 --- /dev/null +++ b/src/strategy/coaches/kickoff_demo_coach.py @@ -0,0 +1,24 @@ +from strategy.base_ccp import BaseCoach +from strategy.captains import * + + +class State: + RUNNING = 0 + + +class KickoffDemoCoach(BaseCoach): + """"This class selects the strategy that is currently being used.""" + + def __init__(self): + self.state = State.RUNNING + self.current = KickoffDemolition() + + def step(self): + # Implement your state switches here! + if self.state == State.RUNNING: + done = self.current.step() + + # Trivial state switching to the same state with the same play + if done: + self.state = State.RUNNING + self.current = KickoffDemolition() diff --git a/src/strategy/coaches/mr_cover.py b/src/strategy/coaches/mr_cover.py new file mode 100644 index 0000000..19f2b35 --- /dev/null +++ b/src/strategy/coaches/mr_cover.py @@ -0,0 +1,26 @@ +from strategy.base_ccp import BaseCoach +from strategy.captains import * +from strategy.players import Cover + + +class MrCover(BaseCoach): + """"This class calls the captain that makes all drones cover, standing between the ball and your own goal.""" + + def __init__(self): + + # Initial role assignment! + for drone in self.drones: + drone.flush_actions() + drone.assign(Cover()) + + def step(self): + """Return current state containing status. + + :return: Current State. + :rtype: bool + """ + for drone in self.drones: + done = drone.step() # If state returns true if the state is not pending anymore (fail or success). + + if done: + drone.assign(Cover()) diff --git a/src/strategy/coaches/mr_demo.py b/src/strategy/coaches/mr_demo.py new file mode 100644 index 0000000..5356568 --- /dev/null +++ b/src/strategy/coaches/mr_demo.py @@ -0,0 +1,24 @@ +from strategy.base_ccp import BaseCoach +from strategy.captains import * + + +class State: + RUNNING = 0 + + +class MrDemo(BaseCoach): + """"Selects exclusively the AllDroneDemo captain.""" + + def __init__(self): + self.state = State.RUNNING + self.current = AllDroneDemo() + + def step(self): + # Implement your state switches here! 
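# This coach has a single state: it keeps stepping the AllDroneDemo captain and simply re-instantiates it whenever
# the captain reports that it is done.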
+ if self.state == State.RUNNING: + done = self.current.step() + + # Trivial state switching to the same state with the same play + if done: + self.state = State.RUNNING + self.current = AllDroneDemo() diff --git a/src/strategy/coaches/mr_dribble.py b/src/strategy/coaches/mr_dribble.py new file mode 100644 index 0000000..f0624e9 --- /dev/null +++ b/src/strategy/coaches/mr_dribble.py @@ -0,0 +1,24 @@ +from strategy.base_ccp import BaseCoach +from strategy.captains import * + + +class State: + RUNNING = 0 + + +class MrDribble(BaseCoach): + """"Selects exclusively the AllDroneDribble captain.""" + + def __init__(self): + self.state = State.RUNNING + self.current = AllDroneDribble() + + def step(self): + # Implement your state switches here! + if self.state == State.RUNNING: + done = self.current.step() + + # Trivial state switching to the same state with the same play + if done: + self.state = State.RUNNING + self.current = AllDroneDribble() diff --git a/src/strategy/coaches/mr_get_boost.py b/src/strategy/coaches/mr_get_boost.py new file mode 100644 index 0000000..b544276 --- /dev/null +++ b/src/strategy/coaches/mr_get_boost.py @@ -0,0 +1,24 @@ +from strategy.base_ccp import BaseCoach +from strategy.captains import * + + +class State: + RUNNING = 0 + + +class MrGetBoost(BaseCoach): + """"Selects exclusively the AllDroneGetBoost captain.""" + + def __init__(self): + self.state = State.RUNNING + self.current = AllDroneGetBoost() + + def step(self): + # Implement your state switches here! + if self.state == State.RUNNING: + done = self.current.step() + + # Trivial state switching to the same state with the same play + if done: + self.state = State.RUNNING + self.current = AllDroneGetBoost() diff --git a/src/strategy/coaches/mr_intercept.py b/src/strategy/coaches/mr_intercept.py new file mode 100644 index 0000000..2bf43b4 --- /dev/null +++ b/src/strategy/coaches/mr_intercept.py @@ -0,0 +1,24 @@ +from strategy.base_ccp import BaseCoach +from strategy.captains import * + + +class State: + RUNNING = 0 + + +class MrIntercept(BaseCoach): + """"Selects exclusively the AllDroneIntercept captain.""" + + def __init__(self): + self.state = State.RUNNING + self.current = AllDroneIntercept() + + def step(self): + # Implement your state switches here! + if self.state == State.RUNNING: + done = self.current.step() + + # Trivial state switching to the same state with the same play + if done: + self.state = State.RUNNING + self.current = AllDroneIntercept() diff --git a/src/strategy/coaches/mr_kickoff.py b/src/strategy/coaches/mr_kickoff.py new file mode 100644 index 0000000..226a8c8 --- /dev/null +++ b/src/strategy/coaches/mr_kickoff.py @@ -0,0 +1,24 @@ +from strategy.base_ccp import BaseCoach +from strategy.captains import * + + +class State: + RUNNING = 0 + + +class MrKickoff(BaseCoach): + """"Selects exclusively the AllDroneKickoff captain.""" + + def __init__(self): + self.state = State.RUNNING + self.current = KickoffCaptain() + + def step(self): + # Implement your state switches here! 
+ if self.state == State.RUNNING: + done = self.current.step() + + # Trivial state switching to the same state with the same play + if done: + self.state = State.RUNNING + self.current = KickoffCaptain() diff --git a/src/strategy/coaches/mr_prepare.py b/src/strategy/coaches/mr_prepare.py new file mode 100644 index 0000000..6cb3a5d --- /dev/null +++ b/src/strategy/coaches/mr_prepare.py @@ -0,0 +1,28 @@ +from strategy.base_ccp import BaseCoach +from strategy.captains import * +from strategy.players import Prepare + + +class MrPrepare(BaseCoach): + """"This class calls the captain that makes all drones prepare to attack.""" + + def __init__(self): + + # Initial role assignment! + for drone in self.drones: + drone.flush_actions() + drone.assign(Prepare()) + + def step(self): + """Return current state containing status. + + :return: Current State. + :rtype: bool + """ + # Loop over all the drones in this team + for drone in self.drones: + done = drone.step() + + # If state returns true if the state is not pending anymore (fail or success). + if done: + drone.assign(Prepare()) diff --git a/src/strategy/coaches/mr_shadow.py b/src/strategy/coaches/mr_shadow.py new file mode 100644 index 0000000..d62b849 --- /dev/null +++ b/src/strategy/coaches/mr_shadow.py @@ -0,0 +1,24 @@ +from strategy.base_ccp import BaseCoach +from strategy.captains import * + + +class State: + RUNNING = 0 + + +class MrShadow(BaseCoach): + """"This class calls the captain that makes all drones perform shadowing.""" + + def __init__(self): + self.state = State.RUNNING + self.current = AllDroneShadow() + + def step(self): + # Implement your state switches here! + if self.state == State.RUNNING: + done = self.current.step() + + # Trivial state switching to the same state with the same play + if done: + self.state = State.RUNNING + self.current = AllDroneShadow() diff --git a/src/strategy/coaches/triple_rotations.py b/src/strategy/coaches/triple_rotations.py new file mode 100644 index 0000000..135dac5 --- /dev/null +++ b/src/strategy/coaches/triple_rotations.py @@ -0,0 +1,104 @@ +from gosling.utils import side +from strategy.base_ccp import BaseCoach +from strategy.captains import * +from physics.math import Vec3 + + +class State: + KICKOFF = 0 + ATTACKING = 1 + DEFENDING = 2 + state = ["Kickoff", "Attacking", "Defending"] + + +class InGameConstants: + MAX_BALL_VELOCITY = 6000 # According to the wiki the maximum velocity of the ball is 6000 + MAX_BALL_POSITION = 5120 # According to the wiki the goals are located at y = 5120 or -5120 + + +class TripleRotations(BaseCoach): + """"Rotation between three captains: Attack, Defense and Kickoff. Coach for Milestone 1.""" + + def __init__(self): + self.state = State.KICKOFF + + self.current = KickoffCaptain() + self.team = self.drones[0].team + self.opp_team = 1 if self.team == 0 else 0 + + self.side = side(self.team) + + # Tunable parameters - weights, used for deciding if we have the opportunity to attack + self.weight_closest = 1 + self.weight_velocity = 6 + self.weight_position = 10 + + # Tunable thresholds for switching, this prevents switching back and forth too often + self.threshold_defense = -0.2 + self.threshold_offense = 0.1 + + def step(self): + """ + Determine if the state of the game switched enough that another captain should take control. 
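The decision is driven by a single attack score: score = (closer_score * weight_closest + v_ball_score * weight_velocity + ball_pos_score * weight_position) / (1 + 6 + 10), where closer_score is +1 if one of our cars is nearest to the ball and -1 otherwise, v_ball_score is the ball velocity towards the enemy goal divided by 6000, and ball_pos_score is the ball position towards the enemy goal divided by 5120. Worked example for the blue team (side() is -1): a teammate is closest and the ball sits at y = 2560 rolling at 1200 uu/s towards the orange goal, so score = (1 * 1 + 0.2 * 6 + 0.5 * 10) / 17, roughly 0.42, which is above threshold_offense (0.1) and the Attack captain takes over; below threshold_defense (-0.2) the Defense captain is selected, and a kickoff pause always switches back to the KickoffCaptain first.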
+ :return: + """ + done = self.current.step() + + total_score = self._calculate_attack_score() + + # Switch to kickoff whenever there is a kickoff + if self.world.packet.game_info.is_kickoff_pause: + new_state = State.KICKOFF + if new_state != self.state: + self.current = KickoffCaptain() + + # Switch to attack immediately after the kickoff. + elif self.state == State.KICKOFF and done: + new_state = State.ATTACKING + self.current = Attack() + print(f'Coach Triple: Switched to {State.state[new_state]}') + + # Switch to defending if the threat becomes too big. + elif total_score < self.threshold_defense: + new_state = State.DEFENDING + if new_state != self.state: + print(f'Coach Triple: Switched to {State.state[new_state]}') + self.current = Defense() + + # Switch to attacking if the situation becomes more favourable. + elif total_score >= self.threshold_offense: + new_state = State.ATTACKING + if new_state != self.state: + print(f'Coach Triple: Switched to {State.state[new_state]}') + self.current = Attack() + else: + new_state = self.state + self.state = new_state + + def _calculate_attack_score(self) -> float: + # Determine if we are closer to the ball or the opponent + dist_us = 1.0 * (10 ** 10) + dist_opp = 1.0 * (10 ** 10) + ball_pos = Vec3.from_other_vec(self.world.ball.physics.location) + for car in self.world.teams[self.opp_team].cars: + vec_to_ball = ball_pos - Vec3.from_other_vec(car.physics.location) + dist_opp = min(vec_to_ball.magnitude(), dist_opp) + for car in self.world.teams[self.team].cars: + vec_to_ball = ball_pos - Vec3.from_other_vec(car.physics.location) + dist_us = min(vec_to_ball.magnitude(), dist_us) + enemy_closer = dist_opp < dist_us + + if enemy_closer: + closer_score = -1 + else: + closer_score = 1 + + # Calculate the ball speed towards the enemy goal and the position of the ball and turn them into a point value + v_ball_score = - self.world.ball.physics.velocity.y * self.side / InGameConstants.MAX_BALL_VELOCITY + ball_pos_score = - self.world.ball.physics.location.y * self.side / InGameConstants.MAX_BALL_POSITION + + # Calculate the total score by assigning a specific weight to each of the score values. + total_score = (closer_score * self.weight_closest + v_ball_score * self.weight_velocity + + ball_pos_score * self.weight_position) / (self.weight_closest + + self.weight_velocity + self.weight_position) + return total_score diff --git a/src/strategy/drone.py b/src/strategy/drone.py new file mode 100644 index 0000000..a0dea53 --- /dev/null +++ b/src/strategy/drone.py @@ -0,0 +1,113 @@ +from strategy.base_ccp import SharedInfo, BasePlayer +from strategy.utils import GoslingAgentWrapper +from gosling.objects import GoslingAgent, boost_object +from rlbot.utils.structures.bot_input_struct import PlayerInput +from rlbot.agents.base_agent import SimpleControllerState +from typing import List, Type +from world.components import Car + + +class Drone(SharedInfo): + """"The drone class that keeps track of and updates the controls of this drone for each timestep. + + :param name: The name of the drone as given by RLBOT. + :param team: The team of the drone as given by RLBOT (0 for blue or 1 for orange). 
+        :param index: The unique index of the drone as given by RLBOT."""
+
+    def __init__(self, name: str, team: int, index: int):
+        super().__init__()
+
+        # Default drone properties
+        self.name: str = name
+        self.team: int = team
+        self.index: int = index
+
+        # A list that acts as the routines stack
+        self.player_stack: List = []
+
+        self.controller: SimpleControllerState = SimpleControllerState()
+        self.gosling_wrapper: GoslingAgentWrapper = GoslingAgentWrapper(name, team, index)
+
+    def step(self) -> bool:
+        """Step function for the drone class. Sets the controls for the current step.
+
+        :return: True if the routine stack is empty after this step.
+        :rtype: bool
+        """
+
+        # Run the routine at the end of the stack
+        if len(self.player_stack) > 0:
+            current_routine = self.player_stack[-1]
+
+            # Switch between CCP style player movement and the Gosling wrapper.
+            if issubclass(type(current_routine), BasePlayer):
+                done = self._step_base_player(current_routine)
+            else:
+                done = self._step_gosling(current_routine)
+
+            if done:
+                self.player_stack.pop()
+
+        return len(self.player_stack) == 0
+
+    def assign(self, routine):
+        """Assign a new player routine to this drone by pushing it on the stack.
+
+        :param routine: Either a CCP BasePlayer instance or a Gosling routine.
+        """
+        self.player_stack.append(routine)
+
+    def get_player_input(self) -> PlayerInput:
+        """Get the current controls from the drone.
+
+        :return: Current controls for this car.
+        :rtype: PlayerInput
+        """
+
+        # Raise an error if no controls were set.
+        if self.controller is None:
+            raise RuntimeError(f"Did not set the controls for drone {self.index}")
+
+        # PlayerInput mapping
+        player_input = PlayerInput()
+        player_input.throttle = self.controller.throttle  # -1 for full reverse, 1 for full forward
+        player_input.steer = self.controller.steer  # -1 for full left, 1 for full right
+        player_input.pitch = self.controller.pitch  # -1 for nose down, 1 for nose up
+        player_input.yaw = self.controller.yaw  # -1 for full left, 1 for full right
+        player_input.roll = self.controller.roll  # -1 for roll left, 1 for roll right
+        player_input.jump = self.controller.jump  # true if you want to press the jump button
+        player_input.boost = self.controller.boost  # true if you want to press the boost button
+        player_input.handbrake = self.controller.handbrake  # true if you want to press the handbrake button
+        return player_input
+
+    @property
+    def car(self) -> Car:
+        """Get the car from the world model.
+
+        :return: The Car object corresponding to this car.
+ :rtype: Car + """ + return self.world.cars[self.index] + + def flush_actions(self): + """ Removes all the items from the stack""" + self.player_stack = [] + self.gosling_wrapper.flush_actions() + + def _step_base_player(self, current_routine: BasePlayer) -> bool: + self.controller = SimpleControllerState() + self.controller, done = current_routine.step() + return done + + def _step_gosling(self, current_routine: Type[BasePlayer]) -> bool: + resulting_routine, done, flushed = self.gosling_wrapper.update( + current_routine) # Update and run gosling wrapper + self.controller = self.gosling_wrapper.controller # Set controller + if resulting_routine is not None: + if flushed: + self.player_stack = resulting_routine + else: + self.player_stack = self.player_stack[:-1] + resulting_routine + elif flushed: + self.player_stack = [] + return done diff --git a/src/strategy/players/__init__.py b/src/strategy/players/__init__.py new file mode 100644 index 0000000..f0a8d69 --- /dev/null +++ b/src/strategy/players/__init__.py @@ -0,0 +1,13 @@ +from strategy.players.gosling_style.kickoff import * +from strategy.players.gosling_style.keeper import * +from strategy.players.gosling_style.dribble import Dribble +from strategy.players.demolition import * +from strategy.players.gosling_style.get_boost import GetBoost +from strategy.players.gosling_style.shadowing import * +from strategy.players.gosling_style.wait import * +from strategy.players.gosling_style.intercept import Intercept +from strategy.players.gosling_style.prepare import Prepare +from strategy.players.gosling_style.cover import Cover + +__all__ = ['KickoffGosling', "Demolition", "Dribble", "Shadowing", "Keeper", "Wait", 'GetBoost', 'Intercept', + "Prepare", "Cover"] diff --git a/src/strategy/players/demolition.py b/src/strategy/players/demolition.py new file mode 100644 index 0000000..dd25e57 --- /dev/null +++ b/src/strategy/players/demolition.py @@ -0,0 +1,27 @@ +from gosling.routines import side +from gosling.utils import defaultPD, defaultThrottle +from strategy.base_ccp import BasePlayer +from rlbot.agents.base_agent import SimpleControllerState +from rlbot.utils.structures.game_data_struct import Vector3 + + +class Demolition(BasePlayer): + """ A 1v1 kickoff. Drive towards the ball and dodge towards it. """ + + def __init__(self, drone): + super().__init__(drone) + self.index = drone.index + + def step(self): + """Run step behaviour returns whether the current entity is finished. + + :return: Controller for the corresponding car and a flag indicating whether it is finished. + :rtype: (SimpleControllerState, bool) + """ + target = self.world.ball.physics.location + Vector3(0, 200 * side(self.world.teams[self.index]), 0) + local_target = self.world.me.local(target - self.world.me.location) + defaultPD(self.world, local_target) + defaultThrottle(self.world, 2300) + + # I think you need self.drone.physics.location + return SimpleControllerState(), False diff --git a/src/strategy/players/gosling_style/cover.py b/src/strategy/players/gosling_style/cover.py new file mode 100644 index 0000000..e0abc9a --- /dev/null +++ b/src/strategy/players/gosling_style/cover.py @@ -0,0 +1,75 @@ +import numpy as np +from gosling.objects import * +from gosling.utils import defaultPD, defaultThrottle +from typing import Optional + + +class GameConstants: + MAX_SPEED_NO_BOOST = 1400 + MAX_SPEED_BOOST = 2300 + X_FIELD = 4000 + Y_FIELD = 5000 + CEILING_HEIGHT = 2000 + + +class Cover: + """ Position yourself between the ball and your own goal. 
+ + :param distance_ratio: At what ratio to position itself, 0 at own goal, 1 at ball + :type distance_ratio: float + """ + + QUICK_COMPENSATION_RANGE = 3000 # Tunable parameter used to determine whether agent should boost. + COVER_DONE_DIST = 500 + VELOCITY_CONTROL_GAIN = 100 + + def __init__(self, distance_ratio: float = 0.4): + self.distance_ratio = distance_ratio + self.agent: Optional[GoslingAgent] = None + + def run(self, agent: GoslingAgent): + """ Updates the controls for this Player. + + :param agent: Gosling agent. + :type agent: GoslingAgent + """ + self.agent = agent + cover_target = self._pos_between_ball_and_goal() + self._check_done_covering(cover_target) + self._control_agent(cover_target) + + def _get_dist_to_target(self, target: Vector3) -> float: + return (target - self.agent.me.location).magnitude() + + def _pos_between_ball_and_goal(self) -> Vector3: + goal_to_ball = self.agent.ball.location - self.agent.friend_goal.location + target = self.distance_ratio * goal_to_ball + self.agent.friend_goal.location + return self._clip_in_arena_bounds(target) + + def _control_agent(self, target: Vector3): + self._control_agent_steer(target) + self._control_agent_throttle(target) + + def _control_agent_steer(self, target: Vector3): + defaultPD(self.agent, self.agent.me.local(target - self.agent.me.location)) + + def _control_agent_throttle(self, target: Vector3): + use_boost = self._get_dist_to_target(target) > self.QUICK_COMPENSATION_RANGE + if use_boost: + defaultThrottle(self.agent, GameConstants.MAX_SPEED_BOOST) + else: + defaultThrottle(self.agent, min(GameConstants.MAX_SPEED_NO_BOOST, self._get_control_velocity(target))) + + def _get_control_velocity(self, target: Vector3) -> float: + return (target - self.agent.me.location).magnitude() + self.VELOCITY_CONTROL_GAIN + + def _check_done_covering(self, target: Vector3): + if self._get_dist_to_target(target) < self.COVER_DONE_DIST: + self.agent.pop() + + @staticmethod + def _clip_in_arena_bounds(target: Vector3) -> Vector3: + """Clips the location within the bounds of the arena""" + return Vector3(float(np.clip(target.x, -GameConstants.X_FIELD, GameConstants.X_FIELD)), + float(np.clip(target.y, -GameConstants.Y_FIELD, GameConstants.Y_FIELD)), + float(np.clip(target.z, 0, GameConstants.CEILING_HEIGHT))) diff --git a/src/strategy/players/gosling_style/dribble.py b/src/strategy/players/gosling_style/dribble.py new file mode 100644 index 0000000..a8de702 --- /dev/null +++ b/src/strategy/players/gosling_style/dribble.py @@ -0,0 +1,97 @@ +from gosling.objects import * +from gosling.routines import * +from gosling.utils import defaultPD, defaultThrottle +from physics.math import Vec3 +from typing import Optional + + +class Dribble: + """ Dribble with the ball to a particular target. 
+ + :param target: The target to which we will dribble + :type target: Vec3 + :param flick: Whether to flick in the end or not to reach the target + :type flick: bool + """ + NEAR_BALL_CONTROL_RANGE = 500 + STEP = 1 / 60 + FLICK_DISTANCE = 2500 + + BALL_ACQUISITION_RANGE = 350 + BALL_CONTROL_XY_OFFSET = 25 + BALL_CONTROL_MAX_THROTTLE = 500 + BALL_CONTROL_ACCELERATION = 500 + + APPROACH_MAX_CONTROL_VELOCITY = 800 + APPROACH_BASE_VELOCITY = 300 + APPROACH_MINIMUM_VELOCITY = 2300 + APPROACH_VELOCITY_COMPENSATION = 25 + APPROACH_VELOCITY_TUNING = 2 + + def __init__(self, target: Vec3, flick: bool = True): + self.target = Vector3(target.x, target.y, target.z) + self.flick = flick + self.agent: Optional[GoslingAgent] = None + + def run(self, agent: GoslingAgent): + """Dribble with the ball to a particular target + + :param agent: Gosling agent. + :type agent: GoslingAgent + """ + self.agent = agent + if self._distance_to_ball() < self.NEAR_BALL_CONTROL_RANGE: + self._close_ball_control() + else: + self._approach_ball_control() + self._flick_to_target(self.flick) + + def _distance_to_ball(self) -> float: + return (self.agent.ball.location - self.agent.me.location).magnitude() + + def _close_ball_control(self): + defaultPD(self.agent, self._get_close_local_target()) + defaultThrottle(self.agent, self._get_close_throttle()) + + def _get_close_local_target(self) -> Vector3: + offset = -(self.target - self.agent.ball.location).normalize() * self.BALL_CONTROL_XY_OFFSET + return self.agent.me.local(self._get_future_ball_center() + offset - self.agent.me.location) + + def _get_close_throttle(self) -> float: + future_agent_location = self.agent.me.location + self.agent.me.velocity * self.STEP + dist_to_target = (self._get_future_ball_center() - future_agent_location).magnitude() + acceleration_target = self.BALL_CONTROL_ACCELERATION * (dist_to_target / self.NEAR_BALL_CONTROL_RANGE) ** 1.25 + return max((self.agent.ball.velocity.magnitude() + acceleration_target), self.BALL_CONTROL_MAX_THROTTLE) + + def _get_future_ball_center(self) -> float: + return self.agent.ball.location + self.agent.ball.velocity * self.STEP + + def _approach_ball_control(self): + defaultPD(self.agent, self._get_approach_local_target()) + defaultThrottle(self.agent, self._get_approach_throttle()) + + def _get_approach_throttle(self) -> float: + own_speed = self.agent.me.velocity.magnitude() + controlled_approach_velocity = self.APPROACH_BASE_VELOCITY + self.APPROACH_VELOCITY_TUNING * \ + self._distance_to_ball() - own_speed + return min(controlled_approach_velocity, self.APPROACH_MINIMUM_VELOCITY) + + def _get_approach_local_target(self) -> Vector3: + offset = (self.agent.ball.location - self.target).normalize() * self.BALL_ACQUISITION_RANGE + vel_comp = - self.agent.me.velocity.normalize() * min(self._velocity_control(), + self.APPROACH_MAX_CONTROL_VELOCITY) + target_car_pos = offset + self.agent.ball.location + vel_comp + return self.agent.me.local(target_car_pos - self.agent.me.location) + + def _flick_to_target(self, flick: bool): + if self._distance_to_target() < self.FLICK_DISTANCE and self._distance_to_ball() < \ + self.NEAR_BALL_CONTROL_RANGE and flick: + vector_to_target = self.agent.me.local(self.target - self.agent.me.location) + self.agent.pop() + self.agent.push(flip(vector_to_target)) + + def _distance_to_target(self) -> float: + return (self.agent.me.location - self.target).magnitude() + + def _velocity_control(self) -> float: + return self.agent.me.velocity.magnitude() * self.APPROACH_VELOCITY_COMPENSATION diff --git 
a/src/strategy/players/gosling_style/get_boost.py b/src/strategy/players/gosling_style/get_boost.py new file mode 100644 index 0000000..7378918 --- /dev/null +++ b/src/strategy/players/gosling_style/get_boost.py @@ -0,0 +1,85 @@ +from gosling.utils import defaultPD, defaultThrottle +from gosling.objects import Vector3, GoslingAgent, boost_object +from typing import Optional, Tuple + + +class GameConstants: + MAX_SPEED_BOOST = 2300 + + +class GetBoost: + """Drives towards the nearest active boost in the specified area. If no active boost is found, waits on the nearest + pad. Only considers large boost pads. + + :param which_boost: Which region of the map to drive to. Either blue, red, mid, any + :type which_boost: str""" + + MINIMUM_BOOST_LEVEL = 90 + BOOST_MAP = { + 'blue': [3, 4], + 'red': [29, 30], + 'mid': [15, 18], + 'any': [3, 4, 15, 18, 29, 30] + } + + def __init__(self, which_boost: str = 'any'): + self.which_boost = which_boost + self.agent: Optional[GoslingAgent] = None + + def run(self, agent: GoslingAgent): + """Runs the routine, setting the controls on the agent. + + :param agent: The agent on which the controls will be set. + :type agent: GoslingAgent + """ + self.agent = agent + boost_pad, brake_upon_destination = self._get_closest_boost_pad() + self._drive_to_boost_pad(boost_pad, brake_upon_destination) + self._boost_filled() + + def _get_boost_areas(self): + # 3, 4, 15, 18, 29, 30 find closest active + if self.which_boost == 'blue': + return ['blue', 'mid'] + elif self.which_boost == 'red': + return ['red', 'mid'] + elif self.which_boost == 'mid': + return ['mid', 'blue', 'red'] + elif self.which_boost == 'any': + return ['any'] + else: + raise ValueError(f"which_boost cannot be {self.which_boost}") + + def _get_closest_boost_pad(self) -> Tuple[boost_object, bool]: + for area in self._get_boost_areas(): + distances = {((self.agent.me.location - self.agent.boosts[pad_index].location).magnitude()): pad_index + for pad_index in self.BOOST_MAP[area] if self.agent.boosts[pad_index].active} + if len(distances) > 0: + boost_pad = distances[min(distances.keys())] + brake_upon_destination = False + break + else: + distances = {((self.agent.me.location - self.agent.boosts[pad_index].location).magnitude()): pad_index + for pad_index in self.BOOST_MAP[self._get_boost_areas()[0]]} + boost_pad = distances[min(distances.keys())] + brake_upon_destination = True + return boost_pad, brake_upon_destination + + def _drive_to_boost_pad(self, boost_pad: boost_object, brake_upon_destination: bool): + # Driving towards selected boost + target = self.agent.boosts[boost_pad].location # dont forget to remove this! 
+ local_target = self.agent.me.local(target - self.agent.me.location) + defaultPD(self.agent, local_target) + if brake_upon_destination: + speed = min(GameConstants.MAX_SPEED_BOOST, (self.agent.me.location - target).magnitude()) + else: + speed = GameConstants.MAX_SPEED_BOOST + defaultThrottle(self.agent, speed) + + def _boost_filled(self) -> bool: + minimum_boost_level = self.MINIMUM_BOOST_LEVEL + if self.agent.me.boost >= minimum_boost_level: + self.agent.pop() + defaultThrottle(self.agent, 0) + return True + return False diff --git a/src/strategy/players/gosling_style/intercept.py b/src/strategy/players/gosling_style/intercept.py new file mode 100644 index 0000000..88fd7c5 --- /dev/null +++ b/src/strategy/players/gosling_style/intercept.py @@ -0,0 +1,193 @@ +from gosling.routines import * +from gosling.objects import GoslingAgent +from gosling.utils import defaultPD, defaultThrottle +from physics.math import Vec3 +from physics.simulations import Ball +from math import acos, sin, degrees +from rlbot.utils.structures.ball_prediction_struct import Slice +from typing import Optional + + +class GameConstants: + """"Ingame constants used for driving""" + MAX_SPEED_NO_BOOST = 1410 + MAX_SPEED = 2300 + BOOST_BONUS_ACCELERATION = 991.66 + BOOST_CONSUMPTION_RATE = 33.33 + IN_GAME_FPS = 60 + FIELD_LENGTH = 5150 + CAP_X_IN_GOAL_LENGTH = 750 + ACCELERATION_FORMULA_B = 1600 + ACCELERATION_FORMULA_A = -1.02 + + +class Intercept: + """Intercepts the ball from the current location. """ + + # Interception settings + DETECT_MISS_RANGE = 150 # The range in which we validate if the car has passed the ball - and missed + MAX_Z_INTERCEPT = 100 # The maximum height at which the car will try to flip to hit the ball + MAX_Z_FLIP = 80 + MAX_TIME_REMAING_BEFORE_FLIP = 0.5 + + # Flip settings + FLIP_RANGE = 1000 # The range at which to flip + + # Ball prediction settings + SLICE_ANGLE = 90 # Angle of the cone in front of the car for which interceptions are considered + TUNABLE_DELAY = 0.2 # Delay to reduce inaccuracies in ball predictions to prevent flipping prematurely + INTERCEPTION_MARGIN = 50 # Max distance at which the simulation will classify as hitting the ball + SIM_TIME_STEP = 1 / 30 # Time step for velocity simulations + GRID_SEARCH_INTERVAL = 20 # How many steps per second to consider for interception analysis + + def run(self, agent: GoslingAgent): + """Sets controls for the provided agent. + + :param agent: Gosling agent + :type agent: GoslingAgent + """ + target = self._get_target(agent) + if self._detect_missed_ball(agent): + agent.pop() + return + + if target: + self._control_with_intercept_point(agent, target) + else: + self._control_without_intercept_point(agent) + + @staticmethod + def _calc_local_diff(agent: GoslingAgent, ball_pos: Vector3) -> Vector3: + """Calculate the local vector from the agent to the ball. 
+ + :param agent: Gosling agent object + :type agent: GoslingAgent + :param ball_pos: The position of the ball + :type ball_pos: Vector3 + :returns: A vector describing the local vector from car to ball + :rtype: Vector3 + """ + + # Transforms the ball and car coordinates local coordinates of the car + car_pos = agent.me.location.flatten() # 2D coordinate of the car + cos_theta = (car_pos.dot(ball_pos)) / (car_pos.magnitude() * ball_pos.magnitude()) + sin_theta = sin(acos(cos_theta)) + + # Local coordinates from the car its perspective + car_to_ball = ball_pos - car_pos # Relative coord wrt the car + local_diff = Vector3(car_to_ball.x * cos_theta - car_to_ball.y * sin_theta, + car_to_ball.x * sin_theta + car_to_ball.y * cos_theta, 0).flatten() + + return local_diff + + @staticmethod + def _time_to_idx(time: float) -> int: + """Converts time to an index for the BallPrediction struct. + + :param time: Time for which to retrieve the index + :type time: float + :returns: Index integer + :rtype: int + """ + return round(time * GameConstants.IN_GAME_FPS) + + @staticmethod + def _get_max_acceleration(vel: float, boost: bool = True) -> float: + """"Get the maximum acceleration for a given velocity.""" + if vel > GameConstants.MAX_SPEED_NO_BOOST: + return boost * GameConstants.BOOST_BONUS_ACCELERATION + else: + return vel * GameConstants.ACCELERATION_FORMULA_A + boost + GameConstants.ACCELERATION_FORMULA_B + \ + GameConstants.BOOST_BONUS_ACCELERATION # Coefficients for the velocity vs amax curve. + + def _in_range(self, time_left: float, dist_left: float, vel: float) -> bool: + """Check whether to start maximum acceleration, Calculates if we reach the target distance, + if we use maximum acceleration, accounts for the amount of boost we have left + """ + total_steps = round(time_left / self.SIM_TIME_STEP) # Amount of steps to loop through + + # Physics simulation stepping starts here + for i in range(total_steps): + a = Intercept._get_max_acceleration(vel, True) + vel = vel + a * self.SIM_TIME_STEP + dist_left -= vel * self.SIM_TIME_STEP + + # Start if we can reach the target within the given time by max acceleration + if -self.INTERCEPTION_MARGIN < dist_left < self.INTERCEPTION_MARGIN: + return True + # we do not reach the target with full boost + return False + + def _get_target(self, agent: GoslingAgent) -> Optional[Slice]: + """"Retrieves the interception target""" + # For predicting the location of the ball for the coming 6 seconds. + ball_prediction = Ball.get_ball_prediction() + min_t, max_t = 0, ball_prediction.num_slices / 60 # By default (0, 6) - From RLBOT + + for i in range(min_t, round(max_t * self.GRID_SEARCH_INTERVAL)): # Grid + time = i / self.GRID_SEARCH_INTERVAL + target = ball_prediction.slices[self._time_to_idx(time)] + + # Skip if the target is too high or low! 
+ if target.physics.location.z > self.MAX_Z_INTERCEPT: + continue + + offset_angle = Vec3.from_other_vec( + self._calc_local_diff(agent, Vector3(target.physics.location))).angle_2d(Vec3(x=0, y=1, z=0)) + if degrees(offset_angle) < self.SLICE_ANGLE: + ball_distance = (agent.me.location - Vector3(target.physics.location)).magnitude() + + if self._in_range(time, ball_distance, agent.me.velocity.magnitude()): + return target + return None + + def _control_with_intercept_point(self, agent: GoslingAgent, target: Slice): + """""Controls the agent towards the given interception point""" + t_rem = target.game_seconds - agent.time # Time remaining for impact + + self._goto_target(agent, Vector3(target.physics.location.x, + target.physics.location.y, + target.physics.location.z)) + dist_left = (agent.me.location - agent.ball.location).magnitude() + + if dist_left < self.FLIP_RANGE: + dz = agent.ball.location.z - agent.me.location.z + if dz < self.MAX_Z_FLIP and t_rem < self.MAX_TIME_REMAING_BEFORE_FLIP: + # print(f"Remaining time: {t_rem} Target Z: {target.physics.location.z}" + # f" Agent Z: {agent.me.location.z} Ball Z: {agent.ball.location.z}") + + flip_target = agent.me.local(agent.ball.location - agent.me.location) + agent.pop() + agent.push(flip(flip_target)) # flip to local target + return + + def _control_without_intercept_point(self, agent: GoslingAgent): + """Controls the agent in case no interception point is found. Can switch to Aerial.""" + self._goto_target(agent, agent.ball.location) + + def _detect_missed_ball(self, agent: GoslingAgent) -> bool: + """"Checks if the car is close to the ball and missed a shot. """ + xy_diff = (agent.ball.location - agent.me.location).flatten() + if self.DETECT_MISS_RANGE > xy_diff.magnitude(): + xy_norm = xy_diff.normalize() + vel_norm = (agent.me.velocity - agent.ball.velocity).flatten().normalize() + + # Negative dot implies negative projection -> Ball is behind the agent! + if vel_norm.dot(xy_norm) < 0: + # print(f"Missed ball! {vel_norm.dot(xy_norm)}") + return True + return False + + def _goto_target(self, agent: GoslingAgent, target: Vector3): + """"Drives the agent to a particular target. """ + # Some adjustment to the final target to ensure it's inside the field and we dont try to drive through + # any goalposts to reach it + if abs(agent.me.location[1]) > GameConstants.FIELD_LENGTH: + target[0] = cap(target[0], -GameConstants.CAP_X_IN_GOAL_LENGTH, GameConstants.CAP_X_IN_GOAL_LENGTH) + + local_target = agent.me.local(target - agent.me.location) + angles = defaultPD(agent, local_target) + defaultThrottle(agent, GameConstants.MAX_SPEED) + + agent.controller.boost = False + agent.controller.handbrake = True if abs(angles[1]) > 2.3 else agent.controller.handbrake diff --git a/src/strategy/players/gosling_style/keeper.py b/src/strategy/players/gosling_style/keeper.py new file mode 100644 index 0000000..a13196b --- /dev/null +++ b/src/strategy/players/gosling_style/keeper.py @@ -0,0 +1,59 @@ +import math +from gosling.routines import aerial_shot, jump_shot, side +from gosling.objects import GoslingAgent, Vector3 +from physics.math import Vec3 +from physics.simulations.ball import * +from strategy.players.gosling_style.intercept import Intercept + + +class Keeper: + """Calculates the future position of the ball and tries to intercept it.""" + + JUMP_SHOT_HEIGHT_LIMIT = 300 + + def __init__(self): + + self.agent: Optional[GoslingAgent] = None + + def run(self, agent: GoslingAgent): + """ + Assigns which type of keeper routine we do. 
+ We use aerial_shot for shots higher than 300uu and we use jump_shot for shots lower than 300uu + + :param agent: Gosling agent. + :type agent: GoslingAgent + """ + self.agent = agent + ball_slice_in_goal = Ball.predict_future_goal() + if ball_slice_in_goal: + self._choose_keeping_routine(ball_slice_in_goal) + else: + agent.pop() + agent.push(Intercept()) + return + + def _choose_keeping_routine(self, ball_slice_in_goal: Slice): + ball_intercept_location = self._future_intercept_location(ball_slice_in_goal) + + # The direction we shoot the ball towards + shot_vector = Vector3(0, -side(self.agent.team), 0) + + # Determine which gosling routine we want to use for keeping + if ball_intercept_location.physics.location.z > self.JUMP_SHOT_HEIGHT_LIMIT: + self.agent.pop() + self.agent.push(aerial_shot(Vector3(ball_intercept_location.physics.location), + ball_intercept_location.game_seconds, shot_vector, None)) + else: + self.agent.pop() + self.agent.push(jump_shot(Vector3(ball_intercept_location.physics.location), + ball_intercept_location.game_seconds, shot_vector, None)) + + def _future_intercept_location(self, ball_slice_in_goal: Slice) -> Slice: + time_of_goal = ball_slice_in_goal.game_seconds + time_until_goal = time_of_goal - self.agent.time + + # Scaling linear based on ball velocity, 0 = 0.80, 3000 = 0.95 + vel_ball = Vec3.from_other_vec(Ball.predict_future_goal().physics.velocity).magnitude() + factor = (vel_ball - 0) / (3000 - 0) * (0.95 - 0.8) + 0.8 # TODO: Imre - Explain please :p + time_we_intercept = self.agent.time + time_until_goal * factor + return Ball.find_slice_at_time(time_we_intercept) diff --git a/src/strategy/players/gosling_style/kickoff.py b/src/strategy/players/gosling_style/kickoff.py new file mode 100644 index 0000000..b5bac73 --- /dev/null +++ b/src/strategy/players/gosling_style/kickoff.py @@ -0,0 +1,29 @@ +from gosling.routines import * +from gosling.utils import defaultPD, defaultThrottle +from gosling.objects import GoslingAgent + + +class GameConstants: + MAX_SPEED_BOOST = 2300 + + +class KickoffGosling: + """ Drive towards the ball and dodge towards it when close. """ + + FLIP_DISTANCE = 650 + TARGET_OFFSET_FROM_CENTER = 200 + + def run(self, agent: GoslingAgent): + """ Run kickoff routine. + + :param agent: Gosling agent. + """ + target = agent.ball.location + Vector3(0, self.TARGET_OFFSET_FROM_CENTER * side(agent.team), 0) + + local_target = agent.me.local(target - agent.me.location) + defaultPD(agent, local_target) + defaultThrottle(agent, GameConstants.MAX_SPEED_BOOST) + + if local_target.magnitude() < self.FLIP_DISTANCE: + agent.pop() + agent.push(flip(agent.me.local(agent.foe_goal.location - agent.me.location))) diff --git a/src/strategy/players/gosling_style/prepare.py b/src/strategy/players/gosling_style/prepare.py new file mode 100644 index 0000000..3b029fc --- /dev/null +++ b/src/strategy/players/gosling_style/prepare.py @@ -0,0 +1,74 @@ +from gosling.utils import defaultPD, defaultThrottle +from gosling.objects import * +import numpy as np +from typing import Optional + + +class GameConstants: + max_speed_no_boost = 1400 + max_speed_boost = 2300 + x_field = 3500 + y_field = 4500 + ceiling_height = 2000 + + +class Prepare: + """ Position yourself behind the ball as seen from the enemy goal. + + :param center_player: Whether to prepare with an offset to the ball flipped over the y-axis. + :type center_player: bool + :param offset_in_ball_direction: How far to offset the target position from the ball. 
+ :type offset_in_ball_direction: float + """ + QUICK_COMPENSATION_RANGE = 1500 # If far away quickly compensate by boosting. Otherwise do not. + FINISH_DIST = 750 + CONTROL_VELOCITY_GAIN = 500 + + def __init__(self, center_player=False, offset_in_ball_direction=2000): + self.offset_in_ball_direction = offset_in_ball_direction + self.center_player = center_player + self.agent: Optional[GoslingAgent] = None + + def run(self, agent: GoslingAgent): + """ Updates the controls for this Player. + + :param agent: Gosling agent. + :type agent: GoslingAgent + """ + self.agent = agent + prepare_to_shoot_target = self._get_prepared_location() + self._go_to_target(prepare_to_shoot_target) + + def _get_prepared_location(self) -> Vector3: + # Extend the vector from the foe goal through the ball + direction = (self.agent.ball.location - self.agent.foe_goal.location).normalize() + ball_direction_vector = self.offset_in_ball_direction * direction + if self.center_player: + ball_direction_vector.x *= -1 + + ball_to_foe_goal = (self.agent.ball.location - self.agent.foe_goal.location) + target = ball_direction_vector + ball_to_foe_goal + self.agent.foe_goal.location + return self._clip_in_arena_bounds(target) + + def _go_to_target(self, target: Vector3): + # Control our car + defaultPD(self.agent, self.agent.me.local(target - self.agent.me.location)) + dist_to_target = (target - self.agent.me.location).magnitude() + if dist_to_target < self.FINISH_DIST: + self.agent.pop() + + # Quickly compensate if far away + if dist_to_target > self.QUICK_COMPENSATION_RANGE: + defaultThrottle(self.agent, GameConstants.max_speed_boost) + else: + defaultThrottle(self.agent, min(GameConstants.max_speed_no_boost, self._get_controlled_velocity(target))) + + def _get_controlled_velocity(self, target: Vector3) -> float: + return (target - self.agent.me.location).magnitude() + self.CONTROL_VELOCITY_GAIN + + @staticmethod + def _clip_in_arena_bounds(target: Vector3) -> Vector3: + """Clips the location within the bounds of the arena""" + return Vector3(float(np.clip(target.x, -GameConstants.x_field, GameConstants.x_field)), + float(np.clip(target.y, -GameConstants.y_field, GameConstants.y_field)), + float(np.clip(target.z, 0, GameConstants.ceiling_height))) diff --git a/src/strategy/players/gosling_style/shadowing.py b/src/strategy/players/gosling_style/shadowing.py new file mode 100644 index 0000000..55aa3e6 --- /dev/null +++ b/src/strategy/players/gosling_style/shadowing.py @@ -0,0 +1,102 @@ +from gosling.routines import * +from gosling.utils import defaultPD, defaultThrottle +from rlbot.agents.base_agent import SimpleControllerState +from gosling.objects import * +from physics.math import Vec3 +import math + + +class State: + INITIALIZE = 0 + GOTO = 1 + CORRECT_ANGLE = 2 + + +class GameConstants: + MAX_SPEED_BOOST = 2300 + FIELD_LENGTH = 5150 + CAP_X_IN_GOAL_LENGTH = 750 + + +class Shadowing: + """ Drive back to the goalpost of your team.""" + + ANGLE_CORRECTION_DRIVE_DURATION = 0.5 + ANGLE_CORRECTION_SPEED = 500 + ANGLE_CORRECTION_ALLOWED_DIFF = 15 + + TARGET_RADIUS = 350 + TARGET_Y = 5115 + + def __init__(self): + self.scenario = None + self.goto_routine = None + self.target = None + self.dir_to_goal = None + self.agent: Optional[GoslingAgent] = None + + self.state = State.INITIALIZE + + # Correcting facing direction when we reached the target + self.start_time = 0.0 + self.toggle = 1 # Switches between 1 and -1 + + def run(self, agent: GoslingAgent): + """ Updates the controls for this Player. + + :param agent: Gosling agent. 
+ :type agent: GoslingAgent + """ + self.agent = agent + if self.state == State.INITIALIZE: + self._initialize_shadowing() + if self.state == State.GOTO: + self._go_to_target() + if self.state == State.CORRECT_ANGLE: + self._correct_car_angle() + + def _initialize_shadowing(self): + self.target = Vector3(0, self.TARGET_Y * side(self.agent.team), 0) + self.dir_to_goal = Vector3(0, -side(self.agent.team), 0).normalize() + self.state = State.GOTO + + def _go_to_target(self): + self._drive_to_post() + if self.agent.me.local(self.target - self.agent.me.location).magnitude() <= self.TARGET_RADIUS: + self.state = State.CORRECT_ANGLE + self.start_time = self.agent.time + + def _correct_car_angle(self): + angle = self.agent.me.local(self.dir_to_goal).angle(Vector3(1, 0, 0)) + if angle < math.radians(self.ANGLE_CORRECTION_ALLOWED_DIFF): + self.agent.pop() + + if self.agent.time - self.start_time > self.ANGLE_CORRECTION_DRIVE_DURATION: + self.toggle *= -1 + self.start_time = self.agent.time + + defaultPD(self.agent, self.toggle * self.agent.me.local(self.dir_to_goal), self.toggle) + defaultThrottle(self.agent, self.ANGLE_CORRECTION_SPEED, self.toggle) + + def _drive_to_post(self): + car_to_target = self.target - self.agent.me.location + distance_remaining = car_to_target.flatten().magnitude() + + # See commends for adjustment in jump_shot or aerial for explanation + side_of_vector = sign(self.dir_to_goal.cross((0, 0, 1)).dot(car_to_target)) + car_to_target_perp = car_to_target.cross((0, 0, side_of_vector)).normalize() + adjustment = car_to_target.angle(self.dir_to_goal) * distance_remaining / 3.14 + final_target = self.target + (car_to_target_perp * adjustment) + + # Some adjustment to the final target to ensure it's inside the field and we dont try to drive through + # any goalposts to reach it + if abs(self.agent.me.location[1]) > GameConstants.FIELD_LENGTH: + final_target[0] = cap(final_target[0], -GameConstants.CAP_X_IN_GOAL_LENGTH, + GameConstants.CAP_X_IN_GOAL_LENGTH) + + local_target = self.agent.me.local(final_target - self.agent.me.location) + angles = defaultPD(self.agent, local_target) + defaultThrottle(self.agent, GameConstants.MAX_SPEED_BOOST) + + self.agent.controller.boost = False + self.agent.controller.handbrake = True if abs(angles[1]) > 2.3 else self.agent.controller.handbrake diff --git a/src/strategy/players/gosling_style/wait.py b/src/strategy/players/gosling_style/wait.py new file mode 100644 index 0000000..e023068 --- /dev/null +++ b/src/strategy/players/gosling_style/wait.py @@ -0,0 +1,70 @@ +from typing import Optional +from gosling.objects import * +from gosling.utils import defaultPD, defaultThrottle + + +class State: + INITIALIZE = 0 + WAIT = 1 + + +class Wait: + """ A wait function to make a drone stay at its current position. + + :param seconds: How long to wait. If it is None waits forever + :type seconds: float + :param face_ball: Whether to keeper correcting the angle to face the ball while waiting. 
+ :type face_ball: bool + """ + + ANGLE_CORRECTION_DRIVE_DURATION = 0.5 + ANGLE_CORRECTION_SPEED = 250 + ANGLE_CORRECTION_ALLOWED_DIFF = 15 + + def __init__(self, seconds: float = None, face_ball: bool = False): + # Starting state + self.state = State.INITIALIZE + + # Settings for this instance + self.seconds: float = seconds + self.finish_timer: Optional[float] = None + self.agent: Optional[GoslingAgent] = None + self.face_ball = face_ball + + # Correcting facing direction if enabled + self.start_time = 0.0 + self.toggle = 1 # Switches between 1 and -1 + + def run(self, agent: GoslingAgent): + """Wait forever + + :param agent: Gosling agent. + :type agent: GoslingAgent + """ + self.agent = agent + if self.state == State.INITIALIZE: + self._initialize_wait() + if self._waiting_done(): + agent.pop() + + defaultThrottle(agent, 0) + if self.face_ball: + self._face_ball() + + def _waiting_done(self) -> bool: + return self.finish_timer is not None and self.agent.time > self.finish_timer + + def _initialize_wait(self): + if self.seconds is not None: + self.finish_timer = self.agent.time + self.seconds + self.state = State.WAIT + + def _face_ball(self): + vec_to_ball = self.agent.ball.location - self.agent.me.location + angle = self.agent.me.local(vec_to_ball).angle(Vector3(1, 0, 0)) + if angle > math.radians(self.ANGLE_CORRECTION_ALLOWED_DIFF): + if self.agent.time - self.start_time > self.ANGLE_CORRECTION_DRIVE_DURATION: + self.toggle *= -1 + self.start_time = self.agent.time + defaultPD(self.agent, self.toggle * self.agent.me.local(vec_to_ball), self.toggle) + defaultThrottle(self.agent, self.ANGLE_CORRECTION_SPEED, self.toggle) diff --git a/src/strategy/utils/__init__.py b/src/strategy/utils/__init__.py new file mode 100644 index 0000000..9f8cbf3 --- /dev/null +++ b/src/strategy/utils/__init__.py @@ -0,0 +1,3 @@ +from strategy.utils.gosling_wrapper import GoslingAgentWrapper + +__all__ = ["GoslingAgentWrapper"] diff --git a/src/strategy/utils/gosling_wrapper.py b/src/strategy/utils/gosling_wrapper.py new file mode 100644 index 0000000..b590824 --- /dev/null +++ b/src/strategy/utils/gosling_wrapper.py @@ -0,0 +1,60 @@ +from gosling.objects import GoslingAgent +from strategy.base_ccp import SharedInfo +from typing import List, Tuple, Optional + + +class GoslingAgentWrapper(GoslingAgent, SharedInfo): + """"Gosling wrapper used to enable the use of Gosling routines together with the CCP model. + + :param name: The name of the drone as given by RLBOT. + :param team: The team of the drone as given by RLBOT (0 for blue or 1 for orange). + :param index: The unique index of the drone as given by RLBOT.""" + + def __init__(self, name: str, team: int, index: int): + self.flush: bool = None + self.stack: List = None + + self.index = index + super(GoslingAgentWrapper, self).__init__(name, team, index) + self.initialize_agent() + + def get_field_info(self): + """"Returns the field info from the world model. + + :returns: Field info as if retrieved by a real RLBOT agent. + """ + return self.world.field_info + + def update(self, routine) -> Tuple[Optional[List], bool, bool]: + """"Update this wrapper and obtain the controls for the current routine. + + :return: The stack containing all newly obtained routines and the current one. + :rtype: list + :return: Done flag if the routine stack is empty. + :rtype: bool + :return: Whether the agents actions have been flushed. 
+ :rtype: bool + """ + + # Reset controller + self.controller.__init__() + self.flush = False + + # Get ready, then preprocess + if not self.ready: + self.get_ready(self.world.packet) + self.preprocess(self.world.packet) + + # Create a fake stack and run. Run empties the stack when the routine finishes, resulting in done = True. + self.stack = [routine] + self.stack[-1].run(self) + + if len(self.stack) > 0: + return self.stack, len(self.stack) == 0, self.flush + else: + return None, len(self.stack) == 0, self.flush + + def flush_actions(self): + """ Removes all the items from the stack""" + self.stack = [] + self.flush = True diff --git a/src/world/__init__.py b/src/world/__init__.py new file mode 100644 index 0000000..70d2c9c --- /dev/null +++ b/src/world/__init__.py @@ -0,0 +1,3 @@ +from world.world import World + +__all__ = ["World"] diff --git a/src/world/components/__init__.py b/src/world/components/__init__.py new file mode 100644 index 0000000..d4ebeeb --- /dev/null +++ b/src/world/components/__init__.py @@ -0,0 +1,8 @@ +from world.components.world_component import WorldComponent +from world.components.ball import Ball +from world.components.world_boost_pad import WorldBoostPad +from world.components.car import Car +from world.components.game import Game +from world.components.team import Team + +__all__ = ["WorldComponent", "Ball", "WorldBoostPad", "Car", "Game", "Team"] diff --git a/src/world/components/ball.py b/src/world/components/ball.py new file mode 100644 index 0000000..023fc70 --- /dev/null +++ b/src/world/components/ball.py @@ -0,0 +1,35 @@ +from world.components import WorldComponent +from dataclasses import dataclass +from rlbot.utils.structures.game_data_struct import BallInfo +from rlbot.utils.structures.game_data_struct import DropShotInfo +from rlbot.utils.structures.game_data_struct import CollisionShape +from rlbot.utils.structures.game_data_struct import Touch +from rlbot.utils.structures.game_data_struct import Physics + + +@dataclass +class Ball(WorldComponent): + """"Ball Component for the world model. + + :param info: Information about the ball. + :type BallInfo: + """ + + collision_shape: CollisionShape + drop_shot_info: DropShotInfo + latest_touch: Touch + physics: Physics + + def __init__(self, info: BallInfo): + self.update(info) + + def update(self, info: BallInfo): + """"Update function for the ball class. Updates the internal state of this component to the current step. + + :param info: Information about the ball. + :type info: BallInfo + """ + self.collision_shape = info.collision_shape + self.drop_shot_info = info.drop_shot_info + self.latest_touch = info.latest_touch + self.physics = info.physics diff --git a/src/world/components/car.py b/src/world/components/car.py new file mode 100644 index 0000000..b72de37 --- /dev/null +++ b/src/world/components/car.py @@ -0,0 +1,50 @@ +from rlbot.utils.structures.game_data_struct import PlayerInfo +from rlbot.utils.structures.game_data_struct import Physics +from rlbot.utils.structures.game_data_struct import BoxShape +from world.components import WorldComponent +from dataclasses import dataclass + + +@dataclass +class Car(WorldComponent): + """"Car Component for the world model. + + :param info: Information about a single car. 
+ :type PlayerInfo: + """ + + spawn_id: int + physics: Physics + is_demolished: bool + has_wheel_contact: bool + is_super_sonic: bool + is_bot: bool + jumped: bool + double_jumped: bool + name: str + team: int + boost: int + hitbox: BoxShape + + def __init__(self, info: PlayerInfo): + self.update(info) + + def update(self, info: PlayerInfo): + """Update function for the Car class. Updates the internal state of this component to the current step. + + :param info: Information about a single car. + :type info: PlayerInfo + """ + + self.spawn_id: int = info.spawn_id + self.physics: Physics = info.physics + self.is_demolished: bool = info.is_demolished + self.has_wheel_contact: bool = info.has_wheel_contact + self.is_super_sonic: bool = info.is_super_sonic + self.is_bot: bool = info.is_bot + self.jumped: bool = info.jumped + self.double_jumped: bool = info.double_jumped + self.name: str = info.name + self.team: int = info.team + self.boost: int = info.boost + self.hitbox: BoxShape = info.hitbox diff --git a/src/world/components/game.py b/src/world/components/game.py new file mode 100644 index 0000000..cbf463f --- /dev/null +++ b/src/world/components/game.py @@ -0,0 +1,42 @@ +from world.components import WorldComponent +from dataclasses import dataclass +from rlbot.utils.structures.game_data_struct import GameInfo + + +@dataclass +class Game(WorldComponent): + """"Game Component for the world model. + + :param info: Metadata about a single match. + :type GameInfo: + """ + + game_speed: float + game_time_remaining: float + is_kickoff_pause: bool + is_match_ended: bool + is_overtime: bool + is_round_active: bool + is_unlimited_time: bool + seconds_elapsed: float + world_gravity_z: float + + def __init__(self, info: GameInfo): + self.update(info) + + def update(self, info: GameInfo): + """Update function for the Game class. Updates the internal state of this component to the current step. + + :param info: Metadata about a single match. + :type info: GameInfo + """ + + self.game_speed = info.game_speed + self.game_time_remaining = info.game_time_remaining + self.is_kickoff_pause = info.is_kickoff_pause + self.is_match_ended = info.is_match_ended + self.is_overtime = info.is_overtime + self.is_round_active = info.is_round_active + self.is_unlimited_time = info.is_unlimited_time + self.seconds_elapsed = info.seconds_elapsed + self.world_gravity_z = info.world_gravity_z diff --git a/src/world/components/team.py b/src/world/components/team.py new file mode 100644 index 0000000..6e56ff4 --- /dev/null +++ b/src/world/components/team.py @@ -0,0 +1,31 @@ +from world.components import WorldComponent +from rlbot.utils.structures.game_data_struct import TeamInfo +from typing import List +from world.components import Car + + +class Team(WorldComponent): + """"Team Component for the world model. + + :param info: Metadata about a single team. + :type TeamInfo: + """ + + index: int + score: int + cars: List[Car] + + def __init__(self, info: TeamInfo, cars: List[Car]): + self.index = info.team_index + self.update(info, cars) + + def update(self, info: TeamInfo, cars: List[Car]): + """Updates the internal state of this component to the current step. + + :param info: Metadata about a single team. 
+        :type info: TeamInfo
+        :param cars: All car components of the existing cars in the game
+        :type cars: List[Car]
+        """
+        self.score = info.score
+        self.cars = [car for car in cars if car.team == self.index]
diff --git a/src/world/components/world_boost_pad.py b/src/world/components/world_boost_pad.py
new file mode 100644
index 0000000..7353419
--- /dev/null
+++ b/src/world/components/world_boost_pad.py
@@ -0,0 +1,32 @@
+from world.components import WorldComponent
+from dataclasses import dataclass
+from rlbot.utils.structures.game_data_struct import Vector3
+from rlbot.utils.structures.game_data_struct import BoostPadState, BoostPad
+
+
+@dataclass
+class WorldBoostPad(WorldComponent):
+    """BoostPad Component for the world model.
+
+    :param info: Information about the current state of this boost pad.
+    :type info: BoostPadState
+    """
+
+    index: int
+    is_active: bool
+    timer: float
+    location: Vector3
+
+    def __init__(self, index, info: BoostPadState, boostpad: BoostPad):
+        self.index = index
+        self.update(info)
+        self.location = boostpad.location
+
+    def update(self, info: BoostPadState):
+        """Update function for the BoostPad class. Updates the internal state of this component to the current step.
+
+        :param info: The current state of this boost pad.
+        :type info: BoostPadState
+        """
+        self.is_active = info.is_active
+        self.timer = info.timer
diff --git a/src/world/components/world_component.py b/src/world/components/world_component.py
new file mode 100644
index 0000000..53355e2
--- /dev/null
+++ b/src/world/components/world_component.py
@@ -0,0 +1,9 @@
+from abc import ABC, abstractmethod
+
+
+class WorldComponent(ABC):
+    """Base component for the world model."""
+
+    @abstractmethod
+    def update(self, *args, **kwargs):
+        """Update function for this component."""
diff --git a/src/world/world.py b/src/world/world.py
new file mode 100644
index 0000000..30f6a4b
--- /dev/null
+++ b/src/world/world.py
@@ -0,0 +1,58 @@
+from rlbot.utils.structures.game_data_struct import GameTickPacket, FieldInfoPacket
+from dataclasses import dataclass
+from typing import List
+from world.components import *
+from physics.math import Vec3
+
+
+@dataclass
+class World:
+    """Contains information about the state of the world.
+
+    :param packet: Object containing information about the environment.
+    :type packet: GameTickPacket
+    :param field_info: The field info as provided by RLBot's PythonHivemind
+    :type field_info: FieldInfoPacket
+    """
+
+    def __init__(self, packet: GameTickPacket, field_info: FieldInfoPacket):
+        self.cars: List[Car] = [Car(car) for car in packet.game_cars]
+        self.teams: List[Team] = [Team(team, self.cars) for team in packet.teams]
+        self.ball: Ball = Ball(packet.game_ball)
+        self.game: Game = Game(packet.game_info)
+
+        self.boost_pads: List[WorldBoostPad] = []
+        for index, (boost_pad_dynamic, boost_pad_static) in enumerate(zip(packet.game_boosts, field_info.boost_pads)):
+            self.boost_pads.append(WorldBoostPad(index, boost_pad_dynamic, boost_pad_static))
+
+        self.num_cars: int = packet.num_cars
+        self.num_boost: int = packet.num_boost
+        self.time_delta: float = 1 / 60
+
+        self.packet: GameTickPacket = packet
+        self.field_info: FieldInfoPacket = field_info
+
+    def update_obs(self, packet: GameTickPacket, field_info: FieldInfoPacket):
+        """Update function for the world model. This function is called once every step.
+
+        :param packet: Object containing information about the environment.
+ :type packet: GameTickPacket + :param field_info: The field info resulting from rlbots PythonHivemind + :type field_info: FieldInfoPacket + """ + for index, player_info in enumerate(packet.game_cars): + self.cars[index].update(player_info) + for index, boost_pad in enumerate(packet.game_boosts): + self.boost_pads[index].update(boost_pad) + + self.ball.update(packet.game_ball) + self.game.update(packet.game_info) + for index, team in enumerate(packet.teams): + self.teams[index].update(team, self.cars) + + self.packet = packet + self.field_info = field_info + + def calc_dist_to_ball(self, physics_object): + return (Vec3.from_other_vec(physics_object.physics.location) - Vec3.from_other_vec( + self.ball.physics.location)).magnitude()
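
A minimal, hypothetical sketch of how the pieces in this patch fit together at runtime. It assumes RLBot's PythonHivemind API (initialize_hive, get_outputs, drone_indices) and that SharedInfo in strategy/base_ccp.py exposes world and drones as class-level attributes, and that strategy/coaches/__init__.py exports TripleRotations; the class name SketchHivemind is invented. The actual wiring lives in src/main.py (not reproduced in this section) and remains authoritative.

from typing import Dict

from rlbot.agents.hivemind.python_hivemind import PythonHivemind
from rlbot.utils.structures.bot_input_struct import PlayerInput
from rlbot.utils.structures.game_data_struct import GameTickPacket

from strategy.base_ccp import SharedInfo      # assumed to hold world/drones as class attributes
from strategy.coaches import TripleRotations  # assumed export of strategy/coaches/__init__.py
from strategy.drone import Drone
from world import World


class SketchHivemind(PythonHivemind):  # hypothetical stand-in for the hivemind in src/main.py
    def initialize_hive(self, packet: GameTickPacket) -> None:
        # Build the shared world model once; every CCP class reads it through SharedInfo.
        SharedInfo.world = World(packet, self.get_field_info())

        # One Drone per controlled car, then hand the squad to a coach.
        SharedInfo.drones = [Drone(packet.game_cars[i].name, packet.game_cars[i].team, i)
                             for i in self.drone_indices]
        self.coach = TripleRotations()

    def get_outputs(self, packet: GameTickPacket) -> Dict[int, PlayerInput]:
        # Refresh the world model, let the coach step its captains/players, then read the controls back.
        SharedInfo.world.update_obs(packet, self.get_field_info())
        self.coach.step()
        return {drone.index: drone.get_player_input() for drone in SharedInfo.drones}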