Commit: add drifting

Fer14 committed Sep 10, 2024
1 parent 3177626 commit 3576e1e
Showing 9 changed files with 81 additions and 8 deletions.
20 changes: 17 additions & 3 deletions car.py
@@ -14,7 +14,7 @@

class Car:

    def __init__(self, position, angle=0):
    def __init__(self, position, angle=0, len_positions=15):

        # self.position = [690, 740] # Starting Position
        self.position = position
@@ -38,11 +38,25 @@ def __init__(self, position, angle=0):
        self.time = 0  # Time Passed
        self.laps = 0
        self.last_position = self.position
        self.last_positions = deque(maxlen=15)
        self.last_angles = deque(maxlen=15)
        self.last_positions = deque(maxlen=len_positions)
        self.last_angles = deque(maxlen=len_positions)
        self.crashed = False
        self.n_drifts_left = 0
        self.n_drifts_right = 0
        self.len_positions = len_positions

        drift = pygame.image.load("./images/drift.png").convert_alpha()
        self.drift = pygame.transform.scale(drift, (20, 40))

        pygame.image.save(self.drift, "drift.png")

    def draw(self, screen, draw_radar=False):
        if self.n_drifts_left >= 3 or self.n_drifts_right >= 3:
            if len(self.last_positions) >= self.len_positions:
                screen.blit(
                    pygame.transform.rotate(self.drift, 90 + self.angle),
                    self.last_positions[self.len_positions - 2],
                )
        screen.blit(self.rotated_sprite, self.position)  # Draw Sprite
        if draw_radar:
            self.draw_radar(screen)  # OPTIONAL FOR SENSORS
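
The new Car.draw logic stamps a tire-mark sprite behind the car once it has been turning the same way for at least three consecutive frames and the position history is full. A minimal sketch of the idea, with illustrative names (draw_drift_marks, mark, trail are not from the repo):

    import pygame

    def draw_drift_marks(screen, mark, trail, angle, drifting):
        # Mirrors Car.draw above: stamp only while drifting (several
        # consecutive same-direction turns) and once the deque of past
        # positions has filled up to its maxlen.
        if drifting and len(trail) == trail.maxlen:
            rotated = pygame.transform.rotate(mark, 90 + angle)
            # trail[-2] is a position from a frame back, matching
            # last_positions[len_positions - 2] when the deque is full
            screen.blit(rotated, trail[-2])
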
Binary file added images/drift.png
Binary file not shown.
23 changes: 23 additions & 0 deletions neat_/neat_car.py
@@ -60,6 +60,29 @@ def action(self):
        output = self.net.activate(input)
        choice = output.index(max(output))

        if choice == 0:
            self.angle += 10  # Left
            self.n_drifts_left += 1
            self.n_drifts_right = 0
        elif choice == 1:
            self.angle -= 10  # Right
            self.n_drifts_left = 0
            self.n_drifts_right += 1
        elif choice == 2:
            if self.speed - 2 >= 6:
                self.speed -= 2  # Slow Down
            self.n_drifts_right = 0
            self.n_drifts_left = 0
        else:
            self.n_drifts_right = 0
            self.n_drifts_left = 0
            self.speed += 2  # Speed Up

    def action_train(self):
        input = self.get_data()
        output = self.net.activate(input)
        choice = output.index(max(output))

        if choice == 0:
            self.angle += 10  # Left
        elif choice == 1:
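
The counter bookkeeping that feeds the drawing code recurs in every agent's action method: a left turn grows n_drifts_left and zeroes n_drifts_right, a right turn does the reverse, and slowing down or speeding up zeroes both, so the counters measure consecutive same-direction turns. Condensed into one hypothetical helper (the repo repeats the logic inline instead):

    def update_drift_counters(car, choice):
        # choice follows the convention above: 0 = left, 1 = right,
        # 2 = slow down, anything else = speed up.
        if choice == 0:
            car.n_drifts_left += 1
            car.n_drifts_right = 0
        elif choice == 1:
            car.n_drifts_right += 1
            car.n_drifts_left = 0
        else:
            car.n_drifts_left = 0
            car.n_drifts_right = 0

The train-time variants skip this bookkeeping, as visible in action_train above, presumably because the marks are purely cosmetic and only rendered outside training.
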
2 changes: 1 addition & 1 deletion neat_/neat_race.py
@@ -22,7 +22,7 @@ def training_race(self, cars: list[NeatCar], genomes):

            # For Each Car Get The Action It Takes
            for car in cars:
                car.action()
                car.action_train()
            # Check If Car Is Still Alive
            # Increase Fitness If Yes And Break Loop If Not
            still_alive = 0
24 changes: 23 additions & 1 deletion policy_gradient/pg_car.py
@@ -108,7 +108,7 @@ def train(self, rewards):
        self.onpolicy_reset()
        return policy_loss.item()

    def action(self):
    def action_train(self):
        state = self.get_data()

        action = self.forward(state)
@@ -123,6 +123,28 @@ def action(self):
        else:
            self.speed += 2  # Speed Up

    def action(self):
        state = self.get_data()
        action = self.forward(state)

        if action == 0:
            self.angle += 10  # Left
            self.n_drifts_left += 1
            self.n_drifts_right = 0
        elif action == 1:
            self.angle -= 10  # Right
            self.n_drifts_left = 0
            self.n_drifts_right += 1
        elif action == 2:
            self.n_drifts_right = 0
            self.n_drifts_left = 0
            if self.speed - 2 >= 6:
                self.speed -= 2  # Slow Down
        else:
            self.n_drifts_right = 0
            self.n_drifts_left = 0
            self.speed += 2  # Speed Up

    def get_reward(self):
        if self.crashed:
            self.crashed = False
2 changes: 1 addition & 1 deletion policy_gradient/pg_race.py
@@ -62,7 +62,7 @@ def training_race(self, car: PGCar, episodes, train_every):
                    quit()
                    sys.exit()

            car.action(current_state)
            car.action_train(current_state)
            car.update(self.game_map)
            new_state, reward, done = self.step(car)
            states.append(current_state)
9 changes: 8 additions & 1 deletion qlearning/q_car.py
@@ -187,17 +187,24 @@ def action_train(self, state):
    def action(self):

        state = self.get_data()

        action = self.act_epsilon_greedy(state)

        if action == 0:
            self.angle += 10  # Left
            self.n_drifts_left += 1
            self.n_drifts_right = 0
        elif action == 1:
            self.angle -= 10  # Right
            self.n_drifts_left = 0
            self.n_drifts_right += 1
        elif action == 2:
            if self.speed - 2 >= 6:
                self.speed -= 2  # Slow Down
            self.n_drifts_right = 0
            self.n_drifts_left = 0
        else:
            self.n_drifts_right = 0
            self.n_drifts_left = 0
            if self.speed + 2 <= 12:
                self.speed += 2  # Speed Up

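
act_epsilon_greedy itself is not shown in this diff; going by its name, it is presumably the standard epsilon-greedy rule, which would look something like the sketch below (an assumption, not the repo's implementation):

    import random

    def act_epsilon_greedy(q_values, epsilon):
        # Explore with probability epsilon, otherwise exploit the
        # action with the highest estimated value.
        if random.random() < epsilon:
            return random.randrange(len(q_values))
        return max(range(len(q_values)), key=lambda a: q_values[a])
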
9 changes: 8 additions & 1 deletion sarsa/sarsa_car.py
@@ -169,17 +169,24 @@ def action_train(self, state):
    def action(self):

        state = self.get_data()

        action = self.act_epsilon_greedy(state)

        if action == 0:
            self.angle += 10  # Left
            self.n_drifts_left += 1
            self.n_drifts_right = 0
        elif action == 1:
            self.angle -= 10  # Right
            self.n_drifts_left = 0
            self.n_drifts_right += 1
        elif action == 2:
            self.n_drifts_right = 0
            self.n_drifts_left = 0
            if self.speed - 2 >= 6:
                self.speed -= 2  # Slow Down
        else:
            self.n_drifts_right = 0
            self.n_drifts_left = 0
            if self.speed + 2 <= 12:
                self.speed += 2  # Speed Up

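
One detail worth noting: in the Q-learning and SARSA agents both speed branches are clamped, so speed stays in the range 6 to 12 in steps of 2, while the NEAT and policy-gradient versions only guard the slow-down side. Restated as a hypothetical helper (adjust_speed is not a function in the repo):

    def adjust_speed(speed, slow_down):
        # Mirrors the clamping above: slowing never drops below 6,
        # and speeding up never exceeds 12.
        if slow_down:
            return speed - 2 if speed - 2 >= 6 else speed
        return speed + 2 if speed + 2 <= 12 else speed
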
Binary file added sarsa/sarsa_policy.pth
Binary file not shown.
