Add CARLA examples for GUI and no-GUI pipelines #634

Closed
wants to merge 5 commits into from
@@ -0,0 +1,152 @@
from torchvision import transforms
from PIL import Image
from brains.CARLA.utils.pilotnet_onehot import PilotNetOneHot
from brains.CARLA.utils.test_utils import traffic_light_to_int, model_control
from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH
from os import path

import numpy as np

import torch
import torchvision
import cv2
import time
import os
import math
import carla

PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'il_models/'

class Brain:

    def __init__(self, sensors, actuators, model=None, handler=None, config=None):
        self.motors = actuators.get_motor('motors_0')
        self.camera_rgb = sensors.get_camera('camera_0')  # RGB front-view camera
        self.camera_seg = sensors.get_camera('camera_2')  # segmentation camera
        self.handler = handler
        self.inference_times = []
        self.gpu_inference = config['GPU']
        self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu')

        client = carla.Client('localhost', 2000)
        client.set_timeout(10.0)
        world = client.get_world()
        self.map = world.get_map()

        weather = carla.WeatherParameters.ClearNoon
        world.set_weather(weather)

        # Wait until the launcher has spawned the ego vehicle.
        self.vehicle = None
        while self.vehicle is None:
            for vehicle in world.get_actors().filter('vehicle.*'):
                if vehicle.attributes.get('role_name') == 'ego_vehicle':
                    self.vehicle = vehicle
                    break
            if self.vehicle is None:
                print("Waiting for vehicle with role_name 'ego_vehicle'")
                time.sleep(1)  # sleep for 1 second before checking again

        if model:
            if not path.exists(PRETRAINED_MODELS + model):
                print("File " + model + " cannot be found in " + PRETRAINED_MODELS)

            if config['UseOptimized']:
                self.net = torch.jit.load(PRETRAINED_MODELS + model).to(self.device)
            else:
                self.net = PilotNetOneHot((288, 200, 6), 3, 4, 4).to(self.device)
                self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model, map_location=self.device))
                self.net.eval()

        self.prev_hlc = 0


    def update_frame(self, frame_id, data):
        """Update the information to be shown in one of the GUI's frames.

        Arguments:
            frame_id {str} -- Id of the frame that will represent the data
            data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc)
        """
        # Pad non-square images with black borders so the GUI frame stays square,
        # e.g. a 240x320 image gets 40 rows of padding above and below.
        if data.shape[0] != data.shape[1]:
            if data.shape[0] > data.shape[1]:
                difference = data.shape[0] - data.shape[1]
                extra_left, extra_right = int(difference/2), int(difference/2)
                extra_top, extra_bottom = 0, 0
            else:
                difference = data.shape[1] - data.shape[0]
                extra_left, extra_right = 0, 0
                extra_top, extra_bottom = int(difference/2), int(difference/2)

            data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)

        self.handler.update_frame(frame_id, data)

    def execute(self):
        """Main loop of the brain. This will be called iteratively each TIME_CYCLE (see pilot.py)"""

        rgb_image = self.camera_rgb.getImage().data
        seg_image = self.camera_seg.getImage().data

        self.update_frame('frame_0', rgb_image)
        self.update_frame('frame_1', seg_image)

        try:
            # calculate speed in km/h from the velocity vector (m/s)
            velocity = self.vehicle.get_velocity()
            speed = 3.6 * math.sqrt(velocity.x**2 + velocity.y**2 + velocity.z**2)

            # randomly choose a high-level command (HLC) if at or near a junction
            vehicle_location = self.vehicle.get_transform().location
            vehicle_waypoint = self.map.get_waypoint(vehicle_location)
            next_to_junction = False
            next_waypoints = []  # initialized so the junction branch below cannot hit a NameError
            for j in range(1, 11):
                next_waypoint = vehicle_waypoint.next(j * 1.0)[0]
                if next_waypoint.is_junction:
                    next_to_junction = True
                    next_waypoints = vehicle_waypoint.next(j * 1.0)
                    break
            if vehicle_waypoint.is_junction or next_to_junction:
                if self.prev_hlc == 0:
                    valid_turns = []
                    for next_wp in next_waypoints:
                        yaw_diff = next_wp.transform.rotation.yaw - vehicle_waypoint.transform.rotation.yaw
                        yaw_diff = (yaw_diff + 180) % 360 - 180  # wrap into [-180, 180)
                        if -15 < yaw_diff < 15:
                            valid_turns.append(3)  # Go Straight
                        elif 15 < yaw_diff < 165:
                            valid_turns.append(1)  # Turn Left
                        elif -165 < yaw_diff < -15:
                            valid_turns.append(2)  # Turn Right
                    hlc = np.random.choice(valid_turns) if valid_turns else 0
                else:
                    hlc = self.prev_hlc
            else:
                hlc = 0

            # get traffic light status
            light_status = -1
            if self.vehicle.is_at_traffic_light():
                traffic_light = self.vehicle.get_traffic_light()
                light_status = traffic_light.get_state()

            print(f'hlc: {hlc}')
            print(f'light: {light_status}')
            frame_data = {
                'hlc': hlc,
                'measurements': speed,
                'rgb': np.copy(rgb_image),
                'segmentation': np.copy(seg_image),
                'light': np.array([traffic_light_to_int(light_status)])
            }

            throttle, steer, brake = model_control(self.net,
                                                   frame_data,
                                                   ignore_traffic_light=False,
                                                   device=self.device,
                                                   combined_control=False)
            self.motors.sendThrottle(throttle)
            self.motors.sendSteer(steer)
            self.motors.sendBrake(brake)
        except Exception as err:
            print(err)
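
For reference, the junction branch above hinges on one arithmetic step: (yaw_diff + 180) % 360 - 180 wraps any raw yaw difference into [-180, 180) before it is bucketed into a high-level command. A minimal standalone sketch of that step (the function names are illustrative, not part of the PR):

def wrap_angle(yaw_diff_deg):
    """Wrap an angle difference in degrees into [-180, 180)."""
    return (yaw_diff_deg + 180) % 360 - 180

def hlc_from_yaw_diff(yaw_diff_deg):
    """Map a wrapped heading change to the HLC codes used above."""
    d = wrap_angle(yaw_diff_deg)
    if -15 < d < 15:
        return 3  # Go Straight
    elif 15 < d < 165:
        return 1  # Turn Left
    elif -165 < d < -15:
        return 2  # Turn Right
    return 0      # no valid turn at this waypoint

# A raw difference of 350 degrees wraps to -10, i.e. roughly straight ahead:
assert wrap_angle(350) == -10
assert hlc_from_yaw_diff(350) == 3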
behavior_metrics/brains/CARLA/utils/convlstm.py (195 additions, 0 deletions)
@@ -0,0 +1,195 @@
"""
This implementation of Convolutional LSTM has been adapted from https://github.com/ndrplz/ConvLSTM_pytorch.
"""

import torch.nn as nn
import torch


class ConvLSTMCell(nn.Module):

    def __init__(self, input_dim, hidden_dim, kernel_size, bias):
        """
        Initialize ConvLSTM cell.

        Parameters
        ----------
        input_dim: int
            Number of channels of input tensor.
        hidden_dim: int
            Number of channels of hidden state.
        kernel_size: (int, int)
            Size of the convolutional kernel.
        bias: bool
            Whether or not to add the bias.
        """

        super(ConvLSTMCell, self).__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim

        self.kernel_size = kernel_size
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2
        self.bias = bias

        # A single convolution computes all four gates at once (4 * hidden_dim output channels).
        self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
                              out_channels=4 * self.hidden_dim,
                              kernel_size=self.kernel_size,
                              padding=self.padding,
                              bias=self.bias)

    def forward(self, input_tensor, cur_state):
        h_cur, c_cur = cur_state

        combined = torch.cat([input_tensor, h_cur], dim=1)  # concatenate along channel axis

        combined_conv = self.conv(combined)
        # Split into input (i), forget (f), output (o) gates and cell candidate (g).
        cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
        i = torch.sigmoid(cc_i)
        f = torch.sigmoid(cc_f)
        o = torch.sigmoid(cc_o)
        g = torch.tanh(cc_g)

        c_next = f * c_cur + i * g
        h_next = o * torch.tanh(c_next)

        return h_next, c_next

    def init_hidden(self, batch_size, image_size):
        height, width = image_size
        return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device),
                torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device))


class ConvLSTM(nn.Module):

    """

    Parameters:
        input_dim: Number of channels in input
        hidden_dim: Number of hidden channels
        kernel_size: Size of kernel in convolutions
        num_layers: Number of LSTM layers stacked on each other
        batch_first: Whether or not dimension 0 is the batch or not
        bias: Bias or no bias in Convolution
        return_all_layers: Return the list of computations for all layers
        Note: Will do same padding.

    Input:
        A tensor of size B, T, C, H, W or T, B, C, H, W
    Output:
        A tuple of two lists of length num_layers (or length 1 if return_all_layers is False).
            0 - layer_output_list is the list of lists of length T of each output
            1 - last_state_list is the list of last states
                each element of the list is a tuple (h, c) for hidden state and memory
    Example:
        >> x = torch.rand((32, 10, 64, 128, 128))
        >> convlstm = ConvLSTM(64, 16, (3, 3), 1, True, True, False)
        >> _, last_states = convlstm(x)
        >> h = last_states[0][0]  # 0 for layer index, 0 for h index
    """

    def __init__(self, input_dim, hidden_dim, kernel_size, num_layers,
                 batch_first=False, bias=True, return_all_layers=False):
        super(ConvLSTM, self).__init__()

        self._check_kernel_size_consistency(kernel_size)

        # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
        if not len(kernel_size) == len(hidden_dim) == num_layers:
            raise ValueError('Inconsistent list length.')

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bias = bias
        self.return_all_layers = return_all_layers

        cell_list = []
        for i in range(0, self.num_layers):
            # The first layer reads the raw input; deeper layers read the previous layer's hidden state.
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]

            cell_list.append(ConvLSTMCell(input_dim=cur_input_dim,
                                          hidden_dim=self.hidden_dim[i],
                                          kernel_size=self.kernel_size[i],
                                          bias=self.bias))

        self.cell_list = nn.ModuleList(cell_list)

    def forward(self, input_tensor, hidden_state=None):
        """

        Parameters
        ----------
        input_tensor:
            5-D Tensor either of shape (t, b, c, h, w) or (b, t, c, h, w)
        hidden_state:
            None. Stateful operation is not implemented yet.

        Returns
        -------
        layer_output_list, last_state_list
        """
        if not self.batch_first:
            # (t, b, c, h, w) -> (b, t, c, h, w)
            input_tensor = input_tensor.permute(1, 0, 2, 3, 4)

        b, _, _, h, w = input_tensor.size()

        # Stateful ConvLSTM is not implemented yet
        if hidden_state is not None:
            raise NotImplementedError()
        else:
            # Since the init is done in forward, the image size can be passed here
            hidden_state = self._init_hidden(batch_size=b,
                                             image_size=(h, w))

        layer_output_list = []
        last_state_list = []

        seq_len = input_tensor.size(1)
        cur_layer_input = input_tensor

        for layer_idx in range(self.num_layers):

            h, c = hidden_state[layer_idx]
            output_inner = []
            for t in range(seq_len):
                h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
                                                 cur_state=[h, c])
                output_inner.append(h)

            layer_output = torch.stack(output_inner, dim=1)
            cur_layer_input = layer_output

            layer_output_list.append(layer_output)
            last_state_list.append([h, c])

        if not self.return_all_layers:
            layer_output_list = layer_output_list[-1:]
            last_state_list = last_state_list[-1:]

        return layer_output_list, last_state_list

    def _init_hidden(self, batch_size, image_size):
        init_states = []
        for i in range(self.num_layers):
            init_states.append(self.cell_list[i].init_hidden(batch_size, image_size))
        return init_states

    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        if not (isinstance(kernel_size, tuple) or
                (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
            raise ValueError('`kernel_size` must be tuple or list of tuples')

    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        if not isinstance(param, list):
            param = [param] * num_layers
        return param
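
To make the docstring example above concrete, here is a minimal shape check (tensor sizes follow the example; the plain import path is an assumption for a local run, since the file lives at behavior_metrics/brains/CARLA/utils/convlstm.py):

import torch
from convlstm import ConvLSTM  # assumed import path for illustration

x = torch.rand((32, 10, 64, 128, 128))  # (b, t, c, h, w)
convlstm = ConvLSTM(64, 16, (3, 3), 1, batch_first=True, bias=True, return_all_layers=False)
layer_outputs, last_states = convlstm(x)
print(layer_outputs[0].shape)  # torch.Size([32, 10, 16, 128, 128])
h, c = last_states[0]          # hidden and cell state of the last (only) layer
print(h.shape)                 # torch.Size([32, 16, 128, 128])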