Skip to content

Commit

Permalink
Working experiments! Woohoo!
Browse files Browse the repository at this point in the history
  • Loading branch information
realm-robot-ws committed Mar 21, 2024
1 parent f7a0e16 commit 886211d
Show file tree
Hide file tree
Showing 9 changed files with 51 additions and 16 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ After putting all robots in their start positions:
2. **Start TB1:** SSH into turtle 1 and run `ROS_NAMESPACE=turtle1 roslaunch turtlebot3_bringup turtlebot3_robot.launch`
3. **Start TB2:** SSH into turtle 2 and run `ROS_NAMESPACE=turtle2 roslaunch turtlebot3_bringup turtlebot3_robot.launch`
4. **Start F1Tenth:** SSH into f1tenth and run `roslaunch racecar teleop.launch`
5. **Start camera:** SSH into f1tenth again and run `roslaunch realsense2_camera rs_camera.launch`
5. **Start camera:** SSH into f1tenth again and run `roslaunch realsense2_camera rs_camera.launch depth_width:=256 depth_height:=144 depth_fps:=90 enable_color:=false`
6. **Start experiments:** Attach to the bash service `docker attach bash`
a. Enable teleop control of F1Tenth by holding R1 on the controller
b. Start the experiment by `rostopic pub -1 /start_control`
Expand Down
20 changes: 11 additions & 9 deletions realm_gc/rgc/launch/tro_all.launch
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,13 @@
-->

<launch>
<arg name="base_path" default="$(find rgc_control)/saved_policies/v1/predictrepair"/>

<!-- Start the F1Tenth stack -->
<include file="$(find rgc)/launch/tro_f1tenth.launch">
<arg name="base_path" default="$(find rgc_control)/saved_policies"/>
<arg name="traj_filepath" default="base/ego_traj.eqx"/>
<arg name="mlp_filepath" default="base/mlp.eqx"/>
<arg name="base_path" default="$(arg base_path)"/>
<arg name="traj_filepath" default="ego_traj.eqx"/>
<arg name="mlp_filepath" default="mlp.eqx"/>

<arg name="position_topic" default="/vicon/realm_f1tenth/realm_f1tenth"/>

Expand All @@ -18,23 +20,23 @@
<!-- Start the Turtlebot stacks -->
<group ns="turtle1">
<include file="$(find rgc)/launch/tro_turtlebot.launch">
<arg name="base_path" default="$(find rgc_control)/saved_policies"/>
<arg name="traj_filepath" default="base/non_ego_traj_0.eqx"/>
<arg name="base_path" default="$(find rgc_control)/saved_policies/v1/base"/>
<arg name="traj_filepath" default="non_ego_traj_0.eqx"/>

<arg name="position_topic" default="/vicon/realm_turtle_1/realm_turtle_1"/>
<arg name="T" default="12"/>
<arg name="randomize_trajectory" default="false"/>
<arg name="randomize_trajectory" default="true"/>
</include>
</group>

<group ns="turtle2">
<include file="$(find rgc)/launch/tro_turtlebot.launch">
<arg name="base_path" default="$(find rgc_control)/saved_policies"/>
<arg name="traj_filepath" default="base/non_ego_traj_1.eqx"/>
<arg name="base_path" default="$(find rgc_control)/saved_policies/v1/base"/>
<arg name="traj_filepath" default="non_ego_traj_1.eqx"/>

<arg name="position_topic" default="/vicon/realm_turtle_2/realm_turtle_2"/>
<arg name="T" default="12"/>
<arg name="randomize_trajectory" default="false"/>
<arg name="randomize_trajectory" default="true"/>
</include>
</group>
</launch>
6 changes: 6 additions & 0 deletions realm_gc/rgc_control/src/rgc_control/f1tenth_control.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ def __init__(self):
Image,
self.depth_image_callback,
)
self.image_pub = rospy.Publisher("f1tenth_img", Image, queue_size=1)

# Get MLP eqx filepath from rosparam supplied by roslaunch
self.mlp_eqx = os.path.join(
Expand Down Expand Up @@ -85,6 +86,8 @@ def depth_image_callback(self, msg):
self.depth_image = cv2.resize(
original_image, self.image_shape, interpolation=cv2.INTER_AREA
)
self.image_pub.publish(self.bridge.cv2_to_imgmsg(self.depth_image, encoding="passthrough"))
self.depth_image = 0.001 * self.depth_image.astype(np.float64) # convert to meters

def reset_control(self, msg=None):
"""Reset the control to stop the experiment and publish the command."""
Expand Down Expand Up @@ -130,6 +133,9 @@ def update(self):

# Control speed rather than acceleration directly
self.desired_speed += self.dt * self.control.acceleration
if self.desired_speed > 1.5:
self.desired_speed = 1.5

msg.drive.mode = 0
msg.drive.speed = self.desired_speed

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,7 @@ def compute_action(self, observation: SteeringObservation) -> F1TenthAction:
self.equilibrium_state[3],
]
).reshape(-1, 1)

error = state - goal
u = -self.K * error

Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""Define a policy for trajectory tracking."""
from dataclasses import dataclass

import rospy
import jax
import jax.numpy as jnp

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,8 @@ def create_tro_f1tenth_policy(
return CompositePolicy(
[
ego_tracking_policy,
# ego_mlp_policy,
# barrier_policy,
ego_mlp_policy,
barrier_policy,
]
)

Expand All @@ -92,6 +92,23 @@ def create_tro_turtlebot_policy(
# Load the trajectory and flip the x and y coordinates, then add some noise
non_ego_traj = LinearTrajectory2D.from_eqx(2, traj_eqx_path)
p = jnp.fliplr(non_ego_traj.p)

# # Clamp the initial position to be the intended starting position
# if p[0, 1] <= -3.0:
# p = p.at[0, 0].set(-0.5)
# else:
# p = p.at[0, 0].set(0.5)

# Shift to +y to account for limited highbay space
p = p.at[:, 1].add(0.5)

# Upscale if it's small
if p.shape == (2, 2):
p_new = jnp.zeros((6, 2))
p_new = p_new.at[:, 0].set(jnp.interp(jnp.linspace(0, 1, 6), jnp.array([0.0, 1.0]), p[:, 0]))
p_new = p_new.at[:, 1].set(jnp.interp(jnp.linspace(0, 1, 6), jnp.array([0.0, 1.0]), p[:, 1]))
p = p_new

if randomize:
noise_scale = 0.05
p += np.random.normal(scale=np.sqrt(noise_scale), size=p.shape)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ def compute_action(
kernel = self.gaussian_kernel(depth_image.shape, 1.0)
mean_distance = jnp.sum(depth_image * kernel) / jnp.sum(kernel)
accel = 5 * jnp.clip(mean_distance - self.min_distance, -2.0, 0.0)
print(f"Vision barrier: mean_distance: {mean_distance}, accel {accel}")

return F1TenthAction(
acceleration=accel,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def compute_action(self, obs: VisualObservation) -> F1TenthAction:

action = F1TenthAction(
acceleration=action[0],
steering_angle=action[1],
steering_angle=-action[1],
)

return action
13 changes: 10 additions & 3 deletions realm_gc/rgc_control/src/rgc_control/turtlebot_control.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,19 +43,26 @@ def __init__(self):
rospy.sleep(2.0) # additional waiting for state to converge
rospy.loginfo("State estimate has converged. Instantiating control policy.")

randomize_trajectory = rospy.get_param("~randomize_trajectory", False)
self.randomize_trajectory = rospy.get_param("~randomize_trajectory", False)
self.control_policy = create_tro_turtlebot_policy(
np.array([self.state.x, self.state.y]),
self.eqx_filepath,
randomize_trajectory,
self.randomize_trajectory,
)

def state_estimate_callback(self, msg):
    """Cache the latest state-estimate message.

    Stores the incoming message verbatim in ``self.state``; other
    callbacks (e.g. policy re-creation on stop) read ``self.state.x``
    and ``self.state.y`` from it.
    """
    self.state = msg

def stop_control_callback(self, msg):
    """Stop the controller, then rebuild the tracking policy in place.

    Delegates the actual stop handling to the parent class, then
    re-creates the turtlebot policy anchored at the robot's current
    estimated position so the next run starts from where the robot is
    now (re-randomizing the trajectory if ``self.randomize_trajectory``
    is set).
    """
    super().stop_control_callback(msg)

    # NOTE(review): assumes self.state has been populated by the state
    # estimator before a stop message arrives — confirm against caller.
    start_position = np.array([self.state.x, self.state.y])
    self.control_policy = create_tro_turtlebot_policy(
        start_position,
        self.eqx_filepath,
        self.randomize_trajectory,
    )

def reset_control(self, msg=None):
"""Reset the turtlebot to its start position."""
# Make sure to normalize the time.
if self.state is None:
# Stop if no state information
self.control = TurtlebotAction(0.0, 0.0)
Expand Down

0 comments on commit 886211d

Please sign in to comment.