
Commit

Intermediate
RobinSchmid7 committed Aug 16, 2023
1 parent a909a75 commit 6e333b1
Showing 5 changed files with 287 additions and 15 deletions.
186 changes: 186 additions & 0 deletions scripts/dataset_generation/extract_binary_maps.py
@@ -0,0 +1,186 @@
import sys
import os
from pathlib import Path
import time
from tqdm import tqdm
import subprocess
import yaml

from tf_bag import BagTfTransformer
import rospy
import rosparam
from sensor_msgs.msg import Image, CameraInfo, CompressedImage
import rosbag

from postprocessing_tools_ros.merging import merge_bags_single, merge_bags_all

# from py_image_proc_cuda import ImageProcCuda
# from cv_bridge import CvBridge

from wild_visual_navigation import WVN_ROOT_DIR
from wild_visual_navigation.utils import perugia_dataset, ROOT_DIR

sys.path.append(f"{WVN_ROOT_DIR}/wild_visual_navigation_ros/scripts")
from wild_visual_navigation_node import WvnRosInterface

sys.path.append(f"{WVN_ROOT_DIR}/wild_visual_navigation_anymal/scripts")
from anymal_msg_converter_node import anymal_msg_callback

# Processing steps:
# 1. Debayer cam4 -> send via ROS and wait for the result ("correct params")
# 2. Remap anymal_state_topic -> /wild_visual_navigation_node/robot_state
# 3. Feed both into wild_visual_navigation_node ("correct params")
# 4. Iterate over the rosbags


def get_bag_info(rosbag_path: str) -> dict:
    # Query `rosbag info` via subprocess and parse its YAML output to obtain the topic list
    info_dict = yaml.safe_load(
        subprocess.Popen(["rosbag", "info", "--yaml", rosbag_path], stdout=subprocess.PIPE).communicate()[0]
    )
    return info_dict
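
# Example of the structure assumed to be returned by get_bag_info() (keys as used further below;
# the path is a placeholder):
#   info = get_bag_info("/path/to/some.bag")
#   info["topics"] -> [{"topic": "/tf", "messages": 1234, ...}, ...]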


class BagTfTransformerWrapper:
    def __init__(self, bag):
        self.tf_listener = BagTfTransformer(bag)

    def waitForTransform(self, parent_frame, child_frame, time, duration):
        # `duration` is accepted for interface compatibility but not forwarded
        return self.tf_listener.waitForTransform(parent_frame, child_frame, time)

    def lookupTransform(self, parent_frame, child_frame, time):
        try:
            return self.tf_listener.lookupTransform(parent_frame, child_frame, time)
        except Exception:
            return (None, None)
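
# Note: the wrapper mirrors the waitForTransform / lookupTransform interface the WVN node
# expects from a TF listener, so the rosbag-backed BagTfTransformer can be passed to
# setup_rosbag_replay() below (assumption based on how it is used in this script).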


def do(n, dry_run):
    d = perugia_dataset[n]

    if bool(dry_run):
        print(d)
        return

    s = os.path.join(ROOT_DIR, d["name"])

    valid_topics = ["/state_estimator/anymal_state", "/wide_angle_camera_front/img_out"]

    rosbags = [
        "/home/rschmid/RosBags/6/images.bag",
        "/home/rschmid/RosBags/6/2023-03-02-11-13-08_anymal-d020-lpc_mission_0.bag",
        "/home/rschmid/RosBags/6/2023-03-02-11-13-08_anymal-d020-lpc_mission_1.bag",
    ]

    output_bag_wvn = s + "_wvn.bag"
    output_bag_tf = s + "_tf.bag"

    if not os.path.exists(output_bag_tf):
        total_included_count, total_skipped_count = merge_bags_single(
            input_bag=rosbags, output_bag=output_bag_tf, topics="/tf /tf_static", verbose=True
        )
    if not os.path.exists(output_bag_wvn):
        total_included_count, total_skipped_count = merge_bags_single(
            input_bag=rosbags, output_bag=output_bag_wvn, topics=" ".join(valid_topics), verbose=True
        )

    # Setup the WVN node
    rospy.init_node("wild_visual_navigation_node")

    mission = s.split("/")[-1]

    running_store_folder = f"/home/rschmid/RosBags/output/{mission}"

    if os.path.exists(running_store_folder):
        print("Folder already exists, but proceeding!")
        # return

    rosparam.set_param("wild_visual_navigation_node/mode", "extract_labels")
    rosparam.set_param("wild_visual_navigation_node/running_store_folder", running_store_folder)

    # For the proprioceptive callback
    state_msg_valid = False
    desired_twist_msg_valid = False

    wvn_ros_interface = WvnRosInterface()
    print("-" * 80)

    print("start loading tf")
    tf_listener = BagTfTransformerWrapper(output_bag_tf)
    wvn_ros_interface.setup_rosbag_replay(tf_listener)
    print("done loading tf")

    # Camera intrinsics for the wide-angle front camera (Höngg, new)
    info_msg = CameraInfo()
    info_msg.height = 1080
    info_msg.width = 1440
    info_msg.distortion_model = "equidistant"
    info_msg.D = [0.4316922809468283, 0.09279900476637248, -0.4010909691803734, 0.4756163338479413]
    info_msg.K = [575.6050407221768, 0.0, 745.7312198525915, 0.0, 578.564849365178, 519.5207040671075, 0.0, 0.0, 1.0]
    info_msg.P = [575.6050407221768, 0.0, 745.7312198525915, 0.0, 0.0, 578.564849365178, 519.5207040671075, 0.0, 0.0, 0.0, 1.0, 0.0]
    info_msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
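    # For reference (sensor_msgs/CameraInfo conventions): K is the row-major 3x3 intrinsic
    # matrix [fx 0 cx; 0 fy cy; 0 0 1], P the row-major 3x4 projection matrix, R the
    # rectification matrix, and D the distortion coefficients of the "equidistant"
    # (fisheye) model.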

    rosbag_info_dict = get_bag_info(output_bag_wvn)
    total_msgs = sum([x["messages"] for x in rosbag_info_dict["topics"] if x["topic"] in valid_topics])
    total_time_img = 0
    total_time_state = 0

    with rosbag.Bag(output_bag_wvn, "r") as bag:
        start_time = rospy.Time.from_sec(bag.get_start_time() + d["start"])
        end_time = rospy.Time.from_sec(bag.get_start_time() + d["stop"])
        with tqdm(
            total=total_msgs,
            desc="Total",
            colour="green",
            position=1,
            bar_format="{desc:<13}{percentage:3.0f}%|{bar:20}{r_bar}",
        ) as pbar:
            for (topic, msg, ts) in bag.read_messages(topics=None, start_time=start_time, end_time=end_time):
                if rospy.is_shutdown():
                    return
                pbar.update(1)
                st = time.time()

                if topic == "/state_estimator/anymal_state":
                    # Convert the AnymalState message into the WVN robot state format
                    state_msg = anymal_msg_callback(msg, return_msg=True)
                    state_msg_valid = True

                elif topic == "/wide_angle_camera_front/img_out":
                    image_msg = msg
                    # print("Received /wide_angle_camera_front/img_out")

                    # Attach the fixed camera intrinsics to the current image timestamp
                    info_msg.header = msg.header
                    camera_options = {}
                    camera_options["name"] = "wide_angle_camera_front"
                    camera_options["use_for_training"] = True

                    try:
                        wvn_ros_interface.image_callback(image_msg, info_msg, camera_options)
                    except Exception as e:
                        print("Bad image_callback", e)

                    total_time_img += time.time() - st
                    # print(f"image time: {total_time_img} , state time: {total_time_state}")
                    # print("add image")

                if state_msg_valid:
                    try:
                        wvn_ros_interface.robot_state_callback(state_msg, None)
                    except Exception as e:
                        print("Bad robot_state callback ", e)

                    state_msg_valid = False
                    total_time_state += time.time() - st

    print("Finished with converting the dataset")
    rospy.signal_shutdown("stop the node")


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--n", type=int, default=0, help="Index of the perugia_dataset entry to process")
    parser.add_argument("--dry_run", type=int, default=0, help="If nonzero, only print the dataset entry and exit")
    args = parser.parse_args()

    do(args.n, args.dry_run)
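
A typical invocation (assumed from the argparse setup above, not documented elsewhere) is: python extract_binary_maps.py --n 0 --dry_run 1 to inspect the selected perugia_dataset entry, then python extract_binary_maps.py --n 0 for the actual extraction; a running roscore and the hard-coded bag paths are required.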
@@ -1,7 +1,22 @@
# Input topics
robot_state_topic: "/wild_visual_navigation_node/robot_state"
desired_twist_topic: "/motion_reference/command_twist"
# desired_twist_topic: "/log/state/desiredRobotTwist"
# desired_twist_topic: "/motion_reference/command_twist"
desired_twist_topic: "/log/state/desiredRobotTwist"

camera_topics:
  front:
    image_topic: "/alphasense_driver_ros/cam4/debayered"
    info_topic: "/alphasense_driver_ros/cam4/camera_info"
    use_for_training: true
  # left:
  #   image_topic: "/alphasense_driver_ros/cam3/debayered"
  #   info_topic: "/alphasense_driver_ros/cam3/camera_info"
  #   use_for_training: false
  # right:
  #   image_topic: "/alphasense_driver_ros/cam5/debayered"
  #   info_topic: "/alphasense_driver_ros/cam5/camera_info"
  #   use_for_training: false


# Relevant frames
fixed_frame: odom
@@ -20,14 +35,14 @@ robot_max_velocity: 0.8
traversability_radius: 5.0 # meters
image_graph_dist_thr: 0.2 # meters
proprio_graph_dist_thr: 0.1 # meters
network_input_image_height: 224 # 448
network_input_image_width: 224 # 448
segmentation_type: "random"
network_input_image_height: 448 # 448
network_input_image_width: 448 # 448
segmentation_type: "slic"
feature_type: "dino"
dino_patch_size: 16 # DINO only
confidence_std_factor: 4.0
confidence_std_factor: 1.0
scale_traversability: True
scale_traversability_max_fpr: 0.25
scale_traversability_max_fpr: 0.15
min_samples_for_training: 5


@@ -53,11 +68,13 @@ colormap: "RdYlBu"

print_image_callback_time: false
print_proprio_callback_time: false
log_time: false
log_confidence: false
log_time: true
log_memory: true
log_confidence: true
verbose: false
debug_supervision_node_index_from_last: 10

use_debug_for_desired: true
extraction_store_folder: /home/rschmid/RosBags/output/6
exp: "nan"
use_binary_only: true
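
As an illustration (not part of the commit), a minimal sketch of how a ROS node could read a few of these values, assuming the YAML is loaded into the wild_visual_navigation_node namespace as done by the launch files below:

import rospy

rospy.init_node("wild_visual_navigation_node")
# Values loaded under the node's namespace are visible as private ("~") parameters
segmentation_type = rospy.get_param("~segmentation_type", "slic")
feature_type = rospy.get_param("~feature_type", "dino")
camera_topics = rospy.get_param("~camera_topics", {})
for cam, cfg in camera_topics.items():
    rospy.loginfo("camera %s: image_topic=%s use_for_training=%s",
                  cam, cfg["image_topic"], cfg.get("use_for_training", False))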
18 changes: 18 additions & 0 deletions wild_visual_navigation_ros/launch/replay_anymal_rviz.launch
@@ -0,0 +1,18 @@
<launch>
<!-- Arguments -->
<arg name="name" default="wild_visual_navigation_node"/>
<arg name="params_file" default="$(find wild_visual_navigation_ros)/config/wild_visual_navigation/default.yaml"/>
<arg name="anymal_config" default="/home/rschmid/RosBags/6_raw/2023-03-02-11-13-08_anymal-d020-lpc_mission.yaml" />

<arg name="anymal_rsl_launch" default="True"/>

<!-- Set sim time to true -->
<param name="/use_sim_time" type="bool" value="True" />

<!-- Load parameters -->
<rosparam command="load" file="$(arg params_file)" ns="$(arg name)"/>

<!-- Launch ANYmal message converter node -->
<node if="$(arg anymal_rsl_launch)" name="anymal_rsl_launch" pkg="anymal_rsl_launch" type="replay.py" args="d $(arg anymal_config)"/>

</launch>
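
With /use_sim_time set to true, this launch file is typically used while a recorded mission bag is replayed with rosbag play --clock, e.g. via roslaunch wild_visual_navigation_ros replay_anymal_rviz.launch (usage sketch based on the arguments above; the anymal_rsl_launch package and the referenced mission YAML are assumed to be available).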
51 changes: 51 additions & 0 deletions wild_visual_navigation_ros/launch/replay_traj.launch
@@ -0,0 +1,51 @@
<launch>
<!-- Arguments -->
<arg name="name" default="wild_visual_navigation_node"/>
<arg name="params_file" default="$(find wild_visual_navigation_ros)/config/wild_visual_navigation/default.yaml"/>
<arg name="rviz_config" default="anymal.rviz" />
<!-- <arg name="anymal_config" default="/media/Data/Datasets/2022_Hoeng_High_Grass/cerberus.yaml" />-->
<!-- <arg name="anymal_config" default="/home/rschmid/RosBags/6_raw/2023-03-02-11-13-08_anymal-d020-lpc_mission.yaml" />-->

<arg name="anymal_converter" default="False"/>
<arg name="anymal_rsl_launch" default="True"/>
<arg name="debayer" default="False"/>
<arg name="rviz" default="False"/>
<arg name="elevation_mapping" default="False"/>
<arg name="local_planner" default="False"/>

<!-- Set sim time to true -->
<param name="/use_sim_time" type="bool" value="True" />

<!-- Load parameters -->
<!-- <rosparam command="delete" param="$(arg name)" />-->
<rosparam command="load" file="$(arg params_file)" ns="$(arg name)"/>

<!-- Launch ANYmal message converter node -->
<!-- <node if="$(arg anymal_converter)" name="anymal_msg_converter_node" pkg="wild_visual_navigation_anymal" type="anymal_msg_converter_node.py"/>-->

<!-- Launch ANYmal message converter node -->
<!-- <node if="$(arg anymal_rsl_launch)" name="anymal_rsl_launch" pkg="anymal_rsl_launch" type="replay.py" args="d $(arg anymal_config)"/>-->

<!-- Use Elevation Mapping-->
<!-- <include if="$(arg elevation_mapping)" file="$(find wild_visual_navigation_ros)/launch/elevation_mapping_cupy.launch"/>-->

<!-- Use RMP Local Planner-->
<!-- <include if="$(arg local_planner)" file="$(find wild_visual_navigation_ros)/launch/field_local_planner.launch"/> -->

<!-- Use RViz-->
<!-- <node if="$(arg rviz)" name="wild_visual_navigation_rviz" pkg="rviz" type="rviz" args="-d $(find wild_visual_navigation_ros)/config/rviz/$(arg rviz_config)"/>-->

<!--
<node if="$(arg uncompress_alphasense_cam3)" name="uncompress_cam3" pkg="image_transport" type="republish">
<param name="in_transport" value="compressed"/>
<param name="in" value="/alphasense_driver_ros/cam3/debayered"/>
<param name="out_transport" value="raw"/>
<param name="out" value="/alphasense_driver_ros/cam3/debayered"/>
</node> -->

<!-- <node if="$(arg uncompress_alphasense_cam3)" name="uncompress_cam3" pkg="image_transport" type="republish" args="compressed in:=/alphasense_driver_ros/cam3/debayered raw out:=/alphasense_driver_ros/cam3/debayered" />-->
<!-- <node if="$(arg uncompress_alphasense_cam4)" name="uncompress_cam4" pkg="image_transport" type="republish" args="compressed in:=/alphasense_driver_ros/cam4/debayered raw out:=/alphasense_driver_ros/cam4/debayered" />-->
<!-- <node if="$(arg uncompress_alphasense_cam5)" name="uncompress_cam5" pkg="image_transport" type="republish" args="compressed in:=/alphasense_driver_ros/cam5/debayered raw out:=/alphasense_driver_ros/cam5/debayered" />-->


</launch>
@@ -592,10 +592,10 @@ def robot_state_callback(self, state_msg, desired_twist_msg: TwistStamped):
        current_twist_tensor = rc.twist_stamped_to_torch(state_msg.twist, device=self.device)
        desired_twist_tensor = rc.twist_stamped_to_torch(desired_twist_msg, device=self.device)

        # Update traversability
        traversability, traversability_var, is_untraversable = self.supervision_generator.update_velocity_tracking(
            current_twist_tensor, desired_twist_tensor, velocities=["vx", "vy"]
        )
        # Update traversability
        traversability, traversability_var, is_untraversable = self.supervision_generator.update_velocity_tracking(
            current_twist_tensor, desired_twist_tensor, velocities=["vx", "vy"]
        )

        # Create proprioceptive node for the graph
        proprio_node = ProprioceptionNode(
@@ -616,8 +616,8 @@ def robot_state_callback(self, state_msg, desired_twist_msg: TwistStamped):
        # Add node to the graph
        self.traversability_estimator.add_proprio_node(proprio_node)

        if self.mode == WVNMode.DEBUG or self.mode == WVNMode.ONLINE:
            self.visualize_proprioception()
        # if self.mode == WVNMode.DEBUG or self.mode == WVNMode.ONLINE:
        self.visualize_proprioception()

        if self.print_proprio_callback_time:
            print(self.timer)
