Skip to content

Commit

Permalink
update files for release
Browse files Browse the repository at this point in the history
  • Loading branch information
peterdavidfagan committed May 8, 2024
1 parent 30d8ebb commit b27c133
Show file tree
Hide file tree
Showing 6 changed files with 185 additions and 448 deletions.
2 changes: 1 addition & 1 deletion mujoco_robotics_environments/config/dataset/default.yaml
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
num_episodes: 100
num_episodes: 10
max_steps: 10
max_episodes_per_file: 10
25 changes: 25 additions & 0 deletions mujoco_robotics_environments/hf_scripts/hf_data_upload.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import os
import tarfile

from huggingface_hub import HfApi

# Root directory containing one sub-directory per recorded dataset chunk.
LOCAL_FILEPATH = "/home/peter/Code/mujoco_robotics_environments/mujoco_robotics_environments/data"

if __name__ == "__main__":
    # Create the client once; it is stateless and reusable across uploads.
    api = HfApi()

    for folder_name in os.listdir(LOCAL_FILEPATH):
        folder_path = os.path.join(LOCAL_FILEPATH, folder_name)
        if not os.path.isdir(folder_path):
            continue

        # The ".tar.gz" suffix must match the compression mode below: use
        # gzip ("w:gz"). (Mode "w:xz" would silently write an xz archive
        # under a misleading .tar.gz extension.)
        output_filename = folder_name + ".tar.gz"
        with tarfile.open(output_filename, "w:gz") as tar:
            # Archive the folder contents at the archive root.
            tar.add(folder_path, arcname=".")

        # Upload the archive to the HuggingFace dataset repo, named after
        # the local folder.
        api.upload_file(
            repo_id="peterdavidfagan/transporter_networks_mujoco",
            repo_type="dataset",
            path_or_fileobj=f"./{output_filename}",
            path_in_repo=output_filename,
        )
223 changes: 104 additions & 119 deletions mujoco_robotics_environments/tasks/rearrangement.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""Mujoco environment for interactive task learning."""
from abc import abstractmethod
from typing import Optional
from typing import Optional, Dict
from copy import deepcopy
from pathlib import Path
import random
Expand Down Expand Up @@ -53,7 +53,7 @@ def __init__(
print("Viewer not requested, running headless.")
else:
self.has_viewer = self._cfg.viewer
# TODO: read arena from xml

# create arena
self._arena = empty.Arena()
table = Rectangle(
Expand Down Expand Up @@ -148,6 +148,8 @@ def __init__(
)
self.prop_random_state = np.random.RandomState(seed=self._cfg.task.initializers.seed)
self.prop_place_random_state = np.random.RandomState(seed=self._cfg.task.initializers.seed+1)

self.mode = None

def close(self) -> None:
if self.passive_view is not None:
Expand Down Expand Up @@ -276,6 +278,9 @@ def reset(self) -> dm_env.TimeStep:
# set the initial eef pose to home
self.eef_home_pose = self._robot.eef_pose.copy()
self.eef_home_pose[0] -= 0.1 # move up 10cm back so it is out of view of camera

# start in pick mode
self.mode="pick"

return dm_env.TimeStep(
step_type=dm_env.StepType.FIRST,
Expand All @@ -284,36 +289,43 @@ def reset(self) -> dm_env.TimeStep:
observation=self._compute_observation(),
)

def step(self, poses) -> dm_env.TimeStep:
"""Updates the environment according to the action and returns a
`TimeStep`.
def step(self, action_dict) -> dm_env.TimeStep:
"""
Updates the environment according to the action and returns a `TimeStep`.
"""

if self.mode == "pick":
self.pick(action_dict['pose'])
self.mode="place"
else:
self.place(action_dict['pose'])
self.mode="pick"

return dm_env.TimeStep(
step_type=dm_env.StepType.MID,
reward=0.0,
discount=0.0,
observation=self._compute_observation(),
)

def pick(self, pose):
"""
# split into pick/place poses
pick_pose = poses[:7]
place_pose = poses[7:]

# parse action into poses
pick_pos = pick_pose[:3]
pick_pos[-1] = 0.575 # hardcode for now
pick_orn = pick_pose[3:]
place_pos = place_pose[:3]
place_pos[-1] = 0.575 # hardcode for now
place_orn = place_pose[3:]

# move arm to pre pick position
pre_pick = pick_pos.copy()
Scripted pick behaviour.
"""
pose[2] = 0.575 # hardcode for now :(
pre_pick = pose.copy()
pre_pick[2] = 0.9
self._robot.arm_controller.set_target(
position=pre_pick,
position=pre_pick[:3],
velocity=np.zeros(3),
quat=pick_orn,
quat=pre_pick[3:],
angular_velocity=np.zeros(3),
)
if not self._robot.run_controller(2.0):
raise RuntimeError("Failed to move arm to pre pick position")

# move arm to pick position
self._robot.arm_controller.set_target(position=pick_pos)
self._robot.arm_controller.set_target(position=pose[:3])
if not self._robot.run_controller(2.0):
raise RuntimeError("Failed to move arm to pick position")

Expand All @@ -324,22 +336,38 @@ def step(self, poses) -> dm_env.TimeStep:


# move arm to pre grasp position
self._robot.arm_controller.set_target(position=pre_pick)
self._robot.arm_controller.set_target(position=pre_pick[:3])
if not self._robot.run_controller(2.0):
raise RuntimeError("Failed to move arm to pre grasp position")

# move arm to home position
quat = np.zeros(4,)
rot_mat = R.from_euler('xyz', [0, 180, 0], degrees=True).as_matrix().flatten()
mujoco.mju_mat2Quat(quat, rot_mat)
self._robot.arm_controller.set_target(
position=self.eef_home_pose,
quat=quat,
)
if not self._robot.run_controller(2.0):
raise RuntimeError("Failed to move arm to home position")

def place(self, pose):
"""
Scripted place behaviour.
"""
pose[2] = 0.575 # hardcode for now :(
# move arm to pre place position
pre_place = place_pos.copy()
pre_place = pose.copy()
pre_place[2] = 0.9
self._robot.arm_controller.set_target(
position=pre_place,
quat=place_orn,
position=pre_place[:3],
quat=pre_place[3:],
)
if not self._robot.run_controller(2.0):
raise RuntimeError("Failed to move arm to pre place position")

# move arm to place position
self._robot.arm_controller.set_target(position=place_pos)
self._robot.arm_controller.set_target(position=pose[:3])
if not self._robot.run_controller(2.0):
raise RuntimeError("Failed to move arm to place position")

Expand All @@ -349,7 +377,7 @@ def step(self, poses) -> dm_env.TimeStep:
raise RuntimeError("Failed to open gripper")

# move arm to pre place position
self._robot.arm_controller.set_target(position=pre_place)
self._robot.arm_controller.set_target(position=pre_place[:3])
if not self._robot.run_controller(2.0):
raise RuntimeError("Failed to move arm to pre place position")

Expand All @@ -364,13 +392,6 @@ def step(self, poses) -> dm_env.TimeStep:
if not self._robot.run_controller(2.0):
raise RuntimeError("Failed to move arm to home position")

return dm_env.TimeStep(
step_type=dm_env.StepType.MID,
reward=0.0,
discount=0.0,
observation=self._compute_observation(),
)

def observation_spec(self) -> dm_env.specs.Array:
"""Returns the observation spec."""
# get shape of overhead camera
Expand All @@ -381,12 +402,13 @@ def observation_spec(self) -> dm_env.specs.Array:
"overhead_camera/rgb": dm_env.specs.Array(shape=camera_shape, dtype=np.float32),
}

def action_spec(self) -> dm_env.specs.Array:
def action_spec(self) -> Dict[str, dm_env.specs.Array]:
"""Returns the action spec."""
return dm_env.specs.Array(
shape=(14,),
dtype=np.float32,
)
return {
"pose": dm_env.specs.Array(shape=(7,), dtype=np.float64), # [x, y, z, qx, qy, qz, qw]
"pixel_coords": dm_env.specs.Array(shape=(2,), dtype=np.int64), # [u, v]
"gripper_rot": dm_env.specs.Array(shape=(1,), dtype=np.float64),
}

def _compute_observation(self) -> np.ndarray:
"""Returns the observation."""
Expand Down Expand Up @@ -478,15 +500,39 @@ def world_2_pixel(self, camera_name, coords):
image_coords = intrinsics @ camera_coords
image_coords = image_coords[:2] / image_coords[2]

return image_coords
return jnp.round(image_coords).astype(jnp.int32)

def get_camera_params(self, camera_name):
    """Return the camera's parameter matrices.

    Args:
        camera_name: identifier passed through to the intrinsics/extrinsics
            helpers (overhead camera dimensions are used for intrinsics).

    Returns:
        dict with keys "intrinsics" and "extrinsics".
    """
    return {
        "intrinsics": self._get_camera_intrinsics(
            camera_name, self.overhead_camera_height, self.overhead_camera_width
        ),
        "extrinsics": self._get_camera_extrinsics(camera_name),
    }

def get_camera_metadata(self):
    """Return the overhead camera's parameters as flat scalar fields.

    Returns:
        dict with "intrinsics" (focal lengths fx/fy, principal point cx/cy)
        and "extrinsics" (camera pose as x/y/z translation plus an
        x/y/z/w quaternion).
    """
    intrinsics = self._get_camera_intrinsics("overhead_camera/overhead_camera", self.overhead_camera_height, self.overhead_camera_width)
    extrinsics = self._get_camera_extrinsics("overhead_camera/overhead_camera")
    # convert rotation to quat; scipy's as_quat() returns [x, y, z, w]
    quat = R.from_matrix(extrinsics[:3, :3]).as_quat()
    # NOTE(review): translation is read from the bottom row (extrinsics[3, i]).
    # A conventional 4x4 pose matrix stores translation in the last column
    # (extrinsics[i, 3]) — confirm the layout _get_camera_extrinsics returns;
    # if it is column-major-transposed this is fine, otherwise x/y/z are wrong.
    return {
        "intrinsics": {
            "fx": intrinsics[0, 0],
            "fy": intrinsics[1, 1],
            "cx": intrinsics[0, 2],
            "cy": intrinsics[1, 2],
        },
        "extrinsics": {
            "x": extrinsics[3, 0],
            "y": extrinsics[3, 1],
            "z": extrinsics[3, 2],
            "qx": quat[0],
            "qy": quat[1],
            "qz": quat[2],
            "qw": quat[3],
        }}

def prop_pick(self, prop_id):
"""Returns pick pose for a given prop."""
# get prop pose information
obj_pose = self.props_info[prop_id]["position"]
obj_quat = self.props_info[prop_id]["orientation"]
Expand All @@ -502,7 +548,7 @@ def prop_pick(self, prop_id):
return np.concatenate([obj_pose, grasp_quat])

def prop_place(self, prop_id, min_pose=None, max_pose=None):
"""Returns collisiin free place pose for a given prop."""
"""Returns collision free place pose for a given prop."""
# don't want to mess with actual physics
dummy_physics = deepcopy(self._physics)
prop_name = f"{prop_id}/{prop_id}"
Expand Down Expand Up @@ -601,39 +647,7 @@ def random_pick_and_place(self):


return pick_pose, place_pose

# def generate_task(self) -> RearrangementTask:
# """Generate a task given current environment state"""

# while True:
# # sample subject
# s_quant = random.choice(GenRefExp.quant)
# s_refexp, s_obj = GenRefExp.generate(s_quant, self.props_info)

# # sample spatial relationship
# rel, rel_fun = EvalSpatial.sample()

# props = GenRefExp.filter_props(self.props_info, s_obj, rel_fun)
# if props:
# o_quant = random.choice(GenRefExp.quant)
# o_refexp, _ = GenRefExp.generate(o_quant, props)

# task = RearrangementTask(
# instruction=f"Move {s_refexp} {rel} {o_refexp}",
# s_refexp=s_refexp,
# rel=rel,
# o_refexp=o_refexp,
# )
# if task.s_refexp_lf is not None and task.o_refexp_lf is not None:
# return task

# def update_task(self, task: RearrangementTask) -> None:
# self.task = task

# def success_score(self) -> float:
# """get the success score of task. Implementation based on RAVENS"""
# raise NotImplementedError

def sort_colours(self):
"""Generates pick/place action for sorting blue/green coloured blocks"""
colours = ["blue", "green"]
Expand Down Expand Up @@ -671,7 +685,7 @@ def sort_colours(self):
continue

return False, None, None

if __name__=="__main__":
# clear hydra global state to avoid conflicts with other hydra instances
hydra.core.global_hydra.GlobalHydra.instance().clear()
Expand All @@ -690,52 +704,23 @@ def sort_colours(self):
# instantiate color separation task
env = RearrangementEnv(COLOR_SEPARATING_CONFIG, True)
_, _, _, obs = env.reset()

### checking that domain model and prop info are consistent
### TODO move to test
# pro_info = env.props_info
# print(pro_info)
# domain_model = props_info2domain_model(pro_info)
# print(domain_model)
# props_info_new = domain_model2props_info(domain_model)
# print(props_info_new)
# # generate task
# task = env.generate_task()
# print(f"Task: {task.instruction}")

# s_entities = Semantics.eval_refexp(task.s_refexp_lf, domain_model).entities
# o_entities = Semantics.eval_refexp(task.o_refexp_lf, domain_model).entities

# print(s_entities)
# print(o_entities)

# perform reference resolution with a domain model

# complete single loop of sorting colours

while env.sort_colours()[0]:
_, pick_pose, place_pose = env.sort_colours()

pick_action = {
"pose": pick_pose,
"pixel_coords": env.world_2_pixel("overhead_camera/overhead_camera", pick_pose[:3]),
"gripper_rot": None,
}

place_action = {
"pose": place_pose,
"pixel_coords": env.world_2_pixel("overhead_camera/overhead_camera", place_pose[:3]),
"gripper_rot": None,
}

# TODO: delete below commented code when done debugging
#pixel = env.world_2_pixel("overhead_camera/overhead_camera", pick_pose[:3])
#print("pixel", pixel)
# overlay pixel on image
#from PIL import Image, ImageDraw
#print(obs["overhead_camera/rgb"].shape)
#image = Image.fromarray(obs["overhead_camera/rgb"])
#print(image.size)
#draw = ImageDraw.Draw(image)
#draw.ellipse((pixel[0]-10, pixel[1]-10, pixel[0]+10, pixel[1]+10), fill=(255, 0, 0, 0))
#image.show()
# convert pixel coords back to world coords
#world_coords = env.pixel_2_world("overhead_camera/overhead_camera", pixel)
#print("world", world_coords)

_, _, _, obs = env.step(np.concatenate([pick_pose, place_pose]))
_, _, _, obs = env.step(pick_action)
_, _, _, obs = env.step(place_action)
env.close()

# close passive viewer and check obs render
#rgb = obs["overhead_camera/rgb"]
#depth = obs["overhead_camera/depth"]
#from PIL import Image
#img = Image.fromarray(depth * 255).convert("L")
#img.show()
Loading

0 comments on commit b27c133

Please sign in to comment.