single_task_rl_domain_randomization.py

import numpy as np

from rlbench import DomainRandomizationEnvironment
from rlbench import RandomizeEvery
from rlbench import VisualRandomizationConfig
from rlbench import ArmActionMode
from rlbench import ObservationConfig
from rlbench.action_modes import ActionMode
from rlbench.tasks import ReachTarget
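

# A trivial random-action agent: it ignores the observation and returns
# small random joint velocities while keeping the gripper open.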
class Agent(object):

    def __init__(self, action_size):
        self.action_size = action_size

    def act(self, obs):
        arm = np.random.normal(0.0, 0.1, size=(self.action_size - 1,))
        gripper = [1.0]  # Always open
        return np.concatenate([arm, gripper], axis=-1)
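

# Request every available observation (camera images, depth, proprioception).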
obs_config = ObservationConfig()
obs_config.set_all(True)

# We will borrow some textures from the tests directory.
rand_config = VisualRandomizationConfig(
    image_directory='../tests/unit/assets/textures')

action_mode = ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)
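# Randomize the scene's visual appearance at the start of each episode.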
env = DomainRandomizationEnvironment(
    action_mode, obs_config=obs_config, headless=False,
    randomize_every=RandomizeEvery.EPISODE, frequency=1,
    visual_randomization_config=rand_config)
env.launch()
task = env.get_task(ReachTarget)
agent = Agent(env.action_size)
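
# A toy "training" loop: reset the task every `episode_length` steps and
# take random actions from the agent in between.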
training_steps = 120
episode_length = 20
obs = None
for i in range(training_steps):
    if i % episode_length == 0:
        print('Reset Episode')
        descriptions, obs = task.reset()
        print(descriptions)
    action = agent.act(obs)
    obs, reward, terminate = task.step(action)

print('Done')
env.shutdown()