import numpy as np

from gym import utils
from gym.envs.mujoco import MuJocoPyEnv
from gym.spaces import Box

DEFAULT_CAMERA_CONFIG = {
    "trackbodyid": 2,
    "distance": 4.0,
    "lookat": np.array((0.0, 0.0, 1.15)),
    "elevation": -20.0,
}


class Walker2dEnv(MuJocoPyEnv, utils.EzPickle):
    metadata = {
        "render_modes": [
            "human",
            "rgb_array",
            "depth_array",
        ],
        "render_fps": 125,
    }

    def __init__(
        self,
        xml_file="walker2d.xml",
        forward_reward_weight=1.0,
        ctrl_cost_weight=1e-3,
        healthy_reward=1.0,
        terminate_when_unhealthy=True,
        healthy_z_range=(0.8, 2.0),
        healthy_angle_range=(-1.0, 1.0),
        reset_noise_scale=5e-3,
        exclude_current_positions_from_observation=True,
        **kwargs
    ):
        utils.EzPickle.__init__(
            self,
            xml_file,
            forward_reward_weight,
            ctrl_cost_weight,
            healthy_reward,
            terminate_when_unhealthy,
            healthy_z_range,
            healthy_angle_range,
            reset_noise_scale,
            exclude_current_positions_from_observation,
            **kwargs
        )

        self._forward_reward_weight = forward_reward_weight
        self._ctrl_cost_weight = ctrl_cost_weight

        self._healthy_reward = healthy_reward
        self._terminate_when_unhealthy = terminate_when_unhealthy

        self._healthy_z_range = healthy_z_range
        self._healthy_angle_range = healthy_angle_range

        self._reset_noise_scale = reset_noise_scale

        self._exclude_current_positions_from_observation = (
            exclude_current_positions_from_observation
        )

        # Observation is 17-dimensional when the root x position is excluded,
        # 18-dimensional otherwise.
        if exclude_current_positions_from_observation:
            observation_space = Box(
                low=-np.inf, high=np.inf, shape=(17,), dtype=np.float64
            )
        else:
            observation_space = Box(
                low=-np.inf, high=np.inf, shape=(18,), dtype=np.float64
            )

        MuJocoPyEnv.__init__(
            self, xml_file, 4, observation_space=observation_space, **kwargs
        )

    @property
    def healthy_reward(self):
        # Fixed alive bonus; granted every step when terminate_when_unhealthy
        # is True, otherwise only while the walker is healthy.
        return (
            float(self.is_healthy or self._terminate_when_unhealthy)
            * self._healthy_reward
        )

    def control_cost(self, action):
        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
        return control_cost

    @property
    def is_healthy(self):
        # Healthy while the torso height and pitch angle stay within the
        # configured ranges.
        z, angle = self.sim.data.qpos[1:3]

        min_z, max_z = self._healthy_z_range
        min_angle, max_angle = self._healthy_angle_range

        healthy_z = min_z < z < max_z
        healthy_angle = min_angle < angle < max_angle
        is_healthy = healthy_z and healthy_angle

        return is_healthy

    @property
    def terminated(self):
        terminated = not self.is_healthy if self._terminate_when_unhealthy else False
        return terminated

    def _get_obs(self):
        # Joint positions (optionally without the root x position) followed by
        # clipped joint velocities.
        position = self.sim.data.qpos.flat.copy()
        velocity = np.clip(self.sim.data.qvel.flat.copy(), -10, 10)

        if self._exclude_current_positions_from_observation:
            position = position[1:]

        observation = np.concatenate((position, velocity)).ravel()
        return observation

    def step(self, action):
        x_position_before = self.sim.data.qpos[0]
        self.do_simulation(action, self.frame_skip)
        x_position_after = self.sim.data.qpos[0]
        x_velocity = (x_position_after - x_position_before) / self.dt

        ctrl_cost = self.control_cost(action)

        # Reward is forward progress plus the alive bonus, minus the control cost.
        forward_reward = self._forward_reward_weight * x_velocity
        healthy_reward = self.healthy_reward

        rewards = forward_reward + healthy_reward
        costs = ctrl_cost

        observation = self._get_obs()
        reward = rewards - costs
        terminated = self.terminated
        info = {
            "x_position": x_position_after,
            "x_velocity": x_velocity,
        }

        if self.render_mode == "human":
            self.render()

        return observation, reward, terminated, False, info

    def reset_model(self):
        # Start from the nominal pose with small uniform noise on positions
        # and velocities.
        noise_low = -self._reset_noise_scale
        noise_high = self._reset_noise_scale

        qpos = self.init_qpos + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nq
        )
        qvel = self.init_qvel + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nv
        )

        self.set_state(qpos, qvel)

        observation = self._get_obs()
        return observation

    def viewer_setup(self):
        # Apply the default tracking-camera configuration to the mujoco-py viewer.
        assert self.viewer is not None
        for key, value in DEFAULT_CAMERA_CONFIG.items():
            if isinstance(value, np.ndarray):
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)
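

# Minimal usage sketch: drives the environment with random actions for a few
# steps. It assumes mujoco-py is installed and that "walker2d.xml" resolves to
# gym's bundled MuJoCo asset; pass an absolute xml_file path if the asset lives
# elsewhere.
if __name__ == "__main__":
    env = Walker2dEnv()
    observation, info = env.reset()
    for _ in range(10):
        action = env.action_space.sample()
        observation, reward, terminated, truncated, info = env.step(action)
        if terminated or truncated:
            observation, info = env.reset()
    env.close()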