""" | |
http://incompleteideas.net/MountainCar/MountainCar1.cp | |
permalink: https://perma.cc/6Z2N-PFWC | |
""" | |
import math | |
from typing import Optional | |
import numpy as np | |
import gym | |
from gym import spaces | |
from gym.envs.classic_control import utils | |
from gym.error import DependencyNotInstalled | |
class MountainCarEnv(gym.Env):
    """
    ### Description
    The Mountain Car MDP is a deterministic MDP that consists of a car placed stochastically
    at the bottom of a sinusoidal valley, with the only possible actions being the accelerations
    that can be applied to the car in either direction. The goal of the MDP is to strategically
    accelerate the car to reach the goal state on top of the right hill. There are two versions
    of the mountain car domain in gym: one with discrete actions and one with continuous.
    This version is the one with discrete actions.
    This MDP first appeared in [Andrew Moore's PhD Thesis (1990)](https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-209.pdf)
    ```
    @TECHREPORT{Moore90efficientmemory-based,
        author = {Andrew William Moore},
        title = {Efficient Memory-based Learning for Robot Control},
        institution = {University of Cambridge},
        year = {1990}
    }
    ```
    ### Observation Space
    The observation is a `ndarray` with shape `(2,)` where the elements correspond to the following:
    | Num | Observation                          | Min   | Max  | Unit         |
    |-----|--------------------------------------|-------|------|--------------|
    | 0   | position of the car along the x-axis | -1.2  | 0.6  | position (m) |
    | 1   | velocity of the car                  | -0.07 | 0.07 | velocity (v) |
    ### Action Space
    There are 3 discrete deterministic actions:
    | Num | Action                  |
    |-----|-------------------------|
    | 0   | Accelerate to the left  |
    | 1   | Don't accelerate        |
    | 2   | Accelerate to the right |
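    For reference, these are the spaces as constructed in `__init__` below (the
    exact `repr` formatting may vary between gym versions):
    ```
    import gym

    env = gym.make("MountainCar-v0")
    print(env.observation_space)  # e.g. Box([-1.2 -0.07], [0.6 0.07], (2,), float32)
    print(env.action_space)       # Discrete(3)
    ```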
    ### Transition Dynamics:
    Given an action, the mountain car follows the following transition dynamics:
    *velocity<sub>t+1</sub> = velocity<sub>t</sub> + (action - 1) * force - cos(3 * position<sub>t</sub>) * gravity*
    *position<sub>t+1</sub> = position<sub>t</sub> + velocity<sub>t+1</sub>*
    where force = 0.001 and gravity = 0.0025. The collisions at either end are inelastic with the velocity set to 0
    upon collision with the wall. The position is clipped to the range `[-1.2, 0.6]` and
    velocity is clipped to the range `[-0.07, 0.07]`.
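    As a minimal sketch, one transition step can be reproduced outside the
    environment (the constants and clipping mirror `step` below; `action` is 0, 1, or 2):
    ```
    import numpy as np

    force, gravity = 0.001, 0.0025

    def transition(position, velocity, action):
        # (action - 1) maps {0, 1, 2} to accelerations {-1, 0, +1} scaled by force.
        velocity += (action - 1) * force - np.cos(3 * position) * gravity
        velocity = np.clip(velocity, -0.07, 0.07)
        position = np.clip(position + velocity, -1.2, 0.6)
        if position == -1.2 and velocity < 0:
            velocity = 0.0  # inelastic collision with the left wall
        return position, velocity
    ```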
    ### Reward:
    The goal is to reach the flag placed on top of the right hill as quickly as possible; accordingly, the agent is
    penalised with a reward of -1 for each timestep.
    ### Starting State
    The position of the car is assigned a uniform random value in *[-0.6, -0.4]*.
    The starting velocity of the car is always assigned to 0.
    ### Episode End
    The episode ends if either of the following happens:
    1. Termination: The position of the car is greater than or equal to 0.5 (the goal position on top of the right hill)
    2. Truncation: The length of the episode is 200.
    ### Arguments
    ```
    gym.make('MountainCar-v0')
    ```
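    A minimal interaction loop, assuming the five-value `step` API this class
    implements (random actions stand in for a trained policy; termination and
    truncation are handled as described above):
    ```
    import gym

    env = gym.make("MountainCar-v0")
    observation, info = env.reset(seed=42)
    for _ in range(1000):
        action = env.action_space.sample()  # replace with a real policy
        observation, reward, terminated, truncated, info = env.step(action)
        if terminated or truncated:
            observation, info = env.reset()
    env.close()
    ```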
    ### Version History
    * v0: Initial version release (1.0.0)
    """

    metadata = {
        "render_modes": ["human", "rgb_array"],
        "render_fps": 30,
    }

    def __init__(self, render_mode: Optional[str] = None, goal_velocity=0):
        self.min_position = -1.2
        self.max_position = 0.6
        self.max_speed = 0.07
        self.goal_position = 0.5
        self.goal_velocity = goal_velocity

        self.force = 0.001
        self.gravity = 0.0025

        self.low = np.array([self.min_position, -self.max_speed], dtype=np.float32)
        self.high = np.array([self.max_position, self.max_speed], dtype=np.float32)

        self.render_mode = render_mode

        self.screen_width = 600
        self.screen_height = 400
        self.screen = None
        self.clock = None
        self.isopen = True

        self.action_space = spaces.Discrete(3)
        self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32)

    def step(self, action: int):
        assert self.action_space.contains(
            action
        ), f"{action!r} ({type(action)}) invalid"

        position, velocity = self.state
        # Dynamics from the docstring: (action - 1) selects acceleration in {-1, 0, +1}.
        velocity += (action - 1) * self.force + math.cos(3 * position) * (-self.gravity)
        velocity = np.clip(velocity, -self.max_speed, self.max_speed)
        position += velocity
        position = np.clip(position, self.min_position, self.max_position)
        # Inelastic collision with the left wall: the car stops rather than bounces.
        if position == self.min_position and velocity < 0:
            velocity = 0

        terminated = bool(
            position >= self.goal_position and velocity >= self.goal_velocity
        )
        reward = -1.0

        self.state = (position, velocity)
        if self.render_mode == "human":
            self.render()
        return np.array(self.state, dtype=np.float32), reward, terminated, False, {}

    def reset(
        self,
        *,
        seed: Optional[int] = None,
        options: Optional[dict] = None,
    ):
        super().reset(seed=seed)
        # Note that if you use custom reset bounds, it may lead to out-of-bound
        # state/observations.
        low, high = utils.maybe_parse_reset_bounds(options, -0.6, -0.4)
        self.state = np.array([self.np_random.uniform(low=low, high=high), 0])

        if self.render_mode == "human":
            self.render()
        return np.array(self.state, dtype=np.float32), {}

    def _height(self, xs):
        # Hill profile used for rendering: sin(3x) rescaled into [0.1, 1.0].
        return np.sin(3 * xs) * 0.45 + 0.55

    def render(self):
        if self.render_mode is None:
            gym.logger.warn(
                "You are calling render method without specifying any render mode. "
                "You can specify the render_mode at initialization, "
                f'e.g. gym.make("{self.spec.id}", render_mode="rgb_array")'
            )
            return

        try:
            import pygame
            from pygame import gfxdraw
        except ImportError:
            raise DependencyNotInstalled(
                "pygame is not installed, run `pip install gym[classic_control]`"
            )
        if self.screen is None:
            pygame.init()
            if self.render_mode == "human":
                pygame.display.init()
                self.screen = pygame.display.set_mode(
                    (self.screen_width, self.screen_height)
                )
            else:  # mode in "rgb_array"
                self.screen = pygame.Surface((self.screen_width, self.screen_height))
        if self.clock is None:
            self.clock = pygame.time.Clock()

        world_width = self.max_position - self.min_position
        scale = self.screen_width / world_width
        carwidth = 40
        carheight = 20

        self.surf = pygame.Surface((self.screen_width, self.screen_height))
        self.surf.fill((255, 255, 255))

        pos = self.state[0]

        xs = np.linspace(self.min_position, self.max_position, 100)
        ys = self._height(xs)
        xys = list(zip((xs - self.min_position) * scale, ys * scale))

        pygame.draw.aalines(self.surf, points=xys, closed=False, color=(0, 0, 0))

        clearance = 10

        l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
        coords = []
        for c in [(l, b), (l, t), (r, t), (r, b)]:
            c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos))
            coords.append(
                (
                    c[0] + (pos - self.min_position) * scale,
                    c[1] + clearance + self._height(pos) * scale,
                )
            )

        gfxdraw.aapolygon(self.surf, coords, (0, 0, 0))
        gfxdraw.filled_polygon(self.surf, coords, (0, 0, 0))

        for c in [(carwidth / 4, 0), (-carwidth / 4, 0)]:
            c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos))
            wheel = (
                int(c[0] + (pos - self.min_position) * scale),
                int(c[1] + clearance + self._height(pos) * scale),
            )

            gfxdraw.aacircle(
                self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128)
            )
            gfxdraw.filled_circle(
                self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128)
            )

        flagx = int((self.goal_position - self.min_position) * scale)
        flagy1 = int(self._height(self.goal_position) * scale)
        flagy2 = flagy1 + 50
        gfxdraw.vline(self.surf, flagx, flagy1, flagy2, (0, 0, 0))

        gfxdraw.aapolygon(
            self.surf,
            [(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)],
            (204, 204, 0),
        )
        gfxdraw.filled_polygon(
            self.surf,
            [(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)],
            (204, 204, 0),
        )

        self.surf = pygame.transform.flip(self.surf, False, True)
        self.screen.blit(self.surf, (0, 0))
        if self.render_mode == "human":
            pygame.event.pump()
            self.clock.tick(self.metadata["render_fps"])
            pygame.display.flip()

        elif self.render_mode == "rgb_array":
            return np.transpose(
                np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2)
            )

    def get_keys_to_action(self):
        # Control with left and right arrow keys: 276/275 are the legacy pygame
        # keycodes for K_LEFT and K_RIGHT; no key or both keys means "don't accelerate".
        return {(): 1, (276,): 0, (275,): 2, (275, 276): 1}

    def close(self):
        if self.screen is not None:
            import pygame

            pygame.display.quit()
            pygame.quit()
            self.isopen = False