id
int64 0
458k
| file_name
stringlengths 4
119
| file_path
stringlengths 14
227
| content
stringlengths 24
9.96M
| size
int64 24
9.96M
| language
stringclasses 1
value | extension
stringclasses 14
values | total_lines
int64 1
219k
| avg_line_length
float64 2.52
4.63M
| max_line_length
int64 5
9.91M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 7
101
| repo_stars
int64 100
139k
| repo_forks
int64 0
26.4k
| repo_open_issues
int64 0
2.27k
| repo_license
stringclasses 12
values | repo_extraction_date
stringclasses 433
values |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2,289,300 | test_envs.py | WindyLab_Gym-PPS/gym/envs/tests/test_envs.py | import pytest
import numpy as np
from gym import envs
from gym.envs.tests.spec_list import spec_list
from gym.spaces import Box
from gym.utils.env_checker import check_env
# This runs a smoketest on each official registered env. We may want
# to try also running environments which are not officially registered
# envs.
@pytest.mark.parametrize("spec", spec_list)
def test_env(spec):
# Capture warnings
with pytest.warns(None) as warnings:
env = spec.make()
# Test if env adheres to Gym API
check_env(env, warn=True, skip_render_check=True)
# Check that dtype is explicitly declared for gym.Box spaces
for warning_msg in warnings:
assert "autodetected dtype" not in str(warning_msg.message)
ob_space = env.observation_space
act_space = env.action_space
ob = env.reset()
assert ob_space.contains(ob), "Reset observation: {!r} not in space".format(ob)
if isinstance(ob_space, Box):
# Only checking dtypes for Box spaces to avoid iterating through tuple entries
assert (
ob.dtype == ob_space.dtype
), "Reset observation dtype: {}, expected: {}".format(ob.dtype, ob_space.dtype)
a = act_space.sample()
observation, reward, done, _info = env.step(a)
assert ob_space.contains(observation), "Step observation: {!r} not in space".format(
observation
)
assert np.isscalar(reward), "{} is not a scalar for {}".format(reward, env)
assert isinstance(done, bool), "Expected {} to be a boolean".format(done)
if isinstance(ob_space, Box):
assert (
observation.dtype == ob_space.dtype
), "Step observation dtype: {}, expected: {}".format(ob.dtype, ob_space.dtype)
for mode in env.metadata.get("render.modes", []):
env.render(mode=mode)
# Make sure we can render the environment after close.
for mode in env.metadata.get("render.modes", []):
env.render(mode=mode)
env.close()
# Run a longer rollout on some environments
def test_random_rollout():
for env in [envs.make("CartPole-v0"), envs.make("FrozenLake-v1")]:
agent = lambda ob: env.action_space.sample()
ob = env.reset()
for _ in range(10):
assert env.observation_space.contains(ob)
a = agent(ob)
assert env.action_space.contains(a)
(ob, _reward, done, _info) = env.step(a)
if done:
break
env.close()
def test_env_render_result_is_immutable():
environs = [
envs.make("Taxi-v3"),
envs.make("FrozenLake-v1"),
]
for env in environs:
env.reset()
output = env.render(mode="ansi")
assert isinstance(output, str)
env.close()
| 2,735 | Python | .py | 68 | 33.647059 | 88 | 0.656109 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,301 | test_envs_semantics.py | WindyLab_Gym-PPS/gym/envs/tests/test_envs_semantics.py | """
Currently disabled since this was done in a very poor way
Hashed str representation of objects
"""
import json
import hashlib
import os
import pytest
from gym import spaces, logger
from gym.envs.tests.spec_list import spec_list
DATA_DIR = os.path.dirname(__file__)
ROLLOUT_STEPS = 100
episodes = ROLLOUT_STEPS
steps = ROLLOUT_STEPS
ROLLOUT_FILE = os.path.join(DATA_DIR, "rollout.json")
if not os.path.isfile(ROLLOUT_FILE):
with open(ROLLOUT_FILE, "w") as outfile:
json.dump({}, outfile, indent=2)
def hash_object(unhashed):
return hashlib.sha256(
str(unhashed).encode("utf-16")
).hexdigest() # This is really bad, str could be same while values change
def generate_rollout_hash(spec):
spaces.seed(0)
env = spec.make()
env.seed(0)
observation_list = []
action_list = []
reward_list = []
done_list = []
total_steps = 0
for episode in range(episodes):
if total_steps >= ROLLOUT_STEPS:
break
observation = env.reset()
for step in range(steps):
action = env.action_space.sample()
observation, reward, done, _ = env.step(action)
action_list.append(action)
observation_list.append(observation)
reward_list.append(reward)
done_list.append(done)
total_steps += 1
if total_steps >= ROLLOUT_STEPS:
break
if done:
break
observations_hash = hash_object(observation_list)
actions_hash = hash_object(action_list)
rewards_hash = hash_object(reward_list)
dones_hash = hash_object(done_list)
env.close()
return observations_hash, actions_hash, rewards_hash, dones_hash
@pytest.mark.parametrize("spec", spec_list)
def test_env_semantics(spec):
logger.warn("Skipping this test. Existing hashes were generated in a bad way")
return
with open(ROLLOUT_FILE) as data_file:
rollout_dict = json.load(data_file)
if spec.id not in rollout_dict:
if not spec.nondeterministic:
logger.warn(
"Rollout does not exist for {}, run generate_json.py to generate rollouts for new envs".format(
spec.id
)
)
return
logger.info("Testing rollout for {} environment...".format(spec.id))
observations_now, actions_now, rewards_now, dones_now = generate_rollout_hash(spec)
errors = []
if rollout_dict[spec.id]["observations"] != observations_now:
errors.append(
"Observations not equal for {} -- expected {} but got {}".format(
spec.id, rollout_dict[spec.id]["observations"], observations_now
)
)
if rollout_dict[spec.id]["actions"] != actions_now:
errors.append(
"Actions not equal for {} -- expected {} but got {}".format(
spec.id, rollout_dict[spec.id]["actions"], actions_now
)
)
if rollout_dict[spec.id]["rewards"] != rewards_now:
errors.append(
"Rewards not equal for {} -- expected {} but got {}".format(
spec.id, rollout_dict[spec.id]["rewards"], rewards_now
)
)
if rollout_dict[spec.id]["dones"] != dones_now:
errors.append(
"Dones not equal for {} -- expected {} but got {}".format(
spec.id, rollout_dict[spec.id]["dones"], dones_now
)
)
if len(errors):
for error in errors:
logger.warn(error)
raise ValueError(errors)
| 3,577 | Python | .py | 98 | 28.479592 | 111 | 0.614182 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,302 | test_determinism.py | WindyLab_Gym-PPS/gym/envs/tests/test_determinism.py | import numpy as np
import pytest
from gym.envs.tests.spec_list import spec_list
@pytest.mark.parametrize("spec", spec_list)
def test_env(spec):
# Note that this precludes running this test in multiple
# threads. However, we probably already can't do multithreading
# due to some environments.
env1 = spec.make()
env1.seed(0)
initial_observation1 = env1.reset()
env1.action_space.seed(0)
action_samples1 = [env1.action_space.sample() for i in range(4)]
step_responses1 = [env1.step(action) for action in action_samples1]
env1.close()
env2 = spec.make()
env2.seed(0)
initial_observation2 = env2.reset()
env2.action_space.seed(0)
action_samples2 = [env2.action_space.sample() for i in range(4)]
step_responses2 = [env2.step(action) for action in action_samples2]
env2.close()
for i, (action_sample1, action_sample2) in enumerate(
zip(action_samples1, action_samples2)
):
try:
assert_equals(action_sample1, action_sample2)
except AssertionError:
print("env1.action_space=", env1.action_space)
print("env2.action_space=", env2.action_space)
print("action_samples1=", action_samples1)
print("action_samples2=", action_samples2)
print(
"[{}] action_sample1: {}, action_sample2: {}".format(
i, action_sample1, action_sample2
)
)
raise
# Don't check rollout equality if it's a a nondeterministic
# environment.
if spec.nondeterministic:
return
assert_equals(initial_observation1, initial_observation2)
for i, ((o1, r1, d1, i1), (o2, r2, d2, i2)) in enumerate(
zip(step_responses1, step_responses2)
):
assert_equals(o1, o2, "[{}] ".format(i))
assert r1 == r2, "[{}] r1: {}, r2: {}".format(i, r1, r2)
assert d1 == d2, "[{}] d1: {}, d2: {}".format(i, d1, d2)
# Go returns a Pachi game board in info, which doesn't
# properly check equality. For now, we hack around this by
# just skipping Go.
if spec.id not in ["Go9x9-v0", "Go19x19-v0"]:
assert_equals(i1, i2, "[{}] ".format(i))
def assert_equals(a, b, prefix=None):
assert type(a) == type(b), "{}Differing types: {} and {}".format(prefix, a, b)
if isinstance(a, dict):
assert list(a.keys()) == list(b.keys()), "{}Key sets differ: {} and {}".format(
prefix, a, b
)
for k in a.keys():
v_a = a[k]
v_b = b[k]
assert_equals(v_a, v_b)
elif isinstance(a, np.ndarray):
np.testing.assert_array_equal(a, b)
elif isinstance(a, tuple):
for elem_from_a, elem_from_b in zip(a, b):
assert_equals(elem_from_a, elem_from_b)
else:
assert a == b
| 2,863 | Python | .py | 71 | 32.338028 | 87 | 0.6 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,303 | test_registration.py | WindyLab_Gym-PPS/gym/envs/tests/test_registration.py | # -*- coding: utf-8 -*-
import gym
from gym import error, envs
from gym.envs import registration
from gym.envs.classic_control import cartpole
class ArgumentEnv(gym.Env):
def __init__(self, arg1, arg2, arg3):
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
gym.register(
id="test.ArgumentEnv-v0",
entry_point="gym.envs.tests.test_registration:ArgumentEnv",
kwargs={
"arg1": "arg1",
"arg2": "arg2",
},
)
def test_make():
env = envs.make("CartPole-v0")
assert env.spec.id == "CartPole-v0"
assert isinstance(env.unwrapped, cartpole.CartPoleEnv)
def test_make_with_kwargs():
env = envs.make("test.ArgumentEnv-v0", arg2="override_arg2", arg3="override_arg3")
assert env.spec.id == "test.ArgumentEnv-v0"
assert isinstance(env.unwrapped, ArgumentEnv)
assert env.arg1 == "arg1"
assert env.arg2 == "override_arg2"
assert env.arg3 == "override_arg3"
def test_make_deprecated():
try:
envs.make("Humanoid-v0")
except error.Error:
pass
else:
assert False
def test_spec():
spec = envs.spec("CartPole-v0")
assert spec.id == "CartPole-v0"
def test_spec_with_kwargs():
map_name_value = "8x8"
env = gym.make("FrozenLake-v1", map_name=map_name_value)
assert env.spec._kwargs["map_name"] == map_name_value
def test_missing_lookup():
registry = registration.EnvRegistry()
registry.register(id="Test-v0", entry_point=None)
registry.register(id="Test-v15", entry_point=None)
registry.register(id="Test-v9", entry_point=None)
registry.register(id="Other-v100", entry_point=None)
try:
registry.spec("Test-v1") # must match an env name but not the version above
except error.DeprecatedEnv:
pass
else:
assert False
try:
registry.spec("Unknown-v1")
except error.UnregisteredEnv:
pass
else:
assert False
def test_malformed_lookup():
registry = registration.EnvRegistry()
try:
registry.spec(u"“Breakout-v0”")
except error.Error as e:
assert "malformed environment ID" in "{}".format(
e
), "Unexpected message: {}".format(e)
else:
assert False
| 2,244 | Python | .py | 71 | 26 | 86 | 0.657674 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,304 | test_mujoco_v2_to_v3_conversion.py | WindyLab_Gym-PPS/gym/envs/tests/test_mujoco_v2_to_v3_conversion.py | import unittest
import numpy as np
from gym import envs
from gym.envs.tests.spec_list import skip_mujoco, SKIP_MUJOCO_WARNING_MESSAGE
def verify_environments_match(
old_environment_id, new_environment_id, seed=1, num_actions=1000
):
old_environment = envs.make(old_environment_id)
new_environment = envs.make(new_environment_id)
old_environment.seed(seed)
new_environment.seed(seed)
old_reset_observation = old_environment.reset()
new_reset_observation = new_environment.reset()
np.testing.assert_allclose(old_reset_observation, new_reset_observation)
for i in range(num_actions):
action = old_environment.action_space.sample()
old_observation, old_reward, old_done, old_info = old_environment.step(action)
new_observation, new_reward, new_done, new_info = new_environment.step(action)
eps = 1e-6
np.testing.assert_allclose(old_observation, new_observation, atol=eps)
np.testing.assert_allclose(old_reward, new_reward, atol=eps)
np.testing.assert_allclose(old_done, new_done, atol=eps)
for key in old_info:
np.testing.assert_allclose(old_info[key], new_info[key], atol=eps)
@unittest.skipIf(skip_mujoco, SKIP_MUJOCO_WARNING_MESSAGE)
class Mujocov2Tov3ConversionTest(unittest.TestCase):
def test_environments_match(self):
test_cases = (
{"old_id": "Swimmer-v2", "new_id": "Swimmer-v3"},
{"old_id": "Hopper-v2", "new_id": "Hopper-v3"},
{"old_id": "Walker2d-v2", "new_id": "Walker2d-v3"},
{"old_id": "HalfCheetah-v2", "new_id": "HalfCheetah-v3"},
{"old_id": "Ant-v2", "new_id": "Ant-v3"},
{"old_id": "Humanoid-v2", "new_id": "Humanoid-v3"},
)
for test_case in test_cases:
verify_environments_match(test_case["old_id"], test_case["new_id"])
# Raises KeyError because the new envs have extra info
with self.assertRaises(KeyError):
verify_environments_match("Swimmer-v3", "Swimmer-v2")
# Raises KeyError because the new envs have extra info
with self.assertRaises(KeyError):
verify_environments_match("Humanoid-v3", "Humanoid-v2")
# Raises KeyError because the new envs have extra info
with self.assertRaises(KeyError):
verify_environments_match("Swimmer-v3", "Swimmer-v2")
if __name__ == "__main__":
unittest.main()
| 2,439 | Python | .py | 48 | 42.979167 | 86 | 0.665684 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,305 | spec_list.py | WindyLab_Gym-PPS/gym/envs/tests/spec_list.py | from gym import envs, logger
import os
SKIP_MUJOCO_WARNING_MESSAGE = (
"Cannot run mujoco test (either license key not found or mujoco not"
"installed properly)."
)
skip_mujoco = not (os.environ.get("MUJOCO_KEY"))
if not skip_mujoco:
try:
import mujoco_py
except ImportError:
skip_mujoco = True
def should_skip_env_spec_for_tests(spec):
# We skip tests for envs that require dependencies or are otherwise
# troublesome to run frequently
ep = spec.entry_point
# Skip mujoco tests for pull request CI
if skip_mujoco and (
ep.startswith("gym.envs.mujoco") or ep.startswith("gym.envs.robotics:")
):
return True
try:
import atari_py
except ImportError:
if ep.startswith("gym.envs.atari"):
return True
try:
import Box2D
except ImportError:
if ep.startswith("gym.envs.box2d"):
return True
if (
"GoEnv" in ep
or "HexEnv" in ep
or (
ep.startswith("gym.envs.atari")
and not spec.id.startswith("Pong")
and not spec.id.startswith("Seaquest")
)
):
logger.warn("Skipping tests for env {}".format(ep))
return True
return False
spec_list = [
spec
for spec in sorted(envs.registry.all(), key=lambda x: x.id)
if spec.entry_point is not None and not should_skip_env_spec_for_tests(spec)
]
| 1,431 | Python | .py | 48 | 23.375 | 80 | 0.637555 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,306 | test_frozenlake_dfs.py | WindyLab_Gym-PPS/gym/envs/tests/test_frozenlake_dfs.py | import pytest
import numpy as np
from gym.envs.toy_text.frozen_lake import generate_random_map
# Test that FrozenLake map generation creates valid maps of various sizes.
def test_frozenlake_dfs_map_generation():
def frozenlake_dfs_path_exists(res):
frontier, discovered = [], set()
frontier.append((0, 0))
while frontier:
r, c = frontier.pop()
if not (r, c) in discovered:
discovered.add((r, c))
directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]
for x, y in directions:
r_new = r + x
c_new = c + y
if r_new < 0 or r_new >= size or c_new < 0 or c_new >= size:
continue
if res[r_new][c_new] == "G":
return True
if res[r_new][c_new] not in "#H":
frontier.append((r_new, c_new))
return False
map_sizes = [5, 10, 200]
for size in map_sizes:
new_frozenlake = generate_random_map(size)
assert len(new_frozenlake) == size
assert len(new_frozenlake[0]) == size
assert frozenlake_dfs_path_exists(new_frozenlake)
| 1,224 | Python | .py | 29 | 30.034483 | 80 | 0.52309 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,307 | test_kellycoinflip.py | WindyLab_Gym-PPS/gym/envs/tests/test_kellycoinflip.py | from gym.envs.toy_text.kellycoinflip import KellyCoinflipEnv
class TestKellyCoinflipEnv:
@staticmethod
def test_done_when_reaches_max_wealth():
# https://github.com/openai/gym/issues/1266
env = KellyCoinflipEnv()
env.seed(1)
env.reset()
done = False
while not done:
action = int(env.wealth * 20) # bet 20% of the wealth
observation, reward, done, info = env.step(action)
assert env.wealth == env.max_wealth
| 500 | Python | .py | 13 | 30.384615 | 66 | 0.643892 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,308 | guessing_game.py | WindyLab_Gym-PPS/gym/envs/toy_text/guessing_game.py | import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
class GuessingGame(gym.Env):
"""Number guessing game
The object of the game is to guess within 1% of the randomly chosen number
within 200 time steps
After each step the agent is provided with one of four possible observations
which indicate where the guess is in relation to the randomly chosen number
0 - No guess yet submitted (only after reset)
1 - Guess is lower than the target
2 - Guess is equal to the target
3 - Guess is higher than the target
The rewards are:
0 if the agent's guess is outside of 1% of the target
1 if the agent's guess is inside 1% of the target
The episode terminates after the agent guesses within 1% of the target or
200 steps have been taken
The agent will need to use a memory of previously submitted actions and observations
in order to efficiently explore the available actions
The purpose is to have agents optimize their exploration parameters (e.g. how far to
explore from previous actions) based on previous experience. Because the goal changes
each episode a state-value or action-value function isn't able to provide any additional
benefit apart from being able to tell whether to increase or decrease the next guess.
The perfect agent would likely learn the bounds of the action space (without referring
to them explicitly) and then follow binary tree style exploration towards to goal number
"""
def __init__(self):
self.range = 1000 # Randomly selected number is within +/- this value
self.bounds = 10000
self.action_space = spaces.Box(
low=np.array([-self.bounds]).astype(np.float32),
high=np.array([self.bounds]).astype(np.float32),
)
self.observation_space = spaces.Discrete(4)
self.number = 0
self.guess_count = 0
self.guess_max = 200
self.observation = 0
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
if isinstance(action, (int, float)):
action = np.array([action])
elif isinstance(action, list):
action = np.array(action)
assert self.action_space.contains(action)
if action < self.number:
self.observation = 1
elif action == self.number:
self.observation = 2
elif action > self.number:
self.observation = 3
reward = 0
done = False
if (
(self.number - self.range * 0.01)
< action
< (self.number + self.range * 0.01)
):
reward = 1
done = True
self.guess_count += 1
if self.guess_count >= self.guess_max:
done = True
return (
self.observation,
reward,
done,
{"number": self.number, "guesses": self.guess_count},
)
def reset(self):
self.number = self.np_random.uniform(-self.range, self.range)
self.guess_count = 0
self.observation = 0
return self.observation
| 3,267 | Python | .py | 80 | 32.5125 | 92 | 0.647264 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,309 | cliffwalking.py | WindyLab_Gym-PPS/gym/envs/toy_text/cliffwalking.py | import numpy as np
import sys
from contextlib import closing
from io import StringIO
from gym.envs.toy_text import discrete
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
class CliffWalkingEnv(discrete.DiscreteEnv):
"""
This is a simple implementation of the Gridworld Cliff
reinforcement learning task.
Adapted from Example 6.6 (page 106) from Reinforcement Learning: An Introduction
by Sutton and Barto:
http://incompleteideas.net/book/bookdraft2018jan1.pdf
With inspiration from:
https://github.com/dennybritz/reinforcement-learning/blob/master/lib/envs/cliff_walking.py
The board is a 4x12 matrix, with (using NumPy matrix indexing):
[3, 0] as the start at bottom-left
[3, 11] as the goal at bottom-right
[3, 1..10] as the cliff at bottom-center
Each time step incurs -1 reward, and stepping into the cliff incurs -100 reward
and a reset to the start. An episode terminates when the agent reaches the goal.
"""
metadata = {"render.modes": ["human", "ansi"]}
def __init__(self):
self.shape = (4, 12)
self.start_state_index = np.ravel_multi_index((3, 0), self.shape)
nS = np.prod(self.shape)
nA = 4
# Cliff Location
self._cliff = np.zeros(self.shape, dtype=np.bool)
self._cliff[3, 1:-1] = True
# Calculate transition probabilities and rewards
P = {}
for s in range(nS):
position = np.unravel_index(s, self.shape)
P[s] = {a: [] for a in range(nA)}
P[s][UP] = self._calculate_transition_prob(position, [-1, 0])
P[s][RIGHT] = self._calculate_transition_prob(position, [0, 1])
P[s][DOWN] = self._calculate_transition_prob(position, [1, 0])
P[s][LEFT] = self._calculate_transition_prob(position, [0, -1])
# Calculate initial state distribution
# We always start in state (3, 0)
isd = np.zeros(nS)
isd[self.start_state_index] = 1.0
super(CliffWalkingEnv, self).__init__(nS, nA, P, isd)
def _limit_coordinates(self, coord):
"""
Prevent the agent from falling out of the grid world
:param coord:
:return:
"""
coord[0] = min(coord[0], self.shape[0] - 1)
coord[0] = max(coord[0], 0)
coord[1] = min(coord[1], self.shape[1] - 1)
coord[1] = max(coord[1], 0)
return coord
def _calculate_transition_prob(self, current, delta):
"""
Determine the outcome for an action. Transition Prob is always 1.0.
:param current: Current position on the grid as (row, col)
:param delta: Change in position for transition
:return: (1.0, new_state, reward, done)
"""
new_position = np.array(current) + np.array(delta)
new_position = self._limit_coordinates(new_position).astype(int)
new_state = np.ravel_multi_index(tuple(new_position), self.shape)
if self._cliff[tuple(new_position)]:
return [(1.0, self.start_state_index, -100, False)]
terminal_state = (self.shape[0] - 1, self.shape[1] - 1)
is_done = tuple(new_position) == terminal_state
return [(1.0, new_state, -1, is_done)]
def render(self, mode="human"):
outfile = StringIO() if mode == "ansi" else sys.stdout
for s in range(self.nS):
position = np.unravel_index(s, self.shape)
if self.s == s:
output = " x "
# Print terminal state
elif position == (3, 11):
output = " T "
elif self._cliff[position]:
output = " C "
else:
output = " o "
if position[1] == 0:
output = output.lstrip()
if position[1] == self.shape[1] - 1:
output = output.rstrip()
output += "\n"
outfile.write(output)
outfile.write("\n")
# No need to return anything for human
if mode != "human":
with closing(outfile):
return outfile.getvalue()
| 4,116 | Python | .py | 98 | 32.938776 | 94 | 0.591592 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,310 | frozen_lake.py | WindyLab_Gym-PPS/gym/envs/toy_text/frozen_lake.py | import sys
from contextlib import closing
import numpy as np
from io import StringIO
from gym import utils
from gym.envs.toy_text import discrete
LEFT = 0
DOWN = 1
RIGHT = 2
UP = 3
MAPS = {
"4x4": ["SFFF", "FHFH", "FFFH", "HFFG"],
"8x8": [
"SFFFFFFF",
"FFFFFFFF",
"FFFHFFFF",
"FFFFFHFF",
"FFFHFFFF",
"FHHFFFHF",
"FHFFHFHF",
"FFFHFFFG",
],
}
def generate_random_map(size=8, p=0.8):
"""Generates a random valid map (one that has a path from start to goal)
:param size: size of each side of the grid
:param p: probability that a tile is frozen
"""
valid = False
# DFS to check that it's a valid path.
def is_valid(res):
frontier, discovered = [], set()
frontier.append((0, 0))
while frontier:
r, c = frontier.pop()
if not (r, c) in discovered:
discovered.add((r, c))
directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]
for x, y in directions:
r_new = r + x
c_new = c + y
if r_new < 0 or r_new >= size or c_new < 0 or c_new >= size:
continue
if res[r_new][c_new] == "G":
return True
if res[r_new][c_new] != "H":
frontier.append((r_new, c_new))
return False
while not valid:
p = min(1, p)
res = np.random.choice(["F", "H"], (size, size), p=[p, 1 - p])
res[0][0] = "S"
res[-1][-1] = "G"
valid = is_valid(res)
return ["".join(x) for x in res]
class FrozenLakeEnv(discrete.DiscreteEnv):
"""
Winter is here. You and your friends were tossing around a frisbee at the
park when you made a wild throw that left the frisbee out in the middle of
the lake. The water is mostly frozen, but there are a few holes where the
ice has melted. If you step into one of those holes, you'll fall into the
freezing water. At this time, there's an international frisbee shortage, so
it's absolutely imperative that you navigate across the lake and retrieve
the disc. However, the ice is slippery, so you won't always move in the
direction you intend.
The surface is described using a grid like the following
SFFF
FHFH
FFFH
HFFG
S : starting point, safe
F : frozen surface, safe
H : hole, fall to your doom
G : goal, where the frisbee is located
The episode ends when you reach the goal or fall in a hole.
You receive a reward of 1 if you reach the goal, and zero otherwise.
"""
metadata = {"render.modes": ["human", "ansi"]}
def __init__(self, desc=None, map_name="4x4", is_slippery=True):
if desc is None and map_name is None:
desc = generate_random_map()
elif desc is None:
desc = MAPS[map_name]
self.desc = desc = np.asarray(desc, dtype="c")
self.nrow, self.ncol = nrow, ncol = desc.shape
self.reward_range = (0, 1)
nA = 4
nS = nrow * ncol
isd = np.array(desc == b"S").astype("float64").ravel()
isd /= isd.sum()
P = {s: {a: [] for a in range(nA)} for s in range(nS)}
def to_s(row, col):
return row * ncol + col
def inc(row, col, a):
if a == LEFT:
col = max(col - 1, 0)
elif a == DOWN:
row = min(row + 1, nrow - 1)
elif a == RIGHT:
col = min(col + 1, ncol - 1)
elif a == UP:
row = max(row - 1, 0)
return (row, col)
def update_probability_matrix(row, col, action):
newrow, newcol = inc(row, col, action)
newstate = to_s(newrow, newcol)
newletter = desc[newrow, newcol]
done = bytes(newletter) in b"GH"
reward = float(newletter == b"G")
return newstate, reward, done
for row in range(nrow):
for col in range(ncol):
s = to_s(row, col)
for a in range(4):
li = P[s][a]
letter = desc[row, col]
if letter in b"GH":
li.append((1.0, s, 0, True))
else:
if is_slippery:
for b in [(a - 1) % 4, a, (a + 1) % 4]:
li.append(
(1.0 / 3.0, *update_probability_matrix(row, col, b))
)
else:
li.append((1.0, *update_probability_matrix(row, col, a)))
super(FrozenLakeEnv, self).__init__(nS, nA, P, isd)
def render(self, mode="human"):
outfile = StringIO() if mode == "ansi" else sys.stdout
row, col = self.s // self.ncol, self.s % self.ncol
desc = self.desc.tolist()
desc = [[c.decode("utf-8") for c in line] for line in desc]
desc[row][col] = utils.colorize(desc[row][col], "red", highlight=True)
if self.lastaction is not None:
outfile.write(
" ({})\n".format(["Left", "Down", "Right", "Up"][self.lastaction])
)
else:
outfile.write("\n")
outfile.write("\n".join("".join(line) for line in desc) + "\n")
if mode != "human":
with closing(outfile):
return outfile.getvalue()
| 5,545 | Python | .py | 143 | 27.832168 | 88 | 0.512835 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,311 | hotter_colder.py | WindyLab_Gym-PPS/gym/envs/toy_text/hotter_colder.py | import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
class HotterColder(gym.Env):
"""Hotter Colder
The goal of hotter colder is to guess closer to a randomly selected number
After each step the agent receives an observation of:
0 - No guess yet submitted (only after reset)
1 - Guess is lower than the target
2 - Guess is equal to the target
3 - Guess is higher than the target
The rewards is calculated as:
(min(action, self.number) + self.range) / (max(action, self.number) + self.range)
Ideally an agent will be able to recognize the 'scent' of a higher reward and
increase the rate in which is guesses in that direction until the reward reaches
its maximum
"""
def __init__(self):
self.range = 1000 # +/- the value number can be between
self.bounds = 2000 # Action space bounds
self.action_space = spaces.Box(
low=np.array([-self.bounds]).astype(np.float32),
high=np.array([self.bounds]).astype(np.float32),
)
self.observation_space = spaces.Discrete(4)
self.number = 0
self.guess_count = 0
self.guess_max = 200
self.observation = 0
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
if isinstance(action, (int, float)):
action = np.array([action])
elif isinstance(action, list):
action = np.array(action)
assert self.action_space.contains(action)
if action < self.number:
self.observation = 1
elif action == self.number:
self.observation = 2
elif action > self.number:
self.observation = 3
reward = (
(min(action, self.number) + self.bounds)
/ (max(action, self.number) + self.bounds)
) ** 2
self.guess_count += 1
done = self.guess_count >= self.guess_max
return (
self.observation,
reward[0],
done,
{"number": self.number, "guesses": self.guess_count},
)
def reset(self):
self.number = self.np_random.uniform(-self.range, self.range)
self.guess_count = 0
self.observation = 0
return self.observation
| 2,401 | Python | .py | 64 | 29.0625 | 85 | 0.61399 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,312 | nchain.py | WindyLab_Gym-PPS/gym/envs/toy_text/nchain.py | import gym
from gym import spaces
from gym.utils import seeding
class NChainEnv(gym.Env):
"""n-Chain environment
This game presents moves along a linear chain of states, with two actions:
0) forward, which moves along the chain but returns no reward
1) backward, which returns to the beginning and has a small reward
The end of the chain, however, presents a large reward, and by moving
'forward' at the end of the chain this large reward can be repeated.
At each action, there is a small probability that the agent 'slips' and the
opposite transition is instead taken.
The observed state is the current state in the chain (0 to n-1).
This environment is described in section 6.1 of:
A Bayesian Framework for Reinforcement Learning by Malcolm Strens (2000)
http://ceit.aut.ac.ir/~shiry/lecture/machine-learning/papers/BRL-2000.pdf
"""
def __init__(self, n=5, slip=0.2, small=2, large=10):
self.n = n
self.slip = slip # probability of 'slipping' an action
self.small = small # payout for 'backwards' action
self.large = large # payout at end of chain for 'forwards' action
self.state = 0 # Start at beginning of the chain
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Discrete(self.n)
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action)
if self.np_random.rand() < self.slip:
action = not action # agent slipped, reverse action taken
if action: # 'backwards': go back to the beginning, get small reward
reward = self.small
self.state = 0
elif self.state < self.n - 1: # 'forwards': go up along the chain
reward = 0
self.state += 1
else: # 'forwards': stay at the end of the chain, collect large reward
reward = self.large
done = False
return self.state, reward, done, {}
def reset(self):
self.state = 0
return self.state
| 2,171 | Python | .py | 46 | 39.565217 | 79 | 0.660833 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,313 | __init__.py | WindyLab_Gym-PPS/gym/envs/toy_text/__init__.py | from gym.envs.toy_text.blackjack import BlackjackEnv
from gym.envs.toy_text.roulette import RouletteEnv
from gym.envs.toy_text.frozen_lake import FrozenLakeEnv
from gym.envs.toy_text.nchain import NChainEnv
from gym.envs.toy_text.hotter_colder import HotterColder
from gym.envs.toy_text.guessing_game import GuessingGame
from gym.envs.toy_text.kellycoinflip import KellyCoinflipEnv
from gym.envs.toy_text.kellycoinflip import KellyCoinflipGeneralizedEnv
from gym.envs.toy_text.cliffwalking import CliffWalkingEnv
from gym.envs.toy_text.taxi import TaxiEnv
| 556 | Python | .py | 10 | 54.6 | 71 | 0.8663 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,314 | discrete.py | WindyLab_Gym-PPS/gym/envs/toy_text/discrete.py | import numpy as np
from gym import Env, spaces
from gym.utils import seeding
def categorical_sample(prob_n, np_random):
"""
Sample from categorical distribution
Each row specifies class probabilities
"""
prob_n = np.asarray(prob_n)
csprob_n = np.cumsum(prob_n)
return (csprob_n > np_random.rand()).argmax()
class DiscreteEnv(Env):
"""
Has the following members
- nS: number of states
- nA: number of actions
- P: transitions (*)
- isd: initial state distribution (**)
(*) dictionary of lists, where
P[s][a] == [(probability, nextstate, reward, done), ...]
(**) list or array of length nS
"""
def __init__(self, nS, nA, P, isd):
self.P = P
self.isd = isd
self.lastaction = None # for rendering
self.nS = nS
self.nA = nA
self.action_space = spaces.Discrete(self.nA)
self.observation_space = spaces.Discrete(self.nS)
self.seed()
self.s = categorical_sample(self.isd, self.np_random)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self.s = categorical_sample(self.isd, self.np_random)
self.lastaction = None
return int(self.s)
def step(self, a):
transitions = self.P[self.s][a]
i = categorical_sample([t[0] for t in transitions], self.np_random)
p, s, r, d = transitions[i]
self.s = s
self.lastaction = a
return (int(s), r, d, {"prob": p})
| 1,556 | Python | .py | 46 | 27.152174 | 75 | 0.610033 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,315 | roulette.py | WindyLab_Gym-PPS/gym/envs/toy_text/roulette.py | import gym
from gym import spaces
from gym.utils import seeding
class RouletteEnv(gym.Env):
"""Simple roulette environment
The roulette wheel has s spots. If the bet is 0 and a 0 comes up, you win a reward of s-2.
If any other number comes up you get a reward of -1.
For non-zero bets, if the parity of your bet matches the parity of the spin, you win 1.
Otherwise you receive a reward of -1.
The last action (s+1) stops the rollout for a return of 0 (walking away)"""
def __init__(self, spots=37):
self.n = spots + 1
self.action_space = spaces.Discrete(self.n)
self.observation_space = spaces.Discrete(1)
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action)
if action == self.n - 1:
# observation, reward, done, info
return 0, 0, True, {}
# N.B. np.random.randint draws from [A, B) while random.randint draws from [A,B]
val = self.np_random.randint(0, self.n - 1)
if val == action == 0:
reward = self.n - 2.0
elif val != 0 and action != 0 and val % 2 == action % 2:
reward = 1.0
else:
reward = -1.0
return 0, reward, False, {}
def reset(self):
return 0
| 1,397 | Python | .py | 34 | 33.323529 | 94 | 0.610495 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,316 | taxi.py | WindyLab_Gym-PPS/gym/envs/toy_text/taxi.py | import sys
from contextlib import closing
from io import StringIO
from gym import utils
from gym.envs.toy_text import discrete
import numpy as np
MAP = [
"+---------+",
"|R: | : :G|",
"| : | : : |",
"| : : : : |",
"| | : | : |",
"|Y| : |B: |",
"+---------+",
]
class TaxiEnv(discrete.DiscreteEnv):
"""
The Taxi Problem
from "Hierarchical Reinforcement Learning with the MAXQ Value Function Decomposition"
by Tom Dietterich
Description:
There are four designated locations in the grid world indicated by R(ed), G(reen), Y(ellow), and B(lue). When the episode starts, the taxi starts off at a random square and the passenger is at a random location. The taxi drives to the passenger's location, picks up the passenger, drives to the passenger's destination (another one of the four specified locations), and then drops off the passenger. Once the passenger is dropped off, the episode ends.
Observations:
There are 500 discrete states since there are 25 taxi positions, 5 possible locations of the passenger (including the case when the passenger is in the taxi), and 4 destination locations.
Note that there are 400 states that can actually be reached during an episode. The missing states correspond to situations in which the passenger is at the same location as their destination, as this typically signals the end of an episode.
Four additional states can be observed right after a successful episodes, when both the passenger and the taxi are at the destination.
This gives a total of 404 reachable discrete states.
Passenger locations:
- 0: R(ed)
- 1: G(reen)
- 2: Y(ellow)
- 3: B(lue)
- 4: in taxi
Destinations:
- 0: R(ed)
- 1: G(reen)
- 2: Y(ellow)
- 3: B(lue)
Actions:
There are 6 discrete deterministic actions:
- 0: move south
- 1: move north
- 2: move east
- 3: move west
- 4: pickup passenger
- 5: drop off passenger
Rewards:
There is a default per-step reward of -1,
except for delivering the passenger, which is +20,
or executing "pickup" and "drop-off" actions illegally, which is -10.
Rendering:
- blue: passenger
- magenta: destination
- yellow: empty taxi
- green: full taxi
- other letters (R, G, Y and B): locations for passengers and destinations
state space is represented by:
(taxi_row, taxi_col, passenger_location, destination)
"""
metadata = {"render.modes": ["human", "ansi"]}
def __init__(self):
self.desc = np.asarray(MAP, dtype="c")
self.locs = locs = [(0, 0), (0, 4), (4, 0), (4, 3)]
num_states = 500
num_rows = 5
num_columns = 5
max_row = num_rows - 1
max_col = num_columns - 1
initial_state_distrib = np.zeros(num_states)
num_actions = 6
P = {
state: {action: [] for action in range(num_actions)}
for state in range(num_states)
}
for row in range(num_rows):
for col in range(num_columns):
for pass_idx in range(len(locs) + 1): # +1 for being inside taxi
for dest_idx in range(len(locs)):
state = self.encode(row, col, pass_idx, dest_idx)
if pass_idx < 4 and pass_idx != dest_idx:
initial_state_distrib[state] += 1
for action in range(num_actions):
# defaults
new_row, new_col, new_pass_idx = row, col, pass_idx
reward = (
-1
) # default reward when there is no pickup/dropoff
done = False
taxi_loc = (row, col)
if action == 0:
new_row = min(row + 1, max_row)
elif action == 1:
new_row = max(row - 1, 0)
if action == 2 and self.desc[1 + row, 2 * col + 2] == b":":
new_col = min(col + 1, max_col)
elif action == 3 and self.desc[1 + row, 2 * col] == b":":
new_col = max(col - 1, 0)
elif action == 4: # pickup
if pass_idx < 4 and taxi_loc == locs[pass_idx]:
new_pass_idx = 4
else: # passenger not at location
reward = -10
elif action == 5: # dropoff
if (taxi_loc == locs[dest_idx]) and pass_idx == 4:
new_pass_idx = dest_idx
done = True
reward = 20
elif (taxi_loc in locs) and pass_idx == 4:
new_pass_idx = locs.index(taxi_loc)
else: # dropoff at wrong location
reward = -10
new_state = self.encode(
new_row, new_col, new_pass_idx, dest_idx
)
P[state][action].append((1.0, new_state, reward, done))
initial_state_distrib /= initial_state_distrib.sum()
discrete.DiscreteEnv.__init__(
self, num_states, num_actions, P, initial_state_distrib
)
def encode(self, taxi_row, taxi_col, pass_loc, dest_idx):
# (5) 5, 5, 4
i = taxi_row
i *= 5
i += taxi_col
i *= 5
i += pass_loc
i *= 4
i += dest_idx
return i
def decode(self, i):
out = []
out.append(i % 4)
i = i // 4
out.append(i % 5)
i = i // 5
out.append(i % 5)
i = i // 5
out.append(i)
assert 0 <= i < 5
return reversed(out)
def render(self, mode="human"):
outfile = StringIO() if mode == "ansi" else sys.stdout
out = self.desc.copy().tolist()
out = [[c.decode("utf-8") for c in line] for line in out]
taxi_row, taxi_col, pass_idx, dest_idx = self.decode(self.s)
def ul(x):
return "_" if x == " " else x
if pass_idx < 4:
out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize(
out[1 + taxi_row][2 * taxi_col + 1], "yellow", highlight=True
)
pi, pj = self.locs[pass_idx]
out[1 + pi][2 * pj + 1] = utils.colorize(
out[1 + pi][2 * pj + 1], "blue", bold=True
)
else: # passenger in taxi
out[1 + taxi_row][2 * taxi_col + 1] = utils.colorize(
ul(out[1 + taxi_row][2 * taxi_col + 1]), "green", highlight=True
)
di, dj = self.locs[dest_idx]
out[1 + di][2 * dj + 1] = utils.colorize(out[1 + di][2 * dj + 1], "magenta")
outfile.write("\n".join(["".join(row) for row in out]) + "\n")
if self.lastaction is not None:
outfile.write(
" ({})\n".format(
["South", "North", "East", "West", "Pickup", "Dropoff"][
self.lastaction
]
)
)
else:
outfile.write("\n")
# No need to return anything for human
if mode != "human":
with closing(outfile):
return outfile.getvalue()
| 7,673 | Python | .py | 176 | 30.414773 | 456 | 0.499933 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,317 | blackjack.py | WindyLab_Gym-PPS/gym/envs/toy_text/blackjack.py | import gym
from gym import spaces
from gym.utils import seeding
def cmp(a, b):
return float(a > b) - float(a < b)
# 1 = Ace, 2-10 = Number cards, Jack/Queen/King = 10
deck = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
def draw_card(np_random):
return int(np_random.choice(deck))
def draw_hand(np_random):
return [draw_card(np_random), draw_card(np_random)]
def usable_ace(hand): # Does this hand have a usable ace?
return 1 in hand and sum(hand) + 10 <= 21
def sum_hand(hand): # Return current hand total
if usable_ace(hand):
return sum(hand) + 10
return sum(hand)
def is_bust(hand): # Is this hand a bust?
return sum_hand(hand) > 21
def score(hand): # What is the score of this hand (0 if bust)
return 0 if is_bust(hand) else sum_hand(hand)
def is_natural(hand): # Is this hand a natural blackjack?
return sorted(hand) == [1, 10]
class BlackjackEnv(gym.Env):
"""Simple blackjack environment
Blackjack is a card game where the goal is to obtain cards that sum to as
near as possible to 21 without going over. They're playing against a fixed
dealer.
Face cards (Jack, Queen, King) have point value 10.
Aces can either count as 11 or 1, and it's called 'usable' at 11.
This game is placed with an infinite deck (or with replacement).
The game starts with dealer having one face up and one face down card, while
player having two face up cards. (Virtually for all Blackjack games today).
The player can request additional cards (hit=1) until they decide to stop
(stick=0) or exceed 21 (bust).
After the player sticks, the dealer reveals their facedown card, and draws
until their sum is 17 or greater. If the dealer goes bust the player wins.
If neither player nor dealer busts, the outcome (win, lose, draw) is
decided by whose sum is closer to 21. The reward for winning is +1,
drawing is 0, and losing is -1.
The observation of a 3-tuple of: the players current sum,
the dealer's one showing card (1-10 where 1 is ace),
and whether or not the player holds a usable ace (0 or 1).
This environment corresponds to the version of the blackjack problem
described in Example 5.1 in Reinforcement Learning: An Introduction
by Sutton and Barto.
http://incompleteideas.net/book/the-book-2nd.html
"""
def __init__(self, natural=False):
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Tuple(
(spaces.Discrete(32), spaces.Discrete(11), spaces.Discrete(2))
)
self.seed()
# Flag to payout 1.5 on a "natural" blackjack win, like casino rules
# Ref: http://www.bicyclecards.com/how-to-play/blackjack/
self.natural = natural
# Start the first game
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action)
if action: # hit: add a card to players hand and return
self.player.append(draw_card(self.np_random))
if is_bust(self.player):
done = True
reward = -1.0
else:
done = False
reward = 0.0
else: # stick: play out the dealers hand, and score
done = True
while sum_hand(self.dealer) < 17:
self.dealer.append(draw_card(self.np_random))
reward = cmp(score(self.player), score(self.dealer))
if self.natural and is_natural(self.player) and reward == 1.0:
reward = 1.5
return self._get_obs(), reward, done, {}
def _get_obs(self):
return (sum_hand(self.player), self.dealer[0], usable_ace(self.player))
def reset(self):
self.dealer = draw_hand(self.np_random)
self.player = draw_hand(self.np_random)
return self._get_obs()
| 3,978 | Python | .py | 86 | 39.023256 | 80 | 0.654663 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,318 | kellycoinflip.py | WindyLab_Gym-PPS/gym/envs/toy_text/kellycoinflip.py | from scipy.stats import genpareto, norm
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
def flip(edge, np_random):
return 1 if np_random.uniform() < edge else -1
class KellyCoinflipEnv(gym.Env):
"""The Kelly coinflip game is a simple gambling introduced by Haghani & Dewey 2016's
'Rational Decision-Making Under Uncertainty: Observed Betting Patterns on a Biased
Coin' (https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2856963), to test human
decision-making in a setting like that of the stock market: positive expected value
but highly stochastic; they found many subjects performed badly, often going broke,
even though optimal play would reach the maximum with ~95% probability. In the
coinflip game, the player starts with $25.00 to gamble over 300 rounds; each round,
they can bet anywhere up to their net worth (in penny increments), and then a coin is
flipped; with P=0.6, the player wins twice what they bet, otherwise, they lose it.
$250 is the maximum players are allowed to have. At the end of the 300 rounds, they
keep whatever they have. The human subjects earned an average of $91; a simple use of
the Kelly criterion (https://en.wikipedia.org/wiki/Kelly_criterion), giving a
strategy of betting 20% until the cap is hit, would earn $240; a decision tree
analysis shows that optimal play earns $246 (https://www.gwern.net/Coin-flip).
The game short-circuits when either wealth = $0 (since one can never recover) or
wealth = cap (trivial optimal play: one simply bets nothing thereafter).
In this implementation, we default to the paper settings of $25, 60% odds, wealth cap
of $250, and 300 rounds. To specify the action space in advance, we multiply the
wealth cap (in dollars) by 100 (to allow for all penny bets); should one attempt to
bet more money than one has, it is rounded down to one's net worth. (Alternately, a
mistaken bet could end the episode immediately; it's not clear to me which version
would be better.) For a harder version which randomizes the 3 key parameters, see the
Generalized Kelly coinflip game."""
metadata = {"render.modes": ["human"]}
def __init__(self, initial_wealth=25.0, edge=0.6, max_wealth=250.0, max_rounds=300):
self.action_space = spaces.Discrete(int(max_wealth * 100)) # betting in penny
# increments
self.observation_space = spaces.Tuple(
(
spaces.Box(0, max_wealth, [1], dtype=np.float32), # (w,b)
spaces.Discrete(max_rounds + 1),
)
)
self.reward_range = (0, max_wealth)
self.edge = edge
self.wealth = initial_wealth
self.initial_wealth = initial_wealth
self.max_rounds = max_rounds
self.max_wealth = max_wealth
self.np_random = None
self.rounds = None
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
bet_in_dollars = min(
action / 100.0, self.wealth
) # action = desired bet in pennies
self.rounds -= 1
coinflip = flip(self.edge, self.np_random)
self.wealth = min(self.max_wealth, self.wealth + coinflip * bet_in_dollars)
done = self.wealth < 0.01 or self.wealth == self.max_wealth or not self.rounds
reward = self.wealth if done else 0.0
return self._get_obs(), reward, done, {}
def _get_obs(self):
return np.array([self.wealth], dtype=np.float32), self.rounds
def reset(self):
self.rounds = self.max_rounds
self.wealth = self.initial_wealth
return self._get_obs()
def render(self, mode="human"):
print("Current wealth: ", self.wealth, "; Rounds left: ", self.rounds)
class KellyCoinflipGeneralizedEnv(gym.Env):
"""The Generalized Kelly coinflip game is an extension by ArthurB & Gwern Branwen
which expands the Kelly coinflip game MDP into a POMDP, where the 3 key parameters
(edge, maximum wealth, and number of rounds) are unknown random variables drawn
from 3 distributions: a Beta(7,3) for the coinflip edge 0-1, a N(300,25) the total
number of rounds, and a Pareto(5,200) for the wealth cap. These distributions are
chosen to be conjugate & easily updatable, to allow for inference (other choices
like the geometric for number of rounds wouldn't make observations informative),
and to loosely reflect what a human might expect in the original Kelly coinflip
game given that the number of rounds wasn't strictly fixed and they weren't told
the wealth cap until they neared it. With these particular distributions, the
entire history of the game can be summarized into a few sufficient statistics of
rounds-elapsed/wins/losses/max-wealth-ever-reached, from which the Bayes-optimal
decision can (in theory) be made; to avoid all agents having to tediously track
those sufficient statistics manually in the same way, the observation space is
augmented from wealth/rounds-left (rounds-left is deleted because it is a hidden
variable) to current-wealth/rounds-elapsed/wins/losses/maximum-observed-wealth.
The simple Kelly coinflip game can easily be solved by calculating decision trees,
but the Generalized Kelly coinflip game may be intractable (although the analysis
for the edge case alone suggests that the Bayes-optimal value may be very close to
what one would calculate using a decision tree for any specific case), and
represents a good challenge for RL agents."""
metadata = {"render.modes": ["human"]}
def __init__(
self,
initial_wealth=25.0,
edge_prior_alpha=7,
edge_prior_beta=3,
max_wealth_alpha=5.0,
max_wealth_m=200.0,
max_rounds_mean=300.0,
max_rounds_sd=25.0,
reseed=True,
clip_distributions=False,
):
# clip_distributions=True asserts that state and action space are not modified at reset()
# store the hyper-parameters for passing back into __init__() during resets so
# the same hyper-parameters govern the next game's parameters, as the user
# expects:
# TODO: this is boilerplate, is there any more elegant way to do this?
self.initial_wealth = float(initial_wealth)
self.edge_prior_alpha = edge_prior_alpha
self.edge_prior_beta = edge_prior_beta
self.max_wealth_alpha = max_wealth_alpha
self.max_wealth_m = max_wealth_m
self.max_rounds_mean = max_rounds_mean
self.max_rounds_sd = max_rounds_sd
self.clip_distributions = clip_distributions
if reseed or not hasattr(self, "np_random"):
self.seed()
# draw this game's set of parameters:
edge = self.np_random.beta(edge_prior_alpha, edge_prior_beta)
if self.clip_distributions:
# (clip/resample some parameters to be able to fix obs/action space sizes/bounds)
max_wealth_bound = round(
genpareto.ppf(0.85, max_wealth_alpha, max_wealth_m)
)
max_wealth = max_wealth_bound + 1.0
while max_wealth > max_wealth_bound:
max_wealth = round(
genpareto.rvs(
max_wealth_alpha, max_wealth_m, random_state=self.np_random
)
)
max_rounds_bound = int(
round(norm.ppf(0.99, max_rounds_mean, max_rounds_sd))
)
max_rounds = max_rounds_bound + 1
while max_rounds > max_rounds_bound:
max_rounds = int(
round(self.np_random.normal(max_rounds_mean, max_rounds_sd))
)
else:
max_wealth = round(
genpareto.rvs(
max_wealth_alpha, max_wealth_m, random_state=self.np_random
)
)
max_wealth_bound = max_wealth
max_rounds = int(
round(self.np_random.normal(max_rounds_mean, max_rounds_sd))
)
max_rounds_bound = max_rounds
# add an additional global variable which is the sufficient statistic for the
# Pareto distribution on wealth cap; alpha doesn't update, but x_m does, and
# simply is the highest wealth count we've seen to date:
self.max_ever_wealth = float(self.initial_wealth)
# for the coinflip edge, it is total wins/losses:
self.wins = 0
self.losses = 0
# for the number of rounds, we need to remember how many rounds we've played:
self.rounds_elapsed = 0
# the rest proceeds as before:
self.action_space = spaces.Discrete(int(max_wealth_bound * 100))
self.observation_space = spaces.Tuple(
(
spaces.Box(
0, max_wealth_bound, shape=[1], dtype=np.float32
), # current wealth
spaces.Discrete(max_rounds_bound + 1), # rounds elapsed
spaces.Discrete(max_rounds_bound + 1), # wins
spaces.Discrete(max_rounds_bound + 1), # losses
spaces.Box(0, max_wealth_bound, [1], dtype=np.float32),
)
) # maximum observed wealth
self.reward_range = (0, max_wealth)
self.edge = edge
self.wealth = self.initial_wealth
self.max_rounds = max_rounds
self.rounds = self.max_rounds
self.max_wealth = max_wealth
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
bet_in_dollars = min(action / 100.0, self.wealth)
self.rounds -= 1
coinflip = flip(self.edge, self.np_random)
self.wealth = min(self.max_wealth, self.wealth + coinflip * bet_in_dollars)
self.rounds_elapsed += 1
if coinflip:
self.max_ever_wealth = max(self.wealth, self.max_ever_wealth)
self.wins += 1
else:
self.losses += 1
done = self.wealth < 0.01 or self.wealth == self.max_wealth or not self.rounds
reward = self.wealth if done else 0.0
return self._get_obs(), reward, done, {}
def _get_obs(self):
return (
np.array([float(self.wealth)], dtype=np.float32),
self.rounds_elapsed,
self.wins,
self.losses,
np.array([float(self.max_ever_wealth)], dtype=np.float32),
)
def reset(self):
# re-init everything to draw new parameters etc, but preserve the RNG for
# reproducibility and pass in the same hyper-parameters as originally specified:
self.__init__(
initial_wealth=self.initial_wealth,
edge_prior_alpha=self.edge_prior_alpha,
edge_prior_beta=self.edge_prior_beta,
max_wealth_alpha=self.max_wealth_alpha,
max_wealth_m=self.max_wealth_m,
max_rounds_mean=self.max_rounds_mean,
max_rounds_sd=self.max_rounds_sd,
reseed=False,
clip_distributions=self.clip_distributions,
)
return self._get_obs()
def render(self, mode="human"):
print(
"Current wealth: ",
self.wealth,
"; Rounds left: ",
self.rounds,
"; True edge: ",
self.edge,
"; True max wealth: ",
self.max_wealth,
"; True stopping time: ",
self.max_rounds,
"; Rounds left: ",
self.max_rounds - self.rounds_elapsed,
)
| 11,761 | Python | .py | 238 | 39.802521 | 97 | 0.639094 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,319 | cube_crash.py | WindyLab_Gym-PPS/gym/envs/unittest/cube_crash.py | import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
# Unit test environment for CNNs and CNN+RNN algorithms.
# Looks like this (RGB observations):
#
# ---------------------------
# | |
# | |
# | |
# | ** |
# | ** |
# | |
# | |
# | |
# | |
# | |
# ======== ==============
#
# Goal is to go through the hole at the bottom. Agent controls square using Left-Nop-Right actions.
# It falls down automatically, episode length is a bit less than FIELD_H
#
# CubeCrash-v0 # shaped reward
# CubeCrashSparse-v0 # reward 0 or 1 at the end
# CubeCrashScreenBecomesBlack-v0 # for RNNs
#
# To see how it works, run:
#
# python examples/agents/keyboard_agent.py CubeCrashScreen-v0
FIELD_W = 32
FIELD_H = 40
HOLE_WIDTH = 8
color_black = np.array((0, 0, 0)).astype("float32")
color_white = np.array((255, 255, 255)).astype("float32")
color_green = np.array((0, 255, 0)).astype("float32")
class CubeCrash(gym.Env):
metadata = {
"render.modes": ["human", "rgb_array"],
"video.frames_per_second": 60,
"video.res_w": FIELD_W,
"video.res_h": FIELD_H,
}
use_shaped_reward = True
use_black_screen = False
use_random_colors = False # Makes env too hard
def __init__(self):
self.seed()
self.viewer = None
self.observation_space = spaces.Box(
0, 255, (FIELD_H, FIELD_W, 3), dtype=np.uint8
)
self.action_space = spaces.Discrete(3)
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def random_color(self):
return np.array(
[
self.np_random.randint(low=0, high=255),
self.np_random.randint(low=0, high=255),
self.np_random.randint(low=0, high=255),
]
).astype("uint8")
def reset(self):
self.cube_x = self.np_random.randint(low=3, high=FIELD_W - 3)
self.cube_y = self.np_random.randint(low=3, high=FIELD_H // 6)
self.hole_x = self.np_random.randint(low=HOLE_WIDTH, high=FIELD_W - HOLE_WIDTH)
self.bg_color = self.random_color() if self.use_random_colors else color_black
self.potential = None
self.step_n = 0
while 1:
self.wall_color = (
self.random_color() if self.use_random_colors else color_white
)
self.cube_color = (
self.random_color() if self.use_random_colors else color_green
)
if (
np.linalg.norm(self.wall_color - self.bg_color) < 50
or np.linalg.norm(self.cube_color - self.bg_color) < 50
):
continue
break
return self.step(0)[0]
def step(self, action):
if action == 0:
pass
elif action == 1:
self.cube_x -= 1
elif action == 2:
self.cube_x += 1
else:
assert 0, "Action %i is out of range" % action
self.cube_y += 1
self.step_n += 1
obs = np.zeros((FIELD_H, FIELD_W, 3), dtype=np.uint8)
obs[:, :, :] = self.bg_color
obs[FIELD_H - 5 : FIELD_H, :, :] = self.wall_color
obs[
FIELD_H - 5 : FIELD_H,
self.hole_x - HOLE_WIDTH // 2 : self.hole_x + HOLE_WIDTH // 2 + 1,
:,
] = self.bg_color
obs[
self.cube_y - 1 : self.cube_y + 2, self.cube_x - 1 : self.cube_x + 2, :
] = self.cube_color
if self.use_black_screen and self.step_n > 4:
obs[:] = np.zeros((3,), dtype=np.uint8)
done = False
reward = 0
dist = np.abs(self.cube_x - self.hole_x)
if self.potential is not None and self.use_shaped_reward:
reward = (self.potential - dist) * 0.01
self.potential = dist
if self.cube_x - 1 < 0 or self.cube_x + 1 >= FIELD_W:
done = True
reward = -1
elif self.cube_y + 1 >= FIELD_H - 5:
if dist >= HOLE_WIDTH // 2:
done = True
reward = -1
elif self.cube_y == FIELD_H:
done = True
reward = +1
self.last_obs = obs
return obs, reward, done, {}
def render(self, mode="human"):
if mode == "rgb_array":
return self.last_obs
elif mode == "human":
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(self.last_obs)
return self.viewer.isopen
else:
assert 0, "Render mode '%s' is not supported" % mode
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
class CubeCrashSparse(CubeCrash):
use_shaped_reward = False
class CubeCrashScreenBecomesBlack(CubeCrash):
use_shaped_reward = False
use_black_screen = True
| 5,318 | Python | .py | 148 | 27.709459 | 99 | 0.521283 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,320 | memorize_digits.py | WindyLab_Gym-PPS/gym/envs/unittest/memorize_digits.py | import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
# Unit test environment for CNNs.
# Looks like this (RGB observations):
#
# ---------------------------
# | |
# | ****** |
# | ****** |
# | ** ** |
# | ** ** |
# | ** |
# | ** |
# | **** |
# | **** |
# | **** |
# | **** |
# | ********** |
# | ********** |
# | |
# ---------------------------
#
# Agent should hit action 2 to gain reward. Catches off-by-one errors in your agent.
#
# To see how it works, run:
#
# python examples/agents/keyboard_agent.py MemorizeDigits-v0
FIELD_W = 32
FIELD_H = 24
bogus_mnist = [
[" **** ", "* *", "* *", "* *", "* *", " **** "],
[" ** ", " * * ", " * ", " * ", " * ", " *** "],
[" **** ", "* *", " *", " *** ", "** ", "******"],
[" **** ", "* *", " ** ", " *", "* *", " **** "],
[" * * ", " * * ", " * * ", " **** ", " * ", " * "],
[" **** ", " * ", " **** ", " * ", " * ", " **** "],
[" *** ", " * ", " **** ", " * * ", " * * ", " **** "],
[" **** ", " * ", " * ", " * ", " * ", " * "],
[" **** ", "* *", " **** ", "* *", "* *", " **** "],
[" **** ", "* *", "* *", " *****", " *", " **** "],
]
color_black = np.array((0, 0, 0)).astype("float32")
color_white = np.array((255, 255, 255)).astype("float32")
class MemorizeDigits(gym.Env):
metadata = {
"render.modes": ["human", "rgb_array"],
"video.frames_per_second": 60,
"video.res_w": FIELD_W,
"video.res_h": FIELD_H,
}
use_random_colors = False
def __init__(self):
self.seed()
self.viewer = None
self.observation_space = spaces.Box(
0, 255, (FIELD_H, FIELD_W, 3), dtype=np.uint8
)
self.action_space = spaces.Discrete(10)
self.bogus_mnist = np.zeros((10, 6, 6), dtype=np.uint8)
for digit in range(10):
for y in range(6):
self.bogus_mnist[digit, y, :] = [
ord(char) for char in bogus_mnist[digit][y]
]
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def random_color(self):
return np.array(
[
self.np_random.randint(low=0, high=255),
self.np_random.randint(low=0, high=255),
self.np_random.randint(low=0, high=255),
]
).astype("uint8")
def reset(self):
self.digit_x = self.np_random.randint(low=FIELD_W // 5, high=FIELD_W // 5 * 4)
self.digit_y = self.np_random.randint(low=FIELD_H // 5, high=FIELD_H // 5 * 4)
self.color_bg = self.random_color() if self.use_random_colors else color_black
self.step_n = 0
while 1:
self.color_digit = (
self.random_color() if self.use_random_colors else color_white
)
if np.linalg.norm(self.color_digit - self.color_bg) < 50:
continue
break
self.digit = -1
return self.step(0)[0]
def step(self, action):
reward = -1
done = False
self.step_n += 1
if self.digit == -1:
pass
else:
if self.digit == action:
reward = +1
done = self.step_n > 20 and 0 == self.np_random.randint(low=0, high=5)
self.digit = self.np_random.randint(low=0, high=10)
obs = np.zeros((FIELD_H, FIELD_W, 3), dtype=np.uint8)
obs[:, :, :] = self.color_bg
digit_img = np.zeros((6, 6, 3), dtype=np.uint8)
digit_img[:] = self.color_bg
xxx = self.bogus_mnist[self.digit] == 42
digit_img[xxx] = self.color_digit
obs[
self.digit_y - 3 : self.digit_y + 3, self.digit_x - 3 : self.digit_x + 3
] = digit_img
self.last_obs = obs
return obs, reward, done, {}
def render(self, mode="human"):
if mode == "rgb_array":
return self.last_obs
elif mode == "human":
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(self.last_obs)
return self.viewer.isopen
else:
assert 0, "Render mode '%s' is not supported" % mode
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
| 4,827 | Python | .py | 129 | 29.868217 | 86 | 0.424055 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,321 | __init__.py | WindyLab_Gym-PPS/gym/envs/unittest/__init__.py | from gym.envs.unittest.cube_crash import CubeCrash
from gym.envs.unittest.cube_crash import CubeCrashSparse
from gym.envs.unittest.cube_crash import CubeCrashScreenBecomesBlack
from gym.envs.unittest.memorize_digits import MemorizeDigits
| 238 | Python | .py | 4 | 58.5 | 68 | 0.880342 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,322 | mountain_car.py | WindyLab_Gym-PPS/gym/envs/classic_control/mountain_car.py | """
http://incompleteideas.net/MountainCar/MountainCar1.cp
permalink: https://perma.cc/6Z2N-PFWC
"""
import math
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
class MountainCarEnv(gym.Env):
"""
Description:
The agent (a car) is started at the bottom of a valley. For any given
state the agent may choose to accelerate to the left, right or cease
any acceleration.
Source:
The environment appeared first in Andrew Moore's PhD Thesis (1990).
Observation:
Type: Box(2)
Num Observation Min Max
0 Car Position -1.2 0.6
1 Car Velocity -0.07 0.07
Actions:
Type: Discrete(3)
Num Action
0 Accelerate to the Left
1 Don't accelerate
2 Accelerate to the Right
Note: This does not affect the amount of velocity affected by the
gravitational pull acting on the car.
Reward:
Reward of 0 is awarded if the agent reached the flag (position = 0.5)
on top of the mountain.
Reward of -1 is awarded if the position of the agent is less than 0.5.
Starting State:
The position of the car is assigned a uniform random value in
[-0.6 , -0.4].
The starting velocity of the car is always assigned to 0.
Episode Termination:
The car position is more than 0.5
Episode length is greater than 200
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 30}
def __init__(self, goal_velocity=0):
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = 0.5
self.goal_velocity = goal_velocity
self.force = 0.001
self.gravity = 0.0025
self.low = np.array([self.min_position, -self.max_speed], dtype=np.float32)
self.high = np.array([self.max_position, self.max_speed], dtype=np.float32)
self.viewer = None
self.action_space = spaces.Discrete(3)
self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32)
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid" % (
action,
type(action),
)
position, velocity = self.state
velocity += (action - 1) * self.force + math.cos(3 * position) * (-self.gravity)
velocity = np.clip(velocity, -self.max_speed, self.max_speed)
position += velocity
position = np.clip(position, self.min_position, self.max_position)
if position == self.min_position and velocity < 0:
velocity = 0
done = bool(position >= self.goal_position and velocity >= self.goal_velocity)
reward = -1.0
self.state = (position, velocity)
return np.array(self.state, dtype=np.float32), reward, done, {}
def reset(self):
self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])
return np.array(self.state, dtype=np.float32)
def _height(self, xs):
return np.sin(3 * xs) * 0.45 + 0.55
def render(self, mode="human"):
screen_width = 600
screen_height = 400
world_width = self.max_position - self.min_position
scale = screen_width / world_width
carwidth = 40
carheight = 20
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip((xs - self.min_position) * scale, ys * scale))
self.track = rendering.make_polyline(xys)
self.track.set_linewidth(4)
self.viewer.add_geom(self.track)
clearance = 10
l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
car.add_attr(rendering.Transform(translation=(0, clearance)))
self.cartrans = rendering.Transform()
car.add_attr(self.cartrans)
self.viewer.add_geom(car)
frontwheel = rendering.make_circle(carheight / 2.5)
frontwheel.set_color(0.5, 0.5, 0.5)
frontwheel.add_attr(
rendering.Transform(translation=(carwidth / 4, clearance))
)
frontwheel.add_attr(self.cartrans)
self.viewer.add_geom(frontwheel)
backwheel = rendering.make_circle(carheight / 2.5)
backwheel.add_attr(
rendering.Transform(translation=(-carwidth / 4, clearance))
)
backwheel.add_attr(self.cartrans)
backwheel.set_color(0.5, 0.5, 0.5)
self.viewer.add_geom(backwheel)
flagx = (self.goal_position - self.min_position) * scale
flagy1 = self._height(self.goal_position) * scale
flagy2 = flagy1 + 50
flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))
self.viewer.add_geom(flagpole)
flag = rendering.FilledPolygon(
[(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)]
)
flag.set_color(0.8, 0.8, 0)
self.viewer.add_geom(flag)
pos = self.state[0]
self.cartrans.set_translation(
(pos - self.min_position) * scale, self._height(pos) * scale
)
self.cartrans.set_rotation(math.cos(3 * pos))
return self.viewer.render(return_rgb_array=mode == "rgb_array")
def get_keys_to_action(self):
# Control with left and right arrow keys.
return {(): 1, (276,): 0, (275,): 2, (275, 276): 1}
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
| 6,141 | Python | .py | 141 | 33.957447 | 88 | 0.592388 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,323 | acrobot.py | WindyLab_Gym-PPS/gym/envs/classic_control/acrobot.py | """classic Acrobot task"""
import numpy as np
from numpy import sin, cos, pi
from gym import core, spaces
from gym.utils import seeding
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Christoph Dann <[email protected]>"
# SOURCE:
# https://github.com/rlpy/rlpy/blob/master/rlpy/Domains/Acrobot.py
class AcrobotEnv(core.Env):
"""
Acrobot is a 2-link pendulum with only the second joint actuated.
Initially, both links point downwards. The goal is to swing the
end-effector at a height at least the length of one link above the base.
Both links can swing freely and can pass by each other, i.e., they don't
collide when they have the same angle.
**STATE:**
The state consists of the sin() and cos() of the two rotational joint
angles and the joint angular velocities :
[cos(theta1) sin(theta1) cos(theta2) sin(theta2) thetaDot1 thetaDot2].
For the first link, an angle of 0 corresponds to the link pointing downwards.
The angle of the second link is relative to the angle of the first link.
An angle of 0 corresponds to having the same angle between the two links.
A state of [1, 0, 1, 0, ..., ...] means that both links point downwards.
**ACTIONS:**
The action is either applying +1, 0 or -1 torque on the joint between
the two pendulum links.
.. note::
The dynamics equations were missing some terms in the NIPS paper which
are present in the book. R. Sutton confirmed in personal correspondence
that the experimental results shown in the paper and the book were
generated with the equations shown in the book.
However, there is the option to run the domain with the paper equations
by setting book_or_nips = 'nips'
**REFERENCE:**
.. seealso::
R. Sutton: Generalization in Reinforcement Learning:
Successful Examples Using Sparse Coarse Coding (NIPS 1996)
.. seealso::
R. Sutton and A. G. Barto:
Reinforcement learning: An introduction.
Cambridge: MIT press, 1998.
.. warning::
This version of the domain uses the Runge-Kutta method for integrating
the system dynamics and is more realistic, but also considerably harder
than the original version which employs Euler integration,
see the AcrobotLegacy class.
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 15}
dt = 0.2
LINK_LENGTH_1 = 1.0 # [m]
LINK_LENGTH_2 = 1.0 # [m]
LINK_MASS_1 = 1.0 #: [kg] mass of link 1
LINK_MASS_2 = 1.0 #: [kg] mass of link 2
LINK_COM_POS_1 = 0.5 #: [m] position of the center of mass of link 1
LINK_COM_POS_2 = 0.5 #: [m] position of the center of mass of link 2
LINK_MOI = 1.0 #: moments of inertia for both links
MAX_VEL_1 = 4 * pi
MAX_VEL_2 = 9 * pi
AVAIL_TORQUE = [-1.0, 0.0, +1]
torque_noise_max = 0.0
#: use dynamics equations from the nips paper or the book
book_or_nips = "book"
action_arrow = None
domain_fig = None
actions_num = 3
def __init__(self):
self.viewer = None
high = np.array(
[1.0, 1.0, 1.0, 1.0, self.MAX_VEL_1, self.MAX_VEL_2], dtype=np.float32
)
low = -high
self.observation_space = spaces.Box(low=low, high=high, dtype=np.float32)
self.action_space = spaces.Discrete(3)
self.state = None
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self.state = self.np_random.uniform(low=-0.1, high=0.1, size=(4,)).astype(
np.float32
)
return self._get_ob()
def step(self, a):
s = self.state
torque = self.AVAIL_TORQUE[a]
# Add noise to the force action
if self.torque_noise_max > 0:
torque += self.np_random.uniform(
-self.torque_noise_max, self.torque_noise_max
)
# Now, augment the state with our force action so it can be passed to
# _dsdt
s_augmented = np.append(s, torque)
ns = rk4(self._dsdt, s_augmented, [0, self.dt])
ns[0] = wrap(ns[0], -pi, pi)
ns[1] = wrap(ns[1], -pi, pi)
ns[2] = bound(ns[2], -self.MAX_VEL_1, self.MAX_VEL_1)
ns[3] = bound(ns[3], -self.MAX_VEL_2, self.MAX_VEL_2)
self.state = ns
terminal = self._terminal()
reward = -1.0 if not terminal else 0.0
return (self._get_ob(), reward, terminal, {})
def _get_ob(self):
s = self.state
return np.array(
[cos(s[0]), sin(s[0]), cos(s[1]), sin(s[1]), s[2], s[3]], dtype=np.float32
)
def _terminal(self):
s = self.state
return bool(-cos(s[0]) - cos(s[1] + s[0]) > 1.0)
def _dsdt(self, s_augmented):
m1 = self.LINK_MASS_1
m2 = self.LINK_MASS_2
l1 = self.LINK_LENGTH_1
lc1 = self.LINK_COM_POS_1
lc2 = self.LINK_COM_POS_2
I1 = self.LINK_MOI
I2 = self.LINK_MOI
g = 9.8
a = s_augmented[-1]
s = s_augmented[:-1]
theta1 = s[0]
theta2 = s[1]
dtheta1 = s[2]
dtheta2 = s[3]
d1 = (
m1 * lc1 ** 2
+ m2 * (l1 ** 2 + lc2 ** 2 + 2 * l1 * lc2 * cos(theta2))
+ I1
+ I2
)
d2 = m2 * (lc2 ** 2 + l1 * lc2 * cos(theta2)) + I2
phi2 = m2 * lc2 * g * cos(theta1 + theta2 - pi / 2.0)
phi1 = (
-m2 * l1 * lc2 * dtheta2 ** 2 * sin(theta2)
- 2 * m2 * l1 * lc2 * dtheta2 * dtheta1 * sin(theta2)
+ (m1 * lc1 + m2 * l1) * g * cos(theta1 - pi / 2)
+ phi2
)
if self.book_or_nips == "nips":
# the following line is consistent with the description in the
# paper
ddtheta2 = (a + d2 / d1 * phi1 - phi2) / (m2 * lc2 ** 2 + I2 - d2 ** 2 / d1)
else:
# the following line is consistent with the java implementation and the
# book
ddtheta2 = (
a + d2 / d1 * phi1 - m2 * l1 * lc2 * dtheta1 ** 2 * sin(theta2) - phi2
) / (m2 * lc2 ** 2 + I2 - d2 ** 2 / d1)
ddtheta1 = -(d2 * ddtheta2 + phi1) / d1
return (dtheta1, dtheta2, ddtheta1, ddtheta2, 0.0)
def render(self, mode="human"):
from gym.envs.classic_control import rendering
s = self.state
if self.viewer is None:
self.viewer = rendering.Viewer(500, 500)
bound = self.LINK_LENGTH_1 + self.LINK_LENGTH_2 + 0.2 # 2.2 for default
self.viewer.set_bounds(-bound, bound, -bound, bound)
if s is None:
return None
p1 = [-self.LINK_LENGTH_1 * cos(s[0]), self.LINK_LENGTH_1 * sin(s[0])]
p2 = [
p1[0] - self.LINK_LENGTH_2 * cos(s[0] + s[1]),
p1[1] + self.LINK_LENGTH_2 * sin(s[0] + s[1]),
]
xys = np.array([[0, 0], p1, p2])[:, ::-1]
thetas = [s[0] - pi / 2, s[0] + s[1] - pi / 2]
link_lengths = [self.LINK_LENGTH_1, self.LINK_LENGTH_2]
self.viewer.draw_line((-2.2, 1), (2.2, 1))
for ((x, y), th, llen) in zip(xys, thetas, link_lengths):
l, r, t, b = 0, llen, 0.1, -0.1
jtransform = rendering.Transform(rotation=th, translation=(x, y))
link = self.viewer.draw_polygon([(l, b), (l, t), (r, t), (r, b)])
link.add_attr(jtransform)
link.set_color(0, 0.8, 0.8)
circ = self.viewer.draw_circle(0.1)
circ.set_color(0.8, 0.8, 0)
circ.add_attr(jtransform)
return self.viewer.render(return_rgb_array=mode == "rgb_array")
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
def wrap(x, m, M):
"""Wraps ``x`` so m <= x <= M; but unlike ``bound()`` which
truncates, ``wrap()`` wraps x around the coordinate system defined by m,M.\n
For example, m = -180, M = 180 (degrees), x = 360 --> returns 0.
Args:
x: a scalar
m: minimum possible value in range
M: maximum possible value in range
Returns:
x: a scalar, wrapped
"""
diff = M - m
while x > M:
x = x - diff
while x < m:
x = x + diff
return x
def bound(x, m, M=None):
"""Either have m as scalar, so bound(x,m,M) which returns m <= x <= M *OR*
have m as length 2 vector, bound(x,m, <IGNORED>) returns m[0] <= x <= m[1].
Args:
x: scalar
Returns:
x: scalar, bound between min (m) and Max (M)
"""
if M is None:
M = m[1]
m = m[0]
# bound x between min (m) and Max (M)
return min(max(x, m), M)
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
Args:
derivs: the derivative of the system and has the signature ``dy = derivs(yi)``
y0: initial state vector
t: sample times
args: additional arguments passed to the derivative function
kwargs: additional keyword arguments passed to the derivative function
Example 1 ::
## 2D system
def derivs(x):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
This would then require re-adding the time variable to the signature of derivs.
Returns:
yout: Runge-Kutta approximation of the ODE
"""
try:
Ny = len(y0)
except TypeError:
yout = np.zeros((len(t),), np.float_)
else:
yout = np.zeros((len(t), Ny), np.float_)
yout[0] = y0
for i in np.arange(len(t) - 1):
thist = t[i]
dt = t[i + 1] - thist
dt2 = dt / 2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0))
k2 = np.asarray(derivs(y0 + dt2 * k1))
k3 = np.asarray(derivs(y0 + dt2 * k2))
k4 = np.asarray(derivs(y0 + dt * k3))
yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
# We only care about the final timestep and we cleave off action value which will be zero
return yout[-1][:4]
| 10,723 | Python | .py | 270 | 31.718519 | 93 | 0.571154 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,324 | continuous_mountain_car.py | WindyLab_Gym-PPS/gym/envs/classic_control/continuous_mountain_car.py | # -*- coding: utf-8 -*-
"""
@author: Olivier Sigaud
A merge between two sources:
* Adaptation of the MountainCar Environment from the "FAReinforcement" library
of Jose Antonio Martin H. (version 1.0), adapted by 'Tom Schaul, [email protected]'
and then modified by Arnaud de Broissia
* the OpenAI/gym MountainCar environment
itself from
http://incompleteideas.net/sutton/MountainCar/MountainCar1.cp
permalink: https://perma.cc/6Z2N-PFWC
"""
import math
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
class Continuous_MountainCarEnv(gym.Env):
"""
Description:
The agent (a car) is started at the bottom of a valley. For any given
state the agent may choose to accelerate to the left, right or cease
any acceleration.
Observation:
Type: Box(2)
Num Observation Min Max
0 Car Position -1.2 0.6
1 Car Velocity -0.07 0.07
Actions:
Type: Box(1)
Num Action Min Max
0 the power coef -1.0 1.0
Note: actual driving force is calculated by multipling the power coef by power (0.0015)
Reward:
Reward of 100 is awarded if the agent reached the flag (position = 0.45) on top of the mountain.
Reward is decrease based on amount of energy consumed each step.
Starting State:
The position of the car is assigned a uniform random value in
[-0.6 , -0.4].
The starting velocity of the car is always assigned to 0.
Episode Termination:
The car position is more than 0.45
Episode length is greater than 200
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 30}
def __init__(self, goal_velocity=0):
self.min_action = -1.0
self.max_action = 1.0
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = (
0.45 # was 0.5 in gym, 0.45 in Arnaud de Broissia's version
)
self.goal_velocity = goal_velocity
self.power = 0.0015
self.low_state = np.array(
[self.min_position, -self.max_speed], dtype=np.float32
)
self.high_state = np.array(
[self.max_position, self.max_speed], dtype=np.float32
)
self.viewer = None
self.action_space = spaces.Box(
low=self.min_action, high=self.max_action, shape=(1,), dtype=np.float32
)
self.observation_space = spaces.Box(
low=self.low_state, high=self.high_state, dtype=np.float32
)
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
position = self.state[0]
velocity = self.state[1]
force = min(max(action[0], self.min_action), self.max_action)
velocity += force * self.power - 0.0025 * math.cos(3 * position)
if velocity > self.max_speed:
velocity = self.max_speed
if velocity < -self.max_speed:
velocity = -self.max_speed
position += velocity
if position > self.max_position:
position = self.max_position
if position < self.min_position:
position = self.min_position
if position == self.min_position and velocity < 0:
velocity = 0
# Convert a possible numpy bool to a Python bool.
done = bool(position >= self.goal_position and velocity >= self.goal_velocity)
reward = 0
if done:
reward = 100.0
reward -= math.pow(action[0], 2) * 0.1
self.state = np.array([position, velocity], dtype=np.float32)
return self.state, reward, done, {}
def reset(self):
self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])
return np.array(self.state, dtype=np.float32)
def _height(self, xs):
return np.sin(3 * xs) * 0.45 + 0.55
def render(self, mode="human"):
screen_width = 600
screen_height = 400
world_width = self.max_position - self.min_position
scale = screen_width / world_width
carwidth = 40
carheight = 20
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip((xs - self.min_position) * scale, ys * scale))
self.track = rendering.make_polyline(xys)
self.track.set_linewidth(4)
self.viewer.add_geom(self.track)
clearance = 10
l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
car.add_attr(rendering.Transform(translation=(0, clearance)))
self.cartrans = rendering.Transform()
car.add_attr(self.cartrans)
self.viewer.add_geom(car)
frontwheel = rendering.make_circle(carheight / 2.5)
frontwheel.set_color(0.5, 0.5, 0.5)
frontwheel.add_attr(
rendering.Transform(translation=(carwidth / 4, clearance))
)
frontwheel.add_attr(self.cartrans)
self.viewer.add_geom(frontwheel)
backwheel = rendering.make_circle(carheight / 2.5)
backwheel.add_attr(
rendering.Transform(translation=(-carwidth / 4, clearance))
)
backwheel.add_attr(self.cartrans)
backwheel.set_color(0.5, 0.5, 0.5)
self.viewer.add_geom(backwheel)
flagx = (self.goal_position - self.min_position) * scale
flagy1 = self._height(self.goal_position) * scale
flagy2 = flagy1 + 50
flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))
self.viewer.add_geom(flagpole)
flag = rendering.FilledPolygon(
[(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)]
)
flag.set_color(0.8, 0.8, 0)
self.viewer.add_geom(flag)
pos = self.state[0]
self.cartrans.set_translation(
(pos - self.min_position) * scale, self._height(pos) * scale
)
self.cartrans.set_rotation(math.cos(3 * pos))
return self.viewer.render(return_rgb_array=mode == "rgb_array")
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
| 6,795 | Python | .py | 160 | 33.1 | 105 | 0.590999 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,325 | __init__.py | WindyLab_Gym-PPS/gym/envs/classic_control/__init__.py | from gym.envs.classic_control.cartpole import CartPoleEnv
from gym.envs.classic_control.mountain_car import MountainCarEnv
from gym.envs.classic_control.continuous_mountain_car import Continuous_MountainCarEnv
from gym.envs.classic_control.pendulum import PendulumEnv
from gym.envs.classic_control.acrobot import AcrobotEnv
| 325 | Python | .py | 5 | 63.8 | 86 | 0.877743 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,326 | pendulum.py | WindyLab_Gym-PPS/gym/envs/classic_control/pendulum.py | import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from os import path
class PendulumEnv(gym.Env):
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 30}
def __init__(self, g=10.0):
self.max_speed = 8
self.max_torque = 2.0
self.dt = 0.05
self.g = g
self.m = 1.0
self.l = 1.0
self.viewer = None
high = np.array([1.0, 1.0, self.max_speed], dtype=np.float32)
self.action_space = spaces.Box(
low=-self.max_torque, high=self.max_torque, shape=(1,), dtype=np.float32
)
self.observation_space = spaces.Box(low=-high, high=high, dtype=np.float32)
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
th, thdot = self.state # th := theta
g = self.g
m = self.m
l = self.l
dt = self.dt
u = np.clip(u, -self.max_torque, self.max_torque)[0]
self.last_u = u # for rendering
costs = angle_normalize(th) ** 2 + 0.1 * thdot ** 2 + 0.001 * (u ** 2)
newthdot = (
thdot
+ (-3 * g / (2 * l) * np.sin(th + np.pi) + 3.0 / (m * l ** 2) * u) * dt
)
newth = th + newthdot * dt
newthdot = np.clip(newthdot, -self.max_speed, self.max_speed)
self.state = np.array([newth, newthdot])
return self._get_obs(), -costs, False, {}
def reset(self):
high = np.array([np.pi, 1])
self.state = self.np_random.uniform(low=-high, high=high)
self.last_u = None
return self._get_obs()
def _get_obs(self):
theta, thetadot = self.state
return np.array([np.cos(theta), np.sin(theta), thetadot], dtype=np.float32)
def render(self, mode="human"):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(500, 500)
self.viewer.set_bounds(-2.2, 2.2, -2.2, 2.2)
rod = rendering.make_capsule(1, 0.2)
rod.set_color(0.8, 0.3, 0.3)
self.pole_transform = rendering.Transform()
rod.add_attr(self.pole_transform)
self.viewer.add_geom(rod)
axle = rendering.make_circle(0.05)
axle.set_color(0, 0, 0)
self.viewer.add_geom(axle)
fname = path.join(path.dirname(__file__), "assets/clockwise.png")
self.img = rendering.Image(fname, 1.0, 1.0)
self.imgtrans = rendering.Transform()
self.img.add_attr(self.imgtrans)
self.viewer.add_onetime(self.img)
self.pole_transform.set_rotation(self.state[0] + np.pi / 2)
if self.last_u is not None:
self.imgtrans.scale = (-self.last_u / 2, np.abs(self.last_u) / 2)
return self.viewer.render(return_rgb_array=mode == "rgb_array")
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
def angle_normalize(x):
return ((x + np.pi) % (2 * np.pi)) - np.pi
| 3,139 | Python | .py | 77 | 31.61039 | 86 | 0.565417 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,327 | cartpole.py | WindyLab_Gym-PPS/gym/envs/classic_control/cartpole.py | """
Classic cart-pole system implemented by Rich Sutton et al.
Copied from http://incompleteideas.net/sutton/book/code/pole.c
permalink: https://perma.cc/C9ZM-652R
"""
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
class CartPoleEnv(gym.Env):
"""
Description:
A pole is attached by an un-actuated joint to a cart, which moves along
a frictionless track. The pendulum starts upright, and the goal is to
prevent it from falling over by increasing and reducing the cart's
velocity.
Source:
This environment corresponds to the version of the cart-pole problem
described by Barto, Sutton, and Anderson
Observation:
Type: Box(4)
Num Observation Min Max
0 Cart Position -4.8 4.8
1 Cart Velocity -Inf Inf
2 Pole Angle -0.418 rad (-24 deg) 0.418 rad (24 deg)
3 Pole Angular Velocity -Inf Inf
Actions:
Type: Discrete(2)
Num Action
0 Push cart to the left
1 Push cart to the right
Note: The amount the velocity that is reduced or increased is not
fixed; it depends on the angle the pole is pointing. This is because
the center of gravity of the pole increases the amount of energy needed
to move the cart underneath it
Reward:
Reward is 1 for every step taken, including the termination step
Starting State:
All observations are assigned a uniform random value in [-0.05..0.05]
Episode Termination:
Pole Angle is more than 12 degrees.
Cart Position is more than 2.4 (center of the cart reaches the edge of
the display).
Episode length is greater than 200.
Solved Requirements:
Considered solved when the average return is greater than or equal to
195.0 over 100 consecutive trials.
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 50}
def __init__(self):
self.gravity = 9.8
self.masscart = 1.0
self.masspole = 0.1
self.total_mass = self.masspole + self.masscart
self.length = 0.5 # actually half the pole's length
self.polemass_length = self.masspole * self.length
self.force_mag = 10.0
self.tau = 0.02 # seconds between state updates
self.kinematics_integrator = "euler"
# Angle at which to fail the episode
self.theta_threshold_radians = 12 * 2 * math.pi / 360
self.x_threshold = 2.4
# Angle limit set to 2 * theta_threshold_radians so failing observation
# is still within bounds.
high = np.array(
[
self.x_threshold * 2,
np.finfo(np.float32).max,
self.theta_threshold_radians * 2,
np.finfo(np.float32).max,
],
dtype=np.float32,
)
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
self.seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
err_msg = "%r (%s) invalid" % (action, type(action))
assert self.action_space.contains(action), err_msg
x, x_dot, theta, theta_dot = self.state
force = self.force_mag if action == 1 else -self.force_mag
costheta = math.cos(theta)
sintheta = math.sin(theta)
# For the interested reader:
# https://coneural.org/florian/papers/05_cart_pole.pdf
temp = (
force + self.polemass_length * theta_dot ** 2 * sintheta
) / self.total_mass
thetaacc = (self.gravity * sintheta - costheta * temp) / (
self.length * (4.0 / 3.0 - self.masspole * costheta ** 2 / self.total_mass)
)
xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
if self.kinematics_integrator == "euler":
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * xacc
theta = theta + self.tau * theta_dot
theta_dot = theta_dot + self.tau * thetaacc
else: # semi-implicit euler
x_dot = x_dot + self.tau * xacc
x = x + self.tau * x_dot
theta_dot = theta_dot + self.tau * thetaacc
theta = theta + self.tau * theta_dot
self.state = (x, x_dot, theta, theta_dot)
done = bool(
x < -self.x_threshold
or x > self.x_threshold
or theta < -self.theta_threshold_radians
or theta > self.theta_threshold_radians
)
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
logger.warn(
"You are calling 'step()' even though this "
"environment has already returned done = True. You "
"should always call 'reset()' once you receive 'done = "
"True' -- any further steps are undefined behavior."
)
self.steps_beyond_done += 1
reward = 0.0
return np.array(self.state, dtype=np.float32), reward, done, {}
def reset(self):
self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))
self.steps_beyond_done = None
return np.array(self.state, dtype=np.float32)
def render(self, mode="human"):
screen_width = 600
screen_height = 400
world_width = self.x_threshold * 2
scale = screen_width / world_width
carty = 100 # TOP OF CART
polewidth = 10.0
polelen = scale * (2 * self.length)
cartwidth = 50.0
cartheight = 30.0
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
axleoffset = cartheight / 4.0
cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
self.carttrans = rendering.Transform()
cart.add_attr(self.carttrans)
self.viewer.add_geom(cart)
l, r, t, b = (
-polewidth / 2,
polewidth / 2,
polelen - polewidth / 2,
-polewidth / 2,
)
pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
pole.set_color(0.8, 0.6, 0.4)
self.poletrans = rendering.Transform(translation=(0, axleoffset))
pole.add_attr(self.poletrans)
pole.add_attr(self.carttrans)
self.viewer.add_geom(pole)
self.axle = rendering.make_circle(polewidth / 2)
self.axle.add_attr(self.poletrans)
self.axle.add_attr(self.carttrans)
self.axle.set_color(0.5, 0.5, 0.8)
self.viewer.add_geom(self.axle)
self.track = rendering.Line((0, carty), (screen_width, carty))
self.track.set_color(0, 0, 0)
self.viewer.add_geom(self.track)
self._pole_geom = pole
if self.state is None:
return None
# Edit the pole polygon vertex
pole = self._pole_geom
l, r, t, b = (
-polewidth / 2,
polewidth / 2,
polelen - polewidth / 2,
-polewidth / 2,
)
pole.v = [(l, b), (l, t), (r, t), (r, b)]
x = self.state
cartx = x[0] * scale + screen_width / 2.0 # MIDDLE OF CART
self.carttrans.set_translation(cartx, carty)
self.poletrans.set_rotation(-x[2])
return self.viewer.render(return_rgb_array=mode == "rgb_array")
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
| 8,356 | Python | .py | 197 | 32.213198 | 87 | 0.566486 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,328 | rendering.py | WindyLab_Gym-PPS/gym/envs/classic_control/rendering.py | """
2D rendering framework
"""
import os
import sys
if "Apple" in sys.version:
if "DYLD_FALLBACK_LIBRARY_PATH" in os.environ:
os.environ["DYLD_FALLBACK_LIBRARY_PATH"] += ":/usr/lib"
# (JDS 2016/04/15): avoid bug on Anaconda 2.3.0 / Yosemite
from gym import error
try:
import pyglet
except ImportError as e:
raise ImportError(
"""
Cannot import pyglet.
HINT: you can install pyglet directly via 'pip install pyglet'.
But if you really just want to install all Gym dependencies and not have to think about it,
'pip install -e .[all]' or 'pip install gym[all]' will do it.
"""
)
try:
from pyglet.gl import *
except ImportError as e:
raise ImportError(
"""
Error occurred while running `from pyglet.gl import *`
HINT: make sure you have OpenGL installed. On Ubuntu, you can run 'apt-get install python-opengl'.
If you're running on a server, you may need a virtual frame buffer; something like this should work:
'xvfb-run -s \"-screen 0 1400x900x24\" python <your_script.py>'
"""
)
import math
import numpy as np
import copy
RAD2DEG = 57.29577951308232
def get_display(spec):
"""Convert a display specification (such as :0) into an actual Display
object.
Pyglet only supports multiple Displays on Linux.
"""
if spec is None:
return pyglet.canvas.get_display()
# returns already available pyglet_display,
# if there is no pyglet display available then it creates one
elif isinstance(spec, str):
return pyglet.canvas.Display(spec)
else:
raise error.Error(
"Invalid display specification: {}. (Must be a string like :0 or None.)".format(
spec
)
)
def get_window(width, height, display, **kwargs):
"""
Will create a pyglet window from the display specification provided.
"""
screen = display.get_screens() # available screens
config = screen[0].get_best_config() # selecting the first screen
context = config.create_context(None) # create GL context
return pyglet.window.Window(
width=width,
height=height,
display=display,
config=config,
context=context,
**kwargs
)
class Viewer(object):
def __init__(self, width, height, display=None):
display = get_display(display)
self.width = width
self.height = height
self.window = get_window(width=width, height=height, display=display)
self.window.on_close = self.window_closed_by_user
self.isopen = True
self.geoms = []
self.onetime_geoms = []
self.transform = Transform()
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
def close(self):
if self.isopen and sys.meta_path:
# ^^^ check sys.meta_path to avoid 'ImportError: sys.meta_path is None, Python is likely shutting down'
self.window.close()
self.isopen = False
def window_closed_by_user(self):
self.isopen = False
def set_bounds(self, left, right, bottom, top):
assert right > left and top > bottom
scalex = self.width / (right - left)
scaley = self.height / (top - bottom)
self.transform = Transform(
translation=(-left * scalex, -bottom * scaley), scale=(scalex, scaley)
)
def add_geom(self, geom):
self.geoms.append(geom)
def add_onetime(self, geom):
self.onetime_geoms.append(geom)
def render(self, return_rgb_array=False):
glClearColor(1, 1, 1, 1)
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
self.transform.enable()
for geom in self.geoms:
geom.render()
for geom in self.onetime_geoms:
geom.render()
self.transform.disable()
arr = None
if return_rgb_array:
buffer = pyglet.image.get_buffer_manager().get_color_buffer()
image_data = buffer.get_image_data()
arr = np.frombuffer(image_data.get_data(), dtype=np.uint8)
# In https://github.com/openai/gym-http-api/issues/2, we
# discovered that someone using Xmonad on Arch was having
# a window of size 598 x 398, though a 600 x 400 window
# was requested. (Guess Xmonad was preserving a pixel for
# the boundary.) So we use the buffer height/width rather
# than the requested one.
arr = arr.reshape(buffer.height, buffer.width, 4)
arr = arr[::-1, :, 0:3]
self.window.flip()
self.onetime_geoms = []
return arr if return_rgb_array else self.isopen
# Convenience
def draw_circle(self, radius=10, res=30, filled=True, **attrs):
geom = make_circle(radius=radius, res=res, filled=filled)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def draw_polygon(self, v, filled=True, **attrs):
geom = make_polygon(v=v, filled=filled)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def draw_polyline(self, v, **attrs):
geom = make_polyline(v=v)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def draw_line(self, start, end, **attrs):
geom = Line(start, end)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def get_array(self):
self.window.flip()
image_data = (
pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
)
self.window.flip()
arr = np.fromstring(image_data.get_data(), dtype=np.uint8, sep="")
arr = arr.reshape(self.height, self.width, 4)
return arr[::-1, :, 0:3]
def __del__(self):
self.close()
def _add_attrs(geom, attrs):
if "color" in attrs:
geom.set_color(*attrs["color"])
if "linewidth" in attrs:
geom.set_linewidth(attrs["linewidth"])
class Geom(object):
def __init__(self):
self._color = Color((0, 0, 0, 1.0))
self.attrs = [self._color]
def render(self):
for attr in reversed(self.attrs):
attr.enable()
self.render1()
for attr in self.attrs:
attr.disable()
def render1(self):
raise NotImplementedError
def add_attr(self, attr):
self.attrs.append(attr)
def set_color(self, r, g, b):
self._color.vec4 = (r, g, b, 1)
def set_color_alpha(self, r, g, b, a):
self._color.vec4 = (r, g, b, a)
class Attr(object):
def enable(self):
raise NotImplementedError
def disable(self):
pass
class Transform(Attr):
def __init__(self, translation=(0.0, 0.0), rotation=0.0, scale=(1, 1)):
self.set_translation(*translation)
self.set_rotation(rotation)
self.set_scale(*scale)
def enable(self):
glPushMatrix()
glTranslatef(
self.translation[0], self.translation[1], 0
) # translate to GL loc ppint
glRotatef(RAD2DEG * self.rotation, 0, 0, 1.0)
glScalef(self.scale[0], self.scale[1], 1)
def disable(self):
glPopMatrix()
def set_translation(self, newx, newy):
self.translation = (float(newx), float(newy))
def set_rotation(self, new):
self.rotation = float(new)
def set_scale(self, newx, newy):
self.scale = (float(newx), float(newy))
class Color(Attr):
def __init__(self, vec4):
self.vec4 = vec4
def enable(self):
glColor4f(*self.vec4)
class LineStyle(Attr):
def __init__(self, style):
self.style = style
def enable(self):
glEnable(GL_LINE_STIPPLE)
glLineStipple(1, self.style)
def disable(self):
glDisable(GL_LINE_STIPPLE)
class LineWidth(Attr):
def __init__(self, stroke):
self.stroke = stroke
def enable(self):
glLineWidth(self.stroke)
class Point(Geom):
def __init__(self):
Geom.__init__(self)
def render1(self):
glBegin(GL_POINTS) # draw point
glVertex3f(0.0, 0.0, 0.0)
glEnd()
class FilledPolygon(Geom):
def __init__(self, v):
Geom.__init__(self)
self.v = v
def render1(self):
if len(self.v) == 4:
glBegin(GL_QUADS)
elif len(self.v) > 4:
glBegin(GL_POLYGON)
else:
glBegin(GL_TRIANGLES)
for p in self.v:
glVertex3f(p[0], p[1], 0) # draw each vertex
glEnd()
def make_circle(radius=10, res=30, filled=True):
points = []
for i in range(res):
ang = 2 * math.pi * i / res
points.append((math.cos(ang) * radius, math.sin(ang) * radius))
if filled:
return FilledPolygon(points)
else:
return PolyLine(points, True)
def make_polygon(v, filled=True):
if filled:
return FilledPolygon(v)
else:
return PolyLine(v, True)
def make_polyline(v):
return PolyLine(v, False)
def make_capsule(length, width):
l, r, t, b = 0, length, width / 2, -width / 2
box = make_polygon([(l, b), (l, t), (r, t), (r, b)])
circ0 = make_circle(width / 2)
circ1 = make_circle(width / 2)
circ1.add_attr(Transform(translation=(length, 0)))
geom = Compound([box, circ0, circ1])
return geom
def make_unicycle(radius):
circ = make_circle(radius)
l, r, t, b = 0, radius * 2.5, radius / 5, -radius / 5
box = make_polygon([(l, b), (l, t), (r, t), (r, b)])
geom = Compound([circ, box])
return geom
class Compound(Geom):
def __init__(self, gs):
Geom.__init__(self)
self.gs = gs
for g in self.gs:
g.attrs = [a for a in g.attrs if not isinstance(a, Color)]
def render1(self):
for g in self.gs:
g.render()
class PolyLine(Geom):
def __init__(self, v, close):
Geom.__init__(self)
self.v = v
self.close = close
self.linewidth = LineWidth(1)
self.add_attr(self.linewidth)
def render1(self):
glBegin(GL_LINE_LOOP if self.close else GL_LINE_STRIP)
for p in self.v:
glVertex3f(p[0], p[1], 0) # draw each vertex
glEnd()
def set_linewidth(self, x):
self.linewidth.stroke = x
class Traj(PolyLine):
def __init__(self, v, close):
PolyLine.__init__(self, v, close)
def render1(self):
p_prev = self.v[-1]
glBegin(GL_LINE_LOOP if self.close else GL_LINE_STRIP)
for p in self.v[::-1]:
distance = np.sqrt( (p[0]-p_prev[0])**2 + (p[1]-p_prev[1])**2 )
if distance > 0.1:
pass
else:
glVertex3f(p[0], p[1], 0)
p_prev = copy.copy(p)
glEnd()
def set_traj(self, v):
self.v = v
class Line(Geom):
def __init__(self, start=(0.0, 0.0), end=(0.0, 0.0)):
Geom.__init__(self)
self.start = start
self.end = end
self.linewidth = LineWidth(1)
self.add_attr(self.linewidth)
def render1(self):
glBegin(GL_LINES)
glVertex2f(*self.start)
glVertex2f(*self.end)
glEnd()
class Image(Geom):
def __init__(self, fname, width, height):
Geom.__init__(self)
self.set_color(1.0, 1.0, 1.0)
self.width = width
self.height = height
img = pyglet.image.load(fname)
self.img = img
self.flip = False
def render1(self):
self.img.blit(
-self.width / 2, -self.height / 2, width=self.width, height=self.height
)
# ================================================================
class SimpleImageViewer(object):
def __init__(self, display=None, maxwidth=500):
self.window = None
self.isopen = False
self.display = get_display(display)
self.maxwidth = maxwidth
def imshow(self, arr):
if self.window is None:
height, width, _channels = arr.shape
if width > self.maxwidth:
scale = self.maxwidth / width
width = int(scale * width)
height = int(scale * height)
self.window = get_window(
width=width,
height=height,
display=self.display,
vsync=False,
resizable=True,
)
self.width = width
self.height = height
self.isopen = True
@self.window.event
def on_resize(width, height):
self.width = width
self.height = height
@self.window.event
def on_close():
self.isopen = False
assert len(arr.shape) == 3, "You passed in an image with the wrong number shape"
image = pyglet.image.ImageData(
arr.shape[1], arr.shape[0], "RGB", arr.tobytes(), pitch=arr.shape[1] * -3
)
texture = image.get_texture()
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
texture.width = self.width
texture.height = self.height
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
texture.blit(0, 0) # draw
self.window.flip()
def close(self):
if self.isopen and sys.meta_path:
# ^^^ check sys.meta_path to avoid 'ImportError: sys.meta_path is None, Python is likely shutting down'
self.window.close()
self.isopen = False
def __del__(self):
self.close()
| 13,702 | Python | .py | 390 | 26.953846 | 115 | 0.587171 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,329 | lunar_lander.py | WindyLab_Gym-PPS/gym/envs/box2d/lunar_lander.py | """
Rocket trajectory optimization is a classic topic in Optimal Control.
According to Pontryagin's maximum principle it's optimal to fire engine full throttle or
turn it off. That's the reason this environment is OK to have discreet actions (engine on or off).
The landing pad is always at coordinates (0,0). The coordinates are the first two numbers in the state vector.
Reward for moving from the top of the screen to the landing pad and zero speed is about 100..140 points.
If the lander moves away from the landing pad it loses reward. The episode finishes if the lander crashes or
comes to rest, receiving an additional -100 or +100 points. Each leg with ground contact is +10 points.
Firing the main engine is -0.3 points each frame. Firing the side engine is -0.03 points each frame.
Solved is 200 points.
Landing outside the landing pad is possible. Fuel is infinite, so an agent can learn to fly and then land
on its first attempt. Please see the source code for details.
To see a heuristic landing, run:
python gym/envs/box2d/lunar_lander.py
To play yourself, run:
python examples/agents/keyboard_agent.py LunarLander-v2
Created by Oleg Klimov. Licensed on the same terms as the rest of OpenAI Gym.
"""
import math
import sys
import numpy as np
import Box2D
from Box2D.b2 import (
edgeShape,
circleShape,
fixtureDef,
polygonShape,
revoluteJointDef,
contactListener,
)
import gym
from gym import spaces
from gym.utils import seeding, EzPickle
FPS = 50
SCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well
MAIN_ENGINE_POWER = 13.0
SIDE_ENGINE_POWER = 0.6
INITIAL_RANDOM = 1000.0 # Set 1500 to make game harder
LANDER_POLY = [(-14, +17), (-17, 0), (-17, -10), (+17, -10), (+17, 0), (+14, +17)]
LEG_AWAY = 20
LEG_DOWN = 18
LEG_W, LEG_H = 2, 8
LEG_SPRING_TORQUE = 40
SIDE_ENGINE_HEIGHT = 14.0
SIDE_ENGINE_AWAY = 12.0
VIEWPORT_W = 600
VIEWPORT_H = 400
class ContactDetector(contactListener):
def __init__(self, env):
contactListener.__init__(self)
self.env = env
def BeginContact(self, contact):
if (
self.env.lander == contact.fixtureA.body
or self.env.lander == contact.fixtureB.body
):
self.env.game_over = True
for i in range(2):
if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:
self.env.legs[i].ground_contact = True
def EndContact(self, contact):
for i in range(2):
if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:
self.env.legs[i].ground_contact = False
class LunarLander(gym.Env, EzPickle):
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": FPS}
continuous = False
def __init__(self):
EzPickle.__init__(self)
self.seed()
self.viewer = None
self.world = Box2D.b2World()
self.moon = None
self.lander = None
self.particles = []
self.prev_reward = None
# useful range is -1 .. +1, but spikes can be higher
self.observation_space = spaces.Box(
-np.inf, np.inf, shape=(8,), dtype=np.float32
)
if self.continuous:
# Action is two floats [main engine, left-right engines].
# Main engine: -1..0 off, 0..+1 throttle from 50% to 100% power. Engine can't work with less than 50% power.
# Left-right: -1.0..-0.5 fire left engine, +0.5..+1.0 fire right engine, -0.5..0.5 off
self.action_space = spaces.Box(-1, +1, (2,), dtype=np.float32)
else:
# Nop, fire left engine, main engine, right engine
self.action_space = spaces.Discrete(4)
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _destroy(self):
if not self.moon:
return
self.world.contactListener = None
self._clean_particles(True)
self.world.DestroyBody(self.moon)
self.moon = None
self.world.DestroyBody(self.lander)
self.lander = None
self.world.DestroyBody(self.legs[0])
self.world.DestroyBody(self.legs[1])
def reset(self):
self._destroy()
self.world.contactListener_keepref = ContactDetector(self)
self.world.contactListener = self.world.contactListener_keepref
self.game_over = False
self.prev_shaping = None
W = VIEWPORT_W / SCALE
H = VIEWPORT_H / SCALE
# terrain
CHUNKS = 11
height = self.np_random.uniform(0, H / 2, size=(CHUNKS + 1,))
chunk_x = [W / (CHUNKS - 1) * i for i in range(CHUNKS)]
self.helipad_x1 = chunk_x[CHUNKS // 2 - 1]
self.helipad_x2 = chunk_x[CHUNKS // 2 + 1]
self.helipad_y = H / 4
height[CHUNKS // 2 - 2] = self.helipad_y
height[CHUNKS // 2 - 1] = self.helipad_y
height[CHUNKS // 2 + 0] = self.helipad_y
height[CHUNKS // 2 + 1] = self.helipad_y
height[CHUNKS // 2 + 2] = self.helipad_y
smooth_y = [
0.33 * (height[i - 1] + height[i + 0] + height[i + 1])
for i in range(CHUNKS)
]
self.moon = self.world.CreateStaticBody(
shapes=edgeShape(vertices=[(0, 0), (W, 0)])
)
self.sky_polys = []
for i in range(CHUNKS - 1):
p1 = (chunk_x[i], smooth_y[i])
p2 = (chunk_x[i + 1], smooth_y[i + 1])
self.moon.CreateEdgeFixture(vertices=[p1, p2], density=0, friction=0.1)
self.sky_polys.append([p1, p2, (p2[0], H), (p1[0], H)])
self.moon.color1 = (0.0, 0.0, 0.0)
self.moon.color2 = (0.0, 0.0, 0.0)
initial_y = VIEWPORT_H / SCALE
self.lander = self.world.CreateDynamicBody(
position=(VIEWPORT_W / SCALE / 2, initial_y),
angle=0.0,
fixtures=fixtureDef(
shape=polygonShape(
vertices=[(x / SCALE, y / SCALE) for x, y in LANDER_POLY]
),
density=5.0,
friction=0.1,
categoryBits=0x0010,
maskBits=0x001, # collide only with ground
restitution=0.0,
), # 0.99 bouncy
)
self.lander.color1 = (0.5, 0.4, 0.9)
self.lander.color2 = (0.3, 0.3, 0.5)
self.lander.ApplyForceToCenter(
(
self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM),
self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM),
),
True,
)
self.legs = []
for i in [-1, +1]:
leg = self.world.CreateDynamicBody(
position=(VIEWPORT_W / SCALE / 2 - i * LEG_AWAY / SCALE, initial_y),
angle=(i * 0.05),
fixtures=fixtureDef(
shape=polygonShape(box=(LEG_W / SCALE, LEG_H / SCALE)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001,
),
)
leg.ground_contact = False
leg.color1 = (0.5, 0.4, 0.9)
leg.color2 = (0.3, 0.3, 0.5)
rjd = revoluteJointDef(
bodyA=self.lander,
bodyB=leg,
localAnchorA=(0, 0),
localAnchorB=(i * LEG_AWAY / SCALE, LEG_DOWN / SCALE),
enableMotor=True,
enableLimit=True,
maxMotorTorque=LEG_SPRING_TORQUE,
motorSpeed=+0.3 * i, # low enough not to jump back into the sky
)
if i == -1:
rjd.lowerAngle = (
+0.9 - 0.5
) # The most esoteric numbers here, angled legs have freedom to travel within
rjd.upperAngle = +0.9
else:
rjd.lowerAngle = -0.9
rjd.upperAngle = -0.9 + 0.5
leg.joint = self.world.CreateJoint(rjd)
self.legs.append(leg)
self.drawlist = [self.lander] + self.legs
return self.step(np.array([0, 0]) if self.continuous else 0)[0]
def _create_particle(self, mass, x, y, ttl):
p = self.world.CreateDynamicBody(
position=(x, y),
angle=0.0,
fixtures=fixtureDef(
shape=circleShape(radius=2 / SCALE, pos=(0, 0)),
density=mass,
friction=0.1,
categoryBits=0x0100,
maskBits=0x001, # collide only with ground
restitution=0.3,
),
)
p.ttl = ttl
self.particles.append(p)
self._clean_particles(False)
return p
def _clean_particles(self, all):
while self.particles and (all or self.particles[0].ttl < 0):
self.world.DestroyBody(self.particles.pop(0))
def step(self, action):
if self.continuous:
action = np.clip(action, -1, +1).astype(np.float32)
else:
assert self.action_space.contains(action), "%r (%s) invalid " % (
action,
type(action),
)
# Engines
tip = (math.sin(self.lander.angle), math.cos(self.lander.angle))
side = (-tip[1], tip[0])
dispersion = [self.np_random.uniform(-1.0, +1.0) / SCALE for _ in range(2)]
m_power = 0.0
if (self.continuous and action[0] > 0.0) or (
not self.continuous and action == 2
):
# Main engine
if self.continuous:
m_power = (np.clip(action[0], 0.0, 1.0) + 1.0) * 0.5 # 0.5..1.0
assert m_power >= 0.5 and m_power <= 1.0
else:
m_power = 1.0
ox = (
tip[0] * (4 / SCALE + 2 * dispersion[0]) + side[0] * dispersion[1]
) # 4 is move a bit downwards, +-2 for randomness
oy = -tip[1] * (4 / SCALE + 2 * dispersion[0]) - side[1] * dispersion[1]
impulse_pos = (self.lander.position[0] + ox, self.lander.position[1] + oy)
p = self._create_particle(
3.5, # 3.5 is here to make particle speed adequate
impulse_pos[0],
impulse_pos[1],
m_power,
) # particles are just a decoration
p.ApplyLinearImpulse(
(ox * MAIN_ENGINE_POWER * m_power, oy * MAIN_ENGINE_POWER * m_power),
impulse_pos,
True,
)
self.lander.ApplyLinearImpulse(
(-ox * MAIN_ENGINE_POWER * m_power, -oy * MAIN_ENGINE_POWER * m_power),
impulse_pos,
True,
)
s_power = 0.0
if (self.continuous and np.abs(action[1]) > 0.5) or (
not self.continuous and action in [1, 3]
):
# Orientation engines
if self.continuous:
direction = np.sign(action[1])
s_power = np.clip(np.abs(action[1]), 0.5, 1.0)
assert s_power >= 0.5 and s_power <= 1.0
else:
direction = action - 2
s_power = 1.0
ox = tip[0] * dispersion[0] + side[0] * (
3 * dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE
)
oy = -tip[1] * dispersion[0] - side[1] * (
3 * dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE
)
impulse_pos = (
self.lander.position[0] + ox - tip[0] * 17 / SCALE,
self.lander.position[1] + oy + tip[1] * SIDE_ENGINE_HEIGHT / SCALE,
)
p = self._create_particle(0.7, impulse_pos[0], impulse_pos[1], s_power)
p.ApplyLinearImpulse(
(ox * SIDE_ENGINE_POWER * s_power, oy * SIDE_ENGINE_POWER * s_power),
impulse_pos,
True,
)
self.lander.ApplyLinearImpulse(
(-ox * SIDE_ENGINE_POWER * s_power, -oy * SIDE_ENGINE_POWER * s_power),
impulse_pos,
True,
)
self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)
pos = self.lander.position
vel = self.lander.linearVelocity
state = [
(pos.x - VIEWPORT_W / SCALE / 2) / (VIEWPORT_W / SCALE / 2),
(pos.y - (self.helipad_y + LEG_DOWN / SCALE)) / (VIEWPORT_H / SCALE / 2),
vel.x * (VIEWPORT_W / SCALE / 2) / FPS,
vel.y * (VIEWPORT_H / SCALE / 2) / FPS,
self.lander.angle,
20.0 * self.lander.angularVelocity / FPS,
1.0 if self.legs[0].ground_contact else 0.0,
1.0 if self.legs[1].ground_contact else 0.0,
]
assert len(state) == 8
reward = 0
shaping = (
-100 * np.sqrt(state[0] * state[0] + state[1] * state[1])
- 100 * np.sqrt(state[2] * state[2] + state[3] * state[3])
- 100 * abs(state[4])
+ 10 * state[6]
+ 10 * state[7]
) # And ten points for legs contact, the idea is if you
# lose contact again after landing, you get negative reward
if self.prev_shaping is not None:
reward = shaping - self.prev_shaping
self.prev_shaping = shaping
reward -= (
m_power * 0.30
) # less fuel spent is better, about -30 for heuristic landing
reward -= s_power * 0.03
done = False
if self.game_over or abs(state[0]) >= 1.0:
done = True
reward = -100
if not self.lander.awake:
done = True
reward = +100
return np.array(state, dtype=np.float32), reward, done, {}
def render(self, mode="human"):
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)
self.viewer.set_bounds(0, VIEWPORT_W / SCALE, 0, VIEWPORT_H / SCALE)
for obj in self.particles:
obj.ttl -= 0.15
obj.color1 = (
max(0.2, 0.2 + obj.ttl),
max(0.2, 0.5 * obj.ttl),
max(0.2, 0.5 * obj.ttl),
)
obj.color2 = (
max(0.2, 0.2 + obj.ttl),
max(0.2, 0.5 * obj.ttl),
max(0.2, 0.5 * obj.ttl),
)
self._clean_particles(False)
for p in self.sky_polys:
self.viewer.draw_polygon(p, color=(0, 0, 0))
for obj in self.particles + self.drawlist:
for f in obj.fixtures:
trans = f.body.transform
if type(f.shape) is circleShape:
t = rendering.Transform(translation=trans * f.shape.pos)
self.viewer.draw_circle(
f.shape.radius, 20, color=obj.color1
).add_attr(t)
self.viewer.draw_circle(
f.shape.radius, 20, color=obj.color2, filled=False, linewidth=2
).add_attr(t)
else:
path = [trans * v for v in f.shape.vertices]
self.viewer.draw_polygon(path, color=obj.color1)
path.append(path[0])
self.viewer.draw_polyline(path, color=obj.color2, linewidth=2)
for x in [self.helipad_x1, self.helipad_x2]:
flagy1 = self.helipad_y
flagy2 = flagy1 + 50 / SCALE
self.viewer.draw_polyline([(x, flagy1), (x, flagy2)], color=(1, 1, 1))
self.viewer.draw_polygon(
[
(x, flagy2),
(x, flagy2 - 10 / SCALE),
(x + 25 / SCALE, flagy2 - 5 / SCALE),
],
color=(0.8, 0.8, 0),
)
return self.viewer.render(return_rgb_array=mode == "rgb_array")
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
class LunarLanderContinuous(LunarLander):
continuous = True
def heuristic(env, s):
"""
The heuristic for
1. Testing
2. Demonstration rollout.
Args:
env: The environment
s (list): The state. Attributes:
s[0] is the horizontal coordinate
s[1] is the vertical coordinate
s[2] is the horizontal speed
s[3] is the vertical speed
s[4] is the angle
s[5] is the angular speed
s[6] 1 if first leg has contact, else 0
s[7] 1 if second leg has contact, else 0
returns:
a: The heuristic to be fed into the step function defined above to determine the next step and reward.
"""
angle_targ = s[0] * 0.5 + s[2] * 1.0 # angle should point towards center
if angle_targ > 0.4:
angle_targ = 0.4 # more than 0.4 radians (22 degrees) is bad
if angle_targ < -0.4:
angle_targ = -0.4
hover_targ = 0.55 * np.abs(
s[0]
) # target y should be proportional to horizontal offset
angle_todo = (angle_targ - s[4]) * 0.5 - (s[5]) * 1.0
hover_todo = (hover_targ - s[1]) * 0.5 - (s[3]) * 0.5
if s[6] or s[7]: # legs have contact
angle_todo = 0
hover_todo = (
-(s[3]) * 0.5
) # override to reduce fall speed, that's all we need after contact
if env.continuous:
a = np.array([hover_todo * 20 - 1, -angle_todo * 20])
a = np.clip(a, -1, +1)
else:
a = 0
if hover_todo > np.abs(angle_todo) and hover_todo > 0.05:
a = 2
elif angle_todo < -0.05:
a = 3
elif angle_todo > +0.05:
a = 1
return a
def demo_heuristic_lander(env, seed=None, render=False):
env.seed(seed)
total_reward = 0
steps = 0
s = env.reset()
while True:
a = heuristic(env, s)
s, r, done, info = env.step(a)
total_reward += r
if render:
still_open = env.render()
if still_open == False:
break
if steps % 20 == 0 or done:
print("observations:", " ".join(["{:+0.2f}".format(x) for x in s]))
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
steps += 1
if done:
break
if render:
env.close()
return total_reward
if __name__ == "__main__":
demo_heuristic_lander(LunarLander(), render=True)
| 18,642 | Python | .py | 459 | 29.535948 | 120 | 0.536231 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,330 | __init__.py | WindyLab_Gym-PPS/gym/envs/box2d/__init__.py | try:
import Box2D
from gym.envs.box2d.lunar_lander import LunarLander
from gym.envs.box2d.lunar_lander import LunarLanderContinuous
from gym.envs.box2d.bipedal_walker import BipedalWalker, BipedalWalkerHardcore
from gym.envs.box2d.car_racing import CarRacing
except ImportError:
Box2D = None
| 316 | Python | .py | 8 | 35.5 | 82 | 0.801948 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,331 | bipedal_walker.py | WindyLab_Gym-PPS/gym/envs/box2d/bipedal_walker.py | import sys
import math
import numpy as np
import Box2D
from Box2D.b2 import (
edgeShape,
circleShape,
fixtureDef,
polygonShape,
revoluteJointDef,
contactListener,
)
import gym
from gym import spaces
from gym.utils import colorize, seeding, EzPickle
# This is simple 4-joints walker robot environment.
#
# There are two versions:
#
# - Normal, with slightly uneven terrain.
#
# - Hardcore with ladders, stumps, pitfalls.
#
# Reward is given for moving forward, total 300+ points up to the far end. If the robot falls,
# it gets -100. Applying motor torque costs a small amount of points, more optimal agent
# will get better score.
#
# Heuristic is provided for testing, it's also useful to get demonstrations to
# learn from. To run heuristic:
#
# python gym/envs/box2d/bipedal_walker.py
#
# State consists of hull angle speed, angular velocity, horizontal speed, vertical speed,
# position of joints and joints angular speed, legs contact with ground, and 10 lidar
# rangefinder measurements to help to deal with the hardcore version. There's no coordinates
# in the state vector. Lidar is less useful in normal version, but it works.
#
# To solve the game you need to get 300 points in 1600 time steps.
#
# To solve hardcore version you need 300 points in 2000 time steps.
#
# Created by Oleg Klimov. Licensed on the same terms as the rest of OpenAI Gym.
FPS = 50
SCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well
MOTORS_TORQUE = 80
SPEED_HIP = 4
SPEED_KNEE = 6
LIDAR_RANGE = 160 / SCALE
INITIAL_RANDOM = 5
HULL_POLY = [(-30, +9), (+6, +9), (+34, +1), (+34, -8), (-30, -8)]
LEG_DOWN = -8 / SCALE
LEG_W, LEG_H = 8 / SCALE, 34 / SCALE
VIEWPORT_W = 600
VIEWPORT_H = 400
TERRAIN_STEP = 14 / SCALE
TERRAIN_LENGTH = 200 # in steps
TERRAIN_HEIGHT = VIEWPORT_H / SCALE / 4
TERRAIN_GRASS = 10 # low long are grass spots, in steps
TERRAIN_STARTPAD = 20 # in steps
FRICTION = 2.5
HULL_FD = fixtureDef(
shape=polygonShape(vertices=[(x / SCALE, y / SCALE) for x, y in HULL_POLY]),
density=5.0,
friction=0.1,
categoryBits=0x0020,
maskBits=0x001, # collide only with ground
restitution=0.0,
) # 0.99 bouncy
LEG_FD = fixtureDef(
shape=polygonShape(box=(LEG_W / 2, LEG_H / 2)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001,
)
LOWER_FD = fixtureDef(
shape=polygonShape(box=(0.8 * LEG_W / 2, LEG_H / 2)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001,
)
class ContactDetector(contactListener):
def __init__(self, env):
contactListener.__init__(self)
self.env = env
def BeginContact(self, contact):
if (
self.env.hull == contact.fixtureA.body
or self.env.hull == contact.fixtureB.body
):
self.env.game_over = True
for leg in [self.env.legs[1], self.env.legs[3]]:
if leg in [contact.fixtureA.body, contact.fixtureB.body]:
leg.ground_contact = True
def EndContact(self, contact):
for leg in [self.env.legs[1], self.env.legs[3]]:
if leg in [contact.fixtureA.body, contact.fixtureB.body]:
leg.ground_contact = False
class BipedalWalker(gym.Env, EzPickle):
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": FPS}
hardcore = False
def __init__(self):
EzPickle.__init__(self)
self.seed()
self.viewer = None
self.world = Box2D.b2World()
self.terrain = None
self.hull = None
self.prev_shaping = None
self.fd_polygon = fixtureDef(
shape=polygonShape(vertices=[(0, 0), (1, 0), (1, -1), (0, -1)]),
friction=FRICTION,
)
self.fd_edge = fixtureDef(
shape=edgeShape(vertices=[(0, 0), (1, 1)]),
friction=FRICTION,
categoryBits=0x0001,
)
self.reset()
high = np.array([np.inf] * 24).astype(np.float32)
self.action_space = spaces.Box(
np.array([-1, -1, -1, -1]).astype(np.float32),
np.array([1, 1, 1, 1]).astype(np.float32),
)
self.observation_space = spaces.Box(-high, high)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _destroy(self):
if not self.terrain:
return
self.world.contactListener = None
for t in self.terrain:
self.world.DestroyBody(t)
self.terrain = []
self.world.DestroyBody(self.hull)
self.hull = None
for leg in self.legs:
self.world.DestroyBody(leg)
self.legs = []
self.joints = []
def _generate_terrain(self, hardcore):
GRASS, STUMP, STAIRS, PIT, _STATES_ = range(5)
state = GRASS
velocity = 0.0
y = TERRAIN_HEIGHT
counter = TERRAIN_STARTPAD
oneshot = False
self.terrain = []
self.terrain_x = []
self.terrain_y = []
for i in range(TERRAIN_LENGTH):
x = i * TERRAIN_STEP
self.terrain_x.append(x)
if state == GRASS and not oneshot:
velocity = 0.8 * velocity + 0.01 * np.sign(TERRAIN_HEIGHT - y)
if i > TERRAIN_STARTPAD:
velocity += self.np_random.uniform(-1, 1) / SCALE # 1
y += velocity
elif state == PIT and oneshot:
counter = self.np_random.randint(3, 5)
poly = [
(x, y),
(x + TERRAIN_STEP, y),
(x + TERRAIN_STEP, y - 4 * TERRAIN_STEP),
(x, y - 4 * TERRAIN_STEP),
]
self.fd_polygon.shape.vertices = poly
t = self.world.CreateStaticBody(fixtures=self.fd_polygon)
t.color1, t.color2 = (1, 1, 1), (0.6, 0.6, 0.6)
self.terrain.append(t)
self.fd_polygon.shape.vertices = [
(p[0] + TERRAIN_STEP * counter, p[1]) for p in poly
]
t = self.world.CreateStaticBody(fixtures=self.fd_polygon)
t.color1, t.color2 = (1, 1, 1), (0.6, 0.6, 0.6)
self.terrain.append(t)
counter += 2
original_y = y
elif state == PIT and not oneshot:
y = original_y
if counter > 1:
y -= 4 * TERRAIN_STEP
elif state == STUMP and oneshot:
counter = self.np_random.randint(1, 3)
poly = [
(x, y),
(x + counter * TERRAIN_STEP, y),
(x + counter * TERRAIN_STEP, y + counter * TERRAIN_STEP),
(x, y + counter * TERRAIN_STEP),
]
self.fd_polygon.shape.vertices = poly
t = self.world.CreateStaticBody(fixtures=self.fd_polygon)
t.color1, t.color2 = (1, 1, 1), (0.6, 0.6, 0.6)
self.terrain.append(t)
elif state == STAIRS and oneshot:
stair_height = +1 if self.np_random.rand() > 0.5 else -1
stair_width = self.np_random.randint(4, 5)
stair_steps = self.np_random.randint(3, 5)
original_y = y
for s in range(stair_steps):
poly = [
(
x + (s * stair_width) * TERRAIN_STEP,
y + (s * stair_height) * TERRAIN_STEP,
),
(
x + ((1 + s) * stair_width) * TERRAIN_STEP,
y + (s * stair_height) * TERRAIN_STEP,
),
(
x + ((1 + s) * stair_width) * TERRAIN_STEP,
y + (-1 + s * stair_height) * TERRAIN_STEP,
),
(
x + (s * stair_width) * TERRAIN_STEP,
y + (-1 + s * stair_height) * TERRAIN_STEP,
),
]
self.fd_polygon.shape.vertices = poly
t = self.world.CreateStaticBody(fixtures=self.fd_polygon)
t.color1, t.color2 = (1, 1, 1), (0.6, 0.6, 0.6)
self.terrain.append(t)
counter = stair_steps * stair_width
elif state == STAIRS and not oneshot:
s = stair_steps * stair_width - counter - stair_height
n = s / stair_width
y = original_y + (n * stair_height) * TERRAIN_STEP
oneshot = False
self.terrain_y.append(y)
counter -= 1
if counter == 0:
counter = self.np_random.randint(TERRAIN_GRASS / 2, TERRAIN_GRASS)
if state == GRASS and hardcore:
state = self.np_random.randint(1, _STATES_)
oneshot = True
else:
state = GRASS
oneshot = True
self.terrain_poly = []
for i in range(TERRAIN_LENGTH - 1):
poly = [
(self.terrain_x[i], self.terrain_y[i]),
(self.terrain_x[i + 1], self.terrain_y[i + 1]),
]
self.fd_edge.shape.vertices = poly
t = self.world.CreateStaticBody(fixtures=self.fd_edge)
color = (0.3, 1.0 if i % 2 == 0 else 0.8, 0.3)
t.color1 = color
t.color2 = color
self.terrain.append(t)
color = (0.4, 0.6, 0.3)
poly += [(poly[1][0], 0), (poly[0][0], 0)]
self.terrain_poly.append((poly, color))
self.terrain.reverse()
def _generate_clouds(self):
# Sorry for the clouds, couldn't resist
self.cloud_poly = []
for i in range(TERRAIN_LENGTH // 20):
x = self.np_random.uniform(0, TERRAIN_LENGTH) * TERRAIN_STEP
y = VIEWPORT_H / SCALE * 3 / 4
poly = [
(
x
+ 15 * TERRAIN_STEP * math.sin(3.14 * 2 * a / 5)
+ self.np_random.uniform(0, 5 * TERRAIN_STEP),
y
+ 5 * TERRAIN_STEP * math.cos(3.14 * 2 * a / 5)
+ self.np_random.uniform(0, 5 * TERRAIN_STEP),
)
for a in range(5)
]
x1 = min([p[0] for p in poly])
x2 = max([p[0] for p in poly])
self.cloud_poly.append((poly, x1, x2))
def reset(self):
self._destroy()
self.world.contactListener_bug_workaround = ContactDetector(self)
self.world.contactListener = self.world.contactListener_bug_workaround
self.game_over = False
self.prev_shaping = None
self.scroll = 0.0
self.lidar_render = 0
W = VIEWPORT_W / SCALE
H = VIEWPORT_H / SCALE
self._generate_terrain(self.hardcore)
self._generate_clouds()
init_x = TERRAIN_STEP * TERRAIN_STARTPAD / 2
init_y = TERRAIN_HEIGHT + 2 * LEG_H
self.hull = self.world.CreateDynamicBody(
position=(init_x, init_y), fixtures=HULL_FD
)
self.hull.color1 = (0.5, 0.4, 0.9)
self.hull.color2 = (0.3, 0.3, 0.5)
self.hull.ApplyForceToCenter(
(self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM), 0), True
)
self.legs = []
self.joints = []
for i in [-1, +1]:
leg = self.world.CreateDynamicBody(
position=(init_x, init_y - LEG_H / 2 - LEG_DOWN),
angle=(i * 0.05),
fixtures=LEG_FD,
)
leg.color1 = (0.6 - i / 10.0, 0.3 - i / 10.0, 0.5 - i / 10.0)
leg.color2 = (0.4 - i / 10.0, 0.2 - i / 10.0, 0.3 - i / 10.0)
rjd = revoluteJointDef(
bodyA=self.hull,
bodyB=leg,
localAnchorA=(0, LEG_DOWN),
localAnchorB=(0, LEG_H / 2),
enableMotor=True,
enableLimit=True,
maxMotorTorque=MOTORS_TORQUE,
motorSpeed=i,
lowerAngle=-0.8,
upperAngle=1.1,
)
self.legs.append(leg)
self.joints.append(self.world.CreateJoint(rjd))
lower = self.world.CreateDynamicBody(
position=(init_x, init_y - LEG_H * 3 / 2 - LEG_DOWN),
angle=(i * 0.05),
fixtures=LOWER_FD,
)
lower.color1 = (0.6 - i / 10.0, 0.3 - i / 10.0, 0.5 - i / 10.0)
lower.color2 = (0.4 - i / 10.0, 0.2 - i / 10.0, 0.3 - i / 10.0)
rjd = revoluteJointDef(
bodyA=leg,
bodyB=lower,
localAnchorA=(0, -LEG_H / 2),
localAnchorB=(0, LEG_H / 2),
enableMotor=True,
enableLimit=True,
maxMotorTorque=MOTORS_TORQUE,
motorSpeed=1,
lowerAngle=-1.6,
upperAngle=-0.1,
)
lower.ground_contact = False
self.legs.append(lower)
self.joints.append(self.world.CreateJoint(rjd))
self.drawlist = self.terrain + self.legs + [self.hull]
class LidarCallback(Box2D.b2.rayCastCallback):
def ReportFixture(self, fixture, point, normal, fraction):
if (fixture.filterData.categoryBits & 1) == 0:
return -1
self.p2 = point
self.fraction = fraction
return fraction
self.lidar = [LidarCallback() for _ in range(10)]
return self.step(np.array([0, 0, 0, 0]))[0]
def step(self, action):
# self.hull.ApplyForceToCenter((0, 20), True) -- Uncomment this to receive a bit of stability help
control_speed = False # Should be easier as well
if control_speed:
self.joints[0].motorSpeed = float(SPEED_HIP * np.clip(action[0], -1, 1))
self.joints[1].motorSpeed = float(SPEED_KNEE * np.clip(action[1], -1, 1))
self.joints[2].motorSpeed = float(SPEED_HIP * np.clip(action[2], -1, 1))
self.joints[3].motorSpeed = float(SPEED_KNEE * np.clip(action[3], -1, 1))
else:
self.joints[0].motorSpeed = float(SPEED_HIP * np.sign(action[0]))
self.joints[0].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[0]), 0, 1)
)
self.joints[1].motorSpeed = float(SPEED_KNEE * np.sign(action[1]))
self.joints[1].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[1]), 0, 1)
)
self.joints[2].motorSpeed = float(SPEED_HIP * np.sign(action[2]))
self.joints[2].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[2]), 0, 1)
)
self.joints[3].motorSpeed = float(SPEED_KNEE * np.sign(action[3]))
self.joints[3].maxMotorTorque = float(
MOTORS_TORQUE * np.clip(np.abs(action[3]), 0, 1)
)
self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)
pos = self.hull.position
vel = self.hull.linearVelocity
for i in range(10):
self.lidar[i].fraction = 1.0
self.lidar[i].p1 = pos
self.lidar[i].p2 = (
pos[0] + math.sin(1.5 * i / 10.0) * LIDAR_RANGE,
pos[1] - math.cos(1.5 * i / 10.0) * LIDAR_RANGE,
)
self.world.RayCast(self.lidar[i], self.lidar[i].p1, self.lidar[i].p2)
state = [
self.hull.angle, # Normal angles up to 0.5 here, but sure more is possible.
2.0 * self.hull.angularVelocity / FPS,
0.3 * vel.x * (VIEWPORT_W / SCALE) / FPS, # Normalized to get -1..1 range
0.3 * vel.y * (VIEWPORT_H / SCALE) / FPS,
self.joints[
0
].angle, # This will give 1.1 on high up, but it's still OK (and there should be spikes on hiting the ground, that's normal too)
self.joints[0].speed / SPEED_HIP,
self.joints[1].angle + 1.0,
self.joints[1].speed / SPEED_KNEE,
1.0 if self.legs[1].ground_contact else 0.0,
self.joints[2].angle,
self.joints[2].speed / SPEED_HIP,
self.joints[3].angle + 1.0,
self.joints[3].speed / SPEED_KNEE,
1.0 if self.legs[3].ground_contact else 0.0,
]
state += [l.fraction for l in self.lidar]
assert len(state) == 24
self.scroll = pos.x - VIEWPORT_W / SCALE / 5
shaping = (
130 * pos[0] / SCALE
) # moving forward is a way to receive reward (normalized to get 300 on completion)
shaping -= 5.0 * abs(
state[0]
) # keep head straight, other than that and falling, any behavior is unpunished
reward = 0
if self.prev_shaping is not None:
reward = shaping - self.prev_shaping
self.prev_shaping = shaping
for a in action:
reward -= 0.00035 * MOTORS_TORQUE * np.clip(np.abs(a), 0, 1)
# normalized to about -50.0 using heuristic, more optimal agent should spend less
done = False
if self.game_over or pos[0] < 0:
reward = -100
done = True
if pos[0] > (TERRAIN_LENGTH - TERRAIN_GRASS) * TERRAIN_STEP:
done = True
return np.array(state, dtype=np.float32), reward, done, {}
def render(self, mode="human"):
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)
self.viewer.set_bounds(
self.scroll, VIEWPORT_W / SCALE + self.scroll, 0, VIEWPORT_H / SCALE
)
self.viewer.draw_polygon(
[
(self.scroll, 0),
(self.scroll + VIEWPORT_W / SCALE, 0),
(self.scroll + VIEWPORT_W / SCALE, VIEWPORT_H / SCALE),
(self.scroll, VIEWPORT_H / SCALE),
],
color=(0.9, 0.9, 1.0),
)
for poly, x1, x2 in self.cloud_poly:
if x2 < self.scroll / 2:
continue
if x1 > self.scroll / 2 + VIEWPORT_W / SCALE:
continue
self.viewer.draw_polygon(
[(p[0] + self.scroll / 2, p[1]) for p in poly], color=(1, 1, 1)
)
for poly, color in self.terrain_poly:
if poly[1][0] < self.scroll:
continue
if poly[0][0] > self.scroll + VIEWPORT_W / SCALE:
continue
self.viewer.draw_polygon(poly, color=color)
self.lidar_render = (self.lidar_render + 1) % 100
i = self.lidar_render
if i < 2 * len(self.lidar):
l = (
self.lidar[i]
if i < len(self.lidar)
else self.lidar[len(self.lidar) - i - 1]
)
self.viewer.draw_polyline([l.p1, l.p2], color=(1, 0, 0), linewidth=1)
for obj in self.drawlist:
for f in obj.fixtures:
trans = f.body.transform
if type(f.shape) is circleShape:
t = rendering.Transform(translation=trans * f.shape.pos)
self.viewer.draw_circle(
f.shape.radius, 30, color=obj.color1
).add_attr(t)
self.viewer.draw_circle(
f.shape.radius, 30, color=obj.color2, filled=False, linewidth=2
).add_attr(t)
else:
path = [trans * v for v in f.shape.vertices]
self.viewer.draw_polygon(path, color=obj.color1)
path.append(path[0])
self.viewer.draw_polyline(path, color=obj.color2, linewidth=2)
flagy1 = TERRAIN_HEIGHT
flagy2 = flagy1 + 50 / SCALE
x = TERRAIN_STEP * 3
self.viewer.draw_polyline(
[(x, flagy1), (x, flagy2)], color=(0, 0, 0), linewidth=2
)
f = [
(x, flagy2),
(x, flagy2 - 10 / SCALE),
(x + 25 / SCALE, flagy2 - 5 / SCALE),
]
self.viewer.draw_polygon(f, color=(0.9, 0.2, 0))
self.viewer.draw_polyline(f + [f[0]], color=(0, 0, 0), linewidth=2)
return self.viewer.render(return_rgb_array=mode == "rgb_array")
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
class BipedalWalkerHardcore(BipedalWalker):
hardcore = True
if __name__ == "__main__":
# Heurisic: suboptimal, have no notion of balance.
env = BipedalWalker()
env.reset()
steps = 0
total_reward = 0
a = np.array([0.0, 0.0, 0.0, 0.0])
STAY_ON_ONE_LEG, PUT_OTHER_DOWN, PUSH_OFF = 1, 2, 3
SPEED = 0.29 # Will fall forward on higher speed
state = STAY_ON_ONE_LEG
moving_leg = 0
supporting_leg = 1 - moving_leg
SUPPORT_KNEE_ANGLE = +0.1
supporting_knee_angle = SUPPORT_KNEE_ANGLE
while True:
s, r, done, info = env.step(a)
total_reward += r
if steps % 20 == 0 or done:
print("\naction " + str(["{:+0.2f}".format(x) for x in a]))
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
print("hull " + str(["{:+0.2f}".format(x) for x in s[0:4]]))
print("leg0 " + str(["{:+0.2f}".format(x) for x in s[4:9]]))
print("leg1 " + str(["{:+0.2f}".format(x) for x in s[9:14]]))
steps += 1
contact0 = s[8]
contact1 = s[13]
moving_s_base = 4 + 5 * moving_leg
supporting_s_base = 4 + 5 * supporting_leg
hip_targ = [None, None] # -0.8 .. +1.1
knee_targ = [None, None] # -0.6 .. +0.9
hip_todo = [0.0, 0.0]
knee_todo = [0.0, 0.0]
if state == STAY_ON_ONE_LEG:
hip_targ[moving_leg] = 1.1
knee_targ[moving_leg] = -0.6
supporting_knee_angle += 0.03
if s[2] > SPEED:
supporting_knee_angle += 0.03
supporting_knee_angle = min(supporting_knee_angle, SUPPORT_KNEE_ANGLE)
knee_targ[supporting_leg] = supporting_knee_angle
if s[supporting_s_base + 0] < 0.10: # supporting leg is behind
state = PUT_OTHER_DOWN
if state == PUT_OTHER_DOWN:
hip_targ[moving_leg] = +0.1
knee_targ[moving_leg] = SUPPORT_KNEE_ANGLE
knee_targ[supporting_leg] = supporting_knee_angle
if s[moving_s_base + 4]:
state = PUSH_OFF
supporting_knee_angle = min(s[moving_s_base + 2], SUPPORT_KNEE_ANGLE)
if state == PUSH_OFF:
knee_targ[moving_leg] = supporting_knee_angle
knee_targ[supporting_leg] = +1.0
if s[supporting_s_base + 2] > 0.88 or s[2] > 1.2 * SPEED:
state = STAY_ON_ONE_LEG
moving_leg = 1 - moving_leg
supporting_leg = 1 - moving_leg
if hip_targ[0]:
hip_todo[0] = 0.9 * (hip_targ[0] - s[4]) - 0.25 * s[5]
if hip_targ[1]:
hip_todo[1] = 0.9 * (hip_targ[1] - s[9]) - 0.25 * s[10]
if knee_targ[0]:
knee_todo[0] = 4.0 * (knee_targ[0] - s[6]) - 0.25 * s[7]
if knee_targ[1]:
knee_todo[1] = 4.0 * (knee_targ[1] - s[11]) - 0.25 * s[12]
hip_todo[0] -= 0.9 * (0 - s[0]) - 1.5 * s[1] # PID to keep head strait
hip_todo[1] -= 0.9 * (0 - s[0]) - 1.5 * s[1]
knee_todo[0] -= 15.0 * s[3] # vertical speed, to damp oscillations
knee_todo[1] -= 15.0 * s[3]
a[0] = hip_todo[0]
a[1] = knee_todo[0]
a[2] = hip_todo[1]
a[3] = knee_todo[1]
a = np.clip(0.5 * a, -1.0, 1.0)
env.render()
if done:
break
| 24,191 | Python | .py | 582 | 29.853952 | 141 | 0.520122 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,332 | car_racing.py | WindyLab_Gym-PPS/gym/envs/box2d/car_racing.py | """
Easiest continuous control task to learn from pixels, a top-down racing
environment.
Discrete control is reasonable in this environment as well, on/off
discretization is fine.
State consists of STATE_W x STATE_H pixels.
The reward is -0.1 every frame and +1000/N for every track tile visited, where
N is the total number of tiles visited in the track. For example, if you have
finished in 732 frames, your reward is 1000 - 0.1*732 = 926.8 points.
The game is solved when the agent consistently gets 900+ points. The generated
track is random every episode.
The episode finishes when all the tiles are visited. The car also can go
outside of the PLAYFIELD - that is far off the track, then it will get -100
and die.
Some indicators are shown at the bottom of the window along with the state RGB
buffer. From left to right: the true speed, four ABS sensors, the steering
wheel position and gyroscope.
To play yourself (it's rather fast for humans), type:
python gym/envs/box2d/car_racing.py
Remember it's a powerful rear-wheel drive car - don't press the accelerator
and turn at the same time.
Created by Oleg Klimov. Licensed on the same terms as the rest of OpenAI Gym.
"""
import sys
import math
import numpy as np
import Box2D
from Box2D.b2 import fixtureDef
from Box2D.b2 import polygonShape
from Box2D.b2 import contactListener
import gym
from gym import spaces
from gym.envs.box2d.car_dynamics import Car
from gym.utils import seeding, EzPickle
import pyglet
pyglet.options["debug_gl"] = False
from pyglet import gl
STATE_W = 96 # less than Atari 160x192
STATE_H = 96
VIDEO_W = 600
VIDEO_H = 400
WINDOW_W = 1000
WINDOW_H = 800
SCALE = 6.0 # Track scale
TRACK_RAD = 900 / SCALE # Track is heavily morphed circle with this radius
PLAYFIELD = 2000 / SCALE # Game over boundary
FPS = 50 # Frames per second
ZOOM = 2.7 # Camera zoom
ZOOM_FOLLOW = True # Set to False for fixed view (don't use zoom)
TRACK_DETAIL_STEP = 21 / SCALE
TRACK_TURN_RATE = 0.31
TRACK_WIDTH = 40 / SCALE
BORDER = 8 / SCALE
BORDER_MIN_COUNT = 4
ROAD_COLOR = [0.4, 0.4, 0.4]
class FrictionDetector(contactListener):
def __init__(self, env):
contactListener.__init__(self)
self.env = env
def BeginContact(self, contact):
self._contact(contact, True)
def EndContact(self, contact):
self._contact(contact, False)
def _contact(self, contact, begin):
tile = None
obj = None
u1 = contact.fixtureA.body.userData
u2 = contact.fixtureB.body.userData
if u1 and "road_friction" in u1.__dict__:
tile = u1
obj = u2
if u2 and "road_friction" in u2.__dict__:
tile = u2
obj = u1
if not tile:
return
tile.color[0] = ROAD_COLOR[0]
tile.color[1] = ROAD_COLOR[1]
tile.color[2] = ROAD_COLOR[2]
if not obj or "tiles" not in obj.__dict__:
return
if begin:
obj.tiles.add(tile)
if not tile.road_visited:
tile.road_visited = True
self.env.reward += 1000.0 / len(self.env.track)
self.env.tile_visited_count += 1
else:
obj.tiles.remove(tile)
class CarRacing(gym.Env, EzPickle):
metadata = {
"render.modes": ["human", "rgb_array", "state_pixels"],
"video.frames_per_second": FPS,
}
def __init__(self, verbose=1):
EzPickle.__init__(self)
self.seed()
self.contactListener_keepref = FrictionDetector(self)
self.world = Box2D.b2World((0, 0), contactListener=self.contactListener_keepref)
self.viewer = None
self.invisible_state_window = None
self.invisible_video_window = None
self.road = None
self.car = None
self.reward = 0.0
self.prev_reward = 0.0
self.verbose = verbose
self.fd_tile = fixtureDef(
shape=polygonShape(vertices=[(0, 0), (1, 0), (1, -1), (0, -1)])
)
self.action_space = spaces.Box(
np.array([-1, 0, 0]).astype(np.float32),
np.array([+1, +1, +1]).astype(np.float32),
) # steer, gas, brake
self.observation_space = spaces.Box(
low=0, high=255, shape=(STATE_H, STATE_W, 3), dtype=np.uint8
)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _destroy(self):
if not self.road:
return
for t in self.road:
self.world.DestroyBody(t)
self.road = []
self.car.destroy()
def _create_track(self):
CHECKPOINTS = 12
# Create checkpoints
checkpoints = []
for c in range(CHECKPOINTS):
noise = self.np_random.uniform(0, 2 * math.pi * 1 / CHECKPOINTS)
alpha = 2 * math.pi * c / CHECKPOINTS + noise
rad = self.np_random.uniform(TRACK_RAD / 3, TRACK_RAD)
if c == 0:
alpha = 0
rad = 1.5 * TRACK_RAD
if c == CHECKPOINTS - 1:
alpha = 2 * math.pi * c / CHECKPOINTS
self.start_alpha = 2 * math.pi * (-0.5) / CHECKPOINTS
rad = 1.5 * TRACK_RAD
checkpoints.append((alpha, rad * math.cos(alpha), rad * math.sin(alpha)))
self.road = []
# Go from one checkpoint to another to create track
x, y, beta = 1.5 * TRACK_RAD, 0, 0
dest_i = 0
laps = 0
track = []
no_freeze = 2500
visited_other_side = False
while True:
alpha = math.atan2(y, x)
if visited_other_side and alpha > 0:
laps += 1
visited_other_side = False
if alpha < 0:
visited_other_side = True
alpha += 2 * math.pi
while True: # Find destination from checkpoints
failed = True
while True:
dest_alpha, dest_x, dest_y = checkpoints[dest_i % len(checkpoints)]
if alpha <= dest_alpha:
failed = False
break
dest_i += 1
if dest_i % len(checkpoints) == 0:
break
if not failed:
break
alpha -= 2 * math.pi
continue
r1x = math.cos(beta)
r1y = math.sin(beta)
p1x = -r1y
p1y = r1x
dest_dx = dest_x - x # vector towards destination
dest_dy = dest_y - y
# destination vector projected on rad:
proj = r1x * dest_dx + r1y * dest_dy
while beta - alpha > 1.5 * math.pi:
beta -= 2 * math.pi
while beta - alpha < -1.5 * math.pi:
beta += 2 * math.pi
prev_beta = beta
proj *= SCALE
if proj > 0.3:
beta -= min(TRACK_TURN_RATE, abs(0.001 * proj))
if proj < -0.3:
beta += min(TRACK_TURN_RATE, abs(0.001 * proj))
x += p1x * TRACK_DETAIL_STEP
y += p1y * TRACK_DETAIL_STEP
track.append((alpha, prev_beta * 0.5 + beta * 0.5, x, y))
if laps > 4:
break
no_freeze -= 1
if no_freeze == 0:
break
# Find closed loop range i1..i2, first loop should be ignored, second is OK
i1, i2 = -1, -1
i = len(track)
while True:
i -= 1
if i == 0:
return False # Failed
pass_through_start = (
track[i][0] > self.start_alpha and track[i - 1][0] <= self.start_alpha
)
if pass_through_start and i2 == -1:
i2 = i
elif pass_through_start and i1 == -1:
i1 = i
break
if self.verbose == 1:
print("Track generation: %i..%i -> %i-tiles track" % (i1, i2, i2 - i1))
assert i1 != -1
assert i2 != -1
track = track[i1 : i2 - 1]
first_beta = track[0][1]
first_perp_x = math.cos(first_beta)
first_perp_y = math.sin(first_beta)
# Length of perpendicular jump to put together head and tail
well_glued_together = np.sqrt(
np.square(first_perp_x * (track[0][2] - track[-1][2]))
+ np.square(first_perp_y * (track[0][3] - track[-1][3]))
)
if well_glued_together > TRACK_DETAIL_STEP:
return False
# Red-white border on hard turns
border = [False] * len(track)
for i in range(len(track)):
good = True
oneside = 0
for neg in range(BORDER_MIN_COUNT):
beta1 = track[i - neg - 0][1]
beta2 = track[i - neg - 1][1]
good &= abs(beta1 - beta2) > TRACK_TURN_RATE * 0.2
oneside += np.sign(beta1 - beta2)
good &= abs(oneside) == BORDER_MIN_COUNT
border[i] = good
for i in range(len(track)):
for neg in range(BORDER_MIN_COUNT):
border[i - neg] |= border[i]
# Create tiles
for i in range(len(track)):
alpha1, beta1, x1, y1 = track[i]
alpha2, beta2, x2, y2 = track[i - 1]
road1_l = (
x1 - TRACK_WIDTH * math.cos(beta1),
y1 - TRACK_WIDTH * math.sin(beta1),
)
road1_r = (
x1 + TRACK_WIDTH * math.cos(beta1),
y1 + TRACK_WIDTH * math.sin(beta1),
)
road2_l = (
x2 - TRACK_WIDTH * math.cos(beta2),
y2 - TRACK_WIDTH * math.sin(beta2),
)
road2_r = (
x2 + TRACK_WIDTH * math.cos(beta2),
y2 + TRACK_WIDTH * math.sin(beta2),
)
vertices = [road1_l, road1_r, road2_r, road2_l]
self.fd_tile.shape.vertices = vertices
t = self.world.CreateStaticBody(fixtures=self.fd_tile)
t.userData = t
c = 0.01 * (i % 3)
t.color = [ROAD_COLOR[0] + c, ROAD_COLOR[1] + c, ROAD_COLOR[2] + c]
t.road_visited = False
t.road_friction = 1.0
t.fixtures[0].sensor = True
self.road_poly.append(([road1_l, road1_r, road2_r, road2_l], t.color))
self.road.append(t)
if border[i]:
side = np.sign(beta2 - beta1)
b1_l = (
x1 + side * TRACK_WIDTH * math.cos(beta1),
y1 + side * TRACK_WIDTH * math.sin(beta1),
)
b1_r = (
x1 + side * (TRACK_WIDTH + BORDER) * math.cos(beta1),
y1 + side * (TRACK_WIDTH + BORDER) * math.sin(beta1),
)
b2_l = (
x2 + side * TRACK_WIDTH * math.cos(beta2),
y2 + side * TRACK_WIDTH * math.sin(beta2),
)
b2_r = (
x2 + side * (TRACK_WIDTH + BORDER) * math.cos(beta2),
y2 + side * (TRACK_WIDTH + BORDER) * math.sin(beta2),
)
self.road_poly.append(
([b1_l, b1_r, b2_r, b2_l], (1, 1, 1) if i % 2 == 0 else (1, 0, 0))
)
self.track = track
return True
def reset(self):
self._destroy()
self.reward = 0.0
self.prev_reward = 0.0
self.tile_visited_count = 0
self.t = 0.0
self.road_poly = []
while True:
success = self._create_track()
if success:
break
if self.verbose == 1:
print(
"retry to generate track (normal if there are not many"
"instances of this message)"
)
self.car = Car(self.world, *self.track[0][1:4])
return self.step(None)[0]
def step(self, action):
if action is not None:
self.car.steer(-action[0])
self.car.gas(action[1])
self.car.brake(action[2])
self.car.step(1.0 / FPS)
self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)
self.t += 1.0 / FPS
self.state = self.render("state_pixels")
step_reward = 0
done = False
if action is not None: # First step without action, called from reset()
self.reward -= 0.1
# We actually don't want to count fuel spent, we want car to be faster.
# self.reward -= 10 * self.car.fuel_spent / ENGINE_POWER
self.car.fuel_spent = 0.0
step_reward = self.reward - self.prev_reward
self.prev_reward = self.reward
if self.tile_visited_count == len(self.track):
done = True
x, y = self.car.hull.position
if abs(x) > PLAYFIELD or abs(y) > PLAYFIELD:
done = True
step_reward = -100
return self.state, step_reward, done, {}
def render(self, mode="human"):
assert mode in ["human", "state_pixels", "rgb_array"]
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(WINDOW_W, WINDOW_H)
self.score_label = pyglet.text.Label(
"0000",
font_size=36,
x=20,
y=WINDOW_H * 2.5 / 40.00,
anchor_x="left",
anchor_y="center",
color=(255, 255, 255, 255),
)
self.transform = rendering.Transform()
if "t" not in self.__dict__:
return # reset() not called yet
# Animate zoom first second:
zoom = 0.1 * SCALE * max(1 - self.t, 0) + ZOOM * SCALE * min(self.t, 1)
scroll_x = self.car.hull.position[0]
scroll_y = self.car.hull.position[1]
angle = -self.car.hull.angle
vel = self.car.hull.linearVelocity
if np.linalg.norm(vel) > 0.5:
angle = math.atan2(vel[0], vel[1])
self.transform.set_scale(zoom, zoom)
self.transform.set_translation(
WINDOW_W / 2
- (scroll_x * zoom * math.cos(angle) - scroll_y * zoom * math.sin(angle)),
WINDOW_H / 4
- (scroll_x * zoom * math.sin(angle) + scroll_y * zoom * math.cos(angle)),
)
self.transform.set_rotation(angle)
self.car.draw(self.viewer, mode != "state_pixels")
arr = None
win = self.viewer.window
win.switch_to()
win.dispatch_events()
win.clear()
t = self.transform
if mode == "rgb_array":
VP_W = VIDEO_W
VP_H = VIDEO_H
elif mode == "state_pixels":
VP_W = STATE_W
VP_H = STATE_H
else:
pixel_scale = 1
if hasattr(win.context, "_nscontext"):
pixel_scale = (
win.context._nscontext.view().backingScaleFactor()
) # pylint: disable=protected-access
VP_W = int(pixel_scale * WINDOW_W)
VP_H = int(pixel_scale * WINDOW_H)
gl.glViewport(0, 0, VP_W, VP_H)
t.enable()
self.render_road()
for geom in self.viewer.onetime_geoms:
geom.render()
self.viewer.onetime_geoms = []
t.disable()
self.render_indicators(WINDOW_W, WINDOW_H)
if mode == "human":
win.flip()
return self.viewer.isopen
image_data = (
pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
)
arr = np.fromstring(image_data.get_data(), dtype=np.uint8, sep="")
arr = arr.reshape(VP_H, VP_W, 4)
arr = arr[::-1, :, 0:3]
return arr
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
def render_road(self):
colors = [0.4, 0.8, 0.4, 1.0] * 4
polygons_ = [
+PLAYFIELD,
+PLAYFIELD,
0,
+PLAYFIELD,
-PLAYFIELD,
0,
-PLAYFIELD,
-PLAYFIELD,
0,
-PLAYFIELD,
+PLAYFIELD,
0,
]
k = PLAYFIELD / 20.0
colors.extend([0.4, 0.9, 0.4, 1.0] * 4 * 20 * 20)
for x in range(-20, 20, 2):
for y in range(-20, 20, 2):
polygons_.extend(
[
k * x + k,
k * y + 0,
0,
k * x + 0,
k * y + 0,
0,
k * x + 0,
k * y + k,
0,
k * x + k,
k * y + k,
0,
]
)
for poly, color in self.road_poly:
colors.extend([color[0], color[1], color[2], 1] * len(poly))
for p in poly:
polygons_.extend([p[0], p[1], 0])
vl = pyglet.graphics.vertex_list(
len(polygons_) // 3, ("v3f", polygons_), ("c4f", colors)
) # gl.GL_QUADS,
vl.draw(gl.GL_QUADS)
vl.delete()
def render_indicators(self, W, H):
s = W / 40.0
h = H / 40.0
colors = [0, 0, 0, 1] * 4
polygons = [W, 0, 0, W, 5 * h, 0, 0, 5 * h, 0, 0, 0, 0]
def vertical_ind(place, val, color):
colors.extend([color[0], color[1], color[2], 1] * 4)
polygons.extend(
[
place * s,
h + h * val,
0,
(place + 1) * s,
h + h * val,
0,
(place + 1) * s,
h,
0,
(place + 0) * s,
h,
0,
]
)
def horiz_ind(place, val, color):
colors.extend([color[0], color[1], color[2], 1] * 4)
polygons.extend(
[
(place + 0) * s,
4 * h,
0,
(place + val) * s,
4 * h,
0,
(place + val) * s,
2 * h,
0,
(place + 0) * s,
2 * h,
0,
]
)
true_speed = np.sqrt(
np.square(self.car.hull.linearVelocity[0])
+ np.square(self.car.hull.linearVelocity[1])
)
vertical_ind(5, 0.02 * true_speed, (1, 1, 1))
vertical_ind(7, 0.01 * self.car.wheels[0].omega, (0.0, 0, 1)) # ABS sensors
vertical_ind(8, 0.01 * self.car.wheels[1].omega, (0.0, 0, 1))
vertical_ind(9, 0.01 * self.car.wheels[2].omega, (0.2, 0, 1))
vertical_ind(10, 0.01 * self.car.wheels[3].omega, (0.2, 0, 1))
horiz_ind(20, -10.0 * self.car.wheels[0].joint.angle, (0, 1, 0))
horiz_ind(30, -0.8 * self.car.hull.angularVelocity, (1, 0, 0))
vl = pyglet.graphics.vertex_list(
len(polygons) // 3, ("v3f", polygons), ("c4f", colors)
) # gl.GL_QUADS,
vl.draw(gl.GL_QUADS)
vl.delete()
self.score_label.text = "%04i" % self.reward
self.score_label.draw()
if __name__ == "__main__":
from pyglet.window import key
a = np.array([0.0, 0.0, 0.0])
def key_press(k, mod):
global restart
if k == 0xFF0D:
restart = True
if k == key.LEFT:
a[0] = -1.0
if k == key.RIGHT:
a[0] = +1.0
if k == key.UP:
a[1] = +1.0
if k == key.DOWN:
a[2] = +0.8 # set 1.0 for wheels to block to zero rotation
def key_release(k, mod):
if k == key.LEFT and a[0] == -1.0:
a[0] = 0
if k == key.RIGHT and a[0] == +1.0:
a[0] = 0
if k == key.UP:
a[1] = 0
if k == key.DOWN:
a[2] = 0
env = CarRacing()
env.render()
env.viewer.window.on_key_press = key_press
env.viewer.window.on_key_release = key_release
record_video = False
if record_video:
from gym.wrappers.monitor import Monitor
env = Monitor(env, "/tmp/video-test", force=True)
isopen = True
while isopen:
env.reset()
total_reward = 0.0
steps = 0
restart = False
while True:
s, r, done, info = env.step(a)
total_reward += r
if steps % 200 == 0 or done:
print("\naction " + str(["{:+0.2f}".format(x) for x in a]))
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
steps += 1
isopen = env.render()
if done or restart or isopen == False:
break
env.close()
| 21,173 | Python | .py | 570 | 25.468421 | 88 | 0.493494 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,333 | test_lunar_lander.py | WindyLab_Gym-PPS/gym/envs/box2d/test_lunar_lander.py | import pytest
try:
import Box2D
from .lunar_lander import LunarLander, LunarLanderContinuous, demo_heuristic_lander
except ImportError:
Box2D = None
@pytest.mark.skipif(Box2D is None, reason="Box2D not installed")
def test_lunar_lander():
_test_lander(LunarLander(), seed=0)
@pytest.mark.skipif(Box2D is None, reason="Box2D not installed")
def test_lunar_lander_continuous():
_test_lander(LunarLanderContinuous(), seed=0)
@pytest.mark.skipif(Box2D is None, reason="Box2D not installed")
def _test_lander(env, seed=None, render=False):
total_reward = demo_heuristic_lander(env, seed=seed, render=render)
assert total_reward > 100
| 664 | Python | .py | 16 | 38.3125 | 87 | 0.764431 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,334 | car_dynamics.py | WindyLab_Gym-PPS/gym/envs/box2d/car_dynamics.py | """
Top-down car dynamics simulation.
Some ideas are taken from this great tutorial http://www.iforce2d.net/b2dtut/top-down-car by Chris Campbell.
This simulation is a bit more detailed, with wheels rotation.
Created by Oleg Klimov. Licensed on the same terms as the rest of OpenAI Gym.
"""
import numpy as np
import math
import Box2D
from Box2D.b2 import (
edgeShape,
circleShape,
fixtureDef,
polygonShape,
revoluteJointDef,
contactListener,
shape,
)
SIZE = 0.02
ENGINE_POWER = 100000000 * SIZE * SIZE
WHEEL_MOMENT_OF_INERTIA = 4000 * SIZE * SIZE
FRICTION_LIMIT = (
1000000 * SIZE * SIZE
) # friction ~= mass ~= size^2 (calculated implicitly using density)
WHEEL_R = 27
WHEEL_W = 14
WHEELPOS = [(-55, +80), (+55, +80), (-55, -82), (+55, -82)]
HULL_POLY1 = [(-60, +130), (+60, +130), (+60, +110), (-60, +110)]
HULL_POLY2 = [(-15, +120), (+15, +120), (+20, +20), (-20, 20)]
HULL_POLY3 = [
(+25, +20),
(+50, -10),
(+50, -40),
(+20, -90),
(-20, -90),
(-50, -40),
(-50, -10),
(-25, +20),
]
HULL_POLY4 = [(-50, -120), (+50, -120), (+50, -90), (-50, -90)]
WHEEL_COLOR = (0.0, 0.0, 0.0)
WHEEL_WHITE = (0.3, 0.3, 0.3)
MUD_COLOR = (0.4, 0.4, 0.0)
class Car:
def __init__(self, world, init_angle, init_x, init_y):
self.world = world
self.hull = self.world.CreateDynamicBody(
position=(init_x, init_y),
angle=init_angle,
fixtures=[
fixtureDef(
shape=polygonShape(
vertices=[(x * SIZE, y * SIZE) for x, y in HULL_POLY1]
),
density=1.0,
),
fixtureDef(
shape=polygonShape(
vertices=[(x * SIZE, y * SIZE) for x, y in HULL_POLY2]
),
density=1.0,
),
fixtureDef(
shape=polygonShape(
vertices=[(x * SIZE, y * SIZE) for x, y in HULL_POLY3]
),
density=1.0,
),
fixtureDef(
shape=polygonShape(
vertices=[(x * SIZE, y * SIZE) for x, y in HULL_POLY4]
),
density=1.0,
),
],
)
self.hull.color = (0.8, 0.0, 0.0)
self.wheels = []
self.fuel_spent = 0.0
WHEEL_POLY = [
(-WHEEL_W, +WHEEL_R),
(+WHEEL_W, +WHEEL_R),
(+WHEEL_W, -WHEEL_R),
(-WHEEL_W, -WHEEL_R),
]
for wx, wy in WHEELPOS:
front_k = 1.0 if wy > 0 else 1.0
w = self.world.CreateDynamicBody(
position=(init_x + wx * SIZE, init_y + wy * SIZE),
angle=init_angle,
fixtures=fixtureDef(
shape=polygonShape(
vertices=[
(x * front_k * SIZE, y * front_k * SIZE)
for x, y in WHEEL_POLY
]
),
density=0.1,
categoryBits=0x0020,
maskBits=0x001,
restitution=0.0,
),
)
w.wheel_rad = front_k * WHEEL_R * SIZE
w.color = WHEEL_COLOR
w.gas = 0.0
w.brake = 0.0
w.steer = 0.0
w.phase = 0.0 # wheel angle
w.omega = 0.0 # angular velocity
w.skid_start = None
w.skid_particle = None
rjd = revoluteJointDef(
bodyA=self.hull,
bodyB=w,
localAnchorA=(wx * SIZE, wy * SIZE),
localAnchorB=(0, 0),
enableMotor=True,
enableLimit=True,
maxMotorTorque=180 * 900 * SIZE * SIZE,
motorSpeed=0,
lowerAngle=-0.4,
upperAngle=+0.4,
)
w.joint = self.world.CreateJoint(rjd)
w.tiles = set()
w.userData = w
self.wheels.append(w)
self.drawlist = self.wheels + [self.hull]
self.particles = []
def gas(self, gas):
"""control: rear wheel drive
Args:
gas (float): How much gas gets applied. Gets clipped between 0 and 1.
"""
gas = np.clip(gas, 0, 1)
for w in self.wheels[2:4]:
diff = gas - w.gas
if diff > 0.1:
diff = 0.1 # gradually increase, but stop immediately
w.gas += diff
def brake(self, b):
"""control: brake
Args:
b (0..1): Degree to which the brakes are applied. More than 0.9 blocks the wheels to zero rotation"""
for w in self.wheels:
w.brake = b
def steer(self, s):
"""control: steer
Args:
s (-1..1): target position, it takes time to rotate steering wheel from side-to-side"""
self.wheels[0].steer = s
self.wheels[1].steer = s
def step(self, dt):
for w in self.wheels:
# Steer each wheel
dir = np.sign(w.steer - w.joint.angle)
val = abs(w.steer - w.joint.angle)
w.joint.motorSpeed = dir * min(50.0 * val, 3.0)
# Position => friction_limit
grass = True
friction_limit = FRICTION_LIMIT * 0.6 # Grass friction if no tile
for tile in w.tiles:
friction_limit = max(
friction_limit, FRICTION_LIMIT * tile.road_friction
)
grass = False
# Force
forw = w.GetWorldVector((0, 1))
side = w.GetWorldVector((1, 0))
v = w.linearVelocity
vf = forw[0] * v[0] + forw[1] * v[1] # forward speed
vs = side[0] * v[0] + side[1] * v[1] # side speed
# WHEEL_MOMENT_OF_INERTIA*np.square(w.omega)/2 = E -- energy
# WHEEL_MOMENT_OF_INERTIA*w.omega * domega/dt = dE/dt = W -- power
# domega = dt*W/WHEEL_MOMENT_OF_INERTIA/w.omega
# add small coef not to divide by zero
w.omega += (
dt
* ENGINE_POWER
* w.gas
/ WHEEL_MOMENT_OF_INERTIA
/ (abs(w.omega) + 5.0)
)
self.fuel_spent += dt * ENGINE_POWER * w.gas
if w.brake >= 0.9:
w.omega = 0
elif w.brake > 0:
BRAKE_FORCE = 15 # radians per second
dir = -np.sign(w.omega)
val = BRAKE_FORCE * w.brake
if abs(val) > abs(w.omega):
val = abs(w.omega) # low speed => same as = 0
w.omega += dir * val
w.phase += w.omega * dt
vr = w.omega * w.wheel_rad # rotating wheel speed
f_force = -vf + vr # force direction is direction of speed difference
p_force = -vs
# Physically correct is to always apply friction_limit until speed is equal.
# But dt is finite, that will lead to oscillations if difference is already near zero.
# Random coefficient to cut oscillations in few steps (have no effect on friction_limit)
f_force *= 205000 * SIZE * SIZE
p_force *= 205000 * SIZE * SIZE
force = np.sqrt(np.square(f_force) + np.square(p_force))
# Skid trace
if abs(force) > 2.0 * friction_limit:
if (
w.skid_particle
and w.skid_particle.grass == grass
and len(w.skid_particle.poly) < 30
):
w.skid_particle.poly.append((w.position[0], w.position[1]))
elif w.skid_start is None:
w.skid_start = w.position
else:
w.skid_particle = self._create_particle(
w.skid_start, w.position, grass
)
w.skid_start = None
else:
w.skid_start = None
w.skid_particle = None
if abs(force) > friction_limit:
f_force /= force
p_force /= force
force = friction_limit # Correct physics here
f_force *= force
p_force *= force
w.omega -= dt * f_force * w.wheel_rad / WHEEL_MOMENT_OF_INERTIA
w.ApplyForceToCenter(
(
p_force * side[0] + f_force * forw[0],
p_force * side[1] + f_force * forw[1],
),
True,
)
def draw(self, viewer, draw_particles=True):
if draw_particles:
for p in self.particles:
viewer.draw_polyline(p.poly, color=p.color, linewidth=5)
for obj in self.drawlist:
for f in obj.fixtures:
trans = f.body.transform
path = [trans * v for v in f.shape.vertices]
viewer.draw_polygon(path, color=obj.color)
if "phase" not in obj.__dict__:
continue
a1 = obj.phase
a2 = obj.phase + 1.2 # radians
s1 = math.sin(a1)
s2 = math.sin(a2)
c1 = math.cos(a1)
c2 = math.cos(a2)
if s1 > 0 and s2 > 0:
continue
if s1 > 0:
c1 = np.sign(c1)
if s2 > 0:
c2 = np.sign(c2)
white_poly = [
(-WHEEL_W * SIZE, +WHEEL_R * c1 * SIZE),
(+WHEEL_W * SIZE, +WHEEL_R * c1 * SIZE),
(+WHEEL_W * SIZE, +WHEEL_R * c2 * SIZE),
(-WHEEL_W * SIZE, +WHEEL_R * c2 * SIZE),
]
viewer.draw_polygon([trans * v for v in white_poly], color=WHEEL_WHITE)
def _create_particle(self, point1, point2, grass):
class Particle:
pass
p = Particle()
p.color = WHEEL_COLOR if not grass else MUD_COLOR
p.ttl = 1
p.poly = [(point1[0], point1[1]), (point2[0], point2[1])]
p.grass = grass
self.particles.append(p)
while len(self.particles) > 30:
self.particles.pop(0)
return p
def destroy(self):
self.world.DestroyBody(self.hull)
self.hull = None
for w in self.wheels:
self.world.DestroyBody(w)
self.wheels = []
| 10,672 | Python | .py | 284 | 24.447183 | 113 | 0.467227 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,335 | sync_vector_env.py | WindyLab_Gym-PPS/gym/vector/sync_vector_env.py | import numpy as np
from copy import deepcopy
from gym import logger
from gym.vector.vector_env import VectorEnv
from gym.vector.utils import concatenate, create_empty_array
__all__ = ["SyncVectorEnv"]
class SyncVectorEnv(VectorEnv):
"""Vectorized environment that serially runs multiple environments.
Parameters
----------
env_fns : iterable of callable
Functions that create the environments.
observation_space : `gym.spaces.Space` instance, optional
Observation space of a single environment. If `None`, then the
observation space of the first environment is taken.
action_space : `gym.spaces.Space` instance, optional
Action space of a single environment. If `None`, then the action space
of the first environment is taken.
copy : bool (default: `True`)
If `True`, then the `reset` and `step` methods return a copy of the
observations.
"""
def __init__(self, env_fns, observation_space=None, action_space=None, copy=True):
self.env_fns = env_fns
self.envs = [env_fn() for env_fn in env_fns]
self.copy = copy
self.metadata = self.envs[0].metadata
if (observation_space is None) or (action_space is None):
observation_space = observation_space or self.envs[0].observation_space
action_space = action_space or self.envs[0].action_space
super(SyncVectorEnv, self).__init__(
num_envs=len(env_fns),
observation_space=observation_space,
action_space=action_space,
)
self._check_observation_spaces()
self.observations = create_empty_array(
self.single_observation_space, n=self.num_envs, fn=np.zeros
)
self._rewards = np.zeros((self.num_envs,), dtype=np.float64)
self._dones = np.zeros((self.num_envs,), dtype=np.bool_)
self._actions = None
def seed(self, seeds=None):
if seeds is None:
seeds = [None for _ in range(self.num_envs)]
if isinstance(seeds, int):
seeds = [seeds + i for i in range(self.num_envs)]
assert len(seeds) == self.num_envs
for env, seed in zip(self.envs, seeds):
env.seed(seed)
def reset_wait(self):
self._dones[:] = False
observations = []
for env in self.envs:
observation = env.reset()
observations.append(observation)
self.observations = concatenate(
observations, self.observations, self.single_observation_space
)
return deepcopy(self.observations) if self.copy else self.observations
def step_async(self, actions):
self._actions = actions
def step_wait(self):
observations, infos = [], []
for i, (env, action) in enumerate(zip(self.envs, self._actions)):
observation, self._rewards[i], self._dones[i], info = env.step(action)
if self._dones[i]:
observation = env.reset()
observations.append(observation)
infos.append(info)
self.observations = concatenate(
observations, self.observations, self.single_observation_space
)
return (
deepcopy(self.observations) if self.copy else self.observations,
np.copy(self._rewards),
np.copy(self._dones),
infos,
)
def close_extras(self, **kwargs):
[env.close() for env in self.envs]
def _check_observation_spaces(self):
for env in self.envs:
if not (env.observation_space == self.single_observation_space):
break
else:
return True
raise RuntimeError(
"Some environments have an observation space "
"different from `{0}`. In order to batch observations, the "
"observation spaces from all environments must be "
"equal.".format(self.single_observation_space)
)
| 3,991 | Python | .py | 93 | 33.612903 | 86 | 0.62687 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,336 | vector_env.py | WindyLab_Gym-PPS/gym/vector/vector_env.py | import gym
from gym.spaces import Tuple
from gym.vector.utils.spaces import batch_space
__all__ = ["VectorEnv"]
class VectorEnv(gym.Env):
r"""Base class for vectorized environments.
Each observation returned from vectorized environment is a batch of observations
for each sub-environment. And :meth:`step` is also expected to receive a batch of
actions for each sub-environment.
.. note::
All sub-environments should share the identical observation and action spaces.
In other words, a vector of multiple different environments is not supported.
Parameters
----------
num_envs : int
Number of environments in the vectorized environment.
observation_space : `gym.spaces.Space` instance
Observation space of a single environment.
action_space : `gym.spaces.Space` instance
Action space of a single environment.
"""
def __init__(self, num_envs, observation_space, action_space):
super(VectorEnv, self).__init__()
self.num_envs = num_envs
self.is_vector_env = True
self.observation_space = batch_space(observation_space, n=num_envs)
self.action_space = Tuple((action_space,) * num_envs)
self.closed = False
self.viewer = None
# The observation and action spaces of a single environment are
# kept in separate properties
self.single_observation_space = observation_space
self.single_action_space = action_space
def reset_async(self):
pass
def reset_wait(self, **kwargs):
raise NotImplementedError()
def reset(self):
r"""Reset all sub-environments and return a batch of initial observations.
Returns
-------
observations : sample from `observation_space`
A batch of observations from the vectorized environment.
"""
self.reset_async()
return self.reset_wait()
def step_async(self, actions):
pass
def step_wait(self, **kwargs):
raise NotImplementedError()
def step(self, actions):
r"""Take an action for each sub-environments.
Parameters
----------
actions : iterable of samples from `action_space`
List of actions.
Returns
-------
observations : sample from `observation_space`
A batch of observations from the vectorized environment.
rewards : `np.ndarray` instance (dtype `np.float_`)
A vector of rewards from the vectorized environment.
dones : `np.ndarray` instance (dtype `np.bool_`)
A vector whose entries indicate whether the episode has ended.
infos : list of dict
A list of auxiliary diagnostic information dicts from sub-environments.
"""
self.step_async(actions)
return self.step_wait()
def close_extras(self, **kwargs):
r"""Clean up the extra resources e.g. beyond what's in this base class."""
raise NotImplementedError()
def close(self, **kwargs):
r"""Close all sub-environments and release resources.
It also closes all the existing image viewers, then calls :meth:`close_extras` and set
:attr:`closed` as ``True``.
.. warning::
This function itself does not close the environments, it should be handled
in :meth:`close_extras`. This is generic for both synchronous and asynchronous
vectorized environments.
.. note::
This will be automatically called when garbage collected or program exited.
"""
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras(**kwargs)
self.closed = True
def seed(self, seeds=None):
"""
Parameters
----------
seeds : list of int, or int, optional
Random seed for each individual environment. If `seeds` is a list of
length `num_envs`, then the items of the list are chosen as random
seeds. If `seeds` is an int, then each environment uses the random
seed `seeds + n`, where `n` is the index of the environment (between
`0` and `num_envs - 1`).
"""
pass
def __del__(self):
if not getattr(self, "closed", True):
self.close(terminate=True)
def __repr__(self):
if self.spec is None:
return "{}({})".format(self.__class__.__name__, self.num_envs)
else:
return "{}({}, {})".format(
self.__class__.__name__, self.spec.id, self.num_envs
)
class VectorEnvWrapper(VectorEnv):
r"""Wraps the vectorized environment to allow a modular transformation.
This class is the base class for all wrappers for vectorized environments. The subclass
could override some methods to change the behavior of the original vectorized environment
without touching the original code.
.. note::
Don't forget to call ``super().__init__(env)`` if the subclass overrides :meth:`__init__`.
"""
def __init__(self, env):
assert isinstance(env, VectorEnv)
self.env = env
# explicitly forward the methods defined in VectorEnv
# to self.env (instead of the base class)
def reset_async(self):
return self.env.reset_async()
def reset_wait(self):
return self.env.reset_wait()
def step_async(self, actions):
return self.env.step_async(actions)
def step_wait(self):
return self.env.step_wait()
def close(self, **kwargs):
return self.env.close(**kwargs)
def close_extras(self, **kwargs):
return self.env.close_extras(**kwargs)
def seed(self, seeds=None):
return self.env.seed(seeds)
# implicitly forward all other methods and attributes to self.env
def __getattr__(self, name):
if name.startswith("_"):
raise AttributeError(
"attempted to get missing private attribute '{}'".format(name)
)
return getattr(self.env, name)
@property
def unwrapped(self):
return self.env.unwrapped
def __repr__(self):
return "<{}, {}>".format(self.__class__.__name__, self.env)
| 6,317 | Python | .py | 150 | 33.533333 | 98 | 0.633628 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,337 | async_vector_env.py | WindyLab_Gym-PPS/gym/vector/async_vector_env.py | import numpy as np
import multiprocessing as mp
import time
import sys
from enum import Enum
from copy import deepcopy
from gym import logger
from gym.vector.vector_env import VectorEnv
from gym.error import (
AlreadyPendingCallError,
NoAsyncCallError,
ClosedEnvironmentError,
CustomSpaceError,
)
from gym.vector.utils import (
create_shared_memory,
create_empty_array,
write_to_shared_memory,
read_from_shared_memory,
concatenate,
CloudpickleWrapper,
clear_mpi_env_vars,
)
__all__ = ["AsyncVectorEnv"]
class AsyncState(Enum):
DEFAULT = "default"
WAITING_RESET = "reset"
WAITING_STEP = "step"
class AsyncVectorEnv(VectorEnv):
"""Vectorized environment that runs multiple environments in parallel. It
uses `multiprocessing` processes, and pipes for communication.
Parameters
----------
env_fns : iterable of callable
Functions that create the environments.
observation_space : `gym.spaces.Space` instance, optional
Observation space of a single environment. If `None`, then the
observation space of the first environment is taken.
action_space : `gym.spaces.Space` instance, optional
Action space of a single environment. If `None`, then the action space
of the first environment is taken.
shared_memory : bool (default: `True`)
If `True`, then the observations from the worker processes are
communicated back through shared variables. This can improve the
efficiency if the observations are large (e.g. images).
copy : bool (default: `True`)
If `True`, then the `reset` and `step` methods return a copy of the
observations.
context : str, optional
Context for multiprocessing. If `None`, then the default context is used.
Only available in Python 3.
daemon : bool (default: `True`)
If `True`, then subprocesses have `daemon` flag turned on; that is, they
will quit if the head process quits. However, `daemon=True` prevents
subprocesses to spawn children, so for some environments you may want
to have it set to `False`
worker : function, optional
WARNING - advanced mode option! If set, then use that worker in a subprocess
instead of a default one. Can be useful to override some inner vector env
logic, for instance, how resets on done are handled. Provides high
degree of flexibility and a high chance to shoot yourself in the foot; thus,
if you are writing your own worker, it is recommended to start from the code
for `_worker` (or `_worker_shared_memory`) method below, and add changes
"""
def __init__(
self,
env_fns,
observation_space=None,
action_space=None,
shared_memory=True,
copy=True,
context=None,
daemon=True,
worker=None,
):
ctx = mp.get_context(context)
self.env_fns = env_fns
self.shared_memory = shared_memory
self.copy = copy
dummy_env = env_fns[0]()
self.metadata = dummy_env.metadata
if (observation_space is None) or (action_space is None):
observation_space = observation_space or dummy_env.observation_space
action_space = action_space or dummy_env.action_space
dummy_env.close()
del dummy_env
super(AsyncVectorEnv, self).__init__(
num_envs=len(env_fns),
observation_space=observation_space,
action_space=action_space,
)
if self.shared_memory:
try:
_obs_buffer = create_shared_memory(
self.single_observation_space, n=self.num_envs, ctx=ctx
)
self.observations = read_from_shared_memory(
_obs_buffer, self.single_observation_space, n=self.num_envs
)
except CustomSpaceError:
raise ValueError(
"Using `shared_memory=True` in `AsyncVectorEnv` "
"is incompatible with non-standard Gym observation spaces "
"(i.e. custom spaces inheriting from `gym.Space`), and is "
"only compatible with default Gym spaces (e.g. `Box`, "
"`Tuple`, `Dict`) for batching. Set `shared_memory=False` "
"if you use custom observation spaces."
)
else:
_obs_buffer = None
self.observations = create_empty_array(
self.single_observation_space, n=self.num_envs, fn=np.zeros
)
self.parent_pipes, self.processes = [], []
self.error_queue = ctx.Queue()
target = _worker_shared_memory if self.shared_memory else _worker
target = worker or target
with clear_mpi_env_vars():
for idx, env_fn in enumerate(self.env_fns):
parent_pipe, child_pipe = ctx.Pipe()
process = ctx.Process(
target=target,
name="Worker<{0}>-{1}".format(type(self).__name__, idx),
args=(
idx,
CloudpickleWrapper(env_fn),
child_pipe,
parent_pipe,
_obs_buffer,
self.error_queue,
),
)
self.parent_pipes.append(parent_pipe)
self.processes.append(process)
process.daemon = daemon
process.start()
child_pipe.close()
self._state = AsyncState.DEFAULT
self._check_observation_spaces()
def seed(self, seeds=None):
self._assert_is_running()
if seeds is None:
seeds = [None for _ in range(self.num_envs)]
if isinstance(seeds, int):
seeds = [seeds + i for i in range(self.num_envs)]
assert len(seeds) == self.num_envs
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError(
"Calling `seed` while waiting "
"for a pending call to `{0}` to complete.".format(self._state.value),
self._state.value,
)
for pipe, seed in zip(self.parent_pipes, seeds):
pipe.send(("seed", seed))
_, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
def reset_async(self):
self._assert_is_running()
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError(
"Calling `reset_async` while waiting "
"for a pending call to `{0}` to complete".format(self._state.value),
self._state.value,
)
for pipe in self.parent_pipes:
pipe.send(("reset", None))
self._state = AsyncState.WAITING_RESET
def reset_wait(self, timeout=None):
"""
Parameters
----------
timeout : int or float, optional
Number of seconds before the call to `reset_wait` times out. If
`None`, the call to `reset_wait` never times out.
Returns
-------
observations : sample from `observation_space`
A batch of observations from the vectorized environment.
"""
self._assert_is_running()
if self._state != AsyncState.WAITING_RESET:
raise NoAsyncCallError(
"Calling `reset_wait` without any prior " "call to `reset_async`.",
AsyncState.WAITING_RESET.value,
)
if not self._poll(timeout):
self._state = AsyncState.DEFAULT
raise mp.TimeoutError(
"The call to `reset_wait` has timed out after "
"{0} second{1}.".format(timeout, "s" if timeout > 1 else "")
)
results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
self._state = AsyncState.DEFAULT
if not self.shared_memory:
self.observations = concatenate(
results, self.observations, self.single_observation_space
)
return deepcopy(self.observations) if self.copy else self.observations
def step_async(self, actions):
"""
Parameters
----------
actions : iterable of samples from `action_space`
List of actions.
"""
self._assert_is_running()
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError(
"Calling `step_async` while waiting "
"for a pending call to `{0}` to complete.".format(self._state.value),
self._state.value,
)
for pipe, action in zip(self.parent_pipes, actions):
pipe.send(("step", action))
self._state = AsyncState.WAITING_STEP
def step_wait(self, timeout=None):
"""
Parameters
----------
timeout : int or float, optional
Number of seconds before the call to `step_wait` times out. If
`None`, the call to `step_wait` never times out.
Returns
-------
observations : sample from `observation_space`
A batch of observations from the vectorized environment.
rewards : `np.ndarray` instance (dtype `np.float_`)
A vector of rewards from the vectorized environment.
dones : `np.ndarray` instance (dtype `np.bool_`)
A vector whose entries indicate whether the episode has ended.
infos : list of dict
A list of auxiliary diagnostic information.
"""
self._assert_is_running()
if self._state != AsyncState.WAITING_STEP:
raise NoAsyncCallError(
"Calling `step_wait` without any prior call " "to `step_async`.",
AsyncState.WAITING_STEP.value,
)
if not self._poll(timeout):
self._state = AsyncState.DEFAULT
raise mp.TimeoutError(
"The call to `step_wait` has timed out after "
"{0} second{1}.".format(timeout, "s" if timeout > 1 else "")
)
results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
self._state = AsyncState.DEFAULT
observations_list, rewards, dones, infos = zip(*results)
if not self.shared_memory:
self.observations = concatenate(
observations_list, self.observations, self.single_observation_space
)
return (
deepcopy(self.observations) if self.copy else self.observations,
np.array(rewards),
np.array(dones, dtype=np.bool_),
infos,
)
def close_extras(self, timeout=None, terminate=False):
"""
Parameters
----------
timeout : int or float, optional
Number of seconds before the call to `close` times out. If `None`,
the call to `close` never times out. If the call to `close` times
out, then all processes are terminated.
terminate : bool (default: `False`)
If `True`, then the `close` operation is forced and all processes
are terminated.
"""
timeout = 0 if terminate else timeout
try:
if self._state != AsyncState.DEFAULT:
logger.warn(
"Calling `close` while waiting for a pending "
"call to `{0}` to complete.".format(self._state.value)
)
function = getattr(self, "{0}_wait".format(self._state.value))
function(timeout)
except mp.TimeoutError:
terminate = True
if terminate:
for process in self.processes:
if process.is_alive():
process.terminate()
else:
for pipe in self.parent_pipes:
if (pipe is not None) and (not pipe.closed):
pipe.send(("close", None))
for pipe in self.parent_pipes:
if (pipe is not None) and (not pipe.closed):
pipe.recv()
for pipe in self.parent_pipes:
if pipe is not None:
pipe.close()
for process in self.processes:
process.join()
def _poll(self, timeout=None):
self._assert_is_running()
if timeout is None:
return True
end_time = time.time() + timeout
delta = None
for pipe in self.parent_pipes:
delta = max(end_time - time.time(), 0)
if pipe is None:
return False
if pipe.closed or (not pipe.poll(delta)):
return False
return True
def _check_observation_spaces(self):
self._assert_is_running()
for pipe in self.parent_pipes:
pipe.send(("_check_observation_space", self.single_observation_space))
same_spaces, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
if not all(same_spaces):
raise RuntimeError(
"Some environments have an observation space "
"different from `{0}`. In order to batch observations, the "
"observation spaces from all environments must be "
"equal.".format(self.single_observation_space)
)
def _assert_is_running(self):
if self.closed:
raise ClosedEnvironmentError(
"Trying to operate on `{0}`, after a "
"call to `close()`.".format(type(self).__name__)
)
def _raise_if_errors(self, successes):
if all(successes):
return
num_errors = self.num_envs - sum(successes)
assert num_errors > 0
for _ in range(num_errors):
index, exctype, value = self.error_queue.get()
logger.error(
"Received the following error from Worker-{0}: "
"{1}: {2}".format(index, exctype.__name__, value)
)
logger.error("Shutting down Worker-{0}.".format(index))
self.parent_pipes[index].close()
self.parent_pipes[index] = None
logger.error("Raising the last exception back to the main process.")
raise exctype(value)
def _worker(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
assert shared_memory is None
env = env_fn()
parent_pipe.close()
try:
while True:
command, data = pipe.recv()
if command == "reset":
observation = env.reset()
pipe.send((observation, True))
elif command == "step":
observation, reward, done, info = env.step(data)
if done:
observation = env.reset()
pipe.send(((observation, reward, done, info), True))
elif command == "seed":
env.seed(data)
pipe.send((None, True))
elif command == "close":
pipe.send((None, True))
break
elif command == "_check_observation_space":
pipe.send((data == env.observation_space, True))
else:
raise RuntimeError(
"Received unknown command `{0}`. Must "
"be one of {`reset`, `step`, `seed`, `close`, "
"`_check_observation_space`}.".format(command)
)
except (KeyboardInterrupt, Exception):
error_queue.put((index,) + sys.exc_info()[:2])
pipe.send((None, False))
finally:
env.close()
def _worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
assert shared_memory is not None
env = env_fn()
observation_space = env.observation_space
parent_pipe.close()
try:
while True:
command, data = pipe.recv()
if command == "reset":
observation = env.reset()
write_to_shared_memory(
index, observation, shared_memory, observation_space
)
pipe.send((None, True))
elif command == "step":
observation, reward, done, info = env.step(data)
if done:
observation = env.reset()
write_to_shared_memory(
index, observation, shared_memory, observation_space
)
pipe.send(((None, reward, done, info), True))
elif command == "seed":
env.seed(data)
pipe.send((None, True))
elif command == "close":
pipe.send((None, True))
break
elif command == "_check_observation_space":
pipe.send((data == observation_space, True))
else:
raise RuntimeError(
"Received unknown command `{0}`. Must "
"be one of {`reset`, `step`, `seed`, `close`, "
"`_check_observation_space`}.".format(command)
)
except (KeyboardInterrupt, Exception):
error_queue.put((index,) + sys.exc_info()[:2])
pipe.send((None, False))
finally:
env.close()
| 17,588 | Python | .py | 422 | 29.973934 | 88 | 0.565492 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,338 | __init__.py | WindyLab_Gym-PPS/gym/vector/__init__.py | try:
from collections.abc import Iterable
except ImportError:
Iterable = (tuple, list)
from gym.vector.async_vector_env import AsyncVectorEnv
from gym.vector.sync_vector_env import SyncVectorEnv
from gym.vector.vector_env import VectorEnv, VectorEnvWrapper
__all__ = ["AsyncVectorEnv", "SyncVectorEnv", "VectorEnv", "VectorEnvWrapper", "make"]
def make(id, num_envs=1, asynchronous=True, wrappers=None, **kwargs):
"""Create a vectorized environment from multiple copies of an environment,
from its id
Parameters
----------
id : str
The environment ID. This must be a valid ID from the registry.
num_envs : int
Number of copies of the environment.
asynchronous : bool (default: `True`)
If `True`, wraps the environments in an `AsyncVectorEnv` (which uses
`multiprocessing` to run the environments in parallel). If `False`,
wraps the environments in a `SyncVectorEnv`.
wrappers : Callable or Iterable of Callables (default: `None`)
If not `None`, then apply the wrappers to each internal
environment during creation.
Returns
-------
env : `gym.vector.VectorEnv` instance
The vectorized environment.
Example
-------
>>> import gym
>>> env = gym.vector.make('CartPole-v1', 3)
>>> env.reset()
array([[-0.04456399, 0.04653909, 0.01326909, -0.02099827],
[ 0.03073904, 0.00145001, -0.03088818, -0.03131252],
[ 0.03468829, 0.01500225, 0.01230312, 0.01825218]],
dtype=float32)
"""
from gym.envs import make as make_
def _make_env():
env = make_(id, **kwargs)
if wrappers is not None:
if callable(wrappers):
env = wrappers(env)
elif isinstance(wrappers, Iterable) and all(
[callable(w) for w in wrappers]
):
for wrapper in wrappers:
env = wrapper(env)
else:
raise NotImplementedError
return env
env_fns = [_make_env for _ in range(num_envs)]
return AsyncVectorEnv(env_fns) if asynchronous else SyncVectorEnv(env_fns)
| 2,172 | Python | .py | 54 | 32.555556 | 86 | 0.637702 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,339 | spaces.py | WindyLab_Gym-PPS/gym/vector/utils/spaces.py | import numpy as np
from collections import OrderedDict
from gym.spaces import Space, Box, Discrete, MultiDiscrete, MultiBinary, Tuple, Dict
_BaseGymSpaces = (Box, Discrete, MultiDiscrete, MultiBinary)
__all__ = ["_BaseGymSpaces", "batch_space"]
def batch_space(space, n=1):
"""Create a (batched) space, containing multiple copies of a single space.
Parameters
----------
space : `gym.spaces.Space` instance
Space (e.g. the observation space) for a single environment in the
vectorized environment.
n : int
Number of environments in the vectorized environment.
Returns
-------
batched_space : `gym.spaces.Space` instance
Space (e.g. the observation space) for a batch of environments in the
vectorized environment.
Example
-------
>>> from gym.spaces import Box, Dict
>>> space = Dict({
... 'position': Box(low=0, high=1, shape=(3,), dtype=np.float32),
... 'velocity': Box(low=0, high=1, shape=(2,), dtype=np.float32)})
>>> batch_space(space, n=5)
Dict(position:Box(5, 3), velocity:Box(5, 2))
"""
if isinstance(space, _BaseGymSpaces):
return batch_space_base(space, n=n)
elif isinstance(space, Tuple):
return batch_space_tuple(space, n=n)
elif isinstance(space, Dict):
return batch_space_dict(space, n=n)
elif isinstance(space, Space):
return batch_space_custom(space, n=n)
else:
raise ValueError(
"Cannot batch space with type `{0}`. The space must "
"be a valid `gym.Space` instance.".format(type(space))
)
def batch_space_base(space, n=1):
if isinstance(space, Box):
repeats = tuple([n] + [1] * space.low.ndim)
low, high = np.tile(space.low, repeats), np.tile(space.high, repeats)
return Box(low=low, high=high, dtype=space.dtype)
elif isinstance(space, Discrete):
return MultiDiscrete(np.full((n,), space.n, dtype=space.dtype))
elif isinstance(space, MultiDiscrete):
repeats = tuple([n] + [1] * space.nvec.ndim)
high = np.tile(space.nvec, repeats) - 1
return Box(low=np.zeros_like(high), high=high, dtype=space.dtype)
elif isinstance(space, MultiBinary):
return Box(low=0, high=1, shape=(n,) + space.shape, dtype=space.dtype)
else:
raise ValueError("Space type `{0}` is not supported.".format(type(space)))
def batch_space_tuple(space, n=1):
return Tuple(tuple(batch_space(subspace, n=n) for subspace in space.spaces))
def batch_space_dict(space, n=1):
return Dict(
OrderedDict(
[
(key, batch_space(subspace, n=n))
for (key, subspace) in space.spaces.items()
]
)
)
def batch_space_custom(space, n=1):
return Tuple(tuple(space for _ in range(n)))
| 2,849 | Python | .py | 69 | 34.492754 | 84 | 0.639855 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,340 | __init__.py | WindyLab_Gym-PPS/gym/vector/utils/__init__.py | from gym.vector.utils.misc import CloudpickleWrapper, clear_mpi_env_vars
from gym.vector.utils.numpy_utils import concatenate, create_empty_array
from gym.vector.utils.shared_memory import (
create_shared_memory,
read_from_shared_memory,
write_to_shared_memory,
)
from gym.vector.utils.spaces import _BaseGymSpaces, batch_space
__all__ = [
"CloudpickleWrapper",
"clear_mpi_env_vars",
"concatenate",
"create_empty_array",
"create_shared_memory",
"read_from_shared_memory",
"write_to_shared_memory",
"_BaseGymSpaces",
"batch_space",
]
| 582 | Python | .py | 19 | 27.052632 | 72 | 0.731317 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,341 | numpy_utils.py | WindyLab_Gym-PPS/gym/vector/utils/numpy_utils.py | import numpy as np
from gym.spaces import Space, Tuple, Dict
from gym.vector.utils.spaces import _BaseGymSpaces
from collections import OrderedDict
__all__ = ["concatenate", "create_empty_array"]
def concatenate(items, out, space):
"""Concatenate multiple samples from space into a single object.
Parameters
----------
items : iterable of samples of `space`
Samples to be concatenated.
out : tuple, dict, or `np.ndarray`
The output object. This object is a (possibly nested) numpy array.
space : `gym.spaces.Space` instance
Observation space of a single environment in the vectorized environment.
Returns
-------
out : tuple, dict, or `np.ndarray`
The output object. This object is a (possibly nested) numpy array.
Example
-------
>>> from gym.spaces import Box
>>> space = Box(low=0, high=1, shape=(3,), dtype=np.float32)
>>> out = np.zeros((2, 3), dtype=np.float32)
>>> items = [space.sample() for _ in range(2)]
>>> concatenate(items, out, space)
array([[0.6348213 , 0.28607962, 0.60760117],
[0.87383074, 0.192658 , 0.2148103 ]], dtype=float32)
"""
assert isinstance(items, (list, tuple))
if isinstance(space, _BaseGymSpaces):
return concatenate_base(items, out, space)
elif isinstance(space, Tuple):
return concatenate_tuple(items, out, space)
elif isinstance(space, Dict):
return concatenate_dict(items, out, space)
elif isinstance(space, Space):
return concatenate_custom(items, out, space)
else:
raise ValueError(
"Space of type `{0}` is not a valid `gym.Space` "
"instance.".format(type(space))
)
def concatenate_base(items, out, space):
return np.stack(items, axis=0, out=out)
def concatenate_tuple(items, out, space):
return tuple(
concatenate([item[i] for item in items], out[i], subspace)
for (i, subspace) in enumerate(space.spaces)
)
def concatenate_dict(items, out, space):
return OrderedDict(
[
(key, concatenate([item[key] for item in items], out[key], subspace))
for (key, subspace) in space.spaces.items()
]
)
def concatenate_custom(items, out, space):
return tuple(items)
def create_empty_array(space, n=1, fn=np.zeros):
"""Create an empty (possibly nested) numpy array.
Parameters
----------
space : `gym.spaces.Space` instance
Observation space of a single environment in the vectorized environment.
n : int
Number of environments in the vectorized environment. If `None`, creates
an empty sample from `space`.
fn : callable
Function to apply when creating the empty numpy array. Examples of such
functions are `np.empty` or `np.zeros`.
Returns
-------
out : tuple, dict, or `np.ndarray`
The output object. This object is a (possibly nested) numpy array.
Example
-------
>>> from gym.spaces import Box, Dict
>>> space = Dict({
... 'position': Box(low=0, high=1, shape=(3,), dtype=np.float32),
... 'velocity': Box(low=0, high=1, shape=(2,), dtype=np.float32)})
>>> create_empty_array(space, n=2, fn=np.zeros)
OrderedDict([('position', array([[0., 0., 0.],
[0., 0., 0.]], dtype=float32)),
('velocity', array([[0., 0.],
[0., 0.]], dtype=float32))])
"""
if isinstance(space, _BaseGymSpaces):
return create_empty_array_base(space, n=n, fn=fn)
elif isinstance(space, Tuple):
return create_empty_array_tuple(space, n=n, fn=fn)
elif isinstance(space, Dict):
return create_empty_array_dict(space, n=n, fn=fn)
elif isinstance(space, Space):
return create_empty_array_custom(space, n=n, fn=fn)
else:
raise ValueError(
"Space of type `{0}` is not a valid `gym.Space` "
"instance.".format(type(space))
)
def create_empty_array_base(space, n=1, fn=np.zeros):
shape = space.shape if (n is None) else (n,) + space.shape
return fn(shape, dtype=space.dtype)
def create_empty_array_tuple(space, n=1, fn=np.zeros):
return tuple(create_empty_array(subspace, n=n, fn=fn) for subspace in space.spaces)
def create_empty_array_dict(space, n=1, fn=np.zeros):
return OrderedDict(
[
(key, create_empty_array(subspace, n=n, fn=fn))
for (key, subspace) in space.spaces.items()
]
)
def create_empty_array_custom(space, n=1, fn=np.zeros):
return None
| 4,632 | Python | .py | 114 | 33.578947 | 87 | 0.628177 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,342 | shared_memory.py | WindyLab_Gym-PPS/gym/vector/utils/shared_memory.py | import numpy as np
import multiprocessing as mp
from ctypes import c_bool
from collections import OrderedDict
from gym import logger
from gym.spaces import Tuple, Dict
from gym.error import CustomSpaceError
from gym.vector.utils.spaces import _BaseGymSpaces
__all__ = ["create_shared_memory", "read_from_shared_memory", "write_to_shared_memory"]
def create_shared_memory(space, n=1, ctx=mp):
"""Create a shared memory object, to be shared across processes. This
eventually contains the observations from the vectorized environment.
Parameters
----------
space : `gym.spaces.Space` instance
Observation space of a single environment in the vectorized environment.
n : int
Number of environments in the vectorized environment (i.e. the number
of processes).
ctx : `multiprocessing` context
Context for multiprocessing.
Returns
-------
shared_memory : dict, tuple, or `multiprocessing.Array` instance
Shared object across processes.
"""
if isinstance(space, _BaseGymSpaces):
return create_base_shared_memory(space, n=n, ctx=ctx)
elif isinstance(space, Tuple):
return create_tuple_shared_memory(space, n=n, ctx=ctx)
elif isinstance(space, Dict):
return create_dict_shared_memory(space, n=n, ctx=ctx)
else:
raise CustomSpaceError(
"Cannot create a shared memory for space with "
"type `{0}`. Shared memory only supports "
"default Gym spaces (e.g. `Box`, `Tuple`, "
"`Dict`, etc...), and does not support custom "
"Gym spaces.".format(type(space))
)
def create_base_shared_memory(space, n=1, ctx=mp):
dtype = space.dtype.char
if dtype in "?":
dtype = c_bool
return ctx.Array(dtype, n * int(np.prod(space.shape)))
def create_tuple_shared_memory(space, n=1, ctx=mp):
return tuple(
create_shared_memory(subspace, n=n, ctx=ctx) for subspace in space.spaces
)
def create_dict_shared_memory(space, n=1, ctx=mp):
return OrderedDict(
[
(key, create_shared_memory(subspace, n=n, ctx=ctx))
for (key, subspace) in space.spaces.items()
]
)
def read_from_shared_memory(shared_memory, space, n=1):
"""Read the batch of observations from shared memory as a numpy array.
Parameters
----------
shared_memory : dict, tuple, or `multiprocessing.Array` instance
Shared object across processes. This contains the observations from the
vectorized environment. This object is created with `create_shared_memory`.
space : `gym.spaces.Space` instance
Observation space of a single environment in the vectorized environment.
n : int
Number of environments in the vectorized environment (i.e. the number
of processes).
Returns
-------
observations : dict, tuple or `np.ndarray` instance
Batch of observations as a (possibly nested) numpy array.
Notes
-----
The numpy array objects returned by `read_from_shared_memory` shares the
memory of `shared_memory`. Any changes to `shared_memory` are forwarded
to `observations`, and vice-versa. To avoid any side-effect, use `np.copy`.
"""
if isinstance(space, _BaseGymSpaces):
return read_base_from_shared_memory(shared_memory, space, n=n)
elif isinstance(space, Tuple):
return read_tuple_from_shared_memory(shared_memory, space, n=n)
elif isinstance(space, Dict):
return read_dict_from_shared_memory(shared_memory, space, n=n)
else:
raise CustomSpaceError(
"Cannot read from a shared memory for space with "
"type `{0}`. Shared memory only supports "
"default Gym spaces (e.g. `Box`, `Tuple`, "
"`Dict`, etc...), and does not support custom "
"Gym spaces.".format(type(space))
)
def read_base_from_shared_memory(shared_memory, space, n=1):
return np.frombuffer(shared_memory.get_obj(), dtype=space.dtype).reshape(
(n,) + space.shape
)
def read_tuple_from_shared_memory(shared_memory, space, n=1):
return tuple(
read_from_shared_memory(memory, subspace, n=n)
for (memory, subspace) in zip(shared_memory, space.spaces)
)
def read_dict_from_shared_memory(shared_memory, space, n=1):
return OrderedDict(
[
(key, read_from_shared_memory(shared_memory[key], subspace, n=n))
for (key, subspace) in space.spaces.items()
]
)
def write_to_shared_memory(index, value, shared_memory, space):
"""Write the observation of a single environment into shared memory.
Parameters
----------
index : int
Index of the environment (must be in `[0, num_envs)`).
value : sample from `space`
Observation of the single environment to write to shared memory.
shared_memory : dict, tuple, or `multiprocessing.Array` instance
Shared object across processes. This contains the observations from the
vectorized environment. This object is created with `create_shared_memory`.
space : `gym.spaces.Space` instance
Observation space of a single environment in the vectorized environment.
Returns
-------
`None`
"""
if isinstance(space, _BaseGymSpaces):
write_base_to_shared_memory(index, value, shared_memory, space)
elif isinstance(space, Tuple):
write_tuple_to_shared_memory(index, value, shared_memory, space)
elif isinstance(space, Dict):
write_dict_to_shared_memory(index, value, shared_memory, space)
else:
raise CustomSpaceError(
"Cannot write to a shared memory for space with "
"type `{0}`. Shared memory only supports "
"default Gym spaces (e.g. `Box`, `Tuple`, "
"`Dict`, etc...), and does not support custom "
"Gym spaces.".format(type(space))
)
def write_base_to_shared_memory(index, value, shared_memory, space):
size = int(np.prod(space.shape))
destination = np.frombuffer(shared_memory.get_obj(), dtype=space.dtype)
np.copyto(
destination[index * size : (index + 1) * size],
np.asarray(value, dtype=space.dtype).flatten(),
)
def write_tuple_to_shared_memory(index, values, shared_memory, space):
for value, memory, subspace in zip(values, shared_memory, space.spaces):
write_to_shared_memory(index, value, memory, subspace)
def write_dict_to_shared_memory(index, values, shared_memory, space):
for key, subspace in space.spaces.items():
write_to_shared_memory(index, values[key], shared_memory[key], subspace)
| 6,695 | Python | .py | 152 | 37.177632 | 87 | 0.670614 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,343 | misc.py | WindyLab_Gym-PPS/gym/vector/utils/misc.py | import contextlib
import os
__all__ = ["CloudpickleWrapper", "clear_mpi_env_vars"]
class CloudpickleWrapper(object):
def __init__(self, fn):
self.fn = fn
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.fn)
def __setstate__(self, ob):
import pickle
self.fn = pickle.loads(ob)
def __call__(self):
return self.fn()
@contextlib.contextmanager
def clear_mpi_env_vars():
"""
`from mpi4py import MPI` will call `MPI_Init` by default. If the child
process has MPI environment variables, MPI will think that the child process
is an MPI process just like the parent and do bad things such as hang.
This context manager is a hacky way to clear those environment variables
temporarily such as when we are starting multiprocessing Processes.
"""
removed_environment = {}
for k, v in list(os.environ.items()):
for prefix in ["OMPI_", "PMI_"]:
if k.startswith(prefix):
removed_environment[k] = v
del os.environ[k]
try:
yield
finally:
os.environ.update(removed_environment)
| 1,170 | Python | .py | 33 | 28.787879 | 80 | 0.652753 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,344 | test_sync_vector_env.py | WindyLab_Gym-PPS/gym/vector/tests/test_sync_vector_env.py | import pytest
import numpy as np
from gym.spaces import Box, Tuple
from gym.vector.tests.utils import CustomSpace, make_env, make_custom_space_env
from gym.vector.sync_vector_env import SyncVectorEnv
def test_create_sync_vector_env():
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
try:
env = SyncVectorEnv(env_fns)
finally:
env.close()
assert env.num_envs == 8
def test_reset_sync_vector_env():
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
try:
env = SyncVectorEnv(env_fns)
observations = env.reset()
finally:
env.close()
assert isinstance(env.observation_space, Box)
assert isinstance(observations, np.ndarray)
assert observations.dtype == env.observation_space.dtype
assert observations.shape == (8,) + env.single_observation_space.shape
assert observations.shape == env.observation_space.shape
@pytest.mark.parametrize("use_single_action_space", [True, False])
def test_step_sync_vector_env(use_single_action_space):
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
try:
env = SyncVectorEnv(env_fns)
observations = env.reset()
if use_single_action_space:
actions = [env.single_action_space.sample() for _ in range(8)]
else:
actions = env.action_space.sample()
observations, rewards, dones, _ = env.step(actions)
finally:
env.close()
assert isinstance(env.observation_space, Box)
assert isinstance(observations, np.ndarray)
assert observations.dtype == env.observation_space.dtype
assert observations.shape == (8,) + env.single_observation_space.shape
assert observations.shape == env.observation_space.shape
assert isinstance(rewards, np.ndarray)
assert isinstance(rewards[0], (float, np.floating))
assert rewards.ndim == 1
assert rewards.size == 8
assert isinstance(dones, np.ndarray)
assert dones.dtype == np.bool_
assert dones.ndim == 1
assert dones.size == 8
def test_check_observations_sync_vector_env():
# CubeCrash-v0 - observation_space: Box(40, 32, 3)
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
# MemorizeDigits-v0 - observation_space: Box(24, 32, 3)
env_fns[1] = make_env("MemorizeDigits-v0", 1)
with pytest.raises(RuntimeError):
env = SyncVectorEnv(env_fns)
env.close()
def test_custom_space_sync_vector_env():
env_fns = [make_custom_space_env(i) for i in range(4)]
try:
env = SyncVectorEnv(env_fns)
reset_observations = env.reset()
actions = ("action-2", "action-3", "action-5", "action-7")
step_observations, rewards, dones, _ = env.step(actions)
finally:
env.close()
assert isinstance(env.single_observation_space, CustomSpace)
assert isinstance(env.observation_space, Tuple)
assert isinstance(reset_observations, tuple)
assert reset_observations == ("reset", "reset", "reset", "reset")
assert isinstance(step_observations, tuple)
assert step_observations == (
"step(action-2)",
"step(action-3)",
"step(action-5)",
"step(action-7)",
)
| 3,193 | Python | .py | 78 | 34.910256 | 79 | 0.676252 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,345 | test_vector_env_wrapper.py | WindyLab_Gym-PPS/gym/vector/tests/test_vector_env_wrapper.py | import gym
from gym.vector import make
from gym.vector import VectorEnvWrapper
class DummyWrapper(VectorEnvWrapper):
def __init__(self, env):
self.env = env
self.counter = 0
def reset_async(self):
super().reset_async()
self.counter += 1
def test_vector_env_wrapper_inheritance():
env = make("FrozenLake-v1", asynchronous=False)
wrapped = DummyWrapper(env)
wrapped.reset()
assert wrapped.counter == 1
| 461 | Python | .py | 15 | 25.666667 | 51 | 0.69161 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,346 | test_spaces.py | WindyLab_Gym-PPS/gym/vector/tests/test_spaces.py | import pytest
import numpy as np
from gym.spaces import Box, MultiDiscrete, Tuple, Dict
from gym.vector.tests.utils import spaces, custom_spaces, CustomSpace
from gym.vector.utils.spaces import _BaseGymSpaces, batch_space
expected_batch_spaces_4 = [
Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float64),
Box(low=0.0, high=10.0, shape=(4, 1), dtype=np.float32),
Box(
low=np.array(
[[-1.0, 0.0, 0.0], [-1.0, 0.0, 0.0], [-1.0, 0.0, 0.0], [-1.0, 0.0, 0.0]]
),
high=np.array(
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]
),
dtype=np.float32,
),
Box(
low=np.array(
[
[[-1.0, 0.0], [0.0, -1.0]],
[[-1.0, 0.0], [0.0, -1.0]],
[[-1.0, 0.0], [0.0, -1]],
[[-1.0, 0.0], [0.0, -1.0]],
]
),
high=np.ones((4, 2, 2)),
dtype=np.float32,
),
Box(low=0, high=255, shape=(4,), dtype=np.uint8),
Box(low=0, high=255, shape=(4, 32, 32, 3), dtype=np.uint8),
MultiDiscrete([2, 2, 2, 2]),
Tuple((MultiDiscrete([3, 3, 3, 3]), MultiDiscrete([5, 5, 5, 5]))),
Tuple(
(
MultiDiscrete([7, 7, 7, 7]),
Box(
low=np.array([[0.0, -1.0], [0.0, -1.0], [0.0, -1.0], [0.0, -1]]),
high=np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]),
dtype=np.float32,
),
)
),
Box(
low=np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]),
high=np.array([[10, 12, 16], [10, 12, 16], [10, 12, 16], [10, 12, 16]]),
dtype=np.int64,
),
Box(low=0, high=1, shape=(4, 19), dtype=np.int8),
Dict(
{
"position": MultiDiscrete([23, 23, 23, 23]),
"velocity": Box(low=0.0, high=1.0, shape=(4, 1), dtype=np.float32),
}
),
Dict(
{
"position": Dict(
{
"x": MultiDiscrete([29, 29, 29, 29]),
"y": MultiDiscrete([31, 31, 31, 31]),
}
),
"velocity": Tuple(
(
MultiDiscrete([37, 37, 37, 37]),
Box(low=0, high=255, shape=(4,), dtype=np.uint8),
)
),
}
),
]
expected_custom_batch_spaces_4 = [
Tuple((CustomSpace(), CustomSpace(), CustomSpace(), CustomSpace())),
Tuple(
(
Tuple((CustomSpace(), CustomSpace(), CustomSpace(), CustomSpace())),
Box(low=0, high=255, shape=(4,), dtype=np.uint8),
)
),
]
@pytest.mark.parametrize(
"space,expected_batch_space_4",
list(zip(spaces, expected_batch_spaces_4)),
ids=[space.__class__.__name__ for space in spaces],
)
def test_batch_space(space, expected_batch_space_4):
batch_space_4 = batch_space(space, n=4)
assert batch_space_4 == expected_batch_space_4
@pytest.mark.parametrize(
"space,expected_batch_space_4",
list(zip(custom_spaces, expected_custom_batch_spaces_4)),
ids=[space.__class__.__name__ for space in custom_spaces],
)
def test_batch_space_custom_space(space, expected_batch_space_4):
batch_space_4 = batch_space(space, n=4)
assert batch_space_4 == expected_batch_space_4
| 3,308 | Python | .py | 97 | 25.556701 | 84 | 0.49079 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,347 | test_shared_memory.py | WindyLab_Gym-PPS/gym/vector/tests/test_shared_memory.py | import pytest
import numpy as np
import multiprocessing as mp
from multiprocessing.sharedctypes import SynchronizedArray
from multiprocessing import Array, Process
from collections import OrderedDict
from gym.spaces import Box, Tuple, Dict
from gym.error import CustomSpaceError
from gym.vector.utils.spaces import _BaseGymSpaces
from gym.vector.tests.utils import spaces, custom_spaces
from gym.vector.utils.shared_memory import (
create_shared_memory,
read_from_shared_memory,
write_to_shared_memory,
)
expected_types = [
Array("d", 1),
Array("f", 1),
Array("f", 3),
Array("f", 4),
Array("B", 1),
Array("B", 32 * 32 * 3),
Array("i", 1),
(Array("i", 1), Array("i", 1)),
(Array("i", 1), Array("f", 2)),
Array("B", 3),
Array("B", 19),
OrderedDict([("position", Array("i", 1)), ("velocity", Array("f", 1))]),
OrderedDict(
[
("position", OrderedDict([("x", Array("i", 1)), ("y", Array("i", 1))])),
("velocity", (Array("i", 1), Array("B", 1))),
]
),
]
@pytest.mark.parametrize("n", [1, 8])
@pytest.mark.parametrize(
"space,expected_type",
list(zip(spaces, expected_types)),
ids=[space.__class__.__name__ for space in spaces],
)
@pytest.mark.parametrize(
"ctx", [None, "fork", "spawn"], ids=["default", "fork", "spawn"]
)
def test_create_shared_memory(space, expected_type, n, ctx):
def assert_nested_type(lhs, rhs, n):
assert type(lhs) == type(rhs)
if isinstance(lhs, (list, tuple)):
assert len(lhs) == len(rhs)
for lhs_, rhs_ in zip(lhs, rhs):
assert_nested_type(lhs_, rhs_, n)
elif isinstance(lhs, (dict, OrderedDict)):
assert set(lhs.keys()) ^ set(rhs.keys()) == set()
for key in lhs.keys():
assert_nested_type(lhs[key], rhs[key], n)
elif isinstance(lhs, SynchronizedArray):
# Assert the length of the array
assert len(lhs[:]) == n * len(rhs[:])
# Assert the data type
assert type(lhs[0]) == type(rhs[0]) # noqa: E721
else:
raise TypeError("Got unknown type `{0}`.".format(type(lhs)))
ctx = mp if (ctx is None) else mp.get_context(ctx)
shared_memory = create_shared_memory(space, n=n, ctx=ctx)
assert_nested_type(shared_memory, expected_type, n=n)
@pytest.mark.parametrize("n", [1, 8])
@pytest.mark.parametrize(
"ctx", [None, "fork", "spawn"], ids=["default", "fork", "spawn"]
)
@pytest.mark.parametrize("space", custom_spaces)
def test_create_shared_memory_custom_space(n, ctx, space):
ctx = mp if (ctx is None) else mp.get_context(ctx)
with pytest.raises(CustomSpaceError):
shared_memory = create_shared_memory(space, n=n, ctx=ctx)
@pytest.mark.parametrize(
"space", spaces, ids=[space.__class__.__name__ for space in spaces]
)
def test_write_to_shared_memory(space):
def assert_nested_equal(lhs, rhs):
assert isinstance(rhs, list)
if isinstance(lhs, (list, tuple)):
for i in range(len(lhs)):
assert_nested_equal(lhs[i], [rhs_[i] for rhs_ in rhs])
elif isinstance(lhs, (dict, OrderedDict)):
for key in lhs.keys():
assert_nested_equal(lhs[key], [rhs_[key] for rhs_ in rhs])
elif isinstance(lhs, SynchronizedArray):
assert np.all(np.array(lhs[:]) == np.stack(rhs, axis=0).flatten())
else:
raise TypeError("Got unknown type `{0}`.".format(type(lhs)))
def write(i, shared_memory, sample):
write_to_shared_memory(i, sample, shared_memory, space)
shared_memory_n8 = create_shared_memory(space, n=8)
samples = [space.sample() for _ in range(8)]
processes = [
Process(target=write, args=(i, shared_memory_n8, samples[i])) for i in range(8)
]
for process in processes:
process.start()
for process in processes:
process.join()
assert_nested_equal(shared_memory_n8, samples)
@pytest.mark.parametrize(
"space", spaces, ids=[space.__class__.__name__ for space in spaces]
)
def test_read_from_shared_memory(space):
def assert_nested_equal(lhs, rhs, space, n):
assert isinstance(rhs, list)
if isinstance(space, Tuple):
assert isinstance(lhs, tuple)
for i in range(len(lhs)):
assert_nested_equal(
lhs[i], [rhs_[i] for rhs_ in rhs], space.spaces[i], n
)
elif isinstance(space, Dict):
assert isinstance(lhs, OrderedDict)
for key in lhs.keys():
assert_nested_equal(
lhs[key], [rhs_[key] for rhs_ in rhs], space.spaces[key], n
)
elif isinstance(space, _BaseGymSpaces):
assert isinstance(lhs, np.ndarray)
assert lhs.shape == ((n,) + space.shape)
assert lhs.dtype == space.dtype
assert np.all(lhs == np.stack(rhs, axis=0))
else:
raise TypeError("Got unknown type `{0}`".format(type(space)))
def write(i, shared_memory, sample):
write_to_shared_memory(i, sample, shared_memory, space)
shared_memory_n8 = create_shared_memory(space, n=8)
memory_view_n8 = read_from_shared_memory(shared_memory_n8, space, n=8)
samples = [space.sample() for _ in range(8)]
processes = [
Process(target=write, args=(i, shared_memory_n8, samples[i])) for i in range(8)
]
for process in processes:
process.start()
for process in processes:
process.join()
assert_nested_equal(memory_view_n8, samples, space, n=8)
| 5,661 | Python | .py | 140 | 33.057143 | 87 | 0.608783 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,348 | test_numpy_utils.py | WindyLab_Gym-PPS/gym/vector/tests/test_numpy_utils.py | import pytest
import numpy as np
from collections import OrderedDict
from gym.spaces import Tuple, Dict
from gym.vector.utils.spaces import _BaseGymSpaces
from gym.vector.tests.utils import spaces
from gym.vector.utils.numpy_utils import concatenate, create_empty_array
@pytest.mark.parametrize(
"space", spaces, ids=[space.__class__.__name__ for space in spaces]
)
def test_concatenate(space):
def assert_type(lhs, rhs, n):
# Special case: if rhs is a list of scalars, lhs must be an np.ndarray
if np.isscalar(rhs[0]):
assert isinstance(lhs, np.ndarray)
assert all([np.isscalar(rhs[i]) for i in range(n)])
else:
assert all([isinstance(rhs[i], type(lhs)) for i in range(n)])
def assert_nested_equal(lhs, rhs, n):
assert isinstance(rhs, list)
assert (n > 0) and (len(rhs) == n)
assert_type(lhs, rhs, n)
if isinstance(lhs, np.ndarray):
assert lhs.shape[0] == n
for i in range(n):
assert np.all(lhs[i] == rhs[i])
elif isinstance(lhs, tuple):
for i in range(len(lhs)):
rhs_T_i = [rhs[j][i] for j in range(n)]
assert_nested_equal(lhs[i], rhs_T_i, n)
elif isinstance(lhs, OrderedDict):
for key in lhs.keys():
rhs_T_key = [rhs[j][key] for j in range(n)]
assert_nested_equal(lhs[key], rhs_T_key, n)
else:
raise TypeError("Got unknown type `{0}`.".format(type(lhs)))
samples = [space.sample() for _ in range(8)]
array = create_empty_array(space, n=8)
concatenated = concatenate(samples, array, space)
assert np.all(concatenated == array)
assert_nested_equal(array, samples, n=8)
@pytest.mark.parametrize("n", [1, 8])
@pytest.mark.parametrize(
"space", spaces, ids=[space.__class__.__name__ for space in spaces]
)
def test_create_empty_array(space, n):
def assert_nested_type(arr, space, n):
if isinstance(space, _BaseGymSpaces):
assert isinstance(arr, np.ndarray)
assert arr.dtype == space.dtype
assert arr.shape == (n,) + space.shape
elif isinstance(space, Tuple):
assert isinstance(arr, tuple)
assert len(arr) == len(space.spaces)
for i in range(len(arr)):
assert_nested_type(arr[i], space.spaces[i], n)
elif isinstance(space, Dict):
assert isinstance(arr, OrderedDict)
assert set(arr.keys()) ^ set(space.spaces.keys()) == set()
for key in arr.keys():
assert_nested_type(arr[key], space.spaces[key], n)
else:
raise TypeError("Got unknown type `{0}`.".format(type(arr)))
array = create_empty_array(space, n=n, fn=np.empty)
assert_nested_type(array, space, n=n)
@pytest.mark.parametrize("n", [1, 8])
@pytest.mark.parametrize(
"space", spaces, ids=[space.__class__.__name__ for space in spaces]
)
def test_create_empty_array_zeros(space, n):
def assert_nested_type(arr, space, n):
if isinstance(space, _BaseGymSpaces):
assert isinstance(arr, np.ndarray)
assert arr.dtype == space.dtype
assert arr.shape == (n,) + space.shape
assert np.all(arr == 0)
elif isinstance(space, Tuple):
assert isinstance(arr, tuple)
assert len(arr) == len(space.spaces)
for i in range(len(arr)):
assert_nested_type(arr[i], space.spaces[i], n)
elif isinstance(space, Dict):
assert isinstance(arr, OrderedDict)
assert set(arr.keys()) ^ set(space.spaces.keys()) == set()
for key in arr.keys():
assert_nested_type(arr[key], space.spaces[key], n)
else:
raise TypeError("Got unknown type `{0}`.".format(type(arr)))
array = create_empty_array(space, n=n, fn=np.zeros)
assert_nested_type(array, space, n=n)
@pytest.mark.parametrize(
"space", spaces, ids=[space.__class__.__name__ for space in spaces]
)
def test_create_empty_array_none_shape_ones(space):
def assert_nested_type(arr, space):
if isinstance(space, _BaseGymSpaces):
assert isinstance(arr, np.ndarray)
assert arr.dtype == space.dtype
assert arr.shape == space.shape
assert np.all(arr == 1)
elif isinstance(space, Tuple):
assert isinstance(arr, tuple)
assert len(arr) == len(space.spaces)
for i in range(len(arr)):
assert_nested_type(arr[i], space.spaces[i])
elif isinstance(space, Dict):
assert isinstance(arr, OrderedDict)
assert set(arr.keys()) ^ set(space.spaces.keys()) == set()
for key in arr.keys():
assert_nested_type(arr[key], space.spaces[key])
else:
raise TypeError("Got unknown type `{0}`.".format(type(arr)))
array = create_empty_array(space, n=None, fn=np.ones)
assert_nested_type(array, space)
| 5,070 | Python | .py | 114 | 35.219298 | 78 | 0.601989 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,349 | test_vector_env.py | WindyLab_Gym-PPS/gym/vector/tests/test_vector_env.py | import pytest
import numpy as np
from gym.spaces import Tuple
from gym.vector.tests.utils import CustomSpace, make_env
from gym.vector.async_vector_env import AsyncVectorEnv
from gym.vector.sync_vector_env import SyncVectorEnv
from gym.vector.vector_env import VectorEnv
@pytest.mark.parametrize("shared_memory", [True, False])
def test_vector_env_equal(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(4)]
num_steps = 100
try:
async_env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
sync_env = SyncVectorEnv(env_fns)
async_env.seed(0)
sync_env.seed(0)
assert async_env.num_envs == sync_env.num_envs
assert async_env.observation_space == sync_env.observation_space
assert async_env.single_observation_space == sync_env.single_observation_space
assert async_env.action_space == sync_env.action_space
assert async_env.single_action_space == sync_env.single_action_space
async_observations = async_env.reset()
sync_observations = sync_env.reset()
assert np.all(async_observations == sync_observations)
for _ in range(num_steps):
actions = async_env.action_space.sample()
assert actions in sync_env.action_space
async_observations, async_rewards, async_dones, _ = async_env.step(actions)
sync_observations, sync_rewards, sync_dones, _ = sync_env.step(actions)
assert np.all(async_observations == sync_observations)
assert np.all(async_rewards == sync_rewards)
assert np.all(async_dones == sync_dones)
finally:
async_env.close()
sync_env.close()
def test_custom_space_vector_env():
env = VectorEnv(4, CustomSpace(), CustomSpace())
assert isinstance(env.single_observation_space, CustomSpace)
assert isinstance(env.observation_space, Tuple)
assert isinstance(env.single_action_space, CustomSpace)
assert isinstance(env.action_space, Tuple)
| 2,019 | Python | .py | 41 | 42.02439 | 87 | 0.703006 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,350 | utils.py | WindyLab_Gym-PPS/gym/vector/tests/utils.py | import numpy as np
import gym
import time
from gym.spaces import Box, Discrete, MultiDiscrete, MultiBinary, Tuple, Dict
spaces = [
Box(low=np.array(-1.0), high=np.array(1.0), dtype=np.float64),
Box(low=np.array([0.0]), high=np.array([10.0]), dtype=np.float32),
Box(
low=np.array([-1.0, 0.0, 0.0]), high=np.array([1.0, 1.0, 1.0]), dtype=np.float32
),
Box(
low=np.array([[-1.0, 0.0], [0.0, -1.0]]), high=np.ones((2, 2)), dtype=np.float32
),
Box(low=0, high=255, shape=(), dtype=np.uint8),
Box(low=0, high=255, shape=(32, 32, 3), dtype=np.uint8),
Discrete(2),
Tuple((Discrete(3), Discrete(5))),
Tuple(
(
Discrete(7),
Box(low=np.array([0.0, -1.0]), high=np.array([1.0, 1.0]), dtype=np.float32),
)
),
MultiDiscrete([11, 13, 17]),
MultiBinary(19),
Dict(
{
"position": Discrete(23),
"velocity": Box(
low=np.array([0.0]), high=np.array([1.0]), dtype=np.float32
),
}
),
Dict(
{
"position": Dict({"x": Discrete(29), "y": Discrete(31)}),
"velocity": Tuple(
(Discrete(37), Box(low=0, high=255, shape=(), dtype=np.uint8))
),
}
),
]
HEIGHT, WIDTH = 64, 64
class UnittestSlowEnv(gym.Env):
def __init__(self, slow_reset=0.3):
super(UnittestSlowEnv, self).__init__()
self.slow_reset = slow_reset
self.observation_space = Box(
low=0, high=255, shape=(HEIGHT, WIDTH, 3), dtype=np.uint8
)
self.action_space = Box(low=0.0, high=1.0, shape=(), dtype=np.float32)
def reset(self):
if self.slow_reset > 0:
time.sleep(self.slow_reset)
return self.observation_space.sample()
def step(self, action):
time.sleep(action)
observation = self.observation_space.sample()
reward, done = 0.0, False
return observation, reward, done, {}
class CustomSpace(gym.Space):
"""Minimal custom observation space."""
def __eq__(self, other):
return isinstance(other, CustomSpace)
custom_spaces = [
CustomSpace(),
Tuple((CustomSpace(), Box(low=0, high=255, shape=(), dtype=np.uint8))),
]
class CustomSpaceEnv(gym.Env):
def __init__(self):
super(CustomSpaceEnv, self).__init__()
self.observation_space = CustomSpace()
self.action_space = CustomSpace()
def reset(self):
return "reset"
def step(self, action):
observation = "step({0:s})".format(action)
reward, done = 0.0, False
return observation, reward, done, {}
def make_env(env_name, seed):
def _make():
env = gym.make(env_name)
env.seed(seed)
return env
return _make
def make_slow_env(slow_reset, seed):
def _make():
env = UnittestSlowEnv(slow_reset=slow_reset)
env.seed(seed)
return env
return _make
def make_custom_space_env(seed):
def _make():
env = CustomSpaceEnv()
env.seed(seed)
return env
return _make
| 3,115 | Python | .py | 97 | 24.917526 | 88 | 0.573004 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,351 | test_async_vector_env.py | WindyLab_Gym-PPS/gym/vector/tests/test_async_vector_env.py | import pytest
import numpy as np
from multiprocessing import TimeoutError
from gym.spaces import Box, Tuple
from gym.error import AlreadyPendingCallError, NoAsyncCallError, ClosedEnvironmentError
from gym.vector.tests.utils import (
CustomSpace,
make_env,
make_slow_env,
make_custom_space_env,
)
from gym.vector.async_vector_env import AsyncVectorEnv
@pytest.mark.parametrize("shared_memory", [True, False])
def test_create_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
finally:
env.close()
assert env.num_envs == 8
@pytest.mark.parametrize("shared_memory", [True, False])
def test_reset_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
observations = env.reset()
finally:
env.close()
assert isinstance(env.observation_space, Box)
assert isinstance(observations, np.ndarray)
assert observations.dtype == env.observation_space.dtype
assert observations.shape == (8,) + env.single_observation_space.shape
assert observations.shape == env.observation_space.shape
@pytest.mark.parametrize("shared_memory", [True, False])
@pytest.mark.parametrize("use_single_action_space", [True, False])
def test_step_async_vector_env(shared_memory, use_single_action_space):
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
observations = env.reset()
if use_single_action_space:
actions = [env.single_action_space.sample() for _ in range(8)]
else:
actions = env.action_space.sample()
observations, rewards, dones, _ = env.step(actions)
finally:
env.close()
assert isinstance(env.observation_space, Box)
assert isinstance(observations, np.ndarray)
assert observations.dtype == env.observation_space.dtype
assert observations.shape == (8,) + env.single_observation_space.shape
assert observations.shape == env.observation_space.shape
assert isinstance(rewards, np.ndarray)
assert isinstance(rewards[0], (float, np.floating))
assert rewards.ndim == 1
assert rewards.size == 8
assert isinstance(dones, np.ndarray)
assert dones.dtype == np.bool_
assert dones.ndim == 1
assert dones.size == 8
@pytest.mark.parametrize("shared_memory", [True, False])
def test_copy_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory, copy=True)
observations = env.reset()
observations[0] = 128
assert not np.all(env.observations[0] == 128)
finally:
env.close()
@pytest.mark.parametrize("shared_memory", [True, False])
def test_no_copy_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory, copy=False)
observations = env.reset()
observations[0] = 128
assert np.all(env.observations[0] == 128)
finally:
env.close()
@pytest.mark.parametrize("shared_memory", [True, False])
def test_reset_timeout_async_vector_env(shared_memory):
env_fns = [make_slow_env(0.3, i) for i in range(4)]
with pytest.raises(TimeoutError):
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
env.reset_async()
observations = env.reset_wait(timeout=0.1)
finally:
env.close(terminate=True)
@pytest.mark.parametrize("shared_memory", [True, False])
def test_step_timeout_async_vector_env(shared_memory):
env_fns = [make_slow_env(0.0, i) for i in range(4)]
with pytest.raises(TimeoutError):
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
observations = env.reset()
env.step_async([0.1, 0.1, 0.3, 0.1])
observations, rewards, dones, _ = env.step_wait(timeout=0.1)
finally:
env.close(terminate=True)
@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize("shared_memory", [True, False])
def test_reset_out_of_order_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(4)]
with pytest.raises(NoAsyncCallError):
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
observations = env.reset_wait()
except NoAsyncCallError as exception:
assert exception.name == "reset"
raise
finally:
env.close(terminate=True)
with pytest.raises(AlreadyPendingCallError):
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
actions = env.action_space.sample()
observations = env.reset()
env.step_async(actions)
env.reset_async()
except NoAsyncCallError as exception:
assert exception.name == "step"
raise
finally:
env.close(terminate=True)
@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize("shared_memory", [True, False])
def test_step_out_of_order_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(4)]
with pytest.raises(NoAsyncCallError):
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
actions = env.action_space.sample()
observations = env.reset()
observations, rewards, dones, infos = env.step_wait()
except AlreadyPendingCallError as exception:
assert exception.name == "step"
raise
finally:
env.close(terminate=True)
with pytest.raises(AlreadyPendingCallError):
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
actions = env.action_space.sample()
env.reset_async()
env.step_async(actions)
except AlreadyPendingCallError as exception:
assert exception.name == "reset"
raise
finally:
env.close(terminate=True)
@pytest.mark.parametrize("shared_memory", [True, False])
def test_already_closed_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(4)]
with pytest.raises(ClosedEnvironmentError):
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
env.close()
observations = env.reset()
@pytest.mark.parametrize("shared_memory", [True, False])
def test_check_observations_async_vector_env(shared_memory):
# CubeCrash-v0 - observation_space: Box(40, 32, 3)
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
# MemorizeDigits-v0 - observation_space: Box(24, 32, 3)
env_fns[1] = make_env("MemorizeDigits-v0", 1)
with pytest.raises(RuntimeError):
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
env.close(terminate=True)
def test_custom_space_async_vector_env():
env_fns = [make_custom_space_env(i) for i in range(4)]
try:
env = AsyncVectorEnv(env_fns, shared_memory=False)
reset_observations = env.reset()
actions = ("action-2", "action-3", "action-5", "action-7")
step_observations, rewards, dones, _ = env.step(actions)
finally:
env.close()
assert isinstance(env.single_observation_space, CustomSpace)
assert isinstance(env.observation_space, Tuple)
assert isinstance(reset_observations, tuple)
assert reset_observations == ("reset", "reset", "reset", "reset")
assert isinstance(step_observations, tuple)
assert step_observations == (
"step(action-2)",
"step(action-3)",
"step(action-5)",
"step(action-7)",
)
def test_custom_space_async_vector_env_shared_memory():
env_fns = [make_custom_space_env(i) for i in range(4)]
with pytest.raises(ValueError):
env = AsyncVectorEnv(env_fns, shared_memory=True)
env.close(terminate=True)
| 8,268 | Python | .py | 193 | 35.694301 | 87 | 0.668409 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,352 | test_resize_observation.py | WindyLab_Gym-PPS/gym/wrappers/test_resize_observation.py | import pytest
import gym
from gym.wrappers import ResizeObservation
try:
import atari_py
except ImportError:
atari_py = None
@pytest.mark.skipif(
atari_py is None, reason="Only run this test when atari_py is installed"
)
@pytest.mark.parametrize(
"env_id", ["PongNoFrameskip-v0", "SpaceInvadersNoFrameskip-v0"]
)
@pytest.mark.parametrize("shape", [16, 32, (8, 5), [10, 7]])
def test_resize_observation(env_id, shape):
env = gym.make(env_id)
env = ResizeObservation(env, shape)
assert env.observation_space.shape[-1] == 3
obs = env.reset()
if isinstance(shape, int):
assert env.observation_space.shape[:2] == (shape, shape)
assert obs.shape == (shape, shape, 3)
else:
assert env.observation_space.shape[:2] == tuple(shape)
assert obs.shape == tuple(shape) + (3,)
| 839 | Python | .py | 25 | 29.48 | 76 | 0.686032 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,353 | test_flatten_observation.py | WindyLab_Gym-PPS/gym/wrappers/test_flatten_observation.py | import pytest
import numpy as np
import gym
from gym.wrappers import FlattenObservation
from gym import spaces
@pytest.mark.parametrize("env_id", ["Blackjack-v0", "KellyCoinflip-v0"])
def test_flatten_observation(env_id):
env = gym.make(env_id)
wrapped_env = FlattenObservation(env)
obs = env.reset()
wrapped_obs = wrapped_env.reset()
if env_id == "Blackjack-v0":
space = spaces.Tuple(
(spaces.Discrete(32), spaces.Discrete(11), spaces.Discrete(2))
)
wrapped_space = spaces.Box(0, 1, [32 + 11 + 2], dtype=np.int64)
elif env_id == "KellyCoinflip-v0":
space = spaces.Tuple(
(spaces.Box(0, 250.0, [1], dtype=np.float32), spaces.Discrete(300 + 1))
)
low = np.zeros((302,), dtype=np.float64)
high = np.array([250.0] + [1.0] * 301, dtype=np.float64)
wrapped_space = spaces.Box(low, high, [1 + (300 + 1)], dtype=np.float64)
assert space.contains(obs)
assert wrapped_space.contains(wrapped_obs)
| 1,014 | Python | .py | 25 | 34.48 | 83 | 0.643585 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,354 | test_transform_observation.py | WindyLab_Gym-PPS/gym/wrappers/test_transform_observation.py | import pytest
import numpy as np
import gym
from gym.wrappers import TransformObservation
@pytest.mark.parametrize("env_id", ["CartPole-v1", "Pendulum-v0"])
def test_transform_observation(env_id):
affine_transform = lambda x: 3 * x + 2
env = gym.make(env_id)
wrapped_env = TransformObservation(
gym.make(env_id), lambda obs: affine_transform(obs)
)
env.seed(0)
wrapped_env.seed(0)
obs = env.reset()
wrapped_obs = wrapped_env.reset()
assert np.allclose(wrapped_obs, affine_transform(obs))
action = env.action_space.sample()
obs, reward, done, _ = env.step(action)
wrapped_obs, wrapped_reward, wrapped_done, _ = wrapped_env.step(action)
assert np.allclose(wrapped_obs, affine_transform(obs))
assert np.allclose(wrapped_reward, reward)
assert wrapped_done == done
| 836 | Python | .py | 22 | 33.590909 | 75 | 0.710037 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,355 | rescale_action.py | WindyLab_Gym-PPS/gym/wrappers/rescale_action.py | import numpy as np
import gym
from gym import spaces
class RescaleAction(gym.ActionWrapper):
r"""Rescales the continuous action space of the environment to a range [min_action, max_action].
Example::
>>> RescaleAction(env, min_action, max_action).action_space == Box(min_action, max_action)
True
"""
def __init__(self, env, min_action, max_action):
assert isinstance(
env.action_space, spaces.Box
), "expected Box action space, got {}".format(type(env.action_space))
assert np.less_equal(min_action, max_action).all(), (min_action, max_action)
super(RescaleAction, self).__init__(env)
self.min_action = (
np.zeros(env.action_space.shape, dtype=env.action_space.dtype) + min_action
)
self.max_action = (
np.zeros(env.action_space.shape, dtype=env.action_space.dtype) + max_action
)
self.action_space = spaces.Box(
low=min_action,
high=max_action,
shape=env.action_space.shape,
dtype=env.action_space.dtype,
)
def action(self, action):
assert np.all(np.greater_equal(action, self.min_action)), (
action,
self.min_action,
)
assert np.all(np.less_equal(action, self.max_action)), (action, self.max_action)
low = self.env.action_space.low
high = self.env.action_space.high
action = low + (high - low) * (
(action - self.min_action) / (self.max_action - self.min_action)
)
action = np.clip(action, low, high)
return action
| 1,633 | Python | .py | 40 | 31.925 | 100 | 0.608833 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,356 | test_rescale_action.py | WindyLab_Gym-PPS/gym/wrappers/test_rescale_action.py | import pytest
import numpy as np
import gym
from gym.wrappers import RescaleAction
def test_rescale_action():
env = gym.make("CartPole-v1")
with pytest.raises(AssertionError):
env = RescaleAction(env, -1, 1)
del env
env = gym.make("Pendulum-v0")
wrapped_env = RescaleAction(gym.make("Pendulum-v0"), -1, 1)
seed = 0
env.seed(seed)
wrapped_env.seed(seed)
obs = env.reset()
wrapped_obs = wrapped_env.reset()
assert np.allclose(obs, wrapped_obs)
obs, reward, _, _ = env.step([1.5])
with pytest.raises(AssertionError):
wrapped_env.step([1.5])
wrapped_obs, wrapped_reward, _, _ = wrapped_env.step([0.75])
assert np.allclose(obs, wrapped_obs)
assert np.allclose(reward, wrapped_reward)
| 768 | Python | .py | 23 | 28.521739 | 64 | 0.673913 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,357 | test_pixel_observation.py | WindyLab_Gym-PPS/gym/wrappers/test_pixel_observation.py | """Tests for the pixel observation wrapper."""
import pytest
import numpy as np
import gym
from gym import spaces
from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY
class FakeEnvironment(gym.Env):
def __init__(self):
self.action_space = spaces.Box(shape=(1,), low=-1, high=1, dtype=np.float32)
def render(self, width=32, height=32, *args, **kwargs):
del args
del kwargs
image_shape = (height, width, 3)
return np.zeros(image_shape, dtype=np.uint8)
def reset(self):
observation = self.observation_space.sample()
return observation
def step(self, action):
del action
observation = self.observation_space.sample()
reward, terminal, info = 0.0, False, {}
return observation, reward, terminal, info
class FakeArrayObservationEnvironment(FakeEnvironment):
def __init__(self, *args, **kwargs):
self.observation_space = spaces.Box(
shape=(2,), low=-1, high=1, dtype=np.float32
)
super(FakeArrayObservationEnvironment, self).__init__(*args, **kwargs)
class FakeDictObservationEnvironment(FakeEnvironment):
def __init__(self, *args, **kwargs):
self.observation_space = spaces.Dict(
{
"state": spaces.Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
)
super(FakeDictObservationEnvironment, self).__init__(*args, **kwargs)
class TestPixelObservationWrapper(object):
@pytest.mark.parametrize("pixels_only", (True, False))
def test_dict_observation(self, pixels_only):
pixel_key = "rgb"
env = FakeDictObservationEnvironment()
# Make sure we are testing the right environment for the test.
observation_space = env.observation_space
assert isinstance(observation_space, spaces.Dict)
width, height = (320, 240)
# The wrapper should only add one observation.
wrapped_env = PixelObservationWrapper(
env,
pixel_keys=(pixel_key,),
pixels_only=pixels_only,
render_kwargs={pixel_key: {"width": width, "height": height}},
)
assert isinstance(wrapped_env.observation_space, spaces.Dict)
if pixels_only:
assert len(wrapped_env.observation_space.spaces) == 1
assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key]
else:
assert (
len(wrapped_env.observation_space.spaces)
== len(observation_space.spaces) + 1
)
expected_keys = list(observation_space.spaces.keys()) + [pixel_key]
assert list(wrapped_env.observation_space.spaces.keys()) == expected_keys
# Check that the added space item is consistent with the added observation.
observation = wrapped_env.reset()
rgb_observation = observation[pixel_key]
assert rgb_observation.shape == (height, width, 3)
assert rgb_observation.dtype == np.uint8
@pytest.mark.parametrize("pixels_only", (True, False))
def test_single_array_observation(self, pixels_only):
pixel_key = "depth"
env = FakeArrayObservationEnvironment()
observation_space = env.observation_space
assert isinstance(observation_space, spaces.Box)
wrapped_env = PixelObservationWrapper(
env, pixel_keys=(pixel_key,), pixels_only=pixels_only
)
wrapped_env.observation_space = wrapped_env.observation_space
assert isinstance(wrapped_env.observation_space, spaces.Dict)
if pixels_only:
assert len(wrapped_env.observation_space.spaces) == 1
assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key]
else:
assert len(wrapped_env.observation_space.spaces) == 2
assert list(wrapped_env.observation_space.spaces.keys()) == [
STATE_KEY,
pixel_key,
]
observation = wrapped_env.reset()
depth_observation = observation[pixel_key]
assert depth_observation.shape == (32, 32, 3)
assert depth_observation.dtype == np.uint8
if not pixels_only:
assert isinstance(observation[STATE_KEY], np.ndarray)
| 4,304 | Python | .py | 94 | 36.478723 | 85 | 0.644343 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,358 | customize_pps.py | WindyLab_Gym-PPS/gym/wrappers/customize_pps.py | import os
import json
from typing import Any
import gym
import argparse
class PredatorPreySwarmCustomizer(gym.Wrapper):
def __init__(self, env, args):
super(PredatorPreySwarmCustomizer, self).__init__(env)
if isinstance(args, argparse.Namespace):
args_ = vars(args).items()
elif isinstance(args, dict):
args_ = args.items()
elif isinstance(args, str):
print(f"Retrieving customized param from '{args}'")
with open(args, "r") as file:
args_ = json.load(file).items()
else:
raise ValueError("Invalid argument type. Parameters must be a dictionary, or argparse.Namespace, or a json file directory")
for attr, value in args_:
self.set_param(attr, value)
self.__reinit__()
print('Environment parameter customization finished.')
def set_param(self, name: str, value: Any) -> None:
if name not in self.env.param_list:
raise KeyError(f"Parameter '{name}' does not exist!"
)
setattr(self.env, name, value)
self.__reinit__()
def get_param(self, name: str) -> Any:
if name not in self.env.param_list:
raise KeyError(f"Parameter '{name}' does not exist!"
)
return getattr(self.env, name)
def __reinit__(self):
self.env.__reinit__()
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
| 1,514 | Python | .py | 37 | 31.810811 | 135 | 0.615649 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,359 | test_clip_action.py | WindyLab_Gym-PPS/gym/wrappers/test_clip_action.py | import numpy as np
import gym
from gym.wrappers import ClipAction
def test_clip_action():
# mountaincar: action-based rewards
make_env = lambda: gym.make("MountainCarContinuous-v0")
env = make_env()
wrapped_env = ClipAction(make_env())
seed = 0
env.seed(seed)
wrapped_env.seed(seed)
env.reset()
wrapped_env.reset()
actions = [[0.4], [1.2], [-0.3], [0.0], [-2.5]]
for action in actions:
obs1, r1, d1, _ = env.step(
np.clip(action, env.action_space.low, env.action_space.high)
)
obs2, r2, d2, _ = wrapped_env.step(action)
assert np.allclose(r1, r2)
assert np.allclose(obs1, obs2)
assert d1 == d2
| 704 | Python | .py | 22 | 26 | 72 | 0.616864 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,360 | test_record_episode_statistics.py | WindyLab_Gym-PPS/gym/wrappers/test_record_episode_statistics.py | import pytest
import gym
from gym.wrappers import RecordEpisodeStatistics
@pytest.mark.parametrize("env_id", ["CartPole-v0", "Pendulum-v0"])
@pytest.mark.parametrize("deque_size", [2, 5])
def test_record_episode_statistics(env_id, deque_size):
env = gym.make(env_id)
env = RecordEpisodeStatistics(env, deque_size)
for n in range(5):
env.reset()
assert env.episode_returns[0] == 0.0
assert env.episode_lengths[0] == 0
for t in range(env.spec.max_episode_steps):
_, _, done, info = env.step(env.action_space.sample())
if done:
assert "episode" in info
assert all([item in info["episode"] for item in ["r", "l", "t"]])
break
assert len(env.return_queue) == deque_size
assert len(env.length_queue) == deque_size
@pytest.mark.parametrize("num_envs", [1, 4])
def test_record_episode_statistics_with_vectorenv(num_envs):
envs = gym.vector.make("CartPole-v0", num_envs=num_envs, asynchronous=False)
envs = RecordEpisodeStatistics(envs)
envs.reset()
for _ in range(envs.env.envs[0].spec.max_episode_steps + 1):
_, _, dones, infos = envs.step(envs.action_space.sample())
for idx, info in enumerate(infos):
if dones[idx]:
assert "episode" in info
assert all([item in info["episode"] for item in ["r", "l", "t"]])
break
| 1,430 | Python | .py | 32 | 36.75 | 81 | 0.623563 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,361 | flatten_observation.py | WindyLab_Gym-PPS/gym/wrappers/flatten_observation.py | import gym.spaces as spaces
from gym import ObservationWrapper
class FlattenObservation(ObservationWrapper):
r"""Observation wrapper that flattens the observation."""
def __init__(self, env):
super(FlattenObservation, self).__init__(env)
self.observation_space = spaces.flatten_space(env.observation_space)
def observation(self, observation):
return spaces.flatten(self.env.observation_space, observation)
| 446 | Python | .py | 9 | 44.111111 | 76 | 0.755196 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,362 | test_atari_preprocessing.py | WindyLab_Gym-PPS/gym/wrappers/test_atari_preprocessing.py | import numpy as np
import gym
from gym.wrappers import AtariPreprocessing
import pytest
pytest.importorskip("atari_py")
@pytest.fixture(scope="module")
def env_fn():
return lambda: gym.make("PongNoFrameskip-v4")
def test_atari_preprocessing_grayscale(env_fn):
import cv2
env1 = env_fn()
env2 = AtariPreprocessing(
env_fn(), screen_size=84, grayscale_obs=True, frame_skip=1, noop_max=0
)
env3 = AtariPreprocessing(
env_fn(), screen_size=84, grayscale_obs=False, frame_skip=1, noop_max=0
)
env4 = AtariPreprocessing(
env_fn(),
screen_size=84,
grayscale_obs=True,
frame_skip=1,
noop_max=0,
grayscale_newaxis=True,
)
env1.seed(0)
env2.seed(0)
env3.seed(0)
env4.seed(0)
obs1 = env1.reset()
obs2 = env2.reset()
obs3 = env3.reset()
obs4 = env4.reset()
assert env1.observation_space.shape == (210, 160, 3)
assert env2.observation_space.shape == (84, 84)
assert env3.observation_space.shape == (84, 84, 3)
assert env4.observation_space.shape == (84, 84, 1)
assert obs1.shape == (210, 160, 3)
assert obs2.shape == (84, 84)
assert obs3.shape == (84, 84, 3)
assert obs4.shape == (84, 84, 1)
assert np.allclose(obs3, cv2.resize(obs1, (84, 84), interpolation=cv2.INTER_AREA))
obs3_gray = cv2.cvtColor(obs3, cv2.COLOR_RGB2GRAY)
# the edges of the numbers do not render quite the same in the grayscale, so we ignore them
assert np.allclose(obs2[10:38], obs3_gray[10:38])
# the paddle also do not render quite the same
assert np.allclose(obs2[44:], obs3_gray[44:])
# now add a channel axis and re-test
obs3_gray = obs3_gray.reshape(84, 84, 1)
assert np.allclose(obs4[10:38], obs3_gray[10:38])
assert np.allclose(obs4[44:], obs3_gray[44:])
env1.close()
env2.close()
env3.close()
env4.close()
def test_atari_preprocessing_scale(env_fn):
# arbitrarily chosen number for stepping into env. and ensuring all observations are in the required range
max_test_steps = 10
for grayscale in [True, False]:
for scaled in [True, False]:
env = AtariPreprocessing(
env_fn(),
screen_size=84,
grayscale_obs=grayscale,
scale_obs=scaled,
frame_skip=1,
noop_max=0,
)
obs = env.reset().flatten()
done, step_i = False, 0
max_obs = 1 if scaled else 255
assert (0 <= obs).all() and (
obs <= max_obs
).all(), "Obs. must be in range [0,{}]".format(max_obs)
while not done or step_i <= max_test_steps:
obs, _, done, _ = env.step(env.action_space.sample())
obs = obs.flatten()
assert (0 <= obs).all() and (
obs <= max_obs
).all(), "Obs. must be in range [0,{}]".format(max_obs)
step_i += 1
env.close()
| 3,027 | Python | .py | 82 | 28.95122 | 110 | 0.595092 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,363 | gray_scale_observation.py | WindyLab_Gym-PPS/gym/wrappers/gray_scale_observation.py | import numpy as np
from gym.spaces import Box
from gym import ObservationWrapper
class GrayScaleObservation(ObservationWrapper):
r"""Convert the image observation from RGB to gray scale."""
def __init__(self, env, keep_dim=False):
super(GrayScaleObservation, self).__init__(env)
self.keep_dim = keep_dim
assert (
len(env.observation_space.shape) == 3
and env.observation_space.shape[-1] == 3
)
obs_shape = self.observation_space.shape[:2]
if self.keep_dim:
self.observation_space = Box(
low=0, high=255, shape=(obs_shape[0], obs_shape[1], 1), dtype=np.uint8
)
else:
self.observation_space = Box(
low=0, high=255, shape=obs_shape, dtype=np.uint8
)
def observation(self, observation):
import cv2
observation = cv2.cvtColor(observation, cv2.COLOR_RGB2GRAY)
if self.keep_dim:
observation = np.expand_dims(observation, -1)
return observation
| 1,060 | Python | .py | 27 | 30 | 86 | 0.61501 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,364 | atari_preprocessing.py | WindyLab_Gym-PPS/gym/wrappers/atari_preprocessing.py | import numpy as np
import gym
from gym.spaces import Box
from gym.wrappers import TimeLimit
try:
import cv2
except ImportError:
cv2 = None
class AtariPreprocessing(gym.Wrapper):
r"""Atari 2600 preprocessings.
This class follows the guidelines in
Machado et al. (2018), "Revisiting the Arcade Learning Environment:
Evaluation Protocols and Open Problems for General Agents".
Specifically:
* NoopReset: obtain initial state by taking random number of no-ops on reset.
* Frame skipping: 4 by default
* Max-pooling: most recent two observations
* Termination signal when a life is lost: turned off by default. Not recommended by Machado et al. (2018).
* Resize to a square image: 84x84 by default
* Grayscale observation: optional
* Scale observation: optional
Args:
env (Env): environment
noop_max (int): max number of no-ops
frame_skip (int): the frequency at which the agent experiences the game.
screen_size (int): resize Atari frame
terminal_on_life_loss (bool): if True, then step() returns done=True whenever a
life is lost.
grayscale_obs (bool): if True, then gray scale observation is returned, otherwise, RGB observation
is returned.
grayscale_newaxis (bool): if True and grayscale_obs=True, then a channel axis is added to
grayscale observations to make them 3-dimensional.
scale_obs (bool): if True, then observation normalized in range [0,1] is returned. It also limits memory
optimization benefits of FrameStack Wrapper.
"""
def __init__(
self,
env,
noop_max=30,
frame_skip=4,
screen_size=84,
terminal_on_life_loss=False,
grayscale_obs=True,
grayscale_newaxis=False,
scale_obs=False,
):
super().__init__(env)
assert (
cv2 is not None
), "opencv-python package not installed! Try running pip install gym[atari] to get dependencies for atari"
assert frame_skip > 0
assert screen_size > 0
assert noop_max >= 0
if frame_skip > 1:
assert "NoFrameskip" in env.spec.id, (
"disable frame-skipping in the original env. for more than one"
" frame-skip as it will be done by the wrapper"
)
self.noop_max = noop_max
assert env.unwrapped.get_action_meanings()[0] == "NOOP"
self.frame_skip = frame_skip
self.screen_size = screen_size
self.terminal_on_life_loss = terminal_on_life_loss
self.grayscale_obs = grayscale_obs
self.grayscale_newaxis = grayscale_newaxis
self.scale_obs = scale_obs
# buffer of most recent two observations for max pooling
if grayscale_obs:
self.obs_buffer = [
np.empty(env.observation_space.shape[:2], dtype=np.uint8),
np.empty(env.observation_space.shape[:2], dtype=np.uint8),
]
else:
self.obs_buffer = [
np.empty(env.observation_space.shape, dtype=np.uint8),
np.empty(env.observation_space.shape, dtype=np.uint8),
]
self.ale = env.unwrapped.ale
self.lives = 0
self.game_over = False
_low, _high, _obs_dtype = (
(0, 255, np.uint8) if not scale_obs else (0, 1, np.float32)
)
_shape = (screen_size, screen_size, 1 if grayscale_obs else 3)
if grayscale_obs and not grayscale_newaxis:
_shape = _shape[:-1] # Remove channel axis
self.observation_space = Box(
low=_low, high=_high, shape=_shape, dtype=_obs_dtype
)
def step(self, action):
R = 0.0
for t in range(self.frame_skip):
_, reward, done, info = self.env.step(action)
R += reward
self.game_over = done
if self.terminal_on_life_loss:
new_lives = self.ale.lives()
done = done or new_lives < self.lives
self.lives = new_lives
if done:
break
if t == self.frame_skip - 2:
if self.grayscale_obs:
self.ale.getScreenGrayscale(self.obs_buffer[1])
else:
self.ale.getScreenRGB2(self.obs_buffer[1])
elif t == self.frame_skip - 1:
if self.grayscale_obs:
self.ale.getScreenGrayscale(self.obs_buffer[0])
else:
self.ale.getScreenRGB2(self.obs_buffer[0])
return self._get_obs(), R, done, info
def reset(self, **kwargs):
# NoopReset
self.env.reset(**kwargs)
noops = (
self.env.unwrapped.np_random.randint(1, self.noop_max + 1)
if self.noop_max > 0
else 0
)
for _ in range(noops):
_, _, done, _ = self.env.step(0)
if done:
self.env.reset(**kwargs)
self.lives = self.ale.lives()
if self.grayscale_obs:
self.ale.getScreenGrayscale(self.obs_buffer[0])
else:
self.ale.getScreenRGB2(self.obs_buffer[0])
self.obs_buffer[1].fill(0)
return self._get_obs()
def _get_obs(self):
if self.frame_skip > 1: # more efficient in-place pooling
np.maximum(self.obs_buffer[0], self.obs_buffer[1], out=self.obs_buffer[0])
obs = cv2.resize(
self.obs_buffer[0],
(self.screen_size, self.screen_size),
interpolation=cv2.INTER_AREA,
)
if self.scale_obs:
obs = np.asarray(obs, dtype=np.float32) / 255.0
else:
obs = np.asarray(obs, dtype=np.uint8)
if self.grayscale_obs and self.grayscale_newaxis:
obs = np.expand_dims(obs, axis=-1) # Add a channel axis
return obs
| 5,953 | Python | .py | 146 | 30.479452 | 115 | 0.590218 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,365 | pixel_observation.py | WindyLab_Gym-PPS/gym/wrappers/pixel_observation.py | import collections
from collections.abc import MutableMapping
import copy
import numpy as np
from gym import spaces
from gym import ObservationWrapper
STATE_KEY = "state"
class PixelObservationWrapper(ObservationWrapper):
"""Augment observations by pixel values."""
def __init__(
self, env, pixels_only=True, render_kwargs=None, pixel_keys=("pixels",)
):
"""Initializes a new pixel Wrapper.
Args:
env: The environment to wrap.
pixels_only: If `True` (default), the original observation returned
by the wrapped environment will be discarded, and a dictionary
observation will only include pixels. If `False`, the
observation dictionary will contain both the original
observations and the pixel observations.
render_kwargs: Optional `dict` containing keyword arguments passed
to the `self.render` method.
pixel_keys: Optional custom string specifying the pixel
observation's key in the `OrderedDict` of observations.
Defaults to 'pixels'.
Raises:
ValueError: If `env`'s observation spec is not compatible with the
wrapper. Supported formats are a single array, or a dict of
arrays.
ValueError: If `env`'s observation already contains any of the
specified `pixel_keys`.
"""
super(PixelObservationWrapper, self).__init__(env)
if render_kwargs is None:
render_kwargs = {}
for key in pixel_keys:
render_kwargs.setdefault(key, {})
render_mode = render_kwargs[key].pop("mode", "rgb_array")
assert render_mode == "rgb_array", render_mode
render_kwargs[key]["mode"] = "rgb_array"
wrapped_observation_space = env.observation_space
if isinstance(wrapped_observation_space, spaces.Box):
self._observation_is_dict = False
invalid_keys = set([STATE_KEY])
elif isinstance(wrapped_observation_space, (spaces.Dict, MutableMapping)):
self._observation_is_dict = True
invalid_keys = set(wrapped_observation_space.spaces.keys())
else:
raise ValueError("Unsupported observation space structure.")
if not pixels_only:
# Make sure that now keys in the `pixel_keys` overlap with
# `observation_keys`
overlapping_keys = set(pixel_keys) & set(invalid_keys)
if overlapping_keys:
raise ValueError(
"Duplicate or reserved pixel keys {!r}.".format(overlapping_keys)
)
if pixels_only:
self.observation_space = spaces.Dict()
elif self._observation_is_dict:
self.observation_space = copy.deepcopy(wrapped_observation_space)
else:
self.observation_space = spaces.Dict()
self.observation_space.spaces[STATE_KEY] = wrapped_observation_space
# Extend observation space with pixels.
pixels_spaces = {}
for pixel_key in pixel_keys:
pixels = self.env.render(**render_kwargs[pixel_key])
if np.issubdtype(pixels.dtype, np.integer):
low, high = (0, 255)
elif np.issubdtype(pixels.dtype, np.float):
low, high = (-float("inf"), float("inf"))
else:
raise TypeError(pixels.dtype)
pixels_space = spaces.Box(
shape=pixels.shape, low=low, high=high, dtype=pixels.dtype
)
pixels_spaces[pixel_key] = pixels_space
self.observation_space.spaces.update(pixels_spaces)
self._env = env
self._pixels_only = pixels_only
self._render_kwargs = render_kwargs
self._pixel_keys = pixel_keys
def observation(self, observation):
pixel_observation = self._add_pixel_observation(observation)
return pixel_observation
def _add_pixel_observation(self, wrapped_observation):
if self._pixels_only:
observation = collections.OrderedDict()
elif self._observation_is_dict:
observation = type(wrapped_observation)(wrapped_observation)
else:
observation = collections.OrderedDict()
observation[STATE_KEY] = wrapped_observation
pixel_observations = {
pixel_key: self.env.render(**self._render_kwargs[pixel_key])
for pixel_key in self._pixel_keys
}
observation.update(pixel_observations)
return observation
| 4,658 | Python | .py | 100 | 35.27 | 85 | 0.621938 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,366 | clip_action.py | WindyLab_Gym-PPS/gym/wrappers/clip_action.py | import numpy as np
from gym import ActionWrapper
from gym.spaces import Box
class ClipAction(ActionWrapper):
r"""Clip the continuous action within the valid bound."""
def __init__(self, env):
assert isinstance(env.action_space, Box)
super(ClipAction, self).__init__(env)
def action(self, action):
return np.clip(action, self.action_space.low, self.action_space.high)
| 407 | Python | .py | 10 | 35.7 | 77 | 0.715013 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,367 | filter_observation.py | WindyLab_Gym-PPS/gym/wrappers/filter_observation.py | import copy
from gym import spaces
from gym import ObservationWrapper
class FilterObservation(ObservationWrapper):
"""Filter dictionary observations by their keys.
Args:
env: The environment to wrap.
filter_keys: List of keys to be included in the observations.
Raises:
ValueError: If observation keys in not instance of None or
iterable.
ValueError: If any of the `filter_keys` are not included in
the original `env`'s observation space
"""
def __init__(self, env, filter_keys=None):
super(FilterObservation, self).__init__(env)
wrapped_observation_space = env.observation_space
assert isinstance(
wrapped_observation_space, spaces.Dict
), "FilterObservationWrapper is only usable with dict observations."
observation_keys = wrapped_observation_space.spaces.keys()
if filter_keys is None:
filter_keys = tuple(observation_keys)
missing_keys = set(key for key in filter_keys if key not in observation_keys)
if missing_keys:
raise ValueError(
"All the filter_keys must be included in the "
"original obsrevation space.\n"
"Filter keys: {filter_keys}\n"
"Observation keys: {observation_keys}\n"
"Missing keys: {missing_keys}".format(
filter_keys=filter_keys,
observation_keys=observation_keys,
missing_keys=missing_keys,
)
)
self.observation_space = type(wrapped_observation_space)(
[
(name, copy.deepcopy(space))
for name, space in wrapped_observation_space.spaces.items()
if name in filter_keys
]
)
self._env = env
self._filter_keys = tuple(filter_keys)
def observation(self, observation):
filter_observation = self._filter_observation(observation)
return filter_observation
def _filter_observation(self, observation):
observation = type(observation)(
[
(name, value)
for name, value in observation.items()
if name in self._filter_keys
]
)
return observation
| 2,341 | Python | .py | 57 | 29.842105 | 85 | 0.604231 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,368 | transform_reward.py | WindyLab_Gym-PPS/gym/wrappers/transform_reward.py | from gym import RewardWrapper
class TransformReward(RewardWrapper):
r"""Transform the reward via an arbitrary function.
Example::
>>> import gym
>>> env = gym.make('CartPole-v1')
>>> env = TransformReward(env, lambda r: 0.01*r)
>>> env.reset()
>>> observation, reward, done, info = env.step(env.action_space.sample())
>>> reward
0.01
Args:
env (Env): environment
f (callable): a function that transforms the reward
"""
def __init__(self, env, f):
super(TransformReward, self).__init__(env)
assert callable(f)
self.f = f
def reward(self, reward):
return self.f(reward)
| 704 | Python | .py | 21 | 26.047619 | 81 | 0.595556 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,369 | test_gray_scale_observation.py | WindyLab_Gym-PPS/gym/wrappers/test_gray_scale_observation.py | import pytest
import numpy as np
import gym
from gym.wrappers import GrayScaleObservation
from gym.wrappers import AtariPreprocessing
pytest.importorskip("atari_py")
pytest.importorskip("cv2")
@pytest.mark.parametrize(
"env_id", ["PongNoFrameskip-v0", "SpaceInvadersNoFrameskip-v0"]
)
@pytest.mark.parametrize("keep_dim", [True, False])
def test_gray_scale_observation(env_id, keep_dim):
gray_env = AtariPreprocessing(gym.make(env_id), screen_size=84, grayscale_obs=True)
rgb_env = AtariPreprocessing(gym.make(env_id), screen_size=84, grayscale_obs=False)
wrapped_env = GrayScaleObservation(rgb_env, keep_dim=keep_dim)
assert rgb_env.observation_space.shape[-1] == 3
seed = 0
gray_env.seed(seed)
wrapped_env.seed(seed)
gray_obs = gray_env.reset()
wrapped_obs = wrapped_env.reset()
if keep_dim:
assert wrapped_env.observation_space.shape[-1] == 1
assert len(wrapped_obs.shape) == 3
wrapped_obs = wrapped_obs.squeeze(-1)
else:
assert len(wrapped_env.observation_space.shape) == 2
assert len(wrapped_obs.shape) == 2
# ALE gray scale is slightly different, but no more than by one shade
assert np.allclose(gray_obs.astype("int32"), wrapped_obs.astype("int32"), atol=1)
| 1,270 | Python | .py | 30 | 37.833333 | 87 | 0.722177 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,370 | test_filter_observation.py | WindyLab_Gym-PPS/gym/wrappers/test_filter_observation.py | import pytest
import numpy as np
import gym
from gym import spaces
from gym.wrappers.filter_observation import FilterObservation
class FakeEnvironment(gym.Env):
def __init__(self, observation_keys=("state")):
self.observation_space = spaces.Dict(
{
name: spaces.Box(shape=(2,), low=-1, high=1, dtype=np.float32)
for name in observation_keys
}
)
self.action_space = spaces.Box(shape=(1,), low=-1, high=1, dtype=np.float32)
def render(self, width=32, height=32, *args, **kwargs):
del args
del kwargs
image_shape = (height, width, 3)
return np.zeros(image_shape, dtype=np.uint8)
def reset(self):
observation = self.observation_space.sample()
return observation
def step(self, action):
del action
observation = self.observation_space.sample()
reward, terminal, info = 0.0, False, {}
return observation, reward, terminal, info
FILTER_OBSERVATION_TEST_CASES = (
(("key1", "key2"), ("key1",)),
(("key1", "key2"), ("key1", "key2")),
(("key1",), None),
(("key1",), ("key1",)),
)
ERROR_TEST_CASES = (
("key", ValueError, "All the filter_keys must be included..*"),
(False, TypeError, "'bool' object is not iterable"),
(1, TypeError, "'int' object is not iterable"),
)
class TestFilterObservation(object):
@pytest.mark.parametrize(
"observation_keys,filter_keys", FILTER_OBSERVATION_TEST_CASES
)
def test_filter_observation(self, observation_keys, filter_keys):
env = FakeEnvironment(observation_keys=observation_keys)
# Make sure we are testing the right environment for the test.
observation_space = env.observation_space
assert isinstance(observation_space, spaces.Dict)
wrapped_env = FilterObservation(env, filter_keys=filter_keys)
assert isinstance(wrapped_env.observation_space, spaces.Dict)
if filter_keys is None:
filter_keys = tuple(observation_keys)
assert len(wrapped_env.observation_space.spaces) == len(filter_keys)
assert tuple(wrapped_env.observation_space.spaces.keys()) == tuple(filter_keys)
# Check that the added space item is consistent with the added observation.
observation = wrapped_env.reset()
assert len(observation) == len(filter_keys)
@pytest.mark.parametrize("filter_keys,error_type,error_match", ERROR_TEST_CASES)
def test_raises_with_incorrect_arguments(
self, filter_keys, error_type, error_match
):
env = FakeEnvironment(observation_keys=("key1", "key2"))
ValueError
with pytest.raises(error_type, match=error_match):
FilterObservation(env, filter_keys=filter_keys)
| 2,796 | Python | .py | 64 | 36.3125 | 87 | 0.660767 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,371 | __init__.py | WindyLab_Gym-PPS/gym/wrappers/__init__.py | from gym import error
from gym.wrappers.monitor import Monitor
from gym.wrappers.time_limit import TimeLimit
from gym.wrappers.filter_observation import FilterObservation
from gym.wrappers.atari_preprocessing import AtariPreprocessing
from gym.wrappers.time_aware_observation import TimeAwareObservation
from gym.wrappers.rescale_action import RescaleAction
from gym.wrappers.flatten_observation import FlattenObservation
from gym.wrappers.gray_scale_observation import GrayScaleObservation
from gym.wrappers.frame_stack import LazyFrames
from gym.wrappers.frame_stack import FrameStack
from gym.wrappers.transform_observation import TransformObservation
from gym.wrappers.transform_reward import TransformReward
from gym.wrappers.resize_observation import ResizeObservation
from gym.wrappers.clip_action import ClipAction
from gym.wrappers.record_episode_statistics import RecordEpisodeStatistics
from gym.wrappers.record_video import RecordVideo, capped_cubic_video_schedule
from gym.wrappers.customize_pps import PredatorPreySwarmCustomizer
from gym.wrappers.njp import NJP
| 1,077 | Python | .py | 19 | 55.684211 | 78 | 0.889414 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,372 | monitor.py | WindyLab_Gym-PPS/gym/wrappers/monitor.py | import json
import os
import numpy as np
import gym
import warnings
from gym import Wrapper
from gym import error, version, logger
from gym.wrappers.monitoring import stats_recorder, video_recorder
from gym.utils import atomic_write, closer
from gym.utils.json_utils import json_encode_np
FILE_PREFIX = "openaigym"
MANIFEST_PREFIX = FILE_PREFIX + ".manifest"
class Monitor(Wrapper):
def __init__(
self,
env,
directory,
video_callable=None,
force=False,
resume=False,
write_upon_reset=False,
uid=None,
mode=None,
):
super(Monitor, self).__init__(env)
warnings.warn(
"The Monitor wrapper is being deprecated in favor of gym.wrappers.RecordVideo and gym.wrappers.RecordEpisodeStatistics (see https://github.com/openai/gym/issues/2297)"
)
self.videos = []
self.stats_recorder = None
self.video_recorder = None
self.enabled = False
self.episode_id = 0
self._monitor_id = None
self.env_semantics_autoreset = env.metadata.get("semantics.autoreset")
self._start(
directory, video_callable, force, resume, write_upon_reset, uid, mode
)
def step(self, action):
self._before_step(action)
observation, reward, done, info = self.env.step(action)
done = self._after_step(observation, reward, done, info)
return observation, reward, done, info
def reset(self, **kwargs):
self._before_reset()
observation = self.env.reset(**kwargs)
self._after_reset(observation)
return observation
def set_monitor_mode(self, mode):
logger.info("Setting the monitor mode is deprecated and will be removed soon")
self._set_mode(mode)
def _start(
self,
directory,
video_callable=None,
force=False,
resume=False,
write_upon_reset=False,
uid=None,
mode=None,
):
"""Start monitoring.
Args:
directory (str): A per-training run directory where to record stats.
video_callable (Optional[function, False]): function that takes in the index of the episode and outputs a boolean, indicating whether we should record a video on this episode. The default (for video_callable is None) is to take perfect cubes, capped at 1000. False disables video recording.
force (bool): Clear out existing training data from this directory (by deleting every file prefixed with "openaigym.").
resume (bool): Retain the training data already in this directory, which will be merged with our new data
write_upon_reset (bool): Write the manifest file on each reset. (This is currently a JSON file, so writing it is somewhat expensive.)
uid (Optional[str]): A unique id used as part of the suffix for the file. By default, uses os.getpid().
mode (['evaluation', 'training']): Whether this is an evaluation or training episode.
"""
if self.env.spec is None:
logger.warn(
"Trying to monitor an environment which has no 'spec' set. This usually means you did not create it via 'gym.make', and is recommended only for advanced users."
)
env_id = "(unknown)"
else:
env_id = self.env.spec.id
self.directory = os.path.abspath(directory)
if not os.path.exists(self.directory):
logger.info("Creating monitor directory %s", self.directory)
os.makedirs(self.directory, exist_ok=True)
if video_callable is None:
video_callable = capped_cubic_video_schedule
elif video_callable == False:
video_callable = disable_videos
elif not callable(video_callable):
raise error.Error(
"You must provide a function, None, or False for video_callable, not {}: {}".format(
type(video_callable), video_callable
)
)
self.video_callable = video_callable
# Check on whether we need to clear anything
if force:
clear_monitor_files(self.directory)
elif not resume:
training_manifests = detect_training_manifests(self.directory)
if len(training_manifests) > 0:
raise error.Error(
"""Trying to write to monitor directory {} with existing monitor files: {}.
You should use a unique directory for each training run, or use 'force=True' to automatically clear previous monitor files.""".format(
directory, ", ".join(training_manifests[:5])
)
)
self._monitor_id = monitor_closer.register(self)
self.enabled = True
# We use the 'openai-gym' prefix to determine if a file is
# ours
self.file_prefix = FILE_PREFIX
self.file_infix = "{}.{}".format(self._monitor_id, uid if uid else os.getpid())
self.stats_recorder = stats_recorder.StatsRecorder(
self.directory,
"{}.episode_batch.{}".format(self.file_prefix, self.file_infix),
autoreset=self.env_semantics_autoreset,
env_id=env_id,
)
self.write_upon_reset = write_upon_reset
if mode is not None:
self._set_mode(mode)
def _flush(self, force=False):
"""Flush all relevant monitor information to disk."""
if not self.write_upon_reset and not force:
return
self.stats_recorder.flush()
# Give it a very distinguished name, since we need to pick it
# up from the filesystem later.
path = os.path.join(
self.directory,
"{}.manifest.{}.manifest.json".format(self.file_prefix, self.file_infix),
)
logger.debug("Writing training manifest file to %s", path)
with atomic_write.atomic_write(path) as f:
# We need to write relative paths here since people may
# move the training_dir around. It would be cleaner to
# already have the basenames rather than basename'ing
# manually, but this works for now.
json.dump(
{
"stats": os.path.basename(self.stats_recorder.path),
"videos": [
(os.path.basename(v), os.path.basename(m))
for v, m in self.videos
],
"env_info": self._env_info(),
},
f,
default=json_encode_np,
)
def close(self):
"""Flush all monitor data to disk and close any open rending windows."""
super(Monitor, self).close()
if not self.enabled:
return
self.stats_recorder.close()
if self.video_recorder is not None:
self._close_video_recorder()
self._flush(force=True)
# Stop tracking this for autoclose
monitor_closer.unregister(self._monitor_id)
self.enabled = False
logger.info(
"""Finished writing results. You can upload them to the scoreboard via gym.upload(%r)""",
self.directory,
)
def _set_mode(self, mode):
if mode == "evaluation":
type = "e"
elif mode == "training":
type = "t"
else:
raise error.Error(
'Invalid mode {}: must be "training" or "evaluation"', mode
)
self.stats_recorder.type = type
def _before_step(self, action):
if not self.enabled:
return
self.stats_recorder.before_step(action)
def _after_step(self, observation, reward, done, info):
if not self.enabled:
return done
if done and self.env_semantics_autoreset:
# For envs with BlockingReset wrapping VNCEnv, this observation will be the first one of the new episode
self.reset_video_recorder()
self.episode_id += 1
self._flush()
# Record stats
self.stats_recorder.after_step(observation, reward, done, info)
# Record video
self.video_recorder.capture_frame()
return done
def _before_reset(self):
if not self.enabled:
return
self.stats_recorder.before_reset()
def _after_reset(self, observation):
if not self.enabled:
return
# Reset the stat count
self.stats_recorder.after_reset(observation)
self.reset_video_recorder()
# Bump *after* all reset activity has finished
self.episode_id += 1
self._flush()
def reset_video_recorder(self):
# Close any existing video recorder
if self.video_recorder:
self._close_video_recorder()
# Start recording the next video.
#
# TODO: calculate a more correct 'episode_id' upon merge
self.video_recorder = video_recorder.VideoRecorder(
env=self.env,
base_path=os.path.join(
self.directory,
"{}.video.{}.video{:06}".format(
self.file_prefix, self.file_infix, self.episode_id
),
),
metadata={"episode_id": self.episode_id},
enabled=self._video_enabled(),
)
self.video_recorder.capture_frame()
def _close_video_recorder(self):
self.video_recorder.close()
if self.video_recorder.functional:
self.videos.append(
(self.video_recorder.path, self.video_recorder.metadata_path)
)
def _video_enabled(self):
return self.video_callable(self.episode_id)
def _env_info(self):
env_info = {
"gym_version": version.VERSION,
}
if self.env.spec:
env_info["env_id"] = self.env.spec.id
return env_info
def __del__(self):
# Make sure we've closed up shop when garbage collecting
self.close()
def get_total_steps(self):
return self.stats_recorder.total_steps
def get_episode_rewards(self):
return self.stats_recorder.episode_rewards
def get_episode_lengths(self):
return self.stats_recorder.episode_lengths
def detect_training_manifests(training_dir, files=None):
if files is None:
files = os.listdir(training_dir)
return [
os.path.join(training_dir, f)
for f in files
if f.startswith(MANIFEST_PREFIX + ".")
]
def detect_monitor_files(training_dir):
return [
os.path.join(training_dir, f)
for f in os.listdir(training_dir)
if f.startswith(FILE_PREFIX + ".")
]
def clear_monitor_files(training_dir):
files = detect_monitor_files(training_dir)
if len(files) == 0:
return
logger.info(
"Clearing %d monitor files from previous run (because force=True was provided)",
len(files),
)
for file in files:
os.unlink(file)
def capped_cubic_video_schedule(episode_id):
if episode_id < 1000:
return int(round(episode_id ** (1.0 / 3))) ** 3 == episode_id
else:
return episode_id % 1000 == 0
def disable_videos(episode_id):
return False
monitor_closer = closer.Closer()
# This method gets used for a sanity check in scoreboard/api.py. It's
# not intended for use outside of the gym codebase.
def _open_monitors():
return list(monitor_closer.closeables.values())
def load_env_info_from_manifests(manifests, training_dir):
env_infos = []
for manifest in manifests:
with open(manifest) as f:
contents = json.load(f)
env_infos.append(contents["env_info"])
env_info = collapse_env_infos(env_infos, training_dir)
return env_info
def load_results(training_dir):
if not os.path.exists(training_dir):
logger.error("Training directory %s not found", training_dir)
return
manifests = detect_training_manifests(training_dir)
if not manifests:
logger.error("No manifests found in training directory %s", training_dir)
return
logger.debug("Uploading data from manifest %s", ", ".join(manifests))
# Load up stats + video files
stats_files = []
videos = []
env_infos = []
for manifest in manifests:
with open(manifest) as f:
contents = json.load(f)
# Make these paths absolute again
stats_files.append(os.path.join(training_dir, contents["stats"]))
videos += [
(os.path.join(training_dir, v), os.path.join(training_dir, m))
for v, m in contents["videos"]
]
env_infos.append(contents["env_info"])
env_info = collapse_env_infos(env_infos, training_dir)
(
data_sources,
initial_reset_timestamps,
timestamps,
episode_lengths,
episode_rewards,
episode_types,
initial_reset_timestamp,
) = merge_stats_files(stats_files)
return {
"manifests": manifests,
"env_info": env_info,
"data_sources": data_sources,
"timestamps": timestamps,
"episode_lengths": episode_lengths,
"episode_rewards": episode_rewards,
"episode_types": episode_types,
"initial_reset_timestamps": initial_reset_timestamps,
"initial_reset_timestamp": initial_reset_timestamp,
"videos": videos,
}
def merge_stats_files(stats_files):
timestamps = []
episode_lengths = []
episode_rewards = []
episode_types = []
initial_reset_timestamps = []
data_sources = []
for i, path in enumerate(stats_files):
with open(path) as f:
content = json.load(f)
if len(content["timestamps"]) == 0:
continue # so empty file doesn't mess up results, due to null initial_reset_timestamp
data_sources += [i] * len(content["timestamps"])
timestamps += content["timestamps"]
episode_lengths += content["episode_lengths"]
episode_rewards += content["episode_rewards"]
# Recent addition
episode_types += content.get("episode_types", [])
# Keep track of where each episode came from.
initial_reset_timestamps.append(content["initial_reset_timestamp"])
idxs = np.argsort(timestamps)
timestamps = np.array(timestamps)[idxs].tolist()
episode_lengths = np.array(episode_lengths)[idxs].tolist()
episode_rewards = np.array(episode_rewards)[idxs].tolist()
data_sources = np.array(data_sources)[idxs].tolist()
if episode_types:
episode_types = np.array(episode_types)[idxs].tolist()
else:
episode_types = None
if len(initial_reset_timestamps) > 0:
initial_reset_timestamp = min(initial_reset_timestamps)
else:
initial_reset_timestamp = 0
return (
data_sources,
initial_reset_timestamps,
timestamps,
episode_lengths,
episode_rewards,
episode_types,
initial_reset_timestamp,
)
# TODO training_dir isn't used except for error messages, clean up the layering
def collapse_env_infos(env_infos, training_dir):
assert len(env_infos) > 0
first = env_infos[0]
for other in env_infos[1:]:
if first != other:
raise error.Error(
"Found two unequal env_infos: {} and {}. This usually indicates that your training directory {} has commingled results from multiple runs.".format(
first, other, training_dir
)
)
for key in ["env_id", "gym_version"]:
if key not in first:
raise error.Error(
"env_info {} from training directory {} is missing expected key {}. This is unexpected and likely indicates a bug in gym.".format(
first, training_dir, key
)
)
return first
| 16,078 | Python | .py | 398 | 30.819095 | 302 | 0.611186 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,373 | record_episode_statistics.py | WindyLab_Gym-PPS/gym/wrappers/record_episode_statistics.py | import time
from collections import deque
import numpy as np
import gym
class RecordEpisodeStatistics(gym.Wrapper):
def __init__(self, env, deque_size=100):
super(RecordEpisodeStatistics, self).__init__(env)
self.num_envs = getattr(env, "num_envs", 1)
self.t0 = (
time.time()
) # TODO: use perf_counter when gym removes Python 2 support
self.episode_count = 0
self.episode_returns = None
self.episode_lengths = None
self.return_queue = deque(maxlen=deque_size)
self.length_queue = deque(maxlen=deque_size)
self.is_vector_env = getattr(env, "is_vector_env", False)
def reset(self, **kwargs):
observations = super(RecordEpisodeStatistics, self).reset(**kwargs)
self.episode_returns = np.zeros(self.num_envs, dtype=np.float32)
self.episode_lengths = np.zeros(self.num_envs, dtype=np.int32)
return observations
def step(self, action):
observations, rewards, dones, infos = super(RecordEpisodeStatistics, self).step(
action
)
self.episode_returns += rewards
self.episode_lengths += 1
if not self.is_vector_env:
infos = [infos]
dones = [dones]
for i in range(len(dones)):
if dones[i]:
infos[i] = infos[i].copy()
episode_return = self.episode_returns[i]
episode_length = self.episode_lengths[i]
episode_info = {
"r": episode_return,
"l": episode_length,
"t": round(time.time() - self.t0, 6),
}
infos[i]["episode"] = episode_info
self.return_queue.append(episode_return)
self.length_queue.append(episode_length)
self.episode_count += 1
self.episode_returns[i] = 0
self.episode_lengths[i] = 0
return (
observations,
rewards,
dones if self.is_vector_env else dones[0],
infos if self.is_vector_env else infos[0],
)
| 2,141 | Python | .py | 53 | 29.283019 | 88 | 0.571017 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,374 | record_video.py | WindyLab_Gym-PPS/gym/wrappers/record_video.py | import os
import gym
from typing import Callable
import warnings
from gym.wrappers.monitoring import video_recorder
def capped_cubic_video_schedule(episode_id):
if episode_id < 1000:
return int(round(episode_id ** (1.0 / 3))) ** 3 == episode_id
else:
return episode_id % 1000 == 0
class RecordVideo(gym.Wrapper):
def __init__(
self,
env,
video_folder: str,
episode_trigger: Callable[[int], bool] = None,
step_trigger: Callable[[int], bool] = None,
video_length: int = 0,
name_prefix: str = "rl-video",
):
super(RecordVideo, self).__init__(env)
if episode_trigger is None and step_trigger is None:
episode_trigger = capped_cubic_video_schedule
trigger_count = sum([x is not None for x in [episode_trigger, step_trigger]])
assert trigger_count == 1, "Must specify exactly one trigger"
self.episode_trigger = episode_trigger
self.step_trigger = step_trigger
self.video_recorder = None
self.video_folder = os.path.abspath(video_folder)
# Create output folder if needed
if os.path.isdir(self.video_folder):
warnings.warn(
f"Overwriting existing videos at {self.video_folder} folder (try specifying a different `video_folder` for the `RecordVideo` wrapper if this is not desired)"
)
os.makedirs(self.video_folder, exist_ok=True)
self.name_prefix = name_prefix
self.step_id = 0
self.video_length = video_length
self.recording = False
self.recorded_frames = 0
self.is_vector_env = getattr(env, "is_vector_env", False)
self.episode_id = 0
def reset(self, **kwargs):
observations = super(RecordVideo, self).reset(**kwargs)
if self._video_enabled():
self.start_video_recorder()
return observations
def start_video_recorder(self):
self.close_video_recorder()
video_name = f"{self.name_prefix}-step-{self.step_id}"
if self.episode_trigger:
video_name = f"{self.name_prefix}-episode-{self.episode_id}"
base_path = os.path.join(self.video_folder, video_name)
self.video_recorder = video_recorder.VideoRecorder(
env=self.env,
base_path=base_path,
metadata={"step_id": self.step_id, "episode_id": self.episode_id},
)
self.video_recorder.capture_frame()
self.recorded_frames = 1
self.recording = True
def _video_enabled(self):
if self.step_trigger:
return self.step_trigger(self.step_id)
else:
return self.episode_trigger(self.step_id)
def step(self, action):
observations, rewards, dones, infos = super(RecordVideo, self).step(action)
self.step_id += 1
if self.recording:
self.video_recorder.capture_frame()
self.recorded_frames += 1
if self.video_length > 0:
if self.recorded_frames > self.video_length:
self.close_video_recorder()
else:
if not self.is_vector_env:
if dones:
self.episode_id += 1
self.close_video_recorder()
elif dones[0]:
self.episode_id += 1
self.close_video_recorder()
elif self._video_enabled():
self.start_video_recorder()
return observations, rewards, dones, infos
def close_video_recorder(self) -> None:
if self.recording:
self.video_recorder.close()
self.recording = False
self.recorded_frames = 1
| 3,740 | Python | .py | 91 | 30.934066 | 173 | 0.601875 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,375 | transform_observation.py | WindyLab_Gym-PPS/gym/wrappers/transform_observation.py | from gym import ObservationWrapper
class TransformObservation(ObservationWrapper):
r"""Transform the observation via an arbitrary function.
Example::
>>> import gym
>>> env = gym.make('CartPole-v1')
>>> env = TransformObservation(env, lambda obs: obs + 0.1*np.random.randn(*obs.shape))
>>> env.reset()
array([-0.08319338, 0.04635121, -0.07394746, 0.20877492])
Args:
env (Env): environment
f (callable): a function that transforms the observation
"""
def __init__(self, env, f):
super(TransformObservation, self).__init__(env)
assert callable(f)
self.f = f
def observation(self, observation):
return self.f(observation)
| 741 | Python | .py | 19 | 31.684211 | 94 | 0.638655 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,376 | test_frame_stack.py | WindyLab_Gym-PPS/gym/wrappers/test_frame_stack.py | import pytest
pytest.importorskip("atari_py")
import numpy as np
import gym
from gym.wrappers import FrameStack
try:
import lz4
except ImportError:
lz4 = None
@pytest.mark.parametrize("env_id", ["CartPole-v1", "Pendulum-v0", "Pong-v0"])
@pytest.mark.parametrize("num_stack", [2, 3, 4])
@pytest.mark.parametrize(
"lz4_compress",
[
pytest.param(
True,
marks=pytest.mark.skipif(
lz4 is None, reason="Need lz4 to run tests with compression"
),
),
False,
],
)
def test_frame_stack(env_id, num_stack, lz4_compress):
env = gym.make(env_id)
shape = env.observation_space.shape
env = FrameStack(env, num_stack, lz4_compress)
assert env.observation_space.shape == (num_stack,) + shape
assert env.observation_space.dtype == env.env.observation_space.dtype
obs = env.reset()
obs = np.asarray(obs)
assert obs.shape == (num_stack,) + shape
for i in range(1, num_stack):
assert np.allclose(obs[i - 1], obs[i])
obs, _, _, _ = env.step(env.action_space.sample())
obs = np.asarray(obs)
assert obs.shape == (num_stack,) + shape
for i in range(1, num_stack - 1):
assert np.allclose(obs[i - 1], obs[i])
assert not np.allclose(obs[-1], obs[-2])
obs, _, _, _ = env.step(env.action_space.sample())
assert len(obs) == num_stack
| 1,387 | Python | .py | 42 | 27.642857 | 77 | 0.632012 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,377 | test_record_video.py | WindyLab_Gym-PPS/gym/wrappers/test_record_video.py | import pytest
import os
import shutil
import gym
from gym.wrappers import RecordEpisodeStatistics, RecordVideo
def test_record_video_using_default_trigger():
env = gym.make("CartPole-v1")
env = gym.wrappers.RecordVideo(env, "videos")
env.reset()
for _ in range(199):
action = env.action_space.sample()
_, _, done, _ = env.step(action)
if done:
env.reset()
env.close()
assert os.path.isdir("videos")
mp4_files = [file for file in os.listdir("videos") if file.endswith(".mp4")]
assert len(mp4_files) == env.episode_id
shutil.rmtree("videos")
def test_record_video_step_trigger():
env = gym.make("CartPole-v1")
env = gym.wrappers.RecordVideo(env, "videos", step_trigger=lambda x: x % 100 == 0)
env.reset()
for _ in range(199):
action = env.action_space.sample()
_, _, done, _ = env.step(action)
if done:
env.reset()
env.close()
assert os.path.isdir("videos")
mp4_files = [file for file in os.listdir("videos") if file.endswith(".mp4")]
assert len(mp4_files) == 2
shutil.rmtree("videos")
def make_env(gym_id, seed):
def thunk():
env = gym.make(gym_id)
env.seed(seed)
env.action_space.seed(seed)
env.observation_space.seed(seed)
if seed == 1:
env = gym.wrappers.RecordVideo(
env, "videos", step_trigger=lambda x: x % 100 == 0
)
return env
return thunk
def test_record_video_within_vector():
envs = gym.vector.SyncVectorEnv([make_env("CartPole-v1", 1 + i) for i in range(2)])
envs = gym.wrappers.RecordEpisodeStatistics(envs)
envs.reset()
for i in range(199):
_, _, _, infos = envs.step(envs.action_space.sample())
for info in infos:
if "episode" in info.keys():
print(f"i, episode_reward={info['episode']['r']}")
break
assert os.path.isdir("videos")
mp4_files = [file for file in os.listdir("videos") if file.endswith(".mp4")]
assert len(mp4_files) == 2
shutil.rmtree("videos")
| 2,115 | Python | .py | 59 | 29.050847 | 87 | 0.612903 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,378 | test_transform_reward.py | WindyLab_Gym-PPS/gym/wrappers/test_transform_reward.py | import pytest
import numpy as np
import gym
from gym.wrappers import TransformReward
@pytest.mark.parametrize("env_id", ["CartPole-v1", "Pendulum-v0"])
def test_transform_reward(env_id):
# use case #1: scale
scales = [0.1, 200]
for scale in scales:
env = gym.make(env_id)
wrapped_env = TransformReward(gym.make(env_id), lambda r: scale * r)
action = env.action_space.sample()
env.seed(0)
env.reset()
wrapped_env.seed(0)
wrapped_env.reset()
_, reward, _, _ = env.step(action)
_, wrapped_reward, _, _ = wrapped_env.step(action)
assert wrapped_reward == scale * reward
del env, wrapped_env
# use case #2: clip
min_r = -0.0005
max_r = 0.0002
env = gym.make(env_id)
wrapped_env = TransformReward(gym.make(env_id), lambda r: np.clip(r, min_r, max_r))
action = env.action_space.sample()
env.seed(0)
env.reset()
wrapped_env.seed(0)
wrapped_env.reset()
_, reward, _, _ = env.step(action)
_, wrapped_reward, _, _ = wrapped_env.step(action)
assert abs(wrapped_reward) < abs(reward)
assert wrapped_reward == -0.0005 or wrapped_reward == 0.0002
del env, wrapped_env
# use case #3: sign
env = gym.make(env_id)
wrapped_env = TransformReward(gym.make(env_id), lambda r: np.sign(r))
env.seed(0)
env.reset()
wrapped_env.seed(0)
wrapped_env.reset()
for _ in range(1000):
action = env.action_space.sample()
_, wrapped_reward, done, _ = wrapped_env.step(action)
assert wrapped_reward in [-1.0, 0.0, 1.0]
if done:
break
del env, wrapped_env
| 1,669 | Python | .py | 49 | 27.959184 | 87 | 0.618929 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,379 | time_aware_observation.py | WindyLab_Gym-PPS/gym/wrappers/time_aware_observation.py | import numpy as np
from gym.spaces import Box
from gym import ObservationWrapper
class TimeAwareObservation(ObservationWrapper):
r"""Augment the observation with current time step in the trajectory.
.. note::
Currently it only works with one-dimensional observation space. It doesn't
support pixel observation space yet.
"""
def __init__(self, env):
super(TimeAwareObservation, self).__init__(env)
assert isinstance(env.observation_space, Box)
assert env.observation_space.dtype == np.float32
low = np.append(self.observation_space.low, 0.0)
high = np.append(self.observation_space.high, np.inf)
self.observation_space = Box(low, high, dtype=np.float32)
def observation(self, observation):
return np.append(observation, self.t)
def step(self, action):
self.t += 1
return super(TimeAwareObservation, self).step(action)
def reset(self, **kwargs):
self.t = 0
return super(TimeAwareObservation, self).reset(**kwargs)
| 1,053 | Python | .py | 24 | 37.041667 | 82 | 0.694417 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,380 | resize_observation.py | WindyLab_Gym-PPS/gym/wrappers/resize_observation.py | import numpy as np
from gym.spaces import Box
from gym import ObservationWrapper
class ResizeObservation(ObservationWrapper):
r"""Downsample the image observation to a square image."""
def __init__(self, env, shape):
super(ResizeObservation, self).__init__(env)
if isinstance(shape, int):
shape = (shape, shape)
assert all(x > 0 for x in shape), shape
self.shape = tuple(shape)
obs_shape = self.shape + self.observation_space.shape[2:]
self.observation_space = Box(low=0, high=255, shape=obs_shape, dtype=np.uint8)
def observation(self, observation):
import cv2
observation = cv2.resize(
observation, self.shape[::-1], interpolation=cv2.INTER_AREA
)
if observation.ndim == 2:
observation = np.expand_dims(observation, -1)
return observation
| 884 | Python | .py | 21 | 34.285714 | 86 | 0.655374 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,381 | time_limit.py | WindyLab_Gym-PPS/gym/wrappers/time_limit.py | import gym
class TimeLimit(gym.Wrapper):
def __init__(self, env, max_episode_steps=None):
super(TimeLimit, self).__init__(env)
if max_episode_steps is None and self.env.spec is not None:
max_episode_steps = env.spec.max_episode_steps
if self.env.spec is not None:
self.env.spec.max_episode_steps = max_episode_steps
self._max_episode_steps = max_episode_steps
self._elapsed_steps = None
def step(self, action):
assert (
self._elapsed_steps is not None
), "Cannot call env.step() before calling reset()"
observation, reward, done, info = self.env.step(action)
self._elapsed_steps += 1
if self._elapsed_steps >= self._max_episode_steps:
info["TimeLimit.truncated"] = not done
done = True
return observation, reward, done, info
def reset(self, **kwargs):
self._elapsed_steps = 0
return self.env.reset(**kwargs)
| 988 | Python | .py | 23 | 34.130435 | 67 | 0.621228 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,382 | frame_stack.py | WindyLab_Gym-PPS/gym/wrappers/frame_stack.py | from collections import deque
import numpy as np
from gym.spaces import Box
from gym import ObservationWrapper
class LazyFrames(object):
r"""Ensures common frames are only stored once to optimize memory use.
To further reduce the memory use, it is optionally to turn on lz4 to
compress the observations.
.. note::
This object should only be converted to numpy array just before forward pass.
Args:
lz4_compress (bool): use lz4 to compress the frames internally
"""
__slots__ = ("frame_shape", "dtype", "shape", "lz4_compress", "_frames")
def __init__(self, frames, lz4_compress=False):
self.frame_shape = tuple(frames[0].shape)
self.shape = (len(frames),) + self.frame_shape
self.dtype = frames[0].dtype
if lz4_compress:
from lz4.block import compress
frames = [compress(frame) for frame in frames]
self._frames = frames
self.lz4_compress = lz4_compress
def __array__(self, dtype=None):
arr = self[:]
if dtype is not None:
return arr.astype(dtype)
return arr
def __len__(self):
return self.shape[0]
def __getitem__(self, int_or_slice):
if isinstance(int_or_slice, int):
return self._check_decompress(self._frames[int_or_slice]) # single frame
return np.stack(
[self._check_decompress(f) for f in self._frames[int_or_slice]], axis=0
)
def __eq__(self, other):
return self.__array__() == other
def _check_decompress(self, frame):
if self.lz4_compress:
from lz4.block import decompress
return np.frombuffer(decompress(frame), dtype=self.dtype).reshape(
self.frame_shape
)
return frame
class FrameStack(ObservationWrapper):
r"""Observation wrapper that stacks the observations in a rolling manner.
For example, if the number of stacks is 4, then the returned observation contains
the most recent 4 observations. For environment 'Pendulum-v0', the original observation
is an array with shape [3], so if we stack 4 observations, the processed observation
has shape [4, 3].
.. note::
To be memory efficient, the stacked observations are wrapped by :class:`LazyFrame`.
.. note::
The observation space must be `Box` type. If one uses `Dict`
as observation space, it should apply `FlattenDictWrapper` at first.
Example::
>>> import gym
>>> env = gym.make('PongNoFrameskip-v0')
>>> env = FrameStack(env, 4)
>>> env.observation_space
Box(4, 210, 160, 3)
Args:
env (Env): environment object
num_stack (int): number of stacks
lz4_compress (bool): use lz4 to compress the frames internally
"""
def __init__(self, env, num_stack, lz4_compress=False):
super(FrameStack, self).__init__(env)
self.num_stack = num_stack
self.lz4_compress = lz4_compress
self.frames = deque(maxlen=num_stack)
low = np.repeat(self.observation_space.low[np.newaxis, ...], num_stack, axis=0)
high = np.repeat(
self.observation_space.high[np.newaxis, ...], num_stack, axis=0
)
self.observation_space = Box(
low=low, high=high, dtype=self.observation_space.dtype
)
def observation(self):
assert len(self.frames) == self.num_stack, (len(self.frames), self.num_stack)
return LazyFrames(list(self.frames), self.lz4_compress)
def step(self, action):
observation, reward, done, info = self.env.step(action)
self.frames.append(observation)
return self.observation(), reward, done, info
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
[self.frames.append(observation) for _ in range(self.num_stack)]
return self.observation()
| 3,939 | Python | .py | 90 | 35.611111 | 91 | 0.641865 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,383 | njp.py | WindyLab_Gym-PPS/gym/wrappers/njp.py | import os
import json
from typing import Any
import gym
import argparse
import numpy as np
class Agent:
def __init__(self, adversary=False):
self.adversary = adversary
class NJP(gym.Wrapper):
def __init__(self, env, args):
super(NJP, self).__init__(env)
if isinstance(args, argparse.Namespace):
args_ = vars(args).items()
elif isinstance(args, dict):
args_ = args.items()
elif isinstance(args, str):
print(f"Retrieving customized param from '{args}'")
with open(args, "r") as file:
args_ = json.load(file).items()
else:
raise ValueError("Invalid argument type. Parameters must be a dictionary, or argparse.Namespace, or a json file directory")
for attr, value in args_:
self.set_param(attr, value)
self.__reinit__()
print('Environment parameter customization finished.')
self.env.n_p = args.n_p
self.env.n_e = args.n_e
self.env.pursuer_strategy = args.pursuer_strategy
self.env.escaper_strategy = args.escaper_strategy
self.env.is_periodic = args.is_periodic
self.env.dynamics_mode = args.dynamics_mode
self.env.render_traj = args.render_traj
self.env.traj_len = args.traj_len
self.env.billiards_mode = args.billiards_mode
self.num_prey = args.n_e
self.num_predator = args.n_p
self.agents = [Agent() for _ in range(self.num_prey)] + [Agent(adversary=True) for _ in range(self.num_predator)]
self.agent_types = ['adversary', 'agent']
env.__reinit__()
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
print('NJP environment initialized successfully.')
def set_param(self, name: str, value: Any) -> None:
if name not in self.env.param_list:
raise KeyError(f"Parameter '{name}' does not exist!"
)
setattr(self.env, name, value)
self.__reinit__()
def get_param(self, name: str) -> Any:
if name not in self.env.param_list:
raise KeyError(f"Parameter '{name}' does not exist!"
)
return getattr(self.env, name)
def __reinit__(self):
self.env.__reinit__()
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
def dos_and_doa(self, x, h, T, N, D):
k = [0] * (self.num_prey)
k_h = [0] * (self.num_prey)
distances = []
distances_h = []
assert np.shape(x)[1] == np.shape(h)[1]
for t in range(np.shape(x)[2]):
for j in range(np.shape(x)[1]):
k[j] = self._find_nearest_neighbors_DOS(x[:, :, t], j)
k_h[j] = self._find_nearest_neighbors_DOA(h[:, :, t], j)
distances.append(k[j])
distances_h.append(k_h[j])
DOS = np.sum(distances) / (T * N * D)
DOA = np.sum(distances_h) / (2 * T * N)
return DOS, DOA
def dos_and_doa_one_episode(self, x, h, N, D):
k = [0] * (self.num_prey)
k_h = [0] * (self.num_prey)
distances = []
distances_h = []
assert np.shape(x)[1] == np.shape(h)[1]
for i in range(np.shape(x)[1]):
k[i] = self._find_nearest_neighbors_DOS(x, i)
k_h[i] = self._find_nearest_neighbors_DOA(h, i)
distances.append(k[i])
distances_h.append(k_h[i])
DOS = np.sum(distances) / (N * D)
DOA = np.sum(distances_h) / (2 * N)
return DOS, DOA
def _find_nearest_neighbors_DOS(self, x, i):
distances = []
for j in range(np.shape(x)[1]):
if j != i:
distances.append(np.linalg.norm(x[:, i] - x[:, j]))
return np.min(distances)
def _find_nearest_neighbors_DOA(self, x, i):
distances = []
for j in range(np.shape(x)[1]):
if j != i:
distances.append(np.linalg.norm(x[:, i] + x[:, j]))
return np.min(distances) | 4,189 | Python | .py | 99 | 31.979798 | 135 | 0.55704 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,384 | test_time_aware_observation.py | WindyLab_Gym-PPS/gym/wrappers/test_time_aware_observation.py | import pytest
import gym
from gym.wrappers import TimeAwareObservation
@pytest.mark.parametrize("env_id", ["CartPole-v1", "Pendulum-v0"])
def test_time_aware_observation(env_id):
env = gym.make(env_id)
wrapped_env = TimeAwareObservation(env)
assert wrapped_env.observation_space.shape[0] == env.observation_space.shape[0] + 1
obs = env.reset()
wrapped_obs = wrapped_env.reset()
assert wrapped_env.t == 0.0
assert wrapped_obs[-1] == 0.0
assert wrapped_obs.shape[0] == obs.shape[0] + 1
wrapped_obs, _, _, _ = wrapped_env.step(env.action_space.sample())
assert wrapped_env.t == 1.0
assert wrapped_obs[-1] == 1.0
assert wrapped_obs.shape[0] == obs.shape[0] + 1
wrapped_obs, _, _, _ = wrapped_env.step(env.action_space.sample())
assert wrapped_env.t == 2.0
assert wrapped_obs[-1] == 2.0
assert wrapped_obs.shape[0] == obs.shape[0] + 1
wrapped_obs = wrapped_env.reset()
assert wrapped_env.t == 0.0
assert wrapped_obs[-1] == 0.0
assert wrapped_obs.shape[0] == obs.shape[0] + 1
| 1,058 | Python | .py | 25 | 37.8 | 87 | 0.66439 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,385 | stats_recorder.py | WindyLab_Gym-PPS/gym/wrappers/monitoring/stats_recorder.py | import json
import os
import time
from gym import error
from gym.utils import atomic_write
from gym.utils.json_utils import json_encode_np
class StatsRecorder(object):
def __init__(self, directory, file_prefix, autoreset=False, env_id=None):
self.autoreset = autoreset
self.env_id = env_id
self.initial_reset_timestamp = None
self.directory = directory
self.file_prefix = file_prefix
self.episode_lengths = []
self.episode_rewards = []
self.episode_types = [] # experimental addition
self._type = "t"
self.timestamps = []
self.steps = None
self.total_steps = 0
self.rewards = None
self.done = None
self.closed = False
filename = "{}.stats.json".format(self.file_prefix)
self.path = os.path.join(self.directory, filename)
@property
def type(self):
return self._type
@type.setter
def type(self, type):
if type not in ["t", "e"]:
raise error.Error(
"Invalid episode type {}: must be t for training or e for evaluation",
type,
)
self._type = type
def before_step(self, action):
assert not self.closed
if self.done:
raise error.ResetNeeded(
"Trying to step environment which is currently done. While the monitor is active for {}, you cannot step beyond the end of an episode. Call 'env.reset()' to start the next episode.".format(
self.env_id
)
)
elif self.steps is None:
raise error.ResetNeeded(
"Trying to step an environment before reset. While the monitor is active for {}, you must call 'env.reset()' before taking an initial step.".format(
self.env_id
)
)
def after_step(self, observation, reward, done, info):
self.steps += 1
self.total_steps += 1
self.rewards += reward
self.done = done
if done:
self.save_complete()
if done:
if self.autoreset:
self.before_reset()
self.after_reset(observation)
def before_reset(self):
assert not self.closed
if self.done is not None and not self.done and self.steps > 0:
raise error.Error(
"Tried to reset environment which is not done. While the monitor is active for {}, you cannot call reset() unless the episode is over.".format(
self.env_id
)
)
self.done = False
if self.initial_reset_timestamp is None:
self.initial_reset_timestamp = time.time()
def after_reset(self, observation):
self.steps = 0
self.rewards = 0
# We write the type at the beginning of the episode. If a user
# changes the type, it's more natural for it to apply next
# time the user calls reset().
self.episode_types.append(self._type)
def save_complete(self):
if self.steps is not None:
self.episode_lengths.append(self.steps)
self.episode_rewards.append(float(self.rewards))
self.timestamps.append(time.time())
def close(self):
self.flush()
self.closed = True
def flush(self):
if self.closed:
return
with atomic_write.atomic_write(self.path) as f:
json.dump(
{
"initial_reset_timestamp": self.initial_reset_timestamp,
"timestamps": self.timestamps,
"episode_lengths": self.episode_lengths,
"episode_rewards": self.episode_rewards,
"episode_types": self.episode_types,
},
f,
default=json_encode_np,
)
| 3,924 | Python | .py | 102 | 27.578431 | 205 | 0.573796 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,386 | video_recorder.py | WindyLab_Gym-PPS/gym/wrappers/monitoring/video_recorder.py | import json
import os
import os.path
import subprocess
import tempfile
from io import StringIO
import distutils.spawn
import distutils.version
import numpy as np
from gym import error, logger
from gym.utils import closer
def touch(path):
open(path, "a").close()
video_recorder_closer = closer.Closer()
class VideoRecorder(object):
"""VideoRecorder renders a nice movie of a rollout, frame by frame. It
comes with an `enabled` option so you can still use the same code
on episodes where you don't want to record video.
Note:
You are responsible for calling `close` on a created
VideoRecorder, or else you may leak an encoder process.
Args:
env (Env): Environment to take video of.
path (Optional[str]): Path to the video file; will be randomly chosen if omitted.
base_path (Optional[str]): Alternatively, path to the video file without extension, which will be added.
metadata (Optional[dict]): Contents to save to the metadata file.
enabled (bool): Whether to actually record video, or just no-op (for convenience)
"""
def __init__(self, env, path=None, metadata=None, enabled=True, base_path=None):
modes = env.metadata.get("render.modes", [])
self._async = env.metadata.get("semantics.async")
self.enabled = enabled
self._closed = False
self._recorder_id = video_recorder_closer.register(self)
# Don't bother setting anything else if not enabled
if not self.enabled:
return
self.ansi_mode = False
if "rgb_array" not in modes:
if "ansi" in modes:
self.ansi_mode = True
else:
logger.info(
'Disabling video recorder because {} neither supports video mode "rgb_array" nor "ansi".'.format(
env
)
)
# Whoops, turns out we shouldn't be enabled after all
self.enabled = False
return
if path is not None and base_path is not None:
raise error.Error("You can pass at most one of `path` or `base_path`.")
self.last_frame = None
self.env = env
required_ext = ".json" if self.ansi_mode else ".mp4"
if path is None:
if base_path is not None:
# Base path given, append ext
path = base_path + required_ext
else:
# Otherwise, just generate a unique filename
with tempfile.NamedTemporaryFile(
suffix=required_ext, delete=False
) as f:
path = f.name
self.path = path
path_base, actual_ext = os.path.splitext(self.path)
if actual_ext != required_ext:
hint = (
" HINT: The environment is text-only, therefore we're recording its text output in a structured JSON format."
if self.ansi_mode
else ""
)
raise error.Error(
"Invalid path given: {} -- must have file extension {}.{}".format(
self.path, required_ext, hint
)
)
# Touch the file in any case, so we know it's present. (This
# corrects for platform platform differences. Using ffmpeg on
# OS X, the file is precreated, but not on Linux.
touch(path)
self.frames_per_sec = env.metadata.get("video.frames_per_second", 30)
self.output_frames_per_sec = env.metadata.get(
"video.output_frames_per_second", self.frames_per_sec
)
self.encoder = None # lazily start the process
self.broken = False
# Dump metadata
self.metadata = metadata or {}
self.metadata["content_type"] = (
"video/vnd.openai.ansivid" if self.ansi_mode else "video/mp4"
)
self.metadata_path = "{}.meta.json".format(path_base)
self.write_metadata()
logger.info("Starting new video recorder writing to %s", self.path)
self.empty = True
@property
def functional(self):
return self.enabled and not self.broken
def capture_frame(self):
"""Render the given `env` and add the resulting frame to the video."""
if not self.functional:
return
if self._closed:
logger.warn(
"The video recorder has been closed and no frames will be captured anymore."
)
return
logger.debug("Capturing video frame: path=%s", self.path)
render_mode = "ansi" if self.ansi_mode else "rgb_array"
frame = self.env.render(mode=render_mode)
if frame is None:
if self._async:
return
else:
# Indicates a bug in the environment: don't want to raise
# an error here.
logger.warn(
"Env returned None on render(). Disabling further rendering for video recorder by marking as disabled: path=%s metadata_path=%s",
self.path,
self.metadata_path,
)
self.broken = True
else:
self.last_frame = frame
if self.ansi_mode:
self._encode_ansi_frame(frame)
else:
self._encode_image_frame(frame)
def close(self):
"""Flush all data to disk and close any open frame encoders."""
if not self.enabled or self._closed:
return
if self.encoder:
logger.debug("Closing video encoder: path=%s", self.path)
self.encoder.close()
self.encoder = None
else:
# No frames captured. Set metadata, and remove the empty output file.
os.remove(self.path)
if self.metadata is None:
self.metadata = {}
self.metadata["empty"] = True
# If broken, get rid of the output file, otherwise we'd leak it.
if self.broken:
logger.info(
"Cleaning up paths for broken video recorder: path=%s metadata_path=%s",
self.path,
self.metadata_path,
)
# Might have crashed before even starting the output file, don't try to remove in that case.
if os.path.exists(self.path):
os.remove(self.path)
if self.metadata is None:
self.metadata = {}
self.metadata["broken"] = True
self.write_metadata()
# Stop tracking this for autoclose
video_recorder_closer.unregister(self._recorder_id)
self._closed = True
def write_metadata(self):
with open(self.metadata_path, "w") as f:
json.dump(self.metadata, f)
def __del__(self):
# Make sure we've closed up shop when garbage collecting
self.close()
def _encode_ansi_frame(self, frame):
if not self.encoder:
self.encoder = TextEncoder(self.path, self.frames_per_sec)
self.metadata["encoder_version"] = self.encoder.version_info
self.encoder.capture_frame(frame)
self.empty = False
def _encode_image_frame(self, frame):
if not self.encoder:
self.encoder = ImageEncoder(
self.path, frame.shape, self.frames_per_sec, self.output_frames_per_sec
)
self.metadata["encoder_version"] = self.encoder.version_info
try:
self.encoder.capture_frame(frame)
except error.InvalidFrame as e:
logger.warn("Tried to pass invalid video frame, marking as broken: %s", e)
self.broken = True
else:
self.empty = False
class TextEncoder(object):
"""Store a moving picture made out of ANSI frames. Format adapted from
https://github.com/asciinema/asciinema/blob/master/doc/asciicast-v1.md"""
def __init__(self, output_path, frames_per_sec):
self.output_path = output_path
self.frames_per_sec = frames_per_sec
self.frames = []
def capture_frame(self, frame):
string = None
if isinstance(frame, str):
string = frame
elif isinstance(frame, StringIO):
string = frame.getvalue()
else:
raise error.InvalidFrame(
"Wrong type {} for {}: text frame must be a string or StringIO".format(
type(frame), frame
)
)
frame_bytes = string.encode("utf-8")
if frame_bytes[-1:] != b"\n":
raise error.InvalidFrame(
'Frame must end with a newline: """{}"""'.format(string)
)
if b"\r" in frame_bytes:
raise error.InvalidFrame(
'Frame contains carriage returns (only newlines are allowed: """{}"""'.format(
string
)
)
self.frames.append(frame_bytes)
def close(self):
# frame_duration = float(1) / self.frames_per_sec
frame_duration = 0.5
# Turn frames into events: clear screen beforehand
# https://rosettacode.org/wiki/Terminal_control/Clear_the_screen#Python
# https://rosettacode.org/wiki/Terminal_control/Cursor_positioning#Python
clear_code = b"%c[2J\033[1;1H" % (27)
# Decode the bytes as UTF-8 since JSON may only contain UTF-8
events = [
(
frame_duration,
(clear_code + frame.replace(b"\n", b"\r\n")).decode("utf-8"),
)
for frame in self.frames
]
# Calculate frame size from the largest frames.
# Add some padding since we'll get cut off otherwise.
height = max([frame.count(b"\n") for frame in self.frames]) + 1
width = (
max(
[
max([len(line) for line in frame.split(b"\n")])
for frame in self.frames
]
)
+ 2
)
data = {
"version": 1,
"width": width,
"height": height,
"duration": len(self.frames) * frame_duration,
"command": "-",
"title": "gym VideoRecorder episode",
"env": {}, # could add some env metadata here
"stdout": events,
}
with open(self.output_path, "w") as f:
json.dump(data, f)
@property
def version_info(self):
return {"backend": "TextEncoder", "version": 1}
class ImageEncoder(object):
def __init__(self, output_path, frame_shape, frames_per_sec, output_frames_per_sec):
self.proc = None
self.output_path = output_path
# Frame shape should be lines-first, so w and h are swapped
h, w, pixfmt = frame_shape
if pixfmt != 3 and pixfmt != 4:
raise error.InvalidFrame(
"Your frame has shape {}, but we require (w,h,3) or (w,h,4), i.e., RGB values for a w-by-h image, with an optional alpha channel.".format(
frame_shape
)
)
self.wh = (w, h)
self.includes_alpha = pixfmt == 4
self.frame_shape = frame_shape
self.frames_per_sec = frames_per_sec
self.output_frames_per_sec = output_frames_per_sec
if distutils.spawn.find_executable("avconv") is not None:
self.backend = "avconv"
elif distutils.spawn.find_executable("ffmpeg") is not None:
self.backend = "ffmpeg"
else:
raise error.DependencyNotInstalled(
"""Found neither the ffmpeg nor avconv executables. On OS X, you can install ffmpeg via `brew install ffmpeg`. On most Ubuntu variants, `sudo apt-get install ffmpeg` should do it. On Ubuntu 14.04, however, you'll need to install avconv with `sudo apt-get install libav-tools`."""
)
self.start()
@property
def version_info(self):
return {
"backend": self.backend,
"version": str(
subprocess.check_output(
[self.backend, "-version"], stderr=subprocess.STDOUT
)
),
"cmdline": self.cmdline,
}
def start(self):
if self.backend == "ffmpeg":
self.cmdline = (
self.backend,
"-nostats",
"-loglevel",
"error", # suppress warnings
"-y",
# input
"-f",
"rawvideo",
"-s:v",
"{}x{}".format(*self.wh),
"-pix_fmt",
("rgb32" if self.includes_alpha else "rgb24"),
"-r",
"%d" % self.frames_per_sec,
"-i",
"-", # this used to be /dev/stdin, which is not Windows-friendly
# output
"-an",
"-r",
"%d" % self.frames_per_sec,
"-vcodec",
# "mpeg4",
"libx264",
"-pix_fmt",
# "bgr24",
"yuv420p",
"-r",
"%d" % self.output_frames_per_sec,
self.output_path,
)
else:
self.cmdline = (
self.backend,
"-nostats",
"-loglevel",
"error", # suppress warnings
"-y",
# input
"-f",
"rawvideo",
"-s:v",
"{}x{}".format(*self.wh),
"-pix_fmt",
("rgb32" if self.includes_alpha else "rgb24"),
"-framerate",
"%d" % self.frames_per_sec,
"-i",
"-", # this used to be /dev/stdin, which is not Windows-friendly
# output
"-vf",
"scale=trunc(iw/2)*2:trunc(ih/2)*2",
"-vcodec",
"libx264",
"-pix_fmt",
"yuv420p",
"-r",
"%d" % self.output_frames_per_sec,
self.output_path,
)
logger.debug('Starting %s with "%s"', self.backend, " ".join(self.cmdline))
if hasattr(os, "setsid"): # setsid not present on Windows
self.proc = subprocess.Popen(
self.cmdline, stdin=subprocess.PIPE, preexec_fn=os.setsid
)
else:
self.proc = subprocess.Popen(self.cmdline, stdin=subprocess.PIPE)
def capture_frame(self, frame):
if not isinstance(frame, (np.ndarray, np.generic)):
raise error.InvalidFrame(
"Wrong type {} for {} (must be np.ndarray or np.generic)".format(
type(frame), frame
)
)
if frame.shape != self.frame_shape:
raise error.InvalidFrame(
"Your frame has shape {}, but the VideoRecorder is configured for shape {}.".format(
frame.shape, self.frame_shape
)
)
if frame.dtype != np.uint8:
raise error.InvalidFrame(
"Your frame has data type {}, but we require uint8 (i.e. RGB values from 0-255).".format(
frame.dtype
)
)
try:
if distutils.version.LooseVersion(
np.__version__
) >= distutils.version.LooseVersion("1.9.0"):
self.proc.stdin.write(frame.tobytes())
else:
self.proc.stdin.write(frame.tostring())
except Exception as e:
stdout, stderr = self.proc.communicate()
logger.error("VideoRecorder encoder failed: %s", stderr)
def close(self):
self.proc.stdin.close()
ret = self.proc.wait()
if ret != 0:
logger.error("VideoRecorder encoder exited with status {}".format(ret))
| 16,129 | Python | .py | 399 | 28.130326 | 295 | 0.538103 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,387 | test_video_recorder.py | WindyLab_Gym-PPS/gym/wrappers/monitoring/tests/test_video_recorder.py | import json
import os
import shutil
import tempfile
import numpy as np
import gym
from gym.wrappers.monitoring.video_recorder import VideoRecorder, video_recorder_closer
class BrokenRecordableEnv(object):
metadata = {"render.modes": [None, "rgb_array"]}
def render(self, mode=None):
pass
class UnrecordableEnv(object):
metadata = {"render.modes": [None]}
def render(self, mode=None):
pass
def test_record_simple():
env = gym.make("CartPole-v1")
rec = VideoRecorder(env)
env.reset()
rec.capture_frame()
rec.close()
assert not rec.empty
assert not rec.broken
assert os.path.exists(rec.path)
f = open(rec.path)
assert os.fstat(f.fileno()).st_size > 100
def test_autoclose():
def record():
env = gym.make("CartPole-v1")
rec = VideoRecorder(env)
env.reset()
rec.capture_frame()
rec_path = rec.path
proc = rec.encoder.proc
assert proc.poll() is None # subprocess is running
# The function ends without an explicit `rec.close()` call
# The Python interpreter will implicitly do `del rec` on garbage cleaning
return rec_path, proc
rec_path, proc = record()
assert proc.poll() is not None
assert os.path.exists(rec_path)
f = open(rec_path)
assert os.fstat(f.fileno()).st_size > 100
def test_no_frames():
env = BrokenRecordableEnv()
rec = VideoRecorder(env)
rec.close()
assert rec.empty
assert rec.functional
assert not os.path.exists(rec.path)
def test_record_unrecordable_method():
env = UnrecordableEnv()
rec = VideoRecorder(env)
assert not rec.enabled
rec.close()
def test_record_breaking_render_method():
env = BrokenRecordableEnv()
rec = VideoRecorder(env)
rec.capture_frame()
rec.close()
assert rec.empty
assert rec.broken
assert not os.path.exists(rec.path)
def test_text_envs():
env = gym.make("FrozenLake-v1")
video = VideoRecorder(env)
try:
env.reset()
video.capture_frame()
video.close()
finally:
os.remove(video.path)
| 2,137 | Python | .py | 72 | 24.305556 | 87 | 0.67189 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,388 | helpers.py | WindyLab_Gym-PPS/gym/wrappers/monitoring/tests/helpers.py | import contextlib
import shutil
import tempfile
@contextlib.contextmanager
def tempdir():
temp = tempfile.mkdtemp()
yield temp
shutil.rmtree(temp)
| 161 | Python | .py | 8 | 17.375 | 29 | 0.794702 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,389 | multi_discrete.py | WindyLab_Gym-PPS/gym/spaces/multi_discrete.py | import numpy as np
from .space import Space
class MultiDiscrete(Space):
"""
- The multi-discrete action space consists of a series of discrete action spaces with different number of actions in each
- It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
- It is parametrized by passing an array of positive integers specifying number of actions for each discrete action space
Note: Some environment wrappers assume a value of 0 always represents the NOOP action.
e.g. Nintendo Game Controller
- Can be conceptualized as 3 discrete action spaces:
1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
- Can be initialized as
MultiDiscrete([ 5, 2, 2 ])
"""
def __init__(self, nvec, dtype=np.int64):
"""
nvec: vector of counts of each categorical variable
"""
assert (np.array(nvec) > 0).all(), "nvec (counts) have to be positive"
self.nvec = np.asarray(nvec, dtype=dtype)
super(MultiDiscrete, self).__init__(self.nvec.shape, dtype)
def sample(self):
return (self.np_random.random_sample(self.nvec.shape) * self.nvec).astype(
self.dtype
)
def contains(self, x):
if isinstance(x, list):
x = np.array(x) # Promote list to array for contains check
# if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x
# is within correct bounds for space dtype (even though x does not have to be unsigned)
return x.shape == self.shape and (0 <= x).all() and (x < self.nvec).all()
def to_jsonable(self, sample_n):
return [sample.tolist() for sample in sample_n]
def from_jsonable(self, sample_n):
return np.array(sample_n)
def __repr__(self):
return "MultiDiscrete({})".format(self.nvec)
def __eq__(self, other):
return isinstance(other, MultiDiscrete) and np.all(self.nvec == other.nvec)
| 2,222 | Python | .py | 41 | 46.731707 | 125 | 0.655268 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,390 | dict.py | WindyLab_Gym-PPS/gym/spaces/dict.py | from collections import OrderedDict
from .space import Space
class Dict(Space):
"""
A dictionary of simpler spaces.
Example usage:
self.observation_space = spaces.Dict({"position": spaces.Discrete(2), "velocity": spaces.Discrete(3)})
Example usage [nested]:
self.nested_observation_space = spaces.Dict({
'sensors': spaces.Dict({
'position': spaces.Box(low=-100, high=100, shape=(3,)),
'velocity': spaces.Box(low=-1, high=1, shape=(3,)),
'front_cam': spaces.Tuple((
spaces.Box(low=0, high=1, shape=(10, 10, 3)),
spaces.Box(low=0, high=1, shape=(10, 10, 3))
)),
'rear_cam': spaces.Box(low=0, high=1, shape=(10, 10, 3)),
}),
'ext_controller': spaces.MultiDiscrete((5, 2, 2)),
'inner_state':spaces.Dict({
'charge': spaces.Discrete(100),
'system_checks': spaces.MultiBinary(10),
'job_status': spaces.Dict({
'task': spaces.Discrete(5),
'progress': spaces.Box(low=0, high=100, shape=()),
})
})
})
"""
def __init__(self, spaces=None, **spaces_kwargs):
assert (spaces is None) or (
not spaces_kwargs
), "Use either Dict(spaces=dict(...)) or Dict(foo=x, bar=z)"
if spaces is None:
spaces = spaces_kwargs
if isinstance(spaces, dict) and not isinstance(spaces, OrderedDict):
spaces = OrderedDict(sorted(list(spaces.items())))
if isinstance(spaces, list):
spaces = OrderedDict(spaces)
self.spaces = spaces
for space in spaces.values():
assert isinstance(
space, Space
), "Values of the dict should be instances of gym.Space"
super(Dict, self).__init__(
None, None
) # None for shape and dtype, since it'll require special handling
def seed(self, seed=None):
[space.seed(seed) for space in self.spaces.values()]
def sample(self):
return OrderedDict([(k, space.sample()) for k, space in self.spaces.items()])
def contains(self, x):
if not isinstance(x, dict) or len(x) != len(self.spaces):
return False
for k, space in self.spaces.items():
if k not in x:
return False
if not space.contains(x[k]):
return False
return True
def __getitem__(self, key):
return self.spaces[key]
def __setitem__(self, key, value):
self.spaces[key] = value
def __iter__(self):
for key in self.spaces:
yield key
def __len__(self):
return len(self.spaces)
def __contains__(self, item):
return self.contains(item)
def __repr__(self):
return (
"Dict("
+ ", ".join([str(k) + ":" + str(s) for k, s in self.spaces.items()])
+ ")"
)
def to_jsonable(self, sample_n):
# serialize as dict-repr of vectors
return {
key: space.to_jsonable([sample[key] for sample in sample_n])
for key, space in self.spaces.items()
}
def from_jsonable(self, sample_n):
dict_of_list = {}
for key, space in self.spaces.items():
dict_of_list[key] = space.from_jsonable(sample_n[key])
ret = []
for i, _ in enumerate(dict_of_list[key]):
entry = {}
for key, value in dict_of_list.items():
entry[key] = value[i]
ret.append(entry)
return ret
def __eq__(self, other):
return isinstance(other, Dict) and self.spaces == other.spaces
| 3,713 | Python | .py | 96 | 28.75 | 106 | 0.550556 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,391 | multi_binary.py | WindyLab_Gym-PPS/gym/spaces/multi_binary.py | import numpy as np
from .space import Space
class MultiBinary(Space):
"""
An n-shape binary space.
The argument to MultiBinary defines n, which could be a number or a `list` of numbers.
Example Usage:
>> self.observation_space = spaces.MultiBinary(5)
>> self.observation_space.sample()
array([0,1,0,1,0], dtype =int8)
>> self.observation_space = spaces.MultiBinary([3,2])
>> self.observation_space.sample()
array([[0, 0],
[0, 1],
[1, 1]], dtype=int8)
"""
def __init__(self, n):
self.n = n
if type(n) in [tuple, list, np.ndarray]:
input_n = n
else:
input_n = (n,)
super(MultiBinary, self).__init__(input_n, np.int8)
def sample(self):
return self.np_random.randint(low=0, high=2, size=self.n, dtype=self.dtype)
def contains(self, x):
if isinstance(x, list) or isinstance(x, tuple):
x = np.array(x) # Promote list to array for contains check
if self.shape != x.shape:
return False
return ((x == 0) | (x == 1)).all()
def to_jsonable(self, sample_n):
return np.array(sample_n).tolist()
def from_jsonable(self, sample_n):
return [np.asarray(sample) for sample in sample_n]
def __repr__(self):
return "MultiBinary({})".format(self.n)
def __eq__(self, other):
return isinstance(other, MultiBinary) and self.n == other.n
| 1,485 | Python | .py | 39 | 30.102564 | 90 | 0.588235 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,392 | utils.py | WindyLab_Gym-PPS/gym/spaces/utils.py | from collections import OrderedDict
from functools import singledispatch, reduce
import numpy as np
import operator as op
from gym.spaces import Box
from gym.spaces import Discrete
from gym.spaces import MultiDiscrete
from gym.spaces import MultiBinary
from gym.spaces import Tuple
from gym.spaces import Dict
@singledispatch
def flatdim(space):
"""Return the number of dimensions a flattened equivalent of this space
would have.
Accepts a space and returns an integer. Raises ``NotImplementedError`` if
the space is not defined in ``gym.spaces``.
"""
raise NotImplementedError(f"Unknown space: `{space}`")
@flatdim.register(Box)
@flatdim.register(MultiBinary)
def flatdim_box_multibinary(space):
return reduce(op.mul, space.shape, 1)
@flatdim.register(Discrete)
def flatdim_discrete(space):
return int(space.n)
@flatdim.register(MultiDiscrete)
def flatdim_multidiscrete(space):
return int(np.sum(space.nvec))
@flatdim.register(Tuple)
def flatdim_tuple(space):
return sum([flatdim(s) for s in space.spaces])
@flatdim.register(Dict)
def flatdim_dict(space):
return sum([flatdim(s) for s in space.spaces.values()])
@singledispatch
def flatten(space, x):
"""Flatten a data point from a space.
This is useful when e.g. points from spaces must be passed to a neural
network, which only understands flat arrays of floats.
Accepts a space and a point from that space. Always returns a 1D array.
Raises ``NotImplementedError`` if the space is not defined in
``gym.spaces``.
"""
raise NotImplementedError(f"Unknown space: `{space}`")
@flatten.register(Box)
@flatten.register(MultiBinary)
def flatten_box_multibinary(space, x):
return np.asarray(x, dtype=space.dtype).flatten()
@flatten.register(Discrete)
def flatten_discrete(space, x):
onehot = np.zeros(space.n, dtype=space.dtype)
onehot[x] = 1
return onehot
@flatten.register(MultiDiscrete)
def flatten_multidiscrete(space, x):
offsets = np.zeros((space.nvec.size + 1,), dtype=space.dtype)
offsets[1:] = np.cumsum(space.nvec.flatten())
onehot = np.zeros((offsets[-1],), dtype=space.dtype)
onehot[offsets[:-1] + x.flatten()] = 1
return onehot
@flatten.register(Tuple)
def flatten_tuple(space, x):
return np.concatenate([flatten(s, x_part) for x_part, s in zip(x, space.spaces)])
@flatten.register(Dict)
def flatten_dict(space, x):
return np.concatenate([flatten(s, x[key]) for key, s in space.spaces.items()])
@singledispatch
def unflatten(space, x):
"""Unflatten a data point from a space.
This reverses the transformation applied by ``flatten()``. You must ensure
that the ``space`` argument is the same as for the ``flatten()`` call.
Accepts a space and a flattened point. Returns a point with a structure
that matches the space. Raises ``NotImplementedError`` if the space is not
defined in ``gym.spaces``.
"""
raise NotImplementedError(f"Unknown space: `{space}`")
@unflatten.register(Box)
@unflatten.register(MultiBinary)
def unflatten_box_multibinary(space, x):
return np.asarray(x, dtype=space.dtype).reshape(space.shape)
@unflatten.register(Discrete)
def unflatten_discrete(space, x):
return int(np.nonzero(x)[0][0])
@unflatten.register(MultiDiscrete)
def unflatten_multidiscrete(space, x):
offsets = np.zeros((space.nvec.size + 1,), dtype=space.dtype)
offsets[1:] = np.cumsum(space.nvec.flatten())
(indices,) = np.nonzero(x)
return np.asarray(indices - offsets[:-1], dtype=space.dtype).reshape(space.shape)
@unflatten.register(Tuple)
def unflatten_tuple(space, x):
dims = np.asarray([flatdim(s) for s in space.spaces], dtype=np.int_)
list_flattened = np.split(x, np.cumsum(dims[:-1]))
return tuple(
unflatten(s, flattened) for flattened, s in zip(list_flattened, space.spaces)
)
@unflatten.register(Dict)
def unflatten_dict(space, x):
dims = np.asarray([flatdim(s) for s in space.spaces.values()], dtype=np.int_)
list_flattened = np.split(x, np.cumsum(dims[:-1]))
return OrderedDict(
[
(key, unflatten(s, flattened))
for flattened, (key, s) in zip(list_flattened, space.spaces.items())
]
)
@singledispatch
def flatten_space(space):
"""Flatten a space into a single ``Box``.
This is equivalent to ``flatten()``, but operates on the space itself. The
result always is a `Box` with flat boundaries. The box has exactly
``flatdim(space)`` dimensions. Flattening a sample of the original space
has the same effect as taking a sample of the flattenend space.
Raises ``NotImplementedError`` if the space is not defined in
``gym.spaces``.
Example::
>>> box = Box(0.0, 1.0, shape=(3, 4, 5))
>>> box
Box(3, 4, 5)
>>> flatten_space(box)
Box(60,)
>>> flatten(box, box.sample()) in flatten_space(box)
True
Example that flattens a discrete space::
>>> discrete = Discrete(5)
>>> flatten_space(discrete)
Box(5,)
>>> flatten(box, box.sample()) in flatten_space(box)
True
Example that recursively flattens a dict::
>>> space = Dict({"position": Discrete(2),
... "velocity": Box(0, 1, shape=(2, 2))})
>>> flatten_space(space)
Box(6,)
>>> flatten(space, space.sample()) in flatten_space(space)
True
"""
raise NotImplementedError(f"Unknown space: `{space}`")
@flatten_space.register(Box)
def flatten_space_box(space):
return Box(space.low.flatten(), space.high.flatten(), dtype=space.dtype)
@flatten_space.register(Discrete)
@flatten_space.register(MultiBinary)
@flatten_space.register(MultiDiscrete)
def flatten_space_binary(space):
return Box(low=0, high=1, shape=(flatdim(space),), dtype=space.dtype)
@flatten_space.register(Tuple)
def flatten_space_tuple(space):
space = [flatten_space(s) for s in space.spaces]
return Box(
low=np.concatenate([s.low for s in space]),
high=np.concatenate([s.high for s in space]),
dtype=np.result_type(*[s.dtype for s in space]),
)
@flatten_space.register(Dict)
def flatten_space_dict(space):
space = [flatten_space(s) for s in space.spaces.values()]
return Box(
low=np.concatenate([s.low for s in space]),
high=np.concatenate([s.high for s in space]),
dtype=np.result_type(*[s.dtype for s in space]),
)
| 6,478 | Python | .py | 162 | 35.345679 | 85 | 0.699232 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,393 | box.py | WindyLab_Gym-PPS/gym/spaces/box.py | import numpy as np
import warnings
from .space import Space
from gym import logger
class Box(Space):
"""
A (possibly unbounded) box in R^n. Specifically, a Box represents the
Cartesian product of n closed intervals. Each interval has the form of one
of [a, b], (-oo, b], [a, oo), or (-oo, oo).
There are two common use cases:
* Identical bound for each dimension::
>>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
Box(3, 4)
* Independent bound for each dimension::
>>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)
Box(2,)
"""
def __init__(self, low, high, shape=None, dtype=np.float32):
assert dtype is not None, "dtype must be explicitly provided. "
self.dtype = np.dtype(dtype)
# determine shape if it isn't provided directly
if shape is not None:
shape = tuple(shape)
assert (
np.isscalar(low) or low.shape == shape
), "low.shape doesn't match provided shape"
assert (
np.isscalar(high) or high.shape == shape
), "high.shape doesn't match provided shape"
elif not np.isscalar(low):
shape = low.shape
assert (
np.isscalar(high) or high.shape == shape
), "high.shape doesn't match low.shape"
elif not np.isscalar(high):
shape = high.shape
assert (
np.isscalar(low) or low.shape == shape
), "low.shape doesn't match high.shape"
else:
raise ValueError(
"shape must be provided or inferred from the shapes of low or high"
)
if np.isscalar(low):
low = np.full(shape, low, dtype=dtype)
if np.isscalar(high):
high = np.full(shape, high, dtype=dtype)
self.shape = shape
self.low = low
self.high = high
def _get_precision(dtype):
if np.issubdtype(dtype, np.floating):
return np.finfo(dtype).precision
else:
return np.inf
low_precision = _get_precision(self.low.dtype)
high_precision = _get_precision(self.high.dtype)
dtype_precision = _get_precision(self.dtype)
if min(low_precision, high_precision) > dtype_precision:
logger.warn(
"Box bound precision lowered by casting to {}".format(self.dtype)
)
self.low = self.low.astype(self.dtype)
self.high = self.high.astype(self.dtype)
# Boolean arrays which indicate the interval type for each coordinate
self.bounded_below = -np.inf < self.low
self.bounded_above = np.inf > self.high
super(Box, self).__init__(self.shape, self.dtype)
def is_bounded(self, manner="both"):
below = np.all(self.bounded_below)
above = np.all(self.bounded_above)
if manner == "both":
return below and above
elif manner == "below":
return below
elif manner == "above":
return above
else:
raise ValueError("manner is not in {'below', 'above', 'both'}")
def sample(self):
"""
Generates a single random sample inside of the Box.
In creating a sample of the box, each coordinate is sampled according to
the form of the interval:
* [a, b] : uniform distribution
* [a, oo) : shifted exponential distribution
* (-oo, b] : shifted negative exponential distribution
* (-oo, oo) : normal distribution
"""
high = self.high if self.dtype.kind == "f" else self.high.astype("int64") + 1
sample = np.empty(self.shape)
# Masking arrays which classify the coordinates according to interval
# type
unbounded = ~self.bounded_below & ~self.bounded_above
upp_bounded = ~self.bounded_below & self.bounded_above
low_bounded = self.bounded_below & ~self.bounded_above
bounded = self.bounded_below & self.bounded_above
# Vectorized sampling by interval type
sample[unbounded] = self.np_random.normal(size=unbounded[unbounded].shape)
sample[low_bounded] = (
self.np_random.exponential(size=low_bounded[low_bounded].shape)
+ self.low[low_bounded]
)
sample[upp_bounded] = (
-self.np_random.exponential(size=upp_bounded[upp_bounded].shape)
+ self.high[upp_bounded]
)
sample[bounded] = self.np_random.uniform(
low=self.low[bounded], high=high[bounded], size=bounded[bounded].shape
)
if self.dtype.kind == "i":
sample = np.floor(sample)
return sample.astype(self.dtype)
def contains(self, x):
if not isinstance(x, np.ndarray):
warnings.warn("Casting input x to numpy array.")
x = np.asarray(x, dtype=self.dtype)
return (
np.can_cast(x.dtype, self.dtype)
and x.shape == self.shape
and np.any(x >= self.low)
and np.any(x <= self.high)
)
def to_jsonable(self, sample_n):
return np.array(sample_n).tolist()
def from_jsonable(self, sample_n):
return [np.asarray(sample) for sample in sample_n]
def __repr__(self):
return f"Box({self.low}, {self.high}, {self.shape}, {self.dtype})"
def __eq__(self, other):
return (
isinstance(other, Box)
and (self.shape == other.shape)
and np.allclose(self.low, other.low)
and np.allclose(self.high, other.high)
)
| 5,707 | Python | .py | 136 | 31.875 | 88 | 0.586387 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,394 | __init__.py | WindyLab_Gym-PPS/gym/spaces/__init__.py | from gym.spaces.space import Space
from gym.spaces.box import Box
from gym.spaces.discrete import Discrete
from gym.spaces.multi_discrete import MultiDiscrete
from gym.spaces.multi_binary import MultiBinary
from gym.spaces.tuple import Tuple
from gym.spaces.dict import Dict
from gym.spaces.utils import flatdim
from gym.spaces.utils import flatten_space
from gym.spaces.utils import flatten
from gym.spaces.utils import unflatten
__all__ = [
"Space",
"Box",
"Discrete",
"MultiDiscrete",
"MultiBinary",
"Tuple",
"Dict",
"flatdim",
"flatten_space",
"flatten",
"unflatten",
]
| 620 | Python | .py | 24 | 22.916667 | 51 | 0.755892 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,395 | discrete.py | WindyLab_Gym-PPS/gym/spaces/discrete.py | import numpy as np
from .space import Space
class Discrete(Space):
r"""A discrete space in :math:`\{ 0, 1, \\dots, n-1 \}`.
Example::
>>> Discrete(2)
"""
def __init__(self, n):
assert n >= 0
self.n = n
super(Discrete, self).__init__((), np.int64)
def sample(self):
return self.np_random.randint(self.n)
def contains(self, x):
if isinstance(x, int):
as_int = x
elif isinstance(x, (np.generic, np.ndarray)) and (
x.dtype.char in np.typecodes["AllInteger"] and x.shape == ()
):
as_int = int(x)
else:
return False
return as_int >= 0 and as_int < self.n
def __repr__(self):
return "Discrete(%d)" % self.n
def __eq__(self, other):
return isinstance(other, Discrete) and self.n == other.n
| 868 | Python | .py | 27 | 24.259259 | 72 | 0.53911 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,396 | space.py | WindyLab_Gym-PPS/gym/spaces/space.py | from gym.utils import seeding
class Space(object):
"""Defines the observation and action spaces, so you can write generic
code that applies to any Env. For example, you can choose a random
action.
WARNING - Custom observation & action spaces can inherit from the `Space`
class. However, most use-cases should be covered by the existing space
classes (e.g. `Box`, `Discrete`, etc...), and container classes (`Tuple` &
`Dict`). Note that parametrized probability distributions (through the
`sample()` method), and batching functions (in `gym.vector.VectorEnv`), are
only well-defined for instances of spaces provided in gym by default.
Moreover, some implementations of Reinforcement Learning algorithms might
not handle custom spaces properly. Use custom spaces with care.
"""
def __init__(self, shape=None, dtype=None):
import numpy as np # takes about 300-400ms to import, so we load lazily
self.shape = None if shape is None else tuple(shape)
self.dtype = None if dtype is None else np.dtype(dtype)
self._np_random = None
@property
def np_random(self):
"""Lazily seed the rng since this is expensive and only needed if
sampling from this space.
"""
if self._np_random is None:
self.seed()
return self._np_random
def sample(self):
"""Randomly sample an element of this space. Can be
uniform or non-uniform sampling based on boundedness of space."""
raise NotImplementedError
def seed(self, seed=None):
"""Seed the PRNG of this space."""
self._np_random, seed = seeding.np_random(seed)
return [seed]
def contains(self, x):
"""
Return boolean specifying if x is a valid
member of this space
"""
raise NotImplementedError
def __contains__(self, x):
return self.contains(x)
def to_jsonable(self, sample_n):
"""Convert a batch of samples from this space to a JSONable data type."""
# By default, assume identity is JSONable
return sample_n
def from_jsonable(self, sample_n):
"""Convert a JSONable data type to a batch of samples from this space."""
# By default, assume identity is JSONable
return sample_n
| 2,329 | Python | .py | 51 | 38.294118 | 81 | 0.669757 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,397 | tuple.py | WindyLab_Gym-PPS/gym/spaces/tuple.py | import numpy as np
from .space import Space
class Tuple(Space):
"""
A tuple (i.e., product) of simpler spaces
Example usage:
self.observation_space = spaces.Tuple((spaces.Discrete(2), spaces.Discrete(3)))
"""
def __init__(self, spaces):
self.spaces = spaces
for space in spaces:
assert isinstance(
space, Space
), "Elements of the tuple must be instances of gym.Space"
super(Tuple, self).__init__(None, None)
def seed(self, seed=None):
[space.seed(seed) for space in self.spaces]
def sample(self):
return tuple([space.sample() for space in self.spaces])
def contains(self, x):
if isinstance(x, list):
x = tuple(x) # Promote list to tuple for contains check
return (
isinstance(x, tuple)
and len(x) == len(self.spaces)
and all(space.contains(part) for (space, part) in zip(self.spaces, x))
)
def __repr__(self):
return "Tuple(" + ", ".join([str(s) for s in self.spaces]) + ")"
def to_jsonable(self, sample_n):
# serialize as list-repr of tuple of vectors
return [
space.to_jsonable([sample[i] for sample in sample_n])
for i, space in enumerate(self.spaces)
]
def from_jsonable(self, sample_n):
return [
sample
for sample in zip(
*[
space.from_jsonable(sample_n[i])
for i, space in enumerate(self.spaces)
]
)
]
def __getitem__(self, index):
return self.spaces[index]
def __len__(self):
return len(self.spaces)
def __eq__(self, other):
return isinstance(other, Tuple) and self.spaces == other.spaces
| 1,827 | Python | .py | 51 | 26.411765 | 83 | 0.562677 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,398 | test_spaces.py | WindyLab_Gym-PPS/gym/spaces/tests/test_spaces.py | import json # note: ujson fails this test due to float equality
from copy import copy
import numpy as np
import pytest
from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict
@pytest.mark.parametrize(
"space",
[
Discrete(3),
Box(low=0.0, high=np.inf, shape=(2, 2)),
Tuple([Discrete(5), Discrete(10)]),
Tuple(
[
Discrete(5),
Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
]
),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
MultiDiscrete([2, 2, 100]),
MultiBinary(10),
Dict(
{
"position": Discrete(5),
"velocity": Box(
low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
),
}
),
],
)
def test_roundtripping(space):
sample_1 = space.sample()
sample_2 = space.sample()
assert space.contains(sample_1)
assert space.contains(sample_2)
json_rep = space.to_jsonable([sample_1, sample_2])
json_roundtripped = json.loads(json.dumps(json_rep))
samples_after_roundtrip = space.from_jsonable(json_roundtripped)
sample_1_prime, sample_2_prime = samples_after_roundtrip
s1 = space.to_jsonable([sample_1])
s1p = space.to_jsonable([sample_1_prime])
s2 = space.to_jsonable([sample_2])
s2p = space.to_jsonable([sample_2_prime])
assert s1 == s1p, "Expected {} to equal {}".format(s1, s1p)
assert s2 == s2p, "Expected {} to equal {}".format(s2, s2p)
@pytest.mark.parametrize(
"space",
[
Discrete(3),
Box(low=np.array([-10, 0]), high=np.array([10, 10]), dtype=np.float32),
Box(low=-np.inf, high=np.inf, shape=(1, 3)),
Tuple([Discrete(5), Discrete(10)]),
Tuple(
[
Discrete(5),
Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
]
),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
MultiDiscrete([2, 2, 100]),
MultiBinary(6),
Dict(
{
"position": Discrete(5),
"velocity": Box(
low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
),
}
),
],
)
def test_equality(space):
space1 = space
space2 = copy(space)
assert space1 == space2, "Expected {} to equal {}".format(space1, space2)
@pytest.mark.parametrize(
"spaces",
[
(Discrete(3), Discrete(4)),
(MultiDiscrete([2, 2, 100]), MultiDiscrete([2, 2, 8])),
(MultiBinary(8), MultiBinary(7)),
(
Box(low=np.array([-10, 0]), high=np.array([10, 10]), dtype=np.float32),
Box(low=np.array([-10, 0]), high=np.array([10, 9]), dtype=np.float32),
),
(
Box(low=-np.inf, high=0.0, shape=(2, 1)),
Box(low=0.0, high=np.inf, shape=(2, 1)),
),
(Tuple([Discrete(5), Discrete(10)]), Tuple([Discrete(1), Discrete(10)])),
(Dict({"position": Discrete(5)}), Dict({"position": Discrete(4)})),
(Dict({"position": Discrete(5)}), Dict({"speed": Discrete(5)})),
],
)
def test_inequality(spaces):
space1, space2 = spaces
assert space1 != space2, "Expected {} != {}".format(space1, space2)
@pytest.mark.parametrize(
"space",
[
Discrete(5),
Box(low=0, high=255, shape=(2,), dtype="uint8"),
Box(low=-np.inf, high=np.inf, shape=(3, 3)),
Box(low=1.0, high=np.inf, shape=(3, 3)),
Box(low=-np.inf, high=2.0, shape=(3, 3)),
],
)
def test_sample(space):
space.seed(0)
n_trials = 100
samples = np.array([space.sample() for _ in range(n_trials)])
expected_mean = 0.0
if isinstance(space, Box):
if space.is_bounded():
expected_mean = (space.high + space.low) / 2
elif space.is_bounded("below"):
expected_mean = 1 + space.low
elif space.is_bounded("above"):
expected_mean = -1 + space.high
else:
expected_mean = 0.0
elif isinstance(space, Discrete):
expected_mean = space.n / 2
else:
raise NotImplementedError
np.testing.assert_allclose(expected_mean, samples.mean(), atol=3.0 * samples.std())
@pytest.mark.parametrize(
"spaces",
[
(Discrete(5), MultiBinary(5)),
(
Box(low=np.array([-10, 0]), high=np.array([10, 10]), dtype=np.float32),
MultiDiscrete([2, 2, 8]),
),
(
Box(low=0, high=255, shape=(64, 64, 3), dtype=np.uint8),
Box(low=0, high=255, shape=(32, 32, 3), dtype=np.uint8),
),
(Dict({"position": Discrete(5)}), Tuple([Discrete(5)])),
(Dict({"position": Discrete(5)}), Discrete(5)),
(Tuple((Discrete(5),)), Discrete(5)),
(
Box(low=np.array([-np.inf, 0.0]), high=np.array([0.0, np.inf])),
Box(low=np.array([-np.inf, 1.0]), high=np.array([0.0, np.inf])),
),
],
)
def test_class_inequality(spaces):
assert spaces[0] == spaces[0]
assert spaces[1] == spaces[1]
assert spaces[0] != spaces[1]
assert spaces[1] != spaces[0]
@pytest.mark.parametrize(
"space_fn",
[
lambda: Dict(space1="abc"),
lambda: Dict({"space1": "abc"}),
lambda: Tuple(["abc"]),
],
)
def test_bad_space_calls(space_fn):
with pytest.raises(AssertionError):
space_fn()
def test_box_dtype_check():
# Related Issues:
# https://github.com/openai/gym/issues/2357
# https://github.com/openai/gym/issues/2298
space = Box(0, 2, tuple(), dtype=np.float32)
# casting will match the correct type
assert space.contains(0.5)
# float64 is not in float32 space
assert not space.contains(np.array(0.5))
assert not space.contains(np.array(1))
| 5,923 | Python | .py | 173 | 26.473988 | 87 | 0.554993 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
2,289,399 | test_utils.py | WindyLab_Gym-PPS/gym/spaces/tests/test_utils.py | from collections import OrderedDict
import numpy as np
import pytest
from gym.spaces import Box, Dict, Discrete, MultiBinary, MultiDiscrete, Tuple, utils
spaces = [
Discrete(3),
Box(low=0.0, high=np.inf, shape=(2, 2)),
Box(low=0.0, high=np.inf, shape=(2, 2), dtype=np.float16),
Tuple([Discrete(5), Discrete(10)]),
Tuple(
[
Discrete(5),
Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
]
),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
MultiDiscrete([2, 2, 10]),
MultiBinary(10),
Dict(
{
"position": Discrete(5),
"velocity": Box(
low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
),
}
),
]
flatdims = [3, 4, 4, 15, 7, 9, 14, 10, 7]
@pytest.mark.parametrize(["space", "flatdim"], zip(spaces, flatdims))
def test_flatdim(space, flatdim):
dim = utils.flatdim(space)
assert dim == flatdim, "Expected {} to equal {}".format(dim, flatdim)
@pytest.mark.parametrize("space", spaces)
def test_flatten_space_boxes(space):
flat_space = utils.flatten_space(space)
assert isinstance(flat_space, Box), "Expected {} to equal {}".format(
type(flat_space), Box
)
flatdim = utils.flatdim(space)
(single_dim,) = flat_space.shape
assert single_dim == flatdim, "Expected {} to equal {}".format(single_dim, flatdim)
@pytest.mark.parametrize("space", spaces)
def test_flat_space_contains_flat_points(space):
some_samples = [space.sample() for _ in range(10)]
flattened_samples = [utils.flatten(space, sample) for sample in some_samples]
flat_space = utils.flatten_space(space)
for i, flat_sample in enumerate(flattened_samples):
assert flat_sample in flat_space, "Expected sample #{} {} to be in {}".format(
i, flat_sample, flat_space
)
@pytest.mark.parametrize("space", spaces)
def test_flatten_dim(space):
sample = utils.flatten(space, space.sample())
(single_dim,) = sample.shape
flatdim = utils.flatdim(space)
assert single_dim == flatdim, "Expected {} to equal {}".format(single_dim, flatdim)
@pytest.mark.parametrize("space", spaces)
def test_flatten_roundtripping(space):
some_samples = [space.sample() for _ in range(10)]
flattened_samples = [utils.flatten(space, sample) for sample in some_samples]
roundtripped_samples = [
utils.unflatten(space, sample) for sample in flattened_samples
]
for i, (original, roundtripped) in enumerate(
zip(some_samples, roundtripped_samples)
):
assert compare_nested(
original, roundtripped
), "Expected sample #{} {} to equal {}".format(i, original, roundtripped)
def compare_nested(left, right):
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
return np.allclose(left, right)
elif isinstance(left, OrderedDict) and isinstance(right, OrderedDict):
res = len(left) == len(right)
for ((left_key, left_value), (right_key, right_value)) in zip(
left.items(), right.items()
):
if not res:
return False
res = left_key == right_key and compare_nested(left_value, right_value)
return res
elif isinstance(left, (tuple, list)) and isinstance(right, (tuple, list)):
res = len(left) == len(right)
for (x, y) in zip(left, right):
if not res:
return False
res = compare_nested(x, y)
return res
else:
return left == right
"""
Expecteded flattened types are based off:
1. The type that the space is hardcoded as(ie. multi_discrete=np.int64, discrete=np.int64, multi_binary=np.int8)
2. The type that the space is instantiated with(ie. box=np.float32 by default unless instantiated with a different type)
3. The smallest type that the composite space(tuple, dict) can be represented as. In flatten, this is determined
internally by numpy when np.concatenate is called.
"""
expected_flattened_dtypes = [
np.int64,
np.float32,
np.float16,
np.int64,
np.float64,
np.int64,
np.int64,
np.int8,
np.float64,
]
@pytest.mark.parametrize(
["original_space", "expected_flattened_dtype"],
zip(spaces, expected_flattened_dtypes),
)
def test_dtypes(original_space, expected_flattened_dtype):
flattened_space = utils.flatten_space(original_space)
original_sample = original_space.sample()
flattened_sample = utils.flatten(original_space, original_sample)
unflattened_sample = utils.unflatten(original_space, flattened_sample)
assert flattened_space.contains(
flattened_sample
), "Expected flattened_space to contain flattened_sample"
assert (
flattened_space.dtype == expected_flattened_dtype
), "Expected flattened_space's dtype to equal " "{}".format(
expected_flattened_dtype
)
assert flattened_sample.dtype == flattened_space.dtype, (
"Expected flattened_space's dtype to equal " "flattened_sample's dtype "
)
compare_sample_types(original_space, original_sample, unflattened_sample)
def compare_sample_types(original_space, original_sample, unflattened_sample):
if isinstance(original_space, Discrete):
assert isinstance(unflattened_sample, int), (
"Expected unflattened_sample to be an int. unflattened_sample: "
"{} original_sample: {}".format(unflattened_sample, original_sample)
)
elif isinstance(original_space, Tuple):
for index in range(len(original_space)):
compare_sample_types(
original_space.spaces[index],
original_sample[index],
unflattened_sample[index],
)
elif isinstance(original_space, Dict):
for key, space in original_space.spaces.items():
compare_sample_types(space, original_sample[key], unflattened_sample[key])
else:
assert unflattened_sample.dtype == original_sample.dtype, (
"Expected unflattened_sample's dtype to equal "
"original_sample's dtype. unflattened_sample: "
"{} original_sample: {}".format(unflattened_sample, original_sample)
)
samples = [
2,
np.array([[1.0, 3.0], [5.0, 8.0]], dtype=np.float32),
np.array([[1.0, 3.0], [5.0, 8.0]], dtype=np.float16),
(3, 7),
(2, np.array([0.5, 3.5], dtype=np.float32)),
(3, 0, 1),
np.array([0, 1, 7], dtype=np.int64),
np.array([0, 1, 1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int8),
OrderedDict(
[("position", 3), ("velocity", np.array([0.5, 3.5], dtype=np.float32))]
),
]
expected_flattened_samples = [
np.array([0, 0, 1], dtype=np.int64),
np.array([1.0, 3.0, 5.0, 8.0], dtype=np.float32),
np.array([1.0, 3.0, 5.0, 8.0], dtype=np.float16),
np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], dtype=np.int64),
np.array([0, 0, 1, 0, 0, 0.5, 3.5], dtype=np.float64),
np.array([0, 0, 0, 1, 0, 1, 0, 0, 1], dtype=np.int64),
np.array([1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], dtype=np.int64),
np.array([0, 1, 1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int8),
np.array([0, 0, 0, 1, 0, 0.5, 3.5], dtype=np.float64),
]
@pytest.mark.parametrize(
["space", "sample", "expected_flattened_sample"],
zip(spaces, samples, expected_flattened_samples),
)
def test_flatten(space, sample, expected_flattened_sample):
assert sample in space
flattened_sample = utils.flatten(space, sample)
assert flattened_sample.shape == expected_flattened_sample.shape
assert flattened_sample.dtype == expected_flattened_sample.dtype
assert np.all(flattened_sample == expected_flattened_sample)
@pytest.mark.parametrize(
["space", "flattened_sample", "expected_sample"],
zip(spaces, expected_flattened_samples, samples),
)
def test_unflatten(space, flattened_sample, expected_sample):
sample = utils.unflatten(space, flattened_sample)
assert compare_nested(sample, expected_sample)
expected_flattened_spaces = [
Box(low=0, high=1, shape=(3,), dtype=np.int64),
Box(low=0.0, high=np.inf, shape=(4,), dtype=np.float32),
Box(low=0.0, high=np.inf, shape=(4,), dtype=np.float16),
Box(low=0, high=1, shape=(15,), dtype=np.int64),
Box(
low=np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64),
high=np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 5.0], dtype=np.float64),
dtype=np.float64,
),
Box(low=0, high=1, shape=(9,), dtype=np.int64),
Box(low=0, high=1, shape=(14,), dtype=np.int64),
Box(low=0, high=1, shape=(10,), dtype=np.int8),
Box(
low=np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64),
high=np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 5.0], dtype=np.float64),
dtype=np.float64,
),
]
@pytest.mark.parametrize(
["space", "expected_flattened_space"], zip(spaces, expected_flattened_spaces)
)
def test_flatten_space(space, expected_flattened_space):
flattened_space = utils.flatten_space(space)
assert flattened_space == expected_flattened_space
| 9,138 | Python | .py | 217 | 36.004608 | 120 | 0.645608 | WindyLab/Gym-PPS | 8 | 2 | 1 | GPL-2.0 | 9/5/2024, 10:48:35 PM (Europe/Amsterdam) |
Subsets and Splits