from abc import ABCMeta, abstractmethod
from typing import List, Optional, Sequence, cast
import gym
import numpy as np
from ..containers import FIFOQueue
from ..dataset import (
Episode,
MDPDataset,
Transition,
TransitionMiniBatch,
trace_back_and_clear,
)
from ..envs import BatchEnv
from .utility import get_action_size_from_env
class _Buffer(metaclass=ABCMeta):
_transitions: FIFOQueue[Transition]
_observation_shape: Sequence[int]
_action_size: int
_create_mask: bool
_mask_size: int
def __init__(
self,
maxlen: int,
env: Optional[gym.Env] = None,
episodes: Optional[List[Episode]] = None,
create_mask: bool = False,
mask_size: int = 1,
):
def drop_callback(transition: Transition) -> None:
# remove links when dropping the last transition
if transition.next_transition is None:
trace_back_and_clear(transition)
self._transitions = FIFOQueue(maxlen, drop_callback)
# extract shape information
if env:
observation_shape = env.observation_space.shape
action_size = get_action_size_from_env(env)
elif episodes:
observation_shape = episodes[0].get_observation_shape()
action_size = episodes[0].get_action_size()
else:
            raise ValueError("either env or episodes is required to determine shape.")
self._observation_shape = observation_shape
self._action_size = action_size
self._create_mask = create_mask
self._mask_size = mask_size
# add initial transitions
if episodes:
for episode in episodes:
self.append_episode(episode)
def append_episode(self, episode: Episode) -> None:
"""Append Episode object to buffer.
Args:
episode: episode.
"""
assert episode.get_observation_shape() == self._observation_shape
assert episode.get_action_size() == self._action_size
for transition in episode.transitions:
self._transitions.append(transition)
# add mask if necessary
if self._create_mask and transition.mask is None:
transition.mask = np.random.randint(2, size=self._mask_size)
@abstractmethod
def sample(
self,
batch_size: int,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
) -> TransitionMiniBatch:
"""Returns sampled mini-batch of transitions.
        If the observation is an image, you can stack an arbitrary number of
        frames via ``n_frames``.
.. code-block:: python
buffer.observation_shape == (3, 84, 84)
# stack 4 frames
batch = buffer.sample(batch_size=32, n_frames=4)
batch.observations.shape == (32, 12, 84, 84)
Args:
batch_size: mini-batch size.
n_frames: the number of frames to stack for image observation.
n_steps: the number of steps before the next observation.
gamma: discount factor used in N-step return calculation.
Returns:
mini-batch.
"""
@abstractmethod
def clip_episode(self) -> None:
"""Clips the current episode."""
def size(self) -> int:
"""Returns the number of appended elements in buffer.
Returns:
the number of elements in buffer.
"""
return len(self._transitions)
def to_mdp_dataset(self) -> MDPDataset:
"""Convert replay data into static dataset.
The length of the dataset can be longer than the length of the replay
buffer because this conversion is done by tracing ``Transition``
objects.
Returns:
MDPDataset object.
"""
# get the last transitions
tail_transitions: List[Transition] = []
for transition in self._transitions:
if transition.next_transition is None:
tail_transitions.append(transition)
observations = []
actions = []
rewards = []
terminals = []
episode_terminals = []
for transition in tail_transitions:
# trace transition to the beginning
episode_transitions: List[Transition] = []
while True:
episode_transitions.append(transition)
if transition.prev_transition is None:
break
transition = transition.prev_transition
episode_transitions.reverse()
# stack data
for episode_transition in episode_transitions:
observations.append(episode_transition.observation)
actions.append(episode_transition.action)
rewards.append(episode_transition.reward)
terminals.append(0.0)
episode_terminals.append(0.0)
observations.append(episode_transitions[-1].next_observation)
actions.append(episode_transitions[-1].next_action)
rewards.append(episode_transitions[-1].next_reward)
terminals.append(episode_transitions[-1].terminal)
episode_terminals.append(1.0)
if len(self._observation_shape) == 3:
observations = np.asarray(observations, dtype=np.uint8)
else:
observations = np.asarray(observations, dtype=np.float32)
return MDPDataset(
observations=observations,
actions=actions,
rewards=rewards,
terminals=terminals,
episode_terminals=episode_terminals,
create_mask=self._create_mask,
mask_size=self._mask_size,
)
def __len__(self) -> int:
return self.size()
@property
def transitions(self) -> FIFOQueue[Transition]:
"""Returns a FIFO queue of transitions.
Returns:
            d3rlpy.containers.FIFOQueue: FIFO queue of transitions.
"""
return self._transitions
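

# Illustrative note (added; not part of the original module): ``to_mdp_dataset``
# traces the linked ``Transition`` objects back to the start of each episode and
# also stores the terminal observation, so the resulting dataset is one step
# longer per episode than the buffer itself. A rough usage sketch, assuming a
# concrete subclass such as ``ReplayBuffer`` defined below:
#
#     buffer = ReplayBuffer(maxlen=100000, env=env)
#     ...  # fill the buffer via append() / clip_episode()
#     dataset = buffer.to_mdp_dataset()
#     # len(dataset.observations) == len(buffer) + number_of_episodes
#     dataset.dump("replay_data.h5")
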
class Buffer(_Buffer):
@abstractmethod
def append(
self,
observation: np.ndarray,
action: np.ndarray,
reward: float,
terminal: float,
clip_episode: Optional[bool] = None,
) -> None:
"""Append observation, action, reward and terminal flag to buffer.
        If the terminal flag is True, Monte-Carlo returns will be computed over
        the entire episode and all of its transitions will be appended.
Args:
observation: observation.
action: action.
reward: reward.
terminal: terminal flag.
clip_episode: flag to clip the current episode. If ``None``, the
episode is clipped based on ``terminal``.
"""
class BatchBuffer(_Buffer):
@abstractmethod
def append(
self,
observations: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
terminals: np.ndarray,
clip_episodes: Optional[np.ndarray] = None,
) -> None:
"""Append observation, action, reward and terminal flag to buffer.
If the terminal flag is True, Monte-Carlo returns will be computed with
an entire episode and the whole transitions will be appended.
Args:
observations: observation.
actions: action.
rewards: reward.
terminals: terminal flag.
clip_episodes: flag to clip the current episode. If ``None``, the
episode is clipped based on ``terminal``.
"""
class BasicSampleMixin:
_transitions: FIFOQueue[Transition]
def sample(
self,
batch_size: int,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
) -> TransitionMiniBatch:
indices = np.random.choice(len(self._transitions), batch_size)
transitions = [self._transitions[index] for index in indices]
batch = TransitionMiniBatch(transitions, n_frames, n_steps, gamma)
return batch
class ReplayBuffer(BasicSampleMixin, Buffer):
"""Standard Replay Buffer.
Args:
        maxlen (int): the maximum number of transitions to store.
env (gym.Env): gym-like environment to extract shape information.
episodes (list(d3rlpy.dataset.Episode)): list of episodes to
initialize buffer.
create_mask (bool): flag to create bootstrapping mask.
mask_size (int): ensemble size for binary mask.
"""
_prev_observation: Optional[np.ndarray]
_prev_action: Optional[np.ndarray]
_prev_reward: float
_prev_transition: Optional[Transition]
def __init__(
self,
maxlen: int,
env: Optional[gym.Env] = None,
episodes: Optional[List[Episode]] = None,
create_mask: bool = False,
mask_size: int = 1,
):
super().__init__(maxlen, env, episodes, create_mask, mask_size)
self._prev_observation = None
self._prev_action = None
self._prev_reward = 0.0
self._prev_transition = None
def append(
self,
observation: np.ndarray,
action: np.ndarray,
reward: float,
terminal: float,
clip_episode: Optional[bool] = None,
) -> None:
# if None, use terminal
if clip_episode is None:
clip_episode = bool(terminal)
# validation
assert observation.shape == self._observation_shape
if isinstance(action, np.ndarray):
assert action.shape[0] == self._action_size
else:
action = int(action)
assert action < self._action_size
# not allow terminal=True and clip_episode=False
assert not (terminal and not clip_episode)
# create Transition object
if self._prev_observation is not None:
if isinstance(terminal, bool):
terminal = 1.0 if terminal else 0.0
# create binary mask
if self._create_mask:
mask = np.random.randint(2, size=self._mask_size)
else:
mask = None
transition = Transition(
observation_shape=self._observation_shape,
action_size=self._action_size,
observation=self._prev_observation,
action=self._prev_action,
reward=self._prev_reward,
next_observation=observation,
next_action=action,
next_reward=reward,
terminal=terminal,
mask=mask,
prev_transition=self._prev_transition,
)
if self._prev_transition:
self._prev_transition.next_transition = transition
self._transitions.append(transition)
self._prev_transition = transition
self._prev_observation = observation
self._prev_action = action
self._prev_reward = reward
if clip_episode:
self.clip_episode()
def clip_episode(self) -> None:
self._prev_observation = None
self._prev_action = None
self._prev_reward = 0.0
self._prev_transition = None
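

# A minimal usage sketch (added; not part of the original module). It assumes the
# classic gym API (``env.step`` returning a 4-tuple) and an arbitrary environment
# name, and mirrors the append/clip/sample cycle used by the training loops in
# ``d3rlpy.online.iterators``.
def _replay_buffer_usage_sketch() -> None:  # pragma: no cover
    env = gym.make("CartPole-v0")
    buffer = ReplayBuffer(maxlen=100000, env=env)
    observation, reward, terminal = env.reset(), 0.0, False
    for _ in range(1000):
        action = env.action_space.sample()
        # reward/terminal passed here belong to the step that produced observation
        buffer.append(observation, action, reward, terminal)
        if terminal:
            observation, reward, terminal = env.reset(), 0.0, False
        else:
            observation, reward, terminal, _ = env.step(action)
    # close the trailing episode and sample a mini-batch
    buffer.clip_episode()
    batch = buffer.sample(batch_size=32)
    assert len(batch.observations) == 32
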
class BatchReplayBuffer(BasicSampleMixin, BatchBuffer):
"""Standard Replay Buffer for batch training.
Args:
        maxlen (int): the maximum number of transitions to store.
        env (d3rlpy.envs.BatchEnv): batch environment to extract shape
            information and the number of environments.
        episodes (list(d3rlpy.dataset.Episode)): list of episodes to
            initialize buffer.
create_mask (bool): flag to create bootstrapping mask.
mask_size (int): ensemble size for binary mask.
"""
_n_envs: int
_prev_observations: List[Optional[np.ndarray]]
_prev_actions: List[Optional[np.ndarray]]
_prev_rewards: List[Optional[np.ndarray]]
_prev_transitions: List[Optional[Transition]]
def __init__(
self,
maxlen: int,
env: BatchEnv,
episodes: Optional[List[Episode]] = None,
create_mask: bool = False,
mask_size: int = 1,
):
super().__init__(maxlen, env, episodes, create_mask, mask_size)
self._n_envs = len(env)
self._prev_observations = [None for _ in range(len(env))]
self._prev_actions = [None for _ in range(len(env))]
self._prev_rewards = [None for _ in range(len(env))]
self._prev_transitions = [None for _ in range(len(env))]
def append(
self,
observations: np.ndarray,
actions: np.ndarray,
rewards: np.ndarray,
terminals: np.ndarray,
clip_episodes: Optional[np.ndarray] = None,
) -> None:
# if None, use terminal
if clip_episodes is None:
clip_episodes = terminals
# validation
assert observations.shape == (self._n_envs, *self._observation_shape)
if actions.ndim == 2:
assert actions.shape == (self._n_envs, self._action_size)
else:
assert actions.shape == (self._n_envs,)
assert rewards.shape == (self._n_envs,)
assert terminals.shape == (self._n_envs,)
# not allow terminal=True and clip_episode=False
assert np.all(terminals - clip_episodes < 1)
# create Transition objects
for i in range(self._n_envs):
if self._prev_observations[i] is not None:
prev_observation = self._prev_observations[i]
prev_action = self._prev_actions[i]
prev_reward = cast(np.ndarray, self._prev_rewards[i])
prev_transition = self._prev_transitions[i]
# create binary mask
if self._create_mask:
mask = np.random.randint(2, size=self._mask_size)
else:
mask = None
transition = Transition(
observation_shape=self._observation_shape,
action_size=self._action_size,
observation=prev_observation,
action=prev_action,
reward=float(prev_reward),
next_observation=observations[i],
next_action=actions[i],
next_reward=float(rewards[i]),
terminal=float(terminals[i]),
mask=mask,
prev_transition=prev_transition,
)
if prev_transition:
prev_transition.next_transition = transition
self._transitions.append(transition)
self._prev_transitions[i] = transition
self._prev_observations[i] = observations[i]
self._prev_actions[i] = actions[i]
self._prev_rewards[i] = rewards[i]
if clip_episodes[i]:
self._prev_observations[i] = None
self._prev_actions[i] = None
self._prev_rewards[i] = None
self._prev_transitions[i] = None
def clip_episode(self) -> None:
for i in range(self._n_envs):
self._prev_observations[i] = None
self._prev_actions[i] = None
self._prev_rewards[i] = None
            self._prev_transitions[i] = None

# --- source file: d3rlpy/online/buffers.py (package: zjkdemo2-0.91) ---
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Union
import numpy as np
from typing_extensions import Protocol
from ..preprocessing.action_scalers import ActionScaler, MinMaxActionScaler
class _ActionProtocol(Protocol):
def predict(self, x: Union[np.ndarray, List[Any]]) -> np.ndarray:
...
def sample_action(self, x: Union[np.ndarray, List[Any]]) -> np.ndarray:
...
@property
def action_size(self) -> Optional[int]:
...
@property
def action_scaler(self) -> Optional[ActionScaler]:
...
class Explorer(metaclass=ABCMeta):
@abstractmethod
def sample(
self, algo: _ActionProtocol, x: np.ndarray, step: int
) -> np.ndarray:
pass
class ConstantEpsilonGreedy(Explorer):
""":math:`\\epsilon`-greedy explorer with constant :math:`\\epsilon`.
Args:
epsilon (float): the constant :math:`\\epsilon`.
"""
_epsilon: float
def __init__(self, epsilon: float):
self._epsilon = epsilon
def sample(
self, algo: _ActionProtocol, x: np.ndarray, step: int
) -> np.ndarray:
greedy_actions = algo.predict(x)
random_actions = np.random.randint(algo.action_size, size=x.shape[0])
is_random = np.random.random(x.shape[0]) < self._epsilon
return np.where(is_random, random_actions, greedy_actions)
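

# Minimal sketch (added; not part of the original module): ``sample`` only needs
# an object exposing ``predict`` and ``action_size``, so a tiny stub is enough to
# see the epsilon-greedy mixing. The stub below is a partial stand-in for
# ``_ActionProtocol`` and is an assumption for demonstration purposes only.
def _constant_epsilon_greedy_sketch() -> None:  # pragma: no cover
    class _GreedyStub:
        action_size = 4

        def predict(self, x: np.ndarray) -> np.ndarray:
            # always pick action 0 greedily
            return np.zeros(len(x), dtype=np.int64)

    explorer = ConstantEpsilonGreedy(epsilon=0.3)
    observations = np.random.random((8, 16)).astype(np.float32)
    actions = explorer.sample(_GreedyStub(), observations, step=0)
    # roughly 30% of the entries are random actions, the rest are the greedy 0
    assert actions.shape == (8,)
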
class LinearDecayEpsilonGreedy(Explorer):
""":math:`\\epsilon`-greedy explorer with linear decay schedule.
Args:
start_epsilon (float): the beginning :math:`\\epsilon`.
end_epsilon (float): the end :math:`\\epsilon`.
duration (int): the scheduling duration.
"""
_start_epsilon: float
_end_epsilon: float
_duration: int
def __init__(
self,
start_epsilon: float = 1.0,
end_epsilon: float = 0.1,
duration: int = 1000000,
):
self._start_epsilon = start_epsilon
self._end_epsilon = end_epsilon
self._duration = duration
def sample(
self, algo: _ActionProtocol, x: np.ndarray, step: int
) -> np.ndarray:
"""Returns :math:`\\epsilon`-greedy action.
Args:
algo: algorithm.
x: observation.
step: current environment step.
Returns:
:math:`\\epsilon`-greedy action.
"""
greedy_actions = algo.predict(x)
random_actions = np.random.randint(algo.action_size, size=x.shape[0])
is_random = np.random.random(x.shape[0]) < self.compute_epsilon(step)
return np.where(is_random, random_actions, greedy_actions)
def compute_epsilon(self, step: int) -> float:
"""Returns decayed :math:`\\epsilon`.
Returns:
:math:`\\epsilon`.
"""
if step >= self._duration:
return self._end_epsilon
base = self._start_epsilon - self._end_epsilon
return base * (1.0 - step / self._duration) + self._end_epsilon
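

# Worked example of the decay schedule above (added note; not in the original
# module). With the defaults ``start_epsilon=1.0``, ``end_epsilon=0.1`` and
# ``duration=1000000``:
#
#     explorer = LinearDecayEpsilonGreedy()
#     explorer.compute_epsilon(0)        # 0.9 * 1.0 + 0.1 = 1.0
#     explorer.compute_epsilon(500000)   # 0.9 * 0.5 + 0.1 = 0.55
#     explorer.compute_epsilon(2000000)  # past duration, clipped to 0.1
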
class NormalNoise(Explorer):
"""Normal noise explorer.
Args:
mean (float): mean.
std (float): standard deviation.
"""
_mean: float
_std: float
def __init__(self, mean: float = 0.0, std: float = 0.1):
self._mean = mean
self._std = std
def sample(
self, algo: _ActionProtocol, x: np.ndarray, step: int
) -> np.ndarray:
"""Returns action with noise injection.
Args:
algo: algorithm.
            x: observation.
            step: current environment step.
Returns:
action with noise injection.
"""
action = algo.predict(x)
# FIXME: Scalar noise works better for some reason.
# But this is different from paper.
noise = np.random.normal(self._mean, self._std)
if isinstance(algo.action_scaler, MinMaxActionScaler):
# scale noise
params = algo.action_scaler.get_params()
minimum = params["minimum"]
maximum = params["maximum"]
else:
minimum = -1.0
maximum = 1.0
        return np.clip(action + noise, minimum, maximum)

# --- source file: d3rlpy/online/explorers.py (package: zjkdemo2-0.91) ---
from typing import Any, Callable, Dict, List, Optional, Union
import gym
import numpy as np
from tqdm import trange
from typing_extensions import Protocol
from ..dataset import TransitionMiniBatch
from ..envs import BatchEnv
from ..logger import LOG, D3RLPyLogger
from ..metrics.scorer import evaluate_on_environment
from ..preprocessing import ActionScaler, Scaler
from ..preprocessing.stack import BatchStackedObservation, StackedObservation
from .buffers import BatchBuffer, Buffer
from .explorers import Explorer
class AlgoProtocol(Protocol):
def update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
...
def build_with_env(self, env: gym.Env) -> None:
...
def save_params(self, logger: D3RLPyLogger) -> None:
...
def predict(self, x: Union[np.ndarray, List[Any]]) -> np.ndarray:
...
def sample_action(self, x: Union[np.ndarray, List[Any]]) -> np.ndarray:
...
def save_model(self, fname: str) -> None:
...
def set_active_logger(self, logger: D3RLPyLogger) -> None:
...
@property
def action_size(self) -> Optional[int]:
...
@property
def scaler(self) -> Optional[Scaler]:
...
@property
def action_scaler(self) -> Optional[ActionScaler]:
...
@property
def n_frames(self) -> int:
...
@property
def n_steps(self) -> int:
...
@property
def gamma(self) -> float:
...
@property
def batch_size(self) -> int:
...
@property
def impl(self) -> Optional[Any]:
...
@property
def grad_step(self) -> int:
...
def _setup_algo(algo: AlgoProtocol, env: gym.Env) -> None:
# initialize scaler
if algo.scaler:
LOG.debug("Fitting scaler...", scler=algo.scaler.get_type())
algo.scaler.fit_with_env(env)
# initialize action scaler
if algo.action_scaler:
LOG.debug(
"Fitting action scaler...",
            action_scaler=algo.action_scaler.get_type(),
)
algo.action_scaler.fit_with_env(env)
# setup algorithm
if algo.impl is None:
LOG.debug("Building model...")
algo.build_with_env(env)
LOG.debug("Model has been built.")
else:
LOG.warning("Skip building models since they're already built.")
def train_single_env(
algo: AlgoProtocol,
env: gym.Env,
buffer: Buffer,
explorer: Optional[Explorer] = None,
n_steps: int = 1000000,
n_steps_per_epoch: int = 10000,
update_interval: int = 1,
update_start_step: int = 0,
random_steps: int = 0,
eval_env: Optional[gym.Env] = None,
eval_epsilon: float = 0.0,
save_metrics: bool = True,
save_interval: int = 1,
experiment_name: Optional[str] = None,
with_timestamp: bool = True,
logdir: str = "d3rlpy_logs",
verbose: bool = True,
show_progress: bool = True,
tensorboard_dir: Optional[str] = None,
timelimit_aware: bool = True,
callback: Optional[Callable[[AlgoProtocol, int, int], None]] = None,
) -> None:
"""Start training loop of online deep reinforcement learning.
Args:
algo: algorithm object.
env: gym-like environment.
        buffer: replay buffer.
explorer: action explorer.
n_steps: the number of total steps to train.
n_steps_per_epoch: the number of steps per epoch.
update_interval: the number of steps per update.
        update_start_step: the number of steps before starting updates.
        random_steps: the number of steps for the initial random exploration.
eval_env: gym-like environment. If None, evaluation is skipped.
eval_epsilon: :math:`\\epsilon`-greedy factor during evaluation.
save_metrics: flag to record metrics. If False, the log
directory is not created and the model parameters are not saved.
save_interval: the number of epochs before saving models.
experiment_name: experiment name for logging. If not passed,
the directory name will be ``{class name}_online_{timestamp}``.
with_timestamp: flag to add timestamp string to the last of
directory name.
logdir: root directory name to save logs.
verbose: flag to show logged information on stdout.
show_progress: flag to show progress bar for iterations.
tensorboard_dir: directory to save logged information in
tensorboard (additional to the csv data). if ``None``, the
directory will not be created.
timelimit_aware: flag to turn ``terminal`` flag ``False`` when
``TimeLimit.truncated`` flag is ``True``, which is designed to
            work with ``gym.wrappers.TimeLimit``.
        callback: callable function that takes ``(algo, epoch, total_step)``,
            which is called at the end of epochs.
"""
# setup logger
if experiment_name is None:
experiment_name = algo.__class__.__name__ + "_online"
logger = D3RLPyLogger(
experiment_name,
save_metrics=save_metrics,
root_dir=logdir,
verbose=verbose,
tensorboard_dir=tensorboard_dir,
with_timestamp=with_timestamp,
)
algo.set_active_logger(logger)
# initialize algorithm parameters
_setup_algo(algo, env)
observation_shape = env.observation_space.shape
is_image = len(observation_shape) == 3
# prepare stacked observation
if is_image:
stacked_frame = StackedObservation(observation_shape, algo.n_frames)
# save hyperparameters
algo.save_params(logger)
# switch based on show_progress flag
xrange = trange if show_progress else range
# setup evaluation scorer
eval_scorer: Optional[Callable[..., float]]
if eval_env:
eval_scorer = evaluate_on_environment(eval_env, epsilon=eval_epsilon)
else:
eval_scorer = None
# start training loop
observation, reward, terminal = env.reset(), 0.0, False
rollout_return = 0.0
clip_episode = False
for total_step in xrange(1, n_steps + 1):
with logger.measure_time("step"):
# stack observation if necessary
if is_image:
stacked_frame.append(observation)
fed_observation = stacked_frame.eval()
else:
observation = observation.astype("f4")
fed_observation = observation
# sample exploration action
with logger.measure_time("inference"):
if total_step < random_steps:
action = env.action_space.sample()
elif explorer:
x = fed_observation.reshape((1,) + fed_observation.shape)
action = explorer.sample(algo, x, total_step)[0]
else:
action = algo.sample_action([fed_observation])[0]
# store observation
buffer.append(
observation=observation,
action=action,
reward=reward,
terminal=terminal,
clip_episode=clip_episode,
)
# get next observation
if clip_episode:
observation, reward, terminal = env.reset(), 0.0, False
clip_episode = False
logger.add_metric("rollout_return", rollout_return)
rollout_return = 0.0
# for image observation
if is_image:
stacked_frame.clear()
else:
with logger.measure_time("environment_step"):
observation, reward, terminal, info = env.step(action)
rollout_return += reward
# special case for TimeLimit wrapper
if timelimit_aware and "TimeLimit.truncated" in info:
clip_episode = True
terminal = False
else:
clip_episode = terminal
            # pseudo epoch count
epoch = total_step // n_steps_per_epoch
if total_step > update_start_step and len(buffer) > algo.batch_size:
if total_step % update_interval == 0:
# sample mini-batch
with logger.measure_time("sample_batch"):
batch = buffer.sample(
batch_size=algo.batch_size,
n_frames=algo.n_frames,
n_steps=algo.n_steps,
gamma=algo.gamma,
)
# update parameters
with logger.measure_time("algorithm_update"):
loss = algo.update(batch)
# record metrics
for name, val in loss.items():
logger.add_metric(name, val)
# call callback if given
if callback:
callback(algo, epoch, total_step)
if epoch > 0 and total_step % n_steps_per_epoch == 0:
# evaluation
if eval_scorer:
logger.add_metric("evaluation", eval_scorer(algo))
if epoch % save_interval == 0:
logger.save_model(total_step, algo)
# save metrics
logger.commit(epoch, total_step)
# clip the last episode
buffer.clip_episode()
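

# Usage sketch (added; not part of the original module). The algorithm,
# environment and hyperparameters below are arbitrary assumptions; any object
# satisfying ``AlgoProtocol`` works. In d3rlpy this loop is typically reached
# through ``AlgoBase.fit_online``, which wraps ``train_single_env``:
#
#     import gym
#     from d3rlpy.algos import DQN
#     from d3rlpy.online.buffers import ReplayBuffer
#     from d3rlpy.online.explorers import LinearDecayEpsilonGreedy
#
#     env = gym.make("CartPole-v0")
#     train_single_env(
#         algo=DQN(),
#         env=env,
#         buffer=ReplayBuffer(maxlen=100000, env=env),
#         explorer=LinearDecayEpsilonGreedy(duration=10000),
#         n_steps=30000,
#         n_steps_per_epoch=1000,
#         eval_env=gym.make("CartPole-v0"),
#     )
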
def train_batch_env(
algo: AlgoProtocol,
env: BatchEnv,
buffer: BatchBuffer,
explorer: Optional[Explorer] = None,
n_epochs: int = 1000,
n_steps_per_epoch: int = 1000,
n_updates_per_epoch: int = 1000,
eval_interval: int = 10,
eval_env: Optional[gym.Env] = None,
eval_epsilon: float = 0.0,
save_metrics: bool = True,
save_interval: int = 1,
experiment_name: Optional[str] = None,
with_timestamp: bool = True,
logdir: str = "d3rlpy_logs",
verbose: bool = True,
show_progress: bool = True,
tensorboard_dir: Optional[str] = None,
timelimit_aware: bool = True,
callback: Optional[Callable[[AlgoProtocol, int, int], None]] = None,
) -> None:
"""Start training loop of online deep reinforcement learning.
Args:
algo: algorithm object.
env: gym-like environment.
        buffer: replay buffer.
explorer: action explorer.
n_epochs: the number of epochs to train.
n_steps_per_epoch: the number of steps per epoch.
n_updates_per_epoch: the number of updates per epoch.
eval_interval: the number of epochs before evaluation.
eval_env: gym-like environment. If None, evaluation is skipped.
eval_epsilon: :math:`\\epsilon`-greedy factor during evaluation.
save_metrics: flag to record metrics. If False, the log
directory is not created and the model parameters are not saved.
save_interval: the number of epochs before saving models.
experiment_name: experiment name for logging. If not passed,
the directory name will be ``{class name}_online_{timestamp}``.
with_timestamp: flag to add timestamp string to the last of
directory name.
logdir: root directory name to save logs.
verbose: flag to show logged information on stdout.
show_progress: flag to show progress bar for iterations.
tensorboard_dir: directory to save logged information in
tensorboard (additional to the csv data). if ``None``, the
directory will not be created.
timelimit_aware: flag to turn ``terminal`` flag ``False`` when
``TimeLimit.truncated`` flag is ``True``, which is designed to
            work with ``gym.wrappers.TimeLimit``.
        callback: callable function that takes ``(algo, epoch, total_step)``,
            which is called at the end of epochs.
"""
# setup logger
if experiment_name is None:
experiment_name = algo.__class__.__name__ + "_online"
logger = D3RLPyLogger(
experiment_name,
save_metrics=save_metrics,
root_dir=logdir,
verbose=verbose,
tensorboard_dir=tensorboard_dir,
with_timestamp=with_timestamp,
)
algo.set_active_logger(logger)
# initialize algorithm parameters
_setup_algo(algo, env)
observation_shape = env.observation_space.shape
is_image = len(observation_shape) == 3
# prepare stacked observation
if is_image:
stacked_frame = BatchStackedObservation(
observation_shape, algo.n_frames, len(env)
)
# save hyperparameters
algo.save_params(logger)
# switch based on show_progress flag
xrange = trange if show_progress else range
# setup evaluation scorer
eval_scorer: Optional[Callable[..., float]]
if eval_env:
eval_scorer = evaluate_on_environment(eval_env, epsilon=eval_epsilon)
else:
eval_scorer = None
# start training loop
observation = env.reset()
reward, terminal = np.zeros(len(env)), np.zeros(len(env))
clip_episode = np.zeros(len(env))
for epoch in range(1, n_epochs + 1):
for step in xrange(n_steps_per_epoch):
total_step = len(env) * (epoch * n_steps_per_epoch + step)
# stack observation if necessary
if is_image:
stacked_frame.append(observation)
fed_observation = stacked_frame.eval()
else:
observation = observation.astype("f4")
fed_observation = observation
# sample exploration action
with logger.measure_time("inference"):
if explorer:
action = explorer.sample(algo, fed_observation, total_step)
else:
action = algo.sample_action(fed_observation)
# store observation
buffer.append(
observations=observation,
actions=action,
rewards=reward,
terminals=terminal,
clip_episodes=clip_episode,
)
# get next observation
with logger.measure_time("environment_step"):
observation, reward, terminal, infos = env.step(action)
# special case for TimeLimit wrapper
for i, info in enumerate(infos):
if timelimit_aware and "TimeLimit.truncated" in info:
clip_episode[i] = 1.0
terminal[i] = 0.0
else:
clip_episode[i] = terminal[i]
if clip_episode[i] and is_image:
stacked_frame.clear_by_index(i)
# call callback if given
if callback:
callback(algo, epoch, total_step)
for step in range(n_updates_per_epoch):
# sample mini-batch
with logger.measure_time("sample_batch"):
batch = buffer.sample(
batch_size=algo.batch_size,
n_frames=algo.n_frames,
n_steps=algo.n_steps,
gamma=algo.gamma,
)
# update parameters
with logger.measure_time("algorithm_update"):
loss = algo.update(batch)
# record metrics
for name, val in loss.items():
logger.add_metric(name, val)
if epoch % eval_interval == 0:
# evaluation
if eval_scorer:
logger.add_metric("evaluation", eval_scorer(algo))
# save metrics
logger.commit(epoch, total_step)
if epoch % save_interval == 0:
logger.save_model(total_step, algo)
    # finish all processes
env.close()
# clip the last episodes
buffer.clip_episode()
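

# Usage sketch (added; not part of the original module): given an already
# constructed ``BatchEnv`` instance ``batch_env`` (its construction is outside
# the scope of this sketch) and a ``BatchBuffer``, the batched loop is driven the
# same way as ``train_single_env``:
#
#     from d3rlpy.algos import SAC
#     from d3rlpy.online.buffers import BatchReplayBuffer
#
#     buffer = BatchReplayBuffer(maxlen=100000, env=batch_env)
#     train_batch_env(SAC(), batch_env, buffer, n_epochs=100)
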
def collect(
algo: AlgoProtocol,
env: gym.Env,
buffer: Buffer,
explorer: Optional[Explorer] = None,
deterministic: bool = False,
n_steps: int = 1000000,
show_progress: bool = True,
timelimit_aware: bool = True,
) -> None:
"""Collects data via interaction with environment.
Args:
algo: algorithm object.
env: gym-like environment.
        buffer: replay buffer.
explorer: action explorer.
deterministic: flag to collect data with the greedy policy.
        n_steps: the number of total steps to interact with the environment.
show_progress: flag to show progress bar for iterations.
timelimit_aware: flag to turn ``terminal`` flag ``False`` when
``TimeLimit.truncated`` flag is ``True``, which is designed to
            work with ``gym.wrappers.TimeLimit``.
"""
# initialize algorithm parameters
_setup_algo(algo, env)
observation_shape = env.observation_space.shape
is_image = len(observation_shape) == 3
# prepare stacked observation
if is_image:
stacked_frame = StackedObservation(observation_shape, algo.n_frames)
# switch based on show_progress flag
xrange = trange if show_progress else range
# start training loop
observation, reward, terminal = env.reset(), 0.0, False
clip_episode = False
for total_step in xrange(1, n_steps + 1):
# stack observation if necessary
if is_image:
stacked_frame.append(observation)
fed_observation = stacked_frame.eval()
else:
observation = observation.astype("f4")
fed_observation = observation
# sample exploration action
if deterministic:
action = algo.predict([fed_observation])[0]
else:
if explorer:
x = fed_observation.reshape((1,) + fed_observation.shape)
action = explorer.sample(algo, x, total_step)[0]
else:
action = algo.sample_action([fed_observation])[0]
# store observation
buffer.append(
observation=observation,
action=action,
reward=reward,
terminal=terminal,
clip_episode=clip_episode,
)
# get next observation
if clip_episode:
observation, reward, terminal = env.reset(), 0.0, False
clip_episode = False
# for image observation
if is_image:
stacked_frame.clear()
else:
observation, reward, terminal, info = env.step(action)
# special case for TimeLimit wrapper
if timelimit_aware and "TimeLimit.truncated" in info:
clip_episode = True
terminal = False
else:
clip_episode = terminal
# clip the last episode
    buffer.clip_episode()

# --- source file: d3rlpy/online/iterators.py (package: zjkdemo2-0.91) ---
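

# Usage sketch for ``collect`` above (added; not part of the original module; the
# environment and algorithm are arbitrary assumptions). Collected experience can
# then be frozen into an offline dataset:
#
#     import gym
#     from d3rlpy.algos import SAC
#     from d3rlpy.online.buffers import ReplayBuffer
#
#     env = gym.make("Pendulum-v0")
#     buffer = ReplayBuffer(maxlen=100000, env=env)
#     collect(SAC(), env, buffer, n_steps=10000)
#     dataset = buffer.to_mdp_dataset()
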
from typing import Any, Dict, Optional, Sequence
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.ddpg_impl import DDPGImpl
class DDPG(AlgoBase):
r"""Deep Deterministic Policy Gradients algorithm.
DDPG is an actor-critic algorithm that trains a Q function parametrized
with :math:`\theta` and a policy function parametrized with :math:`\phi`.
.. math::
        L(\theta) = \mathbb{E}_{s_t,\, a_t,\, r_{t+1},\, s_{t+1} \sim D} \Big[\big(r_{t+1}
            + \gamma Q_{\theta'}\big(s_{t+1}, \pi_{\phi'}(s_{t+1})\big)
            - Q_\theta(s_t, a_t)\big)^2\Big]
.. math::
J(\phi) = \mathbb{E}_{s_t \sim D} \Big[Q_\theta\big(s_t, \pi_\phi(s_t)\big)\Big]
    where :math:`\theta'` and :math:`\phi'` are the target network parameters.
    These target network parameters are updated every iteration.
.. math::
\theta' \gets \tau \theta + (1 - \tau) \theta'
\phi' \gets \tau \phi + (1 - \tau) \phi'
References:
* `Silver et al., Deterministic policy gradient algorithms.
<http://proceedings.mlr.press/v32/silver14.html>`_
* `Lillicrap et al., Continuous control with deep reinforcement
learning. <https://arxiv.org/abs/1509.02971>`_
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q function.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.ddpg_impl.DDPGImpl): algorithm implementation.
"""
_actor_learning_rate: float
_critic_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_tau: float
_n_critics: int
_target_reduction_type: str
_use_gpu: Optional[Device]
_impl: Optional[DDPGImpl]
def __init__(
self,
*,
actor_learning_rate: float = 3e-4,
critic_learning_rate: float = 3e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 100,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 1,
target_reduction_type: str = "min",
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[DDPGImpl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._tau = tau
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = DDPGImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
critic_loss = self._impl.update_critic(batch)
actor_loss = self._impl.update_actor(batch)
self._impl.update_critic_target()
self._impl.update_actor_target()
return {"critic_loss": critic_loss, "actor_loss": actor_loss}
def get_action_type(self) -> ActionSpace:
        return ActionSpace.CONTINUOUS

# --- source file: d3rlpy/algos/ddpg.py (package: zjkdemo2-0.91) ---
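

# Illustrative sketch (added; not part of the original module): the soft target
# update from the DDPG docstring, theta' <- tau * theta + (1 - tau) * theta',
# written directly in PyTorch. ``DDPGImpl.update_actor_target`` and
# ``update_critic_target`` are expected to perform the equivalent update; the
# function below is only a standalone demonstration of the formula.
def _soft_update_sketch(targ_net, net, tau=0.005):  # pragma: no cover
    import torch

    with torch.no_grad():
        for targ_param, param in zip(targ_net.parameters(), net.parameters()):
            # blend the online parameters into the target parameters
            targ_param.mul_(1.0 - tau).add_(param, alpha=tau)
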
from typing import Any, Dict, Optional, Sequence
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.crr_impl import CRRImpl
class CRR(AlgoBase):
r"""Critic Reguralized Regression algorithm.
CRR is a simple offline RL method similar to AWAC.
The policy is trained as a supervised regression.
.. math::
J(\phi) = \mathbb{E}_{s_t, a_t \sim D}
[\log \pi_\phi(a_t|s_t) f(Q_\theta, \pi_\phi, s_t, a_t)]
where :math:`f` is a filter function which has several options. The first
option is ``binary`` function.
.. math::
f := \mathbb{1} [A_\theta(s, a) > 0]
The other is ``exp`` function.
.. math::
f := \exp(A(s, a) / \beta)
    Here, :math:`A(s, a)` is an advantage function, which also has several options.
The first option is ``mean``.
.. math::
A(s, a) = Q_\theta (s, a) - \frac{1}{m} \sum^m_j Q(s, a_j)
The other one is ``max``.
.. math::
A(s, a) = Q_\theta (s, a) - \max^m_j Q(s, a_j)
where :math:`a_j \sim \pi_\phi(s)`.
In evaluation, the action is determined by Critic Weighted Policy (CWP).
    In CWP, several actions are sampled from the policy function, and the
    final action is re-sampled according to the estimated action-value
    distribution.
References:
        * `Wang et al., Critic Regularized Regression.
<https://arxiv.org/abs/2006.15134>`_
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
beta (float): temperature value defined as :math:`\beta` above.
n_action_samples (int): the number of sampled actions to calculate
:math:`A(s, a)` and for CWP.
advantage_type (str): advantage function type. The available options
are ``['mean', 'max']``.
weight_type (str): filter function type. The available options
are ``['binary', 'exp']``.
max_weight (float): maximum weight for cross-entropy loss.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
target_update_type (str): target update type. The available options are
``['hard', 'soft']``.
        tau (float): target network synchronization coefficient used with
``soft`` target update.
update_actor_interval (int): interval to update policy function used
with ``hard`` target update.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.crr_impl.CRRImpl): algorithm implementation.
"""
_actor_learning_rate: float
_critic_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_beta: float
_n_action_samples: int
_advantage_type: str
_weight_type: str
_max_weight: float
_n_critics: int
_target_update_type: str
_tau: float
_target_update_interval: int
_target_reduction_type: str
_update_actor_interval: int
_use_gpu: Optional[Device]
_impl: Optional[CRRImpl]
def __init__(
self,
*,
actor_learning_rate: float = 3e-4,
critic_learning_rate: float = 3e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 100,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
beta: float = 1.0,
n_action_samples: int = 4,
advantage_type: str = "mean",
weight_type: str = "exp",
max_weight: float = 20.0,
n_critics: int = 1,
target_update_type: str = "hard",
tau: float = 5e-3,
target_update_interval: int = 100,
target_reduction_type: str = "min",
update_actor_interval: int = 1,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[CRRImpl] = None,
**kwargs: Any,
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._beta = beta
self._n_action_samples = n_action_samples
self._advantage_type = advantage_type
self._weight_type = weight_type
self._max_weight = max_weight
self._n_critics = n_critics
self._target_update_type = target_update_type
self._tau = tau
self._target_update_interval = target_update_interval
self._target_reduction_type = target_reduction_type
self._update_actor_interval = update_actor_interval
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = CRRImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
beta=self._beta,
n_action_samples=self._n_action_samples,
advantage_type=self._advantage_type,
weight_type=self._weight_type,
max_weight=self._max_weight,
n_critics=self._n_critics,
tau=self._tau,
target_reduction_type=self._target_reduction_type,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
critic_loss = self._impl.update_critic(batch)
actor_loss = self._impl.update_actor(batch)
if self._target_update_type == "hard":
if self._grad_step % self._target_update_interval == 0:
self._impl.sync_critic_target()
self._impl.sync_actor_target()
elif self._target_update_type == "soft":
self._impl.update_critic_target()
self._impl.update_actor_target()
else:
raise ValueError(
f"invalid target_update_type: {self._target_update_type}"
)
return {"critic_loss": critic_loss, "actor_loss": actor_loss}
def get_action_type(self) -> ActionSpace:
        return ActionSpace.CONTINUOUS

# --- source file: d3rlpy/algos/crr.py (package: zjkdemo2-0.91) ---
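

# Illustrative sketch (added; not part of the original module): the two filter
# functions f from the CRR docstring, computed from an advantage estimate
# A(s, a) with NumPy. The clipping by ``max_weight`` mirrors the ``max_weight``
# argument documented above; everything else is an assumption for demonstration
# purposes.
def _crr_weight_sketch(
    advantages, weight_type="exp", beta=1.0, max_weight=20.0
):  # pragma: no cover
    import numpy as np

    if weight_type == "binary":
        # f := 1[A(s, a) > 0]
        return (advantages > 0.0).astype(np.float32)
    if weight_type == "exp":
        # f := exp(A(s, a) / beta), clipped to keep the weights bounded
        return np.minimum(np.exp(advantages / beta), max_weight)
    raise ValueError(f"invalid weight_type: {weight_type}")
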
from typing import Any, Dict, List, Optional, Sequence
import numpy as np
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import Transition, TransitionMiniBatch
from ..dynamics import DynamicsBase
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.combo_impl import COMBOImpl
from .utility import ModelBaseMixin
class COMBO(ModelBaseMixin, AlgoBase):
r"""Conservative Offline Model-Based Optimization.
COMBO is a model-based RL approach for offline policy optimization.
    COMBO is similar to MOPO, but it also leverages the conservative loss
    proposed in CQL.
.. math::
        L(\theta_i) = \mathbb{E}_{s \sim d_M}
            \big[\log{\sum_a \exp{Q_{\theta_i}(s, a)}}\big]
            - \mathbb{E}_{s, a \sim D} \big[Q_{\theta_i}(s, a)\big]
            + L_\mathrm{SAC}(\theta_i)
Note:
Currently, COMBO only supports vector observations.
References:
* `Yu et al., COMBO: Conservative Offline Model-Based Policy
Optimization. <https://arxiv.org/abs/2102.08363>`_
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
temp_learning_rate (float): learning rate for temperature parameter.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
temp_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the temperature.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
update_actor_interval (int): interval to update policy function.
initial_temperature (float): initial temperature value.
conservative_weight (float): constant weight to scale conservative loss.
n_action_samples (int): the number of sampled actions to compute
:math:`\log{\sum_a \exp{Q(s, a)}}`.
soft_q_backup (bool): flag to use SAC-style backup.
dynamics (d3rlpy.dynamics.DynamicsBase): dynamics object.
rollout_interval (int): the number of steps before rollout.
rollout_horizon (int): the rollout step length.
rollout_batch_size (int): the number of initial transitions for
rollout.
        real_ratio (float): the ratio of real dataset samples in a mini-batch.
generated_maxlen (int): the maximum number of generated samples.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.combo_impl.COMBOImpl):
algorithm implementation.
"""
_actor_learning_rate: float
_critic_learning_rate: float
_temp_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_temp_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_tau: float
_n_critics: int
_target_reduction_type: str
_update_actor_interval: int
_initial_temperature: float
_conservative_weight: float
_n_action_samples: int
_soft_q_backup: bool
_dynamics: Optional[DynamicsBase]
_rollout_interval: int
_rollout_horizon: int
_rollout_batch_size: int
_use_gpu: Optional[Device]
_impl: Optional[COMBOImpl]
def __init__(
self,
*,
actor_learning_rate: float = 1e-4,
critic_learning_rate: float = 3e-4,
temp_learning_rate: float = 1e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
temp_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 256,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 2,
target_reduction_type: str = "min",
update_actor_interval: int = 1,
initial_temperature: float = 1.0,
conservative_weight: float = 1.0,
n_action_samples: int = 10,
soft_q_backup: bool = False,
dynamics: Optional[DynamicsBase] = None,
rollout_interval: int = 1000,
rollout_horizon: int = 5,
rollout_batch_size: int = 50000,
real_ratio: float = 0.5,
generated_maxlen: int = 50000 * 5 * 5,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[COMBOImpl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
real_ratio=real_ratio,
generated_maxlen=generated_maxlen,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._temp_learning_rate = temp_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._temp_optim_factory = temp_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._tau = tau
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._update_actor_interval = update_actor_interval
self._initial_temperature = initial_temperature
self._conservative_weight = conservative_weight
self._n_action_samples = n_action_samples
self._soft_q_backup = soft_q_backup
self._dynamics = dynamics
self._rollout_interval = rollout_interval
self._rollout_horizon = rollout_horizon
self._rollout_batch_size = rollout_batch_size
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = COMBOImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
temp_learning_rate=self._temp_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
temp_optim_factory=self._temp_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
initial_temperature=self._initial_temperature,
conservative_weight=self._conservative_weight,
n_action_samples=self._n_action_samples,
real_ratio=self._real_ratio,
soft_q_backup=self._soft_q_backup,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
# delayed policy update
if self._grad_step % self._update_actor_interval == 0:
actor_loss = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss})
# lagrangian parameter update for SAC temperature
if self._temp_learning_rate > 0:
temp_loss, temp = self._impl.update_temp(batch)
metrics.update({"temp_loss": temp_loss, "temp": temp})
self._impl.update_critic_target()
self._impl.update_actor_target()
return metrics
def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS
def _is_generating_new_data(self) -> bool:
return self._grad_step % self._rollout_interval == 0
def _sample_initial_transitions(
self, transitions: List[Transition]
) -> List[Transition]:
# uniformly sample transitions
n_transitions = self._rollout_batch_size
indices = np.random.randint(len(transitions), size=n_transitions)
return [transitions[i] for i in indices]
def _get_rollout_horizon(self) -> int:
        return self._rollout_horizon

# --- source file: d3rlpy/algos/combo.py (package: zjkdemo2-0.91) ---
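

# Illustrative sketch (added; not part of the original module): the conservative
# term from the COMBO docstring, E_{s ~ d_M}[log sum_a exp Q(s, a)]
# - E_{s, a ~ D}[Q(s, a)], computed with NumPy over sampled action-values. The
# array shapes and the stabilized log-sum-exp are assumptions for demonstration
# purposes; the actual loss lives inside ``COMBOImpl``.
def _conservative_term_sketch(model_q_values, dataset_q_values):  # pragma: no cover
    # model_q_values: shape (batch, n_action_samples), Q at model-generated states
    # dataset_q_values: shape (batch,), Q at dataset state-action pairs
    max_q = model_q_values.max(axis=1, keepdims=True)
    logsumexp = max_q[:, 0] + np.log(np.exp(model_q_values - max_q).sum(axis=1))
    return float(np.mean(logsumexp) - np.mean(dataset_q_values))
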
from typing import Any, Dict, Optional, Sequence
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.bear_impl import BEARImpl
class BEAR(AlgoBase):
r"""Bootstrapping Error Accumulation Reduction algorithm.
BEAR is a SAC-based data-driven deep reinforcement learning algorithm.
    BEAR constrains the support of the policy function within the data
    distribution by minimizing Maximum Mean Discrepancy (MMD) between the
    policy function and the approximated behavior policy function
    :math:`\pi_\beta(a|s)`, which is optimized through an L2 loss.
.. math::
L(\beta) = \mathbb{E}_{s_t, a_t \sim D, a \sim
\pi_\beta(\cdot|s_t)} [(a - a_t)^2]
The policy objective is a combination of SAC's objective and MMD penalty.
.. math::
J(\phi) = J_{SAC}(\phi) - \mathbb{E}_{s_t \sim D} \alpha (
\text{MMD}(\pi_\beta(\cdot|s_t), \pi_\phi(\cdot|s_t))
- \epsilon)
where MMD is computed as follows.
.. math::
\text{MMD}(x, y) = \frac{1}{N^2} \sum_{i, i'} k(x_i, x_{i'})
- \frac{2}{NM} \sum_{i, j} k(x_i, y_j)
+ \frac{1}{M^2} \sum_{j, j'} k(y_j, y_{j'})
    where :math:`k(x, y)` is a Gaussian kernel
    :math:`k(x, y) = \exp(-(x - y)^2 / (2 \sigma^2))`.
    :math:`\alpha` is also adjustable through dual gradient descent where
:math:`\alpha` becomes smaller if MMD is smaller than the threshold
:math:`\epsilon`.
References:
* `Kumar et al., Stabilizing Off-Policy Q-Learning via Bootstrapping
Error Reduction. <https://arxiv.org/abs/1906.00949>`_
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
imitator_learning_rate (float): learning rate for behavior policy
function.
temp_learning_rate (float): learning rate for temperature parameter.
alpha_learning_rate (float): learning rate for :math:`\alpha`.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
imitator_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the behavior policy.
temp_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the temperature.
alpha_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for :math:`\alpha`.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
imitator_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the behavior policy.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
initial_temperature (float): initial temperature value.
initial_alpha (float): initial :math:`\alpha` value.
alpha_threshold (float): threshold value described as
:math:`\epsilon`.
lam (float): weight for critic ensemble.
n_action_samples (int): the number of action samples to compute the
best action.
n_target_samples (int): the number of action samples to compute
BCQ-like target value.
n_mmd_action_samples (int): the number of action samples to compute MMD.
mmd_kernel (str): MMD kernel function. The available options are
``['gaussian', 'laplacian']``.
mmd_sigma (float): :math:`\sigma` for gaussian kernel in MMD
calculation.
vae_kl_weight (float): constant weight to scale KL term for behavior
policy training.
warmup_steps (int): the number of steps to warmup the policy
function.
        use_gpu (bool, int or d3rlpy.gpu.Device):
            flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
            The available options are `['pixel', 'min_max', 'standard']`.
        action_scaler (d3rlpy.preprocessing.ActionScaler or str):
            action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.bear_impl.BEARImpl): algorithm implementation.
"""
_actor_learning_rate: float
_critic_learning_rate: float
_imitator_learning_rate: float
_temp_learning_rate: float
_alpha_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_imitator_optim_factory: OptimizerFactory
_temp_optim_factory: OptimizerFactory
_alpha_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_imitator_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_tau: float
_n_critics: int
_initial_temperature: float
_initial_alpha: float
_alpha_threshold: float
_lam: float
_n_action_samples: int
_n_target_samples: int
_n_mmd_action_samples: int
_mmd_kernel: str
_mmd_sigma: float
_vae_kl_weight: float
_warmup_steps: int
_use_gpu: Optional[Device]
_impl: Optional[BEARImpl]
def __init__(
self,
*,
actor_learning_rate: float = 1e-4,
critic_learning_rate: float = 3e-4,
imitator_learning_rate: float = 3e-4,
temp_learning_rate: float = 1e-4,
alpha_learning_rate: float = 1e-3,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
imitator_optim_factory: OptimizerFactory = AdamFactory(),
temp_optim_factory: OptimizerFactory = AdamFactory(),
alpha_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
imitator_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 256,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 2,
initial_temperature: float = 1.0,
initial_alpha: float = 1.0,
alpha_threshold: float = 0.05,
lam: float = 0.75,
n_action_samples: int = 100,
n_target_samples: int = 10,
n_mmd_action_samples: int = 4,
mmd_kernel: str = "laplacian",
mmd_sigma: float = 20.0,
vae_kl_weight: float = 0.5,
warmup_steps: int = 40000,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[BEARImpl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._imitator_learning_rate = imitator_learning_rate
self._temp_learning_rate = temp_learning_rate
self._alpha_learning_rate = alpha_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._imitator_optim_factory = imitator_optim_factory
self._temp_optim_factory = temp_optim_factory
self._alpha_optim_factory = alpha_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._imitator_encoder_factory = check_encoder(imitator_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._tau = tau
self._n_critics = n_critics
self._initial_temperature = initial_temperature
self._initial_alpha = initial_alpha
self._alpha_threshold = alpha_threshold
self._lam = lam
self._n_action_samples = n_action_samples
self._n_target_samples = n_target_samples
self._n_mmd_action_samples = n_mmd_action_samples
self._mmd_kernel = mmd_kernel
self._mmd_sigma = mmd_sigma
self._vae_kl_weight = vae_kl_weight
self._warmup_steps = warmup_steps
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = BEARImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
imitator_learning_rate=self._imitator_learning_rate,
temp_learning_rate=self._temp_learning_rate,
alpha_learning_rate=self._alpha_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
imitator_optim_factory=self._imitator_optim_factory,
temp_optim_factory=self._temp_optim_factory,
alpha_optim_factory=self._alpha_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
imitator_encoder_factory=self._imitator_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
initial_temperature=self._initial_temperature,
initial_alpha=self._initial_alpha,
alpha_threshold=self._alpha_threshold,
lam=self._lam,
n_action_samples=self._n_action_samples,
n_target_samples=self._n_target_samples,
n_mmd_action_samples=self._n_mmd_action_samples,
mmd_kernel=self._mmd_kernel,
mmd_sigma=self._mmd_sigma,
vae_kl_weight=self._vae_kl_weight,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
imitator_loss = self._impl.update_imitator(batch)
metrics.update({"imitator_loss": imitator_loss})
# lagrangian parameter update for SAC temperature
if self._temp_learning_rate > 0:
temp_loss, temp = self._impl.update_temp(batch)
metrics.update({"temp_loss": temp_loss, "temp": temp})
# lagrangian parameter update for MMD loss weight
if self._alpha_learning_rate > 0:
alpha_loss, alpha = self._impl.update_alpha(batch)
metrics.update({"alpha_loss": alpha_loss, "alpha": alpha})
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
if self._grad_step < self._warmup_steps:
actor_loss = self._impl.warmup_actor(batch)
else:
actor_loss = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss})
self._impl.update_actor_target()
self._impl.update_critic_target()
return metrics
def get_action_type(self) -> ActionSpace:
        return ActionSpace.CONTINUOUS

# (end of file: d3rlpy/algos/bear.py)
from abc import abstractmethod
from typing import Any, Callable, List, Optional, Tuple, Union
import gym
import numpy as np
from ..base import ImplBase, LearnableBase
from ..constants import (
CONTINUOUS_ACTION_SPACE_MISMATCH_ERROR,
DISCRETE_ACTION_SPACE_MISMATCH_ERROR,
IMPL_NOT_INITIALIZED_ERROR,
ActionSpace,
)
from ..envs import BatchEnv
from ..online.buffers import (
BatchBuffer,
BatchReplayBuffer,
Buffer,
ReplayBuffer,
)
from ..online.explorers import Explorer
from ..online.iterators import (
AlgoProtocol,
collect,
train_batch_env,
train_single_env,
)
def _assert_action_space(algo: LearnableBase, env: gym.Env) -> None:
if isinstance(env.action_space, gym.spaces.Box):
assert (
algo.get_action_type() == ActionSpace.CONTINUOUS
), CONTINUOUS_ACTION_SPACE_MISMATCH_ERROR
elif isinstance(env.action_space, gym.spaces.discrete.Discrete):
assert (
algo.get_action_type() == ActionSpace.DISCRETE
), DISCRETE_ACTION_SPACE_MISMATCH_ERROR
else:
action_space = type(env.action_space)
raise ValueError(f"The action-space is not supported: {action_space}")
class AlgoImplBase(ImplBase):
@abstractmethod
def save_policy(self, fname: str) -> None:
pass
@abstractmethod
def predict_best_action(
self, x: Union[np.ndarray, List[Any]]
) -> np.ndarray:
pass
@abstractmethod
def predict_value(
self,
x: Union[np.ndarray, List[Any]],
action: Union[np.ndarray, List[Any]],
with_std: bool,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
pass
@abstractmethod
def sample_action(self, x: Union[np.ndarray, List[Any]]) -> np.ndarray:
pass
def copy_policy_from(self, impl: "AlgoImplBase") -> None:
raise NotImplementedError
def copy_policy_optim_from(self, impl: "AlgoImplBase") -> None:
raise NotImplementedError
def copy_q_function_from(self, impl: "AlgoImplBase") -> None:
raise NotImplementedError
def copy_q_function_optim_from(self, impl: "AlgoImplBase") -> None:
raise NotImplementedError
def reset_optimizer_states(self) -> None:
raise NotImplementedError
class AlgoBase(LearnableBase):
_impl: Optional[AlgoImplBase]
def save_policy(self, fname: str) -> None:
"""Save the greedy-policy computational graph as TorchScript or ONNX.
The format will be automatically detected by the file name.
.. code-block:: python
# save as TorchScript
algo.save_policy('policy.pt')
# save as ONNX
algo.save_policy('policy.onnx')
The artifacts saved with this method will work without d3rlpy.
This method is especially useful to deploy the learned policy to
        production environments or embedded systems.
See also
* https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html (for Python).
* https://pytorch.org/tutorials/advanced/cpp_export.html (for C++).
* https://onnx.ai (for ONNX)
Args:
fname: destination file path.
"""
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
self._impl.save_policy(fname)
def predict(self, x: Union[np.ndarray, List[Any]]) -> np.ndarray:
"""Returns greedy actions.
.. code-block:: python
# 100 observations with shape of (10,)
x = np.random.random((100, 10))
actions = algo.predict(x)
# actions.shape == (100, action size) for continuous control
# actions.shape == (100,) for discrete control
Args:
x: observations
Returns:
greedy actions
"""
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
return self._impl.predict_best_action(x)
def predict_value(
self,
x: Union[np.ndarray, List[Any]],
action: Union[np.ndarray, List[Any]],
with_std: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""Returns predicted action-values.
.. code-block:: python
# 100 observations with shape of (10,)
x = np.random.random((100, 10))
# for continuous control
# 100 actions with shape of (2,)
actions = np.random.random((100, 2))
# for discrete control
# 100 actions in integer values
actions = np.random.randint(2, size=100)
values = algo.predict_value(x, actions)
# values.shape == (100,)
values, stds = algo.predict_value(x, actions, with_std=True)
# stds.shape == (100,)
Args:
x: observations
action: actions
with_std: flag to return standard deviation of ensemble
estimation. This deviation reflects uncertainty for the given
observations. This uncertainty will be more accurate if you
enable ``bootstrap`` flag and increase ``n_critics`` value.
Returns:
predicted action-values
"""
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
return self._impl.predict_value(x, action, with_std)
def sample_action(self, x: Union[np.ndarray, List[Any]]) -> np.ndarray:
"""Returns sampled actions.
The sampled actions are identical to the output of `predict` method if
the policy is deterministic.
Args:
x: observations.
Returns:
sampled actions.
"""
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
return self._impl.sample_action(x)
def fit_online(
self,
env: gym.Env,
buffer: Optional[Buffer] = None,
explorer: Optional[Explorer] = None,
n_steps: int = 1000000,
n_steps_per_epoch: int = 10000,
update_interval: int = 1,
update_start_step: int = 0,
random_steps: int = 0,
eval_env: Optional[gym.Env] = None,
eval_epsilon: float = 0.0,
save_metrics: bool = True,
save_interval: int = 1,
experiment_name: Optional[str] = None,
with_timestamp: bool = True,
logdir: str = "d3rlpy_logs",
verbose: bool = True,
show_progress: bool = True,
tensorboard_dir: Optional[str] = None,
timelimit_aware: bool = True,
callback: Optional[Callable[[AlgoProtocol, int, int], None]] = None,
) -> None:
"""Start training loop of online deep reinforcement learning.
Args:
env: gym-like environment.
buffer : replay buffer.
explorer: action explorer.
n_steps: the number of total steps to train.
n_steps_per_epoch: the number of steps per epoch.
update_interval: the number of steps per update.
update_start_step: the steps before starting updates.
            random_steps: the steps for the initial random exploration.
eval_env: gym-like environment. If None, evaluation is skipped.
eval_epsilon: :math:`\\epsilon`-greedy factor during evaluation.
save_metrics: flag to record metrics. If False, the log
directory is not created and the model parameters are not saved.
save_interval: the number of epochs before saving models.
experiment_name: experiment name for logging. If not passed,
the directory name will be ``{class name}_online_{timestamp}``.
with_timestamp: flag to add timestamp string to the last of
directory name.
logdir: root directory name to save logs.
verbose: flag to show logged information on stdout.
show_progress: flag to show progress bar for iterations.
tensorboard_dir: directory to save logged information in
tensorboard (additional to the csv data). if ``None``, the
directory will not be created.
timelimit_aware: flag to turn ``terminal`` flag ``False`` when
``TimeLimit.truncated`` flag is ``True``, which is designed to
                work with ``gym.wrappers.TimeLimit``.
callback: callable function that takes ``(algo, epoch, total_step)``
, which is called at the end of epochs.
"""
# create default replay buffer
if buffer is None:
buffer = ReplayBuffer(1000000, env=env)
# check action-space
_assert_action_space(self, env)
train_single_env(
algo=self,
env=env,
buffer=buffer,
explorer=explorer,
n_steps=n_steps,
n_steps_per_epoch=n_steps_per_epoch,
update_interval=update_interval,
update_start_step=update_start_step,
random_steps=random_steps,
eval_env=eval_env,
eval_epsilon=eval_epsilon,
save_metrics=save_metrics,
save_interval=save_interval,
experiment_name=experiment_name,
with_timestamp=with_timestamp,
logdir=logdir,
verbose=verbose,
show_progress=show_progress,
tensorboard_dir=tensorboard_dir,
timelimit_aware=timelimit_aware,
callback=callback,
)
def fit_batch_online(
self,
env: BatchEnv,
buffer: Optional[BatchBuffer] = None,
explorer: Optional[Explorer] = None,
n_epochs: int = 1000,
n_steps_per_epoch: int = 1000,
n_updates_per_epoch: int = 1000,
eval_interval: int = 10,
eval_env: Optional[gym.Env] = None,
eval_epsilon: float = 0.0,
save_metrics: bool = True,
save_interval: int = 1,
experiment_name: Optional[str] = None,
with_timestamp: bool = True,
logdir: str = "d3rlpy_logs",
verbose: bool = True,
show_progress: bool = True,
tensorboard_dir: Optional[str] = None,
timelimit_aware: bool = True,
callback: Optional[Callable[[AlgoProtocol, int, int], None]] = None,
) -> None:
"""Start training loop of batch online deep reinforcement learning.
Args:
env: gym-like environment.
buffer : replay buffer.
explorer: action explorer.
n_epochs: the number of epochs to train.
n_steps_per_epoch: the number of steps per epoch.
n_updates_per_epoch: the number of updates per epoch.
eval_interval: the number of epochs before evaluation.
eval_env: gym-like environment. If None, evaluation is skipped.
eval_epsilon: :math:`\\epsilon`-greedy factor during evaluation.
save_metrics: flag to record metrics. If False, the log
directory is not created and the model parameters are not saved.
save_interval: the number of epochs before saving models.
experiment_name: experiment name for logging. If not passed,
the directory name will be ``{class name}_online_{timestamp}``.
with_timestamp: flag to add timestamp string to the last of
directory name.
logdir: root directory name to save logs.
verbose: flag to show logged information on stdout.
show_progress: flag to show progress bar for iterations.
tensorboard_dir: directory to save logged information in
tensorboard (additional to the csv data). if ``None``, the
directory will not be created.
timelimit_aware: flag to turn ``terminal`` flag ``False`` when
``TimeLimit.truncated`` flag is ``True``, which is designed to
                work with ``gym.wrappers.TimeLimit``.
callback: callable function that takes ``(algo, epoch, total_step)``
, which is called at the end of epochs.
"""
# create default replay buffer
if buffer is None:
buffer = BatchReplayBuffer(1000000, env=env)
# check action-space
_assert_action_space(self, env)
train_batch_env(
algo=self,
env=env,
buffer=buffer,
explorer=explorer,
n_epochs=n_epochs,
n_steps_per_epoch=n_steps_per_epoch,
n_updates_per_epoch=n_updates_per_epoch,
eval_interval=eval_interval,
eval_env=eval_env,
eval_epsilon=eval_epsilon,
save_metrics=save_metrics,
save_interval=save_interval,
experiment_name=experiment_name,
with_timestamp=with_timestamp,
logdir=logdir,
verbose=verbose,
show_progress=show_progress,
tensorboard_dir=tensorboard_dir,
timelimit_aware=timelimit_aware,
callback=callback,
)
def collect(
self,
env: gym.Env,
buffer: Optional[Buffer] = None,
explorer: Optional[Explorer] = None,
deterministic: bool = False,
n_steps: int = 1000000,
show_progress: bool = True,
timelimit_aware: bool = True,
) -> Buffer:
"""Collects data via interaction with environment.
If ``buffer`` is not given, ``ReplayBuffer`` will be internally created.
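        A minimal sketch is shown below. The environment id is an assumption
        for illustration; the collected buffer can be converted into a static
        dataset with ``to_mdp_dataset()``.

        .. code-block:: python

            import gym
            import d3rlpy

            env = gym.make('Pendulum-v0')  # assumed environment id

            # collect data with a randomly initialized policy
            sac = d3rlpy.algos.SAC()
            buffer = sac.collect(env, n_steps=10000)

            # convert to MDPDataset for offline training
            dataset = buffer.to_mdp_dataset()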
Args:
env: gym-like environment.
buffer : replay buffer.
explorer: action explorer.
deterministic: flag to collect data with the greedy policy.
n_steps: the number of total steps to train.
show_progress: flag to show progress bar for iterations.
timelimit_aware: flag to turn ``terminal`` flag ``False`` when
``TimeLimit.truncated`` flag is ``True``, which is designed to
                work with ``gym.wrappers.TimeLimit``.
Returns:
replay buffer with the collected data.
"""
# create default replay buffer
if buffer is None:
buffer = ReplayBuffer(1000000, env=env)
# check action-space
_assert_action_space(self, env)
collect(
algo=self,
env=env,
buffer=buffer,
explorer=explorer,
deterministic=deterministic,
n_steps=n_steps,
show_progress=show_progress,
timelimit_aware=timelimit_aware,
)
return buffer
def copy_policy_from(self, algo: "AlgoBase") -> None:
"""Copies policy parameters from the given algorithm.
.. code-block:: python
# pretrain with static dataset
cql = d3rlpy.algos.CQL()
cql.fit(dataset, n_steps=100000)
# transfer to online algorithm
sac = d3rlpy.algos.SAC()
sac.create_impl(cql.observation_shape, cql.action_size)
sac.copy_policy_from(cql)
Args:
algo: algorithm object.
"""
assert self._impl, IMPL_NOT_INITIALIZED_ERROR
assert isinstance(algo.impl, AlgoImplBase)
self._impl.copy_policy_from(algo.impl)
def copy_policy_optim_from(self, algo: "AlgoBase") -> None:
"""Copies policy optimizer states from the given algorithm.
.. code-block:: python
# pretrain with static dataset
cql = d3rlpy.algos.CQL()
cql.fit(dataset, n_steps=100000)
# transfer to online algorithm
sac = d3rlpy.algos.SAC()
sac.create_impl(cql.observation_shape, cql.action_size)
sac.copy_policy_optim_from(cql)
Args:
algo: algorithm object.
"""
assert self._impl, IMPL_NOT_INITIALIZED_ERROR
assert isinstance(algo.impl, AlgoImplBase)
self._impl.copy_policy_optim_from(algo.impl)
def copy_q_function_from(self, algo: "AlgoBase") -> None:
"""Copies Q-function parameters from the given algorithm.
.. code-block:: python
# pretrain with static dataset
cql = d3rlpy.algos.CQL()
cql.fit(dataset, n_steps=100000)
            # transfer to online algorithm
sac = d3rlpy.algos.SAC()
sac.create_impl(cql.observation_shape, cql.action_size)
sac.copy_q_function_from(cql)
Args:
algo: algorithm object.
"""
assert self._impl, IMPL_NOT_INITIALIZED_ERROR
assert isinstance(algo.impl, AlgoImplBase)
self._impl.copy_q_function_from(algo.impl)
def copy_q_function_optim_from(self, algo: "AlgoBase") -> None:
"""Copies Q-function optimizer states from the given algorithm.
.. code-block:: python
# pretrain with static dataset
cql = d3rlpy.algos.CQL()
cql.fit(dataset, n_steps=100000)
# transfer to online algorithm
sac = d3rlpy.algos.SAC()
sac.create_impl(cql.observation_shape, cql.action_size)
            sac.copy_q_function_optim_from(cql)
Args:
algo: algorithm object.
"""
assert self._impl, IMPL_NOT_INITIALIZED_ERROR
assert isinstance(algo.impl, AlgoImplBase)
self._impl.copy_q_function_optim_from(algo.impl)
def reset_optimizer_states(self) -> None:
"""Resets optimizer states.
        This is especially useful when fine-tuning policies with freshly
        initialized optimizer states.
"""
assert self._impl, IMPL_NOT_INITIALIZED_ERROR
        self._impl.reset_optimizer_states()

# (end of file: d3rlpy/algos/base.py)
from typing import Any, Dict, List, Optional, Sequence, Union
import numpy as np
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from .base import AlgoBase
from .torch.bc_impl import BCBaseImpl, BCImpl, DiscreteBCImpl
class _BCBase(AlgoBase):
_learning_rate: float
_optim_factory: OptimizerFactory
_encoder_factory: EncoderFactory
_use_gpu: Optional[Device]
_impl: Optional[BCBaseImpl]
def __init__(
self,
*,
learning_rate: float = 1e-3,
optim_factory: OptimizerFactory = AdamFactory(),
encoder_factory: EncoderArg = "default",
batch_size: int = 100,
n_frames: int = 1,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
impl: Optional[BCBaseImpl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=1,
gamma=1.0,
scaler=scaler,
action_scaler=action_scaler,
kwargs=kwargs,
)
self._learning_rate = learning_rate
self._optim_factory = optim_factory
self._encoder_factory = check_encoder(encoder_factory)
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
loss = self._impl.update_imitator(batch.observations, batch.actions)
return {"loss": loss}
def predict_value(
self,
x: Union[np.ndarray, List[Any]],
action: Union[np.ndarray, List[Any]],
with_std: bool = False,
) -> np.ndarray:
"""value prediction is not supported by BC algorithms."""
raise NotImplementedError("BC does not support value estimation.")
def sample_action(self, x: Union[np.ndarray, List[Any]]) -> None:
"""sampling action is not supported by BC algorithm."""
raise NotImplementedError("BC does not support sampling action.")
class BC(_BCBase):
r"""Behavior Cloning algorithm.
Behavior Cloning (BC) is to imitate actions in the dataset via a supervised
learning approach.
Since BC is only imitating action distributions, the performance will be
close to the mean of the dataset even though BC mostly works better than
online RL algorithms.
.. math::
L(\theta) = \mathbb{E}_{a_t, s_t \sim D}
[(a_t - \pi_\theta(s_t))^2]
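    A minimal usage sketch is shown below. The ``get_pendulum`` dataset helper
    is an assumption for illustration; any continuous-action MDPDataset works.

    .. code-block:: python

        import d3rlpy

        # assumed dataset helper returning (MDPDataset, env)
        dataset, env = d3rlpy.datasets.get_pendulum()

        bc = d3rlpy.algos.BC(policy_type="deterministic")
        bc.fit(dataset, n_steps=100000)

        # imitated greedy actions
        actions = bc.predict(dataset.observations[:10])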
Args:
        learning_rate (float): learning rate.
optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory.
encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
policy_type (str): the policy type. The available options are
``['deterministic', 'stochastic']``.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action scaler. The available options are ``['min_max']``.
impl (d3rlpy.algos.torch.bc_impl.BCImpl):
            implementation of the algorithm.
"""
_policy_type: str
_impl: Optional[BCImpl]
def __init__(
self,
*,
learning_rate: float = 1e-3,
optim_factory: OptimizerFactory = AdamFactory(),
encoder_factory: EncoderArg = "default",
batch_size: int = 100,
n_frames: int = 1,
policy_type: str = "deterministic",
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
impl: Optional[BCBaseImpl] = None,
**kwargs: Any
):
super().__init__(
learning_rate=learning_rate,
optim_factory=optim_factory,
encoder_factory=encoder_factory,
batch_size=batch_size,
n_frames=n_frames,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
impl=impl,
**kwargs,
)
self._policy_type = policy_type
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = BCImpl(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=self._learning_rate,
optim_factory=self._optim_factory,
encoder_factory=self._encoder_factory,
policy_type=self._policy_type,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
)
self._impl.build()
def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS
class DiscreteBC(_BCBase):
r"""Behavior Cloning algorithm for discrete control.
Behavior Cloning (BC) is to imitate actions in the dataset via a supervised
learning approach.
Since BC is only imitating action distributions, the performance will be
close to the mean of the dataset even though BC mostly works better than
online RL algorithms.
.. math::
L(\theta) = \mathbb{E}_{a_t, s_t \sim D}
[-\sum_a p(a|s_t) \log \pi_\theta(a|s_t)]
where :math:`p(a|s_t)` is implemented as a one-hot vector.
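    A minimal usage sketch is shown below. The ``get_cartpole`` dataset helper
    is an assumption for illustration; any discrete-action MDPDataset works.

    .. code-block:: python

        import d3rlpy

        # assumed dataset helper returning (MDPDataset, env)
        dataset, env = d3rlpy.datasets.get_cartpole()

        bc = d3rlpy.algos.DiscreteBC(beta=0.5)
        bc.fit(dataset, n_steps=100000)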
Args:
        learning_rate (float): learning rate.
optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory.
encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
        beta (float): regularization factor.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`
impl (d3rlpy.algos.torch.bc_impl.DiscreteBCImpl):
            implementation of the algorithm.
"""
_beta: float
_impl: Optional[DiscreteBCImpl]
def __init__(
self,
*,
learning_rate: float = 1e-3,
optim_factory: OptimizerFactory = AdamFactory(),
encoder_factory: EncoderArg = "default",
batch_size: int = 100,
n_frames: int = 1,
beta: float = 0.5,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
impl: Optional[DiscreteBCImpl] = None,
**kwargs: Any
):
super().__init__(
learning_rate=learning_rate,
optim_factory=optim_factory,
encoder_factory=encoder_factory,
batch_size=batch_size,
n_frames=n_frames,
use_gpu=use_gpu,
scaler=scaler,
impl=impl,
**kwargs,
)
self._beta = beta
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = DiscreteBCImpl(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=self._learning_rate,
optim_factory=self._optim_factory,
encoder_factory=self._encoder_factory,
beta=self._beta,
use_gpu=self._use_gpu,
scaler=self._scaler,
)
self._impl.build()
def get_action_type(self) -> ActionSpace:
        return ActionSpace.DISCRETE

# (end of file: d3rlpy/algos/bc.py)
from typing import Any, Dict, Optional, Sequence
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.awac_impl import AWACImpl
class AWAC(AlgoBase):
r"""Advantage Weighted Actor-Critic algorithm.
AWAC is a TD3-based actor-critic algorithm that enables efficient
fine-tuning where the policy is trained with offline datasets and is
deployed to online training.
The policy is trained as a supervised regression.
.. math::
J(\phi) = \mathbb{E}_{s_t, a_t \sim D}
[\log \pi_\phi(a_t|s_t)
\exp(\frac{1}{\lambda} A^\pi (s_t, a_t))]
where :math:`A^\pi (s_t, a_t) = Q_\theta(s_t, a_t) -
Q_\theta(s_t, a'_t)` and :math:`a'_t \sim \pi_\phi(\cdot|s_t)`
    The key difference from AWR is that AWAC uses a Q-function trained via TD
    learning for better sample-efficiency.
References:
* `Nair et al., Accelerating Online Reinforcement Learning with Offline
Datasets. <https://arxiv.org/abs/2006.09359>`_
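    A minimal sketch of offline pretraining followed by online fine-tuning is
    shown below. The ``get_pendulum`` helper and the buffer seeding are
    assumptions for illustration.

    .. code-block:: python

        import d3rlpy

        # assumed dataset helper returning (MDPDataset, env)
        dataset, env = d3rlpy.datasets.get_pendulum()

        awac = d3rlpy.algos.AWAC(lam=1.0)

        # offline pretraining on the static dataset
        awac.fit(dataset, n_steps=100000)

        # online fine-tuning; the buffer is seeded with the offline episodes
        buffer = d3rlpy.online.buffers.ReplayBuffer(
            1000000, env=env, episodes=dataset.episodes
        )
        awac.fit_online(env, buffer=buffer, n_steps=100000)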
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
lam (float): :math:`\lambda` for weight calculation.
n_action_samples (int): the number of sampled actions to calculate
:math:`A^\pi(s_t, a_t)`.
max_weight (float): maximum weight for cross-entropy loss.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
update_actor_interval (int): interval to update policy function.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.awac_impl.AWACImpl): algorithm implementation.
"""
_actor_learning_rate: float
_critic_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_tau: float
_lam: float
_n_action_samples: int
_max_weight: float
_n_critics: int
_target_reduction_type: str
_update_actor_interval: int
_use_gpu: Optional[Device]
_impl: Optional[AWACImpl]
def __init__(
self,
*,
actor_learning_rate: float = 3e-4,
critic_learning_rate: float = 3e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(weight_decay=1e-4),
critic_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 1024,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
lam: float = 1.0,
n_action_samples: int = 1,
max_weight: float = 20.0,
n_critics: int = 2,
target_reduction_type: str = "min",
update_actor_interval: int = 1,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[AWACImpl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._tau = tau
self._lam = lam
self._n_action_samples = n_action_samples
self._max_weight = max_weight
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._update_actor_interval = update_actor_interval
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = AWACImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
lam=self._lam,
n_action_samples=self._n_action_samples,
max_weight=self._max_weight,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
# delayed policy update
if self._grad_step % self._update_actor_interval == 0:
actor_loss, mean_std = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss, "mean_std": mean_std})
self._impl.update_critic_target()
self._impl.update_actor_target()
return metrics
def get_action_type(self) -> ActionSpace:
        return ActionSpace.CONTINUOUS

# (end of file: d3rlpy/algos/awac.py)
from typing import Any, Dict, List, Optional, Sequence, Union
import numpy as np
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.bcq_impl import BCQImpl, DiscreteBCQImpl
class BCQ(AlgoBase):
r"""Batch-Constrained Q-learning algorithm.
    BCQ is the very first practical data-driven deep reinforcement learning
    algorithm.
The major difference from DDPG is that the policy function is represented
as combination of conditional VAE and perturbation function in order to
remedy extrapolation error emerging from target value estimation.
    The encoder and the decoder of the conditional VAE are represented as
    :math:`E_\omega` and :math:`D_\omega` respectively.
.. math::
        L(\omega) = \mathbb{E}_{s_t, a_t \sim D} [(a - \tilde{a})^2
            + D_{KL}(N(\mu, \sigma)|N(0, 1))]
where :math:`\mu, \sigma = E_\omega(s_t, a_t)`,
:math:`\tilde{a} = D_\omega(s_t, z)` and :math:`z \sim N(\mu, \sigma)`.
The policy function is represented as a residual function
with the VAE and the perturbation function represented as
:math:`\xi_\phi (s, a)`.
.. math::
\pi(s, a) = a + \Phi \xi_\phi (s, a)
where :math:`a = D_\omega (s, z)`, :math:`z \sim N(0, 0.5)` and
:math:`\Phi` is a perturbation scale designated by `action_flexibility`.
    Although the policy is learned to stay close to the data distribution, the
    perturbation function can lead to higher-reward states.
BCQ also leverages twin Q functions and computes weighted average over
maximum values and minimum values.
.. math::
L(\theta_i) = \mathbb{E}_{s_t, a_t, r_{t+1}, s_{t+1} \sim D}
[(y - Q_{\theta_i}(s_t, a_t))^2]
.. math::
y = r_{t+1} + \gamma \max_{a_i} [
\lambda \min_j Q_{\theta_j'}(s_{t+1}, a_i)
+ (1 - \lambda) \max_j Q_{\theta_j'}(s_{t+1}, a_i)]
where :math:`\{a_i \sim D(s_{t+1}, z), z \sim N(0, 0.5)\}_{i=1}^n`.
The number of sampled actions is designated with `n_action_samples`.
Finally, the perturbation function is trained just like DDPG's policy
function.
.. math::
J(\phi) = \mathbb{E}_{s_t \sim D, a_t \sim D_\omega(s_t, z),
z \sim N(0, 0.5)}
[Q_{\theta_1} (s_t, \pi(s_t, a_t))]
    At inference time, as many action candidates as `n_action_samples` are
    sampled, and the action with the highest value estimate is taken.
.. math::
\pi'(s) = \text{argmax}_{\pi(s, a_i)} Q_{\theta_1} (s, \pi(s, a_i))
Note:
The greedy action is not deterministic because the action candidates
        are always randomly sampled. This might affect the `save_policy` method
        and performance in production.
References:
* `Fujimoto et al., Off-Policy Deep Reinforcement Learning without
Exploration. <https://arxiv.org/abs/1812.02900>`_
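    A minimal usage sketch is shown below. The ``get_pendulum`` dataset helper
    is an assumption for illustration; the hyperparameter values are not
    recommendations.

    .. code-block:: python

        import d3rlpy

        # assumed dataset helper returning (MDPDataset, env)
        dataset, env = d3rlpy.datasets.get_pendulum()

        bcq = d3rlpy.algos.BCQ(
            action_flexibility=0.05,  # perturbation scale Phi
            n_action_samples=100,     # candidates sampled at inference
        )
        bcq.fit(dataset, n_steps=100000)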
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
imitator_learning_rate (float): learning rate for Conditional VAE.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
imitator_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the conditional VAE.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
imitator_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the conditional VAE.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
update_actor_interval (int): interval to update policy function.
lam (float): weight factor for critic ensemble.
n_action_samples (int): the number of action samples to estimate
action-values.
action_flexibility (float): output scale of perturbation function
represented as :math:`\Phi`.
rl_start_step (int): step to start to update policy function and Q
functions. If this is large, RL training would be more stabilized.
        beta (float): KL regularization term for Conditional VAE.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.bcq_impl.BCQImpl): algorithm implementation.
"""
_actor_learning_rate: float
_critic_learning_rate: float
_imitator_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_imitator_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_imitator_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_tau: float
_n_critics: int
_update_actor_interval: int
_lam: float
_n_action_samples: int
_action_flexibility: float
_rl_start_step: int
_beta: float
_use_gpu: Optional[Device]
_impl: Optional[BCQImpl]
def __init__(
self,
*,
actor_learning_rate: float = 1e-3,
critic_learning_rate: float = 1e-3,
imitator_learning_rate: float = 1e-3,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
imitator_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
imitator_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 100,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 2,
update_actor_interval: int = 1,
lam: float = 0.75,
n_action_samples: int = 100,
action_flexibility: float = 0.05,
rl_start_step: int = 0,
beta: float = 0.5,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[BCQImpl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._imitator_learning_rate = imitator_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._imitator_optim_factory = imitator_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._imitator_encoder_factory = check_encoder(imitator_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._tau = tau
self._n_critics = n_critics
self._update_actor_interval = update_actor_interval
self._lam = lam
self._n_action_samples = n_action_samples
self._action_flexibility = action_flexibility
self._rl_start_step = rl_start_step
self._beta = beta
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = BCQImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
imitator_learning_rate=self._imitator_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
imitator_optim_factory=self._imitator_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
imitator_encoder_factory=self._imitator_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
lam=self._lam,
n_action_samples=self._n_action_samples,
action_flexibility=self._action_flexibility,
beta=self._beta,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
imitator_loss = self._impl.update_imitator(batch)
metrics.update({"imitator_loss": imitator_loss})
if self._grad_step >= self._rl_start_step:
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
if self._grad_step % self._update_actor_interval == 0:
actor_loss = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss})
self._impl.update_actor_target()
self._impl.update_critic_target()
return metrics
def sample_action(self, x: Union[np.ndarray, List[Any]]) -> np.ndarray:
"""BCQ does not support sampling action."""
raise NotImplementedError("BCQ does not support sampling action.")
def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS
class DiscreteBCQ(AlgoBase):
r"""Discrete version of Batch-Constrained Q-learning algorithm.
    The discrete version inherits the ideas of the continuous version, but the
    algorithm is much simpler.
The imitation function :math:`G_\omega(a|s)` is trained as supervised
learning just like Behavior Cloning.
.. math::
L(\omega) = \mathbb{E}_{a_t, s_t \sim D}
[-\sum_a p(a|s_t) \log G_\omega(a|s_t)]
With this imitation function, the greedy policy is defined as follows.
.. math::
\pi(s_t) = \text{argmax}_{a|G_\omega(a|s_t)
/ \max_{\tilde{a}} G_\omega(\tilde{a}|s_t) > \tau}
Q_\theta (s_t, a)
which eliminates actions with probabilities :math:`\tau` times smaller
than the maximum one.
Finally, the loss function is computed in Double DQN style with the above
constrained policy.
.. math::
L(\theta) = \mathbb{E}_{s_t, a_t, r_{t+1}, s_{t+1} \sim D} [(r_{t+1}
+ \gamma Q_{\theta'}(s_{t+1}, \pi(s_{t+1}))
- Q_\theta(s_t, a_t))^2]
References:
* `Fujimoto et al., Off-Policy Deep Reinforcement Learning without
Exploration. <https://arxiv.org/abs/1812.02900>`_
* `Fujimoto et al., Benchmarking Batch Deep Reinforcement Learning
Algorithms. <https://arxiv.org/abs/1910.01708>`_
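    A minimal usage sketch is shown below. The ``get_cartpole`` dataset helper
    is an assumption for illustration; any discrete-action MDPDataset works.

    .. code-block:: python

        import d3rlpy

        # assumed dataset helper returning (MDPDataset, env)
        dataset, env = d3rlpy.datasets.get_cartpole()

        bcq = d3rlpy.algos.DiscreteBCQ(
            action_flexibility=0.3,  # threshold tau for filtering actions
        )
        bcq.fit(dataset, n_steps=100000)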
Args:
learning_rate (float): learning rate.
optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory.
encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
action_flexibility (float): probability threshold represented as
:math:`\tau`.
        beta (float): regularization term for imitation function.
target_update_interval (int): interval to update the target network.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.bcq_impl.DiscreteBCQImpl):
algorithm implementation.
"""
_learning_rate: float
_optim_factory: OptimizerFactory
_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_n_critics: int
_target_reduction_type: str
_action_flexibility: float
_beta: float
_target_update_interval: int
_use_gpu: Optional[Device]
_impl: Optional[DiscreteBCQImpl]
def __init__(
self,
*,
learning_rate: float = 6.25e-5,
optim_factory: OptimizerFactory = AdamFactory(),
encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 32,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
n_critics: int = 1,
target_reduction_type: str = "min",
action_flexibility: float = 0.3,
beta: float = 0.5,
target_update_interval: int = 8000,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[DiscreteBCQImpl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=None,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._learning_rate = learning_rate
self._optim_factory = optim_factory
self._encoder_factory = check_encoder(encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._action_flexibility = action_flexibility
self._beta = beta
self._target_update_interval = target_update_interval
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = DiscreteBCQImpl(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=self._learning_rate,
optim_factory=self._optim_factory,
encoder_factory=self._encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
action_flexibility=self._action_flexibility,
beta=self._beta,
use_gpu=self._use_gpu,
scaler=self._scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
loss = self._impl.update(batch)
if self._grad_step % self._target_update_interval == 0:
self._impl.update_target()
return {"loss": loss}
def get_action_type(self) -> ActionSpace:
        return ActionSpace.DISCRETE

# (end of file: d3rlpy/algos/bcq.py)
from typing import Any, Dict, Optional, Sequence
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.td3_impl import TD3Impl
class TD3(AlgoBase):
r"""Twin Delayed Deep Deterministic Policy Gradients algorithm.
TD3 is an improved DDPG-based algorithm.
Major differences from DDPG are as follows.
* TD3 has twin Q functions to reduce overestimation bias at TD learning.
The number of Q functions can be designated by `n_critics`.
* TD3 adds noise to target value estimation to avoid overfitting with the
deterministic policy.
* TD3 updates the policy function after several Q function updates in order
to reduce variance of action-value estimation. The interval of the policy
function update can be designated by `update_actor_interval`.
.. math::
L(\theta_i) = \mathbb{E}_{s_t, a_t, r_{t+1}, s_{t+1} \sim D} [(r_{t+1}
+ \gamma \min_j Q_{\theta_j'}(s_{t+1}, \pi_{\phi'}(s_{t+1}) +
\epsilon) - Q_{\theta_i}(s_t, a_t))^2]
.. math::
J(\phi) = \mathbb{E}_{s_t \sim D}
[\min_i Q_{\theta_i}(s_t, \pi_\phi(s_t))]
    where :math:`\epsilon \sim \text{clip}(N(0, \sigma), -c, c)`
References:
* `Fujimoto et al., Addressing Function Approximation Error in
Actor-Critic Methods. <https://arxiv.org/abs/1802.09477>`_
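    A minimal sketch of offline training on a static dataset is shown below.
    The ``get_pendulum`` dataset helper is an assumption for illustration;
    TD3 can also be trained online via ``fit_online``.

    .. code-block:: python

        import d3rlpy

        # assumed dataset helper returning (MDPDataset, env)
        dataset, env = d3rlpy.datasets.get_pendulum()

        td3 = d3rlpy.algos.TD3(
            target_smoothing_sigma=0.2,  # std of the target policy noise
            target_smoothing_clip=0.5,   # clipping range c of the noise
            update_actor_interval=2,     # delayed policy update
        )
        td3.fit(dataset, n_steps=100000)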
Args:
actor_learning_rate (float): learning rate for a policy function.
critic_learning_rate (float): learning rate for Q functions.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
target_smoothing_sigma (float): standard deviation for target noise.
target_smoothing_clip (float): clipping range for target noise.
update_actor_interval (int): interval to update policy function
described as `delayed policy update` in the paper.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.td3_impl.TD3Impl): algorithm implementation.
"""
_actor_learning_rate: float
_critic_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_tau: float
_n_critics: int
_target_reduction_type: str
_target_smoothing_sigma: float
_target_smoothing_clip: float
_update_actor_interval: int
_use_gpu: Optional[Device]
_impl: Optional[TD3Impl]
def __init__(
self,
*,
actor_learning_rate: float = 3e-4,
critic_learning_rate: float = 3e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 256,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 2,
target_reduction_type: str = "min",
target_smoothing_sigma: float = 0.2,
target_smoothing_clip: float = 0.5,
update_actor_interval: int = 2,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[TD3Impl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._tau = tau
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._target_smoothing_sigma = target_smoothing_sigma
self._target_smoothing_clip = target_smoothing_clip
self._update_actor_interval = update_actor_interval
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = TD3Impl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
target_smoothing_sigma=self._target_smoothing_sigma,
target_smoothing_clip=self._target_smoothing_clip,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
# delayed policy update
if self._grad_step % self._update_actor_interval == 0:
actor_loss = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss})
self._impl.update_critic_target()
self._impl.update_actor_target()
return metrics
def get_action_type(self) -> ActionSpace:
        return ActionSpace.CONTINUOUS

# (end of file: d3rlpy/algos/td3.py)
from typing import Any, Dict, List, Optional, Sequence, Union
import numpy as np
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch, compute_lambda_return
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import OptimizerFactory, SGDFactory
from .base import AlgoBase
from .torch.awr_impl import AWRBaseImpl, AWRImpl, DiscreteAWRImpl
class _AWRBase(AlgoBase):
_actor_learning_rate: float
_critic_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_batch_size_per_update: int
_n_actor_updates: int
_n_critic_updates: int
_lam: float
_beta: float
_max_weight: float
_use_gpu: Optional[Device]
_impl: Optional[AWRBaseImpl]
def __init__(
self,
*,
actor_learning_rate: float = 5e-5,
critic_learning_rate: float = 1e-4,
actor_optim_factory: OptimizerFactory = SGDFactory(momentum=0.9),
critic_optim_factory: OptimizerFactory = SGDFactory(momentum=0.9),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
batch_size: int = 2048,
n_frames: int = 1,
gamma: float = 0.99,
batch_size_per_update: int = 256,
n_actor_updates: int = 1000,
n_critic_updates: int = 200,
lam: float = 0.95,
beta: float = 1.0,
max_weight: float = 20.0,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[AWRImpl] = None,
**kwargs: Any
):
        # batch_size in AWR has different semantics from Q-learning algorithms.
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=1,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._batch_size_per_update = batch_size_per_update
self._n_actor_updates = n_actor_updates
self._n_critic_updates = n_critic_updates
self._lam = lam
self._beta = beta
self._max_weight = max_weight
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _compute_lambda_returns(self, batch: TransitionMiniBatch) -> np.ndarray:
# compute TD(lambda)
lambda_returns = []
for transition in batch.transitions:
lambda_return = compute_lambda_return(
transition=transition,
algo=self,
gamma=self._gamma,
lam=self._lam,
n_frames=self._n_frames,
)
lambda_returns.append(lambda_return)
return np.array(lambda_returns).reshape((-1, 1))
def _compute_advantages(
self, returns: np.ndarray, batch: TransitionMiniBatch
) -> np.ndarray:
baselines = self.predict_value(batch.observations).reshape((-1, 1))
advantages = returns - baselines
adv_mean = np.mean(advantages)
adv_std = np.std(advantages)
return (advantages - adv_mean) / (adv_std + 1e-5)
def _compute_clipped_weights(self, advantages: np.ndarray) -> np.ndarray:
weights = np.exp(advantages / self._beta)
return np.minimum(weights, self._max_weight)
def predict_value( # pylint: disable=signature-differs
self, x: Union[np.ndarray, List[Any]], *args: Any, **kwargs: Any
) -> np.ndarray:
"""Returns predicted state values.
Args:
x: observations.
Returns:
predicted state values.
"""
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
return self._impl.predict_value(x)
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
        # compute lambda return
lambda_returns = self._compute_lambda_returns(batch)
        # calculate advantage
advantages = self._compute_advantages(lambda_returns, batch)
# compute weights
clipped_weights = self._compute_clipped_weights(advantages)
metrics.update({"weights": np.mean(clipped_weights)})
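        # the sampled batch (``batch_size`` transitions) is consumed below in
        # ``batch_size_per_update``-sized chunks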
n_steps_per_batch = self.batch_size // self._batch_size_per_update
# update critic
critic_loss_history = []
for _ in range(self._n_critic_updates // n_steps_per_batch):
for j in range(n_steps_per_batch):
head_index = j * self._batch_size_per_update
tail_index = head_index + self._batch_size_per_update
observations = batch.observations[head_index:tail_index]
returns = lambda_returns[head_index:tail_index]
critic_loss = self._impl.update_critic(observations, returns)
critic_loss_history.append(critic_loss)
critic_loss_mean = np.mean(critic_loss_history)
metrics.update({"critic_loss": critic_loss_mean})
# update actor
actor_loss_history = []
for _ in range(self._n_actor_updates // n_steps_per_batch):
for j in range(n_steps_per_batch):
head_index = j * self._batch_size_per_update
tail_index = head_index + self._batch_size_per_update
observations = batch.observations[head_index:tail_index]
actions = batch.actions[head_index:tail_index]
weights = clipped_weights[head_index:tail_index]
actor_loss = self._impl.update_actor(
observations, actions, weights
)
actor_loss_history.append(actor_loss)
actor_loss_mean = np.mean(actor_loss_history)
metrics.update({"actor_loss": actor_loss_mean})
return metrics
class AWR(_AWRBase):
r"""Advantage-Weighted Regression algorithm.
    AWR is an actor-critic algorithm trained in a supervised regression manner,
    and it has shown strong performance in both online and offline settings.
The value function is trained as a supervised regression problem.
.. math::
L(\theta) = \mathbb{E}_{s_t, R_t \sim D} [(R_t - V(s_t|\theta))^2]
where :math:`R_t` is approximated using TD(:math:`\lambda`) to mitigate
    the high-variance issue.
The policy function is also trained as a supervised regression problem.
.. math::
J(\phi) = \mathbb{E}_{s_t, a_t, R_t \sim D}
[\log \pi(a_t|s_t, \phi)
\exp (\frac{1}{B} (R_t - V(s_t|\theta)))]
where :math:`B` is a constant factor.
References:
* `Peng et al., Advantage-Weighted Regression: Simple and Scalable
Off-Policy Reinforcement Learning
<https://arxiv.org/abs/1910.00177>`_
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for value function.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
batch_size (int): batch size per iteration.
n_frames (int): the number of frames to stack for image observation.
gamma (float): discount factor.
batch_size_per_update (int): mini-batch size.
n_actor_updates (int): actor gradient steps per iteration.
n_critic_updates (int): critic gradient steps per iteration.
lam (float): :math:`\lambda` for TD(:math:`\lambda`).
beta (float): :math:`B` for weight scale.
max_weight (float): :math:`w_{\text{max}}` for weight clipping.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.awr_impl.AWRImpl): algorithm implementation.
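
    A minimal offline-training sketch (``dataset`` is assumed to be a
    pre-built ``d3rlpy.dataset.MDPDataset`` and the ``fit`` keyword arguments
    are illustrative only):

    .. code-block:: python

        from d3rlpy.algos import AWR

        # batch_size is the number of transitions sampled per iteration,
        # while batch_size_per_update is the SGD mini-batch size.
        awr = AWR(batch_size=2048, batch_size_per_update=256)
        awr.fit(dataset, n_epochs=10)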
"""
_impl: Optional[AWRImpl]
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = AWRImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS
class DiscreteAWR(_AWRBase):
    r"""Discrete version of the Advantage-Weighted Regression algorithm.
    AWR is an actor-critic algorithm trained in a supervised regression manner,
    and it has shown strong performance in both online and offline settings.
The value function is trained as a supervised regression problem.
.. math::
L(\theta) = \mathbb{E}_{s_t, R_t \sim D} [(R_t - V(s_t|\theta))^2]
where :math:`R_t` is approximated using TD(:math:`\lambda`) to mitigate
    the high-variance issue.
The policy function is also trained as a supervised regression problem.
.. math::
J(\phi) = \mathbb{E}_{s_t, a_t, R_t \sim D}
[\log \pi(a_t|s_t, \phi)
\exp (\frac{1}{B} (R_t - V(s_t|\theta)))]
where :math:`B` is a constant factor.
References:
* `Peng et al., Advantage-Weighted Regression: Simple and Scalable
Off-Policy Reinforcement Learning
<https://arxiv.org/abs/1910.00177>`_
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for value function.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
batch_size (int): batch size per iteration.
n_frames (int): the number of frames to stack for image observation.
gamma (float): discount factor.
batch_size_per_update (int): mini-batch size.
n_actor_updates (int): actor gradient steps per iteration.
n_critic_updates (int): critic gradient steps per iteration.
lam (float): :math:`\lambda` for TD(:math:`\lambda`).
beta (float): :math:`B` for weight scale.
max_weight (float): :math:`w_{\text{max}}` for weight clipping.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.awr_impl.DiscreteAWRImpl):
algorithm implementation.
"""
_impl: Optional[DiscreteAWRImpl]
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = DiscreteAWRImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=None,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def get_action_type(self) -> ActionSpace:
return ActionSpace.DISCRETE | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/awr.py | awr.py |
from typing import Any, Dict, List, Optional, Sequence, Tuple
import numpy as np
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import Transition, TransitionMiniBatch
from ..dynamics import DynamicsBase
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.sac_impl import SACImpl
from .utility import ModelBaseMixin
class MOPO(ModelBaseMixin, AlgoBase):
r"""Model-based Offline Policy Optimization.
MOPO is a model-based RL approach for offline policy optimization.
    MOPO leverages the probabilistic ensemble dynamics model to generate
new dynamics data with uncertainty penalties.
    The ensemble dynamics model consists of :math:`N` probabilistic models
:math:`\{T_{\theta_i}\}_{i=1}^N`.
At each epoch, new transitions are generated via randomly picked dynamics
model :math:`T_\theta`.
.. math::
s_{t+1}, r_{t+1} \sim T_\theta(s_t, a_t)
where :math:`s_t \sim D` for the first step, otherwise :math:`s_t` is the
    previously generated observation, and :math:`a_t \sim \pi(\cdot|s_t)`.
The generated :math:`r_{t+1}` would be far from the ground truth if the
    actions sampled from the policy function are out-of-distribution.
    Thus, the uncertainty penalty regularizes this bias.
.. math::
\tilde{r_{t+1}} = r_{t+1} - \lambda \max_{i=1}^N
|| \Sigma_i (s_t, a_t) ||
where :math:`\Sigma(s_t, a_t)` is the estimated variance.
Finally, the generated transitions
:math:`(s_t, a_t, \tilde{r_{t+1}}, s_{t+1})` are appended to dataset
:math:`D`.
    This generation process starts from ``rollout_batch_size`` randomly
    sampled transitions and rolls out for ``rollout_horizon`` steps.
Note:
Currently, MOPO only supports vector observations.
References:
* `Yu et al., MOPO: Model-based Offline Policy Optimization.
<https://arxiv.org/abs/2005.13239>`_
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
temp_learning_rate (float): learning rate for temperature parameter.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
temp_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the temperature.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
update_actor_interval (int): interval to update policy function.
initial_temperature (float): initial temperature value.
dynamics (d3rlpy.dynamics.DynamicsBase): dynamics object.
rollout_interval (int): the number of steps before rollout.
rollout_horizon (int): the rollout step length.
rollout_batch_size (int): the number of initial transitions for
rollout.
lam (float): :math:`\lambda` for uncertainty penalties.
        real_ratio (float): the ratio of real dataset samples in a mini-batch.
generated_maxlen (int): the maximum number of generated samples.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.sac_impl.SACImpl): algorithm implementation.
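
    A minimal sketch of how the pieces fit together (``dynamics`` is assumed
    to be a ``d3rlpy.dynamics.DynamicsBase`` instance trained beforehand,
    ``dataset`` a pre-built ``d3rlpy.dataset.MDPDataset``, and the ``fit``
    keyword arguments are illustrative only):

    .. code-block:: python

        from d3rlpy.algos import MOPO

        mopo = MOPO(dynamics=dynamics, rollout_horizon=5, lam=1.0)
        mopo.fit(dataset, n_epochs=10)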
"""
_actor_learning_rate: float
_critic_learning_rate: float
_temp_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_temp_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_tau: float
_n_critics: int
_target_reduction_type: str
_update_actor_interval: int
_initial_temperature: float
_dynamics: Optional[DynamicsBase]
_rollout_interval: int
_rollout_horizon: int
_rollout_batch_size: int
_lam: float
_use_gpu: Optional[Device]
_impl: Optional[SACImpl]
def __init__(
self,
*,
actor_learning_rate: float = 3e-4,
critic_learning_rate: float = 3e-4,
temp_learning_rate: float = 3e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
temp_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 100,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 2,
target_reduction_type: str = "min",
update_actor_interval: int = 1,
initial_temperature: float = 1.0,
dynamics: Optional[DynamicsBase] = None,
rollout_interval: int = 1000,
rollout_horizon: int = 5,
rollout_batch_size: int = 50000,
lam: float = 1.0,
real_ratio: float = 0.05,
generated_maxlen: int = 50000 * 5 * 5,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[SACImpl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
real_ratio=real_ratio,
generated_maxlen=generated_maxlen,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._temp_learning_rate = temp_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._temp_optim_factory = temp_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._tau = tau
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._update_actor_interval = update_actor_interval
self._initial_temperature = initial_temperature
self._dynamics = dynamics
self._rollout_interval = rollout_interval
self._rollout_horizon = rollout_horizon
self._rollout_batch_size = rollout_batch_size
self._lam = lam
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = SACImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
temp_learning_rate=self._temp_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
temp_optim_factory=self._temp_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
initial_temperature=self._initial_temperature,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
# delayed policy update
if self._grad_step % self._update_actor_interval == 0:
actor_loss = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss})
# lagrangian parameter update for SAC temperature
if self._temp_learning_rate > 0:
temp_loss, temp = self._impl.update_temp(batch)
metrics.update({"temp_loss": temp_loss, "temp": temp})
self._impl.update_critic_target()
self._impl.update_actor_target()
return metrics
def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS
def _is_generating_new_data(self) -> bool:
return self._grad_step % self._rollout_interval == 0
def _sample_initial_transitions(
self, transitions: List[Transition]
) -> List[Transition]:
# uniformly sample transitions
n_transitions = self._rollout_batch_size
indices = np.random.randint(len(transitions), size=n_transitions)
return [transitions[i] for i in indices]
def _get_rollout_horizon(self) -> int:
return self._rollout_horizon
def _mutate_transition(
self,
observations: np.ndarray,
rewards: np.ndarray,
variances: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
# regularize by uncertainty
rewards -= self._lam * variances
return observations, rewards | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/mopo.py | mopo.py |
from typing import List, Optional, Tuple, cast
import numpy as np
from ..constants import DYNAMICS_NOT_GIVEN_ERROR, IMPL_NOT_INITIALIZED_ERROR
from ..dataset import Transition, TransitionMiniBatch
from ..dynamics import DynamicsBase
from .base import AlgoImplBase
class ModelBaseMixin:
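    """Mixin that generates synthetic transitions through model-based rollouts.

    ``generate_new_data`` samples initial transitions, repeatedly queries the
    dynamics model for the next observation and reward, optionally mutates the
    prediction (e.g. with an uncertainty penalty), and links the generated
    ``Transition`` objects into trajectories.
    """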
_grad_step: int
_impl: Optional[AlgoImplBase]
_dynamics: Optional[DynamicsBase]
def generate_new_data(
self, transitions: List[Transition]
) -> Optional[List[Transition]]:
assert self._impl, IMPL_NOT_INITIALIZED_ERROR
assert self._dynamics, DYNAMICS_NOT_GIVEN_ERROR
if not self._is_generating_new_data():
return None
init_transitions = self._sample_initial_transitions(transitions)
rets: List[Transition] = []
# rollout
batch = TransitionMiniBatch(init_transitions)
observations = batch.observations
actions = self._sample_rollout_action(observations)
rewards = batch.rewards
prev_transitions: List[Transition] = []
for _ in range(self._get_rollout_horizon()):
# predict next state
pred = self._dynamics.predict(observations, actions, True)
pred = cast(Tuple[np.ndarray, np.ndarray, np.ndarray], pred)
next_observations, next_rewards, variances = pred
# regularize by uncertainty
next_observations, next_rewards = self._mutate_transition(
next_observations, next_rewards, variances
)
# sample policy action
next_actions = self._sample_rollout_action(next_observations)
# append new transitions
new_transitions = []
for i in range(len(init_transitions)):
transition = Transition(
observation_shape=self._impl.observation_shape,
action_size=self._impl.action_size,
observation=observations[i],
action=actions[i],
reward=float(rewards[i][0]),
next_observation=next_observations[i],
next_action=next_actions[i],
next_reward=float(next_rewards[i][0]),
terminal=0.0,
)
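                # link the synthetic transitions into a trajectory so that
                # they can be traced like transitions from the real dataset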
if prev_transitions:
prev_transitions[i].next_transition = transition
transition.prev_transition = prev_transitions[i]
new_transitions.append(transition)
prev_transitions = new_transitions
rets += new_transitions
observations = next_observations.copy()
actions = next_actions.copy()
rewards = next_rewards.copy()
return rets
def _is_generating_new_data(self) -> bool:
raise NotImplementedError
def _sample_initial_transitions(
self, transitions: List[Transition]
) -> List[Transition]:
raise NotImplementedError
def _sample_rollout_action(self, observations: np.ndarray) -> np.ndarray:
assert self._impl, IMPL_NOT_INITIALIZED_ERROR
return self._impl.sample_action(observations)
def _get_rollout_horizon(self) -> int:
raise NotImplementedError
def _mutate_transition(
self,
observations: np.ndarray,
rewards: np.ndarray,
variances: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
return observations, rewards | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/utility.py | utility.py |
from typing import Any, Dict, Optional, Sequence
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.td3_plus_bc_impl import TD3PlusBCImpl
class TD3PlusBC(AlgoBase):
r"""TD3+BC algorithm.
    TD3+BC is a simple offline RL algorithm built on top of TD3.
    TD3+BC introduces a BC-regularized policy objective function.
.. math::
J(\phi) = \mathbb{E}_{s,a \sim D}
[\lambda Q(s, \pi(s)) - (a - \pi(s))^2]
where
.. math::
        \lambda = \frac{\alpha}{\frac{1}{N} \sum_{(s_i, a_i)} |Q(s_i, a_i)|}
References:
* `Fujimoto et al., A Minimalist Approach to Offline Reinforcement
Learning. <https://arxiv.org/abs/2106.06860>`_
Args:
actor_learning_rate (float): learning rate for a policy function.
critic_learning_rate (float): learning rate for Q functions.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
target_smoothing_sigma (float): standard deviation for target noise.
target_smoothing_clip (float): clipping range for target noise.
alpha (float): :math:`\alpha` value.
update_actor_interval (int): interval to update policy function
described as `delayed policy update` in the paper.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
        impl (d3rlpy.algos.torch.td3_plus_bc_impl.TD3PlusBCImpl):
            algorithm implementation.
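
    A minimal offline-training sketch (``dataset`` is assumed to be a
    pre-built ``d3rlpy.dataset.MDPDataset`` and the ``fit`` keyword arguments
    are illustrative only):

    .. code-block:: python

        from d3rlpy.algos import TD3PlusBC

        # note: observation standardization is enabled by default
        # (scaler="standard").
        td3_plus_bc = TD3PlusBC(alpha=2.5)
        td3_plus_bc.fit(dataset, n_epochs=10)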
"""
_actor_learning_rate: float
_critic_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_tau: float
_n_critics: int
_target_reduction_type: str
_target_smoothing_sigma: float
_target_smoothing_clip: float
_alpha: float
_update_actor_interval: int
_use_gpu: Optional[Device]
_impl: Optional[TD3PlusBCImpl]
def __init__(
self,
*,
actor_learning_rate: float = 3e-4,
critic_learning_rate: float = 3e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 256,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 2,
target_reduction_type: str = "min",
target_smoothing_sigma: float = 0.2,
target_smoothing_clip: float = 0.5,
alpha: float = 2.5,
update_actor_interval: int = 2,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = "standard",
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[TD3PlusBCImpl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._tau = tau
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._target_smoothing_sigma = target_smoothing_sigma
self._target_smoothing_clip = target_smoothing_clip
self._alpha = alpha
self._update_actor_interval = update_actor_interval
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = TD3PlusBCImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
target_smoothing_sigma=self._target_smoothing_sigma,
target_smoothing_clip=self._target_smoothing_clip,
alpha=self._alpha,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
# delayed policy update
if self._grad_step % self._update_actor_interval == 0:
actor_loss = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss})
self._impl.update_critic_target()
self._impl.update_actor_target()
return metrics
def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/td3_plus_bc.py | td3_plus_bc.py |
from typing import Any, Dict, Optional, Sequence
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.sac_impl import DiscreteSACImpl, SACImpl
class SAC(AlgoBase):
r"""Soft Actor-Critic algorithm.
SAC is a DDPG-based maximum entropy RL algorithm, which produces
state-of-the-art performance in online RL settings.
SAC leverages twin Q functions proposed in TD3. Additionally,
`delayed policy update` in TD3 is also implemented, which is not done in
the paper.
.. math::
L(\theta_i) = \mathbb{E}_{s_t,\, a_t,\, r_{t+1},\, s_{t+1} \sim D,\,
a_{t+1} \sim \pi_\phi(\cdot|s_{t+1})} \Big[
\big(y - Q_{\theta_i}(s_t, a_t)\big)^2\Big]
.. math::
y = r_{t+1} + \gamma \Big(\min_j Q_{\theta_j}(s_{t+1}, a_{t+1})
- \alpha \log \big(\pi_\phi(a_{t+1}|s_{t+1})\big)\Big)
.. math::
J(\phi) = \mathbb{E}_{s_t \sim D,\, a_t \sim \pi_\phi(\cdot|s_t)}
\Big[\alpha \log (\pi_\phi (a_t|s_t))
- \min_i Q_{\theta_i}\big(s_t, \pi_\phi(a_t|s_t)\big)\Big]
The temperature parameter :math:`\alpha` is also automatically adjustable.
.. math::
J(\alpha) = \mathbb{E}_{s_t \sim D,\, a_t \sim \pi_\phi(\cdot|s_t)}
\bigg[-\alpha \Big(\log \big(\pi_\phi(a_t|s_t)\big) + H\Big)\bigg]
where :math:`H` is a target
entropy, which is defined as :math:`\dim a`.
References:
* `Haarnoja et al., Soft Actor-Critic: Off-Policy Maximum Entropy Deep
Reinforcement Learning with a Stochastic Actor.
<https://arxiv.org/abs/1801.01290>`_
* `Haarnoja et al., Soft Actor-Critic Algorithms and Applications.
<https://arxiv.org/abs/1812.05905>`_
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
temp_learning_rate (float): learning rate for temperature parameter.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
temp_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the temperature.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
initial_temperature (float): initial temperature value.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.sac_impl.SACImpl): algorithm implementation.
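
    A minimal offline-training sketch (``dataset`` is assumed to be a
    pre-built ``d3rlpy.dataset.MDPDataset`` and the ``fit`` keyword arguments
    are illustrative only):

    .. code-block:: python

        from d3rlpy.algos import SAC

        # setting temp_learning_rate=0 would disable the automatic
        # temperature adjustment described above.
        sac = SAC(initial_temperature=1.0)
        sac.fit(dataset, n_epochs=10)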
"""
_actor_learning_rate: float
_critic_learning_rate: float
_temp_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_temp_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_tau: float
_n_critics: int
_target_reduction_type: str
_initial_temperature: float
_use_gpu: Optional[Device]
_impl: Optional[SACImpl]
def __init__(
self,
*,
actor_learning_rate: float = 3e-4,
critic_learning_rate: float = 3e-4,
temp_learning_rate: float = 3e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
temp_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 256,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 2,
target_reduction_type: str = "min",
initial_temperature: float = 1.0,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[SACImpl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._temp_learning_rate = temp_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._temp_optim_factory = temp_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._tau = tau
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._initial_temperature = initial_temperature
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = SACImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
temp_learning_rate=self._temp_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
temp_optim_factory=self._temp_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
initial_temperature=self._initial_temperature,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
# lagrangian parameter update for SAC temperature
if self._temp_learning_rate > 0:
temp_loss, temp = self._impl.update_temp(batch)
metrics.update({"temp_loss": temp_loss, "temp": temp})
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
actor_loss = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss})
self._impl.update_critic_target()
self._impl.update_actor_target()
return metrics
def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS
class DiscreteSAC(AlgoBase):
r"""Soft Actor-Critic algorithm for discrete action-space.
    This discrete version of SAC is built on top of the continuous version of
    SAC with additional modifications.
    The target state-value is calculated as the expectation over all
    action-values.
.. math::
V(s_t) = \pi_\phi (s_t)^T [Q_\theta(s_t) - \alpha \log (\pi_\phi (s_t))]
Similarly, the objective function for the temperature parameter is as
follows.
.. math::
J(\alpha) = \pi_\phi (s_t)^T [-\alpha (\log(\pi_\phi (s_t)) + H)]
Finally, the objective function for the policy function is as follows.
.. math::
J(\phi) = \mathbb{E}_{s_t \sim D}
[\pi_\phi(s_t)^T [\alpha \log(\pi_\phi(s_t)) - Q_\theta(s_t)]]
References:
* `Christodoulou, Soft Actor-Critic for Discrete Action Settings.
<https://arxiv.org/abs/1910.07207>`_
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
temp_learning_rate (float): learning rate for temperature parameter.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
temp_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the temperature.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
n_critics (int): the number of Q functions for ensemble.
initial_temperature (float): initial temperature value.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.sac_impl.DiscreteSACImpl):
algorithm implementation.
"""
_actor_learning_rate: float
_critic_learning_rate: float
_temp_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_temp_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_n_critics: int
_initial_temperature: float
_target_update_interval: int
_use_gpu: Optional[Device]
_impl: Optional[DiscreteSACImpl]
def __init__(
self,
*,
actor_learning_rate: float = 3e-4,
critic_learning_rate: float = 3e-4,
temp_learning_rate: float = 3e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(eps=1e-4),
critic_optim_factory: OptimizerFactory = AdamFactory(eps=1e-4),
temp_optim_factory: OptimizerFactory = AdamFactory(eps=1e-4),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 64,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
n_critics: int = 2,
initial_temperature: float = 1.0,
target_update_interval: int = 8000,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[DiscreteSACImpl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=None,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._temp_learning_rate = temp_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._temp_optim_factory = temp_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._n_critics = n_critics
self._initial_temperature = initial_temperature
self._target_update_interval = target_update_interval
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = DiscreteSACImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
temp_learning_rate=self._temp_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
temp_optim_factory=self._temp_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
n_critics=self._n_critics,
initial_temperature=self._initial_temperature,
use_gpu=self._use_gpu,
scaler=self._scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
        # lagrangian parameter update for SAC temperature
if self._temp_learning_rate > 0:
temp_loss, temp = self._impl.update_temp(batch)
metrics.update({"temp_loss": temp_loss, "temp": temp})
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
actor_loss = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss})
if self._grad_step % self._target_update_interval == 0:
self._impl.update_target()
return metrics
def get_action_type(self) -> ActionSpace:
return ActionSpace.DISCRETE | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/sac.py | sac.py |
from typing import Any, Dict, Type
from .awac import AWAC
from .awr import AWR, DiscreteAWR
from .base import AlgoBase
from .bc import BC, DiscreteBC
from .bcq import BCQ, DiscreteBCQ
from .bear import BEAR
from .combo import COMBO
from .cql import CQL, DiscreteCQL
from .crr import CRR
from .ddpg import DDPG
from .dqn import DQN, DoubleDQN
from .mopo import MOPO
from .plas import PLAS, PLASWithPerturbation
from .random_policy import DiscreteRandomPolicy, RandomPolicy
from .sac import SAC, DiscreteSAC
from .td3 import TD3
from .td3_plus_bc import TD3PlusBC
__all__ = [
"AlgoBase",
"AWAC",
"AWR",
"DiscreteAWR",
"BC",
"DiscreteBC",
"BCQ",
"DiscreteBCQ",
"BEAR",
"COMBO",
"CQL",
"DiscreteCQL",
"CRR",
"DDPG",
"DQN",
"DoubleDQN",
"MOPO",
"PLAS",
"PLASWithPerturbation",
"SAC",
"DiscreteSAC",
"TD3",
"TD3PlusBC",
"RandomPolicy",
"DiscreteRandomPolicy",
"get_algo",
"create_algo",
]
DISCRETE_ALGORITHMS: Dict[str, Type[AlgoBase]] = {
"awr": DiscreteAWR,
"bc": DiscreteBC,
"bcq": DiscreteBCQ,
"cql": DiscreteCQL,
"dqn": DQN,
"double_dqn": DoubleDQN,
"sac": DiscreteSAC,
"random": DiscreteRandomPolicy,
}
CONTINUOUS_ALGORITHMS: Dict[str, Type[AlgoBase]] = {
"awac": AWAC,
"awr": AWR,
"bc": BC,
"bcq": BCQ,
"bear": BEAR,
"combo": COMBO,
"cql": CQL,
"crr": CRR,
"ddpg": DDPG,
"mopo": MOPO,
"plas": PLASWithPerturbation,
"sac": SAC,
"td3": TD3,
"td3_plus_bc": TD3PlusBC,
"random": RandomPolicy,
}
def get_algo(name: str, discrete: bool) -> Type[AlgoBase]:
"""Returns algorithm class from its name.
Args:
name (str): algorithm name in snake_case.
discrete (bool): flag to use discrete action-space algorithm.
Returns:
type: algorithm class.
"""
if discrete:
if name in DISCRETE_ALGORITHMS:
return DISCRETE_ALGORITHMS[name]
raise ValueError(f"{name} does not support discrete action-space.")
if name in CONTINUOUS_ALGORITHMS:
return CONTINUOUS_ALGORITHMS[name]
raise ValueError(f"{name} does not support continuous action-space.")
def create_algo(name: str, discrete: bool, **params: Any) -> AlgoBase:
"""Returns algorithm object from its name.
Args:
name (str): algorithm name in snake_case.
discrete (bool): flag to use discrete action-space algorithm.
params (any): arguments for algorithm.
Returns:
d3rlpy.algos.base.AlgoBase: algorithm.
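
    A short example (the keyword arguments are illustrative; any constructor
    argument of the chosen algorithm can be passed through ``params``):

    .. code-block:: python

        # continuous-control SAC configured by name
        sac = create_algo("sac", discrete=False, batch_size=256)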
"""
return get_algo(name, discrete)(**params) | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/__init__.py | __init__.py |
from typing import Any, Dict, Optional, Sequence
from ..argument_utility import (
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.dqn_impl import DoubleDQNImpl, DQNImpl
class DQN(AlgoBase):
r"""Deep Q-Network algorithm.
.. math::
L(\theta) = \mathbb{E}_{s_t, a_t, r_{t+1}, s_{t+1} \sim D} [(r_{t+1}
+ \gamma \max_a Q_{\theta'}(s_{t+1}, a) - Q_\theta(s_t, a_t))^2]
where :math:`\theta'` is the target network parameter. The target network
parameter is synchronized every `target_update_interval` iterations.
References:
* `Mnih et al., Human-level control through deep reinforcement
learning. <https://www.nature.com/articles/nature14236>`_
Args:
learning_rate (float): learning rate.
optim_factory (d3rlpy.models.optimizers.OptimizerFactory or str):
optimizer factory.
encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
target_update_interval (int): interval to update the target network.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.dqn_impl.DQNImpl): algorithm implementation.
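
    A minimal offline-training sketch (``dataset`` is assumed to be a
    pre-built ``d3rlpy.dataset.MDPDataset`` with discrete actions and the
    ``fit`` keyword arguments are illustrative only):

    .. code-block:: python

        from d3rlpy.algos import DQN

        # the target network is synchronized every target_update_interval
        # gradient steps.
        dqn = DQN(target_update_interval=8000)
        dqn.fit(dataset, n_epochs=10)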
"""
_learning_rate: float
_optim_factory: OptimizerFactory
_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_n_critics: int
_target_reduction_type: str
_target_update_interval: int
_use_gpu: Optional[Device]
_impl: Optional[DQNImpl]
def __init__(
self,
*,
learning_rate: float = 6.25e-5,
optim_factory: OptimizerFactory = AdamFactory(),
encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 32,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
n_critics: int = 1,
target_reduction_type: str = "min",
target_update_interval: int = 8000,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[DQNImpl] = None,
**kwargs: Any,
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._learning_rate = learning_rate
self._optim_factory = optim_factory
self._encoder_factory = check_encoder(encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._target_update_interval = target_update_interval
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = DQNImpl(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=self._learning_rate,
optim_factory=self._optim_factory,
encoder_factory=self._encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
use_gpu=self._use_gpu,
scaler=self._scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
loss = self._impl.update(batch)
if self._grad_step % self._target_update_interval == 0:
self._impl.update_target()
return {"loss": loss}
def get_action_type(self) -> ActionSpace:
return ActionSpace.DISCRETE
class DoubleDQN(DQN):
r"""Double Deep Q-Network algorithm.
The difference from DQN is that the action is taken from the current Q
function instead of the target Q function.
This modification significantly decreases overestimation bias of TD
learning.
.. math::
L(\theta) = \mathbb{E}_{s_t, a_t, r_{t+1}, s_{t+1} \sim D} [(r_{t+1}
+ \gamma Q_{\theta'}(s_{t+1}, \text{argmax}_a
Q_\theta(s_{t+1}, a)) - Q_\theta(s_t, a_t))^2]
where :math:`\theta'` is the target network parameter. The target network
parameter is synchronized every `target_update_interval` iterations.
References:
* `Hasselt et al., Deep reinforcement learning with double Q-learning.
<https://arxiv.org/abs/1509.06461>`_
Args:
learning_rate (float): learning rate.
optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory.
encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
n_critics (int): the number of Q functions.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
target_update_interval (int): interval to synchronize the target
network.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`
impl (d3rlpy.algos.torch.dqn_impl.DoubleDQNImpl):
algorithm implementation.
"""
_impl: Optional[DoubleDQNImpl]
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = DoubleDQNImpl(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=self._learning_rate,
optim_factory=self._optim_factory,
encoder_factory=self._encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
use_gpu=self._use_gpu,
scaler=self._scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build() | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/dqn.py | dqn.py |
from typing import Any, List, Sequence, Tuple, Union
import numpy as np
from ..argument_utility import ActionScalerArg
from ..constants import ActionSpace
from .base import AlgoBase
class RandomPolicy(AlgoBase):
r"""Random Policy for continuous control algorithm.
This is designed for data collection and lightweight interaction tests.
``fit`` and ``fit_online`` methods will raise exceptions.
Args:
distribution (str): random distribution. The available options are
``['uniform', 'normal']``.
normal_std (float): standard deviation of the normal distribution. This
is only used when ``distribution='normal'``.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
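
    A small sketch of the sampling behaviour (``_create_impl`` is called
    directly here only for illustration; it is normally invoked by the
    training pipeline):

    .. code-block:: python

        import numpy as np

        policy = RandomPolicy(distribution="normal", normal_std=0.5)
        policy._create_impl(observation_shape=(4,), action_size=2)

        # actions are clipped to [-1, 1] before optional rescaling
        actions = policy.sample_action(np.random.random((10, 4)))
        assert actions.shape == (10, 2)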
"""
_distribution: str
_normal_std: float
_action_size: int
def __init__(
self,
*,
distribution: str = "uniform",
normal_std: float = 1.0,
action_scaler: ActionScalerArg = None,
**kwargs: Any,
):
super().__init__(
batch_size=1,
n_frames=1,
n_steps=1,
gamma=0.0,
scaler=None,
action_scaler=action_scaler,
kwargs=kwargs,
)
self._distribution = distribution
self._normal_std = normal_std
self._action_size = 1
self._impl = None
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._action_size = action_size
def predict(self, x: Union[np.ndarray, List[Any]]) -> np.ndarray:
return self.sample_action(x)
def sample_action(self, x: Union[np.ndarray, List[Any]]) -> np.ndarray:
x = np.asarray(x)
action_shape = (x.shape[0], self._action_size)
if self._distribution == "uniform":
action = np.random.uniform(-1.0, 1.0, size=action_shape)
elif self._distribution == "normal":
action = np.random.normal(0.0, self._normal_std, size=action_shape)
else:
raise ValueError(f"invalid distribution type: {self._distribution}")
action = np.clip(action, -1.0, 1.0)
if self._action_scaler:
action = self._action_scaler.reverse_transform_numpy(action)
return action
def predict_value(
self,
x: Union[np.ndarray, List[Any]],
action: Union[np.ndarray, List[Any]],
with_std: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
raise NotImplementedError
def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS
class DiscreteRandomPolicy(AlgoBase):
r"""Random Policy for discrete control algorithm.
This is designed for data collection and lightweight interaction tests.
``fit`` and ``fit_online`` methods will raise exceptions.
"""
_action_size: int
def __init__(self, **kwargs: Any):
super().__init__(
batch_size=1,
n_frames=1,
n_steps=1,
gamma=0.0,
scaler=None,
action_scaler=None,
kwargs=kwargs,
)
self._action_size = 1
self._impl = None
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._action_size = action_size
def predict(self, x: Union[np.ndarray, List[Any]]) -> np.ndarray:
return self.sample_action(x)
def sample_action(self, x: Union[np.ndarray, List[Any]]) -> np.ndarray:
x = np.asarray(x)
return np.random.randint(self._action_size, size=x.shape[0])
def predict_value(
self,
x: Union[np.ndarray, List[Any]],
action: Union[np.ndarray, List[Any]],
with_std: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
raise NotImplementedError
def get_action_type(self) -> ActionSpace:
return ActionSpace.DISCRETE | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/random_policy.py | random_policy.py |
from typing import Any, Dict, Optional, Sequence
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.plas_impl import PLASImpl, PLASWithPerturbationImpl
class PLAS(AlgoBase):
r"""Policy in Latent Action Space algorithm.
PLAS is an offline deep reinforcement learning algorithm whose policy
    function is trained in the latent space of a Conditional VAE.
Unlike other algorithms, PLAS can achieve good performance by using
its less constrained policy function.
.. math::
a \sim p_\beta (a|s, z=\pi_\phi(s))
where :math:`\beta` is a parameter of the decoder in Conditional VAE.
References:
* `Zhou et al., PLAS: latent action space for offline reinforcement
learning. <https://arxiv.org/abs/2011.07213>`_
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
imitator_learning_rate (float): learning rate for Conditional VAE.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
imitator_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the conditional VAE.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
imitator_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the conditional VAE.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
update_actor_interval (int): interval to update policy function.
lam (float): weight factor for critic ensemble.
warmup_steps (int): the number of steps to warmup the VAE.
        beta (float): KL regularization term for the Conditional VAE.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
        impl (d3rlpy.algos.torch.plas_impl.PLASImpl): algorithm implementation.
"""
_actor_learning_rate: float
_critic_learning_rate: float
_imitator_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_imitator_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_imitator_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_tau: float
_n_critics: int
_target_reduction_type: str
_update_actor_interval: int
_lam: float
_warmup_steps: int
_beta: float
_use_gpu: Optional[Device]
_impl: Optional[PLASImpl]
def __init__(
self,
*,
actor_learning_rate: float = 1e-4,
critic_learning_rate: float = 1e-3,
imitator_learning_rate: float = 1e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
imitator_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
imitator_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 100,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 2,
target_reduction_type: str = "mix",
update_actor_interval: int = 1,
lam: float = 0.75,
warmup_steps: int = 500000,
beta: float = 0.5,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[PLASImpl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._imitator_learning_rate = imitator_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._imitator_optim_factory = imitator_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._imitator_encoder_factory = check_encoder(imitator_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._tau = tau
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._update_actor_interval = update_actor_interval
self._lam = lam
self._warmup_steps = warmup_steps
self._beta = beta
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = PLASImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
imitator_learning_rate=self._imitator_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
imitator_optim_factory=self._imitator_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
imitator_encoder_factory=self._imitator_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
lam=self._lam,
beta=self._beta,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
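        # the conditional VAE (imitator) is trained alone during warmup;
        # critic and actor updates begin only after warmup_steps gradient steps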
if self._grad_step < self._warmup_steps:
imitator_loss = self._impl.update_imitator(batch)
metrics.update({"imitator_loss": imitator_loss})
else:
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
if self._grad_step % self._update_actor_interval == 0:
actor_loss = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss})
self._impl.update_actor_target()
self._impl.update_critic_target()
return metrics
def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS
class PLASWithPerturbation(PLAS):
r"""Policy in Latent Action Space algorithm with perturbation layer.
    PLAS with a perturbation layer enables the policy to output actions
    slightly outside the data distribution.
References:
* `Zhou et al., PLAS: latent action space for offline reinforcement
learning. <https://arxiv.org/abs/2011.07213>`_
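
    A minimal usage sketch highlighting the extra ``action_flexibility``
    parameter (the ``dataset`` below is an assumed
    ``d3rlpy.dataset.MDPDataset``; the exact ``fit`` arguments depend on the
    d3rlpy version):

    .. code-block:: python

        from d3rlpy.algos import PLASWithPerturbation

        # action_flexibility controls the output scale of the perturbation
        # layer added on top of the decoded action
        plas = PLASWithPerturbation(action_flexibility=0.05)
        plas.fit(dataset, n_steps=1000000)
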
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
imitator_learning_rate (float): learning rate for Conditional VAE.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
imitator_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the conditional VAE.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
imitator_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the conditional VAE.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
update_actor_interval (int): interval to update policy function.
lam (float): weight factor for critic ensemble.
action_flexibility (float): output scale of perturbation layer.
warmup_steps (int): the number of steps to warmup the VAE.
        beta (float): KL regularization term for the Conditional VAE.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
        impl (d3rlpy.algos.torch.plas_impl.PLASWithPerturbationImpl):
            algorithm implementation.
"""
_action_flexibility: float
_impl: Optional[PLASWithPerturbationImpl]
def __init__(
self,
*,
actor_learning_rate: float = 1e-4,
critic_learning_rate: float = 1e-3,
imitator_learning_rate: float = 1e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
imitator_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
imitator_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 100,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 2,
target_reduction_type: str = "mix",
update_actor_interval: int = 1,
lam: float = 0.75,
action_flexibility: float = 0.05,
warmup_steps: int = 500000,
beta: float = 0.5,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[PLASWithPerturbationImpl] = None,
**kwargs: Any
):
super().__init__(
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
imitator_learning_rate=imitator_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
imitator_optim_factory=imitator_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
imitator_encoder_factory=imitator_encoder_factory,
q_func_factory=q_func_factory,
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
update_actor_interval=update_actor_interval,
lam=lam,
warmup_steps=warmup_steps,
beta=beta,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
impl=impl,
**kwargs,
)
self._action_flexibility = action_flexibility
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = PLASWithPerturbationImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
imitator_learning_rate=self._imitator_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
imitator_optim_factory=self._imitator_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
imitator_encoder_factory=self._imitator_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
lam=self._lam,
beta=self._beta,
action_flexibility=self._action_flexibility,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
        self._impl.build()

# --- end of file: d3rlpy/algos/plas.py ---
from typing import Any, Dict, Optional, Sequence
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .dqn import DoubleDQN
from .torch.cql_impl import CQLImpl, DiscreteCQLImpl
class CQL(AlgoBase):
r"""Conservative Q-Learning algorithm.
CQL is a SAC-based data-driven deep reinforcement learning algorithm, which
achieves state-of-the-art performance in offline RL problems.
    CQL mitigates overestimation error by minimizing action-values under the
    current policy while maximizing values under the data distribution to
    avoid underestimation.
.. math::
L(\theta_i) = \alpha\, \mathbb{E}_{s_t \sim D}
\left[\log{\sum_a \exp{Q_{\theta_i}(s_t, a)}}
- \mathbb{E}_{a \sim D} \big[Q_{\theta_i}(s_t, a)\big] - \tau\right]
+ L_\mathrm{SAC}(\theta_i)
where :math:`\alpha` is an automatically adjustable value via Lagrangian
dual gradient descent and :math:`\tau` is a threshold value.
If the action-value difference is smaller than :math:`\tau`, the
:math:`\alpha` will become smaller.
Otherwise, the :math:`\alpha` will become larger to aggressively penalize
action-values.
In continuous control, :math:`\log{\sum_a \exp{Q(s, a)}}` is computed as
follows.
.. math::
\log{\sum_a \exp{Q(s, a)}} \approx \log{\left(
\frac{1}{2N} \sum_{a_i \sim \text{Unif}(a)}^N
\left[\frac{\exp{Q(s, a_i)}}{\text{Unif}(a)}\right]
+ \frac{1}{2N} \sum_{a_i \sim \pi_\phi(a|s)}^N
\left[\frac{\exp{Q(s, a_i)}}{\pi_\phi(a_i|s)}\right]\right)}
where :math:`N` is the number of sampled actions.
    The rest of the optimization is exactly the same as
    :class:`d3rlpy.algos.SAC`.
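
    The following is an illustrative sketch of this importance-sampling
    estimator, not the library's internal implementation. ``q_rand`` and
    ``q_pi`` are assumed ``(batch, N)`` Q-values of uniformly sampled and
    policy-sampled actions, and ``log_pi`` holds the policy log-densities of
    the latter:

    .. code-block:: python

        import math
        import torch

        def approx_logsumexp_q(q_rand, q_pi, log_pi, action_size):
            # density of Unif([-1, 1]^action_size) is 0.5 ** action_size
            log_unif = math.log(0.5 ** action_size)
            # importance-weighted terms stacked to shape (batch, 2N)
            target = torch.cat([q_rand - log_unif, q_pi - log_pi], dim=1)
            # log((1 / 2N) * sum exp(...)) = logsumexp(...) - log(2N)
            return torch.logsumexp(target, dim=1) - math.log(target.shape[1])
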
References:
* `Kumar et al., Conservative Q-Learning for Offline Reinforcement
Learning. <https://arxiv.org/abs/2006.04779>`_
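
    A minimal usage sketch (the ``dataset`` and ``observations`` below are
    assumed user-provided data; the exact ``fit`` arguments depend on the
    d3rlpy version):

    .. code-block:: python

        from d3rlpy.algos import CQL

        cql = CQL(conservative_weight=5.0)
        cql.fit(dataset, n_steps=500000)

        # greedy actions for a batch of new observations
        actions = cql.predict(observations)
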
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
temp_learning_rate (float):
learning rate for temperature parameter of SAC.
alpha_learning_rate (float): learning rate for :math:`\alpha`.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
temp_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the temperature.
alpha_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for :math:`\alpha`.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
initial_temperature (float): initial temperature value.
initial_alpha (float): initial :math:`\alpha` value.
alpha_threshold (float): threshold value described as :math:`\tau`.
conservative_weight (float): constant weight to scale conservative loss.
n_action_samples (int): the number of sampled actions to compute
:math:`\log{\sum_a \exp{Q(s, a)}}`.
soft_q_backup (bool): flag to use SAC-style backup.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.cql_impl.CQLImpl): algorithm implementation.
"""
_actor_learning_rate: float
_critic_learning_rate: float
_temp_learning_rate: float
_alpha_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_temp_optim_factory: OptimizerFactory
_alpha_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_tau: float
_n_critics: int
_target_reduction_type: str
_initial_temperature: float
_initial_alpha: float
_alpha_threshold: float
_conservative_weight: float
_n_action_samples: int
_soft_q_backup: bool
_use_gpu: Optional[Device]
_impl: Optional[CQLImpl]
def __init__(
self,
*,
actor_learning_rate: float = 1e-4,
critic_learning_rate: float = 3e-4,
temp_learning_rate: float = 1e-4,
alpha_learning_rate: float = 1e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
temp_optim_factory: OptimizerFactory = AdamFactory(),
alpha_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 256,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 2,
target_reduction_type: str = "min",
initial_temperature: float = 1.0,
initial_alpha: float = 1.0,
alpha_threshold: float = 10.0,
conservative_weight: float = 5.0,
n_action_samples: int = 10,
soft_q_backup: bool = False,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[CQLImpl] = None,
**kwargs: Any,
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._temp_learning_rate = temp_learning_rate
self._alpha_learning_rate = alpha_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._temp_optim_factory = temp_optim_factory
self._alpha_optim_factory = alpha_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._tau = tau
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._initial_temperature = initial_temperature
self._initial_alpha = initial_alpha
self._alpha_threshold = alpha_threshold
self._conservative_weight = conservative_weight
self._n_action_samples = n_action_samples
self._soft_q_backup = soft_q_backup
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = CQLImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
temp_learning_rate=self._temp_learning_rate,
alpha_learning_rate=self._alpha_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
temp_optim_factory=self._temp_optim_factory,
alpha_optim_factory=self._alpha_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
initial_temperature=self._initial_temperature,
initial_alpha=self._initial_alpha,
alpha_threshold=self._alpha_threshold,
conservative_weight=self._conservative_weight,
n_action_samples=self._n_action_samples,
soft_q_backup=self._soft_q_backup,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
# lagrangian parameter update for SAC temperature
if self._temp_learning_rate > 0:
temp_loss, temp = self._impl.update_temp(batch)
metrics.update({"temp_loss": temp_loss, "temp": temp})
# lagrangian parameter update for conservative loss weight
if self._alpha_learning_rate > 0:
alpha_loss, alpha = self._impl.update_alpha(batch)
metrics.update({"alpha_loss": alpha_loss, "alpha": alpha})
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
actor_loss = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss})
self._impl.update_critic_target()
self._impl.update_actor_target()
return metrics
def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS
class DiscreteCQL(DoubleDQN):
r"""Discrete version of Conservative Q-Learning algorithm.
    The discrete version of CQL is a DoubleDQN-based offline deep
    reinforcement learning algorithm (the original paper uses DQN), which
    achieves state-of-the-art performance in offline RL problems.
    CQL mitigates overestimation error by minimizing action-values under the
    current policy while maximizing values under the data distribution to
    avoid underestimation.
.. math::
L(\theta) = \alpha \mathbb{E}_{s_t \sim D}
[\log{\sum_a \exp{Q_{\theta}(s_t, a)}}
             - \mathbb{E}_{a \sim D} [Q_{\theta}(s_t, a)]]
+ L_{DoubleDQN}(\theta)
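
    An illustrative sketch of the conservative term (not the library's
    internal code), where ``q`` is an assumed ``(batch, action_size)`` tensor
    of Q-values and ``actions`` holds the dataset action indices of shape
    ``(batch,)``:

    .. code-block:: python

        import torch
        import torch.nn.functional as F

        def conservative_term(q, actions):
            # log sum_a exp Q(s, a) for every state in the batch
            logsumexp = torch.logsumexp(q, dim=1)
            # Q(s, a) for the actions that actually appear in the dataset
            one_hot = F.one_hot(actions.long(), num_classes=q.shape[1])
            data_values = (q * one_hot).sum(dim=1)
            return (logsumexp - data_values).mean()
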
References:
* `Kumar et al., Conservative Q-Learning for Offline Reinforcement
Learning. <https://arxiv.org/abs/2006.04779>`_
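
    A minimal usage sketch for pixel observations (the ``dataset`` below is
    an assumed ``MDPDataset`` of image transitions; the exact ``fit``
    arguments depend on the d3rlpy version):

    .. code-block:: python

        from d3rlpy.algos import DiscreteCQL

        cql = DiscreteCQL(n_frames=4, scaler="pixel")
        cql.fit(dataset, n_steps=1000000)
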
Args:
learning_rate (float): learning rate.
optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory.
encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
target_update_interval (int): interval to synchronize the target
network.
alpha (float): the :math:`\alpha` value above.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
            The available options are `['pixel', 'min_max', 'standard']`.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.cql_impl.DiscreteCQLImpl):
algorithm implementation.
"""
_alpha: float
_impl: Optional[DiscreteCQLImpl]
def __init__(
self,
*,
learning_rate: float = 6.25e-5,
optim_factory: OptimizerFactory = AdamFactory(),
encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 32,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
n_critics: int = 1,
target_reduction_type: str = "min",
target_update_interval: int = 8000,
alpha: float = 1.0,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[DiscreteCQLImpl] = None,
**kwargs: Any,
):
super().__init__(
learning_rate=learning_rate,
optim_factory=optim_factory,
encoder_factory=encoder_factory,
q_func_factory=q_func_factory,
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
target_update_interval=target_update_interval,
use_gpu=use_gpu,
scaler=scaler,
reward_scaler=reward_scaler,
impl=impl,
**kwargs,
)
self._alpha = alpha
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = DiscreteCQLImpl(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=self._learning_rate,
optim_factory=self._optim_factory,
encoder_factory=self._encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
alpha=self._alpha,
use_gpu=self._use_gpu,
scaler=self._scaler,
reward_scaler=self._reward_scaler,
)
        self._impl.build()

# --- end of file: d3rlpy/algos/cql.py ---
import math
from typing import Optional, Sequence, cast
import numpy as np
import torch
from torch.optim import Optimizer
from ...gpu import Device
from ...models.builders import (
create_conditional_vae,
create_deterministic_residual_policy,
create_discrete_imitator,
)
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...models.torch import (
ConditionalVAE,
DeterministicResidualPolicy,
DiscreteImitator,
PixelEncoder,
compute_max_with_n_actions,
)
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch, torch_api, train_api
from .ddpg_impl import DDPGBaseImpl
from .dqn_impl import DoubleDQNImpl
class BCQImpl(DDPGBaseImpl):
_imitator_learning_rate: float
_imitator_optim_factory: OptimizerFactory
_imitator_encoder_factory: EncoderFactory
_lam: float
_n_action_samples: int
_action_flexibility: float
_beta: float
_policy: Optional[DeterministicResidualPolicy]
_targ_policy: Optional[DeterministicResidualPolicy]
_imitator: Optional[ConditionalVAE]
_imitator_optim: Optional[Optimizer]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
imitator_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
imitator_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
imitator_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
lam: float,
n_action_samples: int,
action_flexibility: float,
beta: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type="mix",
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._imitator_learning_rate = imitator_learning_rate
self._imitator_optim_factory = imitator_optim_factory
self._imitator_encoder_factory = imitator_encoder_factory
self._n_critics = n_critics
self._lam = lam
self._n_action_samples = n_action_samples
self._action_flexibility = action_flexibility
self._beta = beta
# initialized in build
self._imitator = None
self._imitator_optim = None
def build(self) -> None:
self._build_imitator()
super().build()
# setup optimizer after the parameters move to GPU
self._build_imitator_optim()
def _build_actor(self) -> None:
self._policy = create_deterministic_residual_policy(
self._observation_shape,
self._action_size,
self._action_flexibility,
self._actor_encoder_factory,
)
def _build_imitator(self) -> None:
self._imitator = create_conditional_vae(
observation_shape=self._observation_shape,
action_size=self._action_size,
latent_size=2 * self._action_size,
beta=self._beta,
min_logstd=-4.0,
max_logstd=15.0,
encoder_factory=self._imitator_encoder_factory,
)
def _build_imitator_optim(self) -> None:
assert self._imitator is not None
self._imitator_optim = self._imitator_optim_factory.create(
self._imitator.parameters(), lr=self._imitator_learning_rate
)
def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._imitator is not None
assert self._policy is not None
assert self._q_func is not None
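        # BCQ actor loss: sample a latent from a clipped standard normal,
        # decode it with the VAE into an in-distribution action, apply the
        # residual perturbation policy, and maximize the first Q-function
        # (by minimizing its negative)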
latent = torch.randn(
batch.observations.shape[0],
2 * self._action_size,
device=self._device,
)
clipped_latent = latent.clamp(-0.5, 0.5)
sampled_action = self._imitator.decode(
batch.observations, clipped_latent
)
action = self._policy(batch.observations, sampled_action)
return -self._q_func(batch.observations, action, "none")[0].mean()
@train_api
@torch_api()
def update_imitator(self, batch: TorchMiniBatch) -> np.ndarray:
assert self._imitator_optim is not None
assert self._imitator is not None
self._imitator_optim.zero_grad()
loss = self._imitator.compute_error(batch.observations, batch.actions)
loss.backward()
self._imitator_optim.step()
return loss.cpu().detach().numpy()
def _repeat_observation(self, x: torch.Tensor) -> torch.Tensor:
# (batch_size, *obs_shape) -> (batch_size, n, *obs_shape)
repeat_shape = (x.shape[0], self._n_action_samples, *x.shape[1:])
repeated_x = x.view(x.shape[0], 1, *x.shape[1:]).expand(repeat_shape)
return repeated_x
def _sample_repeated_action(
self, repeated_x: torch.Tensor, target: bool = False
) -> torch.Tensor:
assert self._imitator is not None
assert self._policy is not None
assert self._targ_policy is not None
# TODO: this seems to be slow with image observation
flattened_x = repeated_x.reshape(-1, *self.observation_shape)
# sample latent variable
latent = torch.randn(
flattened_x.shape[0], 2 * self._action_size, device=self._device
)
clipped_latent = latent.clamp(-0.5, 0.5)
# sample action
sampled_action = self._imitator.decode(flattened_x, clipped_latent)
# add residual action
policy = self._targ_policy if target else self._policy
action = policy(flattened_x, sampled_action)
return action.view(-1, self._n_action_samples, self._action_size)
def _predict_value(
self, repeated_x: torch.Tensor, action: torch.Tensor,
) -> torch.Tensor:
assert self._q_func is not None
# TODO: this seems to be slow with image observation
# (batch_size, n, *obs_shape) -> (batch_size * n, *obs_shape)
flattened_x = repeated_x.reshape(-1, *self.observation_shape)
# (batch_size, n, action_size) -> (batch_size * n, action_size)
        flattened_action = action.view(-1, self.action_size)
        # estimate values
        return self._q_func(flattened_x, flattened_action, "none")
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
# TODO: this seems to be slow with image observation
repeated_x = self._repeat_observation(x)
action = self._sample_repeated_action(repeated_x)
values = self._predict_value(repeated_x, action)[0]
# pick the best (batch_size * n) -> (batch_size,)
index = values.view(-1, self._n_action_samples).argmax(dim=1)
return action[torch.arange(action.shape[0]), index]
def _sample_action(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedError("BCQ does not support sampling action")
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._targ_q_func is not None
# TODO: this seems to be slow with image observation
with torch.no_grad():
repeated_x = self._repeat_observation(batch.next_observations)
actions = self._sample_repeated_action(repeated_x, True)
values = compute_max_with_n_actions(
batch.next_observations, actions, self._targ_q_func, self._lam
)
return values
class DiscreteBCQImpl(DoubleDQNImpl):
_action_flexibility: float
_beta: float
_imitator: Optional[DiscreteImitator]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
learning_rate: float,
optim_factory: OptimizerFactory,
encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
n_critics: int,
target_reduction_type: str,
action_flexibility: float,
beta: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=learning_rate,
optim_factory=optim_factory,
encoder_factory=encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
use_gpu=use_gpu,
scaler=scaler,
reward_scaler=reward_scaler,
)
self._action_flexibility = action_flexibility
self._beta = beta
# initialized in build
self._imitator = None
def _build_network(self) -> None:
super()._build_network()
assert self._q_func is not None
# share convolutional layers if observation is pixel
if isinstance(self._q_func.q_funcs[0].encoder, PixelEncoder):
self._imitator = DiscreteImitator(
self._q_func.q_funcs[0].encoder, self._action_size, self._beta
)
else:
self._imitator = create_discrete_imitator(
self._observation_shape,
self._action_size,
self._beta,
self._encoder_factory,
)
def _build_optim(self) -> None:
assert self._q_func is not None
assert self._imitator is not None
q_func_params = list(self._q_func.parameters())
imitator_params = list(self._imitator.parameters())
# TODO: replace this with a cleaner way
# retrieve unique elements
unique_dict = {}
for param in q_func_params + imitator_params:
unique_dict[param] = param
unique_params = list(unique_dict.values())
self._optim = self._optim_factory.create(
unique_params, lr=self._learning_rate
)
def compute_loss(
self, batch: TorchMiniBatch, q_tpn: torch.Tensor
) -> torch.Tensor:
assert self._imitator is not None
loss = super().compute_loss(batch, q_tpn)
imitator_loss = self._imitator.compute_error(
batch.observations, batch.actions.long()
)
return loss + imitator_loss
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._imitator is not None
assert self._q_func is not None
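        # discrete BCQ action filtering: keep only actions whose imitator
        # log-probability is within log(action_flexibility) of the best one,
        # then pick the highest-value action among the remaining candidates
        # (values are shifted to be non-negative so that masked-out actions
        # cannot win the argmax)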
log_probs = self._imitator(x)
ratio = log_probs - log_probs.max(dim=1, keepdim=True).values
mask = (ratio > math.log(self._action_flexibility)).float()
value = self._q_func(x)
normalized_value = value - value.min(dim=1, keepdim=True).values
action = (normalized_value * cast(torch.Tensor, mask)).argmax(dim=1)
        return action

# --- end of file: d3rlpy/algos/torch/bcq_impl.py ---
import copy
from typing import Optional, Sequence
import numpy as np
import torch
from torch.optim import Optimizer
from ...gpu import Device
from ...models.builders import create_discrete_q_function
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...models.torch import EnsembleDiscreteQFunction, EnsembleQFunction
from ...preprocessing import RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch, hard_sync, torch_api, train_api
from .base import TorchImplBase
from .utility import DiscreteQFunctionMixin
class DQNImpl(DiscreteQFunctionMixin, TorchImplBase):
_learning_rate: float
_optim_factory: OptimizerFactory
_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_gamma: float
_n_critics: int
_target_reduction_type: str
_use_gpu: Optional[Device]
_q_func: Optional[EnsembleDiscreteQFunction]
_targ_q_func: Optional[EnsembleDiscreteQFunction]
_optim: Optional[Optimizer]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
learning_rate: float,
optim_factory: OptimizerFactory,
encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
n_critics: int,
target_reduction_type: str,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
scaler=scaler,
action_scaler=None,
reward_scaler=reward_scaler,
)
self._learning_rate = learning_rate
self._optim_factory = optim_factory
self._encoder_factory = encoder_factory
self._q_func_factory = q_func_factory
self._gamma = gamma
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._use_gpu = use_gpu
# initialized in build
self._q_func = None
self._targ_q_func = None
self._optim = None
def build(self) -> None:
# setup torch models
self._build_network()
# setup target network
self._targ_q_func = copy.deepcopy(self._q_func)
if self._use_gpu:
self.to_gpu(self._use_gpu)
else:
self.to_cpu()
# setup optimizer after the parameters move to GPU
self._build_optim()
def _build_network(self) -> None:
self._q_func = create_discrete_q_function(
self._observation_shape,
self._action_size,
self._encoder_factory,
self._q_func_factory,
n_ensembles=self._n_critics,
)
def _build_optim(self) -> None:
assert self._q_func is not None
self._optim = self._optim_factory.create(
self._q_func.parameters(), lr=self._learning_rate
)
@train_api
@torch_api(scaler_targets=["obs_t", "obs_tpn"])
def update(self, batch: TorchMiniBatch) -> np.ndarray:
assert self._optim is not None
self._optim.zero_grad()
q_tpn = self.compute_target(batch)
loss = self.compute_loss(batch, q_tpn)
loss.backward()
self._optim.step()
return loss.cpu().detach().numpy()
def compute_loss(
self, batch: TorchMiniBatch, q_tpn: torch.Tensor,
) -> torch.Tensor:
assert self._q_func is not None
return self._q_func.compute_error(
obs_t=batch.observations,
act_t=batch.actions.long(),
rew_tp1=batch.next_rewards,
q_tp1=q_tpn,
ter_tp1=batch.terminals,
gamma=self._gamma ** batch.n_steps,
use_independent_target=self._target_reduction_type == "none",
masks=batch.masks,
)
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._targ_q_func is not None
with torch.no_grad():
next_actions = self._targ_q_func(batch.next_observations)
max_action = next_actions.argmax(dim=1)
return self._targ_q_func.compute_target(
batch.next_observations,
max_action,
reduction=self._target_reduction_type,
)
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._q_func is not None
return self._q_func(x).argmax(dim=1)
def _sample_action(self, x: torch.Tensor) -> torch.Tensor:
return self._predict_best_action(x)
def update_target(self) -> None:
assert self._q_func is not None
assert self._targ_q_func is not None
hard_sync(self._targ_q_func, self._q_func)
@property
def q_function(self) -> EnsembleQFunction:
assert self._q_func
return self._q_func
@property
def q_function_optim(self) -> Optimizer:
assert self._optim
return self._optim
class DoubleDQNImpl(DQNImpl):
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._targ_q_func is not None
with torch.no_grad():
action = self._predict_best_action(batch.next_observations)
return self._targ_q_func.compute_target(
batch.next_observations,
action,
reduction=self._target_reduction_type,
            )

# --- end of file: d3rlpy/algos/torch/dqn_impl.py ---
import copy
from abc import ABCMeta, abstractmethod
from typing import Optional, Sequence
import numpy as np
import torch
from torch.optim import Optimizer
from ...gpu import Device
from ...models.builders import (
create_continuous_q_function,
create_deterministic_policy,
)
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...models.torch import (
DeterministicPolicy,
EnsembleContinuousQFunction,
EnsembleQFunction,
Policy,
)
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch, soft_sync, torch_api, train_api
from .base import TorchImplBase
from .utility import ContinuousQFunctionMixin
class DDPGBaseImpl(ContinuousQFunctionMixin, TorchImplBase, metaclass=ABCMeta):
_actor_learning_rate: float
_critic_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_gamma: float
_tau: float
_n_critics: int
_target_reduction_type: str
_use_gpu: Optional[Device]
_q_func: Optional[EnsembleContinuousQFunction]
_policy: Optional[Policy]
_targ_q_func: Optional[EnsembleContinuousQFunction]
_targ_policy: Optional[Policy]
_actor_optim: Optional[Optimizer]
_critic_optim: Optional[Optimizer]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
target_reduction_type: str,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._actor_encoder_factory = actor_encoder_factory
self._critic_encoder_factory = critic_encoder_factory
self._q_func_factory = q_func_factory
self._gamma = gamma
self._tau = tau
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._use_gpu = use_gpu
# initialized in build
self._q_func = None
self._policy = None
self._targ_q_func = None
self._targ_policy = None
self._actor_optim = None
self._critic_optim = None
def build(self) -> None:
# setup torch models
self._build_critic()
self._build_actor()
# setup target networks
self._targ_q_func = copy.deepcopy(self._q_func)
self._targ_policy = copy.deepcopy(self._policy)
if self._use_gpu:
self.to_gpu(self._use_gpu)
else:
self.to_cpu()
# setup optimizer after the parameters move to GPU
self._build_critic_optim()
self._build_actor_optim()
def _build_critic(self) -> None:
self._q_func = create_continuous_q_function(
self._observation_shape,
self._action_size,
self._critic_encoder_factory,
self._q_func_factory,
n_ensembles=self._n_critics,
)
def _build_critic_optim(self) -> None:
assert self._q_func is not None
self._critic_optim = self._critic_optim_factory.create(
self._q_func.parameters(), lr=self._critic_learning_rate
)
@abstractmethod
def _build_actor(self) -> None:
pass
def _build_actor_optim(self) -> None:
assert self._policy is not None
self._actor_optim = self._actor_optim_factory.create(
self._policy.parameters(), lr=self._actor_learning_rate
)
@train_api
@torch_api()
def update_critic(self, batch: TorchMiniBatch) -> np.ndarray:
assert self._critic_optim is not None
self._critic_optim.zero_grad()
q_tpn = self.compute_target(batch)
loss = self.compute_critic_loss(batch, q_tpn)
loss.backward()
self._critic_optim.step()
return loss.cpu().detach().numpy()
def compute_critic_loss(
self, batch: TorchMiniBatch, q_tpn: torch.Tensor
) -> torch.Tensor:
assert self._q_func is not None
return self._q_func.compute_error(
obs_t=batch.observations,
act_t=batch.actions,
rew_tp1=batch.next_rewards,
q_tp1=q_tpn,
ter_tp1=batch.terminals,
gamma=self._gamma ** batch.n_steps,
use_independent_target=self._target_reduction_type == "none",
masks=batch.masks,
)
@train_api
@torch_api()
def update_actor(self, batch: TorchMiniBatch) -> np.ndarray:
assert self._q_func is not None
assert self._actor_optim is not None
# Q function should be inference mode for stability
self._q_func.eval()
self._actor_optim.zero_grad()
loss = self.compute_actor_loss(batch)
loss.backward()
self._actor_optim.step()
return loss.cpu().detach().numpy()
@abstractmethod
def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
pass
@abstractmethod
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
pass
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._policy is not None
return self._policy.best_action(x)
def _sample_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._policy is not None
return self._policy.sample(x)
def update_critic_target(self) -> None:
assert self._q_func is not None
assert self._targ_q_func is not None
soft_sync(self._targ_q_func, self._q_func, self._tau)
def update_actor_target(self) -> None:
assert self._policy is not None
assert self._targ_policy is not None
soft_sync(self._targ_policy, self._policy, self._tau)
@property
def policy(self) -> Policy:
assert self._policy
return self._policy
@property
def policy_optim(self) -> Optimizer:
assert self._actor_optim
return self._actor_optim
@property
def q_function(self) -> EnsembleQFunction:
assert self._q_func
return self._q_func
@property
def q_function_optim(self) -> Optimizer:
assert self._critic_optim
return self._critic_optim
class DDPGImpl(DDPGBaseImpl):
_policy: Optional[DeterministicPolicy]
_targ_policy: Optional[DeterministicPolicy]
def _build_actor(self) -> None:
self._policy = create_deterministic_policy(
self._observation_shape,
self._action_size,
self._actor_encoder_factory,
)
def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._policy is not None
assert self._q_func is not None
action = self._policy(batch.observations)
q_t = self._q_func(batch.observations, action, "none")[0]
return -q_t.mean()
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._targ_q_func is not None
assert self._targ_policy is not None
with torch.no_grad():
action = self._targ_policy(batch.next_observations)
return self._targ_q_func.compute_target(
batch.next_observations,
action.clamp(-1.0, 1.0),
reduction=self._target_reduction_type,
)
def _sample_action(self, x: torch.Tensor) -> torch.Tensor:
        return self._predict_best_action(x)

# --- end of file: d3rlpy/algos/torch/ddpg_impl.py ---
from typing import Optional, Sequence, cast
import numpy as np
import torch
from torch.optim import Optimizer
from ...gpu import Device
from ...models.torch.policies import Policy
from ...models.torch.q_functions.ensemble_q_function import EnsembleQFunction
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import (
eval_api,
freeze,
get_state_dict,
hard_sync,
map_location,
reset_optimizer_states,
set_state_dict,
sync_optimizer_state,
to_cpu,
to_cuda,
torch_api,
unfreeze,
)
from ..base import AlgoImplBase
class TorchImplBase(AlgoImplBase):
_observation_shape: Sequence[int]
_action_size: int
_scaler: Optional[Scaler]
_action_scaler: Optional[ActionScaler]
_reward_scaler: Optional[RewardScaler]
_device: str
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
self._observation_shape = observation_shape
self._action_size = action_size
self._scaler = scaler
self._action_scaler = action_scaler
self._reward_scaler = reward_scaler
self._device = "cpu:0"
@eval_api
@torch_api(scaler_targets=["x"])
def predict_best_action(self, x: torch.Tensor) -> np.ndarray:
assert x.ndim > 1, "Input must have batch dimension."
with torch.no_grad():
action = self._predict_best_action(x)
# transform action back to the original range
if self._action_scaler:
action = self._action_scaler.reverse_transform(action)
return action.cpu().detach().numpy()
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
@eval_api
@torch_api(scaler_targets=["x"])
def sample_action(self, x: torch.Tensor) -> np.ndarray:
assert x.ndim > 1, "Input must have batch dimension."
with torch.no_grad():
action = self._sample_action(x)
# transform action back to the original range
if self._action_scaler:
action = self._action_scaler.reverse_transform(action)
return action.cpu().detach().numpy()
def _sample_action(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
@eval_api
def save_policy(self, fname: str) -> None:
dummy_x = torch.rand(1, *self.observation_shape, device=self._device)
        # workaround until PyTorch 1.6
freeze(self)
# dummy function to select best actions
def _func(x: torch.Tensor) -> torch.Tensor:
if self._scaler:
x = self._scaler.transform(x)
action = self._predict_best_action(x)
if self._action_scaler:
action = self._action_scaler.reverse_transform(action)
return action
traced_script = torch.jit.trace(_func, dummy_x, check_trace=False)
if fname.endswith(".onnx"):
# currently, PyTorch cannot directly export function as ONNX.
torch.onnx.export(
traced_script,
dummy_x,
fname,
export_params=True,
opset_version=11,
input_names=["input_0"],
output_names=["output_0"],
example_outputs=traced_script(dummy_x),
)
elif fname.endswith(".pt"):
traced_script.save(fname)
else:
raise ValueError(
f"invalid format type: {fname}."
" .pt and .onnx extensions are currently supported."
)
        # workaround until PyTorch 1.6
unfreeze(self)
def to_gpu(self, device: Device = Device()) -> None:
self._device = f"cuda:{device.get_id()}"
to_cuda(self, self._device)
def to_cpu(self) -> None:
self._device = "cpu:0"
to_cpu(self)
def save_model(self, fname: str) -> None:
torch.save(get_state_dict(self), fname)
def load_model(self, fname: str) -> None:
chkpt = torch.load(fname, map_location=map_location(self._device))
set_state_dict(self, chkpt)
@property
def policy(self) -> Policy:
raise NotImplementedError
def copy_policy_from(self, impl: AlgoImplBase) -> None:
impl = cast("TorchImplBase", impl)
if not isinstance(impl.policy, type(self.policy)):
raise ValueError(
f"Invalid policy type: expected={type(self.policy)},"
f"actual={type(impl.policy)}"
)
hard_sync(self.policy, impl.policy)
@property
def policy_optim(self) -> Optimizer:
raise NotImplementedError
def copy_policy_optim_from(self, impl: AlgoImplBase) -> None:
impl = cast("TorchImplBase", impl)
if not isinstance(impl.policy_optim, type(self.policy_optim)):
raise ValueError(
"Invalid policy optimizer type: "
f"expected={type(self.policy_optim)},"
f"actual={type(impl.policy_optim)}"
)
sync_optimizer_state(self.policy_optim, impl.policy_optim)
@property
def q_function(self) -> EnsembleQFunction:
raise NotImplementedError
def copy_q_function_from(self, impl: AlgoImplBase) -> None:
impl = cast("TorchImplBase", impl)
q_func = self.q_function.q_funcs[0]
if not isinstance(impl.q_function.q_funcs[0], type(q_func)):
raise ValueError(
f"Invalid Q-function type: expected={type(q_func)},"
f"actual={type(impl.q_function.q_funcs[0])}"
)
hard_sync(self.q_function, impl.q_function)
@property
def q_function_optim(self) -> Optimizer:
raise NotImplementedError
def copy_q_function_optim_from(self, impl: AlgoImplBase) -> None:
impl = cast("TorchImplBase", impl)
if not isinstance(impl.q_function_optim, type(self.q_function_optim)):
raise ValueError(
"Invalid Q-function optimizer type: "
f"expected={type(self.q_function_optim)}",
f"actual={type(impl.q_function_optim)}",
)
sync_optimizer_state(self.q_function_optim, impl.q_function_optim)
def reset_optimizer_states(self) -> None:
reset_optimizer_states(self)
@property
def observation_shape(self) -> Sequence[int]:
return self._observation_shape
@property
def action_size(self) -> int:
return self._action_size
@property
def device(self) -> str:
return self._device
@property
def scaler(self) -> Optional[Scaler]:
return self._scaler
@property
def action_scaler(self) -> Optional[ActionScaler]:
return self._action_scaler
@property
def reward_scaler(self) -> Optional[RewardScaler]:
        return self._reward_scaler

# --- end of file: d3rlpy/algos/torch/base.py ---
from typing import Optional, Sequence
import torch
from ...gpu import Device
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch
from .td3_impl import TD3Impl
class TD3PlusBCImpl(TD3Impl):
_alpha: float
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
target_reduction_type: str,
target_smoothing_sigma: float,
target_smoothing_clip: float,
alpha: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
target_smoothing_sigma=target_smoothing_sigma,
target_smoothing_clip=target_smoothing_clip,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._alpha = alpha
def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._policy is not None
assert self._q_func is not None
action = self._policy(batch.observations)
q_t = self._q_func(batch.observations, action, "none")[0]
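        # TD3+BC: scale the Q-term by alpha / mean|Q| (detached) so that it
        # stays on a comparable scale with the behavior-cloning MSE term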
lam = self._alpha / (q_t.abs().mean()).detach()
        return lam * -q_t.mean() + ((batch.actions - action) ** 2).mean()

# --- end of file: d3rlpy/algos/torch/td3_plus_bc_impl.py ---
from abc import ABCMeta, abstractmethod
from typing import Optional, Sequence, Union
import numpy as np
import torch
from torch.optim import Optimizer
from ...gpu import Device
from ...models.builders import (
create_deterministic_policy,
create_deterministic_regressor,
create_discrete_imitator,
create_probablistic_regressor,
create_squashed_normal_policy,
)
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.torch import (
DeterministicRegressor,
DiscreteImitator,
Imitator,
Policy,
ProbablisticRegressor,
)
from ...preprocessing import ActionScaler, Scaler
from ...torch_utility import hard_sync, torch_api, train_api
from .base import TorchImplBase
class BCBaseImpl(TorchImplBase, metaclass=ABCMeta):
_learning_rate: float
_optim_factory: OptimizerFactory
_encoder_factory: EncoderFactory
_use_gpu: Optional[Device]
_imitator: Optional[Imitator]
_optim: Optional[Optimizer]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
learning_rate: float,
optim_factory: OptimizerFactory,
encoder_factory: EncoderFactory,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=None,
)
self._learning_rate = learning_rate
self._optim_factory = optim_factory
self._encoder_factory = encoder_factory
self._use_gpu = use_gpu
# initialized in build
self._imitator = None
self._optim = None
def build(self) -> None:
self._build_network()
if self._use_gpu:
self.to_gpu(self._use_gpu)
else:
self.to_cpu()
self._build_optim()
@abstractmethod
def _build_network(self) -> None:
pass
def _build_optim(self) -> None:
assert self._imitator is not None
self._optim = self._optim_factory.create(
self._imitator.parameters(), lr=self._learning_rate
)
@train_api
@torch_api(scaler_targets=["obs_t"], action_scaler_targets=["act_t"])
def update_imitator(
self, obs_t: torch.Tensor, act_t: torch.Tensor
) -> np.ndarray:
assert self._optim is not None
self._optim.zero_grad()
loss = self.compute_loss(obs_t, act_t)
loss.backward()
self._optim.step()
return loss.cpu().detach().numpy()
def compute_loss(
self, obs_t: torch.Tensor, act_t: torch.Tensor
) -> torch.Tensor:
assert self._imitator is not None
return self._imitator.compute_error(obs_t, act_t)
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._imitator is not None
return self._imitator(x)
def predict_value(
self, x: np.ndarray, action: np.ndarray, with_std: bool
) -> np.ndarray:
raise NotImplementedError("BC does not support value estimation")
class BCImpl(BCBaseImpl):
_policy_type: str
_imitator: Optional[Union[DeterministicRegressor, ProbablisticRegressor]]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
learning_rate: float,
optim_factory: OptimizerFactory,
encoder_factory: EncoderFactory,
policy_type: str,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=learning_rate,
optim_factory=optim_factory,
encoder_factory=encoder_factory,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
)
self._policy_type = policy_type
def _build_network(self) -> None:
if self._policy_type == "deterministic":
self._imitator = create_deterministic_regressor(
self._observation_shape,
self._action_size,
self._encoder_factory,
)
elif self._policy_type == "stochastic":
self._imitator = create_probablistic_regressor(
self._observation_shape,
self._action_size,
self._encoder_factory,
min_logstd=-4.0,
max_logstd=15.0,
)
else:
raise ValueError("invalid policy_type: {self._policy_type}")
@property
def policy(self) -> Policy:
assert self._imitator
policy: Policy
if self._policy_type == "deterministic":
policy = create_deterministic_policy(
self._observation_shape,
self._action_size,
self._encoder_factory,
)
elif self._policy_type == "stochastic":
policy = create_squashed_normal_policy(
self._observation_shape,
self._action_size,
self._encoder_factory,
min_logstd=-20.0,
max_logstd=2.0,
)
else:
raise ValueError(f"invalid policy_type: {self._policy_type}")
# copy parameters
hard_sync(policy, self._imitator)
return policy
@property
def policy_optim(self) -> Optimizer:
assert self._optim
return self._optim
class DiscreteBCImpl(BCBaseImpl):
_beta: float
_imitator: Optional[DiscreteImitator]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
learning_rate: float,
optim_factory: OptimizerFactory,
encoder_factory: EncoderFactory,
beta: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=learning_rate,
optim_factory=optim_factory,
encoder_factory=encoder_factory,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=None,
)
self._beta = beta
def _build_network(self) -> None:
self._imitator = create_discrete_imitator(
self._observation_shape,
self._action_size,
self._beta,
self._encoder_factory,
)
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._imitator is not None
return self._imitator(x).argmax(dim=1)
def compute_loss(
self, obs_t: torch.Tensor, act_t: torch.Tensor
) -> torch.Tensor:
assert self._imitator is not None
return self._imitator.compute_error(obs_t, act_t.long()) | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/torch/bc_impl.py | bc_impl.py |
import math
from typing import Optional, Sequence, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch.optim import Optimizer
from ...gpu import Device
from ...models.builders import create_parameter
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...models.torch import Parameter
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch, torch_api, train_api
from .dqn_impl import DoubleDQNImpl
from .sac_impl import SACImpl
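# Conservative Q-Learning (CQL) on top of SAC: the critic loss gains a
# regularizer that pushes down Q-values of policy/random actions and pushes up
# Q-values of dataset actions, weighted by a learned multiplier alpha.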
class CQLImpl(SACImpl):
_alpha_learning_rate: float
_alpha_optim_factory: OptimizerFactory
_initial_alpha: float
_alpha_threshold: float
_conservative_weight: float
_n_action_samples: int
_soft_q_backup: bool
_log_alpha: Optional[Parameter]
_alpha_optim: Optional[Optimizer]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
temp_learning_rate: float,
alpha_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
temp_optim_factory: OptimizerFactory,
alpha_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
target_reduction_type: str,
initial_temperature: float,
initial_alpha: float,
alpha_threshold: float,
conservative_weight: float,
n_action_samples: int,
soft_q_backup: bool,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
temp_learning_rate=temp_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
temp_optim_factory=temp_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
initial_temperature=initial_temperature,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._alpha_learning_rate = alpha_learning_rate
self._alpha_optim_factory = alpha_optim_factory
self._initial_alpha = initial_alpha
self._alpha_threshold = alpha_threshold
self._conservative_weight = conservative_weight
self._n_action_samples = n_action_samples
self._soft_q_backup = soft_q_backup
# initialized in build
self._log_alpha = None
self._alpha_optim = None
def build(self) -> None:
self._build_alpha()
super().build()
self._build_alpha_optim()
def _build_alpha(self) -> None:
initial_val = math.log(self._initial_alpha)
self._log_alpha = create_parameter((1, 1), initial_val)
def _build_alpha_optim(self) -> None:
assert self._log_alpha is not None
self._alpha_optim = self._alpha_optim_factory.create(
self._log_alpha.parameters(), lr=self._alpha_learning_rate
)
def compute_critic_loss(
self, batch: TorchMiniBatch, q_tpn: torch.Tensor
) -> torch.Tensor:
loss = super().compute_critic_loss(batch, q_tpn)
conservative_loss = self._compute_conservative_loss(
batch.observations, batch.actions, batch.next_observations
)
return loss + conservative_loss
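    # alpha acts as a Lagrange multiplier: it grows while the conservative
    # loss exceeds ``alpha_threshold`` and shrinks otherwise.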
@train_api
@torch_api()
    def update_alpha(
        self, batch: TorchMiniBatch
    ) -> Tuple[np.ndarray, np.ndarray]:
assert self._alpha_optim is not None
assert self._q_func is not None
assert self._log_alpha is not None
# Q function should be inference mode for stability
self._q_func.eval()
self._alpha_optim.zero_grad()
# the original implementation does scale the loss value
loss = -self._compute_conservative_loss(
batch.observations, batch.actions, batch.next_observations
)
loss.backward()
self._alpha_optim.step()
cur_alpha = self._log_alpha().exp().cpu().detach().numpy()[0][0]
return loss.cpu().detach().numpy(), cur_alpha
def _compute_policy_is_values(
self, policy_obs: torch.Tensor, value_obs: torch.Tensor
) -> torch.Tensor:
assert self._policy is not None
assert self._q_func is not None
with torch.no_grad():
policy_actions, n_log_probs = self._policy.sample_n_with_log_prob(
policy_obs, self._n_action_samples
)
obs_shape = value_obs.shape
repeated_obs = value_obs.expand(self._n_action_samples, *obs_shape)
# (n, batch, observation) -> (batch, n, observation)
transposed_obs = repeated_obs.transpose(0, 1)
# (batch, n, observation) -> (batch * n, observation)
flat_obs = transposed_obs.reshape(-1, *obs_shape[1:])
# (batch, n, action) -> (batch * n, action)
flat_policy_acts = policy_actions.reshape(-1, self.action_size)
# estimate action-values for policy actions
policy_values = self._q_func(flat_obs, flat_policy_acts, "none")
policy_values = policy_values.view(
self._n_critics, obs_shape[0], self._n_action_samples
)
log_probs = n_log_probs.view(1, -1, self._n_action_samples)
# importance sampling
return policy_values - log_probs
def _compute_random_is_values(self, obs: torch.Tensor) -> torch.Tensor:
assert self._q_func is not None
repeated_obs = obs.expand(self._n_action_samples, *obs.shape)
# (n, batch, observation) -> (batch, n, observation)
transposed_obs = repeated_obs.transpose(0, 1)
# (batch, n, observation) -> (batch * n, observation)
flat_obs = transposed_obs.reshape(-1, *obs.shape[1:])
# estimate action-values for actions from uniform distribution
# uniform distribution between [-1.0, 1.0]
flat_shape = (obs.shape[0] * self._n_action_samples, self._action_size)
zero_tensor = torch.zeros(flat_shape, device=self._device)
random_actions = zero_tensor.uniform_(-1.0, 1.0)
random_values = self._q_func(flat_obs, random_actions, "none")
random_values = random_values.view(
self._n_critics, obs.shape[0], self._n_action_samples
)
random_log_probs = math.log(0.5 ** self._action_size)
# importance sampling
return random_values - random_log_probs
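    # logsumexp over current-policy, next-step-policy and uniform-random
    # action values approximates max_a Q(s, a); subtracting the dataset action
    # values yields the conservative penalty.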
def _compute_conservative_loss(
self, obs_t: torch.Tensor, act_t: torch.Tensor, obs_tp1: torch.Tensor
) -> torch.Tensor:
assert self._policy is not None
assert self._q_func is not None
assert self._log_alpha is not None
policy_values_t = self._compute_policy_is_values(obs_t, obs_t)
policy_values_tp1 = self._compute_policy_is_values(obs_tp1, obs_t)
random_values = self._compute_random_is_values(obs_t)
# compute logsumexp
# (n critics, batch, 3 * n samples) -> (n critics, batch, 1)
target_values = torch.cat(
[policy_values_t, policy_values_tp1, random_values], dim=2
)
logsumexp = torch.logsumexp(target_values, dim=2, keepdim=True)
# estimate action-values for data actions
data_values = self._q_func(obs_t, act_t, "none")
loss = logsumexp.mean(dim=0).mean() - data_values.mean(dim=0).mean()
scaled_loss = self._conservative_weight * loss
# clip for stability
clipped_alpha = self._log_alpha().exp().clamp(0, 1e6)[0][0]
return clipped_alpha * (scaled_loss - self._alpha_threshold)
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
if self._soft_q_backup:
target_value = super().compute_target(batch)
else:
target_value = self._compute_deterministic_target(batch)
return target_value
def _compute_deterministic_target(
self, batch: TorchMiniBatch
) -> torch.Tensor:
assert self._policy
assert self._targ_q_func
with torch.no_grad():
action = self._policy.best_action(batch.next_observations)
return self._targ_q_func.compute_target(
batch.next_observations,
action,
reduction=self._target_reduction_type,
)
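# Discrete-action CQL: Double DQN loss plus
# alpha * (logsumexp_a Q(s, a) - Q(s, a_data)).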
class DiscreteCQLImpl(DoubleDQNImpl):
_alpha: float
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
learning_rate: float,
optim_factory: OptimizerFactory,
encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
n_critics: int,
target_reduction_type: str,
alpha: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=learning_rate,
optim_factory=optim_factory,
encoder_factory=encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
use_gpu=use_gpu,
scaler=scaler,
reward_scaler=reward_scaler,
)
self._alpha = alpha
def compute_loss(
self, batch: TorchMiniBatch, q_tpn: torch.Tensor,
) -> torch.Tensor:
loss = super().compute_loss(batch, q_tpn)
conservative_loss = self._compute_conservative_loss(
batch.observations, batch.actions.long()
)
return loss + self._alpha * conservative_loss
def _compute_conservative_loss(
self, obs_t: torch.Tensor, act_t: torch.Tensor
) -> torch.Tensor:
assert self._q_func is not None
# compute logsumexp
policy_values = self._q_func(obs_t)
logsumexp = torch.logsumexp(policy_values, dim=1, keepdim=True)
# estimate action-values under data distribution
one_hot = F.one_hot(act_t.view(-1), num_classes=self.action_size)
data_values = (self._q_func(obs_t) * one_hot).sum(dim=1, keepdim=True)
return (logsumexp - data_values).mean() | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/torch/cql_impl.py | cql_impl.py |
from abc import ABCMeta, abstractmethod
from typing import Any, Optional, Sequence
import numpy as np
import torch
from torch.optim import Optimizer
from ...gpu import Device
from ...models.builders import (
create_categorical_policy,
create_squashed_normal_policy,
create_value_function,
)
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.torch import (
CategoricalPolicy,
Policy,
SquashedNormalPolicy,
ValueFunction,
squash_action,
)
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import eval_api, torch_api, train_api
from .base import TorchImplBase
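# Advantage-Weighted Regression (AWR): a value function is fit to returns and
# the policy maximizes log-likelihood of dataset actions weighted by
# exponentiated advantages. The weights are computed by the caller and passed
# into ``update_actor``.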
class AWRBaseImpl(TorchImplBase, metaclass=ABCMeta):
_actor_learning_rate: float
_critic_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_use_gpu: Optional[Device]
_v_func: Optional[ValueFunction]
_policy: Optional[Policy]
_critic_optim: Optional[Optimizer]
_actor_optim: Optional[Optimizer]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._actor_encoder_factory = actor_encoder_factory
self._critic_encoder_factory = critic_encoder_factory
self._use_gpu = use_gpu
# initialized in build
self._v_func = None
self._policy = None
self._critic_optim = None
self._actor_optim = None
def build(self) -> None:
# setup torch models
self._build_critic()
self._build_actor()
if self._use_gpu:
self.to_gpu(self._use_gpu)
else:
self.to_cpu()
# setup optimizer after the parameters move to GPU
self._build_critic_optim()
self._build_actor_optim()
def _build_critic(self) -> None:
self._v_func = create_value_function(
self._observation_shape, self._critic_encoder_factory
)
def _build_critic_optim(self) -> None:
assert self._v_func is not None
self._critic_optim = self._critic_optim_factory.create(
self._v_func.parameters(), lr=self._critic_learning_rate
)
@abstractmethod
def _build_actor(self) -> None:
pass
def _build_actor_optim(self) -> None:
assert self._policy is not None
self._actor_optim = self._actor_optim_factory.create(
self._policy.parameters(), lr=self._actor_learning_rate
)
@train_api
@torch_api(scaler_targets=["observation"])
def update_critic(
self, observation: torch.Tensor, value: torch.Tensor
) -> np.ndarray:
assert self._critic_optim is not None
self._critic_optim.zero_grad()
loss = self.compute_critic_loss(observation, value)
loss.backward()
self._critic_optim.step()
return loss.cpu().detach().numpy()
def compute_critic_loss(
self, observation: torch.Tensor, value: torch.Tensor
) -> torch.Tensor:
assert self._v_func is not None
return self._v_func.compute_error(observation, value)
@train_api
@torch_api(scaler_targets=["observation"], action_scaler_targets=["action"])
def update_actor(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> np.ndarray:
assert self._actor_optim is not None
self._actor_optim.zero_grad()
loss = self.compute_actor_loss(observation, action, weight)
loss.backward()
self._actor_optim.step()
return loss.cpu().detach().numpy()
def compute_actor_loss(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> torch.Tensor:
return self._compute_actor_loss(observation, action, weight)
@abstractmethod
def _compute_actor_loss(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> torch.Tensor:
pass
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._policy is not None
return self._policy.best_action(x)
def _sample_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._policy is not None
return self._policy.sample(x)
@eval_api
@torch_api(scaler_targets=["x"])
def predict_value(
self, x: torch.Tensor, *args: Any, **kwargs: Any
) -> np.ndarray:
assert self._v_func is not None
with torch.no_grad():
return self._v_func(x).view(-1).cpu().detach().numpy()
class AWRImpl(AWRBaseImpl):
_policy: Optional[SquashedNormalPolicy]
def _build_actor(self) -> None:
self._policy = create_squashed_normal_policy(
self._observation_shape,
self._action_size,
self._actor_encoder_factory,
)
def _compute_actor_loss(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> torch.Tensor:
assert self._policy is not None
dist = self._policy.dist(observation)
# unnormalize action via inverse tanh function
unnormalized_action = torch.atanh(action.clamp(-0.999999, 0.999999))
# compute log probability
_, log_probs = squash_action(dist, unnormalized_action)
return -(weight * log_probs).mean()
class DiscreteAWRImpl(AWRBaseImpl):
_policy: Optional[CategoricalPolicy]
def _build_actor(self) -> None:
self._policy = create_categorical_policy(
self._observation_shape,
self._action_size,
self._actor_encoder_factory,
)
def _compute_actor_loss(
self,
observation: torch.Tensor,
action: torch.Tensor,
weight: torch.Tensor,
) -> torch.Tensor:
assert self._policy is not None
dist = self._policy.dist(observation)
log_probs = dist.log_prob(action).view(observation.shape[0], -1)
return -(weight * log_probs.sum(dim=1, keepdim=True)).mean() | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/torch/awr_impl.py | awr_impl.py |
from typing import Optional, Sequence
import torch
import torch.nn.functional as F
from ...gpu import Device
from ...models.builders import create_squashed_normal_policy
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...models.torch import SquashedNormalPolicy, squash_action
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch, hard_sync
from .ddpg_impl import DDPGBaseImpl
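# Critic-Regularized Regression (CRR): the actor maximizes log-likelihood of
# dataset actions weighted by an advantage-based term ("binary" indicator or
# clipped exponential), and inference resamples candidate actions by their
# Q-values (critic-weighted policy).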
class CRRImpl(DDPGBaseImpl):
_beta: float
_n_action_samples: int
_advantage_type: str
_weight_type: str
_max_weight: float
_policy: Optional[SquashedNormalPolicy]
_targ_policy: Optional[SquashedNormalPolicy]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
beta: float,
n_action_samples: int,
advantage_type: str,
weight_type: str,
max_weight: float,
n_critics: int,
tau: float,
target_reduction_type: str,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._beta = beta
self._n_action_samples = n_action_samples
self._advantage_type = advantage_type
self._weight_type = weight_type
self._max_weight = max_weight
def _build_actor(self) -> None:
self._policy = create_squashed_normal_policy(
self._observation_shape,
self._action_size,
self._actor_encoder_factory,
)
def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._policy is not None
dist = self._policy.dist(batch.observations)
# unnormalize action via inverse tanh function
clipped_actions = batch.actions.clamp(-0.999999, 0.999999)
unnormalized_act_t = torch.atanh(clipped_actions)
# compute log probability
_, log_probs = squash_action(dist, unnormalized_act_t)
weight = self._compute_weight(batch.observations, batch.actions)
return -(log_probs * weight).mean()
def _compute_weight(
self, obs_t: torch.Tensor, act_t: torch.Tensor
) -> torch.Tensor:
advantages = self._compute_advantage(obs_t, act_t)
if self._weight_type == "binary":
return (advantages > 0.0).float()
elif self._weight_type == "exp":
return (advantages / self._beta).exp().clamp(0.0, self._max_weight)
raise ValueError(f"invalid weight type: {self._weight_type}.")
def _compute_advantage(
self, obs_t: torch.Tensor, act_t: torch.Tensor
) -> torch.Tensor:
assert self._q_func is not None
assert self._policy is not None
with torch.no_grad():
batch_size = obs_t.shape[0]
# (batch_size, N, action)
policy_actions = self._policy.sample_n(
obs_t, self._n_action_samples
)
flat_actions = policy_actions.reshape(-1, self._action_size)
# repeat observation
# (batch_size, obs_size) -> (batch_size, 1, obs_size)
reshaped_obs_t = obs_t.view(batch_size, 1, *obs_t.shape[1:])
            # (batch_size, 1, obs_size) -> (batch_size, N, obs_size)
repeated_obs_t = reshaped_obs_t.expand(
batch_size, self._n_action_samples, *obs_t.shape[1:]
)
# (batch_size, N, obs_size) -> (batch_size * N, obs_size)
flat_obs_t = repeated_obs_t.reshape(-1, *obs_t.shape[1:])
flat_values = self._q_func(flat_obs_t, flat_actions)
reshaped_values = flat_values.view(obs_t.shape[0], -1, 1)
if self._advantage_type == "mean":
values = reshaped_values.mean(dim=1)
elif self._advantage_type == "max":
values = reshaped_values.max(dim=1).values
else:
raise ValueError(
f"invalid advantage type: {self._advantage_type}."
)
return self._q_func(obs_t, act_t) - values
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._targ_q_func is not None
assert self._targ_policy is not None
with torch.no_grad():
action = self._targ_policy.sample(batch.next_observations)
return self._targ_q_func.compute_target(
batch.next_observations,
action.clamp(-1.0, 1.0),
reduction=self._target_reduction_type,
)
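    # critic-weighted policy (CWP): sample N candidate actions, then resample
    # one of them with probability proportional to softmax of their Q-values.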
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._policy is not None
assert self._q_func is not None
# compute CWP
actions = self._policy.onnx_safe_sample_n(x, self._n_action_samples)
# (batch_size, N, action_size) -> (batch_size * N, action_size)
flat_actions = actions.reshape(-1, self._action_size)
# repeat observation
# (batch_size, obs_size) -> (batch_size, 1, obs_size)
reshaped_obs_t = x.view(x.shape[0], 1, *x.shape[1:])
# (batch_size, 1, obs_size) -> (batch_size, N, obs_size)
repeated_obs_t = reshaped_obs_t.expand(
x.shape[0], self._n_action_samples, *x.shape[1:]
)
# (batch_size, N, obs_size) -> (batch_size * N, obs_size)
flat_obs_t = repeated_obs_t.reshape(-1, *x.shape[1:])
# (batch_size * N, 1)
flat_values = self._q_func(flat_obs_t, flat_actions)
# (batch_size * N, 1) -> (batch_size, N)
reshaped_values = flat_values.view(x.shape[0], -1)
# re-sampling
probs = F.softmax(reshaped_values, dim=1)
indices = torch.multinomial(probs, 1, replacement=True)
return actions[torch.arange(x.shape[0]), indices.view(-1)]
def sync_critic_target(self) -> None:
assert self._targ_q_func is not None
assert self._q_func is not None
hard_sync(self._targ_q_func, self._q_func)
def sync_actor_target(self) -> None:
assert self._targ_policy is not None
assert self._policy is not None
hard_sync(self._targ_policy, self._policy) | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/torch/crr_impl.py | crr_impl.py |
from typing import Optional, Sequence
import torch
from ...gpu import Device
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch
from .ddpg_impl import DDPGImpl
class TD3Impl(DDPGImpl):
_target_smoothing_sigma: float
_target_smoothing_clip: float
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
target_reduction_type: str,
target_smoothing_sigma: float,
target_smoothing_clip: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._target_smoothing_sigma = target_smoothing_sigma
self._target_smoothing_clip = target_smoothing_clip
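    # target policy smoothing: clipped Gaussian noise is added to the target
    # policy's action before evaluating the target Q-function.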
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._targ_policy is not None
assert self._targ_q_func is not None
with torch.no_grad():
action = self._targ_policy(batch.next_observations)
# smoothing target
noise = torch.randn(action.shape, device=batch.device)
scaled_noise = self._target_smoothing_sigma * noise
clipped_noise = scaled_noise.clamp(
-self._target_smoothing_clip, self._target_smoothing_clip
)
smoothed_action = action + clipped_noise
clipped_action = smoothed_action.clamp(-1.0, 1.0)
return self._targ_q_func.compute_target(
batch.next_observations,
clipped_action,
reduction=self._target_reduction_type,
) | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/torch/td3_impl.py | td3_impl.py |
from typing import Optional, Tuple, Union
import numpy as np
import torch
from typing_extensions import Protocol
from ...models.torch import (
EnsembleContinuousQFunction,
EnsembleDiscreteQFunction,
)
from ...torch_utility import eval_api, torch_api
class _DiscreteQFunctionProtocol(Protocol):
_q_func: Optional[EnsembleDiscreteQFunction]
class _ContinuousQFunctionProtocol(Protocol):
_q_func: Optional[EnsembleContinuousQFunction]
class DiscreteQFunctionMixin:
@eval_api
@torch_api(scaler_targets=["x"])
def predict_value(
self: _DiscreteQFunctionProtocol,
x: torch.Tensor,
action: torch.Tensor,
with_std: bool,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
assert x.ndim > 1, "Input must have batch dimension."
assert x.shape[0] == action.shape[0]
assert self._q_func is not None
action = action.view(-1).long().cpu().detach().numpy()
with torch.no_grad():
values = self._q_func(x, reduction="none").cpu().detach().numpy()
values = np.transpose(values, [1, 0, 2])
mean_values = values.mean(axis=1)
stds = np.std(values, axis=1)
ret_values = []
ret_stds = []
for v, std, a in zip(mean_values, stds, action):
ret_values.append(v[a])
ret_stds.append(std[a])
if with_std:
return np.array(ret_values), np.array(ret_stds)
return np.array(ret_values)
class ContinuousQFunctionMixin:
@eval_api
@torch_api(scaler_targets=["x"], action_scaler_targets=["action"])
def predict_value(
self: _ContinuousQFunctionProtocol,
x: torch.Tensor,
action: torch.Tensor,
with_std: bool,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
assert x.ndim > 1, "Input must have batch dimension."
assert x.shape[0] == action.shape[0]
assert self._q_func is not None
with torch.no_grad():
values = self._q_func(x, action, "none").cpu().detach().numpy()
values = np.transpose(values, [1, 0, 2])
mean_values = values.mean(axis=1).reshape(-1)
stds = np.std(values, axis=1).reshape(-1)
if with_std:
return mean_values, stds
return mean_values | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/torch/utility.py | utility.py |
import copy
import math
from typing import Optional, Sequence, Tuple
import numpy as np
import torch
from torch.optim import Optimizer
from ...gpu import Device
from ...models.builders import (
create_categorical_policy,
create_discrete_q_function,
create_parameter,
create_squashed_normal_policy,
)
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...models.torch import (
CategoricalPolicy,
EnsembleDiscreteQFunction,
EnsembleQFunction,
Parameter,
Policy,
SquashedNormalPolicy,
)
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch, hard_sync, torch_api, train_api
from .base import TorchImplBase
from .ddpg_impl import DDPGBaseImpl
from .utility import DiscreteQFunctionMixin
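# Soft Actor-Critic (SAC): entropy-regularized actor-critic with a learned
# temperature parameter stored as ``log_temp``.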
class SACImpl(DDPGBaseImpl):
_policy: Optional[SquashedNormalPolicy]
_targ_policy: Optional[SquashedNormalPolicy]
_temp_learning_rate: float
_temp_optim_factory: OptimizerFactory
_initial_temperature: float
_log_temp: Optional[Parameter]
_temp_optim: Optional[Optimizer]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
temp_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
temp_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
target_reduction_type: str,
initial_temperature: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._temp_learning_rate = temp_learning_rate
self._temp_optim_factory = temp_optim_factory
self._initial_temperature = initial_temperature
# initialized in build
self._log_temp = None
self._temp_optim = None
def build(self) -> None:
self._build_temperature()
super().build()
self._build_temperature_optim()
def _build_actor(self) -> None:
self._policy = create_squashed_normal_policy(
self._observation_shape,
self._action_size,
self._actor_encoder_factory,
)
def _build_temperature(self) -> None:
initial_val = math.log(self._initial_temperature)
self._log_temp = create_parameter((1, 1), initial_val)
def _build_temperature_optim(self) -> None:
assert self._log_temp is not None
self._temp_optim = self._temp_optim_factory.create(
self._log_temp.parameters(), lr=self._temp_learning_rate
)
def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._policy is not None
assert self._log_temp is not None
assert self._q_func is not None
action, log_prob = self._policy.sample_with_log_prob(batch.observations)
entropy = self._log_temp().exp() * log_prob
q_t = self._q_func(batch.observations, action, "min")
return (entropy - q_t).mean()
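    # the temperature is tuned so that the policy entropy tracks the target
    # entropy -|A| (negative action dimensionality).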
@train_api
@torch_api()
def update_temp(
self, batch: TorchMiniBatch
) -> Tuple[np.ndarray, np.ndarray]:
assert self._temp_optim is not None
assert self._policy is not None
assert self._log_temp is not None
self._temp_optim.zero_grad()
with torch.no_grad():
_, log_prob = self._policy.sample_with_log_prob(batch.observations)
targ_temp = log_prob - self._action_size
loss = -(self._log_temp().exp() * targ_temp).mean()
loss.backward()
self._temp_optim.step()
# current temperature value
cur_temp = self._log_temp().exp().cpu().detach().numpy()[0][0]
return loss.cpu().detach().numpy(), cur_temp
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._policy is not None
assert self._log_temp is not None
assert self._targ_q_func is not None
with torch.no_grad():
action, log_prob = self._policy.sample_with_log_prob(
batch.next_observations
)
entropy = self._log_temp().exp() * log_prob
target = self._targ_q_func.compute_target(
batch.next_observations,
action,
reduction=self._target_reduction_type,
)
if self._target_reduction_type == "none":
return target - entropy.view(1, -1, 1)
else:
return target - entropy
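# Discrete-action SAC: expectations over the categorical policy are computed
# in closed form instead of sampling actions.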
class DiscreteSACImpl(DiscreteQFunctionMixin, TorchImplBase):
_actor_learning_rate: float
_critic_learning_rate: float
_temp_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_temp_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_gamma: float
_n_critics: int
_initial_temperature: float
_use_gpu: Optional[Device]
_policy: Optional[CategoricalPolicy]
_q_func: Optional[EnsembleDiscreteQFunction]
_targ_q_func: Optional[EnsembleDiscreteQFunction]
_log_temp: Optional[Parameter]
_actor_optim: Optional[Optimizer]
_critic_optim: Optional[Optimizer]
_temp_optim: Optional[Optimizer]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
temp_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
temp_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
n_critics: int,
initial_temperature: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
scaler=scaler,
action_scaler=None,
reward_scaler=reward_scaler,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._temp_learning_rate = temp_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._temp_optim_factory = temp_optim_factory
self._actor_encoder_factory = actor_encoder_factory
self._critic_encoder_factory = critic_encoder_factory
self._q_func_factory = q_func_factory
self._gamma = gamma
self._n_critics = n_critics
self._initial_temperature = initial_temperature
self._use_gpu = use_gpu
# initialized in build
self._q_func = None
self._policy = None
self._targ_q_func = None
self._log_temp = None
self._actor_optim = None
self._critic_optim = None
self._temp_optim = None
def build(self) -> None:
self._build_critic()
self._build_actor()
self._build_temperature()
# setup target networks
self._targ_q_func = copy.deepcopy(self._q_func)
if self._use_gpu:
self.to_gpu(self._use_gpu)
else:
self.to_cpu()
# setup optimizer after the parameters move to GPU
self._build_critic_optim()
self._build_actor_optim()
self._build_temperature_optim()
def _build_critic(self) -> None:
self._q_func = create_discrete_q_function(
self._observation_shape,
self._action_size,
self._critic_encoder_factory,
self._q_func_factory,
n_ensembles=self._n_critics,
)
def _build_critic_optim(self) -> None:
assert self._q_func is not None
self._critic_optim = self._critic_optim_factory.create(
self._q_func.parameters(), lr=self._critic_learning_rate
)
def _build_actor(self) -> None:
self._policy = create_categorical_policy(
self._observation_shape,
self._action_size,
self._actor_encoder_factory,
)
def _build_actor_optim(self) -> None:
assert self._policy is not None
self._actor_optim = self._actor_optim_factory.create(
self._policy.parameters(), lr=self._actor_learning_rate
)
def _build_temperature(self) -> None:
initial_val = math.log(self._initial_temperature)
self._log_temp = create_parameter((1, 1), initial_val)
def _build_temperature_optim(self) -> None:
assert self._log_temp is not None
self._temp_optim = self._temp_optim_factory.create(
self._log_temp.parameters(), lr=self._temp_learning_rate
)
@train_api
@torch_api()
def update_critic(self, batch: TorchMiniBatch) -> np.ndarray:
assert self._critic_optim is not None
self._critic_optim.zero_grad()
q_tpn = self.compute_target(batch)
loss = self.compute_critic_loss(batch, q_tpn)
loss.backward()
self._critic_optim.step()
return loss.cpu().detach().numpy()
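    # soft value target: expectation of (Q - temperature * log pi) under the
    # categorical policy at the next observation.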
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._policy is not None
assert self._log_temp is not None
assert self._targ_q_func is not None
with torch.no_grad():
log_probs = self._policy.log_probs(batch.next_observations)
probs = log_probs.exp()
entropy = self._log_temp().exp() * log_probs
target = self._targ_q_func.compute_target(batch.next_observations)
keepdims = True
if target.dim() == 3:
entropy = entropy.unsqueeze(-1)
probs = probs.unsqueeze(-1)
keepdims = False
return (probs * (target - entropy)).sum(dim=1, keepdim=keepdims)
def compute_critic_loss(
self, batch: TorchMiniBatch, q_tpn: torch.Tensor,
) -> torch.Tensor:
assert self._q_func is not None
return self._q_func.compute_error(
obs_t=batch.observations,
act_t=batch.actions.long(),
rew_tp1=batch.next_rewards,
q_tp1=q_tpn,
ter_tp1=batch.terminals,
gamma=self._gamma ** batch.n_steps,
masks=batch.masks,
)
@train_api
@torch_api()
def update_actor(self, batch: TorchMiniBatch) -> np.ndarray:
assert self._q_func is not None
assert self._actor_optim is not None
# Q function should be inference mode for stability
self._q_func.eval()
self._actor_optim.zero_grad()
loss = self.compute_actor_loss(batch)
loss.backward()
self._actor_optim.step()
return loss.cpu().detach().numpy()
def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._q_func is not None
assert self._policy is not None
assert self._log_temp is not None
with torch.no_grad():
q_t = self._q_func(batch.observations, reduction="min")
log_probs = self._policy.log_probs(batch.observations)
probs = log_probs.exp()
entropy = self._log_temp().exp() * log_probs
return (probs * (entropy - q_t)).sum(dim=1).mean()
@train_api
@torch_api()
    def update_temp(
        self, batch: TorchMiniBatch
    ) -> Tuple[np.ndarray, np.ndarray]:
assert self._temp_optim is not None
assert self._policy is not None
assert self._log_temp is not None
self._temp_optim.zero_grad()
with torch.no_grad():
log_probs = self._policy.log_probs(batch.observations)
probs = log_probs.exp()
expct_log_probs = (probs * log_probs).sum(dim=1, keepdim=True)
entropy_target = 0.98 * (-math.log(1 / self.action_size))
targ_temp = expct_log_probs + entropy_target
loss = -(self._log_temp().exp() * targ_temp).mean()
loss.backward()
self._temp_optim.step()
# current temperature value
cur_temp = self._log_temp().exp().cpu().detach().numpy()[0][0]
return loss.cpu().detach().numpy(), cur_temp
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._policy is not None
return self._policy.best_action(x)
def _sample_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._policy is not None
return self._policy.sample(x)
def update_target(self) -> None:
assert self._q_func is not None
assert self._targ_q_func is not None
hard_sync(self._targ_q_func, self._q_func)
@property
def policy(self) -> Policy:
assert self._policy
return self._policy
@property
def policy_optim(self) -> Optimizer:
assert self._actor_optim
return self._actor_optim
@property
def q_function(self) -> EnsembleQFunction:
assert self._q_func
return self._q_func
@property
def q_function_optim(self) -> Optimizer:
assert self._critic_optim
return self._critic_optim | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/torch/sac_impl.py | sac_impl.py |
import math
from typing import Optional, Sequence, Tuple
import numpy as np
import torch
from torch.optim import Optimizer
from ...gpu import Device
from ...models.builders import create_conditional_vae, create_parameter
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...models.torch import (
ConditionalVAE,
Parameter,
compute_max_with_n_actions_and_indices,
)
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch, torch_api, train_api
from .sac_impl import SACImpl
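# kernels used for the MMD-based support constraint in BEAR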
def _gaussian_kernel(
x: torch.Tensor, y: torch.Tensor, sigma: float
) -> torch.Tensor:
# x: (batch, n, 1, action), y: (batch, 1, n, action) -> (batch, n, n)
return (-((x - y) ** 2).sum(dim=3) / (2 * sigma)).exp()
def _laplacian_kernel(
x: torch.Tensor, y: torch.Tensor, sigma: float
) -> torch.Tensor:
# x: (batch, n, 1, action), y: (batch, 1, n, action) -> (batch, n, n)
return (-(x - y).abs().sum(dim=3) / (2 * sigma)).exp()
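# Bootstrapping Error Accumulation Reduction (BEAR): SAC with an additional
# MMD constraint that keeps the policy close to the behavior distribution
# modeled by a conditional VAE; alpha is the Lagrange multiplier of that
# constraint.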
class BEARImpl(SACImpl):
_imitator_learning_rate: float
_alpha_learning_rate: float
_imitator_optim_factory: OptimizerFactory
_alpha_optim_factory: OptimizerFactory
_imitator_encoder_factory: EncoderFactory
_initial_alpha: float
_alpha_threshold: float
_lam: float
_n_action_samples: int
_n_target_samples: int
_n_mmd_action_samples: int
_mmd_kernel: str
_mmd_sigma: float
_vae_kl_weight: float
_imitator: Optional[ConditionalVAE]
_imitator_optim: Optional[Optimizer]
_log_alpha: Optional[Parameter]
_alpha_optim: Optional[Optimizer]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
imitator_learning_rate: float,
temp_learning_rate: float,
alpha_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
imitator_optim_factory: OptimizerFactory,
temp_optim_factory: OptimizerFactory,
alpha_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
imitator_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
initial_temperature: float,
initial_alpha: float,
alpha_threshold: float,
lam: float,
n_action_samples: int,
n_target_samples: int,
n_mmd_action_samples: int,
mmd_kernel: str,
mmd_sigma: float,
vae_kl_weight: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
temp_learning_rate=temp_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
temp_optim_factory=temp_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type="mix",
initial_temperature=initial_temperature,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._imitator_learning_rate = imitator_learning_rate
self._alpha_learning_rate = alpha_learning_rate
self._imitator_optim_factory = imitator_optim_factory
self._alpha_optim_factory = alpha_optim_factory
self._imitator_encoder_factory = imitator_encoder_factory
self._initial_alpha = initial_alpha
self._alpha_threshold = alpha_threshold
self._lam = lam
self._n_action_samples = n_action_samples
self._n_target_samples = n_target_samples
self._n_mmd_action_samples = n_mmd_action_samples
self._mmd_kernel = mmd_kernel
self._mmd_sigma = mmd_sigma
self._vae_kl_weight = vae_kl_weight
# initialized in build
self._imitator = None
self._imitator_optim = None
self._log_alpha = None
self._alpha_optim = None
def build(self) -> None:
self._build_imitator()
self._build_alpha()
super().build()
self._build_imitator_optim()
self._build_alpha_optim()
def _build_imitator(self) -> None:
self._imitator = create_conditional_vae(
observation_shape=self._observation_shape,
action_size=self._action_size,
latent_size=2 * self._action_size,
beta=self._vae_kl_weight,
min_logstd=-4.0,
max_logstd=15.0,
encoder_factory=self._imitator_encoder_factory,
)
def _build_imitator_optim(self) -> None:
assert self._imitator is not None
self._imitator_optim = self._imitator_optim_factory.create(
self._imitator.parameters(), lr=self._imitator_learning_rate
)
def _build_alpha(self) -> None:
initial_val = math.log(self._initial_alpha)
self._log_alpha = create_parameter((1, 1), initial_val)
def _build_alpha_optim(self) -> None:
assert self._log_alpha is not None
self._alpha_optim = self._alpha_optim_factory.create(
self._log_alpha.parameters(), lr=self._alpha_learning_rate
)
def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
loss = super().compute_actor_loss(batch)
mmd_loss = self._compute_mmd_loss(batch.observations)
return loss + mmd_loss
@train_api
@torch_api()
def warmup_actor(self, batch: TorchMiniBatch) -> np.ndarray:
assert self._actor_optim is not None
self._actor_optim.zero_grad()
loss = self._compute_mmd_loss(batch.observations)
loss.backward()
self._actor_optim.step()
return loss.cpu().detach().numpy()
def _compute_mmd_loss(self, obs_t: torch.Tensor) -> torch.Tensor:
assert self._log_alpha
mmd = self._compute_mmd(obs_t)
alpha = self._log_alpha().exp()
return (alpha * (mmd - self._alpha_threshold)).mean()
@train_api
@torch_api()
def update_imitator(self, batch: TorchMiniBatch) -> np.ndarray:
assert self._imitator_optim is not None
self._imitator_optim.zero_grad()
loss = self.compute_imitator_loss(batch)
loss.backward()
self._imitator_optim.step()
return loss.cpu().detach().numpy()
def compute_imitator_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._imitator is not None
return self._imitator.compute_error(batch.observations, batch.actions)
@train_api
@torch_api()
    def update_alpha(
        self, batch: TorchMiniBatch
    ) -> Tuple[np.ndarray, np.ndarray]:
assert self._alpha_optim is not None
assert self._log_alpha is not None
loss = -self._compute_mmd_loss(batch.observations)
self._alpha_optim.zero_grad()
loss.backward()
self._alpha_optim.step()
# clip for stability
self._log_alpha.data.clamp_(-5.0, 10.0)
cur_alpha = self._log_alpha().exp().cpu().detach().numpy()[0][0]
return loss.cpu().detach().numpy(), cur_alpha
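    # empirical MMD between actions sampled from the VAE behavior model and
    # actions sampled from the current policy (both before tanh squashing).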
def _compute_mmd(self, x: torch.Tensor) -> torch.Tensor:
assert self._imitator is not None
assert self._policy is not None
with torch.no_grad():
behavior_actions = self._imitator.sample_n_without_squash(
x, self._n_mmd_action_samples
)
policy_actions = self._policy.sample_n_without_squash(
x, self._n_mmd_action_samples
)
if self._mmd_kernel == "gaussian":
kernel = _gaussian_kernel
elif self._mmd_kernel == "laplacian":
kernel = _laplacian_kernel
else:
raise ValueError(f"Invalid kernel type: {self._mmd_kernel}")
# (batch, n, action) -> (batch, n, 1, action)
behavior_actions = behavior_actions.reshape(
x.shape[0], -1, 1, self.action_size
)
policy_actions = policy_actions.reshape(
x.shape[0], -1, 1, self.action_size
)
# (batch, n, action) -> (batch, 1, n, action)
behavior_actions_T = behavior_actions.reshape(
x.shape[0], 1, -1, self.action_size
)
policy_actions_T = policy_actions.reshape(
x.shape[0], 1, -1, self.action_size
)
# 1 / N^2 \sum k(a_\pi, a_\pi)
inter_policy = kernel(policy_actions, policy_actions_T, self._mmd_sigma)
mmd = inter_policy.mean(dim=[1, 2])
# 1 / N^2 \sum k(a_\beta, a_\beta)
inter_data = kernel(
behavior_actions, behavior_actions_T, self._mmd_sigma
)
mmd += inter_data.mean(dim=[1, 2])
# 2 / N^2 \sum k(a_\pi, a_\beta)
distance = kernel(policy_actions, behavior_actions_T, self._mmd_sigma)
mmd -= 2 * distance.mean(dim=[1, 2])
return (mmd + 1e-6).sqrt().view(-1, 1)
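    # BCQ-style target: sample several candidate actions, combine min and max
    # over the Q ensemble with weight lam, then subtract the entropy bonus.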
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._policy is not None
assert self._targ_q_func is not None
assert self._log_temp is not None
with torch.no_grad():
# BCQ-like target computation
actions, log_probs = self._policy.sample_n_with_log_prob(
batch.next_observations, self._n_target_samples,
)
values, indices = compute_max_with_n_actions_and_indices(
batch.next_observations, actions, self._targ_q_func, self._lam
)
# (batch, n, 1) -> (batch, 1)
batch_size = batch.observations.shape[0]
max_log_prob = log_probs[torch.arange(batch_size), indices]
return values - self._log_temp().exp() * max_log_prob
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._policy is not None
assert self._q_func is not None
with torch.no_grad():
# (batch, n, action)
actions = self._policy.onnx_safe_sample_n(x, self._n_action_samples)
# (batch, n, action) -> (batch * n, action)
flat_actions = actions.reshape(-1, self._action_size)
# (batch, observation) -> (batch, 1, observation)
expanded_x = x.view(x.shape[0], 1, *x.shape[1:])
# (batch, 1, observation) -> (batch, n, observation)
repeated_x = expanded_x.expand(
x.shape[0], self._n_action_samples, *x.shape[1:]
)
# (batch, n, observation) -> (batch * n, observation)
flat_x = repeated_x.reshape(-1, *x.shape[1:])
# (batch * n, 1)
flat_values = self._q_func(flat_x, flat_actions, "none")[0]
# (batch, n)
values = flat_values.view(x.shape[0], self._n_action_samples)
# (batch, n) -> (batch,)
max_indices = torch.argmax(values, dim=1)
return actions[torch.arange(x.shape[0]), max_indices] | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/torch/bear_impl.py | bear_impl.py |
import copy
from typing import Optional, Sequence
import numpy as np
import torch
from torch.optim import Optimizer
from ...gpu import Device
from ...models.builders import (
create_conditional_vae,
create_deterministic_policy,
create_deterministic_residual_policy,
)
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...models.torch import (
ConditionalVAE,
DeterministicPolicy,
DeterministicResidualPolicy,
)
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch, soft_sync, torch_api, train_api
from .ddpg_impl import DDPGBaseImpl
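# Policy in the Latent Action Space (PLAS): a deterministic policy outputs a
# latent code (scaled by 2.0) that is decoded into an action by a conditional
# VAE trained on the dataset.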
class PLASImpl(DDPGBaseImpl):
_imitator_learning_rate: float
_imitator_optim_factory: OptimizerFactory
_imitator_encoder_factory: EncoderFactory
_n_critics: int
_lam: float
_beta: float
_policy: Optional[DeterministicPolicy]
_targ_policy: Optional[DeterministicPolicy]
_imitator: Optional[ConditionalVAE]
_imitator_optim: Optional[Optimizer]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
imitator_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
imitator_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
imitator_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
target_reduction_type: str,
lam: float,
beta: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._imitator_learning_rate = imitator_learning_rate
self._imitator_optim_factory = imitator_optim_factory
self._imitator_encoder_factory = imitator_encoder_factory
self._n_critics = n_critics
self._lam = lam
self._beta = beta
# initialized in build
self._imitator = None
self._imitator_optim = None
def build(self) -> None:
self._build_imitator()
super().build()
# setup optimizer after the parameters move to GPU
self._build_imitator_optim()
def _build_actor(self) -> None:
self._policy = create_deterministic_policy(
observation_shape=self._observation_shape,
action_size=2 * self._action_size,
encoder_factory=self._actor_encoder_factory,
)
def _build_imitator(self) -> None:
self._imitator = create_conditional_vae(
observation_shape=self._observation_shape,
action_size=self._action_size,
latent_size=2 * self._action_size,
beta=self._beta,
min_logstd=-4.0,
max_logstd=15.0,
encoder_factory=self._imitator_encoder_factory,
)
def _build_imitator_optim(self) -> None:
assert self._imitator is not None
self._imitator_optim = self._imitator_optim_factory.create(
params=self._imitator.parameters(), lr=self._imitator_learning_rate
)
@train_api
@torch_api()
def update_imitator(self, batch: TorchMiniBatch) -> np.ndarray:
assert self._imitator is not None
assert self._imitator_optim is not None
self._imitator_optim.zero_grad()
loss = self._imitator.compute_error(batch.observations, batch.actions)
loss.backward()
self._imitator_optim.step()
return loss.cpu().detach().numpy()
def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._imitator is not None
assert self._policy is not None
assert self._q_func is not None
latent_actions = 2.0 * self._policy(batch.observations)
actions = self._imitator.decode(batch.observations, latent_actions)
return -self._q_func(batch.observations, actions, "none")[0].mean()
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._imitator is not None
assert self._policy is not None
return self._imitator.decode(x, 2.0 * self._policy(x))
def _sample_action(self, x: torch.Tensor) -> torch.Tensor:
return self._predict_best_action(x)
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._imitator is not None
assert self._targ_policy is not None
assert self._targ_q_func is not None
with torch.no_grad():
latent_actions = 2.0 * self._targ_policy(batch.next_observations)
actions = self._imitator.decode(
batch.next_observations, latent_actions
)
return self._targ_q_func.compute_target(
batch.next_observations,
actions,
self._target_reduction_type,
self._lam,
)
class PLASWithPerturbationImpl(PLASImpl):
_action_flexibility: float
_perturbation: Optional[DeterministicResidualPolicy]
_targ_perturbation: Optional[DeterministicResidualPolicy]
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
imitator_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
imitator_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
imitator_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
target_reduction_type: str,
lam: float,
beta: float,
action_flexibility: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
imitator_learning_rate=imitator_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
imitator_optim_factory=imitator_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
imitator_encoder_factory=imitator_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
lam=lam,
beta=beta,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._action_flexibility = action_flexibility
# initialized in build
self._perturbation = None
self._targ_perturbation = None
def build(self) -> None:
super().build()
self._targ_perturbation = copy.deepcopy(self._perturbation)
def _build_actor(self) -> None:
super()._build_actor()
self._perturbation = create_deterministic_residual_policy(
observation_shape=self._observation_shape,
action_size=self._action_size,
scale=self._action_flexibility,
encoder_factory=self._actor_encoder_factory,
)
def _build_actor_optim(self) -> None:
assert self._policy is not None
assert self._perturbation is not None
parameters = list(self._policy.parameters())
parameters += list(self._perturbation.parameters())
self._actor_optim = self._actor_optim_factory.create(
params=parameters, lr=self._actor_learning_rate
)
def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._imitator is not None
assert self._policy is not None
assert self._perturbation is not None
assert self._q_func is not None
latent_actions = 2.0 * self._policy(batch.observations)
actions = self._imitator.decode(batch.observations, latent_actions)
residual_actions = self._perturbation(batch.observations, actions)
q_value = self._q_func(batch.observations, residual_actions, "none")
return -q_value[0].mean()
def _predict_best_action(self, x: torch.Tensor) -> torch.Tensor:
assert self._imitator is not None
assert self._policy is not None
assert self._perturbation is not None
action = self._imitator.decode(x, 2.0 * self._policy(x))
return self._perturbation(x, action)
def _sample_action(self, x: torch.Tensor) -> torch.Tensor:
return self._predict_best_action(x)
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._imitator is not None
assert self._targ_policy is not None
assert self._targ_perturbation is not None
assert self._targ_q_func is not None
with torch.no_grad():
latent_actions = 2.0 * self._targ_policy(batch.next_observations)
actions = self._imitator.decode(
batch.next_observations, latent_actions
)
residual_actions = self._targ_perturbation(
batch.next_observations, actions
)
return self._targ_q_func.compute_target(
batch.next_observations,
residual_actions,
reduction=self._target_reduction_type,
lam=self._lam,
)
def update_actor_target(self) -> None:
assert self._perturbation is not None
assert self._targ_perturbation is not None
super().update_actor_target()
soft_sync(self._targ_perturbation, self._perturbation, self._tau) | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/algos/torch/plas_impl.py | plas_impl.py |
from typing import Optional, Sequence, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from ...gpu import Device
from ...models.builders import create_squashed_normal_policy
from ...models.encoders import EncoderFactory
from ...models.optimizers import AdamFactory, OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...models.torch import squash_action
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch, torch_api, train_api
from .sac_impl import SACImpl
class AWACImpl(SACImpl):
_lam: float
_n_action_samples: int
_max_weight: float
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
lam: float,
n_action_samples: int,
max_weight: float,
n_critics: int,
target_reduction_type: str,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
temp_learning_rate=0.0,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
temp_optim_factory=AdamFactory(),
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
initial_temperature=1e-20,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
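        # temp_learning_rate=0.0 together with the tiny initial_temperature
        # above presumably disables SAC's entropy bonus, so AWAC keeps the SAC
        # critic update while replacing the actor update with an
        # advantage-weighted one.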
self._lam = lam
self._n_action_samples = n_action_samples
self._max_weight = max_weight
def _build_actor(self) -> None:
self._policy = create_squashed_normal_policy(
self._observation_shape,
self._action_size,
self._actor_encoder_factory,
min_logstd=-6.0,
max_logstd=0.0,
use_std_parameter=True,
)
@train_api
@torch_api()
def update_actor(
self, batch: TorchMiniBatch
) -> Tuple[np.ndarray, np.ndarray]:
assert self._q_func is not None
assert self._policy is not None
assert self._actor_optim is not None
        # Q function should be in inference mode for stability
self._q_func.eval()
self._actor_optim.zero_grad()
loss = self.compute_actor_loss(batch)
loss.backward()
self._actor_optim.step()
        # get the current standard deviation of the policy for debugging
mean_std = self._policy.get_logstd_parameter().exp().mean()
return loss.cpu().detach().numpy(), mean_std.cpu().detach().numpy()
def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._policy is not None
dist = self._policy.dist(batch.observations)
        # invert the tanh squashing to recover the raw (pre-squash) action
clipped_actions = batch.actions.clamp(-0.999999, 0.999999)
unnormalized_act_t = torch.atanh(clipped_actions)
# compute log probability
_, log_probs = squash_action(dist, unnormalized_act_t)
# compute exponential weight
weights = self._compute_weights(batch.observations, batch.actions)
return -(log_probs * weights).sum()
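    # Sketch of ``_compute_weights`` below: advantages A(s, a) = Q(s, a) - V(s)
    # are estimated with V(s) approximated by averaging the minimum Q-value
    # over ``n_action_samples`` actions drawn from the current policy; the
    # weights are softmax(A / lam) over the batch, clipped at ``max_weight``
    # (AWR-style clipping).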
def _compute_weights(
self, obs_t: torch.Tensor, act_t: torch.Tensor
) -> torch.Tensor:
assert self._q_func is not None
assert self._policy is not None
with torch.no_grad():
batch_size = obs_t.shape[0]
# compute action-value
q_values = self._q_func(obs_t, act_t, "min")
# sample actions
# (batch_size * N, action_size)
policy_actions = self._policy.sample_n(
obs_t, self._n_action_samples
)
flat_actions = policy_actions.reshape(-1, self.action_size)
# repeat observation
# (batch_size, obs_size) -> (batch_size, 1, obs_size)
reshaped_obs_t = obs_t.view(batch_size, 1, *obs_t.shape[1:])
            # (batch_size, 1, obs_size) -> (batch_size, N, obs_size)
repeated_obs_t = reshaped_obs_t.expand(
batch_size, self._n_action_samples, *obs_t.shape[1:]
)
# (batch_size, N, obs_size) -> (batch_size * N, obs_size)
flat_obs_t = repeated_obs_t.reshape(-1, *obs_t.shape[1:])
# compute state-value
flat_v_values = self._q_func(flat_obs_t, flat_actions, "min")
reshaped_v_values = flat_v_values.view(obs_t.shape[0], -1, 1)
v_values = reshaped_v_values.mean(dim=1)
# compute normalized weight
adv_values = (q_values - v_values).view(-1)
weights = F.softmax(adv_values / self._lam, dim=0).view(-1, 1)
# clip like AWR
clipped_weights = weights.clamp(0.0, self._max_weight)
        return clipped_weights
# === end of d3rlpy/algos/torch/awac_impl.py ===
from typing import Optional, Sequence
import torch
from ...gpu import Device
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from .cql_impl import CQLImpl
class COMBOImpl(CQLImpl):
_real_ratio: float
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
temp_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
temp_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
target_reduction_type: str,
initial_temperature: float,
conservative_weight: float,
n_action_samples: int,
real_ratio: float,
soft_q_backup: bool,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
temp_learning_rate=temp_learning_rate,
alpha_learning_rate=0.0,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
temp_optim_factory=temp_optim_factory,
alpha_optim_factory=temp_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
initial_temperature=initial_temperature,
initial_alpha=1.0,
alpha_threshold=0.0,
conservative_weight=conservative_weight,
n_action_samples=n_action_samples,
soft_q_backup=soft_q_backup,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._real_ratio = real_ratio
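    # Sketch of the conservative term below: the mini-batch is assumed to be
    # ordered so that the first ``real_ratio`` fraction of transitions comes
    # from the real dataset and the rest from the learned dynamics model; the
    # logsumexp penalty is computed only on the model-generated observations,
    # while the data term uses the real state-action pairs.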
def _compute_conservative_loss(
self, obs_t: torch.Tensor, act_t: torch.Tensor, obs_tp1: torch.Tensor
) -> torch.Tensor:
assert self._policy is not None
assert self._q_func is not None
assert self._log_alpha is not None
# split batch
fake_obs_t = obs_t[int(obs_t.shape[0] * self._real_ratio) :]
fake_obs_tp1 = obs_tp1[int(obs_tp1.shape[0] * self._real_ratio) :]
real_obs_t = obs_t[: int(obs_t.shape[0] * self._real_ratio)]
real_act_t = act_t[: int(act_t.shape[0] * self._real_ratio)]
# compute conservative loss only with generated transitions
random_values = self._compute_random_is_values(fake_obs_t)
policy_values_t = self._compute_policy_is_values(fake_obs_t, fake_obs_t)
policy_values_tp1 = self._compute_policy_is_values(
fake_obs_tp1, fake_obs_t
)
# compute logsumexp
# (n critics, batch, 3 * n samples) -> (n critics, batch, 1)
target_values = torch.cat(
[policy_values_t, policy_values_tp1, random_values], dim=2
)
logsumexp = torch.logsumexp(target_values, dim=2, keepdim=True)
# estimate action-values for real data actions
data_values = self._q_func(real_obs_t, real_act_t, "none")
loss = logsumexp.sum(dim=0).mean() - data_values.sum(dim=0).mean()
        return loss
# === end of d3rlpy/algos/torch/combo_impl.py ===
from typing import Any, ClassVar, Dict, Type
from ..decorators import pretty_repr
from .torch import (
ContinuousFQFQFunction,
ContinuousIQNQFunction,
ContinuousMeanQFunction,
ContinuousQFunction,
ContinuousQRQFunction,
DiscreteFQFQFunction,
DiscreteIQNQFunction,
DiscreteMeanQFunction,
DiscreteQFunction,
DiscreteQRQFunction,
Encoder,
EncoderWithAction,
)
@pretty_repr
class QFunctionFactory:
TYPE: ClassVar[str] = "none"
_bootstrap: bool
_share_encoder: bool
def __init__(self, bootstrap: bool, share_encoder: bool):
self._bootstrap = bootstrap
self._share_encoder = share_encoder
def create_discrete(
self, encoder: Encoder, action_size: int
) -> DiscreteQFunction:
"""Returns PyTorch's Q function module.
Args:
encoder: an encoder module that processes the observation to
obtain feature representations.
action_size: dimension of discrete action-space.
Returns:
discrete Q function object.
"""
raise NotImplementedError
def create_continuous(
self, encoder: EncoderWithAction
) -> ContinuousQFunction:
"""Returns PyTorch's Q function module.
Args:
encoder: an encoder module that processes the observation and
action to obtain feature representations.
Returns:
continuous Q function object.
"""
raise NotImplementedError
def get_type(self) -> str:
"""Returns Q function type.
Returns:
Q function type.
"""
return self.TYPE
def get_params(self, deep: bool = False) -> Dict[str, Any]:
"""Returns Q function parameters.
Returns:
Q function parameters.
"""
raise NotImplementedError
@property
def bootstrap(self) -> bool:
return self._bootstrap
@property
def share_encoder(self) -> bool:
return self._share_encoder
class MeanQFunctionFactory(QFunctionFactory):
"""Standard Q function factory class.
This is the standard Q function factory class.
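    A minimal configuration sketch (the argument values here are arbitrary):

    .. code-block:: python

        from d3rlpy.models.q_functions import MeanQFunctionFactory

        factory = MeanQFunctionFactory(share_encoder=True)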
References:
* `Mnih et al., Human-level control through deep reinforcement
learning. <https://www.nature.com/articles/nature14236>`_
* `Lillicrap et al., Continuous control with deep reinforcement
learning. <https://arxiv.org/abs/1509.02971>`_
Args:
bootstrap (bool): flag to bootstrap Q functions.
share_encoder (bool): flag to share encoder over multiple Q functions.
"""
TYPE: ClassVar[str] = "mean"
def __init__(self, bootstrap: bool = False, share_encoder: bool = False):
super().__init__(bootstrap, share_encoder)
def create_discrete(
self, encoder: Encoder, action_size: int,
) -> DiscreteMeanQFunction:
return DiscreteMeanQFunction(encoder, action_size)
def create_continuous(
self, encoder: EncoderWithAction,
) -> ContinuousMeanQFunction:
return ContinuousMeanQFunction(encoder)
def get_params(self, deep: bool = False) -> Dict[str, Any]:
return {
"bootstrap": self._bootstrap,
"share_encoder": self._share_encoder,
}
class QRQFunctionFactory(QFunctionFactory):
"""Quantile Regression Q function factory class.
References:
* `Dabney et al., Distributional reinforcement learning with quantile
regression. <https://arxiv.org/abs/1710.10044>`_
Args:
bootstrap (bool): flag to bootstrap Q functions.
share_encoder (bool): flag to share encoder over multiple Q functions.
n_quantiles: the number of quantiles.
"""
TYPE: ClassVar[str] = "qr"
_n_quantiles: int
def __init__(
self,
bootstrap: bool = False,
share_encoder: bool = False,
n_quantiles: int = 32,
):
super().__init__(bootstrap, share_encoder)
self._n_quantiles = n_quantiles
def create_discrete(
self, encoder: Encoder, action_size: int
) -> DiscreteQRQFunction:
return DiscreteQRQFunction(encoder, action_size, self._n_quantiles)
def create_continuous(
self, encoder: EncoderWithAction,
) -> ContinuousQRQFunction:
return ContinuousQRQFunction(encoder, self._n_quantiles)
def get_params(self, deep: bool = False) -> Dict[str, Any]:
return {
"bootstrap": self._bootstrap,
"share_encoder": self._share_encoder,
"n_quantiles": self._n_quantiles,
}
@property
def n_quantiles(self) -> int:
return self._n_quantiles
class IQNQFunctionFactory(QFunctionFactory):
"""Implicit Quantile Network Q function factory class.
References:
* `Dabney et al., Implicit quantile networks for distributional
reinforcement learning. <https://arxiv.org/abs/1806.06923>`_
Args:
bootstrap (bool): flag to bootstrap Q functions.
share_encoder (bool): flag to share encoder over multiple Q functions.
n_quantiles: the number of quantiles.
n_greedy_quantiles: the number of quantiles for inference.
embed_size: the embedding size.
"""
TYPE: ClassVar[str] = "iqn"
_n_quantiles: int
_n_greedy_quantiles: int
_embed_size: int
def __init__(
self,
bootstrap: bool = False,
share_encoder: bool = False,
n_quantiles: int = 64,
n_greedy_quantiles: int = 32,
embed_size: int = 64,
):
super().__init__(bootstrap, share_encoder)
self._n_quantiles = n_quantiles
self._n_greedy_quantiles = n_greedy_quantiles
self._embed_size = embed_size
def create_discrete(
self, encoder: Encoder, action_size: int,
) -> DiscreteIQNQFunction:
return DiscreteIQNQFunction(
encoder=encoder,
action_size=action_size,
n_quantiles=self._n_quantiles,
n_greedy_quantiles=self._n_greedy_quantiles,
embed_size=self._embed_size,
)
def create_continuous(
self, encoder: EncoderWithAction,
) -> ContinuousIQNQFunction:
return ContinuousIQNQFunction(
encoder=encoder,
n_quantiles=self._n_quantiles,
n_greedy_quantiles=self._n_greedy_quantiles,
embed_size=self._embed_size,
)
def get_params(self, deep: bool = False) -> Dict[str, Any]:
return {
"bootstrap": self._bootstrap,
"share_encoder": self._share_encoder,
"n_quantiles": self._n_quantiles,
"n_greedy_quantiles": self._n_greedy_quantiles,
"embed_size": self._embed_size,
}
@property
def n_quantiles(self) -> int:
return self._n_quantiles
@property
def n_greedy_quantiles(self) -> int:
return self._n_greedy_quantiles
@property
def embed_size(self) -> int:
return self._embed_size
class FQFQFunctionFactory(QFunctionFactory):
"""Fully parameterized Quantile Function Q function factory.
References:
* `Yang et al., Fully parameterized quantile function for
distributional reinforcement learning.
<https://arxiv.org/abs/1911.02140>`_
Args:
bootstrap (bool): flag to bootstrap Q functions.
share_encoder (bool): flag to share encoder over multiple Q functions.
n_quantiles: the number of quantiles.
embed_size: the embedding size.
        entropy_coeff: the coefficient of the entropy penalty term.
"""
TYPE: ClassVar[str] = "fqf"
_n_quantiles: int
_embed_size: int
_entropy_coeff: float
def __init__(
self,
bootstrap: bool = False,
share_encoder: bool = False,
n_quantiles: int = 32,
embed_size: int = 64,
entropy_coeff: float = 0.0,
):
super().__init__(bootstrap, share_encoder)
self._n_quantiles = n_quantiles
self._embed_size = embed_size
self._entropy_coeff = entropy_coeff
def create_discrete(
self, encoder: Encoder, action_size: int,
) -> DiscreteFQFQFunction:
return DiscreteFQFQFunction(
encoder=encoder,
action_size=action_size,
n_quantiles=self._n_quantiles,
embed_size=self._embed_size,
entropy_coeff=self._entropy_coeff,
)
def create_continuous(
self, encoder: EncoderWithAction,
) -> ContinuousFQFQFunction:
return ContinuousFQFQFunction(
encoder=encoder,
n_quantiles=self._n_quantiles,
embed_size=self._embed_size,
entropy_coeff=self._entropy_coeff,
)
def get_params(self, deep: bool = False) -> Dict[str, Any]:
return {
"bootstrap": self._bootstrap,
"share_encoder": self._share_encoder,
"n_quantiles": self._n_quantiles,
"embed_size": self._embed_size,
"entropy_coeff": self._entropy_coeff,
}
@property
def n_quantiles(self) -> int:
return self._n_quantiles
@property
def embed_size(self) -> int:
return self._embed_size
@property
def entropy_coeff(self) -> float:
return self._entropy_coeff
Q_FUNC_LIST: Dict[str, Type[QFunctionFactory]] = {}
def register_q_func_factory(cls: Type[QFunctionFactory]) -> None:
"""Registers Q function factory class.
Args:
cls: Q function factory class inheriting ``QFunctionFactory``.
"""
is_registered = cls.TYPE in Q_FUNC_LIST
assert not is_registered, f"{cls.TYPE} seems to be already registered"
Q_FUNC_LIST[cls.TYPE] = cls
def create_q_func_factory(name: str, **kwargs: Any) -> QFunctionFactory:
"""Returns registered Q function factory object.
Args:
name: registered Q function factory type name.
kwargs: Q function arguments.
Returns:
Q function factory object.
"""
assert name in Q_FUNC_LIST, f"{name} seems not to be registered."
factory = Q_FUNC_LIST[name](**kwargs)
assert isinstance(factory, QFunctionFactory)
return factory
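# Informal usage sketch of the registry below: once the factories are
# registered, a factory can be constructed by its ``TYPE`` name, e.g.
# ``create_q_func_factory("qr", n_quantiles=200)`` returns a
# ``QRQFunctionFactory`` configured with 200 quantiles.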
register_q_func_factory(MeanQFunctionFactory)
register_q_func_factory(QRQFunctionFactory)
register_q_func_factory(IQNQFunctionFactory)
register_q_func_factory(FQFQFunctionFactory)
# === end of d3rlpy/models/q_functions.py ===
import copy
from typing import Any, Dict, Iterable, Tuple, Type, Union, cast
from torch import nn, optim
from torch.optim import SGD, Adam, Optimizer, RMSprop
from ..decorators import pretty_repr
@pretty_repr
class OptimizerFactory:
"""A factory class that creates an optimizer object in a lazy way.
The optimizers in algorithms can be configured through this factory class.
.. code-block:: python
        from torch.optim import Adam
from d3rlpy.optimizers import OptimizerFactory
from d3rlpy.algos import DQN
factory = OptimizerFactory(Adam, eps=0.001)
dqn = DQN(optim_factory=factory)
Args:
optim_cls: An optimizer class.
kwargs: arbitrary keyword-arguments.
"""
_optim_cls: Type[Optimizer]
_optim_kwargs: Dict[str, Any]
def __init__(self, optim_cls: Union[Type[Optimizer], str], **kwargs: Any):
if isinstance(optim_cls, str):
self._optim_cls = cast(Type[Optimizer], getattr(optim, optim_cls))
else:
self._optim_cls = optim_cls
self._optim_kwargs = kwargs
def create(self, params: Iterable[nn.Parameter], lr: float) -> Optimizer:
"""Returns an optimizer object.
Args:
params (list): a list of PyTorch parameters.
lr (float): learning rate.
Returns:
torch.optim.Optimizer: an optimizer object.
"""
return self._optim_cls(params, lr=lr, **self._optim_kwargs)
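    # Informal usage sketch (``model`` is a placeholder ``nn.Module``):
    #     AdamFactory(weight_decay=1e-4).create(model.parameters(), lr=3e-4)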
def get_params(self, deep: bool = False) -> Dict[str, Any]:
"""Returns optimizer parameters.
Args:
deep: flag to deeply copy the parameters.
Returns:
optimizer parameters.
"""
if deep:
params = copy.deepcopy(self._optim_kwargs)
else:
params = self._optim_kwargs
return {"optim_cls": self._optim_cls.__name__, **params}
class SGDFactory(OptimizerFactory):
"""An alias for SGD optimizer.
.. code-block:: python
from d3rlpy.optimizers import SGDFactory
factory = SGDFactory(weight_decay=1e-4)
Args:
momentum: momentum factor.
dampening: dampening for momentum.
weight_decay: weight decay (L2 penalty).
nesterov: flag to enable Nesterov momentum.
"""
def __init__(
self,
momentum: float = 0,
dampening: float = 0,
weight_decay: float = 0,
nesterov: bool = False,
**kwargs: Any
):
super().__init__(
optim_cls=SGD,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
)
class AdamFactory(OptimizerFactory):
"""An alias for Adam optimizer.
.. code-block:: python
from d3rlpy.optimizers import AdamFactory
factory = AdamFactory(weight_decay=1e-4)
Args:
betas: coefficients used for computing running averages of
gradient and its square.
eps: term added to the denominator to improve numerical stability.
weight_decay: weight decay (L2 penalty).
amsgrad: flag to use the AMSGrad variant of this algorithm.
"""
def __init__(
self,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 0,
amsgrad: bool = False,
**kwargs: Any
):
super().__init__(
optim_cls=Adam,
betas=betas,
eps=eps,
weight_decay=weight_decay,
amsgrad=amsgrad,
)
class RMSpropFactory(OptimizerFactory):
"""An alias for RMSprop optimizer.
.. code-block:: python
from d3rlpy.optimizers import RMSpropFactory
factory = RMSpropFactory(weight_decay=1e-4)
Args:
alpha: smoothing constant.
eps: term added to the denominator to improve numerical stability.
weight_decay: weight decay (L2 penalty).
momentum: momentum factor.
centered: flag to compute the centered RMSProp, the gradient is
normalized by an estimation of its variance.
"""
def __init__(
self,
alpha: float = 0.95,
eps: float = 1e-2,
weight_decay: float = 0,
momentum: float = 0,
centered: bool = True,
**kwargs: Any
):
super().__init__(
optim_cls=RMSprop,
alpha=alpha,
eps=eps,
weight_decay=weight_decay,
momentum=momentum,
centered=centered,
        )
# === end of d3rlpy/models/optimizers.py ===
import copy
from typing import Any, ClassVar, Dict, List, Optional, Sequence, Type, Union
from torch import nn
from ..decorators import pretty_repr
from ..torch_utility import Swish
from .torch import (
Encoder,
EncoderWithAction,
PixelEncoder,
PixelEncoderWithAction,
VectorEncoder,
VectorEncoderWithAction,
)
def _create_activation(activation_type: str) -> nn.Module:
if activation_type == "relu":
return nn.ReLU()
elif activation_type == "tanh":
return nn.Tanh()
elif activation_type == "swish":
return Swish()
raise ValueError("invalid activation_type.")
@pretty_repr
class EncoderFactory:
TYPE: ClassVar[str] = "none"
def create(self, observation_shape: Sequence[int]) -> Encoder:
"""Returns PyTorch's state enocder module.
Args:
observation_shape: observation shape.
Returns:
            an encoder object.
"""
raise NotImplementedError
def create_with_action(
self,
observation_shape: Sequence[int],
action_size: int,
discrete_action: bool = False,
) -> EncoderWithAction:
"""Returns PyTorch's state-action enocder module.
Args:
observation_shape: observation shape.
            action_size: action size.
discrete_action: flag if action-space is discrete.
Returns:
            an encoder object.
"""
raise NotImplementedError
def get_type(self) -> str:
"""Returns encoder type.
Returns:
encoder type.
"""
return self.TYPE
def get_params(self, deep: bool = False) -> Dict[str, Any]:
"""Returns encoder parameters.
Args:
deep: flag to deeply copy the parameters.
Returns:
encoder parameters.
"""
raise NotImplementedError
class PixelEncoderFactory(EncoderFactory):
"""Pixel encoder factory class.
This is the default encoder factory for image observation.
Args:
filters (list): list of tuples consisting with
``(filter_size, kernel_size, stride)``. If None,
``Nature DQN``-based architecture is used.
feature_size (int): the last linear layer size.
activation (str): activation function name.
use_batch_norm (bool): flag to insert batch normalization layers.
dropout_rate (float): dropout probability.
"""
TYPE: ClassVar[str] = "pixel"
_filters: List[Sequence[int]]
_feature_size: int
_activation: str
_use_batch_norm: bool
_dropout_rate: Optional[float]
def __init__(
self,
filters: Optional[List[Sequence[int]]] = None,
feature_size: int = 512,
activation: str = "relu",
use_batch_norm: bool = False,
dropout_rate: Optional[float] = None,
):
if filters is None:
self._filters = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
else:
self._filters = filters
self._feature_size = feature_size
self._activation = activation
self._use_batch_norm = use_batch_norm
self._dropout_rate = dropout_rate
def create(self, observation_shape: Sequence[int]) -> PixelEncoder:
assert len(observation_shape) == 3
return PixelEncoder(
observation_shape=observation_shape,
filters=self._filters,
feature_size=self._feature_size,
use_batch_norm=self._use_batch_norm,
dropout_rate=self._dropout_rate,
activation=_create_activation(self._activation),
)
def create_with_action(
self,
observation_shape: Sequence[int],
action_size: int,
discrete_action: bool = False,
) -> PixelEncoderWithAction:
assert len(observation_shape) == 3
return PixelEncoderWithAction(
observation_shape=observation_shape,
action_size=action_size,
filters=self._filters,
feature_size=self._feature_size,
use_batch_norm=self._use_batch_norm,
dropout_rate=self._dropout_rate,
discrete_action=discrete_action,
activation=_create_activation(self._activation),
)
def get_params(self, deep: bool = False) -> Dict[str, Any]:
if deep:
filters = copy.deepcopy(self._filters)
else:
filters = self._filters
params = {
"filters": filters,
"feature_size": self._feature_size,
"activation": self._activation,
"use_batch_norm": self._use_batch_norm,
"dropout_rate": self._dropout_rate,
}
return params
class VectorEncoderFactory(EncoderFactory):
"""Vector encoder factory class.
This is the default encoder factory for vector observation.
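    A minimal usage sketch (the sizes below are arbitrary):

    .. code-block:: python

        from d3rlpy.models.encoders import VectorEncoderFactory

        factory = VectorEncoderFactory(hidden_units=[128, 128], activation="swish")
        encoder = factory.create(observation_shape=(8,))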
Args:
hidden_units (list): list of hidden unit sizes. If ``None``, the
standard architecture with ``[256, 256]`` is used.
activation (str): activation function name.
use_batch_norm (bool): flag to insert batch normalization layers.
use_dense (bool): flag to use DenseNet architecture.
dropout_rate (float): dropout probability.
"""
TYPE: ClassVar[str] = "vector"
_hidden_units: Sequence[int]
_activation: str
_use_batch_norm: bool
_dropout_rate: Optional[float]
_use_dense: bool
def __init__(
self,
hidden_units: Optional[Sequence[int]] = None,
activation: str = "relu",
use_batch_norm: bool = False,
dropout_rate: Optional[float] = None,
use_dense: bool = False,
):
if hidden_units is None:
self._hidden_units = [256, 256]
else:
self._hidden_units = hidden_units
self._activation = activation
self._use_batch_norm = use_batch_norm
self._dropout_rate = dropout_rate
self._use_dense = use_dense
def create(self, observation_shape: Sequence[int]) -> VectorEncoder:
assert len(observation_shape) == 1
return VectorEncoder(
observation_shape=observation_shape,
hidden_units=self._hidden_units,
use_batch_norm=self._use_batch_norm,
dropout_rate=self._dropout_rate,
use_dense=self._use_dense,
activation=_create_activation(self._activation),
)
def create_with_action(
self,
observation_shape: Sequence[int],
action_size: int,
discrete_action: bool = False,
) -> VectorEncoderWithAction:
assert len(observation_shape) == 1
return VectorEncoderWithAction(
observation_shape=observation_shape,
action_size=action_size,
hidden_units=self._hidden_units,
use_batch_norm=self._use_batch_norm,
dropout_rate=self._dropout_rate,
use_dense=self._use_dense,
discrete_action=discrete_action,
activation=_create_activation(self._activation),
)
def get_params(self, deep: bool = False) -> Dict[str, Any]:
if deep:
hidden_units = copy.deepcopy(self._hidden_units)
else:
hidden_units = self._hidden_units
params = {
"hidden_units": hidden_units,
"activation": self._activation,
"use_batch_norm": self._use_batch_norm,
"dropout_rate": self._dropout_rate,
"use_dense": self._use_dense,
}
return params
class DefaultEncoderFactory(EncoderFactory):
"""Default encoder factory class.
This encoder factory returns an encoder based on observation shape.
Args:
activation (str): activation function name.
use_batch_norm (bool): flag to insert batch normalization layers.
dropout_rate (float): dropout probability.
"""
TYPE: ClassVar[str] = "default"
_activation: str
_use_batch_norm: bool
_dropout_rate: Optional[float]
def __init__(
self,
activation: str = "relu",
use_batch_norm: bool = False,
dropout_rate: Optional[float] = None,
):
self._activation = activation
self._use_batch_norm = use_batch_norm
self._dropout_rate = dropout_rate
def create(self, observation_shape: Sequence[int]) -> Encoder:
factory: Union[PixelEncoderFactory, VectorEncoderFactory]
if len(observation_shape) == 3:
factory = PixelEncoderFactory(
activation=self._activation,
use_batch_norm=self._use_batch_norm,
dropout_rate=self._dropout_rate,
)
else:
factory = VectorEncoderFactory(
activation=self._activation,
use_batch_norm=self._use_batch_norm,
dropout_rate=self._dropout_rate,
)
return factory.create(observation_shape)
def create_with_action(
self,
observation_shape: Sequence[int],
action_size: int,
discrete_action: bool = False,
) -> EncoderWithAction:
factory: Union[PixelEncoderFactory, VectorEncoderFactory]
if len(observation_shape) == 3:
factory = PixelEncoderFactory(
activation=self._activation,
use_batch_norm=self._use_batch_norm,
dropout_rate=self._dropout_rate,
)
else:
factory = VectorEncoderFactory(
activation=self._activation,
use_batch_norm=self._use_batch_norm,
dropout_rate=self._dropout_rate,
)
return factory.create_with_action(
observation_shape, action_size, discrete_action
)
def get_params(self, deep: bool = False) -> Dict[str, Any]:
return {
"activation": self._activation,
"use_batch_norm": self._use_batch_norm,
"dropout_rate": self._dropout_rate,
}
class DenseEncoderFactory(EncoderFactory):
"""DenseNet encoder factory class.
This is an alias for DenseNet architecture proposed in D2RL.
    This class does exactly the same as the following:
.. code-block:: python
from d3rlpy.encoders import VectorEncoderFactory
factory = VectorEncoderFactory(hidden_units=[256, 256, 256, 256],
use_dense=True)
For now, this only supports vector observations.
References:
* `Sinha et al., D2RL: Deep Dense Architectures in Reinforcement
Learning. <https://arxiv.org/abs/2010.09163>`_
Args:
activation (str): activation function name.
use_batch_norm (bool): flag to insert batch normalization layers.
dropout_rate (float): dropout probability.
"""
TYPE: ClassVar[str] = "dense"
_activation: str
_use_batch_norm: bool
_dropout_rate: Optional[float]
def __init__(
self,
activation: str = "relu",
use_batch_norm: bool = False,
dropout_rate: Optional[float] = None,
):
self._activation = activation
self._use_batch_norm = use_batch_norm
self._dropout_rate = dropout_rate
def create(self, observation_shape: Sequence[int]) -> VectorEncoder:
if len(observation_shape) == 3:
raise NotImplementedError("pixel observation is not supported.")
factory = VectorEncoderFactory(
hidden_units=[256, 256, 256, 256],
activation=self._activation,
use_dense=True,
use_batch_norm=self._use_batch_norm,
dropout_rate=self._dropout_rate,
)
return factory.create(observation_shape)
def create_with_action(
self,
observation_shape: Sequence[int],
action_size: int,
discrete_action: bool = False,
) -> VectorEncoderWithAction:
if len(observation_shape) == 3:
raise NotImplementedError("pixel observation is not supported.")
factory = VectorEncoderFactory(
hidden_units=[256, 256, 256, 256],
activation=self._activation,
use_dense=True,
use_batch_norm=self._use_batch_norm,
dropout_rate=self._dropout_rate,
)
return factory.create_with_action(
observation_shape, action_size, discrete_action
)
def get_params(self, deep: bool = False) -> Dict[str, Any]:
return {
"activation": self._activation,
"use_batch_norm": self._use_batch_norm,
"dropout_rate": self._dropout_rate,
}
ENCODER_LIST: Dict[str, Type[EncoderFactory]] = {}
def register_encoder_factory(cls: Type[EncoderFactory]) -> None:
"""Registers encoder factory class.
Args:
cls: encoder factory class inheriting ``EncoderFactory``.
"""
is_registered = cls.TYPE in ENCODER_LIST
assert not is_registered, f"{cls.TYPE} seems to be already registered"
ENCODER_LIST[cls.TYPE] = cls
def create_encoder_factory(name: str, **kwargs: Any) -> EncoderFactory:
"""Returns registered encoder factory object.
Args:
        name: registered encoder factory type name.
kwargs: encoder arguments.
Returns:
encoder factory object.
"""
assert name in ENCODER_LIST, f"{name} seems not to be registered."
factory = ENCODER_LIST[name](**kwargs) # type: ignore
assert isinstance(factory, EncoderFactory)
return factory
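# Informal usage sketch of the registry below: after registration, encoder
# factories can be built by name, e.g.
# ``create_encoder_factory("vector", hidden_units=[64, 64])`` or
# ``create_encoder_factory("default", dropout_rate=0.1)``.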
register_encoder_factory(VectorEncoderFactory)
register_encoder_factory(PixelEncoderFactory)
register_encoder_factory(DefaultEncoderFactory)
register_encoder_factory(DenseEncoderFactory)
# === end of d3rlpy/models/encoders.py ===
from typing import Sequence, cast
import torch
from torch import nn
from .encoders import EncoderFactory
from .q_functions import QFunctionFactory
from .torch import (
CategoricalPolicy,
ConditionalVAE,
DeterministicPolicy,
DeterministicRegressor,
DeterministicResidualPolicy,
DiscreteImitator,
EnsembleContinuousQFunction,
EnsembleDiscreteQFunction,
Parameter,
ProbabilisticDynamicsModel,
ProbabilisticEnsembleDynamicsModel,
ProbablisticRegressor,
SquashedNormalPolicy,
ValueFunction,
)
def create_discrete_q_function(
observation_shape: Sequence[int],
action_size: int,
encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
n_ensembles: int = 1,
) -> EnsembleDiscreteQFunction:
if q_func_factory.share_encoder:
encoder = encoder_factory.create(observation_shape)
# normalize gradient scale by ensemble size
for p in cast(nn.Module, encoder).parameters():
p.register_hook(lambda grad: grad / n_ensembles)
q_funcs = []
for _ in range(n_ensembles):
if not q_func_factory.share_encoder:
encoder = encoder_factory.create(observation_shape)
q_funcs.append(q_func_factory.create_discrete(encoder, action_size))
return EnsembleDiscreteQFunction(
q_funcs, bootstrap=q_func_factory.bootstrap
)
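# Note on ``share_encoder`` above (and in the continuous variant below): when
# enabled, a single encoder instance is reused by every ensemble member and its
# gradients are divided by ``n_ensembles`` via the registered hook; otherwise
# each Q-function gets its own encoder.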
def create_continuous_q_function(
observation_shape: Sequence[int],
action_size: int,
encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
n_ensembles: int = 1,
) -> EnsembleContinuousQFunction:
if q_func_factory.share_encoder:
encoder = encoder_factory.create_with_action(
observation_shape, action_size
)
# normalize gradient scale by ensemble size
for p in cast(nn.Module, encoder).parameters():
p.register_hook(lambda grad: grad / n_ensembles)
q_funcs = []
for _ in range(n_ensembles):
if not q_func_factory.share_encoder:
encoder = encoder_factory.create_with_action(
observation_shape, action_size
)
q_funcs.append(q_func_factory.create_continuous(encoder))
return EnsembleContinuousQFunction(
q_funcs, bootstrap=q_func_factory.bootstrap
)
def create_deterministic_policy(
observation_shape: Sequence[int],
action_size: int,
encoder_factory: EncoderFactory,
) -> DeterministicPolicy:
encoder = encoder_factory.create(observation_shape)
return DeterministicPolicy(encoder, action_size)
def create_deterministic_residual_policy(
observation_shape: Sequence[int],
action_size: int,
scale: float,
encoder_factory: EncoderFactory,
) -> DeterministicResidualPolicy:
encoder = encoder_factory.create_with_action(observation_shape, action_size)
return DeterministicResidualPolicy(encoder, scale)
def create_squashed_normal_policy(
observation_shape: Sequence[int],
action_size: int,
encoder_factory: EncoderFactory,
min_logstd: float = -20.0,
max_logstd: float = 2.0,
use_std_parameter: bool = False,
) -> SquashedNormalPolicy:
encoder = encoder_factory.create(observation_shape)
return SquashedNormalPolicy(
encoder,
action_size,
min_logstd=min_logstd,
max_logstd=max_logstd,
use_std_parameter=use_std_parameter,
)
def create_categorical_policy(
observation_shape: Sequence[int],
action_size: int,
encoder_factory: EncoderFactory,
) -> CategoricalPolicy:
encoder = encoder_factory.create(observation_shape)
return CategoricalPolicy(encoder, action_size)
def create_conditional_vae(
observation_shape: Sequence[int],
action_size: int,
latent_size: int,
beta: float,
encoder_factory: EncoderFactory,
min_logstd: float = -20.0,
max_logstd: float = 2.0,
) -> ConditionalVAE:
encoder_encoder = encoder_factory.create_with_action(
observation_shape, action_size
)
decoder_encoder = encoder_factory.create_with_action(
observation_shape, latent_size
)
return ConditionalVAE(
encoder_encoder,
decoder_encoder,
beta,
min_logstd=min_logstd,
max_logstd=max_logstd,
)
def create_discrete_imitator(
observation_shape: Sequence[int],
action_size: int,
beta: float,
encoder_factory: EncoderFactory,
) -> DiscreteImitator:
encoder = encoder_factory.create(observation_shape)
return DiscreteImitator(encoder, action_size, beta)
def create_deterministic_regressor(
observation_shape: Sequence[int],
action_size: int,
encoder_factory: EncoderFactory,
) -> DeterministicRegressor:
encoder = encoder_factory.create(observation_shape)
return DeterministicRegressor(encoder, action_size)
def create_probablistic_regressor(
observation_shape: Sequence[int],
action_size: int,
encoder_factory: EncoderFactory,
min_logstd: float = -20.0,
max_logstd: float = 2.0,
) -> ProbablisticRegressor:
encoder = encoder_factory.create(observation_shape)
return ProbablisticRegressor(
encoder, action_size, min_logstd=min_logstd, max_logstd=max_logstd
)
def create_value_function(
observation_shape: Sequence[int], encoder_factory: EncoderFactory
) -> ValueFunction:
encoder = encoder_factory.create(observation_shape)
return ValueFunction(encoder)
def create_probabilistic_ensemble_dynamics_model(
observation_shape: Sequence[int],
action_size: int,
encoder_factory: EncoderFactory,
n_ensembles: int = 5,
discrete_action: bool = False,
) -> ProbabilisticEnsembleDynamicsModel:
models = []
for _ in range(n_ensembles):
encoder = encoder_factory.create_with_action(
observation_shape=observation_shape,
action_size=action_size,
discrete_action=discrete_action,
)
model = ProbabilisticDynamicsModel(encoder)
models.append(model)
return ProbabilisticEnsembleDynamicsModel(models)
def create_parameter(shape: Sequence[int], initial_value: float) -> Parameter:
data = torch.full(shape, initial_value, dtype=torch.float32)
    return Parameter(data)
# === end of d3rlpy/models/builders.py ===
import math
from abc import ABCMeta, abstractmethod
from typing import Tuple, Union, cast
import torch
import torch.nn.functional as F
from torch import nn
from torch.distributions import Categorical, Normal
from .encoders import Encoder, EncoderWithAction
def squash_action(
dist: torch.distributions.Distribution, raw_action: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
squashed_action = torch.tanh(raw_action)
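    # log|d tanh(u)/du| = log(1 - tanh(u)^2), computed in the numerically
    # stable form 2 * (log(2) - u - softplus(-2u))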
jacob = 2 * (math.log(2) - raw_action - F.softplus(-2 * raw_action))
log_prob = (dist.log_prob(raw_action) - jacob).sum(dim=-1, keepdims=True)
return squashed_action, log_prob
class Policy(nn.Module, metaclass=ABCMeta): # type: ignore
def sample(self, x: torch.Tensor) -> torch.Tensor:
return self.sample_with_log_prob(x)[0]
@abstractmethod
def sample_with_log_prob(
self, x: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
pass
def sample_n(self, x: torch.Tensor, n: int) -> torch.Tensor:
return self.sample_n_with_log_prob(x, n)[0]
@abstractmethod
def sample_n_with_log_prob(
self, x: torch.Tensor, n: int
) -> Tuple[torch.Tensor, torch.Tensor]:
pass
@abstractmethod
def best_action(self, x: torch.Tensor) -> torch.Tensor:
pass
class DeterministicPolicy(Policy):
_encoder: Encoder
_fc: nn.Linear
def __init__(self, encoder: Encoder, action_size: int):
super().__init__()
self._encoder = encoder
self._fc = nn.Linear(encoder.get_feature_size(), action_size)
def forward(self, x: torch.Tensor) -> torch.Tensor:
h = self._encoder(x)
return torch.tanh(self._fc(h))
def __call__(self, x: torch.Tensor) -> torch.Tensor:
return cast(torch.Tensor, super().__call__(x))
def sample_with_log_prob(
self, x: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError(
"deterministic policy does not support sample"
)
def sample_n_with_log_prob(
self, x: torch.Tensor, n: int
) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError(
"deterministic policy does not support sample_n"
)
def best_action(self, x: torch.Tensor) -> torch.Tensor:
return self.forward(x)
class DeterministicResidualPolicy(Policy):
_encoder: EncoderWithAction
_scale: float
_fc: nn.Linear
def __init__(self, encoder: EncoderWithAction, scale: float):
super().__init__()
self._scale = scale
self._encoder = encoder
self._fc = nn.Linear(encoder.get_feature_size(), encoder.action_size)
def forward(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
h = self._encoder(x, action)
residual_action = self._scale * torch.tanh(self._fc(h))
return (action + cast(torch.Tensor, residual_action)).clamp(-1.0, 1.0)
def __call__(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
return cast(torch.Tensor, super().__call__(x, action))
def best_residual_action(
self, x: torch.Tensor, action: torch.Tensor
) -> torch.Tensor:
return self.forward(x, action)
def best_action(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedError(
"residual policy does not support best_action"
)
def sample_with_log_prob(
self, x: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError(
"deterministic policy does not support sample"
)
def sample_n_with_log_prob(
self, x: torch.Tensor, n: int
) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError(
"deterministic policy does not support sample_n"
)
class SquashedNormalPolicy(Policy):
_encoder: Encoder
_action_size: int
_min_logstd: float
_max_logstd: float
_use_std_parameter: bool
_mu: nn.Linear
_logstd: Union[nn.Linear, nn.Parameter]
def __init__(
self,
encoder: Encoder,
action_size: int,
min_logstd: float,
max_logstd: float,
use_std_parameter: bool,
):
super().__init__()
self._action_size = action_size
self._encoder = encoder
self._min_logstd = min_logstd
self._max_logstd = max_logstd
self._use_std_parameter = use_std_parameter
self._mu = nn.Linear(encoder.get_feature_size(), action_size)
if use_std_parameter:
initial_logstd = torch.zeros(1, action_size, dtype=torch.float32)
self._logstd = nn.Parameter(initial_logstd)
else:
self._logstd = nn.Linear(encoder.get_feature_size(), action_size)
def _compute_logstd(self, h: torch.Tensor) -> torch.Tensor:
if self._use_std_parameter:
clipped_logstd = self.get_logstd_parameter()
else:
logstd = cast(nn.Linear, self._logstd)(h)
clipped_logstd = logstd.clamp(self._min_logstd, self._max_logstd)
return clipped_logstd
def dist(self, x: torch.Tensor) -> Normal:
h = self._encoder(x)
mu = self._mu(h)
clipped_logstd = self._compute_logstd(h)
return Normal(mu, clipped_logstd.exp())
def forward(
self,
x: torch.Tensor,
deterministic: bool = False,
with_log_prob: bool = False,
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
if deterministic:
# to avoid errors at ONNX export because broadcast_tensors in
# Normal distribution is not supported by ONNX
action = self._mu(self._encoder(x))
else:
dist = self.dist(x)
action = dist.rsample()
if with_log_prob:
return squash_action(dist, action)
return torch.tanh(action)
def sample_with_log_prob(
self, x: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
out = self.forward(x, with_log_prob=True)
return cast(Tuple[torch.Tensor, torch.Tensor], out)
def sample_n_with_log_prob(
self, x: torch.Tensor, n: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
dist = self.dist(x)
action = dist.rsample((n,))
squashed_action_T, log_prob_T = squash_action(dist, action)
# (n, batch, action) -> (batch, n, action)
squashed_action = squashed_action_T.transpose(0, 1)
# (n, batch, 1) -> (batch, n, 1)
log_prob = log_prob_T.transpose(0, 1)
return squashed_action, log_prob
def sample_n_without_squash(self, x: torch.Tensor, n: int) -> torch.Tensor:
dist = self.dist(x)
action = dist.rsample((n,))
return action.transpose(0, 1)
def onnx_safe_sample_n(self, x: torch.Tensor, n: int) -> torch.Tensor:
h = self._encoder(x)
mean = self._mu(h)
std = self._compute_logstd(h).exp()
# expand shape
# (batch_size, action_size) -> (batch_size, N, action_size)
expanded_mean = mean.view(-1, 1, self._action_size).repeat((1, n, 1))
expanded_std = std.view(-1, 1, self._action_size).repeat((1, n, 1))
# sample noise from Gaussian distribution
noise = torch.randn(x.shape[0], n, self._action_size, device=x.device)
return torch.tanh(expanded_mean + noise * expanded_std)
def best_action(self, x: torch.Tensor) -> torch.Tensor:
action = self.forward(x, deterministic=True, with_log_prob=False)
return cast(torch.Tensor, action)
def get_logstd_parameter(self) -> torch.Tensor:
assert self._use_std_parameter
logstd = torch.sigmoid(cast(nn.Parameter, self._logstd))
base_logstd = self._max_logstd - self._min_logstd
return self._min_logstd + logstd * base_logstd
class CategoricalPolicy(Policy):
_encoder: Encoder
_fc: nn.Linear
def __init__(self, encoder: Encoder, action_size: int):
super().__init__()
self._encoder = encoder
self._fc = nn.Linear(encoder.get_feature_size(), action_size)
def dist(self, x: torch.Tensor) -> Categorical:
h = self._encoder(x)
h = self._fc(h)
return Categorical(torch.softmax(h, dim=1))
def forward(
self,
x: torch.Tensor,
deterministic: bool = False,
with_log_prob: bool = False,
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
dist = self.dist(x)
if deterministic:
action = cast(torch.Tensor, dist.probs.argmax(dim=1))
else:
action = cast(torch.Tensor, dist.sample())
if with_log_prob:
return action, dist.log_prob(action)
return action
def sample_with_log_prob(
self, x: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
out = self.forward(x, with_log_prob=True)
return cast(Tuple[torch.Tensor, torch.Tensor], out)
def sample_n_with_log_prob(
self, x: torch.Tensor, n: int
) -> Tuple[torch.Tensor, torch.Tensor]:
dist = self.dist(x)
action_T = cast(torch.Tensor, dist.sample((n,)))
log_prob_T = dist.log_prob(action_T)
# (n, batch) -> (batch, n)
action = action_T.transpose(0, 1)
# (n, batch) -> (batch, n)
log_prob = log_prob_T.transpose(0, 1)
return action, log_prob
def best_action(self, x: torch.Tensor) -> torch.Tensor:
return cast(torch.Tensor, self.forward(x, deterministic=True))
def log_probs(self, x: torch.Tensor) -> torch.Tensor:
dist = self.dist(x)
        return cast(torch.Tensor, dist.logits)
# === end of d3rlpy/models/torch/policies.py ===
from abc import ABCMeta, abstractmethod
from typing import Tuple, cast
import torch
import torch.nn.functional as F
from torch import nn
from torch.distributions import Normal
from torch.distributions.kl import kl_divergence
from .encoders import Encoder, EncoderWithAction
class ConditionalVAE(nn.Module): # type: ignore
_encoder_encoder: EncoderWithAction
_decoder_encoder: EncoderWithAction
_beta: float
_min_logstd: float
_max_logstd: float
_action_size: int
_latent_size: int
_mu: nn.Linear
_logstd: nn.Linear
_fc: nn.Linear
def __init__(
self,
encoder_encoder: EncoderWithAction,
decoder_encoder: EncoderWithAction,
beta: float,
min_logstd: float = -20.0,
max_logstd: float = 2.0,
):
super().__init__()
self._encoder_encoder = encoder_encoder
self._decoder_encoder = decoder_encoder
self._beta = beta
self._min_logstd = min_logstd
self._max_logstd = max_logstd
self._action_size = encoder_encoder.action_size
self._latent_size = decoder_encoder.action_size
# encoder
self._mu = nn.Linear(
encoder_encoder.get_feature_size(), self._latent_size
)
self._logstd = nn.Linear(
encoder_encoder.get_feature_size(), self._latent_size
)
# decoder
self._fc = nn.Linear(
decoder_encoder.get_feature_size(), self._action_size
)
def forward(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
dist = self.encode(x, action)
return self.decode(x, dist.rsample())
def __call__(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
return cast(torch.Tensor, super().__call__(x, action))
def encode(self, x: torch.Tensor, action: torch.Tensor) -> Normal:
h = self._encoder_encoder(x, action)
mu = self._mu(h)
logstd = self._logstd(h)
clipped_logstd = logstd.clamp(self._min_logstd, self._max_logstd)
return Normal(mu, clipped_logstd.exp())
def decode(self, x: torch.Tensor, latent: torch.Tensor) -> torch.Tensor:
h = self._decoder_encoder(x, latent)
return torch.tanh(self._fc(h))
def decode_without_squash(
self, x: torch.Tensor, latent: torch.Tensor
) -> torch.Tensor:
h = self._decoder_encoder(x, latent)
return self._fc(h)
def compute_error(
self, x: torch.Tensor, action: torch.Tensor
) -> torch.Tensor:
dist = self.encode(x, action)
kl_loss = kl_divergence(dist, Normal(0.0, 1.0)).mean()
y = self.decode(x, dist.rsample())
return F.mse_loss(y, action) + cast(torch.Tensor, self._beta * kl_loss)
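    # The error above is the standard beta-VAE objective: reconstruction MSE of
    # the decoded action plus ``beta`` times the KL divergence between the
    # approximate posterior and a standard normal prior.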
def sample(self, x: torch.Tensor) -> torch.Tensor:
latent = torch.randn((x.shape[0], self._latent_size), device=x.device)
# to prevent extreme numbers
return self.decode(x, latent.clamp(-0.5, 0.5))
def sample_n(
self, x: torch.Tensor, n: int, with_squash: bool = True
) -> torch.Tensor:
flat_latent_shape = (n * x.shape[0], self._latent_size)
flat_latent = torch.randn(flat_latent_shape, device=x.device)
# to prevent extreme numbers
clipped_latent = flat_latent.clamp(-0.5, 0.5)
# (batch, obs) -> (n, batch, obs)
repeated_x = x.expand((n, *x.shape))
# (n, batch, obs) -> (n * batch, obs)
flat_x = repeated_x.reshape(-1, *x.shape[1:])
if with_squash:
flat_actions = self.decode(flat_x, clipped_latent)
else:
flat_actions = self.decode_without_squash(flat_x, clipped_latent)
# (n * batch, action) -> (n, batch, action)
actions = flat_actions.view(n, x.shape[0], -1)
# (n, batch, action) -> (batch, n, action)
return actions.transpose(0, 1)
def sample_n_without_squash(self, x: torch.Tensor, n: int) -> torch.Tensor:
return self.sample_n(x, n, with_squash=False)
class Imitator(nn.Module, metaclass=ABCMeta): # type: ignore
@abstractmethod
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
def __call__(self, x: torch.Tensor) -> torch.Tensor:
return cast(torch.Tensor, super().__call__(x))
@abstractmethod
def compute_error(
self, x: torch.Tensor, action: torch.Tensor
) -> torch.Tensor:
pass
class DiscreteImitator(Imitator):
_encoder: Encoder
_beta: float
_fc: nn.Linear
def __init__(self, encoder: Encoder, action_size: int, beta: float):
super().__init__()
self._encoder = encoder
self._beta = beta
self._fc = nn.Linear(encoder.get_feature_size(), action_size)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.compute_log_probs_with_logits(x)[0]
def compute_log_probs_with_logits(
self, x: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
h = self._encoder(x)
logits = self._fc(h)
log_probs = F.log_softmax(logits, dim=1)
return log_probs, logits
def compute_error(
self, x: torch.Tensor, action: torch.Tensor
) -> torch.Tensor:
log_probs, logits = self.compute_log_probs_with_logits(x)
penalty = (logits ** 2).mean()
return F.nll_loss(log_probs, action.view(-1)) + self._beta * penalty
class DeterministicRegressor(Imitator):
_encoder: Encoder
_fc: nn.Linear
def __init__(self, encoder: Encoder, action_size: int):
super().__init__()
self._encoder = encoder
self._fc = nn.Linear(encoder.get_feature_size(), action_size)
def forward(self, x: torch.Tensor) -> torch.Tensor:
h = self._encoder(x)
h = self._fc(h)
return torch.tanh(h)
def compute_error(
self, x: torch.Tensor, action: torch.Tensor
) -> torch.Tensor:
return F.mse_loss(self.forward(x), action)
class ProbablisticRegressor(Imitator):
_min_logstd: float
_max_logstd: float
_encoder: Encoder
_mu: nn.Linear
_logstd: nn.Linear
def __init__(
self,
encoder: Encoder,
action_size: int,
min_logstd: float,
max_logstd: float,
):
super().__init__()
self._min_logstd = min_logstd
self._max_logstd = max_logstd
self._encoder = encoder
self._mu = nn.Linear(encoder.get_feature_size(), action_size)
self._logstd = nn.Linear(encoder.get_feature_size(), action_size)
def dist(self, x: torch.Tensor) -> Normal:
h = self._encoder(x)
mu = self._mu(h)
logstd = self._logstd(h)
clipped_logstd = logstd.clamp(self._min_logstd, self._max_logstd)
return Normal(mu, clipped_logstd.exp())
def forward(self, x: torch.Tensor) -> torch.Tensor:
h = self._encoder(x)
mu = self._mu(h)
return torch.tanh(mu)
def sample_n(self, x: torch.Tensor, n: int) -> torch.Tensor:
dist = self.dist(x)
actions = cast(torch.Tensor, dist.rsample((n,)))
# (n, batch, action) -> (batch, n, action)
return actions.transpose(0, 1)
def compute_error(
self, x: torch.Tensor, action: torch.Tensor
) -> torch.Tensor:
dist = self.dist(x)
        return F.mse_loss(torch.tanh(dist.rsample()), action)
# === end of d3rlpy/models/torch/imitators.py ===
from typing import List, Optional, Tuple, cast
import torch
import torch.nn.functional as F
from torch import nn
from torch.distributions import Normal
from torch.nn.utils import spectral_norm
from .encoders import EncoderWithAction
def _compute_ensemble_variance(
observations: torch.Tensor,
rewards: torch.Tensor,
variances: torch.Tensor,
variance_type: str,
) -> torch.Tensor:
if variance_type == "max":
return variances.max(dim=1).values
elif variance_type == "data":
data = torch.cat([observations, rewards], dim=2)
return (data.std(dim=1) ** 2).sum(dim=1, keepdim=True)
raise ValueError(f"invalid variance_type: {variance_type}")
def _apply_spectral_norm_recursively(model: nn.Module) -> None:
for _, module in model.named_children():
if isinstance(module, nn.ModuleList):
for m in module:
_apply_spectral_norm_recursively(m)
else:
if "weight" in module._parameters:
spectral_norm(module)
def _gaussian_likelihood(
x: torch.Tensor, mu: torch.Tensor, logstd: torch.Tensor
) -> torch.Tensor:
inv_std = torch.exp(-logstd)
return (((mu - x) ** 2) * inv_std).mean(dim=1, keepdim=True)
class ProbabilisticDynamicsModel(nn.Module): # type: ignore
"""Probabilistic dynamics model.
References:
* `Janner et al., When to Trust Your Model: Model-Based Policy
Optimization. <https://arxiv.org/abs/1906.08253>`_
* `Chua et al., Deep Reinforcement Learning in a Handful of Trials
using Probabilistic Dynamics Models.
<https://arxiv.org/abs/1805.12114>`_
"""
_encoder: EncoderWithAction
_mu: nn.Linear
_logstd: nn.Linear
_max_logstd: nn.Parameter
_min_logstd: nn.Parameter
def __init__(self, encoder: EncoderWithAction):
super().__init__()
        # apply spectral normalization to all layers except the logstd head
_apply_spectral_norm_recursively(cast(nn.Module, encoder))
self._encoder = encoder
feature_size = encoder.get_feature_size()
observation_size = encoder.observation_shape[0]
out_size = observation_size + 1
# TODO: handle image observation
self._mu = spectral_norm(nn.Linear(feature_size, out_size))
self._logstd = nn.Linear(feature_size, out_size)
# logstd bounds
init_max = torch.empty(1, out_size, dtype=torch.float32).fill_(2.0)
init_min = torch.empty(1, out_size, dtype=torch.float32).fill_(-10.0)
self._max_logstd = nn.Parameter(init_max)
self._min_logstd = nn.Parameter(init_min)
def compute_stats(
self, x: torch.Tensor, action: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
h = self._encoder(x, action)
mu = self._mu(h)
        # softly clamp the log standard deviation between learnable bounds
logstd = self._logstd(h)
logstd = self._max_logstd - F.softplus(self._max_logstd - logstd)
logstd = self._min_logstd + F.softplus(logstd - self._min_logstd)
return mu, logstd
def forward(
self, x: torch.Tensor, action: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
return self.predict_with_variance(x, action)[:2]
def predict_with_variance(
self, x: torch.Tensor, action: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, logstd = self.compute_stats(x, action)
dist = Normal(mu, logstd.exp())
pred = dist.rsample()
# residual prediction
next_x = x + pred[:, :-1]
next_reward = pred[:, -1].view(-1, 1)
return next_x, next_reward, dist.variance.sum(dim=1, keepdims=True)
def compute_error(
self,
obs_t: torch.Tensor,
act_t: torch.Tensor,
rew_tp1: torch.Tensor,
obs_tp1: torch.Tensor,
) -> torch.Tensor:
mu, logstd = self.compute_stats(obs_t, act_t)
# residual prediction
mu_x = obs_t + mu[:, :-1]
mu_reward = mu[:, -1].view(-1, 1)
logstd_x = logstd[:, :-1]
logstd_reward = logstd[:, -1].view(-1, 1)
# gaussian likelihood loss
likelihood_loss = _gaussian_likelihood(obs_tp1, mu_x, logstd_x)
likelihood_loss += _gaussian_likelihood(
rew_tp1, mu_reward, logstd_reward
)
# penalty to minimize standard deviation
penalty = logstd.sum(dim=1, keepdim=True)
# minimize logstd bounds
bound_loss = self._max_logstd.sum() - self._min_logstd.sum()
loss = likelihood_loss + penalty + 1e-2 * bound_loss
return loss.view(-1, 1)
class ProbabilisticEnsembleDynamicsModel(nn.Module): # type: ignore
_models: nn.ModuleList
def __init__(self, models: List[ProbabilisticDynamicsModel]):
super().__init__()
self._models = nn.ModuleList(models)
def forward(
self,
x: torch.Tensor,
action: torch.Tensor,
indices: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
return self.predict_with_variance(x, action, indices=indices)[:2]
def __call__(
self,
x: torch.Tensor,
action: torch.Tensor,
indices: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
return cast(
Tuple[torch.Tensor, torch.Tensor],
super().__call__(x, action, indices),
)
def predict_with_variance(
self,
x: torch.Tensor,
action: torch.Tensor,
variance_type: str = "data",
indices: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
observations_list: List[torch.Tensor] = []
rewards_list: List[torch.Tensor] = []
variances_list: List[torch.Tensor] = []
# predict next observation and reward
for model in self._models:
obs, rew, var = model.predict_with_variance(x, action)
observations_list.append(obs.view(1, x.shape[0], -1))
rewards_list.append(rew.view(1, x.shape[0], 1))
variances_list.append(var.view(1, x.shape[0], 1))
# (ensemble, batch, -1) -> (batch, ensemble, -1)
observations = torch.cat(observations_list, dim=0).transpose(0, 1)
rewards = torch.cat(rewards_list, dim=0).transpose(0, 1)
variances = torch.cat(variances_list, dim=0).transpose(0, 1)
variances = _compute_ensemble_variance(
observations=observations,
rewards=rewards,
variances=variances,
variance_type=variance_type,
)
if indices is None:
return observations, rewards, variances
# pick samples based on indices
partial_observations = observations[torch.arange(x.shape[0]), indices]
partial_rewards = rewards[torch.arange(x.shape[0]), indices]
return partial_observations, partial_rewards, variances
def compute_error(
self,
obs_t: torch.Tensor,
act_t: torch.Tensor,
rew_tp1: torch.Tensor,
obs_tp1: torch.Tensor,
masks: Optional[torch.Tensor] = None,
) -> torch.Tensor:
loss_sum = torch.tensor(0.0, dtype=torch.float32, device=obs_t.device)
for i, model in enumerate(self._models):
loss = model.compute_error(obs_t, act_t, rew_tp1, obs_tp1)
assert loss.shape == (obs_t.shape[0], 1)
# create mask if necessary
if masks is None:
mask = torch.randint(0, 2, size=loss.shape, device=obs_t.device)
else:
mask = masks[i]
loss_sum += (loss * mask).mean()
return loss_sum
@property
def models(self) -> nn.ModuleList:
return self._models | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/models/torch/dynamics.py | dynamics.py |
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Sequence
import torch
import torch.nn.functional as F
from torch import nn
from ...itertools import last_flag
from ...torch_utility import View
class Encoder(metaclass=ABCMeta):
@abstractmethod
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
@abstractmethod
def get_feature_size(self) -> int:
pass
@property
def observation_shape(self) -> Sequence[int]:
pass
@abstractmethod
def __call__(self, x: torch.Tensor) -> torch.Tensor:
pass
def create_reverse(self) -> Sequence[torch.nn.Module]:
raise NotImplementedError
@property
def last_layer(self) -> nn.Linear:
raise NotImplementedError
class EncoderWithAction(metaclass=ABCMeta):
@abstractmethod
def forward(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
pass
@abstractmethod
def get_feature_size(self) -> int:
pass
@property
def action_size(self) -> int:
pass
@property
def observation_shape(self) -> Sequence[int]:
pass
@abstractmethod
def __call__(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
pass
def create_reverse(self) -> Sequence[torch.nn.Module]:
raise NotImplementedError
@property
def last_layer(self) -> nn.Linear:
raise NotImplementedError
class _PixelEncoder(nn.Module): # type: ignore
_observation_shape: Sequence[int]
_feature_size: int
_use_batch_norm: bool
_dropout_rate: Optional[float]
_activation: nn.Module
_convs: nn.ModuleList
_conv_bns: nn.ModuleList
_fc: nn.Linear
_fc_bn: nn.BatchNorm1d
_dropouts: nn.ModuleList
def __init__(
self,
observation_shape: Sequence[int],
filters: Optional[List[Sequence[int]]] = None,
feature_size: int = 512,
use_batch_norm: bool = False,
        dropout_rate: Optional[float] = None,
activation: nn.Module = nn.ReLU(),
):
super().__init__()
# default architecture is based on Nature DQN paper.
if filters is None:
filters = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
if feature_size is None:
feature_size = 512
self._observation_shape = observation_shape
self._use_batch_norm = use_batch_norm
self._dropout_rate = dropout_rate
self._activation = activation
self._feature_size = feature_size
# convolutional layers
in_channels = [observation_shape[0]] + [f[0] for f in filters[:-1]]
self._convs = nn.ModuleList()
self._conv_bns = nn.ModuleList()
self._dropouts = nn.ModuleList()
for in_channel, f in zip(in_channels, filters):
out_channel, kernel_size, stride = f
conv = nn.Conv2d(
in_channel, out_channel, kernel_size=kernel_size, stride=stride
)
self._convs.append(conv)
# use batch normalization layer
if use_batch_norm:
self._conv_bns.append(nn.BatchNorm2d(out_channel))
# use dropout layer
if dropout_rate is not None:
self._dropouts.append(nn.Dropout2d(dropout_rate))
# last dense layer
self._fc = nn.Linear(self._get_linear_input_size(), feature_size)
if use_batch_norm:
self._fc_bn = nn.BatchNorm1d(feature_size)
if dropout_rate is not None:
self._dropouts.append(nn.Dropout(dropout_rate))
def _get_linear_input_size(self) -> int:
x = torch.rand((1,) + tuple(self._observation_shape))
with torch.no_grad():
return self._conv_encode(x).view(1, -1).shape[1] # type: ignore
def _get_last_conv_shape(self) -> Sequence[int]:
x = torch.rand((1,) + tuple(self._observation_shape))
with torch.no_grad():
return self._conv_encode(x).shape # type: ignore
def _conv_encode(self, x: torch.Tensor) -> torch.Tensor:
h = x
for i, conv in enumerate(self._convs):
h = self._activation(conv(h))
if self._use_batch_norm:
h = self._conv_bns[i](h)
if self._dropout_rate is not None:
h = self._dropouts[i](h)
return h
def get_feature_size(self) -> int:
return self._feature_size
@property
def observation_shape(self) -> Sequence[int]:
return self._observation_shape
@property
def last_layer(self) -> nn.Linear:
return self._fc
class PixelEncoder(_PixelEncoder, Encoder):
def forward(self, x: torch.Tensor) -> torch.Tensor:
h = self._conv_encode(x)
h = self._activation(self._fc(h.view(h.shape[0], -1)))
if self._use_batch_norm:
h = self._fc_bn(h)
if self._dropout_rate is not None:
h = self._dropouts[-1](h)
return h
def create_reverse(self) -> Sequence[torch.nn.Module]:
modules: List[torch.nn.Module] = []
# add linear layer
modules.append(nn.Linear(self.get_feature_size(), self._fc.in_features))
modules.append(self._activation)
# reshape output
modules.append(View((-1, *self._get_last_conv_shape()[1:])))
# add conv layers
for is_last, conv in last_flag(reversed(self._convs)):
deconv = nn.ConvTranspose2d(
conv.out_channels,
conv.in_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
)
modules.append(deconv)
if not is_last:
modules.append(self._activation)
return modules
class PixelEncoderWithAction(_PixelEncoder, EncoderWithAction):
_action_size: int
_discrete_action: bool
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
filters: Optional[List[Sequence[int]]] = None,
feature_size: int = 512,
use_batch_norm: bool = False,
dropout_rate: Optional[float] = None,
discrete_action: bool = False,
activation: nn.Module = nn.ReLU(),
):
self._action_size = action_size
self._discrete_action = discrete_action
super().__init__(
observation_shape=observation_shape,
filters=filters,
feature_size=feature_size,
use_batch_norm=use_batch_norm,
dropout_rate=dropout_rate,
activation=activation,
)
def _get_linear_input_size(self) -> int:
size = super()._get_linear_input_size()
return size + self._action_size
def forward(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
h = self._conv_encode(x)
if self._discrete_action:
action = F.one_hot(
action.view(-1).long(), num_classes=self._action_size
).float()
        # concat feature and action
h = torch.cat([h.view(h.shape[0], -1), action], dim=1)
h = self._activation(self._fc(h))
if self._use_batch_norm:
h = self._fc_bn(h)
if self._dropout_rate is not None:
h = self._dropouts[-1](h)
return h
@property
def action_size(self) -> int:
return self._action_size
def create_reverse(self) -> Sequence[torch.nn.Module]:
modules: List[torch.nn.Module] = []
# add linear layer
in_features = self._fc.in_features - self._action_size
modules.append(nn.Linear(self.get_feature_size(), in_features))
modules.append(self._activation)
# reshape output
modules.append(View((-1, *self._get_last_conv_shape()[1:])))
# add conv layers
for is_last, conv in last_flag(reversed(self._convs)):
deconv = nn.ConvTranspose2d(
conv.out_channels,
conv.in_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
)
modules.append(deconv)
if not is_last:
modules.append(self._activation)
return modules
class _VectorEncoder(nn.Module): # type: ignore
_observation_shape: Sequence[int]
_use_batch_norm: bool
_dropout_rate: Optional[float]
_use_dense: bool
_activation: nn.Module
_feature_size: int
_fcs: nn.ModuleList
_bns: nn.ModuleList
_dropouts: nn.ModuleList
def __init__(
self,
observation_shape: Sequence[int],
hidden_units: Optional[Sequence[int]] = None,
use_batch_norm: bool = False,
dropout_rate: Optional[float] = None,
use_dense: bool = False,
activation: nn.Module = nn.ReLU(),
):
super().__init__()
self._observation_shape = observation_shape
if hidden_units is None:
hidden_units = [256, 256]
self._use_batch_norm = use_batch_norm
self._dropout_rate = dropout_rate
self._feature_size = hidden_units[-1]
self._activation = activation
self._use_dense = use_dense
in_units = [observation_shape[0]] + list(hidden_units[:-1])
self._fcs = nn.ModuleList()
self._bns = nn.ModuleList()
self._dropouts = nn.ModuleList()
for i, (in_unit, out_unit) in enumerate(zip(in_units, hidden_units)):
if use_dense and i > 0:
in_unit += observation_shape[0]
self._fcs.append(nn.Linear(in_unit, out_unit))
if use_batch_norm:
self._bns.append(nn.BatchNorm1d(out_unit))
if dropout_rate is not None:
self._dropouts.append(nn.Dropout(dropout_rate))
def _fc_encode(self, x: torch.Tensor) -> torch.Tensor:
h = x
for i, fc in enumerate(self._fcs):
if self._use_dense and i > 0:
h = torch.cat([h, x], dim=1)
h = self._activation(fc(h))
if self._use_batch_norm:
h = self._bns[i](h)
if self._dropout_rate is not None:
h = self._dropouts[i](h)
return h
def get_feature_size(self) -> int:
return self._feature_size
@property
def observation_shape(self) -> Sequence[int]:
return self._observation_shape
@property
def last_layer(self) -> nn.Linear:
return self._fcs[-1]
class VectorEncoder(_VectorEncoder, Encoder):
def forward(self, x: torch.Tensor) -> torch.Tensor:
h = self._fc_encode(x)
if self._use_batch_norm:
h = self._bns[-1](h)
if self._dropout_rate is not None:
h = self._dropouts[-1](h)
return h
def create_reverse(self) -> Sequence[torch.nn.Module]:
assert not self._use_dense, "use_dense=True is not supported yet"
modules: List[torch.nn.Module] = []
for is_last, fc in last_flag(reversed(self._fcs)):
modules.append(nn.Linear(fc.out_features, fc.in_features))
if not is_last:
modules.append(self._activation)
return modules
class VectorEncoderWithAction(_VectorEncoder, EncoderWithAction):
_action_size: int
_discrete_action: bool
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
hidden_units: Optional[Sequence[int]] = None,
use_batch_norm: bool = False,
dropout_rate: Optional[float] = None,
use_dense: bool = False,
discrete_action: bool = False,
activation: nn.Module = nn.ReLU(),
):
self._action_size = action_size
self._discrete_action = discrete_action
concat_shape = (observation_shape[0] + action_size,)
super().__init__(
observation_shape=concat_shape,
hidden_units=hidden_units,
use_batch_norm=use_batch_norm,
use_dense=use_dense,
dropout_rate=dropout_rate,
activation=activation,
)
self._observation_shape = observation_shape
def forward(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
if self._discrete_action:
action = F.one_hot(
action.view(-1).long(), num_classes=self.action_size
).float()
x = torch.cat([x, action], dim=1)
h = self._fc_encode(x)
if self._use_batch_norm:
h = self._bns[-1](h)
if self._dropout_rate is not None:
h = self._dropouts[-1](h)
return h
@property
def action_size(self) -> int:
return self._action_size
def create_reverse(self) -> Sequence[torch.nn.Module]:
assert not self._use_dense, "use_dense=True is not supported yet"
modules: List[torch.nn.Module] = []
for is_last, fc in last_flag(reversed(self._fcs)):
if is_last:
in_features = fc.in_features - self._action_size
else:
in_features = fc.in_features
modules.append(nn.Linear(fc.out_features, in_features))
if not is_last:
modules.append(self._activation)
return modules | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/models/torch/encoders.py | encoders.py |
from .dynamics import (
ProbabilisticDynamicsModel,
ProbabilisticEnsembleDynamicsModel,
)
from .encoders import (
Encoder,
EncoderWithAction,
PixelEncoder,
PixelEncoderWithAction,
VectorEncoder,
VectorEncoderWithAction,
)
from .imitators import (
ConditionalVAE,
DeterministicRegressor,
DiscreteImitator,
Imitator,
ProbablisticRegressor,
)
from .parameters import Parameter
from .policies import (
CategoricalPolicy,
DeterministicPolicy,
DeterministicResidualPolicy,
Policy,
SquashedNormalPolicy,
squash_action,
)
from .q_functions import (
compute_max_with_n_actions,
compute_max_with_n_actions_and_indices,
)
from .q_functions.base import ContinuousQFunction, DiscreteQFunction
from .q_functions.ensemble_q_function import (
EnsembleContinuousQFunction,
EnsembleDiscreteQFunction,
EnsembleQFunction,
)
from .q_functions.fqf_q_function import (
ContinuousFQFQFunction,
DiscreteFQFQFunction,
)
from .q_functions.iqn_q_function import (
ContinuousIQNQFunction,
DiscreteIQNQFunction,
)
from .q_functions.mean_q_function import (
ContinuousMeanQFunction,
DiscreteMeanQFunction,
)
from .q_functions.qr_q_function import (
ContinuousQRQFunction,
DiscreteQRQFunction,
)
from .v_functions import ValueFunction
__all__ = [
"Encoder",
"EncoderWithAction",
"PixelEncoder",
"PixelEncoderWithAction",
"VectorEncoder",
"VectorEncoderWithAction",
"Policy",
"squash_action",
"DeterministicPolicy",
"DeterministicResidualPolicy",
"SquashedNormalPolicy",
"CategoricalPolicy",
"DiscreteQFunction",
"ContinuousQFunction",
"EnsembleQFunction",
"DiscreteMeanQFunction",
"ContinuousMeanQFunction",
"DiscreteQRQFunction",
"ContinuousQRQFunction",
"DiscreteIQNQFunction",
"ContinuousIQNQFunction",
"DiscreteFQFQFunction",
"ContinuousFQFQFunction",
"EnsembleDiscreteQFunction",
"EnsembleContinuousQFunction",
"compute_max_with_n_actions",
"compute_max_with_n_actions_and_indices",
"ValueFunction",
"ConditionalVAE",
"Imitator",
"DiscreteImitator",
"DeterministicRegressor",
"ProbablisticRegressor",
"ProbabilisticEnsembleDynamicsModel",
"ProbabilisticDynamicsModel",
"Parameter",
] | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/models/torch/__init__.py | __init__.py |
from typing import List, Optional, Union, cast
import torch
from torch import nn
from .base import ContinuousQFunction, DiscreteQFunction
def _reduce_ensemble(
y: torch.Tensor, reduction: str = "min", dim: int = 0, lam: float = 0.75
) -> torch.Tensor:
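    # reduces predictions of shape (n_ensembles, batch, ...) along ``dim``;
    # "mix" returns lam * min + (1 - lam) * max over the ensemble members,
    # i.e. a weighted version of the clipped double-Q target.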
if reduction == "min":
return y.min(dim=dim).values
elif reduction == "max":
return y.max(dim=dim).values
elif reduction == "mean":
return y.mean(dim=dim)
elif reduction == "none":
return y
elif reduction == "mix":
max_values = y.max(dim=dim).values
min_values = y.min(dim=dim).values
return lam * min_values + (1.0 - lam) * max_values
raise ValueError
def _gather_quantiles_by_indices(
y: torch.Tensor, indices: torch.Tensor
) -> torch.Tensor:
# TODO: implement this in general case
if y.dim() == 3:
# (N, batch, n_quantiles) -> (batch, n_quantiles)
return y.transpose(0, 1)[torch.arange(y.shape[1]), indices]
elif y.dim() == 4:
# (N, batch, action, n_quantiles) -> (batch, action, N, n_quantiles)
transposed_y = y.transpose(0, 1).transpose(1, 2)
# (batch, action, N, n_quantiles) -> (batch * action, N, n_quantiles)
flat_y = transposed_y.reshape(-1, y.shape[0], y.shape[3])
head_indices = torch.arange(y.shape[1] * y.shape[2])
# (batch * action, N, n_quantiles) -> (batch * action, n_quantiles)
gathered_y = flat_y[head_indices, indices.view(-1)]
# (batch * action, n_quantiles) -> (batch, action, n_quantiles)
return gathered_y.view(y.shape[1], y.shape[2], -1)
raise ValueError
def _reduce_quantile_ensemble(
y: torch.Tensor, reduction: str = "min", dim: int = 0, lam: float = 0.75
) -> torch.Tensor:
    # reduction based on expectation
mean = y.mean(dim=-1)
if reduction == "min":
indices = mean.min(dim=dim).indices
return _gather_quantiles_by_indices(y, indices)
elif reduction == "max":
indices = mean.max(dim=dim).indices
return _gather_quantiles_by_indices(y, indices)
elif reduction == "none":
return y
elif reduction == "mix":
min_indices = mean.min(dim=dim).indices
max_indices = mean.max(dim=dim).indices
min_values = _gather_quantiles_by_indices(y, min_indices)
max_values = _gather_quantiles_by_indices(y, max_indices)
return lam * min_values + (1.0 - lam) * max_values
raise ValueError
class EnsembleQFunction(nn.Module): # type: ignore
_action_size: int
_q_funcs: nn.ModuleList
_bootstrap: bool
def __init__(
self,
q_funcs: Union[List[DiscreteQFunction], List[ContinuousQFunction]],
bootstrap: bool = False,
):
super().__init__()
self._action_size = q_funcs[0].action_size
self._q_funcs = nn.ModuleList(q_funcs)
self._bootstrap = bootstrap and len(q_funcs) > 1
def compute_error(
self,
obs_t: torch.Tensor,
act_t: torch.Tensor,
rew_tp1: torch.Tensor,
q_tp1: torch.Tensor,
ter_tp1: torch.Tensor,
gamma: float = 0.99,
use_independent_target: bool = False,
masks: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if use_independent_target:
assert q_tp1.ndim == 3
else:
assert q_tp1.ndim == 2
if self._bootstrap and masks is not None:
assert masks.shape == (len(self._q_funcs), obs_t.shape[0], 1,), (
"Invalid mask shape is detected. "
f"mask_size must be {len(self._q_funcs)}."
)
td_sum = torch.tensor(0.0, dtype=torch.float32, device=obs_t.device)
for i, q_func in enumerate(self._q_funcs):
if use_independent_target:
target = q_tp1[i]
else:
target = q_tp1
loss = q_func.compute_error(
obs_t, act_t, rew_tp1, target, ter_tp1, gamma, reduction="none"
)
if self._bootstrap:
if masks is None:
mask = torch.randint(0, 2, loss.shape, device=obs_t.device)
else:
mask = masks[i]
loss *= mask.float()
td_sum += loss.sum() / (mask.sum().float() + 1e-10)
else:
td_sum += loss.mean()
return td_sum
def _compute_target(
self,
x: torch.Tensor,
action: Optional[torch.Tensor] = None,
reduction: str = "min",
lam: float = 0.75,
) -> torch.Tensor:
values_list: List[torch.Tensor] = []
for q_func in self._q_funcs:
target = q_func.compute_target(x, action)
values_list.append(target.reshape(1, x.shape[0], -1))
values = torch.cat(values_list, dim=0)
if action is None:
# mean Q function
if values.shape[2] == self._action_size:
return _reduce_ensemble(values, reduction)
# distributional Q function
n_q_funcs = values.shape[0]
values = values.view(n_q_funcs, x.shape[0], self._action_size, -1)
return _reduce_quantile_ensemble(values, reduction)
if values.shape[2] == 1:
return _reduce_ensemble(values, reduction, lam=lam)
return _reduce_quantile_ensemble(values, reduction, lam=lam)
@property
def q_funcs(self) -> nn.ModuleList:
return self._q_funcs
@property
def bootstrap(self) -> bool:
return self._bootstrap
class EnsembleDiscreteQFunction(EnsembleQFunction):
def forward(self, x: torch.Tensor, reduction: str = "mean") -> torch.Tensor:
values = []
for q_func in self._q_funcs:
values.append(q_func(x).view(1, x.shape[0], self._action_size))
return _reduce_ensemble(torch.cat(values, dim=0), reduction)
def __call__(
self, x: torch.Tensor, reduction: str = "mean"
) -> torch.Tensor:
return cast(torch.Tensor, super().__call__(x, reduction))
def compute_target(
self,
x: torch.Tensor,
action: Optional[torch.Tensor] = None,
reduction: str = "min",
lam: float = 0.75,
) -> torch.Tensor:
return self._compute_target(x, action, reduction, lam)
class EnsembleContinuousQFunction(EnsembleQFunction):
def forward(
self, x: torch.Tensor, action: torch.Tensor, reduction: str = "mean"
) -> torch.Tensor:
values = []
for q_func in self._q_funcs:
values.append(q_func(x, action).view(1, x.shape[0], 1))
return _reduce_ensemble(torch.cat(values, dim=0), reduction)
def __call__(
self, x: torch.Tensor, action: torch.Tensor, reduction: str = "mean"
) -> torch.Tensor:
return cast(torch.Tensor, super().__call__(x, action, reduction))
def compute_target(
self,
x: torch.Tensor,
action: torch.Tensor,
reduction: str = "min",
lam: float = 0.75,
) -> torch.Tensor:
return self._compute_target(x, action, reduction, lam) | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/models/torch/q_functions/ensemble_q_function.py | ensemble_q_function.py |
from typing import Optional, cast
import torch
from torch import nn
from ..encoders import Encoder, EncoderWithAction
from .base import ContinuousQFunction, DiscreteQFunction
from .utility import (
compute_quantile_loss,
compute_reduce,
pick_quantile_value_by_action,
)
def _make_taus(h: torch.Tensor, n_quantiles: int) -> torch.Tensor:
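    # quantile midpoints tau_hat_i = (i + 0.5) / n_quantiles as in QR-DQN,
    # returned with shape (1, n_quantiles)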
steps = torch.arange(n_quantiles, dtype=torch.float32, device=h.device)
taus = ((steps + 1).float() / n_quantiles).view(1, -1)
taus_dot = (steps.float() / n_quantiles).view(1, -1)
return (taus + taus_dot) / 2.0
class DiscreteQRQFunction(DiscreteQFunction, nn.Module): # type: ignore
_action_size: int
_encoder: Encoder
_n_quantiles: int
_fc: nn.Linear
def __init__(self, encoder: Encoder, action_size: int, n_quantiles: int):
super().__init__()
self._encoder = encoder
self._action_size = action_size
self._n_quantiles = n_quantiles
self._fc = nn.Linear(
encoder.get_feature_size(), action_size * n_quantiles
)
def _compute_quantiles(
self, h: torch.Tensor, taus: torch.Tensor
) -> torch.Tensor:
h = cast(torch.Tensor, self._fc(h))
return h.view(-1, self._action_size, self._n_quantiles)
def forward(self, x: torch.Tensor) -> torch.Tensor:
h = self._encoder(x)
taus = _make_taus(h, self._n_quantiles)
quantiles = self._compute_quantiles(h, taus)
return quantiles.mean(dim=2)
def compute_error(
self,
obs_t: torch.Tensor,
act_t: torch.Tensor,
rew_tp1: torch.Tensor,
q_tp1: torch.Tensor,
ter_tp1: torch.Tensor,
gamma: float = 0.99,
reduction: str = "mean",
) -> torch.Tensor:
assert q_tp1.shape == (obs_t.shape[0], self._n_quantiles)
        # extract quantiles corresponding to act_t
h = self._encoder(obs_t)
taus = _make_taus(h, self._n_quantiles)
quantiles = self._compute_quantiles(h, taus)
quantiles_t = pick_quantile_value_by_action(quantiles, act_t)
loss = compute_quantile_loss(
quantiles_t=quantiles_t,
rewards_tp1=rew_tp1,
quantiles_tp1=q_tp1,
terminals_tp1=ter_tp1,
taus=taus,
gamma=gamma,
)
return compute_reduce(loss, reduction)
def compute_target(
self, x: torch.Tensor, action: Optional[torch.Tensor] = None
) -> torch.Tensor:
h = self._encoder(x)
taus = _make_taus(h, self._n_quantiles)
quantiles = self._compute_quantiles(h, taus)
if action is None:
return quantiles
return pick_quantile_value_by_action(quantiles, action)
@property
def action_size(self) -> int:
return self._action_size
@property
def encoder(self) -> Encoder:
return self._encoder
class ContinuousQRQFunction(ContinuousQFunction, nn.Module): # type: ignore
_action_size: int
_encoder: EncoderWithAction
_n_quantiles: int
_fc: nn.Linear
def __init__(self, encoder: EncoderWithAction, n_quantiles: int):
super().__init__()
self._encoder = encoder
self._action_size = encoder.action_size
self._n_quantiles = n_quantiles
self._fc = nn.Linear(encoder.get_feature_size(), n_quantiles)
def _compute_quantiles(
self, h: torch.Tensor, taus: torch.Tensor
) -> torch.Tensor:
return cast(torch.Tensor, self._fc(h))
def forward(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
h = self._encoder(x, action)
taus = _make_taus(h, self._n_quantiles)
quantiles = self._compute_quantiles(h, taus)
return quantiles.mean(dim=1, keepdim=True)
def compute_error(
self,
obs_t: torch.Tensor,
act_t: torch.Tensor,
rew_tp1: torch.Tensor,
q_tp1: torch.Tensor,
ter_tp1: torch.Tensor,
gamma: float = 0.99,
reduction: str = "mean",
) -> torch.Tensor:
assert q_tp1.shape == (obs_t.shape[0], self._n_quantiles)
h = self._encoder(obs_t, act_t)
taus = _make_taus(h, self._n_quantiles)
quantiles_t = self._compute_quantiles(h, taus)
loss = compute_quantile_loss(
quantiles_t=quantiles_t,
rewards_tp1=rew_tp1,
quantiles_tp1=q_tp1,
terminals_tp1=ter_tp1,
taus=taus,
gamma=gamma,
)
return compute_reduce(loss, reduction)
def compute_target(
self, x: torch.Tensor, action: torch.Tensor
) -> torch.Tensor:
h = self._encoder(x, action)
taus = _make_taus(h, self._n_quantiles)
return self._compute_quantiles(h, taus)
@property
def action_size(self) -> int:
return self._action_size
@property
def encoder(self) -> EncoderWithAction:
return self._encoder | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/models/torch/q_functions/qr_q_function.py | qr_q_function.py |
from typing import Optional, cast
import torch
import torch.nn.functional as F
from torch import nn
from ..encoders import Encoder, EncoderWithAction
from .base import ContinuousQFunction, DiscreteQFunction
from .utility import compute_huber_loss, compute_reduce, pick_value_by_action
class DiscreteMeanQFunction(DiscreteQFunction, nn.Module): # type: ignore
_action_size: int
_encoder: Encoder
_fc: nn.Linear
def __init__(self, encoder: Encoder, action_size: int):
super().__init__()
self._action_size = action_size
self._encoder = encoder
self._fc = nn.Linear(encoder.get_feature_size(), action_size)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return cast(torch.Tensor, self._fc(self._encoder(x)))
def compute_error(
self,
obs_t: torch.Tensor,
act_t: torch.Tensor,
rew_tp1: torch.Tensor,
q_tp1: torch.Tensor,
ter_tp1: torch.Tensor,
gamma: float = 0.99,
reduction: str = "mean",
) -> torch.Tensor:
one_hot = F.one_hot(act_t.view(-1), num_classes=self.action_size)
q_t = (self.forward(obs_t) * one_hot.float()).sum(dim=1, keepdim=True)
y = rew_tp1 + gamma * q_tp1 * (1 - ter_tp1)
loss = compute_huber_loss(q_t, y)
return compute_reduce(loss, reduction)
def compute_target(
self, x: torch.Tensor, action: Optional[torch.Tensor] = None
) -> torch.Tensor:
if action is None:
return self.forward(x)
return pick_value_by_action(self.forward(x), action, keepdim=True)
@property
def action_size(self) -> int:
return self._action_size
@property
def encoder(self) -> Encoder:
return self._encoder
class ContinuousMeanQFunction(ContinuousQFunction, nn.Module): # type: ignore
_encoder: EncoderWithAction
_action_size: int
_fc: nn.Linear
def __init__(self, encoder: EncoderWithAction):
super().__init__()
self._encoder = encoder
self._action_size = encoder.action_size
self._fc = nn.Linear(encoder.get_feature_size(), 1)
def forward(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
return cast(torch.Tensor, self._fc(self._encoder(x, action)))
def compute_error(
self,
obs_t: torch.Tensor,
act_t: torch.Tensor,
rew_tp1: torch.Tensor,
q_tp1: torch.Tensor,
ter_tp1: torch.Tensor,
gamma: float = 0.99,
reduction: str = "mean",
) -> torch.Tensor:
q_t = self.forward(obs_t, act_t)
y = rew_tp1 + gamma * q_tp1 * (1 - ter_tp1)
loss = F.mse_loss(q_t, y, reduction="none")
return compute_reduce(loss, reduction)
def compute_target(
self, x: torch.Tensor, action: torch.Tensor
) -> torch.Tensor:
return self.forward(x, action)
@property
def action_size(self) -> int:
return self._action_size
@property
def encoder(self) -> EncoderWithAction:
return self._encoder | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/models/torch/q_functions/mean_q_function.py | mean_q_function.py |
from typing import Optional, Tuple, cast
import torch
from torch import nn
from ..encoders import Encoder, EncoderWithAction
from .base import ContinuousQFunction, DiscreteQFunction
from .iqn_q_function import compute_iqn_feature
from .utility import (
compute_quantile_loss,
compute_reduce,
pick_quantile_value_by_action,
)
def _make_taus(
h: torch.Tensor, proposal: nn.Linear,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
proposals = proposal(h.detach())
# tau_i+1
log_probs = torch.log_softmax(proposals, dim=1)
probs = log_probs.exp()
taus = torch.cumsum(probs, dim=1)
# tau_i
pads = torch.zeros(h.shape[0], 1, device=h.device)
taus_minus = torch.cat([pads, taus[:, :-1]], dim=1)
# tau^
taus_prime = (taus + taus_minus) / 2
# entropy for penalty
entropies = -(log_probs * probs).sum(dim=1)
return taus, taus_minus, taus_prime, entropies
class DiscreteFQFQFunction(DiscreteQFunction, nn.Module): # type: ignore
_action_size: int
_entropy_coeff: float
_encoder: Encoder
_fc: nn.Linear
_n_quantiles: int
_embed_size: int
_embed: nn.Linear
_proposal: nn.Linear
def __init__(
self,
encoder: Encoder,
action_size: int,
n_quantiles: int,
embed_size: int,
entropy_coeff: float = 0.0,
):
super().__init__()
self._encoder = encoder
self._action_size = action_size
self._fc = nn.Linear(encoder.get_feature_size(), self._action_size)
self._entropy_coeff = entropy_coeff
self._n_quantiles = n_quantiles
self._embed_size = embed_size
self._embed = nn.Linear(embed_size, encoder.get_feature_size())
self._proposal = nn.Linear(encoder.get_feature_size(), n_quantiles)
def _compute_quantiles(
self, h: torch.Tensor, taus: torch.Tensor
) -> torch.Tensor:
# element-wise product on feature and phi (batch, quantile, feature)
prod = compute_iqn_feature(h, taus, self._embed, self._embed_size)
# (batch, quantile, feature) -> (batch, action, quantile)
return cast(torch.Tensor, self._fc(prod)).transpose(1, 2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
h = self._encoder(x)
taus, taus_minus, taus_prime, _ = _make_taus(h, self._proposal)
quantiles = self._compute_quantiles(h, taus_prime.detach())
weight = (taus - taus_minus).view(-1, 1, self._n_quantiles).detach()
return (weight * quantiles).sum(dim=2)
def compute_error(
self,
obs_t: torch.Tensor,
act_t: torch.Tensor,
rew_tp1: torch.Tensor,
q_tp1: torch.Tensor,
ter_tp1: torch.Tensor,
gamma: float = 0.99,
reduction: str = "mean",
) -> torch.Tensor:
assert q_tp1.shape == (obs_t.shape[0], self._n_quantiles)
# compute quantiles
h = self._encoder(obs_t)
taus, _, taus_prime, entropies = _make_taus(h, self._proposal)
quantiles = self._compute_quantiles(h, taus_prime.detach())
quantiles_t = pick_quantile_value_by_action(quantiles, act_t)
quantile_loss = compute_quantile_loss(
quantiles_t=quantiles_t,
rewards_tp1=rew_tp1,
quantiles_tp1=q_tp1,
terminals_tp1=ter_tp1,
taus=taus_prime.detach(),
gamma=gamma,
)
# compute proposal network loss
        # the original paper optimizes the proposal network separately,
        # but the two updates are combined here
proposal_loss = self._compute_proposal_loss(h, act_t, taus, taus_prime)
proposal_params = list(self._proposal.parameters())
proposal_grads = torch.autograd.grad(
outputs=proposal_loss.mean(),
inputs=proposal_params,
retain_graph=True,
)
# directly apply gradients
for param, grad in zip(list(proposal_params), proposal_grads):
param.grad = 1e-4 * grad
loss = quantile_loss - self._entropy_coeff * entropies
return compute_reduce(loss, reduction)
def _compute_proposal_loss(
self,
h: torch.Tensor,
action: torch.Tensor,
taus: torch.Tensor,
taus_prime: torch.Tensor,
) -> torch.Tensor:
q_taus = self._compute_quantiles(h.detach(), taus)
q_taus_prime = self._compute_quantiles(h.detach(), taus_prime)
batch_steps = torch.arange(h.shape[0])
# (batch, n_quantiles - 1)
q_taus = q_taus[batch_steps, action.view(-1)][:, :-1]
# (batch, n_quantiles)
q_taus_prime = q_taus_prime[batch_steps, action.view(-1)]
# compute gradients
proposal_grad = 2 * q_taus - q_taus_prime[:, :-1] - q_taus_prime[:, 1:]
return proposal_grad.sum(dim=1)
def compute_target(
self, x: torch.Tensor, action: Optional[torch.Tensor] = None
) -> torch.Tensor:
h = self._encoder(x)
_, _, taus_prime, _ = _make_taus(h, self._proposal)
quantiles = self._compute_quantiles(h, taus_prime.detach())
if action is None:
return quantiles
return pick_quantile_value_by_action(quantiles, action)
@property
def action_size(self) -> int:
return self._action_size
@property
def encoder(self) -> Encoder:
return self._encoder
class ContinuousFQFQFunction(ContinuousQFunction, nn.Module): # type: ignore
_action_size: int
_entropy_coeff: float
_encoder: EncoderWithAction
_fc: nn.Linear
_n_quantiles: int
_embed_size: int
_embed: nn.Linear
_proposal: nn.Linear
def __init__(
self,
encoder: EncoderWithAction,
n_quantiles: int,
embed_size: int,
entropy_coeff: float = 0.0,
):
super().__init__()
self._encoder = encoder
self._action_size = encoder.action_size
self._fc = nn.Linear(encoder.get_feature_size(), 1)
self._entropy_coeff = entropy_coeff
self._n_quantiles = n_quantiles
self._embed_size = embed_size
self._embed = nn.Linear(embed_size, encoder.get_feature_size())
self._proposal = nn.Linear(encoder.get_feature_size(), n_quantiles)
def _compute_quantiles(
self, h: torch.Tensor, taus: torch.Tensor
) -> torch.Tensor:
# element-wise product on feature and phi (batch, quantile, feature)
prod = compute_iqn_feature(h, taus, self._embed, self._embed_size)
# (batch, quantile, feature) -> (batch, quantile)
return cast(torch.Tensor, self._fc(prod)).view(h.shape[0], -1)
def forward(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
h = self._encoder(x, action)
taus, taus_minus, taus_prime, _ = _make_taus(h, self._proposal)
quantiles = self._compute_quantiles(h, taus_prime.detach())
weight = (taus - taus_minus).detach()
return (weight * quantiles).sum(dim=1, keepdim=True)
def compute_error(
self,
obs_t: torch.Tensor,
act_t: torch.Tensor,
rew_tp1: torch.Tensor,
q_tp1: torch.Tensor,
ter_tp1: torch.Tensor,
gamma: float = 0.99,
reduction: str = "mean",
) -> torch.Tensor:
assert q_tp1.shape == (obs_t.shape[0], self._n_quantiles)
h = self._encoder(obs_t, act_t)
taus, _, taus_prime, entropies = _make_taus(h, self._proposal)
quantiles_t = self._compute_quantiles(h, taus_prime.detach())
quantile_loss = compute_quantile_loss(
quantiles_t=quantiles_t,
rewards_tp1=rew_tp1,
quantiles_tp1=q_tp1,
terminals_tp1=ter_tp1,
taus=taus_prime.detach(),
gamma=gamma,
)
# compute proposal network loss
        # the original paper optimizes the proposal network separately,
        # but the two updates are combined here
proposal_loss = self._compute_proposal_loss(h, taus, taus_prime)
proposal_params = list(self._proposal.parameters())
proposal_grads = torch.autograd.grad(
outputs=proposal_loss.mean(),
inputs=proposal_params,
retain_graph=True,
)
# directly apply gradients
for param, grad in zip(list(proposal_params), proposal_grads):
param.grad = 1e-4 * grad
loss = quantile_loss - self._entropy_coeff * entropies
return compute_reduce(loss, reduction)
def _compute_proposal_loss(
self, h: torch.Tensor, taus: torch.Tensor, taus_prime: torch.Tensor
) -> torch.Tensor:
# (batch, n_quantiles - 1)
q_taus = self._compute_quantiles(h.detach(), taus)[:, :-1]
# (batch, n_quantiles)
q_taus_prime = self._compute_quantiles(h.detach(), taus_prime)
# compute gradients
proposal_grad = 2 * q_taus - q_taus_prime[:, :-1] - q_taus_prime[:, 1:]
return proposal_grad.sum(dim=1)
def compute_target(
self, x: torch.Tensor, action: torch.Tensor
) -> torch.Tensor:
h = self._encoder(x, action)
_, _, taus_prime, _ = _make_taus(h, self._proposal)
return self._compute_quantiles(h, taus_prime.detach())
@property
def action_size(self) -> int:
return self._action_size
@property
def encoder(self) -> EncoderWithAction:
return self._encoder | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/models/torch/q_functions/fqf_q_function.py | fqf_q_function.py |
from typing import cast
import torch
import torch.nn.functional as F
def pick_value_by_action(
values: torch.Tensor, action: torch.Tensor, keepdim: bool = False
) -> torch.Tensor:
assert values.ndim == 2
action_size = values.shape[1]
one_hot = F.one_hot(action.view(-1), num_classes=action_size)
masked_values = values * cast(torch.Tensor, one_hot.float())
return masked_values.sum(dim=1, keepdim=keepdim)
def pick_quantile_value_by_action(
values: torch.Tensor, action: torch.Tensor, keepdim: bool = False
) -> torch.Tensor:
assert values.ndim == 3
action_size = values.shape[1]
one_hot = F.one_hot(action.view(-1), num_classes=action_size)
mask = cast(torch.Tensor, one_hot.view(-1, action_size, 1).float())
return (values * mask).sum(dim=1, keepdim=keepdim)
def compute_huber_loss(
y: torch.Tensor, target: torch.Tensor, beta: float = 1.0
) -> torch.Tensor:
diff = target - y
cond = diff.detach().abs() < beta
return torch.where(cond, 0.5 * diff ** 2, beta * (diff.abs() - 0.5 * beta))
def compute_quantile_huber_loss(
y: torch.Tensor, target: torch.Tensor, taus: torch.Tensor
) -> torch.Tensor:
assert y.dim() == 3 and target.dim() == 3 and taus.dim() == 3
# compute huber loss
huber_loss = compute_huber_loss(y, target)
delta = cast(torch.Tensor, ((target - y).detach() < 0.0).float())
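    # asymmetric quantile weight |tau - 1{target - y < 0}| applied to the
    # element-wise Huber loss (the standard quantile Huber loss)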
element_wise_loss = (taus - delta).abs() * huber_loss
return element_wise_loss.sum(dim=2).mean(dim=1)
def compute_quantile_loss(
quantiles_t: torch.Tensor,
rewards_tp1: torch.Tensor,
quantiles_tp1: torch.Tensor,
terminals_tp1: torch.Tensor,
taus: torch.Tensor,
gamma: float,
) -> torch.Tensor:
batch_size, n_quantiles = quantiles_t.shape
expanded_quantiles_t = quantiles_t.view(batch_size, 1, -1)
y = rewards_tp1 + gamma * quantiles_tp1 * (1 - terminals_tp1)
expanded_y = y.view(batch_size, -1, 1)
expanded_taus = taus.view(-1, 1, n_quantiles)
return compute_quantile_huber_loss(
expanded_quantiles_t, expanded_y, expanded_taus
)
def compute_reduce(value: torch.Tensor, reduction_type: str) -> torch.Tensor:
if reduction_type == "mean":
return value.mean()
elif reduction_type == "sum":
return value.sum()
elif reduction_type == "none":
return value.view(-1, 1)
raise ValueError("invalid reduction type.") | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/models/torch/q_functions/utility.py | utility.py |
from typing import Tuple
import torch
from .ensemble_q_function import EnsembleContinuousQFunction
def compute_max_with_n_actions_and_indices(
x: torch.Tensor,
actions: torch.Tensor,
q_func: EnsembleContinuousQFunction,
lam: float,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Returns weighted target value from sampled actions.
    This calculation was first proposed in the BCQ paper.
`x` should be shaped with `(batch, dim_obs)`.
`actions` should be shaped with `(batch, N, dim_action)`.
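    A minimal usage sketch (``q_func`` is an ``EnsembleContinuousQFunction``
    and ``candidate_actions`` holds N sampled actions per observation; both
    names are illustrative and not defined in this module):
    .. code-block:: python
        values, indices = compute_max_with_n_actions_and_indices(
            x, candidate_actions, q_func, lam=0.75
        )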
"""
batch_size = actions.shape[0]
n_critics = len(q_func.q_funcs)
n_actions = actions.shape[1]
# (batch, observation) -> (batch, n, observation)
expanded_x = x.expand(n_actions, *x.shape).transpose(0, 1)
# (batch * n, observation)
flat_x = expanded_x.reshape(-1, *x.shape[1:])
# (batch, n, action) -> (batch * n, action)
flat_actions = actions.reshape(batch_size * n_actions, -1)
# estimate values while taking care of quantiles
flat_values = q_func.compute_target(flat_x, flat_actions, "none")
# reshape to (n_ensembles, batch_size, n, -1)
transposed_values = flat_values.view(n_critics, batch_size, n_actions, -1)
# (n_ensembles, batch_size, n, -1) -> (batch_size, n_ensembles, n, -1)
values = transposed_values.transpose(0, 1)
# get combination indices
# (batch_size, n_ensembles, n, -1) -> (batch_size, n_ensembles, n)
mean_values = values.mean(dim=3)
# (batch_size, n_ensembles, n) -> (batch_size, n)
max_values, max_indices = mean_values.max(dim=1)
min_values, min_indices = mean_values.min(dim=1)
mix_values = (1.0 - lam) * max_values + lam * min_values
# (batch_size, n) -> (batch_size,)
action_indices = mix_values.argmax(dim=1)
# fuse maximum values and minimum values
# (batch_size, n_ensembles, n, -1) -> (batch_size, n, n_ensembles, -1)
values_T = values.transpose(1, 2)
# (batch, n, n_ensembles, -1) -> (batch * n, n_ensembles, -1)
flat_values = values_T.reshape(batch_size * n_actions, n_critics, -1)
# (batch * n, n_ensembles, -1) -> (batch * n, -1)
bn_indices = torch.arange(batch_size * n_actions)
max_values = flat_values[bn_indices, max_indices.view(-1)]
min_values = flat_values[bn_indices, min_indices.view(-1)]
# (batch * n, -1) -> (batch, n, -1)
max_values = max_values.view(batch_size, n_actions, -1)
min_values = min_values.view(batch_size, n_actions, -1)
mix_values = (1.0 - lam) * max_values + lam * min_values
# (batch, n, -1) -> (batch, -1)
result_values = mix_values[torch.arange(x.shape[0]), action_indices]
return result_values, action_indices
def compute_max_with_n_actions(
x: torch.Tensor,
actions: torch.Tensor,
q_func: EnsembleContinuousQFunction,
lam: float,
) -> torch.Tensor:
return compute_max_with_n_actions_and_indices(x, actions, q_func, lam)[0] | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/models/torch/q_functions/__init__.py | __init__.py |
import math
from typing import Optional, cast
import torch
from torch import nn
from ..encoders import Encoder, EncoderWithAction
from .base import ContinuousQFunction, DiscreteQFunction
from .utility import (
compute_quantile_loss,
compute_reduce,
pick_quantile_value_by_action,
)
def _make_taus(
h: torch.Tensor, n_quantiles: int, training: bool
) -> torch.Tensor:
if training:
taus = torch.rand(h.shape[0], n_quantiles, device=h.device)
else:
taus = torch.linspace(
start=0,
end=1,
steps=n_quantiles,
device=h.device,
dtype=torch.float32,
)
taus = taus.view(1, -1).repeat(h.shape[0], 1)
return taus
def compute_iqn_feature(
h: torch.Tensor, taus: torch.Tensor, embed: nn.Linear, embed_size: int,
) -> torch.Tensor:
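    # IQN cosine embedding: phi(tau) = relu(embed(cos(pi * i * tau))) for
    # i = 1..embed_size, multiplied element-wise with the state feature h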
# compute embedding
steps = torch.arange(embed_size, device=h.device).float() + 1
# (batch, quantile, embedding)
expanded_taus = taus.view(h.shape[0], -1, 1)
prior = torch.cos(math.pi * steps.view(1, 1, -1) * expanded_taus)
# (batch, quantile, embedding) -> (batch, quantile, feature)
phi = torch.relu(embed(prior))
# (batch, 1, feature) -> (batch, quantile, feature)
return h.view(h.shape[0], 1, -1) * phi
class DiscreteIQNQFunction(DiscreteQFunction, nn.Module): # type: ignore
_action_size: int
_encoder: Encoder
_fc: nn.Linear
_n_quantiles: int
_n_greedy_quantiles: int
_embed_size: int
_embed: nn.Linear
def __init__(
self,
encoder: Encoder,
action_size: int,
n_quantiles: int,
n_greedy_quantiles: int,
embed_size: int,
):
super().__init__()
self._encoder = encoder
self._action_size = action_size
self._fc = nn.Linear(encoder.get_feature_size(), self._action_size)
self._n_quantiles = n_quantiles
self._n_greedy_quantiles = n_greedy_quantiles
self._embed_size = embed_size
self._embed = nn.Linear(embed_size, encoder.get_feature_size())
def _make_taus(self, h: torch.Tensor) -> torch.Tensor:
if self.training:
n_quantiles = self._n_quantiles
else:
n_quantiles = self._n_greedy_quantiles
return _make_taus(h, n_quantiles, self.training)
def _compute_quantiles(
self, h: torch.Tensor, taus: torch.Tensor
) -> torch.Tensor:
# element-wise product on feature and phi (batch, quantile, feature)
prod = compute_iqn_feature(h, taus, self._embed, self._embed_size)
# (batch, quantile, feature) -> (batch, action, quantile)
return cast(torch.Tensor, self._fc(prod)).transpose(1, 2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
h = self._encoder(x)
taus = self._make_taus(h)
quantiles = self._compute_quantiles(h, taus)
return quantiles.mean(dim=2)
def compute_error(
self,
obs_t: torch.Tensor,
act_t: torch.Tensor,
rew_tp1: torch.Tensor,
q_tp1: torch.Tensor,
ter_tp1: torch.Tensor,
gamma: float = 0.99,
reduction: str = "mean",
) -> torch.Tensor:
assert q_tp1.shape == (obs_t.shape[0], self._n_quantiles)
        # extract quantiles corresponding to act_t
h = self._encoder(obs_t)
taus = self._make_taus(h)
quantiles = self._compute_quantiles(h, taus)
quantiles_t = pick_quantile_value_by_action(quantiles, act_t)
loss = compute_quantile_loss(
quantiles_t=quantiles_t,
rewards_tp1=rew_tp1,
quantiles_tp1=q_tp1,
terminals_tp1=ter_tp1,
taus=taus,
gamma=gamma,
)
return compute_reduce(loss, reduction)
def compute_target(
self, x: torch.Tensor, action: Optional[torch.Tensor] = None
) -> torch.Tensor:
h = self._encoder(x)
taus = self._make_taus(h)
quantiles = self._compute_quantiles(h, taus)
if action is None:
return quantiles
return pick_quantile_value_by_action(quantiles, action)
@property
def action_size(self) -> int:
return self._action_size
@property
def encoder(self) -> Encoder:
return self._encoder
class ContinuousIQNQFunction(ContinuousQFunction, nn.Module): # type: ignore
_action_size: int
_encoder: EncoderWithAction
_fc: nn.Linear
_n_quantiles: int
_n_greedy_quantiles: int
_embed_size: int
_embed: nn.Linear
def __init__(
self,
encoder: EncoderWithAction,
n_quantiles: int,
n_greedy_quantiles: int,
embed_size: int,
):
super().__init__()
self._encoder = encoder
self._action_size = encoder.action_size
self._fc = nn.Linear(encoder.get_feature_size(), 1)
self._n_quantiles = n_quantiles
self._n_greedy_quantiles = n_greedy_quantiles
self._embed_size = embed_size
self._embed = nn.Linear(embed_size, encoder.get_feature_size())
def _make_taus(self, h: torch.Tensor) -> torch.Tensor:
if self.training:
n_quantiles = self._n_quantiles
else:
n_quantiles = self._n_greedy_quantiles
return _make_taus(h, n_quantiles, self.training)
def _compute_quantiles(
self, h: torch.Tensor, taus: torch.Tensor
) -> torch.Tensor:
# element-wise product on feature and phi (batch, quantile, feature)
prod = compute_iqn_feature(h, taus, self._embed, self._embed_size)
# (batch, quantile, feature) -> (batch, quantile)
return cast(torch.Tensor, self._fc(prod)).view(h.shape[0], -1)
def forward(self, x: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
h = self._encoder(x, action)
taus = self._make_taus(h)
quantiles = self._compute_quantiles(h, taus)
return quantiles.mean(dim=1, keepdim=True)
def compute_error(
self,
obs_t: torch.Tensor,
act_t: torch.Tensor,
rew_tp1: torch.Tensor,
q_tp1: torch.Tensor,
ter_tp1: torch.Tensor,
gamma: float = 0.99,
reduction: str = "mean",
) -> torch.Tensor:
assert q_tp1.shape == (obs_t.shape[0], self._n_quantiles)
h = self._encoder(obs_t, act_t)
taus = self._make_taus(h)
quantiles_t = self._compute_quantiles(h, taus)
loss = compute_quantile_loss(
quantiles_t=quantiles_t,
rewards_tp1=rew_tp1,
quantiles_tp1=q_tp1,
terminals_tp1=ter_tp1,
taus=taus,
gamma=gamma,
)
return compute_reduce(loss, reduction)
def compute_target(
self, x: torch.Tensor, action: torch.Tensor
) -> torch.Tensor:
h = self._encoder(x, action)
taus = self._make_taus(h)
return self._compute_quantiles(h, taus)
@property
def action_size(self) -> int:
return self._action_size
@property
def encoder(self) -> EncoderWithAction:
return self._encoder | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/models/torch/q_functions/iqn_q_function.py | iqn_q_function.py |
from typing import TYPE_CHECKING, Any, List, Tuple, Union
import numpy as np
from gym.spaces import Discrete
from ..algos import AlgoBase
from ..dataset import MDPDataset
if TYPE_CHECKING:
from stable_baselines3.common.buffers import ReplayBuffer
class SB3Wrapper:
"""A wrapper for d3rlpy algorithms so they can be used with Stable-Baselines3 (SB3).
Args:
algo (d3rlpy.algos.base.AlgoBase): algorithm.
Attributes:
algo (d3rlpy.algos.base.AlgoBase): algorithm.
"""
def __init__(self, algo: AlgoBase):
# Avoid infinite recursion due to override of setattr
self.__dict__["algo"] = algo
def predict(
self,
observation: Union[np.ndarray, List[Any]],
state: Any = None,
mask: Any = None,
deterministic: bool = True,
) -> Tuple[np.ndarray, None]:
"""Returns actions.
Args:
observation: observation.
state: this argument is just ignored.
mask: this argument is just ignored.
deterministic: flag to return greedy actions.
Returns:
``(actions, None)``.
"""
if deterministic:
return self.algo.predict(observation), None
return self.algo.sample_action(observation), None
def __getattr__(self, attr: str) -> Any:
if attr in self.__dict__:
return getattr(self, attr)
return getattr(self.algo, attr)
def __setattr__(self, attr_name: str, value: Any) -> None:
if attr_name != "algo":
self.algo.__setattr__(attr_name, value)
else:
self.__dict__["algo"] = value
def to_mdp_dataset(replay_buffer: "ReplayBuffer") -> MDPDataset:
"""Returns d3rlpy's MDPDataset from SB3's ReplayBuffer
Args:
replay_buffer: SB3's replay buffer.
Returns:
d3rlpy's MDPDataset.
"""
pos = replay_buffer.size()
discrete_action = isinstance(replay_buffer.action_space, Discrete)
dataset = MDPDataset(
observations=replay_buffer.observations[:pos, 0],
actions=replay_buffer.actions[:pos, 0],
rewards=replay_buffer.rewards[:pos, 0],
terminals=replay_buffer.dones[:pos, 0],
discrete_action=discrete_action,
)
return dataset | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/wrappers/sb3.py | sb3.py |
from typing import Sequence
import numpy as np
class StackedObservation:
"""StackedObservation class.
This class is used to stack images to handle temporal features.
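    A minimal usage sketch (``frame`` is assumed to be a ``(1, 84, 84)``
    uint8 array):
    .. code-block:: python
        stacked = StackedObservation(observation_shape=(1, 84, 84), n_frames=4)
        stacked.append(frame)
        observation = stacked.eval()  # shape (4, 84, 84)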
References:
* `Mnih et al., Human-level control through deep reinforcement
learning. <https://www.nature.com/articles/nature14236>`_
Args:
observation_shape (tuple): image observation shape.
n_frames (int): the number of frames to stack.
        dtype (numpy.dtype): numpy data type.
"""
_image_channels: int
_n_frames: int
_dtype: np.dtype
_stack: np.ndarray
def __init__(
self,
observation_shape: Sequence[int],
n_frames: int,
dtype: np.dtype = np.uint8,
):
self._image_channels = observation_shape[0]
image_size = observation_shape[1:]
self._n_frames = n_frames
self._dtype = dtype
stacked_shape = (self._image_channels * n_frames, *image_size)
self._stack = np.zeros(stacked_shape, dtype=self._dtype)
    def append(self, image: np.ndarray) -> None:
"""Stack new image.
Args:
image (numpy.ndarray): image observation.
"""
assert image.dtype == self._dtype
self._stack = np.roll(self._stack, -self._image_channels, axis=0)
head_channel = self._image_channels * (self._n_frames - 1)
self._stack[head_channel:] = image.copy()
def eval(self) -> np.ndarray:
"""Returns stacked observation.
Returns:
numpy.ndarray: stacked observation.
"""
return self._stack
def clear(self) -> None:
"""Clear stacked observation by filling 0."""
self._stack.fill(0)
class BatchStackedObservation:
"""Batch version of StackedObservation class.
This class is used to stack images to handle temporal features.
References:
* `Mnih et al., Human-level control through deep reinforcement
learning. <https://www.nature.com/articles/nature14236>`_
Args:
observation_shape (tuple): image observation shape.
n_frames (int): the number of frames to stack.
        dtype (numpy.dtype): numpy data type.
"""
_image_channels: int
_n_frames: int
_n_envs: int
_dtype: np.dtype
_stack: np.ndarray
def __init__(
self,
observation_shape: Sequence[int],
n_frames: int,
n_envs: int,
dtype: np.dtype = np.uint8,
):
self._image_channels = observation_shape[0]
image_size = observation_shape[1:]
self._n_frames = n_frames
self._n_envs = n_envs
self._dtype = dtype
stacked_shape = (n_envs, self._image_channels * n_frames, *image_size)
self._stack = np.zeros(stacked_shape, dtype=self._dtype)
    def append(self, image: np.ndarray) -> None:
"""Stack new image.
Args:
image (numpy.ndarray): image observation.
"""
assert image.dtype == self._dtype
self._stack = np.roll(self._stack, -self._image_channels, axis=1)
head_channel = self._image_channels * (self._n_frames - 1)
self._stack[:, head_channel:] = image.copy()
def eval(self) -> np.ndarray:
"""Returns stacked observation.
Returns:
numpy.ndarray: stacked observation.
"""
return self._stack
def clear(self) -> None:
"""Clear stacked observation by filling 0."""
self._stack.fill(0)
def clear_by_index(self, index: int) -> None:
"""Clear stacked observation in the specific index by filling 0."""
self._stack[index].fill(0) | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/preprocessing/stack.py | stack.py |
from typing import Any, ClassVar, Dict, List, Optional, Type
import gym
import numpy as np
import torch
from ..dataset import MDPDataset, Transition
from ..decorators import pretty_repr
@pretty_repr
class Scaler:
TYPE: ClassVar[str] = "none"
def fit(self, transitions: List[Transition]) -> None:
"""Estimates scaling parameters from dataset.
Args:
transitions: list of transitions.
"""
raise NotImplementedError
def fit_with_env(self, env: gym.Env) -> None:
"""Gets scaling parameters from environment.
Args:
env: gym environment.
"""
raise NotImplementedError
def transform(self, x: torch.Tensor) -> torch.Tensor:
"""Returns processed observations.
Args:
x: observation.
Returns:
processed observation.
"""
raise NotImplementedError
def reverse_transform(self, x: torch.Tensor) -> torch.Tensor:
"""Returns reversely transformed observations.
Args:
x: observation.
Returns:
reversely transformed observation.
"""
raise NotImplementedError
def get_type(self) -> str:
"""Returns a scaler type.
Returns:
scaler type.
"""
return self.TYPE
def get_params(self, deep: bool = False) -> Dict[str, Any]:
"""Returns scaling parameters.
Args:
deep: flag to deeply copy objects.
Returns:
scaler parameters.
"""
raise NotImplementedError
class PixelScaler(Scaler):
"""Pixel normalization preprocessing.
.. math::
x' = x / 255
.. code-block:: python
from d3rlpy.dataset import MDPDataset
from d3rlpy.algos import CQL
dataset = MDPDataset(observations, actions, rewards, terminals)
# initialize algorithm with PixelScaler
cql = CQL(scaler='pixel')
cql.fit(dataset.episodes)
"""
TYPE: ClassVar[str] = "pixel"
def fit(self, transitions: List[Transition]) -> None:
pass
def fit_with_env(self, env: gym.Env) -> None:
pass
def transform(self, x: torch.Tensor) -> torch.Tensor:
return x.float() / 255.0
def reverse_transform(self, x: torch.Tensor) -> torch.Tensor:
return (x * 255.0).long()
def get_params(self, deep: bool = False) -> Dict[str, Any]:
return {}
class MinMaxScaler(Scaler):
r"""Min-Max normalization preprocessing.
.. math::
x' = (x - \min{x}) / (\max{x} - \min{x})
.. code-block:: python
from d3rlpy.dataset import MDPDataset
from d3rlpy.algos import CQL
dataset = MDPDataset(observations, actions, rewards, terminals)
# initialize algorithm with MinMaxScaler
cql = CQL(scaler='min_max')
# scaler is initialized from the given transitions
transitions = []
for episode in dataset.episodes:
transitions += episode.transitions
cql.fit(transitions)
    You can also initialize with a :class:`d3rlpy.dataset.MDPDataset` object or
manually.
.. code-block:: python
from d3rlpy.preprocessing import MinMaxScaler
# initialize with dataset
scaler = MinMaxScaler(dataset)
# initialize manually
minimum = observations.min(axis=0)
maximum = observations.max(axis=0)
scaler = MinMaxScaler(minimum=minimum, maximum=maximum)
cql = CQL(scaler=scaler)
Args:
dataset (d3rlpy.dataset.MDPDataset): dataset object.
        minimum (numpy.ndarray): minimum values at each entry.
        maximum (numpy.ndarray): maximum values at each entry.
"""
TYPE: ClassVar[str] = "min_max"
_minimum: Optional[np.ndarray]
_maximum: Optional[np.ndarray]
def __init__(
self,
dataset: Optional[MDPDataset] = None,
maximum: Optional[np.ndarray] = None,
minimum: Optional[np.ndarray] = None,
):
self._minimum = None
self._maximum = None
if dataset:
transitions = []
for episode in dataset.episodes:
transitions += episode.transitions
self.fit(transitions)
elif maximum is not None and minimum is not None:
self._minimum = np.asarray(minimum)
self._maximum = np.asarray(maximum)
def fit(self, transitions: List[Transition]) -> None:
if self._minimum is not None and self._maximum is not None:
return
for i, transition in enumerate(transitions):
observation = np.asarray(transition.observation)
if i == 0:
minimum = observation
maximum = observation
else:
minimum = np.minimum(minimum, observation)
maximum = np.maximum(maximum, observation)
if transition.terminal:
minimum = np.minimum(minimum, transition.next_observation)
maximum = np.maximum(maximum, transition.next_observation)
self._minimum = minimum.reshape((1,) + minimum.shape)
self._maximum = maximum.reshape((1,) + maximum.shape)
def fit_with_env(self, env: gym.Env) -> None:
if self._minimum is not None and self._maximum is not None:
return
assert isinstance(env.observation_space, gym.spaces.Box)
shape = env.observation_space.shape
low = np.asarray(env.observation_space.low)
high = np.asarray(env.observation_space.high)
self._minimum = low.reshape((1,) + shape)
self._maximum = high.reshape((1,) + shape)
def transform(self, x: torch.Tensor) -> torch.Tensor:
assert self._minimum is not None and self._maximum is not None
minimum = torch.tensor(
self._minimum, dtype=torch.float32, device=x.device
)
maximum = torch.tensor(
self._maximum, dtype=torch.float32, device=x.device
)
return (x - minimum) / (maximum - minimum)
def reverse_transform(self, x: torch.Tensor) -> torch.Tensor:
assert self._minimum is not None and self._maximum is not None
minimum = torch.tensor(
self._minimum, dtype=torch.float32, device=x.device
)
maximum = torch.tensor(
self._maximum, dtype=torch.float32, device=x.device
)
return ((maximum - minimum) * x) + minimum
def get_params(self, deep: bool = False) -> Dict[str, Any]:
if self._maximum is not None:
maximum = self._maximum.copy() if deep else self._maximum
else:
maximum = None
if self._minimum is not None:
minimum = self._minimum.copy() if deep else self._minimum
else:
minimum = None
return {"maximum": maximum, "minimum": minimum}
class StandardScaler(Scaler):
r"""Standardization preprocessing.
.. math::
x' = (x - \mu) / \sigma
.. code-block:: python
from d3rlpy.dataset import MDPDataset
from d3rlpy.algos import CQL
dataset = MDPDataset(observations, actions, rewards, terminals)
# initialize algorithm with StandardScaler
cql = CQL(scaler='standard')
# scaler is initialized from the given episodes
transitions = []
for episode in dataset.episodes:
transitions += episode.transitions
cql.fit(transitions)
    You can also initialize with a :class:`d3rlpy.dataset.MDPDataset` object or
manually.
.. code-block:: python
from d3rlpy.preprocessing import StandardScaler
# initialize with dataset
scaler = StandardScaler(dataset)
# initialize manually
mean = observations.mean(axis=0)
std = observations.std(axis=0)
scaler = StandardScaler(mean=mean, std=std)
cql = CQL(scaler=scaler)
Args:
dataset (d3rlpy.dataset.MDPDataset): dataset object.
mean (numpy.ndarray): mean values at each entry.
std (numpy.ndarray): standard deviation at each entry.
eps (float): small constant value to avoid zero-division.
"""
TYPE = "standard"
_mean: Optional[np.ndarray]
_std: Optional[np.ndarray]
_eps: float
def __init__(
self,
dataset: Optional[MDPDataset] = None,
mean: Optional[np.ndarray] = None,
std: Optional[np.ndarray] = None,
eps: float = 1e-3,
):
self._mean = None
self._std = None
self._eps = eps
if dataset:
transitions = []
for episode in dataset.episodes:
transitions += episode.transitions
self.fit(transitions)
elif mean is not None and std is not None:
self._mean = np.asarray(mean)
self._std = np.asarray(std)
def fit(self, transitions: List[Transition]) -> None:
if self._mean is not None and self._std is not None:
return
# compute mean
total_sum = np.zeros(transitions[0].get_observation_shape())
total_count = 0
for transition in transitions:
total_sum += np.asarray(transition.observation)
total_count += 1
if transition.terminal:
total_sum += np.asarray(transition.next_observation)
total_count += 1
mean = total_sum / total_count
        # compute standard deviation
total_sqsum = np.zeros(transitions[0].get_observation_shape())
expanded_mean = mean.reshape(mean.shape)
for transition in transitions:
observation = np.asarray(transition.observation)
total_sqsum += (observation - expanded_mean) ** 2
if transition.terminal:
next_observation = transition.next_observation
total_sqsum += (next_observation - expanded_mean) ** 2
std = np.sqrt(total_sqsum / total_count)
self._mean = mean.reshape((1,) + mean.shape)
self._std = std.reshape((1,) + std.shape)
def fit_with_env(self, env: gym.Env) -> None:
if self._mean is not None and self._std is not None:
return
raise NotImplementedError(
"standard scaler does not support fit_with_env."
)
def transform(self, x: torch.Tensor) -> torch.Tensor:
assert self._mean is not None and self._std is not None
mean = torch.tensor(self._mean, dtype=torch.float32, device=x.device)
std = torch.tensor(self._std, dtype=torch.float32, device=x.device)
return (x - mean) / (std + self._eps)
def reverse_transform(self, x: torch.Tensor) -> torch.Tensor:
assert self._mean is not None and self._std is not None
mean = torch.tensor(self._mean, dtype=torch.float32, device=x.device)
std = torch.tensor(self._std, dtype=torch.float32, device=x.device)
return ((std + self._eps) * x) + mean
def get_params(self, deep: bool = False) -> Dict[str, Any]:
if self._mean is not None:
mean = self._mean.copy() if deep else self._mean
else:
mean = None
if self._std is not None:
std = self._std.copy() if deep else self._std
else:
std = None
return {"mean": mean, "std": std, "eps": self._eps}
SCALER_LIST: Dict[str, Type[Scaler]] = {}
def register_scaler(cls: Type[Scaler]) -> None:
"""Registers scaler class.
Args:
cls: scaler class inheriting ``Scaler``.
"""
is_registered = cls.TYPE in SCALER_LIST
assert not is_registered, f"{cls.TYPE} seems to be already registered"
SCALER_LIST[cls.TYPE] = cls
def create_scaler(name: str, **kwargs: Any) -> Scaler:
"""Returns registered scaler object.
Args:
        name: registered scaler type name.
kwargs: scaler arguments.
Returns:
scaler object.
"""
assert name in SCALER_LIST, f"{name} seems not to be registered."
scaler = SCALER_LIST[name](**kwargs) # type: ignore
assert isinstance(scaler, Scaler)
return scaler
register_scaler(PixelScaler)
register_scaler(MinMaxScaler)
register_scaler(StandardScaler) | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/preprocessing/scalers.py | scalers.py |
from typing import Any, ClassVar, Dict, List, Optional, Type
import gym
import numpy as np
import torch
from ..dataset import MDPDataset, Transition
from ..decorators import pretty_repr
from ..logger import LOG
@pretty_repr
class RewardScaler:
TYPE: ClassVar[str] = "none"
def fit(self, transitions: List[Transition]) -> None:
"""Estimates scaling parameters from dataset.
Args:
transitions: list of transitions.
"""
raise NotImplementedError
def fit_with_env(self, env: gym.Env) -> None:
"""Gets scaling parameters from environment.
Note:
``RewardScaler`` does not support fitting with environment.
Args:
env: gym environment.
"""
raise NotImplementedError("Please initialize with dataset.")
def transform(self, reward: torch.Tensor) -> torch.Tensor:
"""Returns processed rewards.
Args:
reward: reward.
Returns:
processed reward.
"""
raise NotImplementedError
def reverse_transform(self, reward: torch.Tensor) -> torch.Tensor:
"""Returns reversely processed rewards.
Args:
reward: reward.
Returns:
reversely processed reward.
"""
raise NotImplementedError
def transform_numpy(self, reward: np.ndarray) -> np.ndarray:
"""Returns transformed rewards in numpy array.
Args:
reward: reward.
Returns:
transformed reward.
"""
raise NotImplementedError
def get_type(self) -> str:
"""Returns a scaler type.
Returns:
scaler type.
"""
return self.TYPE
def get_params(self, deep: bool = False) -> Dict[str, Any]:
"""Returns scaling parameters.
Args:
deep: flag to deeply copy objects.
Returns:
scaler parameters.
"""
raise NotImplementedError
class MultiplyRewardScaler(RewardScaler):
r"""Multiplication reward preprocessing.
This preprocessor multiplies rewards by a constant number.
.. code-block:: python
from d3rlpy.preprocessing import MultiplyRewardScaler
# multiply rewards by 10
reward_scaler = MultiplyRewardScaler(10.0)
cql = CQL(reward_scaler=reward_scaler)
Args:
multiplier (float): constant multiplication value.
"""
TYPE: ClassVar[str] = "multiply"
_multiplier: Optional[float]
def __init__(self, multiplier: Optional[float] = None):
self._multiplier = multiplier
def fit(self, transitions: List[Transition]) -> None:
if self._multiplier is None:
LOG.warning("Please initialize MultiplyRewardScaler manually.")
def transform(self, reward: torch.Tensor) -> torch.Tensor:
return self._multiplier * reward
def reverse_transform(self, reward: torch.Tensor) -> torch.Tensor:
return reward / self._multiplier
def transform_numpy(self, reward: np.ndarray) -> np.ndarray:
return self._multiplier * reward
def get_params(self, deep: bool = False) -> Dict[str, Any]:
return {"multiplier": self._multiplier}
class ClipRewardScaler(RewardScaler):
r"""Reward clipping preprocessing.
.. code-block:: python
from d3rlpy.preprocessing import ClipRewardScaler
# clip rewards within [-1.0, 1.0]
reward_scaler = ClipRewardScaler(low=-1.0, high=1.0)
cql = CQL(reward_scaler=reward_scaler)
Args:
low (float): minimum value to clip.
high (float): maximum value to clip.
multiplier (float): constant multiplication value.
"""
TYPE: ClassVar[str] = "clip"
_low: Optional[float]
_high: Optional[float]
_multiplier: float
def __init__(
self,
low: Optional[float] = None,
high: Optional[float] = None,
multiplier: float = 1.0,
):
self._low = low
self._high = high
self._multiplier = multiplier
def fit(self, transitions: List[Transition]) -> None:
if self._low is None and self._high is None:
LOG.warning("Please initialize ClipRewardScaler manually.")
def transform(self, reward: torch.Tensor) -> torch.Tensor:
return self._multiplier * reward.clamp(self._low, self._high)
def reverse_transform(self, reward: torch.Tensor) -> torch.Tensor:
return reward / self._multiplier
def transform_numpy(self, reward: np.ndarray) -> np.ndarray:
return self._multiplier * np.clip(reward, self._low, self._high)
def get_params(self, deep: bool = False) -> Dict[str, Any]:
return {
"low": self._low,
"high": self._high,
"multiplier": self._multiplier,
}
class MinMaxRewardScaler(RewardScaler):
r"""Min-Max reward normalization preprocessing.
.. math::
r' = (r - \min(r)) / (\max(r) - \min(r))
.. code-block:: python
from d3rlpy.algos import CQL
cql = CQL(reward_scaler="min_max")
You can also initialize with :class:`d3rlpy.dataset.MDPDataset` object or
manually.
.. code-block:: python
from d3rlpy.preprocessing import MinMaxRewardScaler
# initialize with dataset
scaler = MinMaxRewardScaler(dataset)
# initialize manually
scaler = MinMaxRewardScaler(minimum=0.0, maximum=10.0)
        cql = CQL(reward_scaler=scaler)
Args:
dataset (d3rlpy.dataset.MDPDataset): dataset object.
minimum (float): minimum value.
maximum (float): maximum value.
multiplier (float): constant multiplication value.
"""
TYPE: ClassVar[str] = "min_max"
_minimum: Optional[float]
_maximum: Optional[float]
_multiplier: float
def __init__(
self,
dataset: Optional[MDPDataset] = None,
minimum: Optional[float] = None,
maximum: Optional[float] = None,
multiplier: float = 1.0,
):
self._minimum = None
self._maximum = None
self._multiplier = multiplier
if dataset:
transitions = []
for episode in dataset.episodes:
transitions += episode.transitions
self.fit(transitions)
elif minimum is not None and maximum is not None:
self._minimum = minimum
self._maximum = maximum
def fit(self, transitions: List[Transition]) -> None:
if self._minimum is not None and self._maximum is not None:
return
rewards = [transition.next_reward for transition in transitions]
self._minimum = float(np.min(rewards))
self._maximum = float(np.max(rewards))
def transform(self, reward: torch.Tensor) -> torch.Tensor:
assert self._minimum is not None and self._maximum is not None
base = self._maximum - self._minimum
return self._multiplier * (reward - self._minimum) / base
def reverse_transform(self, reward: torch.Tensor) -> torch.Tensor:
assert self._minimum is not None and self._maximum is not None
base = self._maximum - self._minimum
return reward * base / self._multiplier + self._minimum
def transform_numpy(self, reward: np.ndarray) -> np.ndarray:
assert self._minimum is not None and self._maximum is not None
base = self._maximum - self._minimum
return self._multiplier * (reward - self._minimum) / base
def get_params(self, deep: bool = False) -> Dict[str, Any]:
return {
"minimum": self._minimum,
"maximum": self._maximum,
"multiplier": self._multiplier,
}
class StandardRewardScaler(RewardScaler):
r"""Reward standardization preprocessing.
.. math::
r' = (r - \mu) / \sigma
.. code-block:: python
from d3rlpy.algos import CQL
cql = CQL(reward_scaler="standard")
You can also initialize with :class:`d3rlpy.dataset.MDPDataset` object or
manually.
.. code-block:: python
from d3rlpy.preprocessing import StandardRewardScaler
# initialize with dataset
scaler = StandardRewardScaler(dataset)
# initialize manually
scaler = StandardRewardScaler(mean=0.0, std=1.0)
        cql = CQL(reward_scaler=scaler)
Args:
dataset (d3rlpy.dataset.MDPDataset): dataset object.
mean (float): mean value.
std (float): standard deviation value.
eps (float): constant value to avoid zero-division.
        multiplier (float): constant multiplication value.
"""
TYPE: ClassVar[str] = "standard"
_mean: Optional[float]
_std: Optional[float]
_eps: float
_multiplier: float
def __init__(
self,
dataset: Optional[MDPDataset] = None,
mean: Optional[float] = None,
std: Optional[float] = None,
eps: float = 1e-3,
multiplier: float = 1.0,
):
self._mean = None
self._std = None
self._eps = eps
self._multiplier = multiplier
if dataset:
transitions = []
for episode in dataset.episodes:
transitions += episode.transitions
self.fit(transitions)
elif mean is not None and std is not None:
self._mean = mean
self._std = std
def fit(self, transitions: List[Transition]) -> None:
if self._mean is not None and self._std is not None:
return
rewards = [transition.next_reward for transition in transitions]
self._mean = float(np.mean(rewards))
self._std = float(np.std(rewards))
def transform(self, reward: torch.Tensor) -> torch.Tensor:
assert self._mean is not None and self._std is not None
nonzero_std = self._std + self._eps
return self._multiplier * (reward - self._mean) / nonzero_std
def reverse_transform(self, reward: torch.Tensor) -> torch.Tensor:
assert self._mean is not None and self._std is not None
return reward * (self._std + self._eps) / self._multiplier + self._mean
def transform_numpy(self, reward: np.ndarray) -> np.ndarray:
assert self._mean is not None and self._std is not None
nonzero_std = self._std + self._eps
return self._multiplier * (reward - self._mean) / nonzero_std
def get_params(self, deep: bool = False) -> Dict[str, Any]:
return {
"mean": self._mean,
"std": self._std,
"eps": self._eps,
"multiplier": self._multiplier,
}
REWARD_SCALER_LIST: Dict[str, Type[RewardScaler]] = {}
def register_reward_scaler(cls: Type[RewardScaler]) -> None:
"""Registers reward scaler class.
Args:
cls: scaler class inheriting ``RewardScaler``.
"""
is_registered = cls.TYPE in REWARD_SCALER_LIST
assert not is_registered, f"{cls.TYPE} seems to be already registered"
REWARD_SCALER_LIST[cls.TYPE] = cls
def create_reward_scaler(name: str, **kwargs: Any) -> RewardScaler:
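    """Returns registered reward scaler object.
    Args:
        name: registered reward scaler type name.
        kwargs: reward scaler arguments.
    Returns:
        reward scaler object.
    """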
assert name in REWARD_SCALER_LIST, f"{name} seems not to be registered."
reward_scaler = REWARD_SCALER_LIST[name](**kwargs) # type: ignore
assert isinstance(reward_scaler, RewardScaler)
return reward_scaler
register_reward_scaler(MultiplyRewardScaler)
register_reward_scaler(ClipRewardScaler)
register_reward_scaler(MinMaxRewardScaler)
register_reward_scaler(StandardRewardScaler) | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/preprocessing/reward_scalers.py | reward_scalers.py |
from typing import Any, ClassVar, Dict, List, Optional, Type
import gym
import numpy as np
import torch
from ..dataset import MDPDataset, Transition
from ..decorators import pretty_repr
@pretty_repr
class ActionScaler:
TYPE: ClassVar[str] = "none"
def fit(self, transitions: List[Transition]) -> None:
"""Estimates scaling parameters from dataset.
Args:
transitions: a list of transition objects.
"""
raise NotImplementedError
def fit_with_env(self, env: gym.Env) -> None:
"""Gets scaling parameters from environment.
Args:
env: gym environment.
"""
raise NotImplementedError
def transform(self, action: torch.Tensor) -> torch.Tensor:
"""Returns processed action.
Args:
action: action vector.
Returns:
processed action.
"""
raise NotImplementedError
def reverse_transform(self, action: torch.Tensor) -> torch.Tensor:
"""Returns reversely transformed action.
Args:
action: action vector.
Returns:
reversely transformed action.
"""
raise NotImplementedError
def reverse_transform_numpy(self, action: np.ndarray) -> np.ndarray:
"""Returns reversely transformed action in numpy array.
Args:
action: action vector.
Returns:
reversely transformed action.
"""
raise NotImplementedError
def get_type(self) -> str:
"""Returns action scaler type.
Returns:
action scaler type.
"""
return self.TYPE
def get_params(self, deep: bool = False) -> Dict[str, Any]:
"""Returns action scaler params.
Args:
deep: flag to deepcopy parameters.
Returns:
action scaler parameters.
"""
raise NotImplementedError
class MinMaxActionScaler(ActionScaler):
r"""Min-Max normalization action preprocessing.
Actions will be normalized in range ``[-1.0, 1.0]``.
.. math::
a' = (a - \min{a}) / (\max{a} - \min{a}) * 2 - 1
.. code-block:: python
from d3rlpy.dataset import MDPDataset
from d3rlpy.algos import CQL
dataset = MDPDataset(observations, actions, rewards, terminals)
# initialize algorithm with MinMaxActionScaler
cql = CQL(action_scaler='min_max')
# scaler is initialized from the given transitions
transitions = []
for episode in dataset.episodes:
transitions += episode.transitions
cql.fit(transitions)
You can also initialize with :class:`d3rlpy.dataset.MDPDataset` object or
manually.
.. code-block:: python
from d3rlpy.preprocessing import MinMaxActionScaler
# initialize with dataset
scaler = MinMaxActionScaler(dataset)
# initialize manually
minimum = actions.min(axis=0)
maximum = actions.max(axis=0)
action_scaler = MinMaxActionScaler(minimum=minimum, maximum=maximum)
cql = CQL(action_scaler=action_scaler)
Args:
dataset (d3rlpy.dataset.MDPDataset): dataset object.
        minimum (numpy.ndarray): minimum values at each entry.
        maximum (numpy.ndarray): maximum values at each entry.
"""
TYPE: ClassVar[str] = "min_max"
_minimum: Optional[np.ndarray]
_maximum: Optional[np.ndarray]
def __init__(
self,
dataset: Optional[MDPDataset] = None,
maximum: Optional[np.ndarray] = None,
minimum: Optional[np.ndarray] = None,
):
self._minimum = None
self._maximum = None
if dataset:
transitions = []
for episode in dataset.episodes:
transitions += episode.transitions
self.fit(transitions)
elif maximum is not None and minimum is not None:
self._minimum = np.asarray(minimum)
self._maximum = np.asarray(maximum)
def fit(self, transitions: List[Transition]) -> None:
if self._minimum is not None and self._maximum is not None:
return
for i, transition in enumerate(transitions):
action = np.asarray(transition.action)
if i == 0:
minimum = action
maximum = action
else:
minimum = np.minimum(minimum, action)
maximum = np.maximum(maximum, action)
if transition.terminal:
minimum = np.minimum(minimum, transition.next_action)
maximum = np.maximum(maximum, transition.next_action)
self._minimum = minimum.reshape((1,) + minimum.shape)
self._maximum = maximum.reshape((1,) + maximum.shape)
def fit_with_env(self, env: gym.Env) -> None:
if self._minimum is not None and self._maximum is not None:
return
assert isinstance(env.action_space, gym.spaces.Box)
shape = env.action_space.shape
low = np.asarray(env.action_space.low)
high = np.asarray(env.action_space.high)
self._minimum = low.reshape((1,) + shape)
self._maximum = high.reshape((1,) + shape)
def transform(self, action: torch.Tensor) -> torch.Tensor:
assert self._minimum is not None and self._maximum is not None
minimum = torch.tensor(
self._minimum, dtype=torch.float32, device=action.device
)
maximum = torch.tensor(
self._maximum, dtype=torch.float32, device=action.device
)
# transform action into [-1.0, 1.0]
return ((action - minimum) / (maximum - minimum)) * 2.0 - 1.0
def reverse_transform(self, action: torch.Tensor) -> torch.Tensor:
assert self._minimum is not None and self._maximum is not None
minimum = torch.tensor(
self._minimum, dtype=torch.float32, device=action.device
)
maximum = torch.tensor(
self._maximum, dtype=torch.float32, device=action.device
)
# transform action from [-1.0, 1.0]
return ((maximum - minimum) * ((action + 1.0) / 2.0)) + minimum
def reverse_transform_numpy(self, action: np.ndarray) -> np.ndarray:
assert self._minimum is not None and self._maximum is not None
minimum, maximum = self._minimum, self._maximum
# transform action from [-1.0, 1.0]
return ((maximum - minimum) * ((action + 1.0) / 2.0)) + minimum
def get_params(self, deep: bool = False) -> Dict[str, Any]:
if self._minimum is not None:
minimum = self._minimum.copy() if deep else self._minimum
else:
minimum = None
if self._maximum is not None:
maximum = self._maximum.copy() if deep else self._maximum
else:
maximum = None
return {"minimum": minimum, "maximum": maximum}
ACTION_SCALER_LIST: Dict[str, Type[ActionScaler]] = {}
def register_action_scaler(cls: Type[ActionScaler]) -> None:
"""Registers action scaler class.
Args:
cls: action scaler class inheriting ``ActionScaler``.
"""
is_registered = cls.TYPE in ACTION_SCALER_LIST
assert not is_registered, f"{cls.TYPE} seems to be already registered"
ACTION_SCALER_LIST[cls.TYPE] = cls
def create_action_scaler(name: str, **kwargs: Any) -> ActionScaler:
"""Returns registered action scaler object.
Args:
        name: registered scaler type name.
kwargs: scaler arguments.
Returns:
scaler object.
"""
assert name in ACTION_SCALER_LIST, f"{name} seems not to be registered."
scaler = ACTION_SCALER_LIST[name](**kwargs) # type: ignore
assert isinstance(scaler, ActionScaler)
return scaler
register_action_scaler(MinMaxActionScaler) | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/preprocessing/action_scalers.py | action_scalers.py |
from typing import Callable, List
import numpy as np
from ..dataset import Episode
from .scorer import WINDOW_SIZE, AlgoProtocol, _make_batches
def compare_continuous_action_diff(
base_algo: AlgoProtocol,
) -> Callable[[AlgoProtocol, List[Episode]], float]:
r"""Returns scorer function of action difference between algorithms.
    This metric suggests how different the two algorithms are in continuous
action-space.
If the algorithm to compare with is near-optimal, the small action
difference would be better.
.. math::
\mathbb{E}_{s_t \sim D}
[(\pi_{\phi_1}(s_t) - \pi_{\phi_2}(s_t))^2]
.. code-block:: python
from d3rlpy.algos import CQL
from d3rlpy.metrics.comparer import compare_continuous_action_diff
cql1 = CQL()
cql2 = CQL()
scorer = compare_continuous_action_diff(cql1)
squared_action_diff = scorer(cql2, ...)
Args:
        base_algo: algorithm to compare with.
Returns:
scorer function.
"""
def scorer(algo: AlgoProtocol, episodes: List[Episode]) -> float:
total_diffs = []
for episode in episodes:
# TODO: handle different n_frames
for batch in _make_batches(episode, WINDOW_SIZE, algo.n_frames):
base_actions = base_algo.predict(batch.observations)
actions = algo.predict(batch.observations)
diff = ((actions - base_actions) ** 2).sum(axis=1).tolist()
total_diffs += diff
        # smaller is better
return -float(np.mean(total_diffs))
return scorer
def compare_discrete_action_match(
base_algo: AlgoProtocol,
) -> Callable[[AlgoProtocol, List[Episode]], float]:
r"""Returns scorer function of action matches between algorithms.
    This metric suggests how different the two algorithms are in discrete
    action-space.
    If the algorithm to compare with is near-optimal, a large match rate is
    better.
.. math::
\mathbb{E}_{s_t \sim D} [\parallel
\{\text{argmax}_a Q_{\theta_1}(s_t, a)
= \text{argmax}_a Q_{\theta_2}(s_t, a)\}]
.. code-block:: python
from d3rlpy.algos import DQN
        from d3rlpy.metrics.comparer import compare_discrete_action_match
        dqn1 = DQN()
        dqn2 = DQN()
        scorer = compare_discrete_action_match(dqn1)
percentage_of_identical_actions = scorer(dqn2, ...)
Args:
        base_algo: algorithm to compare with.
Returns:
scorer function.
"""
def scorer(algo: AlgoProtocol, episodes: List[Episode]) -> float:
total_matches = []
for episode in episodes:
# TODO: handle different n_frames
for batch in _make_batches(episode, WINDOW_SIZE, algo.n_frames):
base_actions = base_algo.predict(batch.observations)
actions = algo.predict(batch.observations)
match = (base_actions == actions).tolist()
total_matches += match
return float(np.mean(total_matches))
return scorer | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/metrics/comparer.py | comparer.py |
from typing import Any, Callable, Iterator, List, Optional, Tuple, Union, cast
import gym
import numpy as np
from typing_extensions import Protocol
from ..dataset import Episode, TransitionMiniBatch
from ..preprocessing.reward_scalers import RewardScaler
from ..preprocessing.stack import StackedObservation
WINDOW_SIZE = 1024
class AlgoProtocol(Protocol):
def predict(self, x: Union[np.ndarray, List[Any]]) -> np.ndarray:
...
def predict_value(
self,
x: Union[np.ndarray, List[Any]],
action: Union[np.ndarray, List[Any]],
with_std: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
...
@property
def n_frames(self) -> int:
...
@property
def gamma(self) -> float:
...
@property
def reward_scaler(self) -> Optional[RewardScaler]:
...
class DynamicsProtocol(Protocol):
def predict(
self,
x: Union[np.ndarray, List[Any]],
action: Union[np.ndarray, List[Any]],
with_variance: bool = False,
) -> Union[
Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray]
]:
...
@property
def n_frames(self) -> int:
...
@property
def reward_scaler(self) -> Optional[RewardScaler]:
...
def _make_batches(
episode: Episode, window_size: int, n_frames: int
) -> Iterator[TransitionMiniBatch]:
n_batches = len(episode) // window_size
if len(episode) % window_size != 0:
n_batches += 1
for i in range(n_batches):
head_index = i * window_size
last_index = min(head_index + window_size, len(episode))
transitions = episode.transitions[head_index:last_index]
batch = TransitionMiniBatch(transitions, n_frames)
yield batch
########################################################
# Author: Shyamal H Anadkat | AIPI530 | Fall 2021 #
########################################################
# Attempt: Calculate the true total discounted reward by taking initial action a in initial state s
def true_q_value_scorer(algo: AlgoProtocol, episodes: List[Episode]) -> float:
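    r"""Returns a bootstrapped estimate of the discounted return target.
    The target is computed as
    :math:`y = r_{t+1} + \gamma Q_\theta(s_{t+1}, \pi(s_{t+1}))`,
    using the algorithm's own action-value estimate at the first next
    observation of each evaluation window.
    Args:
        algo: algorithm.
        episodes: list of episodes.
    Returns:
        mean bootstrapped target value.
    """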
for episode in episodes:
for batch in _make_batches(episode, WINDOW_SIZE, algo.n_frames):
# estimate values for next observations
next_actions = algo.predict([batch.next_observations[0]])
next_values = algo.predict_value(
[batch.next_observations[0]], next_actions
)
mask = (1.0 - np.asarray(batch.terminals)).reshape(-1)
rewards = np.asarray(batch.next_rewards).reshape(-1)
if algo.reward_scaler:
rewards = algo.reward_scaler.transform_numpy(rewards)
y = rewards + algo.gamma * cast(np.ndarray, next_values) * mask
return float(np.mean(y))
def td_error_scorer(algo: AlgoProtocol, episodes: List[Episode]) -> float:
r"""Returns average TD error.
    This metric suggests how Q functions overfit to training sets.
If the TD error is large, the Q functions are overfitting.
.. math::
\mathbb{E}_{s_t, a_t, r_{t+1}, s_{t+1} \sim D}
[(Q_\theta (s_t, a_t)
- r_{t+1} - \gamma \max_a Q_\theta (s_{t+1}, a))^2]
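    A typical usage sketch with ``fit`` (the episode splits are assumed to be
    prepared beforehand):
    .. code-block:: python
        from d3rlpy.algos import CQL
        from d3rlpy.metrics.scorer import td_error_scorer
        cql = CQL()
        # train_episodes/test_episodes are placeholder episode splits
        cql.fit(train_episodes,
                eval_episodes=test_episodes,
                scorers={'td_error': td_error_scorer})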
Args:
algo: algorithm.
episodes: list of episodes.
Returns:
average TD error.
"""
total_errors = []
for episode in episodes:
for batch in _make_batches(episode, WINDOW_SIZE, algo.n_frames):
# estimate values for current observations
values = algo.predict_value(batch.observations, batch.actions)
# estimate values for next observations
next_actions = algo.predict(batch.next_observations)
next_values = algo.predict_value(
batch.next_observations, next_actions
)
# calculate td errors
mask = (1.0 - np.asarray(batch.terminals)).reshape(-1)
rewards = np.asarray(batch.next_rewards).reshape(-1)
if algo.reward_scaler:
rewards = algo.reward_scaler.transform_numpy(rewards)
y = rewards + algo.gamma * cast(np.ndarray, next_values) * mask
total_errors += ((values - y) ** 2).tolist()
return float(np.mean(total_errors))
def discounted_sum_of_advantage_scorer(
algo: AlgoProtocol, episodes: List[Episode]
) -> float:
r"""Returns average of discounted sum of advantage.
    This metric suggests how the greedy-policy selects different actions in
action-value space.
If the sum of advantage is small, the policy selects actions with larger
estimated action-values.
.. math::
\mathbb{E}_{s_t, a_t \sim D}
[\sum_{t' = t} \gamma^{t' - t} A(s_{t'}, a_{t'})]
where :math:`A(s_t, a_t) = Q_\theta (s_t, a_t)
- \mathbb{E}_{a \sim \pi} [Q_\theta (s_t, a)]`.
References:
* `Murphy., A generalization error for Q-Learning.
<http://www.jmlr.org/papers/volume6/murphy05a/murphy05a.pdf>`_
Args:
algo: algorithm.
episodes: list of episodes.
Returns:
average of discounted sum of advantage.
"""
total_sums = []
for episode in episodes:
for batch in _make_batches(episode, WINDOW_SIZE, algo.n_frames):
# estimate values for dataset actions
dataset_values = algo.predict_value(
batch.observations, batch.actions
)
dataset_values = cast(np.ndarray, dataset_values)
# estimate values for the current policy
actions = algo.predict(batch.observations)
on_policy_values = algo.predict_value(batch.observations, actions)
# calculate advantages
advantages = (dataset_values - on_policy_values).tolist()
# calculate discounted sum of advantages
A = advantages[-1]
sum_advantages = [A]
for advantage in reversed(advantages[:-1]):
A = advantage + algo.gamma * A
sum_advantages.append(A)
total_sums += sum_advantages
# smaller is better
return float(np.mean(total_sums))
def average_value_estimation_scorer(
algo: AlgoProtocol, episodes: List[Episode]
) -> float:
r"""Returns average value estimation.
    This metric suggests the scale of Q-function value estimates.
    If the average value estimation is too large, the Q functions overestimate
    action-values, which can make training fail.
.. math::
\mathbb{E}_{s_t \sim D} [ \max_a Q_\theta (s_t, a)]
Args:
algo: algorithm.
episodes: list of episodes.
Returns:
average value estimation.
"""
total_values = []
for episode in episodes:
for batch in _make_batches(episode, WINDOW_SIZE, algo.n_frames):
actions = algo.predict(batch.observations)
values = algo.predict_value(batch.observations, actions)
total_values += cast(np.ndarray, values).tolist()
return float(np.mean(total_values))
def value_estimation_std_scorer(
algo: AlgoProtocol, episodes: List[Episode]
) -> float:
r"""Returns standard deviation of value estimation.
    This metric suggests how confident Q functions are for the given
    episodes.
    This metric will be more accurate with `bootstrap` enabled and a larger
    `n_critics` in the algorithm.
    If the standard deviation of value estimation is large, the Q functions
    are overfitting to the training set.
.. math::
\mathbb{E}_{s_t \sim D, a \sim \text{argmax}_a Q_\theta(s_t, a)}
[Q_{\text{std}}(s_t, a)]
where :math:`Q_{\text{std}}(s, a)` is a standard deviation of action-value
estimation over ensemble functions.
Args:
algo: algorithm.
episodes: list of episodes.
Returns:
standard deviation.
"""
total_stds = []
for episode in episodes:
for batch in _make_batches(episode, WINDOW_SIZE, algo.n_frames):
actions = algo.predict(batch.observations)
_, stds = algo.predict_value(batch.observations, actions, True)
total_stds += stds.tolist()
return float(np.mean(total_stds))
def initial_state_value_estimation_scorer(
algo: AlgoProtocol, episodes: List[Episode]
) -> float:
r"""Returns mean estimated action-values at the initial states.
    This metric suggests how much return the trained policy would get from
the initial states by deploying the policy to the states.
If the estimated value is large, the trained policy is expected to get
higher returns.
.. math::
\mathbb{E}_{s_0 \sim D} [Q(s_0, \pi(s_0))]
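    A usage sketch with ``fit`` (the episode splits are placeholders):
    .. code-block:: python
        from d3rlpy.algos import CQL
        from d3rlpy.metrics.scorer import initial_state_value_estimation_scorer
        cql = CQL()
        # train_episodes/test_episodes are placeholder episode splits
        cql.fit(train_episodes,
                eval_episodes=test_episodes,
                scorers={'init_value': initial_state_value_estimation_scorer})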
References:
* `Paine et al., Hyperparameter Selection for Offline Reinforcement
Learning <https://arxiv.org/abs/2007.09055>`_
Args:
algo: algorithm.
episodes: list of episodes.
Returns:
mean action-value estimation at the initial states.
"""
total_values = []
for episode in episodes:
for batch in _make_batches(episode, WINDOW_SIZE, algo.n_frames):
# estimate action-value in initial states
actions = algo.predict([batch.observations[0]])
values = algo.predict_value([batch.observations[0]], actions)
total_values.append(values[0])
return float(np.mean(total_values))
def soft_opc_scorer(
return_threshold: float,
) -> Callable[[AlgoProtocol, List[Episode]], float]:
r"""Returns Soft Off-Policy Classification metrics.
    This function returns a scorer function, which is suitable to the standard
    scikit-learn scorer function style.
    The metric of the scorer function evaluates the gap in action-value
    estimation between the success episodes and all episodes.
    If the learned Q-function is optimal, action-values in success episodes
    are expected to be higher than the others.
    A success episode is defined as an episode with a return above the given
    threshold.
.. math::
\mathbb{E}_{s, a \sim D_{success}} [Q(s, a)]
- \mathbb{E}_{s, a \sim D} [Q(s, a)]
.. code-block:: python
from d3rlpy.datasets import get_cartpole
from d3rlpy.algos import DQN
from d3rlpy.metrics.scorer import soft_opc_scorer
from sklearn.model_selection import train_test_split
dataset, _ = get_cartpole()
train_episodes, test_episodes = train_test_split(dataset, test_size=0.2)
scorer = soft_opc_scorer(return_threshold=180)
dqn = DQN()
dqn.fit(train_episodes,
eval_episodes=test_episodes,
scorers={'soft_opc': scorer})
References:
* `Irpan et al., Off-Policy Evaluation via Off-Policy Classification.
<https://arxiv.org/abs/1906.01624>`_
Args:
return_threshold: threshold of success episodes.
Returns:
scorer function.
"""
def scorer(algo: AlgoProtocol, episodes: List[Episode]) -> float:
success_values = []
all_values = []
for episode in episodes:
is_success = episode.compute_return() >= return_threshold
for batch in _make_batches(episode, WINDOW_SIZE, algo.n_frames):
values = algo.predict_value(batch.observations, batch.actions)
values = cast(np.ndarray, values)
all_values += values.reshape(-1).tolist()
if is_success:
success_values += values.reshape(-1).tolist()
return float(np.mean(success_values) - np.mean(all_values))
return scorer
def continuous_action_diff_scorer(
algo: AlgoProtocol, episodes: List[Episode]
) -> float:
r"""Returns squared difference of actions between algorithm and dataset.
    This metric suggests how different the greedy-policy is from the given
episodes in continuous action-space.
If the given episodes are near-optimal, the small action difference would
be better.
.. math::
\mathbb{E}_{s_t, a_t \sim D} [(a_t - \pi_\phi (s_t))^2]
Args:
algo: algorithm.
episodes: list of episodes.
Returns:
squared action difference.
"""
total_diffs = []
for episode in episodes:
for batch in _make_batches(episode, WINDOW_SIZE, algo.n_frames):
actions = algo.predict(batch.observations)
diff = ((batch.actions - actions) ** 2).sum(axis=1).tolist()
total_diffs += diff
return float(np.mean(total_diffs))
def discrete_action_match_scorer(
algo: AlgoProtocol, episodes: List[Episode]
) -> float:
r"""Returns percentage of identical actions between algorithm and dataset.
    This metric suggests how different the greedy-policy is from the given
    episodes in discrete action-space.
    If the given episodes are near-optimal, a large percentage is better.
.. math::
\frac{1}{N} \sum^N \parallel
\{a_t = \text{argmax}_a Q_\theta (s_t, a)\}
Args:
algo: algorithm.
episodes: list of episodes.
Returns:
percentage of identical actions.
"""
total_matches = []
for episode in episodes:
for batch in _make_batches(episode, WINDOW_SIZE, algo.n_frames):
actions = algo.predict(batch.observations)
match = (batch.actions.reshape(-1) == actions).tolist()
total_matches += match
return float(np.mean(total_matches))
def evaluate_on_environment(
env: gym.Env, n_trials: int = 10, epsilon: float = 0.0, render: bool = False
) -> Callable[..., float]:
"""Returns scorer function of evaluation on environment.
    This function returns a scorer function, which is suitable to the standard
    scikit-learn scorer function style.
    The metric of the scorer function is the ideal metric to evaluate the
    resulting policies.
.. code-block:: python
import gym
from d3rlpy.algos import DQN
from d3rlpy.metrics.scorer import evaluate_on_environment
env = gym.make('CartPole-v0')
scorer = evaluate_on_environment(env)
        dqn = DQN()
        mean_episode_return = scorer(dqn)
Args:
env: gym-styled environment.
n_trials: the number of trials.
epsilon: noise factor for epsilon-greedy policy.
render: flag to render environment.
Returns:
        scorer function.
"""
# for image observation
observation_shape = env.observation_space.shape
is_image = len(observation_shape) == 3
def scorer(algo: AlgoProtocol, *args: Any) -> float:
if is_image:
stacked_observation = StackedObservation(
observation_shape, algo.n_frames
)
episode_rewards = []
for _ in range(n_trials):
observation = env.reset()
episode_reward = 0.0
# frame stacking
if is_image:
stacked_observation.clear()
stacked_observation.append(observation)
while True:
# take action
if np.random.random() < epsilon:
action = env.action_space.sample()
else:
if is_image:
action = algo.predict([stacked_observation.eval()])[0]
else:
action = algo.predict([observation])[0]
observation, reward, done, _ = env.step(action)
episode_reward += reward
if is_image:
stacked_observation.append(observation)
if render:
env.render()
if done:
break
episode_rewards.append(episode_reward)
return float(np.mean(episode_rewards))
return scorer
def dynamics_observation_prediction_error_scorer(
dynamics: DynamicsProtocol, episodes: List[Episode]
) -> float:
r"""Returns MSE of observation prediction.
    This metric suggests how well the dynamics model generalizes to test sets.
    If the MSE is large, the dynamics model is overfitting.
.. math::
\mathbb{E}_{s_t, a_t, s_{t+1} \sim D} [(s_{t+1} - s')^2]
where :math:`s' \sim T(s_t, a_t)`.
Args:
dynamics: dynamics model.
episodes: list of episodes.
Returns:
mean squared error.
"""
total_errors = []
for episode in episodes:
for batch in _make_batches(episode, WINDOW_SIZE, dynamics.n_frames):
pred = dynamics.predict(batch.observations, batch.actions)
errors = ((batch.next_observations - pred[0]) ** 2).sum(axis=1)
total_errors += errors.tolist()
return float(np.mean(total_errors))
def dynamics_reward_prediction_error_scorer(
dynamics: DynamicsProtocol, episodes: List[Episode]
) -> float:
r"""Returns MSE of reward prediction.
    This metric suggests how well the dynamics model generalizes to test sets.
    If the MSE is large, the dynamics model is overfitting.
.. math::
\mathbb{E}_{s_t, a_t, r_{t+1} \sim D} [(r_{t+1} - r')^2]
where :math:`r' \sim T(s_t, a_t)`.
Args:
dynamics: dynamics model.
episodes: list of episodes.
Returns:
mean squared error.
"""
total_errors = []
for episode in episodes:
for batch in _make_batches(episode, WINDOW_SIZE, dynamics.n_frames):
pred = dynamics.predict(batch.observations, batch.actions)
rewards = batch.next_rewards
if dynamics.reward_scaler:
rewards = dynamics.reward_scaler.transform_numpy(rewards)
errors = ((rewards - pred[1]) ** 2).reshape(-1)
total_errors += errors.tolist()
return float(np.mean(total_errors))
def dynamics_prediction_variance_scorer(
dynamics: DynamicsProtocol, episodes: List[Episode]
) -> float:
"""Returns prediction variance of ensemble dynamics.
    This metric suggests how confident the dynamics model is on test sets.
    If the variance is large, the dynamics model has large uncertainty.
Args:
dynamics: dynamics model.
episodes: list of episodes.
Returns:
variance.
"""
total_variances = []
for episode in episodes:
for batch in _make_batches(episode, WINDOW_SIZE, dynamics.n_frames):
pred = dynamics.predict(batch.observations, batch.actions, True)
pred = cast(Tuple[np.ndarray, np.ndarray, np.ndarray], pred)
total_variances += pred[2].tolist()
return float(np.mean(total_variances)) | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/metrics/scorer.py | scorer.py |
import os
import tempfile
import uuid
from multiprocessing import Process, get_context
from multiprocessing.connection import Connection
from typing import Any, Callable, Dict, List, Sequence, Tuple
import cloudpickle
import gym
import numpy as np
from ..online.utility import get_action_size_from_env
def _subproc(conn: Connection, remote_conn: Connection, fn_path: str) -> None:
remote_conn.close()
with open(fn_path, "rb") as f:
env = cloudpickle.load(f)()
# notify if it's ready
conn.send("ready")
while True:
command = conn.recv()
if command[0] == "step":
observation, reward, terminal, info = env.step(command[1])
conn.send([observation, reward, terminal, info])
elif command[0] == "reset":
conn.send([env.reset()])
elif command[0] == "close":
conn.close()
break
else:
raise ValueError(f"invalid {command[0]}.")
class SubprocEnv:
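    """Runs a single gym environment in a spawned child process.
    The environment factory is serialized with ``cloudpickle`` and re-created
    inside the child process; commands and results are exchanged over a
    duplex pipe.
    """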
_conn: Connection
_remote_conn: Connection
_proc: Process
def __init__(self, make_env_fn: Callable[..., gym.Env], dname: str):
# pickle function
fn_path = os.path.join(dname, str(uuid.uuid1()))
with open(fn_path, "wb") as f:
cloudpickle.dump(make_env_fn, f)
# spawn process otherwise PyTorch raises error
ctx = get_context("spawn")
self._conn, self._remote_conn = ctx.Pipe(duplex=True)
self._proc = ctx.Process( # type: ignore
target=_subproc,
args=(self._remote_conn, self._conn, fn_path),
daemon=True,
)
self._proc.start()
self._remote_conn.close()
def step_send(self, action: np.ndarray) -> None:
self._conn.send(["step", action])
def step_get(self) -> Tuple[np.ndarray, float, bool, Dict[str, Any]]:
return self._conn.recv() # type: ignore
def reset_send(self) -> None:
self._conn.send(["reset"])
def reset_get(self) -> np.ndarray:
return self._conn.recv()[0]
def wait_for_ready(self) -> bool:
self._conn.recv()
return True
def close(self) -> None:
self._conn.send(["close"])
self._conn.close()
self._proc.join()
class BatchEnv(gym.Env): # type: ignore
def step(
self, action: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, List[Dict[str, Any]]]:
"""Returns batch of next observations, actions, rewards and infos.
Args:
action: batch action.
Returns:
batch of next data.
"""
raise NotImplementedError
def reset(self) -> np.ndarray:
"""Initializes environments and returns batch of observations.
Returns:
batch of observations.
"""
raise NotImplementedError
def render(self, mode: str = "human") -> Any:
raise NotImplementedError("BatchEnvWrapper does not support render.")
@property
def n_envs(self) -> int:
raise NotImplementedError
def __len__(self) -> int:
return self.n_envs
def close(self) -> None:
for env in self._envs:
env.close()
class SyncBatchEnv(BatchEnv):
"""The environment wrapper for batch training with synchronized
environments.
    Multiple environments run serially, so the computational cost increases
    roughly linearly with the number of environments.
Args:
envs (list(gym.Env)): a list of environments.
"""
_envs: List[gym.Env]
_observation_shape: Sequence[int]
_action_size: int
_prev_terminals: np.ndarray
def __init__(self, envs: List[gym.Env]):
self._envs = envs
self.observation_space = envs[0].observation_space
self.action_space = envs[0].action_space
self._observation_shape = self.observation_space.shape
self._action_size = get_action_size_from_env(envs[0])
self._prev_terminals = np.ones(len(self._envs))
def step(
self, action: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, List[Dict[str, Any]]]:
n_envs = len(self._envs)
is_image = len(self._observation_shape) == 3
observations = np.empty(
(n_envs,) + tuple(self._observation_shape),
dtype=np.uint8 if is_image else np.float32,
)
rewards = np.empty(n_envs, dtype=np.float32)
terminals = np.empty(n_envs, dtype=np.float32)
infos = []
info: Dict[str, Any]
for i, (env, act) in enumerate(zip(self._envs, action)):
if self._prev_terminals[i]:
observation = env.reset()
reward, terminal, info = 0.0, 0.0, {}
else:
observation, reward, terminal, info = env.step(act)
observations[i] = observation
rewards[i] = reward
terminals[i] = terminal
infos.append(info)
self._prev_terminals[i] = terminal
return observations, rewards, terminals, infos
def reset(self) -> np.ndarray:
n_envs = len(self._envs)
is_image = len(self._observation_shape) == 3
observations = np.empty(
(n_envs,) + tuple(self._observation_shape),
dtype=np.uint8 if is_image else np.float32,
)
for i, env in enumerate(self._envs):
observations[i] = env.reset()
self._prev_terminals = np.ones(len(self._envs))
return observations
@property
def n_envs(self) -> int:
return len(self._envs)
class AsyncBatchEnv(BatchEnv):
"""The environment wrapper for batch training with asynchronous environment
workers.
    Multiple environments run in separate processes to maximize
    computational efficiency.
Ideally, you can scale the training linearly up to the number of CPUs.
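    A construction sketch (``make_env`` and the environment id are examples):
    .. code-block:: python
        import gym
        from d3rlpy.envs.batch import AsyncBatchEnv
        # any picklable factory returning a gym.Env works here
        def make_env():
            return gym.make('CartPole-v0')
        env = AsyncBatchEnv([make_env for _ in range(4)])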
Args:
make_env_fns (list(callable)): a list of callable functions to return an environment.
"""
_envs: List[SubprocEnv]
_observation_shape: Sequence[int]
_action_size: int
_prev_terminals: np.ndarray
def __init__(self, make_env_fns: List[Callable[..., gym.Env]]):
# start multiprocesses
with tempfile.TemporaryDirectory() as dname:
self._envs = []
for make_env in make_env_fns:
self._envs.append(SubprocEnv(make_env, dname))
            # make sure that all environments are created
for env in self._envs:
env.wait_for_ready()
ref_env = make_env_fns[0]()
self.observation_space = ref_env.observation_space
self.action_space = ref_env.action_space
self._observation_shape = self.observation_space.shape
self._action_size = get_action_size_from_env(ref_env)
self._prev_terminals = np.ones(len(self._envs))
def step(
self, action: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, List[Dict[str, Any]]]:
n_envs = len(self._envs)
is_image = len(self._observation_shape) == 3
observations = np.empty(
(n_envs,) + tuple(self._observation_shape),
dtype=np.uint8 if is_image else np.float32,
)
rewards = np.empty(n_envs, dtype=np.float32)
terminals = np.empty(n_envs, dtype=np.float32)
infos = []
# asynchronous environment step
for i, (env, act) in enumerate(zip(self._envs, action)):
if self._prev_terminals[i]:
env.reset_send()
else:
env.step_send(act)
# get the result through pipes
info: Dict[str, Any]
for i, env in enumerate(self._envs):
if self._prev_terminals[i]:
observation = env.reset_get()
reward, terminal, info = 0.0, 0.0, {}
else:
observation, reward, terminal, info = env.step_get()
observations[i] = observation
rewards[i] = reward
terminals[i] = terminal
infos.append(info)
self._prev_terminals[i] = terminal
return observations, rewards, terminals, infos
def reset(self) -> np.ndarray:
n_envs = len(self._envs)
is_image = len(self._observation_shape) == 3
observations = np.empty(
(n_envs,) + tuple(self._observation_shape),
dtype=np.uint8 if is_image else np.float32,
)
# asynchronous step
for env in self._envs:
env.reset_send()
# get the result through pipes
for i, env in enumerate(self._envs):
observations[i] = env.reset_get()
self._prev_terminals = np.ones(len(self._envs))
return observations
@property
def n_envs(self) -> int:
return len(self._envs) | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/envs/batch.py | batch.py |
import json
import os
from typing import Any, Callable, Dict, Optional, Tuple, Union
import gym
import numpy as np
try:
import cv2 # this is used in AtariPreprocessing
except ImportError:
cv2 = None
from gym.spaces import Box
from gym.wrappers import TransformReward
class ChannelFirst(gym.Wrapper): # type: ignore
"""Channel-first wrapper for image observation environments.
d3rlpy expects channel-first images since it's built with PyTorch.
You can transform the observation shape with ``ChannelFirst`` wrapper.
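    A usage sketch (the environment id is only an example):
    .. code-block:: python
        import gym
        from d3rlpy.envs.wrappers import ChannelFirst
        # (H, W, C) image observations become (C, H, W)
        env = ChannelFirst(gym.make('BreakoutNoFrameskip-v4'))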
Args:
env (gym.Env): gym environment.
"""
observation_space: Box
def __init__(self, env: gym.Env):
super().__init__(env)
shape = self.observation_space.shape
low = self.observation_space.low
high = self.observation_space.high
dtype = self.observation_space.dtype
if len(shape) == 3:
self.observation_space = Box(
low=np.transpose(low, [2, 0, 1]),
high=np.transpose(high, [2, 0, 1]),
shape=(shape[2], shape[0], shape[1]),
dtype=dtype,
)
elif len(shape) == 2:
self.observation_space = Box(
low=np.reshape(low, (1, *shape)),
high=np.reshape(high, (1, *shape)),
shape=(1, *shape),
dtype=dtype,
)
else:
raise ValueError("image observation is only allowed.")
def step(
self, action: Union[int, np.ndarray]
) -> Tuple[np.ndarray, float, bool, Dict[str, Any]]:
observation, reward, terminal, info = self.env.step(action)
# make channel first observation
if observation.ndim == 3:
observation_T = np.transpose(observation, [2, 0, 1])
else:
observation_T = np.reshape(observation, (1, *observation.shape))
assert observation_T.shape == self.observation_space.shape
return observation_T, reward, terminal, info
def reset(self, **kwargs: Any) -> np.ndarray:
observation = self.env.reset(**kwargs)
# make channel first observation
if observation.ndim == 3:
observation_T = np.transpose(observation, [2, 0, 1])
else:
observation_T = np.reshape(observation, (1, *observation.shape))
assert observation_T.shape == self.observation_space.shape
return observation_T
# https://github.com/openai/gym/blob/0.17.3/gym/wrappers/atari_preprocessing.py
class AtariPreprocessing(gym.Wrapper): # type: ignore
r"""Atari 2600 preprocessings.
This class follows the guidelines in
Machado et al. (2018), "Revisiting the Arcade Learning Environment:
Evaluation Protocols and Open Problems for General Agents".
Specifically:
* NoopReset: obtain initial state by taking random number of no-ops on
reset.
* Frame skipping: 4 by default
* Max-pooling: most recent two observations
* Termination signal when a life is lost: turned off by default. Not
recommended by Machado et al. (2018).
* Resize to a square image: 84x84 by default
* Grayscale observation: optional
* Scale observation: optional
Args:
env (Env): environment
noop_max (int): max number of no-ops
frame_skip (int): the frequency at which the agent experiences the game.
screen_size (int): resize Atari frame
terminal_on_life_loss (bool): if True, then step() returns done=True
whenever a life is lost.
grayscale_obs (bool): if True, then gray scale observation is returned,
otherwise, RGB observation is returned.
grayscale_newaxis (bool): if True and grayscale_obs=True, then a
channel axis is added to grayscale observations to make them
3-dimensional.
scale_obs (bool): if True, then observation normalized in range [0,1]
is returned. It also limits memory optimization benefits of
FrameStack Wrapper.
"""
def __init__(
self,
env: gym.Env,
noop_max: int = 30,
frame_skip: int = 4,
screen_size: int = 84,
terminal_on_life_loss: bool = False,
grayscale_obs: bool = True,
grayscale_newaxis: bool = False,
scale_obs: bool = False,
):
super().__init__(env)
assert cv2 is not None, (
"opencv-python package not installed! Try"
" running pip install gym[atari] to get dependencies for atari"
)
assert frame_skip > 0
assert screen_size > 0
assert noop_max >= 0
if frame_skip > 1:
assert "NoFrameskip" in env.spec.id, (
"disable frame-skipping in"
" the original env. for more than one frame-skip as it will"
" be done by the wrapper"
)
self.noop_max = noop_max
assert env.unwrapped.get_action_meanings()[0] == "NOOP"
self.frame_skip = frame_skip
self.screen_size = screen_size
self.terminal_on_life_loss = terminal_on_life_loss
self.grayscale_obs = grayscale_obs
self.grayscale_newaxis = grayscale_newaxis
self.scale_obs = scale_obs
# buffer of most recent two observations for max pooling
if grayscale_obs:
self.obs_buffer = [
np.empty(env.observation_space.shape[:2], dtype=np.uint8),
np.empty(env.observation_space.shape[:2], dtype=np.uint8),
]
else:
self.obs_buffer = [
np.empty(env.observation_space.shape, dtype=np.uint8),
np.empty(env.observation_space.shape, dtype=np.uint8),
]
self.ale = env.unwrapped.ale
self.lives = 0
self.game_over = True
_low, _high, _obs_dtype = (
(0, 255, np.uint8) if not scale_obs else (0, 1, np.float32)
)
_shape = (screen_size, screen_size, 1 if grayscale_obs else 3)
if grayscale_obs and not grayscale_newaxis:
_shape = _shape[:-1] # type: ignore
self.observation_space = Box(
low=_low, high=_high, shape=_shape, dtype=_obs_dtype
)
def step(
self, action: int
    ) -> Tuple[np.ndarray, float, bool, Dict[str, Any]]:
R = 0.0
for t in range(self.frame_skip):
_, reward, done, info = self.env.step(action)
R += reward
self.game_over = done
if self.terminal_on_life_loss:
new_lives = self.ale.lives()
done = done or new_lives < self.lives
self.lives = new_lives
if done:
break
if t == self.frame_skip - 2:
if self.grayscale_obs:
self.ale.getScreenGrayscale(self.obs_buffer[1])
else:
self.ale.getScreenRGB2(self.obs_buffer[1])
elif t == self.frame_skip - 1:
if self.grayscale_obs:
self.ale.getScreenGrayscale(self.obs_buffer[0])
else:
self.ale.getScreenRGB2(self.obs_buffer[0])
return self._get_obs(), R, done, info
def reset(self, **kwargs: Any) -> np.ndarray:
# this condition is not included in the original code
if self.game_over:
self.env.reset(**kwargs)
else:
# NoopReset
self.env.step(0)
noops = (
self.env.unwrapped.np_random.randint(1, self.noop_max + 1)
if self.noop_max > 0
else 0
)
for _ in range(noops):
_, _, done, _ = self.env.step(0)
if done:
self.env.reset(**kwargs)
self.lives = self.ale.lives()
if self.grayscale_obs:
self.ale.getScreenGrayscale(self.obs_buffer[0])
else:
self.ale.getScreenRGB2(self.obs_buffer[0])
self.obs_buffer[1].fill(0)
return self._get_obs()
def _get_obs(self) -> np.ndarray:
if self.frame_skip > 1: # more efficient in-place pooling
np.maximum(
self.obs_buffer[0], self.obs_buffer[1], out=self.obs_buffer[0]
)
obs = cv2.resize(
self.obs_buffer[0],
(self.screen_size, self.screen_size),
interpolation=cv2.INTER_AREA,
)
if self.scale_obs:
obs = np.asarray(obs, dtype=np.float32) / 255.0
else:
obs = np.asarray(obs, dtype=np.uint8)
if self.grayscale_obs and self.grayscale_newaxis:
obs = np.expand_dims(obs, axis=-1) # Add a channel axis
return obs
class Atari(gym.Wrapper): # type: ignore
"""Atari 2600 wrapper for experiments.
Args:
env (gym.Env): gym environment.
is_eval (bool): flag to enter evaluation mode.
"""
def __init__(self, env: gym.Env, is_eval: bool = False):
env = AtariPreprocessing(env, terminal_on_life_loss=not is_eval)
if not is_eval:
env = TransformReward(env, lambda r: np.clip(r, -1.0, 1.0))
super().__init__(ChannelFirst(env))
class Monitor(gym.Wrapper): # type: ignore
"""gym.wrappers.Monitor-style Monitor wrapper.
Args:
env (gym.Env): gym environment.
directory (str): directory to save.
video_callable (callable): callable function that takes episode counter
to control record frequency.
force (bool): flag to allow existing directory.
frame_rate (float): video frame rate.
        record_rate (int): images are recorded every ``record_rate`` frames.
"""
_directory: str
_video_callable: Callable[[int], bool]
_frame_rate: float
_record_rate: int
_episode: int
_episode_return: float
_episode_step: int
_buffer: np.ndarray
def __init__(
self,
env: gym.Env,
directory: str,
video_callable: Optional[Callable[[int], bool]] = None,
force: bool = False,
frame_rate: float = 30.0,
record_rate: int = 1,
):
super().__init__(env)
# prepare directory
if os.path.exists(directory) and not force:
raise ValueError(f"{directory} already exists.")
os.makedirs(directory, exist_ok=True)
self._directory = directory
if video_callable:
self._video_callable = video_callable # type: ignore
else:
self._video_callable = lambda ep: ep % 10 == 0 # type: ignore
self._frame_rate = frame_rate
self._record_rate = record_rate
self._episode = 0
self._episode_return = 0.0
self._episode_step = 0
self._buffer = []
def step(
self, action: Union[np.ndarray, int]
) -> Tuple[np.ndarray, float, bool, Dict[str, Any]]:
obs, reward, done, info = super().step(action)
if self._video_callable(self._episode): # type: ignore
# store rendering
frame = cv2.cvtColor(super().render("rgb_array"), cv2.COLOR_BGR2RGB)
self._buffer.append(frame)
self._episode_step += 1
self._episode_return += reward
if done:
self._save_video()
self._save_stats()
return obs, reward, done, info
def reset(self, **kwargs: Any) -> np.ndarray:
self._episode += 1
self._episode_return = 0.0
self._episode_step = 0
self._buffer = []
return super().reset(**kwargs)
def _save_video(self) -> None:
height, width = self._buffer[0].shape[:2]
path = os.path.join(self._directory, f"video{self._episode}.avi")
fmt = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter(path, fmt, self._frame_rate, (width, height))
print(f"Saving a recorded video to {path}...")
for i, frame in enumerate(self._buffer):
if i % self._record_rate == 0:
writer.write(frame)
writer.release()
def _save_stats(self) -> None:
path = os.path.join(self._directory, f"stats{self._episode}.json")
stats = {
"episode_step": self._episode_step,
"return": self._episode_return,
}
with open(path, "w") as f:
json_str = json.dumps(stats, indent=2)
f.write(json_str) | zjkdemo2 | /zjkdemo2-0.91.tar.gz/zjkdemo2-0.91/d3rlpy/envs/wrappers.py | wrappers.py |
from abc import abstractmethod
import argparse
from dataclasses import dataclass
from datetime import datetime
import os
from pathlib import Path
from subprocess import call
from typing import Iterable
class FileAlreadyExistsException(Exception):
pass
@dataclass
class ZKProject:
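    """A Zettelkasten project rooted at ``home``.
    Notes are stored under ``notes/``, bibliography entries under ``bib/``,
    and the project configuration in ``zk.conf``; ``init()`` creates all of
    them.
    """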
home: Path
timestamp: datetime
@property
def notes(self) -> Path:
return self.home / "notes"
@property
def bib(self) -> Path:
return self.home / "bib"
@property
def config(self) -> Path:
return self.home / "zk.conf"
def init(self):
self.notes.mkdir()
self.bib.mkdir()
self.config.touch()
def _new_file(self, file_type: type, name: str):
file = file_type(self, name)
file.touch()
return file
def new_note(self, name: str):
return self._new_file(Note, name)
def new_bib(self, name: str):
return self._new_file(Bib, name)
def _fmt_date(date: datetime):
return date.strftime("%Y%m%d%H%M")
class File:
def __init__(self, project: ZKProject, name: str):
self.project = project
self.file_name = f"{_fmt_date(project.timestamp)} {name}.md"
@abstractmethod
def path(self) -> Path:
pass
def touch(self):
if self.path().exists():
raise FileAlreadyExistsException
self.path().touch()
def edit(self):
call(["nvim", self.path().absolute()])
class Note(File):
def path(self) -> Path:
return self.project.notes / self.file_name
class Bib(File):
def path(self) -> Path:
return self.project.bib / self.file_name
def main():
parser = argparse.ArgumentParser(
description="A command line utility for Zettelkasten."
)
subparsers = parser.add_subparsers(dest="command")
init_parser = subparsers.add_parser("init")
note_parser = subparsers.add_parser("note")
note_parser.add_argument("name", nargs="*")
bib_parser = subparsers.add_parser("bib")
bib_parser.add_argument("name", nargs="*")
args = parser.parse_args()
home = os.getenv("ZK_HOME", "~/zk")
    project = ZKProject(Path(home).expanduser(), datetime.utcnow())
if args.command is None:
parser.print_help()
elif args.command == "init":
project.init()
elif args.command in ("note", "bib"):
fn = getattr(project, f"new_{args.command}")
file = fn(" ".join(args.name))
file.edit() | zk-cli | /zk-cli-0.0.2.tar.gz/zk-cli-0.0.2/zk/zk.py | zk.py |
zk-flock [](https://travis-ci.org/noxiouz/python-flock)
========
You can use `zk-flock` to run programs in a cluster under a distributed lock to limit the overall number of instances.
Configuration
=============
You have to write the configuration file **/etc/distributed-flock.json** with the following content:
```js
{
"host": ["hostname1:2181","hostname2:2181","hostname3:2181"],
"timeout": 5,
"app_id": "my_application_namespace",
"sleep": "ON", //ON or OFF - Default OFF
"maxla": 30, // If >=0 -> max loadaverage for work. Default -1
"logger": {
"path": "/tmp/zkflock.log",
"level": "INFO",
"zklevel": "ERROR"
},
"auth": {
"scheme": "digest",
"data": "noxiouz:password"
}
}
```
* **host** - list of Zookeeper nodes
 * **timeout** - timeout for the Zookeeper connection (sec)
* **app_id** - namespace for your application in Zookeeper. This means that the lock will be stored
 in Zookeeper under a path like **/app_id/your_lock_name**
 * **sleep** - Sleep before work. Default: "OFF". Switch it "ON" with -s (--sleep).
 * **maxla** - Maximum load average allowed for work. Used if >= 0. Default: -1. Set with -m (--maxla).
Logging
=======
* **path** - path to log file (default: /dev/null)
* **level** - logging level of zk-flock (default: INFO)
* **zklevel** - logging level of Zookeeper Client (default: WARN)
Both log levels accept one of: ERROR, WARN, INFO, DEBUG.
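For reference, here is a minimal sketch of how such a configuration file could be loaded and sanity-checked from Python. It is illustrative only: the option names and defaults come from the sample above, the `//` comments shown in the sample would have to be removed for strict JSON parsing, and the loader inside zk-flock itself may differ.
```python
import json

DEFAULTS = {"sleep": "OFF", "maxla": -1,
            "logger": {"path": "/dev/null", "level": "INFO", "zklevel": "WARN"}}

def load_config(path="/etc/distributed-flock.json"):
    with open(path) as f:
        cfg = json.load(f)
    for key in ("host", "timeout", "app_id"):
        if key not in cfg:
            raise ValueError("missing required option: %s" % key)
    # fill in optional settings with their documented defaults
    for key, value in DEFAULTS.items():
        cfg.setdefault(key, value)
    return cfg
```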
Usage
=====
To run the application under the supervision of zk-flock, use the command:
```bash
zk-flock <pidname> <application command>
```
If your application requires command-line arguments, enclose the command in double quotes:
```bash
zk-flock my_test_lock "bash /home/user/test.sh arg1 arg2 arg3"
```
To keep attempting to acquire the lock for a specific amount of time, use the **-w** (**--wait**) option with the time in seconds.
Add the **-d** (**--daemonize**) flag to start the application as a daemon.
Use **-p** (**--pdeathsig**) to specify the signal that will be sent to your application if the master process dies. By default the signal is **SIGTERM**.
Non Linux usage warning
=======================
If you kill the zk-flock process with **kill -9**, the lock will be released, but your application will not be stopped.
| zk-flock | /zk-flock-0.1.4.1.tar.gz/zk-flock-0.1.4.1/README.md | README.md |
import logging
import socket
import uuid
from ZKeeperAPI import zkapi
class ZKLockServer(object):
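    """Distributed lock helper: the lock is held as an ephemeral znode at /<app_id>/<name> in ZooKeeper."""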
def __init__(self, **config):
try:
self.log = logging.getLogger(config.get('logger_name', 'combaine'))
self.zkclient = zkapi.ZKeeperClient(**config)
self.id = config['app_id']
res = self.zkclient.write('/%s' % self.id, "Rootnode")
if (res != zkapi.zookeeper.NODEEXISTS) and (res < 0):
if res == zkapi.DEFAULT_ERRNO:
self.log.error("Unexpectable error")
raise Exception("Unexpectable error. See Zookeeper logs")
else:
msg = "Zookeeper error: %s" % zkapi.zookeeper.zerror(res)
self.log.error(msg)
raise Exception(msg)
self.lock = config['name']
self.lockpath = '/%s/%s' % (self.id, self.lock)
self.locked = False
self.lock_content = socket.gethostname() + str(uuid.uuid4())
except Exception as err:
self.log.error('Failed to init ZKLockServer: %s', err)
raise
else:
self.log.debug('ZKeeperClient has been created')
def getlock(self):
if self.locked:
return True
if self.zkclient.write(self.lockpath, self.lock_content, 1) == 0:
self.log.info('Lock: success')
self.locked = True
return True
else:
self.log.info('Lock: fail')
return False
def set_lock_name(self, name):
self.lock = name
self.lockpath = '/%s/%s' % (self.id, self.lock)
def releaselock(self):
try:
self.zkclient.delete(self.lockpath)
self.log.info('Unlocked successfully')
self.locked = False
return True
except Exception as err:
self.log.error('Unlocking failed %s', err)
return False
def check_lock(self):
try:
content = self.zkclient.read(self.lockpath)
return content == self.lock_content
except Exception as err:
self.log.error("Unable to check lock %s", repr(err))
return False
def set_async_check_lock(self, callback):
assert callable(callback), "callback must be callable"
if not self.locked:
return False
def callback_wrapper(*args):
callback()
        if not self.check_lock():
            return False
        return self.zkclient.aget(self.lockpath, callback_wrapper)
def set_node_deleting_watcher(self, path, callback):
assert callable(callback), "callback must be callable"
def callback_wrapper(event, state, path):
            if event == 2:  # zookeeper.DELETED_EVENT
callback()
def callback_rc_wrapper(rc):
if rc == -101: # zookeeper.NONODE
callback()
return self.zkclient.aget(path, callback_wrapper, callback_rc_wrapper)
def destroy(self):
try:
self.zkclient.disconnect()
self.log.info('Disconnected successfully')
return True
except Exception as err:
self.log.error('Disconnection error %s', err)
return False | zk-flock | /zk-flock-0.1.4.1.tar.gz/zk-flock-0.1.4.1/distributedflock/Zookeeper.py | Zookeeper.py |
from __future__ import with_statement
from functools import partial
import logging
import threading
import zookeeper
ZK_ACL = {"perms": 0x1f,
"scheme": "world",
"id": "anyone"}
zookeeper.set_log_stream(open('/dev/null', 'w'))
DEFAULT_ERRNO = -9999
# JFYI
LOG_LEVELS = {"DEBUG": zookeeper.LOG_LEVEL_DEBUG,
"INFO": zookeeper.LOG_LEVEL_INFO,
"WARN": zookeeper.LOG_LEVEL_WARN,
"ERROR": zookeeper.LOG_LEVEL_ERROR}
class Null(object):
"""This class does nothing as logger"""
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __getattribute__(self, name):
return self
    def __setattr__(self, name, value):
        pass
    def __delattr__(self, name):
        pass
def handling_error(zkfunc, logger=Null()):
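    """Wrap a zkpython call and return a (result, errno) tuple; known ZooKeeper exceptions are caught and mapped to errno (0 on success)."""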
def wrapper(*args, **kwargs):
ret = None
errno = DEFAULT_ERRNO
try:
ret = zkfunc(*args, **kwargs)
except zookeeper.ConnectionLossException as err:
            logger.error("ConnectionLossException: %s", str(err))
errno = zookeeper.CONNECTIONLOSS
except zookeeper.NodeExistsException as err:
logger.debug("Node exists: %s", str(err))
errno = zookeeper.NODEEXISTS
except zookeeper.OperationTimeoutException as err:
logger.error("Operation timeout: %s", str(err))
errno = zookeeper.OPERATIONTIMEOUT
except zookeeper.RuntimeInconsistencyException as err:
logger.error("RuntimeInconsistency: %s", str(err))
errno = zookeeper.RUNTIMEINCONSISTENCY
except zookeeper.MarshallingErrorException as err:
logger.error(str(err))
errno = zookeeper.MARSHALLINGERROR
except zookeeper.ZooKeeperException as err:
logger.error("ZookeperException %s", str(err))
except Exception as err:
logger.exception("Unknown exception %s", str(err))
else:
errno = 0
finally:
return ret, errno
return wrapper
class ZKeeperClient(object):
def __init__(self, **config):
logger_name = config.get('logger_name')
self.logger = logging.getLogger(logger_name) if logger_name else Null()
self.zkhandle = None
self.auth = None
self.cv = threading.Condition()
try:
auth_config = config.get("auth")
if auth_config is not None:
auth_scheme = auth_config["scheme"]
auth_data = auth_config["data"]
self.auth = (auth_scheme, auth_data)
zklogfile_path, zklog_level = config.get("ZookeeperLog",
("/dev/stderr", "WARN"))
self.connection_timeout = config['timeout']
self.zkhosts = ','.join(config['host'])
except KeyError as err:
self.logger.exception("Missing configuration option: %s", err)
raise
except Exception as err:
self.logger.exception("Unknown configuration error: %s", err)
raise
try:
_f = open(zklogfile_path, 'a')
except IOError as err:
self.logger.error("Unable to open logfile %s %s",
zklogfile_path, err)
else:
zookeeper.set_log_stream(_f)
zookeeper.set_debug_level(LOG_LEVELS.get(zklog_level.upper(),
zookeeper.LOG_LEVEL_WARN))
self.connect()
if zookeeper.state(self.zkhandle) == zookeeper.CONNECTED_STATE:
self.logger.info('Connected to Zookeeper successfully')
else:
raise zookeeper.ZooKeeperException('Unable to connect '
'to Zookeeper')
def on_auth_callback(state, result):
with self.cv:
if result == zookeeper.AUTHFAILED:
self.logger.error(zookeeper.zerror(zookeeper.AUTHFAILED))
self.logger.info("on_auth: state %s, result %s",
state, result)
self.cv.notify()
if self.auth:
self.logger.info("Auth using %s", self.auth[0])
with self.cv:
res = zookeeper.add_auth(self.zkhandle, self.auth[0],
self.auth[1], on_auth_callback)
if res != zookeeper.OK:
self.logger.error("Invalid status %d",
zookeeper.zerror(res))
raise Exception("Invalid status")
self.cv.wait(self.connection_timeout)
if zookeeper.state(self.zkhandle) == zookeeper.AUTH_FAILED_STATE:
raise zookeeper.ZooKeeperException('authentication failed')
def connect(self):
def connect_watcher(handle, w_type, state, path):
"""Callback for connect()"""
with self.cv:
if state == zookeeper.CONNECTED_STATE:
self.logger.debug("connect_watcher: CONNECTED_STATE")
else:
self.logger.debug("connect_watcher: state %d", state)
self.cv.notify()
with self.cv:
try:
# zookeeper.init accepts timeout in ms
recv_timeout = int(self.connection_timeout * 1e3)
self.zkhandle = zookeeper.init(self.zkhosts, connect_watcher,
recv_timeout)
except Exception as err:
self.logger.exception("Unable to init zookeeper: %s", err)
raise err
else:
while True:
self.logger.debug("Connecting to Zookeeper... Wait %d",
self.connection_timeout)
self.cv.wait(self.connection_timeout)
if zookeeper.state(self.zkhandle) != zookeeper.CONNECTING_STATE:
break
@property
def connected(self):
return self.zkhandle and\
zookeeper.state(self.zkhandle) == zookeeper.CONNECTED_STATE
def disconnect(self):
return zookeeper.close(self.zkhandle)
def write(self, absname, value, typeofnode=0, acl=ZK_ACL):
return handling_error(zookeeper.create, self.logger)(self.zkhandle,
absname,
value,
[acl],
typeofnode)[1]
def read(self, absname):
res = zookeeper.get(self.zkhandle, absname)
return res[0]
def list(self, absname):
return zookeeper.get_children(self.zkhandle, absname)
def modify(self, absname, value):
return zookeeper.set(self.zkhandle, absname, value)
def delete(self, absname):
return zookeeper.delete(self.zkhandle, absname)
# Async API
def aget(self, node, callback, rccallback=None):
# callback is invoked when the watcher triggers
# rccallback is invoked when the result of attaching
# becomes available (OK, NONODE and so on)
assert callable(callback), "callback must be callable"
if rccallback is not None:
assert callable(rccallback), "rccallback must be callable"
def watcher(self, zh, event, state, path):
self.logger.info("Node state has been changed")
if event == zookeeper.CHANGED_EVENT:
self.logger.debug("Node %s has been modified", path)
elif event == zookeeper.CREATED_EVENT:
self.logger.debug("Node %s has been created", path)
elif event == zookeeper.DELETED_EVENT:
self.logger.warning("Node %s has been deleted", path)
if state == zookeeper.EXPIRED_SESSION_STATE:
self.logger.error("Session has expired")
callback(event, state, path)
def rc_handler(self, zh, rc, data, stat):
if zookeeper.OK == rc:
                self.logger.debug("Callback has been attached successfully")
elif zookeeper.NONODE == rc:
self.logger.warning("Watched node doesn't exists")
if rccallback is not None:
rccallback(rc)
res = zookeeper.aget(self.zkhandle, node,
partial(watcher, self),
partial(rc_handler, self))
return res == zookeeper.OK | zk-flock | /zk-flock-0.1.4.1.tar.gz/zk-flock-0.1.4.1/distributedflock/ZKeeperAPI/zkapi.py | zkapi.py |
# zk_grpc
a zookeeper registration center manager for python grpcio
Requires: Python 3.5, grpcio, kazoo
### install
```shell
pip install zk-grpc
```
#### How to update from 0.0.1 to 0.1.0
```text
1. First update the clients that use ZKGrpc or AIOZKGrpc to zk-grpc v0.1.0.
2. Then update the servers that use ZKRegister or AIOZKRegister.
```
**Notice: the v0.0.1 ZKGrpc class cannot be used with the v0.1.0 ZKRegister class**
##### [More Examples](https://github.com/laiyongtao/zk_grpc/tree/master/example)
## Service Register
```python
import signal
from example_pb2 import HelloRequest, HelloResponse
from example_pb2_grpc import HelloServiceServicer, add_HelloServiceServicer_to_server
from kazoo.client import KazooClient
from zk_grpc import ZKRegister
class HelloService(HelloServiceServicer):
def hello_world(self, request: HelloRequest, context):
hello = request.hello
return HelloResponse(hello=hello)
def run(host, port):
from grpc import server
from concurrent.futures import ThreadPoolExecutor
server = server(ThreadPoolExecutor(50))
add_HelloServiceServicer_to_server(HelloService(), server)
server.add_insecure_port("{}:{}".format(host, port))
server.start()
kz = KazooClient(hosts="127.0.0.1:2181")
kz.start()
zk_register = ZKRegister(kz_client=kz)
    # register all servicers on the grpc server object (does not support the aio grpc server)
zk_register.register_grpc_server(server, host, port)
# or register servicer one by one
# zk_register.register_server(HelloServiceServicer, host, port)
def shutdown(*args, **kwargs):
zk_register.stop()
# close kazoo client after zk_register stoped
kz.stop()
kz.close()
server.stop(0.5)
signal.signal(signal.SIGTERM, shutdown)
try:
server.wait_for_termination()
except KeyboardInterrupt:
shutdown()
if __name__ == '__main__':
host = "127.0.0.1"
port = 50052
run(host, port)
```
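Registration also accepts an optional `weight`, and the discovery client can select a matching load-balancing strategy when wrapping a stub. A small illustrative snippet, building on the register example above and the discovery example below (it assumes `LBS` can be imported from `zk_grpc.definition`, as in the library source; adjust the import to your installed version):
```python
# server side: register with a higher weight so this instance receives more traffic
zk_register.register_server(HelloServiceServicer, host, port, weight=5)

# client side: pick weighted-random balancing when wrapping the stub
from zk_grpc.definition import LBS
stub = zk_g.wrap_stub(HelloServiceStub, lbs=LBS.WEIGHTED_RANDOM)
```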
## Service Discovery
```python
from example_pb2 import HelloRequest
from example_pb2_grpc import HelloServiceStub
from kazoo.client import KazooClient
from zk_grpc import ZKGrpc
def run():
    # before using
kz = KazooClient(hosts="127.0.0.1:2181")
kz.start()
zk_g = ZKGrpc(kz_client=kz)
# get stub
stub = zk_g.wrap_stub(HelloServiceStub)
# call grpc api
resp = stub.hello_world(HelloRequest(hello="hello"))
print(resp.hello)
# before exit
zk_g.stop()
kz.stop()
kz.close()
if __name__ == '__main__':
run()
``` | zk-grpc | /zk-grpc-0.1.0.tar.gz/zk-grpc-0.1.0/README.md | README.md |
import asyncio
from typing import Union, Optional, cast, Iterable
from inspect import isclass
from concurrent.futures import ThreadPoolExecutor, FIRST_COMPLETED
import grpc.experimental.aio
from kazoo.client import KazooClient
from .definition import (ZK_ROOT_PATH, SNODE_PREFIX,
ServerInfo,
NoServerAvailable,
StubClass, ServicerClass,
DEFAILT_WEIGHT, LBS)
from .basic import ZKGrpcMixin, ZKRegisterMixin
class AIOZKGrpc(ZKGrpcMixin):
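    """Asyncio variant of the ZooKeeper-backed gRPC service-discovery client; wraps grpc.experimental.aio channels."""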
def __init__(self, kz_client: KazooClient,
zk_root_path: str = ZK_ROOT_PATH, node_prefix: str = SNODE_PREFIX,
channel_factory: Union[
grpc.experimental.aio.insecure_channel, grpc.experimental.aio.secure_channel
] = grpc.experimental.aio.insecure_channel,
channel_factory_kwargs: dict = None,
grace: Optional[float] = None,
thread_pool: Optional[ThreadPoolExecutor] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
lbs: Union["LBS", str, None] = None):
super(AIOZKGrpc, self).__init__(kz_client=kz_client,
zk_root_path=zk_root_path, node_prefix=node_prefix,
channel_factory=channel_factory, channel_factory_kwargs=channel_factory_kwargs,
thread_pool=thread_pool,
lbs=lbs)
self.channel_grace = grace
self._loop = loop
self._is_aio = True
@property
def loop(self):
return self._loop
@loop.setter
def loop(self, value: asyncio.AbstractEventLoop):
self._loop = value
async def wrap_stub(self, stub_class: "StubClass", service_name: str = None, lbs: Union["LBS", str, None] = None):
if not service_name:
class_name = stub_class.__name__
service_name = "".join(class_name.rsplit("Stub", 1))
channel = await self.get_channel(service_name, lbs=lbs)
return cast(stub_class, stub_class(channel))
async def _close_channel(self, server: ServerInfo) -> None:
if server and isinstance(server, ServerInfo):
await server.channel.close(self.channel_grace)
def _close_channels(self, servers: Iterable[ServerInfo]) -> None:
for _ser in servers:
asyncio.run_coroutine_threadsafe(self._close_channel(_ser), self.loop)
async def fetch_servers(self, service_name: str) -> None:
service_path = "/".join((self.zk_root_path.rstrip("/"), service_name))
fu = asyncio.wrap_future(
self._thread_pool.submit(self._kz_client.ensure_path,
service_path)
)
await fu
fu = asyncio.wrap_future(
self._thread_pool.submit(self.get_children,
path=service_path)
)
childs = await fu
if not childs:
raise NoServerAvailable("There is no available servers for %s" % service_name)
fus = [
asyncio.wrap_future(
self._thread_pool.submit(self.set_server,
service_path=service_path,
child_name=child)
) for child in childs
]
# wait for first completed
await asyncio.wait(fus, return_when=FIRST_COMPLETED) # Todo: set timeout
async def get_channel(self, service_name: str,
lbs: Union["LBS", str, None] = None) -> grpc.experimental.aio.Channel:
service = self.services.get(service_name)
if service is None:
with self._locks[service_name]:
service = self.services.get(service_name)
if service is not None:
return self._get_channel(service_name, lbs=lbs)
# get server from zk
await self.fetch_servers(service_name)
return self._get_channel(service_name, lbs=lbs)
return self._get_channel(service_name, lbs=lbs)
async def stop(self) -> None:
servers = list()
for _, _sers in self.services.items():
            servers.extend(self._close_channel(_ser) for _ser in _sers.values())
self.services.clear()
if servers: await asyncio.wait(servers)
class AIOZKRegister(ZKRegisterMixin):
async def register_server(self, service: Union["ServicerClass", str],
host: str, port: int, weight: int = DEFAILT_WEIGHT) -> None:
value_str = "{}:{}||{}".format(host, port, weight)
if isclass(service):
class_name = service.__name__
service_name = "".join(class_name.rsplit("Servicer", 1))
else:
service_name = str(service)
await asyncio.wrap_future(
self._thread_pool.submit(self._create_server_node,
service_name=service_name,
value=value_str)
)
async def stop(self) -> None:
self._stopped = True
fus = [asyncio.wrap_future(self._thread_pool.submit(self._kz_client.delete, path)) for _, path, _ in
self._creted_nodes]
if fus: await asyncio.wait(fus) | zk-grpc | /zk-grpc-0.1.0.tar.gz/zk-grpc-0.1.0/zk_grpc/aio.py | aio.py |
import random
import typing
from typing import Union, Callable
import grpc.experimental.aio
from .definition import NoServerAvailable, LBS, UnregisteredLBSError
try:
if typing.TYPE_CHECKING:
from .aio import AIOZKGrpc
from .basic import ZKGrpc
except AttributeError:
pass
LBSFunc = Callable[[str, Union["ZKGrpc", "AIOZKGrpc"]],
Union[grpc.Channel, grpc.experimental.aio.Channel]]
class LBSRegistry(object):
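    """Registry of load-balancing strategies; get_channel dispatches to the function registered under the given name."""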
def __init__(self):
self._lbs_funcs = dict()
self._default_lbs = LBS.RANDOM
def register(self, name: Union["LBS", str], lbs_func: LBSFunc) -> None:
self._lbs_funcs[name] = lbs_func
def unregister(self, name: Union["LBS", str]) -> None:
self._lbs_funcs.pop(name, None)
def get_channel(self, service_name: str, zk_grpc_obj: Union["ZKGrpc", "AIOZKGrpc"],
lbs: Union["LBS", str, None] = None) -> Union[grpc.Channel, grpc.experimental.aio.Channel]:
if not lbs:
lbs = self._default_lbs
service_map = zk_grpc_obj.services[service_name]
if not service_map:
raise NoServerAvailable("There is no available servers for %s" % service_name)
lbs_func = self._lbs_funcs.get(lbs)
if not lbs_func:
raise UnregisteredLBSError("Unregistered lbs_func name: {}".format(lbs))
return lbs_func(service_name, zk_grpc_obj)
def random_lbs_func(service_name: str, zk_grpc_obj: Union["ZKGrpc", "AIOZKGrpc"]) -> Union[
grpc.Channel, grpc.experimental.aio.Channel]:
service_map = zk_grpc_obj.services[service_name]
if not service_map:
raise NoServerAvailable("There is no available servers for %s" % service_name)
servers = service_map.keys()
server = random.choice(list(servers))
return service_map[server].channel
def weighted_random_lbs_func(service_name: str, zk_grpc_obj: Union["ZKGrpc", "AIOZKGrpc"]) -> Union[
grpc.Channel, grpc.experimental.aio.Channel]:
service_map = zk_grpc_obj.services[service_name]
if not service_map:
raise NoServerAvailable("There is no available servers for %s" % service_name)
servers = service_map.keys()
server = random.choices(list(servers), [server.weight for server in service_map.values()])[0]
return service_map[server].channel
lbs_registry = LBSRegistry()
lbs_registry.register(LBS.RANDOM, random_lbs_func)
lbs_registry.register(LBS.WEIGHTED_RANDOM, weighted_random_lbs_func) | zk-grpc | /zk-grpc-0.1.0.tar.gz/zk-grpc-0.1.0/zk_grpc/lbs.py | lbs.py |
import threading
import asyncio
from typing import Union, Optional, cast, Iterable, Tuple, List
from inspect import isclass
from concurrent.futures import ThreadPoolExecutor, FIRST_COMPLETED, wait
from collections import defaultdict
from functools import partial
import grpc.experimental.aio
from grpc._server import _Server
from kazoo.client import KazooClient
from kazoo.protocol.states import EventType, WatchedEvent, ZnodeStat, KazooState
from .definition import (ZK_ROOT_PATH, SNODE_PREFIX,
ServerInfo,
NoServerAvailable,
StubClass, ServicerClass, InitServiceFlag,
W_VALUE_RE, VALUE_RE, DEFAILT_WEIGHT, LBS)
from .lbs import lbs_registry
class ZKRegisterMixin(object):
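    """Shared registration logic: publishes services as ephemeral sequential znodes and re-creates them after a session loss."""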
def __init__(self, kz_client: KazooClient,
zk_root_path: str = ZK_ROOT_PATH, node_prefix: str = SNODE_PREFIX,
thread_pool: Optional[ThreadPoolExecutor] = None):
self._kz_client = kz_client
self.zk_root_path = zk_root_path
self.node_prefix = node_prefix
self._creted_nodes = set()
self._lock = threading.RLock()
self._stopped = False
self._thread_pool = thread_pool or ThreadPoolExecutor() # for running sync func in main thread
self._kz_client.add_listener(self._session_watcher)
def _create_server_node(self, service_name: str, value: Union[str, bytes]) -> None:
if not isinstance(value, bytes):
value = value.encode("utf-8")
service_path = "/".join((self.zk_root_path.rstrip("/"), service_name))
pre_path = "/".join((service_path, self.node_prefix.strip("/")))
path = self._kz_client.create(pre_path, value, ephemeral=True, sequence=True, makepath=True)
self._creted_nodes.add((service_name, path, value))
def _session_watcher(self, state) -> None:
if state == KazooState.CONNECTED and not self._stopped:
self._kz_client.handler.spawn(self.resume_nodes)
def resume_nodes(self) -> None:
with self._lock:
expired = set()
created = self._creted_nodes.copy()
for node in created:
service_name, path, value = node
stat = self._kz_client.exists(path)
if stat:
continue
expired.add(node)
self._create_server_node(service_name=service_name, value=value)
self._creted_nodes.difference_update(expired)
class ZKGrpcMixin(object):
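    """Shared discovery logic: watches service znodes in ZooKeeper and keeps one gRPC channel per discovered server, with pluggable load balancing."""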
def __init__(self, kz_client: KazooClient,
zk_root_path: str = ZK_ROOT_PATH, node_prefix: str = SNODE_PREFIX,
channel_factory: Union[
grpc.insecure_channel, grpc.secure_channel,
grpc.experimental.aio.insecure_channel, grpc.experimental.aio.secure_channel
] = grpc.insecure_channel,
channel_factory_kwargs: dict = None,
thread_pool: Optional[ThreadPoolExecutor] = None,
lbs: Union["LBS", str, None] = None):
self._kz_client = kz_client
self.zk_root_path = zk_root_path
self.node_prefix = node_prefix
self.channel_factory = channel_factory
self.channel_factory_kwargs = channel_factory_kwargs or {}
self.services = defaultdict(dict)
self._locks = defaultdict(threading.RLock)
self._thread_pool = thread_pool or ThreadPoolExecutor() # for running sync func in main thread
self._is_aio = False
self.loop = None
self.lbs = lbs
def _split_service_name(self, service_path: str) -> str:
return service_path.rsplit("/", 1)[-1]
def _split_server_name(self, server_path: str) -> Tuple[str, str, str]:
service_path, server_name = server_path.rsplit("/", 1)
service_name = self._split_service_name(service_path)
return service_path, service_name, server_name
def _get_channel(self, service_name: str, lbs: Union["LBS", str, None] = None) -> Union[
grpc.Channel, grpc.experimental.aio.Channel]:
lbs = lbs or self.lbs
return lbs_registry.get_channel(service_name=service_name, zk_grpc_obj=self, lbs=lbs)
def _close_channels(self, servers: Iterable[ServerInfo]) -> None:
# close grpc channels in subthread
pass
def set_server(self, service_path: str, child_name: str) -> None:
child_path = "/".join((service_path, child_name))
watcher = partial(self.data_watcher, path=child_path)
self._kz_client.DataWatch(child_path, func=watcher)
def data_watcher(self, data: Optional[bytes], stat: Optional[ZnodeStat], event: Optional[WatchedEvent],
path: str) -> Optional[bool]:
if event is None or event.type == EventType.CHANGED or event.type == EventType.NONE:
if stat is None:
_, service_name, server_name = self._split_server_name(path)
services = self.services.get(service_name)
if services is not None:
s_info = services.pop(server_name, None)
self._close_channels((s_info,))
return False
self._set_channel(data, path)
elif event.type == EventType.DELETED:
return False
def _set_channel(self, data: bytes, path: str) -> None:
if self._is_aio:
asyncio.set_event_loop(self.loop)
service_path, service_name, server_name = self._split_server_name(path)
data = data.decode("utf-8")
weight_re_ret = W_VALUE_RE.match(data)
old_re_ret = VALUE_RE.match(data)
if weight_re_ret:
server_addr, weight = weight_re_ret.groups()
elif old_re_ret:
server_addr, weight = old_re_ret.group(), DEFAILT_WEIGHT
else:
return
weight = int(weight)
servers = self.services.get(service_name)
channel = None
if servers is not None:
ori_ser_info = servers.get(server_name)
if ori_ser_info and isinstance(ori_ser_info, ServerInfo):
ori_addr, ori_weight = ori_ser_info.addr, ori_ser_info.weight
if server_addr == ori_addr and ori_weight == weight:
return
elif server_addr == ori_addr:
channel = ori_ser_info.channel
if channel is None:
channel = self.channel_factory(server_addr, **self.channel_factory_kwargs)
self.services[service_name].update(
{server_name: ServerInfo(channel=channel, addr=server_addr, path=path, weight=weight)}
)
def get_children(self, path: str) -> List[str]:
watcher = partial(self.children_watcher, init_flag=InitServiceFlag(path))
child_watcher = self._kz_client.ChildrenWatch(path, func=watcher, send_event=True)
return child_watcher._prior_children
def children_watcher(self, childs: list, event: WatchedEvent, init_flag: InitServiceFlag = None) -> Optional[bool]:
if not init_flag.is_set():
init_flag.set()
return
path = init_flag.path
service_name = self._split_service_name(path)
if event is None or event.type == EventType.CHILD:
# update
with self._locks[service_name]:
fetched_servers = self.services[service_name].keys()
new_servers = set(childs)
expr_servers = fetched_servers - new_servers # servers to delete
for server in new_servers:
if server in fetched_servers:
continue
self.set_server(service_path=path, child_name=server)
_sers = [self.services[service_name].pop(server, None) for server in expr_servers]
self._close_channels(_sers)
elif event.type == EventType.DELETED:
# delete
with self._locks[service_name]:
_sers = self.services.pop(service_name, {})
self._locks.pop(service_name, None)
self._close_channels(_sers.values())
return False # to remove watcher
class ZKGrpc(ZKGrpcMixin):
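    """Synchronous ZooKeeper-backed gRPC service-discovery client; use wrap_stub() to get a stub bound to a discovered server."""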
def __init__(self, kz_client: KazooClient,
zk_root_path: str = ZK_ROOT_PATH, node_prefix: str = SNODE_PREFIX,
channel_factory: Union[grpc.insecure_channel, grpc.secure_channel] = grpc.insecure_channel,
channel_factory_kwargs: dict = None,
thread_pool: Optional[ThreadPoolExecutor] = None,
lbs: Union["LBS", str, None] = None):
super(ZKGrpc, self).__init__(kz_client=kz_client,
zk_root_path=zk_root_path, node_prefix=node_prefix,
channel_factory=channel_factory, channel_factory_kwargs=channel_factory_kwargs,
thread_pool=thread_pool,
lbs=lbs)
def wrap_stub(self, stub_class: "StubClass", service_name: str = None, lbs: Union["LBS", str, None] = None):
if not service_name:
class_name = stub_class.__name__
service_name = "".join(class_name.rsplit("Stub", 1))
channel = self.get_channel(service_name, lbs=lbs)
return cast(stub_class, stub_class(channel))
def _close_channel(self, server: ServerInfo) -> None:
if server and isinstance(server, ServerInfo):
server.channel.close()
def _close_channels(self, servers: Iterable[ServerInfo]) -> None:
for _ser in servers:
self._close_channel(_ser)
def fetch_servers(self, service_name: str) -> None:
service_path = "/".join((self.zk_root_path.rstrip("/"), service_name))
self._kz_client.ensure_path(service_path)
childs = self.get_children(path=service_path)
if not childs:
raise NoServerAvailable("There is no available servers for %s" % service_name)
fus = [
self._thread_pool.submit(self.set_server,
service_path=service_path,
child_name=child) for child in childs
]
wait(fus, return_when=FIRST_COMPLETED) # Todo: set timeout
def get_channel(self, service_name: str, lbs: Union["LBS", str, None] = None) -> grpc.Channel:
service = self.services.get(service_name)
if service is None:
with self._locks[service_name]:
service = self.services.get(service_name)
if service is not None:
return self._get_channel(service_name, lbs=lbs)
# get server from zk
self.fetch_servers(service_name)
return self._get_channel(service_name, lbs=lbs)
return self._get_channel(service_name, lbs=lbs)
def stop(self) -> None:
for _, _sers in self.services.items():
            for _ser in _sers.values():
self._close_channel(_ser)
self.services.clear()
class ZKRegister(ZKRegisterMixin):
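    """Synchronous service register: publishes gRPC servicers to ZooKeeper as "host:port||weight" node values."""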
def register_grpc_server(self, server: grpc._server._Server, host: str, port: int,
weight: int = DEFAILT_WEIGHT) -> None:
value_str = "{}:{}||{}".format(host, port, weight)
with self._lock:
fus = [
self._thread_pool.submit(
self._create_server_node,
service_name=s.service_name(), value=value_str
) for s in server._state.generic_handlers
]
if fus: wait(fus)
def register_server(self, service: Union["ServicerClass", str], host: str, port: int,
weight: int = DEFAILT_WEIGHT) -> None:
value_str = "{}:{}||{}".format(host, port, weight)
if isclass(service):
class_name = service.__name__
service_name = "".join(class_name.rsplit("Servicer", 1))
else:
service_name = str(service)
with self._lock:
self._create_server_node(service_name=service_name, value=value_str)
def stop(self) -> None:
self._stopped = True
rets = [self._kz_client.delete_async(path) for _, path, _ in self._creted_nodes]
for ret in rets:
ret.get() | zk-grpc | /zk-grpc-0.1.0.tar.gz/zk-grpc-0.1.0/zk_grpc/basic.py | basic.py |
import sys
import os
from PIL import Image, ImageDraw, ImageFont
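# The logo is drawn at ANTIALIAS_SIZE times the final resolution and downscaled at the end for smooth edges.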
ANTIALIAS_SIZE = 16
LOGO_SIZE = 1024*ANTIALIAS_SIZE
MAIN_POS = 546*ANTIALIAS_SIZE
CIRCLE_EDGE_Y = 848*ANTIALIAS_SIZE
CIRCLE_RADIUS = 1380*ANTIALIAS_SIZE
SUB_POS = 986*ANTIALIAS_SIZE
COLOR_MAIN = '#268bf1'
COLOR_SECOND = '#ffffff'
FONT_MAIN_SUM = 840*ANTIALIAS_SIZE
FONT_SIZE_SUB = 104*ANTIALIAS_SIZE
SUB_TITLE = u'智课'
# https://www.zcool.com.cn/article/ZNDg2Mzg4.html
# FONT_FILE_NAME = 'HappyZcool-2016.ttf'
# FONT_FILE_NAME = 'zcoolqinkehuangyouti.ttf'
# FONT_FILE_NAME = 'lianmengqiyilushuaizhengruiheiti.ttf'
FONT_FILE_NAME = 'ZhenyanGB.ttf'
def brother_path(file_name):
return os.path.join(os.path.abspath(
os.path.dirname(__file__)), file_name)
def draw_zk_bg():
img = Image.new('RGB', (LOGO_SIZE, LOGO_SIZE), COLOR_MAIN)
draw = ImageDraw.Draw(img)
ellipseX1 = LOGO_SIZE/2 - CIRCLE_RADIUS
ellipseX2 = LOGO_SIZE/2 + CIRCLE_RADIUS
draw.ellipse((ellipseX1, CIRCLE_EDGE_Y, ellipseX2,
CIRCLE_EDGE_Y+CIRCLE_RADIUS*2), COLOR_SECOND)
return img
def text_horzontal_center(text, color, font, img, base_y):
text_width, text_height = font.getsize(text)
draw = ImageDraw.Draw(img)
x = (LOGO_SIZE-text_width)/2
y = base_y-text_height
draw.text((x, y), text, color, font=font)
def print_using():
    print("Usage: zk_logo_maker.py <product-name> [filename]")
def count_length(title):
    # simple character count; equivalent to len(title) and avoids shadowing the builtin
    return len(title)
def main():
param_len = len(sys.argv)
if param_len < 2:
print_using()
exit()
title = sys.argv[1]
if param_len == 2:
file_path = f"{title}.png"
else:
file_path = f"{sys.argv[2]}.png"
print(f"title:{title}")
print(f"file_path:{file_path}")
title_len = len(title)
main_title_font_size = int(FONT_MAIN_SUM/title_len)
font = ImageFont.truetype(
brother_path(FONT_FILE_NAME),
main_title_font_size
)
img = draw_zk_bg()
text_horzontal_center(
title,
COLOR_SECOND,
font,
img,
MAIN_POS)
font_sub = ImageFont.truetype(
brother_path(FONT_FILE_NAME),
FONT_SIZE_SUB
)
text_horzontal_center(
SUB_TITLE,
COLOR_MAIN,
font_sub,
img,
SUB_POS)
logo_size = int(LOGO_SIZE/ANTIALIAS_SIZE)
img = img.resize((logo_size, logo_size), Image.ANTIALIAS)
img.save(file_path, 'PNG')
if __name__ == "__main__":
main() | zk-logo-maker | /zk_logo_maker-1.0.5-py3-none-any.whl/zk_logo_maker/__main__.py | __main__.py |
import game_sprites,pygame,data,time
class GameEngine():
def __init__(self):
        # create the game window
        self.scene = pygame.display.set_mode(data.SCREEN_SIZE)
        data.scene = self.scene
        # set the window caption
        pygame.display.set_caption("Plane War")
        # create the game clock
        self.clock = pygame.time.Clock()
        # trigger an enemy-creation event every 2 seconds
        pygame.time.set_timer(data.ENEMY_CREATE, 2000)
        # trigger an enemy-attack event every second
        pygame.time.set_timer(data.ENEMY_ATTACK, 1000)
    def create_scene(self, img_path, img_path2):
        """Create the game scene."""
        # background sprites (two copies for seamless scrolling)
        self.bg1 = game_sprites.BackgroundSprite(img_path)
        self.bg2 = game_sprites.BackgroundSprite(img_path, prepare=True)
        # hero plane
        self.hero = game_sprites.HeroSprite(img_path2)
        # sprite group holding the backgrounds and the hero
        self.resources = pygame.sprite.Group(self.bg1, self.bg2, self.hero)
        # sprite group for enemy planes and their bullets
        self.enemys = pygame.sprite.Group()
    def update_scene(self):
        """Update and render the game scene."""
        # render the background/hero group
        self.resources.update()
        self.resources.draw(self.scene)
        data.resources = self.resources
        # render the hero's bullets
        self.hero.bullets.update()
        self.hero.bullets.draw(self.scene)
        data.bullets = self.hero.bullets
        # render the enemy planes
        self.enemys.update()
        self.enemys.draw(self.scene)
        data.enemys = self.enemys
        # refresh the display
        pygame.display.update()
    def check_event(self):
        """Handle events and keyboard input."""
        event_list = pygame.event.get()
        if len(event_list) > 0:
            print(event_list)
            for event in event_list:
                # quit event: exit the program
                if event.type == pygame.QUIT:
                    pygame.quit()
                    exit()
                elif event.type == data.ENEMY_CREATE:
                    print("spawn an enemy plane")
                    enemy = game_sprites.EnemySprite()
                    # add it to the enemy sprite group
                    self.enemys.add(enemy)
                    # create an enemy bullet
                    bullets = game_sprites.Bullet_Enemy(enemy.rect.x + 15, enemy.rect.y)
                    # add the bullet to the enemy sprite group
                    self.enemys.add(bullets)
                elif event.type == data.ENEMY_ATTACK:
                    print("enemy fires a bullet")
        # read the currently pressed keys and move the hero
        key_down = pygame.key.get_pressed()
        if key_down[pygame.K_LEFT]:
            print("moving left <<<<<<")
            self.hero.rect.x -= 5
        elif key_down[pygame.K_RIGHT]:
            print("moving right >>>>>>")
            self.hero.rect.x += 5
        elif key_down[pygame.K_UP]:
            print("moving up ^^^^^^")
            self.hero.rect.y -= 5
        elif key_down[pygame.K_DOWN]:
            print("moving down >>>>>>")
            self.hero.rect.y += 5
        elif key_down[pygame.K_SPACE]:
            self.hero.fire()
            print("fire bullet", self.hero.bullets)
def check_collide(self):
        # collision detection: hero bullets vs. enemy planes
bol = pygame.sprite.groupcollide(self.enemys, self.hero.bullets, False, True)
for i in bol:
i.destroy()
        # collision detection: hero plane vs. enemy planes
e = pygame.sprite.spritecollide(self.hero, self.enemys, True)
if len(e) > 0:
self.hero.destroy()
print("Game Over")
pygame.quit()
exit()
    def start(self):
        """Start the game: show the title screen, then run both stages."""
        # title screen
        self.scene = pygame.display.set_mode(data.SCREEN_SIZE)
        flag = False
        while True:
            # load the title screen background image
            self.background_image = game_sprites.BackgroundSprite("./image/kc.jpg")
            # put the background image into a sprite group
            self.resp = pygame.sprite.Group(self.background_image)
            # poll events
            event_list = pygame.event.get()
            if len(event_list) > 0:
                print(event_list)
                for event in event_list:
                    # leave the title screen when the space key is pressed
                    if event.type == pygame.KEYDOWN:
                        if event.key == pygame.K_SPACE:
                            flag = True
            # render the title screen until the player starts the game
            if flag:
                break
            self.resp.update()
            self.resp.draw(self.scene)
            pygame.display.update()
        # initialize game data
        pygame.init()
        # create the first-stage scene
        self.create_scene("./image/bg_img_3.jpg", "./image/hero_18.png")
        score = 0
while True:
            # cap the frame rate (loop iterations per second)
            self.clock.tick(50)
            # handle events
            self.check_event()
            # collision detection
            self.check_collide()
            if data.score >= 20:
                break
            # update the scene
            self.update_scene()
        # clear the sprite groups before the second stage
        self.resources.empty()
        self.hero.bullets.empty()
        self.enemys.empty()
        # re-initialize everything for the second stage
        super().__init__()
        pygame.init()
        # create the second-stage scene
        self.create_scene("./image/bg_img_4.jpg", "./image/hero.png")
        # main game loop
        while True:
            self.clock.tick(50)
            # handle events
            self.check_event()
            # collision detection
            self.check_collide()
            # render and display
self.update_scene() | zk-pkg | /zk_pkg-1.0.tar.gz/zk_pkg-1.0/game_engine.py | game_engine.py |
import pygame,data,random,time
class GameSprite(pygame.sprite.Sprite):
    """Base game sprite."""
def __init__(self,img_path,speed=1):
        # initialize the base Sprite class
super().__init__()
self.image = pygame.image.load(img_path)
self.rect = self.image.get_rect()
self.speed = speed
    def update(self):
        """Default movement: move down by self.speed."""
self.rect.y += self.speed
    def destroy(self):
        print("plane destroyed")
        # play the explosion sound effect
data.yx.play()
for image_path in ["./image/enemy2_down1.png","./image/enemy2_down2.png",
"./image/enemy2_down3.png","./image/enemy2_down4.png",]:
self.image = pygame.image.load(image_path)
time.sleep(0.03)
data.resources.update()
data.resources.draw(data.scene)
data.bullets.update()
data.bullets.draw(data.scene)
data.enemys.update()
data.enemys.draw(data.scene)
pygame.display.update()
self.kill()
data.score += 1
class BackgroundSprite(GameSprite):
def __init__(self,img_path,prepare = False):
super().__init__(img_path)
if prepare:
self.rect.y = -data.SCREEN_SIZE[1]
def update(self):
        # move using the base-class update
super().update()
        # wrap the background around once it scrolls off the bottom of the screen
if self.rect.y > data.SCREEN_SIZE[1]:
self.rect.y = -data.SCREEN_SIZE[1]
class HeroSprite(GameSprite):
    """Hero plane sprite."""
def __init__(self,img_path):
        # initialize the hero image with zero speed
        super().__init__(img_path, speed=0)
        # initial hero position
        self.rect.centerx = data.SCREEN_RECT.centerx
        self.rect.y = data.SCREEN_RECT.centery + 200
        # group holding the hero's bullets
        self.bullets = pygame.sprite.Group()
def update(self):
        # horizontal bounds check
if self.rect.x <= 0:
self.rect.x = 0
elif self.rect.x >= data.SCREEN_RECT.width - self.rect.width:
self.rect.x = data.SCREEN_RECT.width - self.rect.width
        # vertical bounds check
if self.rect.y <= 0:
self.rect.y = 0
elif self.rect.y >= data.SCREEN_RECT.height - self.rect.height:
self.rect.y = data.SCREEN_RECT.height - self.rect.height
    def fire(self):
        """Fire a bullet from the hero plane."""
        # create a bullet
        bullet = BulletSprite(self.rect.centerx - 60, self.rect.y)
        # add it to the hero's bullet group
        self.bullets.add(bullet)
class BulletSprite(GameSprite):
    """Hero bullet sprite."""
def __init__(self, x, y):
super().__init__("./image/bullet_1.png", speed=-8)
self.rect.x = x
self.rect.y = y
def update(self):
        # move using the base-class update
        super().update()
        # bounds check: remove the bullet once it leaves the top of the screen
        if self.rect.y <= -self.rect.height:
            self.kill()
    def __del__(self):
        print("bullet object destroyed")
class EnemySprite(GameSprite):
    """Enemy plane sprite."""
def __init__(self):
        # initialize the enemy with a random speed
        super().__init__("./image/enemy2.png", speed=random.randint(3, 5))
        # random horizontal starting position just above the screen
        self.rect.x = random.randint(0, data.SCREEN_RECT.width - self.rect.width)
        self.rect.y = -self.rect.height
        # group holding this enemy's bullets
        self.bullets = pygame.sprite.Group()
def update(self):
        # move using the base-class update
        super().update()
        # bounds check: destroy the plane once it leaves the screen
        if self.rect.y > data.SCREEN_RECT.height:
            self.kill()
class Bullet_Enemy(GameSprite):
def __init__(self, x, y):
super().__init__("./image/bullet2.png", speed=8)
self.rect.x = x
self.rect.y = y
def update(self):
        # move using the base-class update
        super().update()
        # bounds check: remove the bullet once it leaves the bottom of the screen
        if self.rect.y >= data.SCREEN_SIZE[1]:
self.kill() | zk-pkg | /zk_pkg-1.0.tar.gz/zk_pkg-1.0/game_sprites.py | game_sprites.py |
import os
import sys
from PIL import Image, ImageDraw, ImageFont
from theme_info import load_theme_infos, safe_get
def text_horzontal_center(text, color, font, img, screen_width, base_y):
text_width, text_height = font.getsize(text)
draw = ImageDraw.Draw(img)
x = (screen_width-text_width)/2
y = base_y-text_height
draw.text((x, y), text, color, font=font)
def draw_title(img, title, title_font):
screen_width = img.width
title_y = safe_get(title_font, 'yoff')
title_font_name = safe_get(title_font, 'font')
title_font_size = int(safe_get(title_font, 'size'))
font = ImageFont.truetype(
brother_path(f"theme/{title_font_name}"),
title_font_size
)
text_horzontal_center(title, "#fff", font, img, screen_width, title_y)
def draw_bg(img, bg_file):
bgImg = Image.open(bg_file)
box = (0, 0,
min(img.width, bgImg.width),
min(img.height, bgImg.height))
bgImgCrop = bgImg.crop(box)
img.paste(bgImgCrop, box)
def draw_fg(img, fg_file):
fgImg = Image.open(fg_file)
fgImg = fgImg.convert("RGBA")
boxSrc = (0,
0,
min(img.width, fgImg.width),
min(img.height, fgImg.height))
boxDst = (0,
max(0, img.height-fgImg.height),
min(img.width, fgImg.width),
img.height)
bgImgCrop = fgImg.crop(boxSrc)
img.paste(bgImgCrop, boxDst, bgImgCrop)
def draw_screenshot(img, screenshot_file, theme_info):
ssImg = Image.open(screenshot_file)
factor = 0.72
screenshot_info = safe_get(theme_info, "screenshot_info")
w = int(safe_get(screenshot_info, "width"))
h = int(safe_get(screenshot_info, "height"))
yoff = int(safe_get(screenshot_info, "yoff"))
x = int((img.width - w)/2)
y = int((img.height - h)/2)+yoff
box = (x, y,
x+w,
y+h)
device_type = safe_get(theme_info, "device_type")
screenshot_type = safe_get(screenshot_info, "device_type")
if device_type == 'iphone65':
draw_iphone65(img, box)
elif device_type == 'iphone55':
draw_iphone55(img, box)
else:
draw_pad(img, box)
draw_screen_edge(img, box)
ssImg = ssImg.resize((w, h), Image.ANTIALIAS)
img.paste(ssImg, box)
def draw_iphone65(img, box_screen_of_iphone65):
'''
    Screen:
        "width": 1242,
        "height": 2688,
    Device:
        width: 77.4 mm (3.05 in) -> 1388 px
        height: 157.5 mm (6.20 in) -> 2825 px
        screen diagonal: 6.5 in = 165.1 mm -> 2961 px (17.934585099939431 px/mm)
'''
x1, y1, x2, y2 = box_screen_of_iphone65
screen_w = x2-x1
screen_h = y2-y1
# device_w = screen_w*1388/1242
# device_h = screen_h*2825/2688
device_w = screen_w+80
device_h = screen_h+80
draw_device_frame(img, box_screen_of_iphone65,
(device_w, device_h))
def draw_iphone55(img, box_screen_of_iphone55):
'''
    1. Body: 158.1 mm x 77.8 mm x 7.1 mm -> 2493 x 1227 px
    2. 5.5-inch Retina HD display, 1920 x 1080 px resolution
       5.5 in = 139.7 mm -> 2203 px (15.768841589708649 px/mm)
'''
x1, y1, x2, y2 = box_screen_of_iphone55
screen_w = x2-x1
screen_h = y2-y1
device_w = screen_w*1227/1080
device_h = screen_h*2493/1920
# device_w = screen_w+80
# device_h = screen_h+160
draw_device_frame(img, box_screen_of_iphone55,
(device_w, device_h))
def draw_pad(img, box_screen_of_pad):
'''
    An iPad screen is roughly 120 mm x 160 mm; the body is roughly 135 mm x 200 mm.
'''
x1, y1, x2, y2 = box_screen_of_pad
screen_w = x2-x1
screen_h = y2-y1
device_w = screen_w*135/120
device_h = screen_h*200/160
draw_device_frame(img, box_screen_of_pad,
(device_w, device_h))
def draw_device_frame(img, box_screen, device_size):
device_w, device_h = device_size
screenshot_edg = 2
pad_fill = "#DDBB99"
pad_edg1 = "#CCB097"
pad_edg2 = "#BEA286"
screen_x1, screen_y1, screen_x2, screen_y2 = box_screen
screen_w = screen_x2 - screen_x1
screen_h = screen_y2 - screen_y1
off_x = (device_w-screen_w)/2
off_y = (device_h-screen_h)/2
device_x1 = screen_x1-off_x
device_y1 = screen_y1-off_y
device_x2 = screen_x2+off_x
device_y2 = screen_y2+off_y
draw = ImageDraw.Draw(img)
draw.rectangle((device_x1 - screenshot_edg,
device_y1 - screenshot_edg,
device_x2, device_y2),
pad_edg1)
draw.rectangle((device_x1, device_y1,
device_x2 + screenshot_edg,
device_y2 + screenshot_edg),
pad_edg2)
draw.rectangle((device_x1, device_y1, device_x2, device_y2), pad_fill)
def draw_screen_edge(img, box_screen):
width_edg = 2
screen_edg1 = "#CCAA88"
screen_edg2 = "#BB9977"
screen_x1, screen_y1, screen_x2, screen_y2 = box_screen
draw = ImageDraw.Draw(img)
draw.rectangle((screen_x1 - width_edg,
screen_y1 - width_edg,
screen_x2,
screen_y2),
screen_edg1)
draw.rectangle((screen_x1,
screen_y1,
screen_x2 + width_edg,
screen_y2 + width_edg),
screen_edg2)
def make_screenshot(screenshot_info, theme_info, screenshot_index):
width = safe_get(theme_info, "width")
height = safe_get(theme_info, "height")
device_type = safe_get(theme_info, "device_type")
screen_shot_yoff = safe_get(safe_get(theme_info, "screen_shot"), "yoff")
bg = safe_get(theme_info, "bg")
fg = safe_get(theme_info, "fg")
fg_frame = safe_get(fg[0], "frame")
title_font = safe_get(theme_info, "title")
sub_title_font = safe_get(theme_info, "sub_title")
title = safe_get(screenshot_info, "title")
sub_title = safe_get(screenshot_info, "sub_title")
index = screenshot_index % len(bg)
bg_file = brother_path(f"theme/{bg[index]}")
fg_file = brother_path(f"theme/{fg_frame}")
screenshot_dir = safe_get(screenshot_info, "screenshot_dir")
screenshot_file = safe_get(screenshot_info, "screenshot_path")
img = Image.new('RGB', (width, height), "#3399ff")
draw_bg(img, bg_file)
draw_screenshot(img, screenshot_file, theme_info)
draw_title(img, title, title_font)
draw_title(img, sub_title, sub_title_font)
draw_fg(img, fg_file)
path, filename = os.path.split(screenshot_file)
name, ext = os.path.splitext(filename)
file_dir = os.path.join(screenshot_dir, 'output', device_type)
file_path = os.path.join(screenshot_dir, 'output',
device_type, f'{name}.png')
if not os.path.exists(file_dir):
os.makedirs(file_dir)
img.save(file_path, 'PNG')
print(
f"""
------------------------------------
GEN:
{file_path}
WITH:
{screenshot_file}
theme_type:{device_type}
size:({width}x{height})
""")
def brother_path(file_name):
return os.path.join(os.path.abspath(
os.path.dirname(__file__)), file_name) | zk-screenshot-maker | /zk_screenshot_maker-1.0.5-py3-none-any.whl/zk_screenshot_maker/drawer.py | drawer.py |
zk-shell
========
.. image:: https://travis-ci.org/rgs1/zk_shell.svg?branch=master
:target: https://travis-ci.org/rgs1/zk_shell
:alt: Build Status
.. image:: https://coveralls.io/repos/rgs1/zk_shell/badge.png?branch=master
:target: https://coveralls.io/r/rgs1/zk_shell?branch=master
:alt: Coverage Status
.. image:: https://badge.fury.io/py/zk_shell.svg
:target: http://badge.fury.io/py/zk_shell
:alt: PyPI version
.. image:: https://requires.io/github/rgs1/zk_shell/requirements.svg?branch=master
:target: https://requires.io/github/rgs1/zk_shell/requirements/?branch=master
:alt: Requirements Status
.. image:: https://img.shields.io/pypi/pyversions/zk_shell.svg
:target: https://pypi.python.org/pypi/zk_shell
:alt: Python Versions
.. image:: https://codeclimate.com/github/rgs1/zk_shell.png
:target: https://codeclimate.com/github/rgs1/zk_shell
:alt: Code Climate
**Table of Contents**
- `tl;dr <#tldr>`__
- `Installing <#installing>`__
- `Usage <#usage>`__
- `Dependencies <#dependencies>`__
tl;dr
~~~~~
A powerful & scriptable shell for `Apache
ZooKeeper <http://zookeeper.apache.org/>`__
Installing
~~~~~~~~~~
As Dockerfile:
::
$ docker build . -f Dockerfile -t zk-shell:1.3.3
From PyPI:
::
$ pip install zk-shell
Or running from the source:
::
# Kazoo is needed
$ pip install kazoo
$ git clone https://github.com/rgs1/zk_shell.git
$ cd zk_shell
$ export ZKSHELL_SRC=1; bin/zk-shell
Welcome to zk-shell (0.99.04)
(DISCONNECTED) />
You can also build a self-contained PEX file:
::
$ pip install pex
$ pex -v -e zk_shell.cli -o zk-shell.pex .
More info about PEX `here <https://pex.readthedocs.org>`__.
Usage
~~~~~
Docker Version
::
$ docker run -it zk-shell:1.3.3
and use the connect command to connect to your zookeeper instance
::
$ zk-shell localhost:2181
(CONNECTED) /> ls
zookeeper
(CONNECTED) /> create foo 'bar'
(CONNECTED) /> get foo
bar
(CONNECTED) /> cd foo
(CONNECTED) /foo> create ish 'barish'
(CONNECTED) /foo> cd ..
(CONNECTED) /> ls foo
ish
(CONNECTED) /> create temp- 'temp' true true
(CONNECTED) /> ls
zookeeper foo temp-0000000001
(CONNECTED) /> rmr foo
(CONNECTED) />
(CONNECTED) /> tree
.
├── zookeeper
│ ├── config
│ ├── quota
Line editing and command history are supported via readline (if readline
is available). There's also autocomplete for most commands and their
parameters.
Individual files can be copied between the local filesystem and
ZooKeeper. Recursively copying from the filesystem to ZooKeeper is
supported as well, but not the other way around since znodes can have
content and children.
::
(CONNECTED) /> cp file:///etc/passwd zk://localhost:2181/passwd
(CONNECTED) /> get passwd
(...)
unbound:x:992:991:Unbound DNS resolver:/etc/unbound:/sbin/nologin
haldaemon:x:68:68:HAL daemon:/:/sbin/nologin
Copying between one ZooKeeper cluster to another is supported, too:
::
(CONNECTED) /> cp zk://localhost:2181/passwd zk://othercluster:2183/mypasswd
Copying between a ZooKeeper cluster and JSON files is supported as well:
::
(CONNECTED) /> cp zk://localhost:2181/something json://!tmp!backup.json/ true true
Mirroring paths to between clusters or JSON files is also supported.
Mirroring replaces the destination path with the content and structure
of the source path.
::
(CONNECTED) /> create /source/znode1/znode11 'Hello' false false true
(CONNECTED) /> create /source/znode2 'Hello' false false true
(CONNECTED) /> create /target/znode1/znode12 'Hello' false false true
(CONNECTED) /> create /target/znode3 'Hello' false false true
(CONNECTED) /> tree
.
├── target
│ ├── znode3
│ ├── znode1
│ │ ├── znode12
├── source
│ ├── znode2
│ ├── znode1
│ │ ├── znode11
├── zookeeper
│ ├── config
│ ├── quota
(CONNECTED) /> mirror /source /target
Are you sure you want to replace /target with /source? [y/n]:
y
Mirroring took 0.04 secs
(CONNECTED) /> tree
.
├── target
│ ├── znode2
│ ├── znode1
│ │ ├── znode11
├── source
│ ├── znode2
│ ├── znode1
│ │ ├── znode11
├── zookeeper
│ ├── config
│ ├── quota
(CONNECTED) /> create /target/znode4 'Hello' false false true
(CONNECTED) /> mirror /source /target false false true
Mirroring took 0.03 secs
(CONNECTED) />
Debugging watches can be done with the watch command. It allows
monitoring all the child watches that, recursively, fire under a given path:
::
(CONNECTED) /> watch start /
(CONNECTED) /> create /foo 'test'
(CONNECTED) /> create /bar/foo 'test'
(CONNECTED) /> rm /bar/foo
(CONNECTED) /> watch stats /
Watches Stats
/foo: 1
/bar: 2
/: 1
(CONNECTED) /> watch stop /
Searching for paths or znodes which match a given text can be done via
find:
::
(CONNECTED) /> find / foo
/foo2
/fooish/wayland
/fooish/xorg
/copy/foo
Or a case-insensitive match using ifind:
::
(CONNECTED) /> ifind / foo
/foo2
/FOOish/wayland
/fooish/xorg
/copy/Foo
Grepping for content in znodes can be done via grep:
::
(CONNECTED) /> grep / unbound true
/passwd: unbound:x:992:991:Unbound DNS resolver:/etc/unbound:/sbin/nologin
/copy/passwd: unbound:x:992:991:Unbound DNS resolver:/etc/unbound:/sbin/nologin
Or via igrep for a case-insensitive version.
Non-interactive mode can be used passing commands via ``--run-once``:
::
$ zk-shell --run-once "create /foo 'bar'" localhost
$ zk-shell --run-once "get /foo" localhost
bar
Or piping commands through stdin:
::
$ echo "get /foo" | zk-shell --run-from-stdin localhost
bar
It's also possible to connect using an SSH tunnel, by specifying a host
to use:
::
$ zk-shell --tunnel ssh-host zk-host
Dependencies
~~~~~~~~~~~~
- Python 2.7, 3.3, 3.4, 3.5 or 3.6
- Kazoo >= 2.2
Testing and Development
~~~~~~~~~~~~~~~~~~~~~~~
Please see `CONTRIBUTING.rst <CONTRIBUTING.rst>`__.
| zk-shell | /zk_shell-1.3.4.tar.gz/zk_shell-1.3.4/README.rst | README.rst |
from collections import namedtuple
try:
from itertools import izip
except ImportError:
# py3k
izip = zip
import os
import re
import socket
import sys
PYTHON3 = sys.version_info > (3, )
def pretty_bytes(num):
""" pretty print the given number of bytes """
for unit in ['', 'KB', 'MB', 'GB']:
if num < 1024.0:
if unit == '':
return "%d" % (num)
else:
return "%3.1f%s" % (num, unit)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
def to_bool(boolstr):
""" str to bool """
return boolstr.lower() == "true"
def to_bytes(value):
""" str to bytes (py3k) """
vtype = type(value)
if vtype == bytes or vtype == type(None):
return value
try:
return vtype.encode(value)
except UnicodeEncodeError:
pass
return value
def to_int(sint, default):
""" get an int from an str """
try:
return int(sint)
except ValueError:
return default
def decoded(s):
if PYTHON3:
return str.encode(s).decode('unicode_escape')
else:
return s.decode('string_escape')
def decoded_utf8(s):
return s if PYTHON3 else s.decode('utf-8')
class Netloc(namedtuple("Netloc", "host scheme credential")):
"""
network location info: host, scheme and credential
"""
@classmethod
def from_string(cls, netloc_string):
host = scheme = credential = ""
if not "@" in netloc_string:
host = netloc_string
else:
scheme_credential, host = netloc_string.rsplit("@", 1)
if ":" not in scheme_credential:
raise ValueError("Malformed scheme/credential (must be scheme:credential)")
scheme, credential = scheme_credential.split(":", 1)
return cls(host, scheme, credential)
_empty = re.compile("\A\s*\Z")
_valid_host_part = re.compile("(?!-)[a-z\d-]{1,63}(?<!-)$", re.IGNORECASE)
_valid_ipv4 = re.compile("\A(\d+)\.(\d+)\.(\d+)\.(\d+)\Z")
def valid_port(port, start=1, end=65535):
try:
port = int(port)
return port >= start and port <= end
except ValueError: pass
return False
def valid_ipv4(ip):
""" check if ip is a valid ipv4 """
match = _valid_ipv4.match(ip)
if match is None:
return False
octets = match.groups()
if len(octets) != 4:
return False
first = int(octets[0])
if first < 1 or first > 254:
return False
for i in range(1, 4):
octet = int(octets[i])
if octet < 0 or octet > 255:
return False
return True
def valid_host(host):
""" check valid hostname """
for part in host.split("."):
if not _valid_host_part.match(part):
return False
return True
def valid_host_with_port(hostport):
"""
matches hostname or an IP, optionally with a port
"""
host, port = hostport.rsplit(":", 1) if ":" in hostport else (hostport, None)
# first, validate host or IP
if not valid_ipv4(host) and not valid_host(host):
return False
# now, validate port
if port is not None and not valid_port(port):
return False
return True
def valid_hosts(hosts):
"""
matches a comma separated list of hosts (possibly with ports)
"""
if _empty.match(hosts):
return False
for host in hosts.split(","):
if not valid_host_with_port(host):
return False
return True
def invalid_hosts(hosts):
"""
the inverse of valid_hosts()
"""
return not valid_hosts(hosts)
def split(path):
"""
splits path into parent, child
"""
if path == '/':
return ('/', None)
parent, child = path.rsplit('/', 1)
if parent == '':
parent = '/'
return (parent, child)
def get_ips(host, port):
"""
lookup all IPs (v4 and v6)
"""
ips = set()
for af_type in (socket.AF_INET, socket.AF_INET6):
try:
records = socket.getaddrinfo(host, port, af_type, socket.SOCK_STREAM)
ips.update(rec[4][0] for rec in records)
except socket.gaierror as ex:
pass
return ips
def hosts_to_endpoints(hosts, port=2181):
"""
return a list of (host, port) tuples from a given host[:port],... str
"""
endpoints = []
for host in hosts.split(","):
endpoints.append(tuple(host.rsplit(":", 1)) if ":" in host else (host, port))
return endpoints
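# e.g. hosts_to_endpoints("10.0.0.1:2181,10.0.0.2")
#        -> [("10.0.0.1", "2181"), ("10.0.0.2", 2181)]  (explicit ports are kept as strings)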
def find_outliers(group, delta):
"""
given a list of values, find those that are apart from the rest by
`delta`. the indexes for the outliers is returned, if any.
examples:
values = [100, 6, 7, 8, 9, 10, 150]
find_outliers(values, 5) -> [0, 6]
values = [5, 6, 5, 4, 5]
find_outliers(values, 3) -> []
"""
with_pos = sorted([pair for pair in enumerate(group)], key=lambda p: p[1])
outliers_start = outliers_end = -1
for i in range(0, len(with_pos) - 1):
cur = with_pos[i][1]
nex = with_pos[i + 1][1]
if nex - cur > delta:
# depending on where we are, outliers are the remaining
# items or the ones that we've already seen.
if i < (len(with_pos) - i):
# outliers are close to the start
outliers_start, outliers_end = 0, i + 1
else:
# outliers are close to the end
outliers_start, outliers_end = i + 1, len(with_pos)
break
if outliers_start != -1:
return [with_pos[i][0] for i in range(outliers_start, outliers_end)]
else:
return []
def which(program):
""" analagous to /usr/bin/which """
is_exe = lambda fpath: os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, _ = os.path.split(program)
if fpath and is_exe(program):
return program
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def get_matching(content, match):
""" filters out lines that don't include match """
if match != "":
lines = [line for line in content.split("\n") if match in line]
content = "\n".join(lines)
return content
def grouper(iterable, n):
""" Group iterable in chunks of n size """
args = [iter(iterable)] * n
return izip(*args) | zk-shell | /zk_shell-1.3.4.tar.gz/zk_shell-1.3.4/zk_shell/util.py | util.py |
import copy
import json
import re
def container_for_key(key):
""" Determines what type of container is needed for `key` """
try:
int(key)
return []
except ValueError:
return {}
def safe_list_set(plist, idx, fill_with, value):
"""
Sets:
```
plist[idx] = value
```
If len(plist) is smaller than what idx is trying
to dereferece, we first grow plist to get the needed
capacity and fill the new elements with fill_with
(or fill_with(), if it's a callable).
"""
try:
plist[idx] = value
return
except IndexError:
pass
# Fill in the missing positions. Handle negative indexes.
end = idx + 1 if idx >= 0 else abs(idx)
for _ in range(len(plist), end):
if callable(fill_with):
plist.append(fill_with())
else:
plist.append(fill_with)
plist[idx] = value
class Keys(object):
"""
this class contains logic to parse the DSL to address
keys within JSON objects and extrapolate keys variables
in template strings
"""
# Good keys:
# * foo.bar
# * foo_bar
# * foo-bar
ALLOWED_KEY = '\w+(?:[\.-]\w+)*'
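    # Examples (illustrative):
    #   Keys.fetch({"a": {"b": [1, 2]}}, "a.b.1")      -> 2
    #   Keys.value({"user": "bob"}, "name = #{user}")  -> "name = bob"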
class Bad(Exception):
pass
class Missing(Exception):
pass
@classmethod
def extract(cls, keystr):
""" for #{key} returns key """
regex = r'#{\s*(%s)\s*}' % cls.ALLOWED_KEY
return re.match(regex, keystr).group(1)
@classmethod
def validate_one(cls, keystr):
""" validates one key string """
regex = r'%s$' % cls.ALLOWED_KEY
if re.match(regex, keystr) is None:
raise cls.Bad("Bad key syntax for: %s. Should be: key1.key2..." % (keystr))
return True
@classmethod
def from_template(cls, template):
"""
extracts keys out of template in the form of: "a = #{key1}, b = #{key2.key3} ..."
"""
regex = r'#{\s*%s\s*}' % cls.ALLOWED_KEY
keys = re.findall(regex, template)
if len(keys) == 0:
raise cls.Bad("Bad keys template: %s. Should be: \"%s\"" % (
template, "a = #{key1}, b = #{key2.key3} ..."))
return keys
@classmethod
def validate(cls, keystr):
""" raises cls.Bad if keys has errors """
if "#{" in keystr:
# it's a template with keys vars
keys = cls.from_template(keystr)
for k in keys:
cls.validate_one(cls.extract(k))
else:
# plain keys str
cls.validate_one(keystr)
@classmethod
def fetch(cls, obj, keys):
"""
fetches the value corresponding to keys from obj
"""
current = obj
for key in keys.split("."):
if type(current) == list:
try:
key = int(key)
                except ValueError:
raise cls.Missing(key)
try:
current = current[key]
except (IndexError, KeyError, TypeError) as ex:
raise cls.Missing(key)
return current
@classmethod
def value(cls, obj, keystr):
"""
gets the value corresponding to keys from obj. if keys is a template
string, it extrapolates the keys in it
"""
if "#{" in keystr:
# it's a template with keys vars
keys = cls.from_template(keystr)
for k in keys:
v = cls.fetch(obj, cls.extract(k))
keystr = keystr.replace(k, str(v))
value = keystr
else:
# plain keys str
value = cls.fetch(obj, keystr)
return value
@classmethod
def set(cls, obj, keys, value, fill_list_value=None):
"""
sets the value for the given keys on obj. if any of the given
keys does not exist, create the intermediate containers.
"""
current = obj
keys_list = keys.split(".")
for idx, key in enumerate(keys_list, 1):
if type(current) == list:
# Validate this key works with a list.
try:
key = int(key)
except ValueError:
raise cls.Missing(key)
try:
# This is the last key, so set the value.
if idx == len(keys_list):
if type(current) == list:
safe_list_set(
current,
key,
lambda: copy.copy(fill_list_value),
value
)
else:
current[key] = value
# done.
return
# More keys left, ensure we have a container for this key.
if type(key) == int:
try:
current[key]
except IndexError:
                        # Create the right container for the next key.
cnext = container_for_key(keys_list[idx])
if type(cnext) == list:
def fill_with():
return []
else:
def fill_with():
return {}
safe_list_set(
current,
key,
fill_with,
[] if type(cnext) == list else {}
)
else:
if key not in current:
                        # Create the right container for the next key.
current[key] = container_for_key(keys_list[idx])
# Move on to the next key.
current = current[key]
except (IndexError, KeyError, TypeError):
raise cls.Missing(key)
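# Keys usage sketch (illustrative only, not part of the original module):
#   obj = {"a": {"b": [10, 20]}}
#   Keys.fetch(obj, "a.b.1")          # -> 20
#   Keys.value(obj, "x = #{a.b.0}")   # -> "x = 10"
#   Keys.set(obj, "a.c", "new")       # -> obj["a"]["c"] == "new"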
def to_type(value, ptype):
""" Convert value to ptype """
if ptype == 'str':
return str(value)
elif ptype == 'int':
return int(value)
elif ptype == 'float':
return float(value)
elif ptype == 'bool':
if value.lower() == 'true':
return True
elif value.lower() == 'false':
return False
raise ValueError('Bad bool value: %s' % value)
elif ptype == 'json':
return json.loads(value)
    raise ValueError('Unknown type: %s' % ptype) | zk-shell | /zk_shell-1.3.4.tar.gz/zk_shell/keys.py | keys.py
from kazoo.security import (
ACL,
Id,
make_acl,
make_digest_acl,
Permissions
)
class ACLReader(object):
""" Helper class to parse/unparse ACLs """
class BadACL(Exception):
""" Couldn't parse the ACL """
pass
valid_schemes = [
"world",
"auth",
"digest",
"host",
"ip",
"sasl",
"username_password", # internal-only: gen digest from user:password
]
@classmethod
def extract(cls, acls):
""" parse a str that represents a list of ACLs """
return [cls.extract_acl(acl) for acl in acls]
@classmethod
def extract_acl(cls, acl):
""" parse an individual ACL (i.e.: world:anyone:cdrwa) """
try:
scheme, rest = acl.split(":", 1)
credential = ":".join(rest.split(":")[0:-1])
cdrwa = rest.split(":")[-1]
except ValueError:
raise cls.BadACL("Bad ACL: %s. Format is scheme:id:perms" % (acl))
if scheme not in cls.valid_schemes:
raise cls.BadACL("Invalid scheme: %s" % (acl))
        create = "c" in cdrwa
        read = "r" in cdrwa
        write = "w" in cdrwa
        delete = "d" in cdrwa
        admin = "a" in cdrwa
if scheme == "username_password":
try:
username, password = credential.split(":", 1)
except ValueError:
raise cls.BadACL("Bad ACL: %s. Format is scheme:id:perms" % (acl))
return make_digest_acl(username,
password,
read,
write,
create,
delete,
admin)
else:
return make_acl(scheme,
credential,
read,
write,
create,
delete,
admin)
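    # Illustrative parses (not part of the original module):
    #   ACLReader.extract_acl("world:anyone:cdrwa")
    #     -> ACL with all perms for scheme=world, id=anyone
    #   ACLReader.extract_acl("digest:user:aRxISyaKnTP2+OZ9OmQLkq04bvo=:r")
    #     -> read-only ACL for the given digest credential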
@classmethod
def to_dict(cls, acl):
""" transform an ACL to a dict """
return {
"perms": acl.perms,
"id": {
"scheme": acl.id.scheme,
"id": acl.id.id
}
}
@classmethod
def from_dict(cls, acl_dict):
""" ACL from dict """
perms = acl_dict.get("perms", Permissions.ALL)
id_dict = acl_dict.get("id", {})
id_scheme = id_dict.get("scheme", "world")
id_id = id_dict.get("id", "anyone")
return ACL(perms, Id(id_scheme, id_id)) | zk-shell | /zk_shell-1.3.4.tar.gz/zk_shell-1.3.4/zk_shell/acl.py | acl.py |
import os
try:
from Queue import Queue
except ImportError: # py3k
from queue import Queue
from kazoo.exceptions import NoAuthError, NoNodeError
class Request(object):
__slots__ = ('path', 'result')
def __init__(self, path, result):
self.path, self.result = path, result
@property
def value(self):
return self.result.get()
class GetData(Request): pass
class GetChildren(Request): pass
class PathMap(object):
__slots__ = ("zk", "path")
def __init__(self, zk, path):
self.zk, self.path = zk, path
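    # Usage sketch (illustrative only), assuming `zk` is a connected KazooClient:
    #   for znode_path, data in PathMap(zk, "/configs").get():
    #       print(znode_path, data)
    # yields (path, data) for every znode below /configs, fetched asynchronously.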
def get(self):
reqs = Queue()
child_pending = 1
data_pending = 0
path = self.path
zk = self.zk
child_of = lambda path: zk.get_children_async(path)
dispatch_child = lambda path: GetChildren(path, child_of(path))
data_of = lambda path: zk.get_async(path)
dispatch_data = lambda path: GetData(path, data_of(path))
stat = zk.exists(path)
if stat is None or stat.numChildren == 0:
return
reqs.put(dispatch_child(path))
while child_pending or data_pending:
req = reqs.get()
if type(req) == GetChildren:
try:
children = req.value
for child in children:
data_pending += 1
reqs.put(dispatch_data(os.path.join(req.path, child)))
except (NoNodeError, NoAuthError): pass
child_pending -= 1
else:
try:
data, stat = req.value
try:
if data is not None:
data = data.decode(encoding="utf-8")
except UnicodeDecodeError: pass
yield (req.path, data)
# Does it have children? If so, get them
if stat.numChildren > 0:
child_pending += 1
reqs.put(dispatch_child(req.path))
except (NoNodeError, NoAuthError): pass
data_pending -= 1 | zk-shell | /zk_shell-1.3.4.tar.gz/zk_shell-1.3.4/zk_shell/pathmap.py | pathmap.py |
from contextlib import contextmanager
import os
import re
import socket
import sre_constants
import time
from kazoo.client import KazooClient, TransactionRequest
from kazoo.exceptions import NoAuthError, NoNodeError
from kazoo.protocol.states import KazooState
from .statmap import StatMap
from .tree import Tree
from .usage import Usage
from .util import get_ips, hosts_to_endpoints, to_bytes
@contextmanager
def connected_socket(address, timeout=3):
""" yields a connected socket """
sock = socket.create_connection(address, timeout)
yield sock
sock.close()
class ClientInfo(object):
__slots__ = "id", "ip", "port", "client_hostname", "server_ip", "server_port", "server_hostname"
def __init__(self, sid=None, ip=None, port=None, server_ip=None, server_port=None):
setattr(self, "id", sid)
setattr(self, "ip", ip)
setattr(self, "port", port)
setattr(self, "server_ip", server_ip)
setattr(self, "server_port", server_port)
setattr(self, "client_hostname", None)
setattr(self, "server_hostname", None)
def __call__(self, ip, port, server_ip, server_port):
setattr(self, "ip", ip)
setattr(self, "port", port)
setattr(self, "server_ip", server_ip)
setattr(self, "server_port", server_port)
def __str__(self):
return "%s %s" % (self.id, self.endpoints)
@property
def endpoints(self):
return "%s:%s %s:%s" % (self.ip, self.port, self.server_ip, self.server_port)
@property
def resolved(self):
self._resolve_hostnames()
return "%s %s" % (self.id, self.resolved_endpoints)
@property
def resolved_endpoints(self):
self._resolve_hostnames()
return "%s:%s %s:%s" % (
self.client_hostname, self.port, self.server_hostname, self.server_port)
def _resolve_hostnames(self):
if self.client_hostname is None and self.ip:
self.resolve_ip("client_hostname", self.ip)
if self.server_hostname is None and self.server_ip:
self.resolve_ip("server_hostname", self.server_ip)
def resolve_ip(self, attr, ip):
try:
hname = socket.gethostbyaddr(ip)[0]
setattr(self, attr, hname)
except socket.herror:
pass
class XTransactionRequest(TransactionRequest):
""" wrapper to make PY3K (slightly) painless """
def create(self, path, value=b"", acl=None, ephemeral=False,
sequence=False):
""" wrapper that handles encoding (yay Py3k) """
super(XTransactionRequest, self).create(path, to_bytes(value), acl, ephemeral, sequence)
def set_data(self, path, value, version=-1):
""" wrapper that handles encoding (yay Py3k) """
super(XTransactionRequest, self).set_data(path, to_bytes(value), version)
class XClient(object):
""" adds some extra methods to a wrapped KazooClient """
class CmdFailed(Exception):
""" 4 letter cmd failed """
pass
SESSION_REGEX = re.compile(r"^(0x\w+):")
IP_PORT_REGEX = re.compile(r"^\tip:\s/(\d+\.\d+\.\d+\.\d+):(\d+)\ssessionId:\s(0x\w+)\Z")
PATH_REGEX = re.compile(r"^\t((?:/.*)+)\Z")
def __init__(self, zk_client=None):
self._zk = zk_client or KazooClient()
@property
def xid(self):
""" the session's current xid or -1 if not connected """
conn = self._connection
return conn._xid if conn else -1
@property
def session_timeout(self):
""" the negotiated session timeout """
return self._session_timeout
@property
def server(self):
""" the (hostaddr, port) of the connected ZK server (or "") """
conn = self._connection
return conn._socket.getpeername() if conn else ""
@property
def client(self):
""" the (hostaddr, port) of the local endpoint (or "") """
conn = self._connection
return conn._socket.getsockname() if conn else ""
@property
def sessionid(self):
return "0x%x" % (getattr(self, "_session_id", 0))
@property
def protocol_version(self):
""" this depends on https://github.com/python-zk/kazoo/pull/182,
so play conservatively
"""
return getattr(self, "_protocol_version", 0)
@property
def data_watches(self):
""" paths for data watches """
return self._data_watchers.keys()
@property
def child_watches(self):
""" paths for child watches """
return self._child_watchers.keys()
def get(self, *args, **kwargs):
""" wraps the default get() and deals with encoding """
value, stat = self._zk.get(*args, **kwargs)
try:
if value is not None:
value = value.decode(encoding="utf-8")
except UnicodeDecodeError:
pass
return (value, stat)
def get_bytes(self, *args, **kwargs):
""" no string decoding performed """
return self._zk.get(*args, **kwargs)
def set(self, path, value, version=-1):
""" wraps the default set() and handles encoding (Py3k) """
value = to_bytes(value)
        return self._zk.set(path, value, version)
def create(self, path, value=b"", acl=None, ephemeral=False, sequence=False, makepath=False):
""" wraps the default create() and handles encoding (Py3k) """
value = to_bytes(value)
return self._zk.create(path, value, acl, ephemeral, sequence, makepath)
def create_async(self, path, value=b"", acl=None, ephemeral=False, sequence=False, makepath=False):
""" wraps the default create() and handles encoding (Py3k) """
value = to_bytes(value)
return self._zk.create_async(path, value, acl, ephemeral, sequence, makepath)
def transaction(self):
""" use XTransactionRequest which is encoding aware (Py3k) """
return XTransactionRequest(self)
def du(self, path):
""" returns the bytes used under path """
return Usage(self, path).value
def get_acls_recursive(self, path, depth, include_ephemerals):
"""A recursive generator wrapper for get_acls
:param path: path from which to start
:param depth: depth of the recursion (-1 no recursion, 0 means no limit)
:param include_ephemerals: get ACLs for ephemerals too
"""
yield path, self.get_acls(path)[0]
if depth == -1:
return
for tpath, _ in self.tree(path, depth, full_path=True):
try:
acls, stat = self.get_acls(tpath)
except NoNodeError:
continue
if not include_ephemerals and stat.ephemeralOwner != 0:
continue
yield tpath, acls
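    # Illustrative use, assuming `xclient` is a connected XClient (not part of the original class):
    #   for path, acls in xclient.get_acls_recursive("/configs", depth=0, include_ephemerals=False):
    #       print(path, acls)
    # depth=0 walks the whole subtree, depth=-1 returns only /configs itself.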
def find(self, path, match, flags):
""" find every matching child path under path """
try:
match = re.compile(match, flags)
except sre_constants.error as ex:
print("Bad regexp: %s" % (ex))
return
offset = len(path)
for cpath in Tree(self, path).get():
if match.search(cpath[offset:]):
yield cpath
def grep(self, path, content, flags):
""" grep every child path under path for content """
try:
match = re.compile(content, flags)
except sre_constants.error as ex:
print("Bad regexp: %s" % (ex))
return
for gpath, matches in self.do_grep(path, match):
yield (gpath, matches)
def do_grep(self, path, match):
""" grep's work horse """
try:
children = self.get_children(path)
except (NoNodeError, NoAuthError):
children = []
for child in children:
full_path = os.path.join(path, child)
try:
value, _ = self.get(full_path)
except (NoNodeError, NoAuthError):
value = ""
if value is not None:
if isinstance(value, bytes):
value = value.decode(errors='ignore')
matches = [line for line in value.split("\n") if match.search(line)]
if len(matches) > 0:
yield (full_path, matches)
for mpath, matches in self.do_grep(full_path, match):
yield (mpath, matches)
def child_count(self, path):
"""
returns the child count under path (deals with znodes going away as it's
traversing the tree).
"""
stat = self.stat(path)
if not stat:
return 0
count = stat.numChildren
for _, _, stat in self.tree(path, 0, include_stat=True):
if stat:
count += stat.numChildren
return count
def tree(self, path, max_depth, full_path=False, include_stat=False):
"""DFS generator which starts from a given path and goes up to a max depth.
:param path: path from which the DFS will start
:param max_depth: max depth of DFS (0 means no limit)
:param full_path: should the full path of the child node be returned
:param include_stat: return the child Znode's stat along with the name & level
"""
for child_level_stat in self.do_tree(path, max_depth, 0, full_path, include_stat):
yield child_level_stat
def do_tree(self, path, max_depth, level, full_path, include_stat):
""" tree's work horse """
try:
children = self.get_children(path)
except (NoNodeError, NoAuthError):
children = []
for child in children:
cpath = os.path.join(path, child) if full_path else child
if include_stat:
yield cpath, level, self.stat(os.path.join(path, child))
else:
yield cpath, level
if max_depth == 0 or level + 1 < max_depth:
cpath = os.path.join(path, child)
for rchild_rlevel_rstat in self.do_tree(cpath, max_depth, level + 1, full_path, include_stat):
yield rchild_rlevel_rstat
def fast_tree(self, path, exclude_recurse=None):
""" a fast async version of tree() """
for cpath in Tree(self, path).get(exclude_recurse):
yield cpath
def stat_map(self, path):
""" a generator for <child, Stat> """
return StatMap(self, path).get()
def diff(self, path_a, path_b):
""" Performs a deep comparison of path_a/ and path_b/
For each child, it yields (rv, child) where rv:
-1 if doesn't exist in path_b (destination)
0 if they are different
1 if it doesn't exist in path_a (source)
"""
path_a = path_a.rstrip("/")
path_b = path_b.rstrip("/")
if not self.exists(path_a) or not self.exists(path_b):
return
if not self.equal(path_a, path_b):
yield 0, "/"
seen = set()
len_a = len(path_a)
len_b = len(path_b)
# first, check what's missing & changed in dst
for child_a, level in self.tree(path_a, 0, True):
child_sub = child_a[len_a + 1:]
child_b = os.path.join(path_b, child_sub)
if not self.exists(child_b):
yield -1, child_sub
else:
if not self.equal(child_a, child_b):
yield 0, child_sub
seen.add(child_sub)
# now, check what's new in dst
for child_b, level in self.tree(path_b, 0, True):
child_sub = child_b[len_b + 1:]
if child_sub not in seen:
yield 1, child_sub
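    # Illustrative interpretation of diff() output (not part of the original class):
    #   for rv, child in xclient.diff("/src", "/dst"):
    #       if rv == -1: print("%s only exists in /src" % child)
    #       elif rv == 0: print("%s differs" % child)
    #       else: print("%s only exists in /dst" % child)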
def equal(self, path_a, path_b):
"""
compare if a and b have the same bytes
"""
content_a, _ = self.get_bytes(path_a)
content_b, _ = self.get_bytes(path_b)
return content_a == content_b
def stat(self, path):
""" safely gets the Znode's Stat """
try:
stat = self.exists(str(path))
except (NoNodeError, NoAuthError):
stat = None
return stat
def _to_endpoints(self, hosts):
return [self.current_endpoint] if hosts is None else hosts_to_endpoints(hosts)
def mntr(self, hosts=None):
""" send an mntr cmd to either host or the connected server """
return self.cmd(self._to_endpoints(hosts), "mntr")
def cons(self, hosts=None):
""" send a cons cmd to either host or the connected server """
return self.cmd(self._to_endpoints(hosts), "cons")
def dump(self, hosts=None):
""" send a dump cmd to either host or the connected server """
return self.cmd(self._to_endpoints(hosts), "dump")
def cmd(self, endpoints, cmd):
"""endpoints is [(host1, port1), (host2, port), ...]"""
replies = []
for ep in endpoints:
try:
replies.append(self._cmd(ep, cmd))
except self.CmdFailed as ex:
# if there's only 1 endpoint, give up.
# if there's more, keep trying.
if len(endpoints) == 1:
raise ex
return "".join(replies)
def _cmd(self, endpoint, cmd):
""" endpoint is (host, port) """
cmdbuf = "%s\n" % (cmd)
# some cmds have large outputs and ZK closes the connection as soon as it
# finishes writing. so read in huge chunks.
recvsize = 1 << 20
replies = []
host, port = endpoint
ips = get_ips(host, port)
if len(ips) == 0:
raise self.CmdFailed("Failed to resolve: %s" % (host))
for ip in ips:
try:
with connected_socket((ip, port)) as sock:
sock.send(cmdbuf.encode())
while True:
buf = sock.recv(recvsize).decode("utf-8")
if buf == "":
break
replies.append(buf)
except socket.error as ex:
# if there's only 1 record, give up.
# if there's more, keep trying.
if len(ips) == 1:
raise self.CmdFailed("Error(%s): %s" % (ip, ex))
return "".join(replies)
@property
def current_endpoint(self):
if not self.connected:
raise self.CmdFailed("Not connected and no host given.")
# If we are using IPv6, getpeername() returns a 4-tuple
return self._connection._socket.getpeername()[:2]
def zk_url(self):
""" returns `zk://host:port` for the connected host:port """
return "zk://%s:%d" % self.current_endpoint
def reconnect(self):
""" forces a reconnect by shutting down the connected socket
return True if the reconnect happened, False otherwise
"""
state_change_event = self.handler.event_object()
def listener(state):
if state is KazooState.SUSPENDED:
state_change_event.set()
self.add_listener(listener)
self._connection._socket.shutdown(socket.SHUT_RDWR)
state_change_event.wait(1)
if not state_change_event.is_set():
return False
# wait until we are back
while not self.connected:
time.sleep(0.1)
return True
def dump_by_server(self, hosts):
"""Returns the output of dump for each server.
:param hosts: comma separated lists of members of the ZK ensemble.
:returns: A dictionary of ((server_ip, port), ClientInfo).
"""
dump_by_endpoint = {}
for endpoint in self._to_endpoints(hosts):
try:
out = self.cmd([endpoint], "dump")
except self.CmdFailed as ex:
out = ""
dump_by_endpoint[endpoint] = out
return dump_by_endpoint
def ephemerals_info(self, hosts):
"""Returns ClientInfo per path.
:param hosts: comma separated lists of members of the ZK ensemble.
:returns: A dictionary of (path, ClientInfo).
"""
info_by_path, info_by_id = {}, {}
for server_endpoint, dump in self.dump_by_server(hosts).items():
server_ip, server_port = server_endpoint
sid = None
for line in dump.split("\n"):
mat = self.SESSION_REGEX.match(line)
if mat:
sid = mat.group(1)
continue
mat = self.PATH_REGEX.match(line)
if mat:
info = info_by_id.get(sid, None)
if info is None:
info = info_by_id[sid] = ClientInfo(sid)
info_by_path[mat.group(1)] = info
continue
mat = self.IP_PORT_REGEX.match(line)
if mat:
ip, port, sid = mat.groups()
if sid not in info_by_id:
continue
info_by_id[sid](ip, int(port), server_ip, server_port)
return info_by_path
def sessions_info(self, hosts):
"""Returns ClientInfo per session.
:param hosts: comma separated lists of members of the ZK ensemble.
:returns: A dictionary of (session_id, ClientInfo).
"""
info_by_id = {}
for server_endpoint, dump in self.dump_by_server(hosts).items():
server_ip, server_port = server_endpoint
for line in dump.split("\n"):
mat = self.IP_PORT_REGEX.match(line)
if mat is None:
continue
ip, port, sid = mat.groups()
info_by_id[sid] = ClientInfo(sid, ip, port, server_ip, server_port)
return info_by_id
def __getattr__(self, attr):
"""kazoo.client method and attribute proxy"""
return getattr(self._zk, attr) | zk-shell | /zk_shell-1.3.4.tar.gz/zk_shell-1.3.4/zk_shell/xclient.py | xclient.py |
from __future__ import print_function
from base64 import b64decode, b64encode
from collections import defaultdict
import json
import os
import re
import time
import shutil
try:
from urlparse import urlparse
except ImportError:
# Python 3.3?
from urllib.parse import urlparse
from kazoo.client import KazooClient
from kazoo.exceptions import (
NoAuthError,
NodeExistsError,
NoNodeError,
NoChildrenForEphemeralsError,
ZookeeperError,
)
from .acl import ACLReader
from .statmap import StatMap
from .util import Netloc, to_bytes
DEFAULT_ZK_PORT = 2181
def zk_client(host, scheme, credential):
""" returns a connected (and possibly authenticated) ZK client """
if not re.match(r".*:\d+$", host):
host = "%s:%d" % (host, DEFAULT_ZK_PORT)
client = KazooClient(hosts=host)
client.start()
if scheme != "":
client.add_auth(scheme, credential)
return client
class CopyError(Exception):
""" base exception for Copy errors """
def __init__(self, message, early_error=False):
super(CopyError, self).__init__(message)
self._early_error = early_error
@property
def is_early_error(self):
return self._early_error
class AuthError(CopyError):
""" authentication exception for Copy """
def __init__(self, operation, path):
super(AuthError, self).__init__(
"Permission denied: Could not %s znode %s." % (operation, path))
class PathValue(object):
def __init__(self, value, acl=None):
self._value = value
self._acl = acl if acl else []
@property
def value(self):
return self._value
@property
def value_as_bytes(self):
return to_bytes(self.value)
@property
def acl(self):
return self._acl
@property
def acl_as_dict(self):
return self._acl
class ProxyType(type):
TYPES = {}
SCHEME = ""
def __new__(mcs, clsname, bases, dct):
obj = super(ProxyType, mcs).__new__(mcs, clsname, bases, dct)
if obj.SCHEME in mcs.TYPES:
raise ValueError("Duplicate scheme handler: %s" % obj.SCHEME)
if obj.SCHEME != "":
mcs.TYPES[obj.SCHEME] = obj
return obj
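# How scheme registration works (illustrative note, not part of the original module):
# every Proxy subclass that defines a non-empty SCHEME gets registered in ProxyType.TYPES,
# so Proxy.from_string() can dispatch on the URL scheme, e.g.:
#   Proxy.from_string("zk://localhost:2181/some/path")      -> handled by ZKProxy
#   Proxy.from_string("json://!tmp!backup.json/some/path")  -> handled by JSONProxy
#   Proxy.from_string("file:///tmp/file")                   -> handled by FileProxy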
class Proxy(ProxyType("ProxyBase", (object,), {})):
SCHEME = ""
def __init__(self, parse_result, exists, asynchronous, verbose):
self.parse_result = parse_result
self.netloc = Netloc.from_string(parse_result.netloc)
self.exists = exists
self.asynchronous = asynchronous
self.verbose = verbose
@property
def scheme(self):
return self.parse_result.scheme
@property
def url(self):
return self.parse_result.geturl()
@property
def path(self):
path = self.parse_result.path
if path == "":
return "/"
return "/" if path == "/" else path.rstrip("/")
@property
def host(self):
return self.netloc.host
@property
def auth_scheme(self):
return self.netloc.scheme
@property
def auth_credential(self):
return self.netloc.credential
def set_url(self, string):
""" useful for recycling a stateful proxy """
self.parse_result = Proxy.parse(string)
@classmethod
def from_string(cls, string, exists=False, asynchronous=False, verbose=False):
"""
        if exists is a bool, check that the path's existence matches it.
        if exists is None, we don't care.
"""
result = cls.parse(string)
if result.scheme not in cls.TYPES:
raise CopyError("Invalid scheme: %s" % (result.scheme))
return cls.TYPES[result.scheme](result, exists, asynchronous, verbose)
@classmethod
def parse(cls, url_string):
return urlparse(url_string)
def __enter__(self):
pass
def __exit__(self, etype, value, traceback):
pass
def check_path(self):
raise NotImplementedError("check_path must be implemented")
def read_path(self):
raise NotImplementedError("read_path must be implemented")
def write_path(self, path_value):
raise NotImplementedError("write_path must be implemented")
def children_of(self):
raise NotImplementedError("children_of must be implemented")
def delete_path_recursively(self):
raise NotImplementedError("delete_path must be implemented")
def copy(self, dst, recursive, max_items, mirror):
opname = "Copy" if not mirror else "Mirror"
# basic sanity check
if mirror and self.scheme == "zk" and dst.scheme == "file":
raise CopyError("Mirror from zk to fs isn't supported", True)
if recursive and self.scheme == "zk" and dst.scheme == "file":
raise CopyError("Recursive %s from zk to fs isn't supported" %
opname.lower(), True)
if mirror and not recursive:
raise CopyError("Mirroring must be recursive", True)
start = time.time()
src_url = self.url
dst_url = dst.url
with self:
with dst:
if mirror:
dst_children = set(c for c in dst.children_of())
self.do_copy(dst, opname)
if recursive:
for i, child in enumerate(self.children_of()):
if mirror and child in dst_children:
dst_children.remove(child)
if max_items > 0 and i == max_items:
break
self.set_url(os.path.join(src_url, child))
dst.set_url(os.path.join(dst_url, child))
self.do_copy(dst, opname)
# reset to base urls
self.set_url(src_url)
dst.set_url(dst_url)
if mirror:
for child in dst_children:
dst.set_url(os.path.join(dst_url, child))
dst.delete_path_recursively()
end = time.time()
print("%sing took %.2f secs" % (opname, round(end - start, 2)))
def do_copy(self, dst, opname):
if self.verbose:
if self.asynchronous:
print("%sing (asynchronously) from %s to %s" % (opname, self.url, dst.url))
else:
print("%sing from %s to %s" % (opname, self.url, dst.url))
dst.write_path(self.read_path())
class ZKProxy(Proxy):
""" read/write ZooKeeper paths """
SCHEME = "zk"
class ZKPathValue(PathValue):
""" handle ZK specific meta attribs (i.e.: acls) """
def __init__(self, value, acl=None):
PathValue.__init__(self, value)
self._acl = acl
@property
def acl(self):
return self._acl
@property
def acl_as_dict(self):
acls = self.acl if self.acl else []
return [ACLReader.to_dict(a) for a in acls]
def __init__(self, parse_result, exists, asynchronous, verbose):
super(ZKProxy, self).__init__(parse_result, exists, asynchronous, verbose)
self.client = None
self.need_client = True # whether we build a client or one is provided
def connect(self):
if self.need_client:
self.client = zk_client(self.host, self.auth_scheme, self.auth_credential)
def disconnect(self):
if self.need_client:
if self.client:
self.client.stop()
def __enter__(self):
self.connect()
if self.exists is not None:
self.check_path()
def __exit__(self, etype, value, traceback):
self.disconnect()
def check_path(self):
try:
            retval = bool(self.client.exists(self.path))
except NoAuthError:
raise AuthError("read", self.path)
if retval is not self.exists:
if self.exists:
error = "znode %s in %s doesn't exist" % \
(self.path, self.host)
else:
error = "znode %s in %s exists" % (self.path, self.host)
raise CopyError(error)
def read_path(self):
try:
# TODO: propose a new ZK opcode (GetWithACLs) so we can do this in 1 rt
value = self.get_value(self.path)
acl, _ = self.client.get_acls(self.path)
return self.ZKPathValue(value, acl)
except NoAuthError:
raise AuthError("read", self.path)
def write_path(self, path_value):
if isinstance(path_value, self.ZKPathValue):
acl = path_value.acl
else:
acl = [ACLReader.from_dict(a) for a in path_value.acl]
if self.client.exists(self.path):
try:
value = self.get_value(self.path)
if path_value.value != value:
self.client.set(self.path, path_value.value)
except NoAuthError:
raise AuthError("write", self.path)
else:
try:
# Kazoo's create() doesn't handle acl=[] correctly
# See: https://github.com/python-zk/kazoo/pull/164
acl = acl or None
self.client.create(self.path, path_value.value, acl=acl, makepath=True)
except NoAuthError:
raise AuthError("create", self.path)
except NodeExistsError:
raise CopyError("Node %s exists" % (self.path))
except NoNodeError:
raise CopyError("Parent node for %s is missing" % (self.path))
except NoChildrenForEphemeralsError:
raise CopyError("Ephemeral znodes can't have children")
except ZookeeperError:
raise CopyError("ZooKeeper server error")
def get_value(self, path):
try:
if hasattr(self.client, 'get_bytes'):
v, _ = self.client.get_bytes(path)
else:
v, _ = self.client.get(path)
except NoAuthError:
raise AuthError("read", path)
return v
def delete_path_recursively(self):
try:
self.client.delete(self.path, recursive=True)
except NoNodeError:
pass
except NoAuthError:
raise AuthError("delete", self.path)
except ZookeeperError:
raise CopyError("Zookeeper server error")
def children_of(self):
if self.asynchronous:
offs = 1 if self.path == "/" else len(self.path) + 1
for path, stat in StatMap(self.client, self.path, recursive=True).get():
if stat.ephemeralOwner == 0:
yield path[offs:]
else:
for path in self.zk_walk(self.path, None):
yield path
def zk_walk(self, root_path, branch_path):
"""
skip ephemeral znodes since there's no point in copying those
"""
full_path = os.path.join(root_path, branch_path) if branch_path else root_path
try:
children = self.client.get_children(full_path)
except NoNodeError:
children = set()
except NoAuthError:
raise AuthError("read children", full_path)
for child in children:
child_path = os.path.join(branch_path, child) if branch_path else child
try:
stat = self.client.exists(os.path.join(root_path, child_path))
except NoAuthError:
raise AuthError("read", child)
if stat is None or stat.ephemeralOwner != 0:
continue
yield child_path
for new_path in self.zk_walk(root_path, child_path):
yield new_path
class FileProxy(Proxy):
SCHEME = "file"
def __init__(self, parse_result, exists, asynchronous, verbose):
super(FileProxy, self).__init__(parse_result, exists, asynchronous, verbose)
if exists is not None:
self.check_path()
def check_path(self):
if os.path.exists(self.path) is not self.exists:
error = "Path %s " % (self.path)
error += "doesn't exist" if self.exists else "exists"
raise CopyError(error)
def read_path(self):
if os.path.isfile(self.path):
with open(self.path, "r") as fph:
return PathValue("".join(fph.readlines()))
elif os.path.isdir(self.path):
return PathValue("")
raise CopyError("%s is of unknown file type" % (self.path))
def write_path(self, path_value):
""" this will overwrite dst path - be careful """
parent_dir = os.path.dirname(self.path)
try:
os.makedirs(parent_dir)
except OSError:
pass
with open(self.path, "w") as fph:
fph.write(path_value.value)
def children_of(self):
root_path = self.path[0:-1] if self.path.endswith("/") else self.path
for path, _, files in os.walk(root_path):
path = path.replace(root_path, "")
if path.startswith("/"):
path = path[1:]
if path != "":
yield path
for filename in files:
yield os.path.join(path, filename) if path != "" else filename
def delete_path_recursively(self):
shutil.rmtree(self.path, True)
class JSONProxy(Proxy):
""" read/write from JSON files discovered via:
json://!some!path!backup.json/some/path
the serialized version looks like this:
.. code-block:: python
{
'/some/path': {
'content': 'blob',
'acls': []},
'/some/other/path': {
'content': 'other-blob',
'acls': []},
}
    For simplicity, a flat dictionary is used as opposed to
    a tree-like format with children accessible from
    their parent.
"""
def __init__(self, *args, **kwargs):
super(JSONProxy, self).__init__(*args, **kwargs)
self._dirty = None
self._tree = None
SCHEME = "json"
def __enter__(self):
self._dirty = False # tracks writes
self._tree = defaultdict(dict)
if os.path.exists(self.host):
with open(self.host, "r") as fph:
try:
ondisc_tree = json.load(fph)
self._tree.update(ondisc_tree)
except ValueError:
pass
if self.exists is not None:
self.check_path()
def __exit__(self, etype, value, traceback):
if not self._dirty:
return
with open(self.host, "w") as fph:
json.dump(self._tree, fph, indent=4)
@property
def host(self):
return super(JSONProxy, self).host.replace("!", "/")
def check_path(self):
if (self.path in self._tree) != self.exists:
error = "Path %s " % (self.path)
error += "doesn't exist" if self.exists else "exists"
raise CopyError(error)
def read_path(self):
value = self._tree[self.path]["content"]
if value is not None:
try:
value = b64decode(value)
            except Exception:
print("Failed to b64decode %s" % self.path)
acl = self._tree[self.path].get("acls", [])
return PathValue(value, acl)
def write_path(self, path_value):
content = path_value.value_as_bytes
if content is not None:
try:
content = b64encode(content).decode(encoding="utf-8")
            except Exception:
print("Failed to b64encode %s" % self.path)
self._tree[self.path]["content"] = content
self._tree[self.path]["acls"] = path_value.acl_as_dict
self._dirty = True
def children_of(self):
offs = 1 if self.path == "/" else len(self.path) + 1
good = lambda k: k != self.path and k.startswith(self.path)
for child in self._tree.keys():
if good(child):
yield child[offs:]
def delete_path_recursively(self):
if self.path in self._tree:
# build a set from the iterable so we don't change the dictionary during iteration
for c in set(self.children_of()):
self._tree.pop(os.path.join(self.path, c))
self._tree.pop(self.path) | zk-shell | /zk_shell-1.3.4.tar.gz/zk_shell-1.3.4/zk_shell/copy_util.py | copy_util.py |
from __future__ import print_function
import os
from collections import defaultdict
from kazoo.protocol.states import EventType, KazooState
from kazoo.exceptions import NoNodeError
class PathStats(object):
""" per path stats """
def __init__(self, debug):
self.debug = debug
self.paths = defaultdict(int)
class WatchManager(object):
""" keep track of paths being watched """
def __init__(self, client):
self._client = client
self._client.add_listener(self._session_watcher)
self._reset_paths()
def _session_watcher(self, state):
""" if the session expires we've lost everything """
if state == KazooState.LOST:
self._reset_paths()
def _reset_paths(self):
self._stats_by_path = {}
PARENT_ERR = "%s is a parent of %s which is already watched"
CHILD_ERR = "%s is a child of %s which is already watched"
def add(self, path, debug, children):
"""
Set a watch for path and (maybe) its children depending on the value
of children:
-1: all children
0: no children
> 0: up to level depth children
If debug is true, print each received events.
"""
if path in self._stats_by_path:
print("%s is already being watched" % (path))
return
# we can't watch child paths of what's already being watched,
# because that generates a race between firing and resetting
# watches for overlapping paths.
if "/" in self._stats_by_path:
print("/ is already being watched, so everything is watched")
return
for epath in self._stats_by_path:
if epath.startswith(path):
print(self.PARENT_ERR % (path, epath))
return
if path.startswith(epath):
print(self.CHILD_ERR % (path, epath))
return
self._stats_by_path[path] = PathStats(debug)
self._watch(path, 0, children)
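    # Usage sketch (illustrative only), assuming `zk` is a connected KazooClient:
    #   wm = get_watch_manager(zk)
    #   wm.add("/services", debug=True, children=-1)   # watch the whole subtree
    #   wm.stats("/services")                          # print per-path event counts
    #   wm.remove("/services")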
def remove(self, path):
if path not in self._stats_by_path:
print("%s is not being watched" % (path))
else:
del self._stats_by_path[path]
def stats(self, path):
if path not in self._stats_by_path:
print("%s is not being watched" % (path))
else:
print("\nWatches Stats\n")
            for wpath, count in self._stats_by_path[path].paths.items():
                print("%s: %d" % (wpath, count))
def _watch(self, path, current_level, max_level):
"""
we need to catch ZNONODE because children might be removed whilst we
        are iterating (especially ephemeral znodes)
"""
# ephemeral znodes can't have children, so skip them
stat = self._client.exists(path)
if stat is None or stat.ephemeralOwner != 0:
return
try:
children = self._client.get_children(path, self._watcher)
except NoNodeError:
children = []
if max_level >= 0 and current_level + 1 > max_level:
return
for child in children:
self._watch(os.path.join(path, child), current_level + 1, max_level)
def _watcher(self, watched_event):
for path, stats in self._stats_by_path.items():
if not watched_event.path.startswith(path):
continue
if watched_event.type == EventType.CHILD:
stats.paths[watched_event.path] += 1
if stats.debug:
print(str(watched_event))
if watched_event.type == EventType.CHILD:
try:
children = self._client.get_children(watched_event.path,
self._watcher)
except NoNodeError:
pass
_wm = None
def get_watch_manager(client):
global _wm
if _wm is None:
_wm = WatchManager(client)
return _wm | zk-shell | /zk_shell-1.3.4.tar.gz/zk_shell-1.3.4/zk_shell/watch_manager.py | watch_manager.py |
from collections import namedtuple
from functools import partial
import argparse
import logging
import signal
import sys
from . import __version__
from .shell import Shell
try:
raw_input
except NameError:
raw_input = input
class CLIParams(
namedtuple("CLIParams",
"connect_timeout run_once run_from_stdin sync_connect hosts readonly tunnel version")):
"""
    This defines the running params for a CLI() object. If you'd like to do parameter
    processing somewhere else, you'll need to fill in an instance of this class and pass it to
CLI()(), i.e.:
```
params = parmas_from_argv()
clip = CLIParams(params.connect_timeout, ...)
cli = CLI()
cli(clip)
```
"""
pass
def get_params():
""" get the cmdline params """
parser = argparse.ArgumentParser()
parser.add_argument("--connect-timeout",
type=float,
default=10.0,
help="ZK connect timeout")
parser.add_argument("--run-once",
type=str,
default="",
help="Run a command non-interactively and exit")
parser.add_argument("--run-from-stdin",
action="store_true",
default=False,
help="Read cmds from stdin, run them and exit")
parser.add_argument("--sync-connect",
action="store_true",
default=False,
help="Connect synchronously.")
parser.add_argument("--readonly",
action="store_true",
default=False,
help="Enable readonly.")
parser.add_argument("--tunnel",
type=str,
help="Create a ssh tunnel via this host",
default=None)
parser.add_argument("--version",
action="store_true",
default=False,
help="Display version and exit.")
parser.add_argument("hosts",
nargs="*",
help="ZK hosts to connect")
params = parser.parse_args()
return CLIParams(
params.connect_timeout,
params.run_once,
params.run_from_stdin,
params.sync_connect,
params.hosts,
params.readonly,
params.tunnel,
params.version
)
class StateTransition(Exception):
""" raised when the connection changed state """
pass
def sigusr_handler(shell, *_):
""" handler for SIGUSR2 """
if shell.state_transitions_enabled:
raise StateTransition()
def set_unbuffered_mode():
"""
make output unbuffered
"""
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
class CLI(object):
""" the REPL """
def __call__(self, params=None):
""" parse params & loop forever """
logging.basicConfig(level=logging.ERROR)
if params is None:
params = get_params()
if params.version:
sys.stdout.write("%s\n" % __version__)
sys.exit(0)
interactive = params.run_once == "" and not params.run_from_stdin
        asynchronous = interactive and not params.sync_connect
if not interactive:
set_unbuffered_mode()
shell = Shell(params.hosts,
params.connect_timeout,
setup_readline=interactive,
output=sys.stdout,
asynchronous=asynchronous,
read_only=params.readonly,
tunnel=params.tunnel)
if not interactive:
rc = 0
try:
if params.run_once != "":
                    rc = 0 if shell.onecmd(params.run_once) is None else 1
else:
for cmd in sys.stdin.readlines():
                        cur_rc = 0 if shell.onecmd(cmd.rstrip()) is None else 1
if cur_rc != 0:
rc = cur_rc
except IOError:
rc = 1
sys.exit(rc)
if not params.sync_connect:
signal.signal(signal.SIGUSR2, partial(sigusr_handler, shell))
intro = "Welcome to zk-shell (%s)" % (__version__)
first = True
while True:
wants_exit = False
try:
shell.run(intro if first else None)
except StateTransition:
pass
except KeyboardInterrupt:
wants_exit = True
if wants_exit:
try:
done = raw_input("\nExit? (y|n) ")
if done == "y":
break
except EOFError:
pass
first = False
if __name__ == "__main__":
CLI()() | zk-shell | /zk_shell-1.3.4.tar.gz/zk_shell-1.3.4/zk_shell/cli.py | cli.py |
from collections import defaultdict
from contextlib import contextmanager
from functools import partial, wraps
from threading import Thread
import bisect
import copy
import difflib
import json
import os
import re
import shlex
import signal
import socket
import stat as statlib
import sys
import tempfile
import time
import zlib
from colors import green, red
from kazoo.client import KazooClient
from kazoo.exceptions import (
APIError,
AuthFailedError,
BadArgumentsError,
BadVersionError,
ConnectionLoss,
InvalidACLError,
NewConfigNoQuorumError,
NoAuthError,
NodeExistsError,
NoNodeError,
NotEmptyError,
NotReadOnlyCallError,
ReconfigInProcessError,
SessionExpiredError,
UnimplementedError,
ZookeeperError,
)
from kazoo.protocol.states import KazooState
from kazoo.security import OPEN_ACL_UNSAFE, READ_ACL_UNSAFE
from tabulate import tabulate
from twitter.common.net.tunnel import TunnelHelper
from xcmd.complete import (
complete,
complete_boolean,
complete_labeled_boolean,
complete_values
)
from xcmd.conf import Conf, ConfVar
from xcmd.xcmd import (
XCmd,
FloatRequired,
IntegerOptional,
IntegerRequired,
LabeledBooleanOptional,
interruptible,
ensure_params,
Multi,
MultiOptional,
Optional,
Required,
)
from .acl import ACLReader
from .copy_util import CopyError, Proxy
from .keys import Keys, to_type
from .pathmap import PathMap
from .watcher import get_child_watcher
from .watch_manager import get_watch_manager
from .util import (
decoded,
find_outliers,
get_ips,
get_matching,
grouper,
hosts_to_endpoints,
invalid_hosts,
Netloc,
pretty_bytes,
split,
to_bool,
to_int,
which
)
from .xclient import XClient
def connected(func):
""" check connected, fails otherwise """
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
if not self.connected:
self.show_output("Not connected.")
else:
try:
return func(*args, **kwargs)
except APIError:
self.show_output("ZooKeeper internal error.")
except AuthFailedError:
self.show_output("Authentication failed.")
except NoAuthError:
self.show_output("Not authenticated.")
except BadVersionError:
self.show_output("Bad version.")
except ConnectionLoss:
self.show_output("Connection loss.")
except NotReadOnlyCallError:
self.show_output("Not a read-only operation.")
except BadArgumentsError:
self.show_output("Bad arguments.")
except SessionExpiredError:
self.show_output("Session expired.")
except UnimplementedError as ex:
self.show_output("Not implemented by the server: %s." % str(ex))
except ZookeeperError as ex:
self.show_output("Unknown ZooKeeper error: %s" % str(ex))
return wrapper
def check_path_exists_foreach(path_params, func):
""" check that paths exist (unless we are in a transaction) """
@wraps(func)
def wrapper(*args):
self = args[0]
params = args[1]
if not self.in_transaction:
for name in path_params:
value = getattr(params, name)
paths = value if type(value) == list else [value]
resolved = []
for path in paths:
path = self.resolve_path(path)
if not self.client.exists(path):
self.show_output("Path %s doesn't exist", path)
return False
resolved.append(path)
if type(value) == list:
setattr(params, name, resolved)
else:
setattr(params, name, resolved[0])
return func(self, params)
return wrapper
def check_paths_exists(*paths):
""" check that each path exists """
return partial(check_path_exists_foreach, paths)
def check_path_absent(func):
"""
check path doesn't exist (unless we are in a txn or it's sequential)
note: when creating sequential znodes, a trailing slash means no prefix, i.e.:
create(/some/path/, sequence=True) -> /some/path/0000001
    in all other cases, the trailing slash is dropped.
"""
@wraps(func)
def wrapper(*args):
self = args[0]
params = args[1]
orig_path = params.path
sequence = getattr(params, 'sequence', False)
params.path = self.resolve_path(params.path)
if self.in_transaction or sequence or not self.client.exists(params.path):
if sequence and orig_path.endswith("/") and params.path != "/":
params.path += "/"
return func(self, params)
self.show_output("Path %s already exists", params.path)
return wrapper
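# These decorators are typically stacked on command methods, e.g. (illustrative only):
#
#   @connected
#   @ensure_params(Required("path"))
#   @check_paths_exists("path")
#   def do_example(self, params): ...
#
# so a command only runs with a live connection, parsed params and resolved, existing paths.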
class BadJSON(Exception):
pass
def json_deserialize(data):
if data is None:
raise BadJSON()
try:
obj = json.loads(data)
except ValueError:
raise BadJSON()
return obj
# pylint: disable=R0904
class Shell(XCmd):
    CONF_PATH = os.path.join(os.path.expanduser("~"), ".zk_shell")
DEFAULT_CONF = Conf(
ConfVar(
"chkzk_stat_retries",
"Retries when running stat command on a server",
10
),
ConfVar(
"chkzk_znode_delta",
"Difference in znodes to claim inconsistency between servers",
100
),
ConfVar(
"chkzk_ephemeral_delta",
"Difference in ephemerals to claim inconsistency between servers",
50
),
ConfVar(
"chkzk_datasize_delta",
"Difference in datasize to claim inconsistency between servers",
1000
),
ConfVar(
"chkzk_session_delta",
"Difference in sessions to claim inconsistency between servers",
150
),
ConfVar(
"chkzk_zxid_delta",
"Difference in zxids to claim inconsistency between servers",
200
)
)
""" main class """
def __init__(self,
hosts=None,
timeout=10.0,
output=sys.stdout,
setup_readline=True,
asynchronous=True,
read_only=False,
tunnel=None,
zk_client=None):
XCmd.__init__(self, None, setup_readline, output)
self._hosts = hosts if hosts else []
self._connect_timeout = float(timeout)
self._read_only = read_only
self._asynchronous = asynchronous
self._zk = None
self._txn = None # holds the current transaction, if any
self.connected = False
self.state_transitions_enabled = True
self._tunnel = tunnel
if hosts or zk_client:
self._connect(self._hosts, zk_client)
if not self.connected:
self.update_curdir("/")
def _complete_path(self, cmd_param_text, full_cmd, *_):
""" completes paths """
if full_cmd.endswith(" "):
cmd_param, path = " ", " "
else:
pieces = shlex.split(full_cmd)
if len(pieces) > 1:
cmd_param = pieces[-1]
else:
cmd_param = cmd_param_text
path = cmd_param.rstrip("/") if cmd_param != "/" else "/"
if re.match(r"^\s*$", path):
return self._zk.get_children(self.curdir)
rpath = self.resolve_path(path)
if self._zk.exists(rpath):
opts = [os.path.join(path, znode) for znode in self._zk.get_children(rpath)]
else:
parent, child = os.path.dirname(rpath), os.path.basename(rpath)
relpath = os.path.dirname(path)
to_rel = lambda n: os.path.join(relpath, n) if relpath != "" else n
opts = [to_rel(n) for n in self._zk.get_children(parent) if n.startswith(child)]
offs = len(cmd_param) - len(cmd_param_text)
return [opt[offs:] for opt in opts]
@property
def client(self):
""" the connected ZK client, if any """
return self._zk
@property
def server_endpoint(self):
""" the literal endpoint for the currently connected server """
return "%s:%s" % self._zk.server if self.connected else ""
@connected
@ensure_params(Required("scheme"), Required("credential"))
def do_add_auth(self, params):
"""
\x1b[1mNAME\x1b[0m
add_auth - Authenticates the session
\x1b[1mSYNOPSIS\x1b[0m
add_auth <scheme> <credential>
\x1b[1mEXAMPLES\x1b[0m
> add_auth digest super:s3cr3t
"""
self._zk.add_auth(params.scheme, params.credential)
def complete_add_auth(self, cmd_param_text, full_cmd, *rest):
completers = [partial(complete_values, ["digest"])]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Required("path"), Required("acls"), LabeledBooleanOptional("recursive"))
@check_paths_exists("path")
def do_set_acls(self, params):
"""
\x1b[1mNAME\x1b[0m
set_acls - Sets ACLs for a given path
\x1b[1mSYNOPSIS\x1b[0m
set_acls <path> <acls> [recursive]
\x1b[1mOPTIONS\x1b[0m
* recursive: recursively set the acls on the children
\x1b[1mEXAMPLES\x1b[0m
> set_acls /some/path 'world:anyone:r digest:user:aRxISyaKnTP2+OZ9OmQLkq04bvo=:cdrwa'
> set_acls /some/path 'world:anyone:r username_password:user:p@ass0rd:cdrwa'
> set_acls /path 'world:anyone:r' true
"""
try:
acls = ACLReader.extract(shlex.split(params.acls))
except ACLReader.BadACL as ex:
self.show_output("Failed to set ACLs: %s.", ex)
return
def set_acls(path):
try:
self._zk.set_acls(path, acls)
except (NoNodeError, BadVersionError, InvalidACLError, ZookeeperError) as ex:
self.show_output("Failed to set ACLs: %s. Error: %s", str(acls), str(ex))
if params.recursive:
for cpath, _ in self._zk.tree(params.path, 0, full_path=True):
set_acls(cpath)
set_acls(params.path)
def complete_set_acls(self, cmd_param_text, full_cmd, *rest):
""" FIXME: complete inside a quoted param is broken """
possible_acl = [
"digest:",
"username_password:",
"world:anyone:c",
"world:anyone:cd",
"world:anyone:cdr",
"world:anyone:cdrw",
"world:anyone:cdrwa",
]
complete_acl = partial(complete_values, possible_acl)
completers = [self._complete_path, complete_acl, complete_labeled_boolean("recursive")]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@interruptible
@ensure_params(Required("path"), IntegerOptional("depth", -1), LabeledBooleanOptional("ephemerals"))
@check_paths_exists("path")
def do_get_acls(self, params):
"""
\x1b[1mNAME\x1b[0m
get_acls - Gets ACLs for a given path
\x1b[1mSYNOPSIS\x1b[0m
get_acls <path> [depth] [ephemerals]
\x1b[1mOPTIONS\x1b[0m
* depth: -1 is no recursion, 0 is infinite recursion, N > 0 is up to N levels (default: 0)
* ephemerals: include ephemerals (default: false)
\x1b[1mEXAMPLES\x1b[0m
> get_acls /zookeeper
[ACL(perms=31, acl_list=['ALL'], id=Id(scheme=u'world', id=u'anyone'))]
> get_acls /zookeeper -1
/zookeeper: [ACL(perms=31, acl_list=['ALL'], id=Id(scheme=u'world', id=u'anyone'))]
/zookeeper/config: [ACL(perms=31, acl_list=['ALL'], id=Id(scheme=u'world', id=u'anyone'))]
/zookeeper/quota: [ACL(perms=31, acl_list=['ALL'], id=Id(scheme=u'world', id=u'anyone'))]
"""
def replace(plist, oldv, newv):
try:
plist.remove(oldv)
plist.insert(0, newv)
except ValueError:
pass
for path, acls in self._zk.get_acls_recursive(params.path, params.depth, params.ephemerals):
replace(acls, READ_ACL_UNSAFE[0], "WORLD_READ")
replace(acls, OPEN_ACL_UNSAFE[0], "WORLD_ALL")
self.show_output("%s: %s", path, acls)
def complete_get_acls(self, cmd_param_text, full_cmd, *rest):
complete_depth = partial(complete_values, [str(i) for i in range(-1, 11)])
completers = [self._complete_path, complete_depth, complete_labeled_boolean("ephemerals")]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Optional("path"), LabeledBooleanOptional("watch"), Optional("sep", "\n"))
@check_paths_exists("path")
def do_ls(self, params):
"""
\x1b[1mNAME\x1b[0m
ls - Lists the znodes for the given <path>
\x1b[1mSYNOPSIS\x1b[0m
ls <path> [watch] [sep]
\x1b[1mOPTIONS\x1b[0m
* watch: set a (child) watch on the path (default: false)
* sep: separator to be used (default: '\\n')
\x1b[1mEXAMPLES\x1b[0m
> ls /
configs
zookeeper
Setting a watch:
> ls / true
configs
zookeeper
> create /foo 'bar'
WatchedEvent(type='CHILD', state='CONNECTED', path=u'/')
> ls / false ,
configs,zookeeper
"""
watcher = lambda evt: self.show_output(str(evt))
kwargs = {"watch": watcher} if params.watch else {}
znodes = self._zk.get_children(params.path, **kwargs)
self.show_output(params.sep.join(sorted(znodes)))
def complete_ls(self, cmd_param_text, full_cmd, *rest):
completers = [self._complete_path, complete_labeled_boolean("watch")]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@interruptible
@ensure_params(Required("command"), Required("path"), Optional("debug"), Optional("sleep"))
@check_paths_exists("path")
def do_watch(self, params):
"""
\x1b[1mNAME\x1b[0m
watch - Recursively watch for all changes under a path.
\x1b[1mSYNOPSIS\x1b[0m
watch <start|stop|stats> <path> [options]
\x1b[1mDESCRIPTION\x1b[0m
watch start <path> [debug] [depth]
with debug=true, print watches as they fire. depth is
the level for recursively setting watches:
* -1: recurse all the way
* 0: don't recurse, only watch the given path
* > 0: recurse up to <level> children
watch stats <path> [repeat] [sleep]
with repeat=0 this command will loop until interrupted. sleep sets
the pause duration in between each iteration.
watch stop <path>
\x1b[1mEXAMPLES\x1b[0m
> watch start /foo/bar
> watch stop /foo/bar
> watch stats /foo/bar
"""
wm = get_watch_manager(self._zk)
if params.command == "start":
debug = to_bool(params.debug)
children = to_int(params.sleep, -1)
wm.add(params.path, debug, children)
elif params.command == "stop":
wm.remove(params.path)
elif params.command == "stats":
repeat = to_int(params.debug, 1)
sleep = to_int(params.sleep, 1)
if repeat == 0:
while True:
wm.stats(params.path)
time.sleep(sleep)
else:
for _ in range(0, repeat):
wm.stats(params.path)
time.sleep(sleep)
else:
self.show_output("watch <start|stop|stats> <path> [verbose]")
def complete_watch(self, cmd_param_text, full_cmd, *rest):
complete_cmd = partial(complete_values, ["start", "stats", "stop"])
complete_sleep = partial(complete_values, [str(i) for i in range(-1, 11)])
completers = [complete_cmd, self._complete_path, complete_boolean, complete_sleep]
return complete(completers, cmd_param_text, full_cmd, *rest)
@ensure_params(
Required("src"),
Required("dst"),
LabeledBooleanOptional("recursive"),
LabeledBooleanOptional("overwrite"),
LabeledBooleanOptional("asynchronous"),
LabeledBooleanOptional("verbose"),
IntegerOptional("max_items", 0)
)
def do_cp(self, params):
"""
\x1b[1mNAME\x1b[0m
cp - Copy from/to local/remote or remote/remote paths
\x1b[1mSYNOPSIS\x1b[0m
cp <src> <dst> [recursive] [overwrite] [asynchronous] [verbose] [max_items]
\x1b[1mDESCRIPTION\x1b[0m
src and dst can be:
/some/path (in the connected server)
zk://[scheme:user:passwd@]host/<path>
json://!some!path!backup.json/some/path
file:///some/file
        with a few restrictions. Given the semantic differences between znodes and filesystem
        directories, recursive copying from znodes to a filesystem could lose data, but copying
        to a JSON file works just fine.
\x1b[1mOPTIONS\x1b[0m
* recursive: recursively copy src (default: false)
* overwrite: overwrite the dst path (default: false)
* asynchronous: do asynchronous copies (default: false)
* verbose: verbose output of every path (default: false)
* max_items: max number of paths to copy (0 is infinite) (default: 0)
\x1b[1mEXAMPLES\x1b[0m
> cp /some/znode /backup/copy-znode # local
> cp /some/znode zk://digest:bernie:[email protected]/backup true true
> cp /some/znode json://!home!user!backup.json/ true true
> cp file:///tmp/file /some/zone # fs to zk
"""
try:
self.copy(params, params.recursive, params.overwrite, params.max_items, False)
except AuthFailedError:
self.show_output("Authentication failed.")
def complete_cp(self, cmd_param_text, full_cmd, *rest):
complete_max = partial(complete_values, [str(i) for i in range(0, 11)])
completers = [
self._complete_path,
self._complete_path,
complete_labeled_boolean("recursive"),
complete_labeled_boolean("overwrite"),
complete_labeled_boolean("asynchronous"),
complete_labeled_boolean("verbose"),
complete_max
]
return complete(completers, cmd_param_text, full_cmd, *rest)
@ensure_params(
Required("src"),
Required("dst"),
LabeledBooleanOptional("asynchronous"),
LabeledBooleanOptional("verbose"),
LabeledBooleanOptional("skip_prompt")
)
def do_mirror(self, params):
"""
\x1b[1mNAME\x1b[0m
mirror - Mirrors from/to local/remote or remote/remote paths
\x1b[1mSYNOPSIS\x1b[0m
mirror <src> <dst> [async] [verbose] [skip_prompt]
\x1b[1mDESCRIPTION\x1b[0m
src and dst can be:
/some/path (in the connected server)
            zk://[scheme:user:passwd@]host/<path>
json://!some!path!backup.json/some/path
        with a few restrictions. Given the semantic differences between znodes and filesystem
        directories, recursive copying from znodes to a filesystem could lose data, but copying
        to a JSON file works just fine.
The dst subtree will be modified to look the same as the src subtree with the exception
of ephemeral nodes.
\x1b[1mOPTIONS\x1b[0m
* async: do asynchronous copies (default: false)
* verbose: verbose output of every path (default: false)
* skip_prompt: don't ask for confirmation (default: false)
\x1b[1mEXAMPLES\x1b[0m
> mirror /some/znode /backup/copy-znode # local
> mirror /some/path json://!home!user!backup.json/ true true
"""
question = "Are you sure you want to replace %s with %s?" % (params.dst, params.src)
if params.skip_prompt or self.prompt_yes_no(question):
self.copy(params, True, True, 0, True)
def complete_mirror(self, cmd_param_text, full_cmd, *rest):
completers = [
self._complete_path,
self._complete_path,
complete_labeled_boolean("asynchronous"),
complete_labeled_boolean("verbose"),
complete_labeled_boolean("skip_prompt")
]
return complete(completers, cmd_param_text, full_cmd, *rest)
def copy(self, params, recursive, overwrite, max_items, mirror):
# default to zk://connected_host, if connected
src_connected_zk = dst_connected_zk = False
if self.connected:
zk_url = self._zk.zk_url()
# if these are local paths, make them absolute paths
if not re.match(r"^\w+://", params.src):
params.src = "%s%s" % (zk_url, self.resolve_path(params.src))
src_connected_zk = True
if not re.match(r"^\w+://", params.dst):
params.dst = "%s%s" % (zk_url, self.resolve_path(params.dst))
dst_connected_zk = True
try:
if mirror and not recursive:
raise CopyError("Mirroring must be recursive", True)
if mirror and not overwrite:
raise CopyError("Mirroring must overwrite", True)
if mirror and not max_items == 0:
raise CopyError("Mirroring must not have a max items limit", True)
src = Proxy.from_string(params.src, True, params.asynchronous, params.verbose)
if src_connected_zk:
src.need_client = False
src.client = self._zk
dst = Proxy.from_string(params.dst,
exists=None if overwrite else False,
asynchronous=params.asynchronous,
verbose=params.verbose)
if dst_connected_zk:
dst.need_client = False
dst.client = self._zk
src.copy(dst, recursive, max_items, mirror)
except CopyError as ex:
if ex.is_early_error:
msg = str(ex)
else:
msg = ("%s failed; "
"it may have partially completed. To return to a "
"stable state, either fix the issue and re-run the "
"command or manually revert.\nFailure reason:"
"\n%s") % ("Copy" if not mirror else "Mirror", str(ex))
self.show_output(msg)
@connected
@interruptible
@ensure_params(Optional("path"), IntegerOptional("max_depth"))
@check_paths_exists("path")
def do_tree(self, params):
"""
\x1b[1mNAME\x1b[0m
tree - Print the tree under a given path
\x1b[1mSYNOPSIS\x1b[0m
tree [path] [max_depth]
\x1b[1mOPTIONS\x1b[0m
* path: the path (default: cwd)
* max_depth: max recursion limit (0 is no limit) (default: 0)
\x1b[1mEXAMPLES\x1b[0m
> tree
.
├── zookeeper
│ ├── config
│ ├── quota
> tree 1
.
├── zookeeper
├── foo
├── bar
"""
self.show_output(".")
for child, level in self._zk.tree(params.path, params.max_depth):
self.show_output(u"%s├── %s", u"│ " * level, child)
def complete_tree(self, cmd_param_text, full_cmd, *rest):
complete_depth = partial(complete_values, [str(i) for i in range(0, 11)])
completers = [self._complete_path, complete_depth]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@interruptible
@ensure_params(Optional("path"), IntegerOptional("depth", 1))
@check_paths_exists("path")
def do_child_count(self, params):
"""
\x1b[1mNAME\x1b[0m
child_count - Prints the child count for paths
\x1b[1mSYNOPSIS\x1b[0m
child_count [path] [depth]
\x1b[1mOPTIONS\x1b[0m
* path: the path (default: cwd)
* depth: max recursion limit (0 is no limit) (default: 1)
\x1b[1mEXAMPLES\x1b[0m
> child_count /
/zookeeper: 2
/foo: 0
/bar: 3
"""
for child, level in self._zk.tree(params.path, params.depth, full_path=True):
self.show_output("%s: %d", child, self._zk.child_count(child))
def complete_child_count(self, cmd_param_text, full_cmd, *rest):
complete_depth = partial(complete_values, [str(i) for i in range(1, 11)])
completers = [self._complete_path, complete_depth]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Optional("path"))
@check_paths_exists("path")
def do_du(self, params):
"""
\x1b[1mNAME\x1b[0m
du - Total number of bytes under a path
\x1b[1mSYNOPSIS\x1b[0m
du [path]
\x1b[1mOPTIONS\x1b[0m
* path: the path (default: cwd)
\x1b[1mEXAMPLES\x1b[0m
> du /
90
"""
self.show_output(pretty_bytes(self._zk.du(params.path)))
complete_du = _complete_path
@connected
@ensure_params(Optional("path"), Required("match"))
@check_paths_exists("path")
def do_find(self, params):
"""
\x1b[1mNAME\x1b[0m
find - Find znodes whose path matches a given text
\x1b[1mSYNOPSIS\x1b[0m
find [path] <match>
\x1b[1mOPTIONS\x1b[0m
* path: the path (default: cwd)
* match: the string to match in the paths
\x1b[1mEXAMPLES\x1b[0m
> find / foo
/foo2
/fooish/wayland
/fooish/xorg
/copy/foo
"""
for path in self._zk.find(params.path, params.match, 0):
self.show_output(path)
complete_find = _complete_path
@connected
@ensure_params(
Required("path"),
Required("pattern"),
LabeledBooleanOptional("inverse", default=False)
)
@check_paths_exists("path")
def do_child_matches(self, params):
"""
\x1b[1mNAME\x1b[0m
child_matches - Prints paths that have at least 1 child that matches <pattern>
\x1b[1mSYNOPSIS\x1b[0m
child_matches <path> <pattern> [inverse]
\x1b[1mOPTIONS\x1b[0m
* inverse: display paths which don't match (default: false)
\x1b[1mEXAMPLES\x1b[0m
> child_matches /services/registrations member_
/services/registrations/foo
/services/registrations/bar
...
"""
seen = set()
# we don't want to recurse once there's a child matching, hence exclude_recurse=
for path in self._zk.fast_tree(params.path, exclude_recurse=params.pattern):
parent, child = split(path)
if parent in seen:
continue
match = params.pattern in child
if params.inverse:
if not match:
self.show_output(parent)
seen.add(parent)
else:
if match:
self.show_output(parent)
seen.add(parent)
def complete_child_matches(self, cmd_param_text, full_cmd, *rest):
complete_pats = partial(complete_values, ["some-pattern"])
completers = [self._complete_path, complete_pats, complete_labeled_boolean("inverse")]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(
Optional("path"),
IntegerOptional("top", 0)
)
@check_paths_exists("path")
def do_summary(self, params):
"""
\x1b[1mNAME\x1b[0m
summary - Prints summarized details of a path's children
\x1b[1mSYNOPSIS\x1b[0m
summary [path] [top]
\x1b[1mDESCRIPTION\x1b[0m
The results are sorted by name.
\x1b[1mOPTIONS\x1b[0m
* path: the path (default: cwd)
* top: number of results to be displayed (0 is all) (default: 0)
\x1b[1mEXAMPLES\x1b[0m
> summary /services/registrations
Created Last modified Owner Name
Thu Oct 11 09:14:39 2014 Thu Oct 11 09:14:39 2014 - bar
Thu Oct 16 18:54:39 2014 Thu Oct 16 18:54:39 2014 - foo
Thu Oct 12 10:04:01 2014 Thu Oct 12 10:04:01 2014 0x14911e869aa0dc1 member_0000001
"""
self.show_output("%s%s%s%s",
"Created".ljust(32),
"Last modified".ljust(32),
"Owner".ljust(23),
"Name")
results = sorted(self._zk.stat_map(params.path))
# what slice do we want?
if params.top == 0:
start, end = 0, len(results)
elif params.top > 0:
start, end = 0, params.top if params.top < len(results) else len(results)
else:
start = len(results) + params.top if abs(params.top) < len(results) else 0
end = len(results)
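# strip the parent path (and its separator) so only each child's relative name is printed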
offs = 1 if params.path == "/" else len(params.path) + 1
for i in range(start, end):
path, stat = results[i]
self.show_output(
"%s%s%s%s",
time.ctime(stat.created).ljust(32),
time.ctime(stat.last_modified).ljust(32),
("0x%x" % stat.ephemeralOwner).ljust(23),
path[offs:]
)
def complete_summary(self, cmd_param_text, full_cmd, *rest):
complete_top = partial(complete_values, [str(i) for i in range(1, 11)])
completers = [self._complete_path, complete_top]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Optional("path"), Required("match"))
@check_paths_exists("path")
def do_ifind(self, params):
"""
\x1b[1mNAME\x1b[0m
ifind - Find znodes whose path (insensitively) matches a given text
\x1b[1mSYNOPSIS\x1b[0m
ifind [path] <match>
\x1b[1mOPTIONS\x1b[0m
* path: the path (default: cwd)
* match: the string to match in the paths
\x1b[1mEXAMPLES\x1b[0m
> ifind / fOO
/foo2
/FOOish/wayland
/fooish/xorg
/copy/Foo
"""
for path in self._zk.find(params.path, params.match, re.IGNORECASE):
self.show_output(path)
def complete_ifind(self, cmd_param_text, full_cmd, *rest):
complete_match = partial(complete_values, ["sometext"])
completers = [self._complete_path, complete_match]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Optional("path"), Required("content"), LabeledBooleanOptional("show_matches"))
@check_paths_exists("path")
def do_grep(self, params):
"""
\x1b[1mNAME\x1b[0m
grep - Prints znodes with a value matching the given text
\x1b[1mSYNOPSIS\x1b[0m
grep [path] <content> [show_matches]
\x1b[1mOPTIONS\x1b[0m
* path: the path (default: cwd)
* show_matches: show the content that matched (default: false)
\x1b[1mEXAMPLES\x1b[0m
> grep / unbound true
/passwd: unbound:x:992:991:Unbound DNS resolver:/etc/unbound:/sbin/nologin
/copy/passwd: unbound:x:992:991:Unbound DNS resolver:/etc/unbound:/sbin/nologin
"""
self.grep(params.path, params.content, 0, params.show_matches)
def complete_grep(self, cmd_param_text, full_cmd, *rest):
complete_content = partial(complete_values, ["sometext"])
completers = [self._complete_path, complete_content, complete_labeled_boolean("show_matches")]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Optional("path"), Required("content"), LabeledBooleanOptional("show_matches"))
@check_paths_exists("path")
def do_igrep(self, params):
"""
\x1b[1mNAME\x1b[0m
igrep - Prints znodes with a value matching the given text (ignoring case)
\x1b[1mSYNOPSIS\x1b[0m
igrep [path] <content> [show_matches]
\x1b[1mOPTIONS\x1b[0m
* path: the path (default: cwd)
* show_matches: show the content that matched (default: false)
\x1b[1mEXAMPLES\x1b[0m
> igrep / UNBound true
/passwd: unbound:x:992:991:Unbound DNS resolver:/etc/unbound:/sbin/nologin
/copy/passwd: unbound:x:992:991:Unbound DNS resolver:/etc/unbound:/sbin/nologin
"""
self.grep(params.path, params.content, re.IGNORECASE, params.show_matches)
complete_igrep = complete_grep
def grep(self, path, content, flags, show_matches):
for path, matches in self._zk.grep(path, content, flags):
if show_matches:
self.show_output("%s:", path)
for match in matches:
self.show_output(match)
else:
self.show_output(path)
@connected
@ensure_params(Optional("path", "/"))
@check_paths_exists("path")
def do_cd(self, params):
"""
\x1b[1mNAME\x1b[0m
cd - Change the working path
\x1b[1mSYNOPSIS\x1b[0m
cd [path]
\x1b[1mOPTIONS\x1b[0m
* path: the path; if path is '-', moves to the previous path (default: /)
\x1b[1mEXAMPLES\x1b[0m
> cd /foo/bar
> pwd
/foo/bar
> cd ..
> pwd
/foo
> cd -
> pwd
/foo/bar
> cd
> pwd
/
"""
self.update_curdir(params.path)
complete_cd = _complete_path
@connected
@ensure_params(Required("path"), LabeledBooleanOptional("watch"))
@check_paths_exists("path")
def do_get(self, params):
"""
\x1b[1mNAME\x1b[0m
get - Gets the znode's value
\x1b[1mSYNOPSIS\x1b[0m
get <path> [watch]
\x1b[1mOPTIONS\x1b[0m
* watch: set a (data) watch on the path (default: false)
\x1b[1mEXAMPLES\x1b[0m
> get /foo
bar
# sets a watch
> get /foo true
bar
# trigger the watch
> set /foo 'notbar'
WatchedEvent(type='CHANGED', state='CONNECTED', path=u'/foo')
"""
watcher = lambda evt: self.show_output(str(evt))
kwargs = {"watch": watcher} if params.watch else {}
value, _ = self._zk.get(params.path, **kwargs)
# maybe it's compressed?
if value is not None:
try:
value = zlib.decompress(value)
except Exception:
pass
self.show_output(value)
def complete_get(self, cmd_param_text, full_cmd, *rest):
completers = [self._complete_path, complete_labeled_boolean("watch")]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Required("path"), LabeledBooleanOptional("watch"), LabeledBooleanOptional("pretty_date"))
def do_exists(self, params):
"""
\x1b[1mNAME\x1b[0m
exists - Gets the znode's stat information
\x1b[1mSYNOPSIS\x1b[0m
exists <path> [watch] [pretty_date]
\x1b[1mOPTIONS\x1b[0m
* watch: set a (data) watch on the path (default: false)
* pretty_date: show dates in a human readable format (default: false)
\x1b[1mEXAMPLES\x1b[0m
> exists /foo
Stat(
czxid=101,
mzxid=102,
ctime=1382820644375,
mtime=1382820693801,
version=1,
cversion=0,
aversion=0,
ephemeralOwner=0,
dataLength=6,
numChildren=0,
pzxid=101
)
# sets a watch
> exists /foo true
...
# trigger the watch
> rm /foo
WatchedEvent(type='DELETED', state='CONNECTED', path=u'/foo')
"""
watcher = lambda evt: self.show_output(str(evt))
kwargs = {"watch": watcher} if params.watch else {}
pretty = params.pretty_date
path = self.resolve_path(params.path)
stat = self._zk.exists(path, **kwargs)
if stat:
session = stat.ephemeralOwner if stat.ephemeralOwner else 0
self.show_output("Stat(")
self.show_output(" czxid=0x%x", stat.czxid)
self.show_output(" mzxid=0x%x", stat.mzxid)
self.show_output(" ctime=%s", time.ctime(stat.created) if pretty else stat.ctime)
self.show_output(" mtime=%s", time.ctime(stat.last_modified) if pretty else stat.mtime)
self.show_output(" version=%s", stat.version)
self.show_output(" cversion=%s", stat.cversion)
self.show_output(" aversion=%s", stat.aversion)
self.show_output(" ephemeralOwner=0x%x", session)
self.show_output(" dataLength=%s", stat.dataLength)
self.show_output(" numChildren=%s", stat.numChildren)
self.show_output(" pzxid=0x%x", stat.pzxid)
self.show_output(")")
else:
self.show_output("Path %s doesn't exist", params.path)
def complete_exists(self, cmd_param_text, full_cmd, *rest):
completers = [
self._complete_path,
complete_labeled_boolean("watch"),
complete_labeled_boolean("pretty_date")
]
return complete(completers, cmd_param_text, full_cmd, *rest)
def do_stat(self, *args, **kwargs):
"""
An alias for exists.
"""
self.do_exists(*args, **kwargs)
def complete_stat(self, *args, **kwargs):
return self.complete_exists(*args, **kwargs)
@connected
@ensure_params(
Required("path"),
Required("value"),
LabeledBooleanOptional("ephemeral"),
LabeledBooleanOptional("sequence"),
LabeledBooleanOptional("recursive"),
LabeledBooleanOptional("asynchronous"),
)
@check_path_absent
def do_create(self, params):
"""
\x1b[1mNAME\x1b[0m
create - Creates a znode
\x1b[1mSYNOPSIS\x1b[0m
create <path> <value> [ephemeral] [sequence] [recursive] [async]
\x1b[1mOPTIONS\x1b[0m
* ephemeral: make the znode ephemeral (default: false)
* sequence: make the znode sequential (default: false)
* recursive: recursively create the path (default: false)
* async: don't block waiting on the result (default: false)
\x1b[1mEXAMPLES\x1b[0m
> create /foo 'bar'
# create an ephemeral znode
> create /foo1 '' true
# create an ephemeral|sequential znode
> create /foo1 '' true true
# recursively create a path
> create /very/long/path/here '' false false true
# check the new subtree
> tree
.
├── zookeeper
│ ├── config
│ ├── quota
├── very
│ ├── long
│ │ ├── path
│ │ │ ├── here
"""
try:
kwargs = {"acl": None, "ephemeral": params.ephemeral, "sequence": params.sequence}
if not self.in_transaction:
kwargs["makepath"] = params.recursive
if params.asynchronous and not self.in_transaction:
self.client_context.create_async(params.path, decoded(params.value), **kwargs)
else:
self.client_context.create(params.path, decoded(params.value), **kwargs)
except NodeExistsError:
self.show_output("Path %s exists", params.path)
except NoNodeError:
self.show_output("Missing path in %s (try recursive?)", params.path)
def complete_create(self, cmd_param_text, full_cmd, *rest):
complete_value = partial(complete_values, ["somevalue"])
completers = [
self._complete_path,
complete_value,
complete_labeled_boolean("ephemeral"),
complete_labeled_boolean("sequence"),
complete_labeled_boolean("recursive"),
complete_labeled_boolean("asynchronous"),
]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Required("path"), Required("value"), IntegerOptional("version", -1))
@check_paths_exists("path")
def do_set(self, params):
"""
\x1b[1mNAME\x1b[0m
set - Updates the znode's value
\x1b[1mSYNOPSIS\x1b[0m
set <path> <value> [version]
\x1b[1mOPTIONS\x1b[0m
* version: only update if version matches (default: -1)
\x1b[1mEXAMPLES\x1b[0m
> set /foo 'bar'
> set /foo 'verybar' 3
"""
self.set(params.path, decoded(params.value), version=params.version)
def complete_set(self, cmd_param_text, full_cmd, *rest):
""" TODO: suggest the old value & the current version """
complete_value = partial(complete_values, ["updated-value"])
complete_version = partial(complete_values, [str(i) for i in range(1, 11)])
completers = [self._complete_path, complete_value, complete_version]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Required("path"), IntegerOptional("version", -1))
@check_paths_exists("path")
def do_zero(self, params):
"""
\x1b[1mNAME\x1b[0m
zero - Sets the znode's value to None (no bytes)
\x1b[1mSYNOPSIS\x1b[0m
zero <path> [version]
\x1b[1mOPTIONS\x1b[0m
* version: only update if version matches (default: -1)
\x1b[1mEXAMPLES\x1b[0m
> zero /foo
> zero /foo 3
"""
self.set(params.path, None, version=params.version)
def complete_zero(self, cmd_param_text, full_cmd, *rest):
""" TODO: suggest the current version """
complete_version = partial(complete_values, [str(i) for i in range(1, 11)])
completers = [self._complete_path, complete_version]
return complete(completers, cmd_param_text, full_cmd, *rest)
def set(self, path, value, version):
""" sets a znode's data """
if self.in_transaction:
self.client_context.set_data(path, value, version=version)
else:
self.client_context.set(path, value, version=version)
@connected
@ensure_params(Multi("paths"))
@check_paths_exists("paths")
def do_rm(self, params):
"""
\x1b[1mNAME\x1b[0m
rm - Remove the znode
\x1b[1mSYNOPSIS\x1b[0m
rm <path> [path] [path] ... [path]
\x1b[1mEXAMPLES\x1b[0m
> rm /foo
> rm /foo /bar
"""
for path in params.paths:
try:
self.client_context.delete(path)
except NotEmptyError:
self.show_output("%s is not empty.", path)
except NoNodeError:
self.show_output("%s doesn't exist.", path)
def complete_rm(self, cmd_param_text, full_cmd, *rest):
completers = [self._complete_path for i in range(0, 10)]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Required("path"), IntegerRequired("version"))
def do_check(self, params):
"""
\x1b[1mNAME\x1b[0m
check - Checks that a path is at a given version (only works within a transaction)
\x1b[1mSYNOPSIS\x1b[0m
check <path> <version>
\x1b[1mEXAMPLES\x1b[0m
> txn 'create /foo "start"' 'check /foo 0' 'set /foo "end"' 'rm /foo 1'
"""
if not self.in_transaction:
return
self.client_context.check(params.path, params.version)
@connected
@ensure_params(Multi("cmds"))
def do_txn(self, params):
"""
\x1b[1mNAME\x1b[0m
txn - Create and execute a transaction
\x1b[1mSYNOPSIS\x1b[0m
txn <cmd> [cmd] [cmd] ... [cmd]
\x1b[1mDESCRIPTION\x1b[0m
Allowed cmds are check, create, rm and set. Check parameters are:
check <path> <version>
For create, rm and set see their help menu for their respective parameters.
\x1b[1mEXAMPLES\x1b[0m
> txn 'create /foo "start"' 'check /foo 0' 'set /foo "end"' 'rm /foo 1'
"""
try:
with self.transaction():
for cmd in params.cmds:
try:
self.onecmd(cmd)
except AttributeError:
# silently swallow unrecognized commands
pass
except BadVersionError:
self.show_output("Bad version.")
except NoNodeError:
self.show_output("Missing path.")
except NodeExistsError:
self.show_output("One of the paths exists.")
def transaction(self):
class TransactionInProgress(Exception): pass
class TransactionNotStarted(Exception): pass
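# context manager: starts a transaction on __enter__, commits and clears it on __exit__,
# and refuses to nest or to exit without a transaction in progress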
class Transaction(object):
def __init__(self, shell):
self._shell = shell
def __enter__(self):
if self._shell._txn is not None:
raise TransactionInProgress()
self._shell._txn = self._shell._zk.transaction()
def __exit__(self, type, value, traceback):
if self._shell._txn is None:
raise TransactionNotStarted()
try:
self._shell._txn.commit()
finally:
self._shell._txn = None
return Transaction(self)
@property
def client_context(self):
""" checks if we are within a transaction or not """
return self._txn if self.in_transaction else self._zk
@property
def in_transaction(self):
""" are we inside a transaction? """
return self._txn is not None
def complete_txn(self, cmd_param_text, full_cmd, *rest):
completers = [self._complete_path for i in range(0, 10)]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Optional("match"))
def do_session_info(self, params):
"""
\x1b[1mNAME\x1b[0m
session_info - Shows information about the current session
\x1b[1mSYNOPSIS\x1b[0m
session_info [match]
\x1b[1mOPTIONS\x1b[0m
* match: only include lines that match (default: '')
\x1b[1mEXAMPLES\x1b[0m
> session_info
state=CONNECTED
xid=4
last_zxid=0x000000505f8be5b3
timeout=10000
client=('127.0.0.1', 60348)
server=('127.0.0.1', 2181)
"""
fmt_str = """state=%s
sessionid=%s
auth_info=%s
protocol_version=%d
xid=%d
last_zxid=0x%.16x
timeout=%d
client=%s
server=%s
data_watches=%s
child_watches=%s"""
content = fmt_str % (
self._zk.client_state,
self._zk.sessionid,
list(self._zk.auth_data),
self._zk.protocol_version,
self._zk.xid,
self._zk.last_zxid,
self._zk.session_timeout,
self._zk.client,
self._zk.server,
",".join(self._zk.data_watches),
",".join(self._zk.child_watches)
)
output = get_matching(content, params.match)
self.show_output(output)
def complete_session_info(self, cmd_param_text, full_cmd, *rest):
values = [
"sessionid",
"auth_info",
"protocol_version",
"xid",
"last_zxid",
"timeout",
"client",
"server",
"data_watches",
"child_watches"
]
completers = [partial(complete_values, values)]
return complete(completers, cmd_param_text, full_cmd, *rest)
@ensure_params(Optional("hosts"), Optional("match"))
def do_mntr(self, params):
"""
\x1b[1mNAME\x1b[0m
mntr - Executes the mntr four-letter command
\x1b[1mSYNOPSIS\x1b[0m
mntr [hosts] [match]
\x1b[1mOPTIONS\x1b[0m
* hosts: the hosts to connect to (default: the current connected host)
* match: only output lines that include the given string (default: '')
\x1b[1mEXAMPLES\x1b[0m
> mntr
zk_version 3.5.0--1, built on 11/14/2014 10:45 GMT
zk_min_latency 0
zk_max_latency 8
zk_avg_latency 0
"""
hosts = params.hosts if params.hosts != "" else None
if hosts is not None and invalid_hosts(hosts):
self.show_output("List of hosts has the wrong syntax.")
return
if self._zk is None:
self._zk = XClient()
try:
content = get_matching(self._zk.mntr(hosts), params.match)
self.show_output(content)
except XClient.CmdFailed as ex:
self.show_output(str(ex))
@ensure_params(Optional("hosts"), Optional("match"))
def do_cons(self, params):
"""
\x1b[1mNAME\x1b[0m
cons - Executes the cons four-letter command
\x1b[1mSYNOPSIS\x1b[0m
cons [hosts] [match]
\x1b[1mOPTIONS\x1b[0m
* hosts: the hosts to connect to (default: the current connected host)
* match: only output lines that include the given string (default: '')
\x1b[1mEXAMPLES\x1b[0m
> cons
/127.0.0.1:40535[0](queued=0,recved=1,sent=0)
...
"""
hosts = params.hosts if params.hosts != "" else None
if hosts is not None and invalid_hosts(hosts):
self.show_output("List of hosts has the wrong syntax.")
return
if self._zk is None:
self._zk = XClient()
try:
content = get_matching(self._zk.cons(hosts), params.match)
self.show_output(content)
except XClient.CmdFailed as ex:
self.show_output(str(ex))
@ensure_params(Optional("hosts"), Optional("match"))
def do_dump(self, params):
"""
\x1b[1mNAME\x1b[0m
dump - Executes the dump four-letter command
\x1b[1mSYNOPSIS\x1b[0m
dump [hosts] [match]
\x1b[1mOPTIONS\x1b[0m
* hosts: the hosts to connect to (default: the current connected host)
* match: only output lines that include the given string (default: '')
\x1b[1mEXAMPLES\x1b[0m
> dump
SessionTracker dump:
Session Sets (3)/(1):
0 expire at Fri Nov 14 02:49:52 PST 2014:
0 expire at Fri Nov 14 02:49:56 PST 2014:
1 expire at Fri Nov 14 02:50:00 PST 2014:
0x149adea89940107
ephemeral nodes dump:
Sessions with Ephemerals (0):
"""
hosts = params.hosts if params.hosts != "" else None
if hosts is not None and invalid_hosts(hosts):
self.show_output("List of hosts has the wrong syntax.")
return
if self._zk is None:
self._zk = XClient()
try:
content = get_matching(self._zk.dump(hosts), params.match)
self.show_output(content)
except XClient.CmdFailed as ex:
self.show_output(str(ex))
@ensure_params(
Required("hosts"),
LabeledBooleanOptional("verbose", default=False),
LabeledBooleanOptional("reverse_lookup")
)
def do_chkzk(self, params):
"""
\x1b[1mNAME\x1b[0m
chkzk - Consistency check for a cluster
\x1b[1mSYNOPSIS\x1b[0m
chkzk <server1,server2,...> [verbose] [reverse_lookup]
\x1b[1mOPTIONS\x1b[0m
* verbose: expose the values for each accounted stat (default: false)
* reverse_lookup: convert IPs back to hostnames (default: false)
\x1b[1mEXAMPLES\x1b[0m
> chkzk cluster.example.net
passed
> chkzk cluster.example.net true true
+-------------+-------------+-------------+-------------+-------------+-------------+
| | server1 | server2 | server3 | server4 | server5 |
+=============+=============+=============+=============+=============+=============+
| state | follower | follower | follower | follower | leader |
+-------------+-------------+-------------+-------------+-------------+-------------+
| znode count | 70061 | 70062 | 70161 | 70261 | 70061 |
+-------------+-------------+-------------+-------------+-------------+-------------+
| ephemerals | 60061 | 60062 | 60161 | 60261 | 60061 |
+-------------+-------------+-------------+-------------+-------------+-------------+
| data size | 1360061 | 1360062 | 1360161 | 1360261 | 1360061 |
+-------------+-------------+-------------+-------------+-------------+-------------+
| sessions | 40061 | 40062 | 40161 | 40261 | 40061 |
+-------------+-------------+-------------+-------------+-------------+-------------+
| zxid | 0xce1526bb7 | 0xce1526bb7 | 0xce1526bb7 | 0xce1526bb7 | 0xce1526bb7 |
+-------------+-------------+-------------+-------------+-------------+-------------+
"""
conf = self._conf
stat_retries = conf.get_int("chkzk_stat_retries", 10)
endpoints = set()
for host, port in hosts_to_endpoints(params.hosts):
for ip in get_ips(host, port):
endpoints.add("%s:%s" % (ip, port))
endpoints = sorted(endpoints)
values = []
states = ["state"] + ["-"] * len(endpoints)
values.append(states)
znodes = ["znode count"] + [-1] * len(endpoints)
values.append(znodes)
ephemerals = ["ephemerals"] + [-1] * len(endpoints)
values.append(ephemerals)
datasize = ["data size"] + [-1] * len(endpoints)
values.append(datasize)
sessions = ["sessions"] + [-1] * len(endpoints)
values.append(sessions)
zxids = ["zxid"] + [-1] * len(endpoints)
values.append(zxids)
if self._zk is None:
self._zk = XClient()
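# run mntr against a single endpoint and parse its output into a dict;
# unreachable endpoints simply yield an empty dict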
def mntr_values(endpoint):
vals = {}
try:
mntr = self._zk.mntr(endpoint)
for line in mntr.split("\n"):
k, v = line.split(None, 1)
vals[k] = v
except Exception:
pass
return vals
def fetch(endpoint, states, znodes, ephemerals, datasize, sessions, zxids, idx):
mntr = mntr_values(endpoint)
state = mntr.get("zk_server_state", "-")
znode_count = mntr.get("zk_znode_count", -1)
eph_count = mntr.get("zk_ephemerals_count", -1)
dsize = mntr.get("zk_approximate_data_size", -1)
session_count = mntr.get("zk_global_sessions", -1)
states[idx] = state
znodes[idx] = int(znode_count)
ephemerals[idx] = int(eph_count)
datasize[idx] = int(dsize)
sessions[idx] = int(session_count)
zxids[idx] = -1
try:
srvr = self._zk.cmd(hosts_to_endpoints(endpoint), "srvr")
for line in srvr.split("\n"):
if "Zxid:" in line:
zxids[idx] = int(line.split(None)[1], 0)
break
except Exception:
pass
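# query all endpoints concurrently; each worker fills its own column (idx) of the per-stat rows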
workers = []
for idx, endpoint in enumerate(endpoints, 1):
worker = Thread(
target=fetch,
args=(endpoint, states, znodes, ephemerals, datasize, sessions, zxids, idx)
)
worker.start()
workers.append(worker)
for worker in workers:
worker.join()
def color_outliers(group, delta, marker=lambda x: red(str(x))):
colored = False
outliers = find_outliers(group[1:], delta)
for outlier in outliers:
group[outlier + 1] = marker(group[outlier + 1])
colored = True
return colored
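# the check passes only if no stat deviates from its peers by more than the configured delta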
passed = True
passed = passed and not color_outliers(znodes, conf.get_int("chkzk_znode_delta", 100))
passed = passed and not color_outliers(ephemerals, conf.get_int("chkzk_ephemeral_delta", 50))
passed = passed and not color_outliers(datasize, conf.get_int("chkzk_datasize_delta", 1000))
passed = passed and not color_outliers(sessions, conf.get_int("chkzk_session_delta", 150))
passed = passed and not color_outliers(zxids, conf.get_int("chkzk_zxid_delta", 200), lambda x: red(str(hex(x))))
# convert zxids (that aren't outliers) back to hex strs
for i, zxid in enumerate(zxids[0:]):
zxids[i] = zxid if type(zxid) == str else hex(zxid)
if params.verbose:
if params.reverse_lookup:
def reverse_endpoint(endpoint):
ip = endpoint.rsplit(":", 1)[0]
try:
return socket.gethostbyaddr(ip)[0]
except socket.herror:
pass
return ip
endpoints = [reverse_endpoint(endp) for endp in endpoints]
headers = [""] + endpoints
table = tabulate(values, headers=headers, tablefmt="grid", stralign="right")
self.show_output("%s", table)
else:
self.show_output("%s", green("passed") if passed else red("failed"))
return passed
def complete_chkzk(self, cmd_param_text, full_cmd, *rest):
# TODO: store a list of used clusters
complete_cluster = partial(complete_values, ["localhost", "0"])
completers = [
complete_cluster,
complete_labeled_boolean("verbose"),
complete_labeled_boolean("reverse_lookup")
]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Multi("paths"))
@check_paths_exists("paths")
def do_rmr(self, params):
"""
\x1b[1mNAME\x1b[0m
rmr - Delete a path and all its children
\x1b[1mSYNOPSIS\x1b[0m
rmr <path> [path] [path] ... [path]
\x1b[1mEXAMPLES\x1b[0m
> rmr /foo
> rmr /foo /bar
"""
for path in params.paths:
self._zk.delete(path, recursive=True)
complete_rmr = complete_rm
@connected
@ensure_params(Required("path"))
@check_paths_exists("path")
def do_sync(self, params):
"""
\x1b[1mNAME\x1b[0m
sync - Forces the current server to sync with the rest of the cluster
\x1b[1mSYNOPSIS\x1b[0m
sync <path>
\x1b[1mOPTIONS\x1b[0m
* path: the path (ZooKeeper currently ignores this)
\x1b[1mEXAMPLES\x1b[0m
> sync /foo
"""
self._zk.sync(params.path)
complete_sync = _complete_path
@connected
@ensure_params(Required("path"), LabeledBooleanOptional("verbose"))
@check_paths_exists("path")
def do_child_watch(self, params):
"""
\x1b[1mNAME\x1b[0m
child_watch - Watch a path for child changes
\x1b[1mSYNOPSIS\x1b[0m
child_watch <path> [verbose]
\x1b[1mOPTIONS\x1b[0m
* verbose: prints list of znodes (default: false)
\x1b[1mEXAMPLES\x1b[0m
# only prints the current number of children
> child_watch /
# prints num of children along with znodes listing
> child_watch / true
"""
get_child_watcher(self._zk, print_func=self.show_output).update(
params.path, params.verbose)
def complete_child_watch(self, cmd_param_text, full_cmd, *rest):
completers = [self._complete_path, complete_labeled_boolean("verbose")]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Required("path_a"), Required("path_b"))
@check_paths_exists("path_a", "path_b")
def do_diff(self, params):
"""
\x1b[1mNAME\x1b[0m
diff - Display the differences between two paths
\x1b[1mSYNOPSIS\x1b[0m
diff <src> <dst>
\x1b[1mDESCRIPTION\x1b[0m
The output is interpreted as:
-- means the znode is missing in /new-configs
++ means the znode is new in /new-configs
-+ means the znode's content differs between /configs and /new-configs
\x1b[1mEXAMPLES\x1b[0m
> diff /configs /new-configs
-- service-x/hosts
++ service-x/hosts.json
-+ service-x/params
"""
count = 0
for count, (diff, path) in enumerate(self._zk.diff(params.path_a, params.path_b), 1):
if diff == -1:
self.show_output("-- %s", path)
elif diff == 0:
self.show_output("-+ %s", path)
elif diff == 1:
self.show_output("++ %s", path)
if count == 0:
self.show_output("Branches are equal.")
def complete_diff(self, cmd_param_text, full_cmd, *rest):
completers = [self._complete_path, self._complete_path]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Required("path"), LabeledBooleanOptional("recursive"))
@check_paths_exists("path")
def do_json_valid(self, params):
"""
\x1b[1mNAME\x1b[0m
json_valid - Checks znodes for valid JSON
\x1b[1mSYNOPSIS\x1b[0m
json_valid <path> [recursive]
\x1b[1mOPTIONS\x1b[0m
* recursive: recurse to all children (default: false)
\x1b[1mEXAMPLES\x1b[0m
> json_valid /some/valid/json_znode
yes.
> json_valid /some/invalid/json_znode
no.
> json_valid /configs true
/configs/a: yes.
/configs/b: no.
"""
def check_valid(path, print_path):
result = "no"
value, _ = self._zk.get(path)
if value is not None:
try:
json.loads(value)
result = "yes"
except ValueError:
pass
if print_path:
self.show_output("%s: %s.", os.path.basename(path), result)
else:
self.show_output("%s.", result)
if not params.recursive:
check_valid(params.path, False)
else:
for cpath, _ in self._zk.tree(params.path, 0, full_path=True):
check_valid(cpath, True)
def complete_json_valid(self, cmd_param_text, full_cmd, *rest):
completers = [self._complete_path, complete_labeled_boolean("recursive")]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Required("path"), LabeledBooleanOptional("recursive"))
@check_paths_exists("path")
def do_json_cat(self, params):
"""
\x1b[1mNAME\x1b[0m
json_cat - Pretty prints a znode's JSON
\x1b[1mSYNOPSIS\x1b[0m
json_cat <path> [recursive]
\x1b[1mOPTIONS\x1b[0m
* recursive: recurse to all children (default: false)
\x1b[1mEXAMPLES\x1b[0m
> json_cat /configs/clusters
{
"dc0": {
"network": "10.2.0.0/16",
},
.....
}
> json_cat /configs true
/configs/clusters:
{
"dc0": {
"network": "10.2.0.0/16",
},
.....
}
/configs/dns_servers:
[
"10.2.0.1",
"10.3.0.1"
]
"""
def json_output(path, print_path):
value, _ = self._zk.get(path)
if value is not None:
try:
value = json.dumps(json.loads(value), indent=4)
except ValueError:
pass
if print_path:
self.show_output("%s:\n%s", os.path.basename(path), value)
else:
self.show_output(value)
if not params.recursive:
json_output(params.path, False)
else:
for cpath, _ in self._zk.tree(params.path, 0, full_path=True):
json_output(cpath, True)
def complete_json_cat(self, cmd_param_text, full_cmd, *rest):
completers = [self._complete_path, complete_labeled_boolean("recursive")]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Required("path"), Required("keys"), LabeledBooleanOptional("recursive"))
@check_paths_exists("path")
def do_json_get(self, params):
"""
\x1b[1mNAME\x1b[0m
json_get - Get key (or keys, if nested) from a JSON object serialized in the given path
\x1b[1mSYNOPSIS\x1b[0m
json_get <path> <keys> [recursive]
\x1b[1mOPTIONS\x1b[0m
* recursive: recurse to all children (default: false)
\x1b[1mEXAMPLES\x1b[0m
> json_get /configs/primary_service endpoint.clientPort
32768
> json_get /configs endpoint.clientPort true
primary_service: 32768
secondary_service: 32769
# Use template strings to access various keys at once:
> json_get /configs/primary_service '#{endpoint.ipAddress}:#{endpoint.clientPort}'
10.2.2.3:32768
"""
try:
Keys.validate(params.keys)
except Keys.Bad as ex:
self.show_output(str(ex))
return
if params.recursive:
paths = self._zk.tree(params.path, 0, full_path=True)
print_path = True
else:
paths = [(params.path, 0)]
print_path = False
for cpath, _ in paths:
try:
jstr, _ = self._zk.get(cpath)
value = Keys.value(json_deserialize(jstr), params.keys)
if print_path:
self.show_output("%s: %s", os.path.basename(cpath), value)
else:
self.show_output(value)
except BadJSON as ex:
self.show_output("Path %s has bad JSON.", cpath)
except Keys.Missing as ex:
self.show_output("Path %s is missing key %s.", cpath, ex)
def complete_json_get(self, cmd_param_text, full_cmd, *rest):
""" TODO: prefetch & parse znodes & suggest keys """
complete_keys = partial(complete_values, ["key1", "key2", "#{key1.key2}"])
completers = [self._complete_path, complete_keys, complete_labeled_boolean("recursive")]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(
Required("path"),
Required("keys"),
Required("value"),
Required("value_type"),
LabeledBooleanOptional("confirm")
)
@check_paths_exists("path")
def do_json_set(self, params):
"""
\x1b[1mNAME\x1b[0m
json_set - Sets the value for the given (possibly nested) key on a JSON object serialized in the given path
\x1b[1mSYNOPSIS\x1b[0m
json_set <path> <keys> <value> <value_type> [confirm]
\x1b[1mDESCRIPTION\x1b[0m
If the key exists and the value is different, the znode will be updated with the key set to its new value.
If the key does not exist, it'll be created and the znode will be updated with the serialized version of
the new object. The value's type will be determined by the value_type parameter.
\x1b[1mEXAMPLES\x1b[0m
> create /props '{"a": {"b": 4}}'
> json_cat /props
{
"a": {
"b": 4
}
}
> json_set /props a.b 5 int
> json_cat /props
{
"a": {
"b": 5
}
}
> json_set /props a.c.d true bool
> json_cat /props
{
"a": {
"c": {
"d": true
},
"b": 5
}
}
"""
try:
Keys.validate(params.keys)
except Keys.Bad as ex:
self.show_output(str(ex))
return
try:
jstr, stat = self._zk.get(params.path)
obj_src = json_deserialize(jstr)
obj_dst = copy.deepcopy(obj_src)
# Cast value to its given type.
value = to_type(params.value, params.value_type)
Keys.set(obj_dst, params.keys, value)
if params.confirm:
a = json.dumps(obj_src, sort_keys=True, indent=4)
b = json.dumps(obj_dst, sort_keys=True, indent=4)
diff = difflib.unified_diff(a.split("\n"), b.split("\n"))
self.show_output("\n".join(diff))
if not self.prompt_yes_no("Apply update?"):
return
# Pass along the read version, to ensure we are updating what we read.
self.set(params.path, json.dumps(obj_dst), version=stat.version)
except BadJSON:
self.show_output("Path %s has bad JSON.", params.path)
except Keys.Missing as ex:
self.show_output("Path %s is missing key %s.", params.path, ex)
except ValueError:
self.show_output("Bad value_type")
complete_json_set = complete_json_get
@connected
@ensure_params(
Required("path"),
Multi("keys_values_types")
)
@check_paths_exists("path")
def do_json_set_many(self, params):
"""
\x1b[1mNAME\x1b[0m
json_set_many - like `json_set`, but for multiple key/value pairs
\x1b[1mSYNOPSIS\x1b[0m
json_set_many <path> <keys> <value> <value_type> <keys1> <value1> <value_type1> ...
\x1b[1mDESCRIPTION\x1b[0m
If the key exists and the value is different, the znode will be updated with the key set to its new value.
If the key does not exist, it'll be created and the znode will be updated with the serialized version of
the new object. The value's type will be determined by the value_type parameter.
This is an atomic operation: either all given keys are set in one ZK operation or none are.
\x1b[1mEXAMPLES\x1b[0m
> create /props '{"a": {"b": 4}}'
> json_cat /props
{
"a": {
"b": 4
}
}
> json_set_many /props a.b 5 int a.c.d true bool
> json_cat /props
{
"a": {
"c": {
"d": true
},
"b": 5
}
}
"""
# Ensure we have a balanced set of (key, value, type) tuples.
if len(params.keys_values_types) % 3 != 0:
self.show_output('Bad list of parameters')
return
for key, _, _ in grouper(params.keys_values_types, 3):
try:
Keys.validate(key)
except Keys.Bad as ex:
self.show_output(str(ex))
return
# Fetch & deserialize znode.
jstr, stat = self._zk.get(params.path)
try:
obj_src = json_deserialize(jstr)
except BadJSON:
self.show_output("Path %s has bad JSON.", params.path)
return
obj_dst = copy.deepcopy(obj_src)
# Cast values to their given type.
for key, value, ptype in grouper(params.keys_values_types, 3):
try:
Keys.set(obj_dst, key, to_type(value, ptype))
except Keys.Missing as ex:
self.show_output("Path %s is missing key %s.", params.path, ex)
return
except ValueError:
self.show_output("Bad value_type")
return
# Pass along the read version, to ensure we are updating what we read.
self.set(params.path, json.dumps(obj_dst), version=stat.version)
complete_json_set_many = complete_json_get
@connected
@ensure_params(
Required("path"),
Required("keys"),
Required("value"),
Required("value_type"),
LabeledBooleanOptional("confirm")
)
@check_paths_exists("path")
def do_json_append(self, params):
"""
\x1b[1mNAME\x1b[0m
json_append - append an element to a list
\x1b[1mSYNOPSIS\x1b[0m
json_append <path> <keys> <value> <value_type> [confirm]
\x1b[1mDESCRIPTION\x1b[0m
The key must exist within the serialized JSON object and be of type list, otherwise this command
will error out. The given value will be appended to the list and the znode will be updated with the
serialized version of the new object. The value's type will be determined by the <value_type> parameter.
This is an atomic operation: if the read version of the znode changed before the update completes,
this command will fail.
\x1b[1mEXAMPLES\x1b[0m
> create /settings '{"versions": ["v1", "v2"]}'
> json_cat /settings
{
"versions": [
"v1",
"v2"
]
}
> json_append /settings versions v3 str
> json_cat /settings
{
"versions": [
"v1",
"v2",
"v3"
]
}
"""
try:
Keys.validate(params.keys)
except Keys.Bad as ex:
self.show_output(str(ex))
return
try:
jstr, stat = self._zk.get(params.path)
obj_src = json_deserialize(jstr)
obj_dst = copy.deepcopy(obj_src)
plist = Keys.fetch(obj_dst, params.keys)
if not isinstance(plist, list):
self.show_output("%s is not a list.", params.keys)
return
# Cast value to its given type.
value = to_type(params.value, params.value_type)
plist.append(value)
if params.confirm:
a = json.dumps(obj_src, sort_keys=True, indent=4)
b = json.dumps(obj_dst, sort_keys=True, indent=4)
diff = difflib.unified_diff(a.split("\n"), b.split("\n"))
self.show_output("\n".join(diff))
if not self.prompt_yes_no("Apply update?"):
return
# Pass along the read version, to ensure we are updating what we read.
self.set(params.path, json.dumps(obj_dst), version=stat.version)
except BadJSON:
self.show_output("Path %s has bad JSON.", params.path)
except Keys.Missing as ex:
self.show_output("Path %s is missing key %s.", params.path, ex)
except ValueError:
self.show_output("Bad value_type")
complete_json_append = complete_json_get
@connected
@ensure_params(
Required("path"),
Required("keys"),
Required("value"),
Required("value_type"),
LabeledBooleanOptional("remove_all"),
LabeledBooleanOptional("confirm")
)
@check_paths_exists("path")
def do_json_remove(self, params):
"""
\x1b[1mNAME\x1b[0m
json_remove - remove occurrences of the given value from a list
\x1b[1mSYNOPSIS\x1b[0m
json_remove <path> <keys> <value> <value_type> [remove_all] [confirm]
\x1b[1mDESCRIPTION\x1b[0m
The key must exist within the serialized JSON object and be of type list, otherwise this command
will error out. The first occurrence of the value will be removed from the list. If the optional
parameter <remove_all> is true, then all occurrences will be removed. The value's type will be
determined by the <value_type> parameter.
The znode will be updated with the serialized version of the updated object.
This is an atomic operation: if the read version of the znode changed before the update completes,
this command will fail.
\x1b[1mEXAMPLES\x1b[0m
> create /settings '{"versions": ["v1", "v2", "v3"]}'
> json_cat /settings
{
"versions": [
"v1",
"v2",
"v3"
]
}
> json_remove /settings versions v2 str
> json_cat /settings
{
"versions": [
"v1",
"v3"
]
}
"""
try:
Keys.validate(params.keys)
except Keys.Bad as ex:
self.show_output(str(ex))
return
try:
jstr, stat = self._zk.get(params.path)
obj_src = json_deserialize(jstr)
obj_dst = copy.deepcopy(obj_src)
plist = Keys.fetch(obj_dst, params.keys)
if not isinstance(plist, list):
self.show_output("%s is not a list.", params.keys)
return
# Cast value to its given type.
value = to_type(params.value, params.value_type)
# Remove one or more occurrences of value.
while True:
try:
plist.remove(value)
if not params.remove_all:
break
except ValueError:
# no more remaining values.
break
if params.confirm:
a = json.dumps(obj_src, sort_keys=True, indent=4)
b = json.dumps(obj_dst, sort_keys=True, indent=4)
diff = difflib.unified_diff(a.split("\n"), b.split("\n"))
self.show_output("\n".join(diff))
if not self.prompt_yes_no("Apply update?"):
return
# Pass along the read version, to ensure we are updating what we read.
self.set(params.path, json.dumps(obj_dst), version=stat.version)
except BadJSON:
self.show_output("Path %s has bad JSON.", params.path)
except Keys.Missing as ex:
self.show_output("Path %s is missing key %s.", params.path, ex)
except ValueError:
self.show_output("Bad value_type")
complete_json_remove = complete_json_get
@connected
@ensure_params(
Required("path"),
Required("keys"),
IntegerOptional("top", 0),
IntegerOptional("minfreq", 1),
LabeledBooleanOptional("reverse", default=True),
LabeledBooleanOptional("report_errors", default=False),
LabeledBooleanOptional("print_path", default=False),
)
@check_paths_exists("path")
def do_json_count_values(self, params):
"""
\x1b[1mNAME\x1b[0m
json_count_values - Gets the frequency of the values associated with the given keys
\x1b[1mSYNOPSIS\x1b[0m
json_count_values <path> <keys> [top] [minfreq] [reverse] [report_errors] [print_path]
\x1b[1mOPTIONS\x1b[0m
* top: number of results to show (0 is all) (default: 0)
* minfreq: minimum frequency to be displayed (default: 1)
* reverse: sort in descending order (default: true)
* report_errors: report bad znodes (default: false)
* print_path: print the path if there are results (default: false)
\x1b[1mEXAMPLES\x1b[0m
> json_count_values /configs/primary_service endpoint.host
10.20.0.2 3
10.20.0.4 3
10.20.0.5 3
10.20.0.6 1
10.20.0.7 1
...
"""
try:
Keys.validate(params.keys)
except Keys.Bad as ex:
self.show_output(str(ex))
return
path_map = PathMap(self._zk, params.path)
values = defaultdict(int)
for path, data in path_map.get():
try:
value = Keys.value(json_deserialize(data), params.keys)
values[value] += 1
except BadJSON as ex:
if params.report_errors:
self.show_output("Path %s has bad JSON.", path)
except Keys.Missing as ex:
if params.report_errors:
self.show_output("Path %s is missing key %s.", path, ex)
results = sorted(values.items(), key=lambda item: item[1], reverse=params.reverse)
results = [r for r in results if r[1] >= params.minfreq]
# what slice do we want?
if params.top == 0:
start, end = 0, len(results)
elif params.top > 0:
start, end = 0, params.top if params.top < len(results) else len(results)
else:
start = len(results) + params.top if abs(params.top) < len(results) else 0
end = len(results)
if len(results) > 0 and params.print_path:
self.show_output(params.path)
for i in range(start, end):
value, frequency = results[i]
self.show_output("%s = %d", value, frequency)
# if no results were found we call it a failure (i.e.: exit(1) from --run-once)
if len(results) == 0:
return False
def complete_json_count_values(self, cmd_param_text, full_cmd, *rest):
complete_keys = partial(complete_values, ["key1", "key2", "#{key1.key2}"])
complete_top = partial(complete_values, [str(i) for i in range(1, 11)])
complete_freq = partial(complete_values, [str(i) for i in range(1, 11)])
completers = [
self._complete_path,
complete_keys,
complete_top,
complete_freq,
complete_labeled_boolean("reverse"),
complete_labeled_boolean("report_errors"),
complete_labeled_boolean("print_path")
]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(
Required("path"),
Required("keys"),
Optional("prefix", ""),
LabeledBooleanOptional("report_errors", default=False),
LabeledBooleanOptional("first", default=False)
)
@check_paths_exists("path")
def do_json_dupes_for_keys(self, params):
"""
\x1b[1mNAME\x1b[0m
json_dupes_for_keys - Gets the duplicate znodes for the given keys
\x1b[1mSYNOPSIS\x1b[0m
json_dupes_for_keys <path> <keys> [prefix] [report_errors] [first]
\x1b[1mDESCRIPTION\x1b[0m
Znodes with duplicated keys are sorted and all but the first (original) one
are printed.
\x1b[1mOPTIONS\x1b[0m
* prefix: only include znodes whose name starts with this prefix (default: '')
* report_errors: report bad znodes (i.e.: bad JSON or missing keys) (default: false)
* first: also print the first (non duplicated) znode (default: false)
\x1b[1mEXAMPLES\x1b[0m
> json_cat /configs/primary_service true
member_0000000186
{
"status": "ALIVE",
"serviceEndpoint": {
"http": {
"host": "10.0.0.2",
"port": 31994
}
},
"shard": 0
}
member_0000000187
{
"status": "ALIVE",
"serviceEndpoint": {
"http": {
"host": "10.0.0.2",
"port": 31994
}
},
"shard": 0
}
> json_dupes_for_keys /configs/primary_service shard
member_0000000187
"""
try:
Keys.validate(params.keys)
except Keys.Bad as ex:
self.show_output(str(ex))
return
path_map = PathMap(self._zk, params.path)
dupes_by_path = defaultdict(lambda: defaultdict(list))
for path, data in path_map.get():
parent, child = split(path)
if not child.startswith(params.prefix):
continue
try:
value = Keys.value(json_deserialize(data), params.keys)
dupes_by_path[parent][value].append(path)
except BadJSON as ex:
if params.report_errors:
self.show_output("Path %s has bad JSON.", path)
except Keys.Missing as ex:
if params.report_errors:
self.show_output("Path %s is missing key %s.", path, ex)
dupes = []
for _, paths_by_value in dupes_by_path.items():
for _, paths in paths_by_value.items():
if len(paths) > 1:
paths.sort()
paths = paths if params.first else paths[1:]
for path in paths:
idx = bisect.bisect(dupes, path)
dupes.insert(idx, path)
for dup in dupes:
self.show_output(dup)
# if no dupes were found we call it a failure (i.e.: exit(1) from --run-once)
if len(dupes) == 0:
return False
def complete_json_dupes_for_keys(self, cmd_param_text, full_cmd, *rest):
complete_keys = partial(complete_values, ["key1", "key2", "#{key1.key2}"])
completers = [
self._complete_path,
complete_keys,
complete_labeled_boolean("report_errors")
]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Required("path"))
@check_paths_exists("path")
def do_edit(self, params):
"""
\x1b[1mNAME\x1b[0m
edit - Opens up an editor to modify and update a znode.
\x1b[1mSYNOPSIS\x1b[0m
edit <path>
\x1b[1mDESCRIPTION\x1b[0m
If the content has not changed, the znode won't be updated.
$EDITOR must be set for zk-shell to find your editor.
\x1b[1mEXAMPLES\x1b[0m
# make sure $EDITOR is set in your shell
> edit /configs/webservers/primary
# change something and save
> get /configs/webservers/primary
# updated content
"""
if os.getuid() == 0:
self.show_output("edit cannot be run as root.")
return
editor = os.getenv("EDITOR", os.getenv("VISUAL", "/usr/bin/vi"))
if editor is None:
self.show_output("No editor found, please set $EDITOR")
return
editor = which(editor)
if not editor:
self.show_output("Cannot find executable editor, please set $EDITOR")
return
st = os.stat(editor)
if (st.st_mode & statlib.S_ISUID) or (st.st_mode & statlib.S_ISUID):
self.show_output("edit cannot use setuid/setgid binaries.")
return
# copy content to tempfile
value, stat = self._zk.get(params.path)
_, tmppath = tempfile.mkstemp()
with open(tmppath, "w") as fh:
fh.write(value if value else "")
# launch editor
rv = os.system("%s %s" % (editor, tmppath))
if rv != 0:
self.show_output("%s did not exit successfully" % editor)
try:
os.unlink(tmppath)
except OSError: pass
return
# did it change? if so, save it
with open(tmppath, "r") as fh:
newvalue = fh.read()
if newvalue != value:
self.set(params.path, decoded(newvalue), stat.version)
try:
os.unlink(tmppath)
except OSError: pass
def complete_edit(self, cmd_param_text, full_cmd, *rest):
return complete([self._complete_path], cmd_param_text, full_cmd, *rest)
@ensure_params(IntegerRequired("repeat"), FloatRequired("pause"), Multi("cmds"))
def do_loop(self, params):
"""
\x1b[1mNAME\x1b[0m
loop - Runs commands in a loop
\x1b[1mSYNOPSIS\x1b[0m
loop <repeat> <pause> <cmd1> <cmd2> ... <cmdN>
\x1b[1mDESCRIPTION\x1b[0m
Runs <cmds> <repeat> times (0 means forever), with a pause of <pause> seconds in between
each <cmd> (0 means no pause).
\x1b[1mEXAMPLES\x1b[0m
> loop 3 0 "get /foo"
...
> loop 3 0 "get /foo" "get /bar"
...
"""
repeat = params.repeat
if repeat < 0:
self.show_output("<repeat> must be >= 0.")
return
pause = params.pause
if pause < 0:
self.show_output("<pause> must be >= 0.")
return
cmds = params.cmds
i = 0
with self.transitions_disabled():
while True:
for cmd in cmds:
try:
self.onecmd(cmd)
except Exception as ex:
self.show_output("Command failed: %s.", ex)
if pause > 0.0:
time.sleep(pause)
i += 1
if repeat > 0 and i >= repeat:
break
def complete_loop(self, cmd_param_text, full_cmd, *rest):
complete_repeat = partial(complete_values, [str(i) for i in range(0, 11)])
complete_pause = partial(complete_values, [str(i) for i in range(0, 11)])
cmds = ["\"get ", "\"ls ", "\"create ", "\"set ", "\"rm "]
# FIXME: complete_values doesn't work when vals includes quotes
complete_cmds = partial(complete_values, cmds)
completers = [complete_repeat, complete_pause, complete_cmds]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(
Required("path"),
Required("hosts"),
LabeledBooleanOptional("recursive"),
LabeledBooleanOptional("reverse")
)
@check_paths_exists("path")
def do_ephemeral_endpoint(self, params):
"""
\x1b[1mNAME\x1b[0m
ephemeral_endpoint - Gets the ephemeral znode owner's session and ip:port
\x1b[1mSYNOPSIS\x1b[0m
ephemeral_endpoint <path> <hosts> [recursive] [reverse_lookup]
\x1b[1mDESCRIPTION\x1b[0m
hosts is a list of hosts in the host1[:port1][,host2[:port2]],... form.
\x1b[1mOPTIONS\x1b[0m
* recursive: recurse through the children (default: false)
* reverse_lookup: convert IPs back to hostnames (default: false)
\x1b[1mEXAMPLES\x1b[0m
> ephemeral_endpoint /servers/member_0000044941 10.0.0.1,10.0.0.2,10.0.0.3
0xa4788b919450e6 10.3.2.12:54250 10.0.0.2:2181
"""
if invalid_hosts(params.hosts):
self.show_output("List of hosts has the wrong syntax.")
return
stat = self._zk.exists(params.path)
if stat is None:
self.show_output("%s is gone.", params.path)
return
if not params.recursive and stat.ephemeralOwner == 0:
self.show_output("%s is not ephemeral.", params.path)
return
try:
info_by_path = self._zk.ephemerals_info(params.hosts)
except XClient.CmdFailed as ex:
self.show_output(str(ex))
return
def check(path, show_path, resolved):
info = info_by_path.get(path, None)
if info is None:
self.show_output("No session info for %s.", path)
else:
self.show_output("%s%s",
"%s: " % (path) if show_path else "",
info.resolved if resolved else str(info))
if not params.recursive:
check(params.path, False, params.reverse)
else:
for cpath, _ in self._zk.tree(params.path, 0, full_path=True):
check(cpath, True, params.reverse)
def complete_ephemeral_endpoint(self, cmd_param_text, full_cmd, *rest):
""" TODO: the hosts lists can be retrieved from self.zk.hosts """
complete_hosts = partial(complete_values, ["127.0.0.1:2181"])
completers = [
self._complete_path,
complete_hosts,
complete_labeled_boolean("recursive"),
complete_labeled_boolean("reverse")
]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Required("session"), Required("hosts"), LabeledBooleanOptional("reverse"))
def do_session_endpoint(self, params):
"""
\x1b[1mNAME\x1b[0m
session_endpoint - Gets the session's IP endpoints
\x1b[1mSYNOPSIS\x1b[0m
session_endpoint <session> <hosts> [reverse_lookup]
\x1b[1mDESCRIPTION\x1b[0m
where hosts is a list of hosts in the host1[:port1][,host2[:port2]],... form
\x1b[1mOPTIONS\x1b[0m
* reverse_lookup: convert IPs back to hostnames (default: false)
\x1b[1mEXAMPLES\x1b[0m
> session_endpoint 0xa4788b919450e6 10.0.0.1,10.0.0.2,10.0.0.3
10.3.2.12:54250 10.0.0.2:2181
"""
if invalid_hosts(params.hosts):
self.show_output("List of hosts has the wrong syntax.")
return
try:
info_by_id = self._zk.sessions_info(params.hosts)
except XClient.CmdFailed as ex:
self.show_output(str(ex))
return
info = info_by_id.get(params.session, None)
if info is None:
self.show_output("No session info for %s.", params.session)
else:
self.show_output("%s", info.resolved_endpoints if params.reverse else info.endpoints)
def complete_session_endpoint(self, cmd_param_text, full_cmd, *rest):
""" TODO: the hosts lists can be retrieved from self.zk.hosts """
complete_hosts = partial(complete_values, ["127.0.0.1:2181"])
completers = [self._complete_path, complete_hosts, complete_labeled_boolean("reverse")]
return complete(completers, cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Required("path"), Required("val"), IntegerRequired("repeat"))
@check_paths_exists("path")
def do_fill(self, params):
"""
\x1b[1mNAME\x1b[0m
fill - Fills a znode with the given value
\x1b[1mSYNOPSIS\x1b[0m
fill <path> <char> <count>
\x1b[1mEXAMPLES\x1b[0m
> fill /some/znode X 1048576
"""
self._zk.set(params.path, decoded(params.val * params.repeat))
def complete_fill(self, cmd_param_text, full_cmd, *rest):
complete_value = partial(complete_values, ["X", "Y"])
complete_repeat = partial(complete_values, [str(i) for i in range(0, 11)])
completers = [self._complete_path, complete_value, complete_repeat]
return complete(completers, cmd_param_text, full_cmd, *rest)
@ensure_params(FloatRequired("seconds"))
def do_sleep(self, params):
"""
\x1b[1mNAME\x1b[0m
sleep - Sleeps for the given seconds (may be fractional)
\x1b[1mSYNOPSIS\x1b[0m
sleep <seconds>
\x1b[1mEXAMPLES\x1b[0m
> sleep 0.5
"""
time.sleep(params.seconds)
def complete_sleep(self, cmd_param_text, full_cmd, *rest):
complete_vals = partial(complete_values, ["0.5", "1.0", "2.0", "5.0", "10.0"])
return complete([complete_vals], cmd_param_text, full_cmd, *rest)
@ensure_params(Multi("cmds"))
def do_time(self, params):
"""
\x1b[1mNAME\x1b[0m
time - Measures elapsed seconds after running commands
\x1b[1mSYNOPSIS\x1b[0m
time <cmd1> <cmd2> ... <cmdN>
\x1b[1mEXAMPLES\x1b[0m
> time 'loop 10 0 "create /foo_ bar ephemeral=false sequence=true"'
Took 0.05585 seconds
"""
start = time.time()
for cmd in params.cmds:
try:
self.onecmd(cmd)
except Exception as ex:
self.show_output("Command failed: %s.", ex)
elapsed = "{0:.5f}".format(time.time() - start)
self.show_output("Took %s seconds" % elapsed)
def complete_time(self, cmd_param_text, full_cmd, *rest):
cmds = ["get ", "ls ", "create ", "set ", "rm "]
complete_cmds = partial(complete_values, cmds)
return complete([complete_cmds], cmd_param_text, full_cmd, *rest)
@connected
@ensure_params(Required("cmd"), Required("args"), IntegerOptional("from_config", -1))
def do_reconfig(self, params):
"""
\x1b[1mNAME\x1b[0m
reconfig - Reconfigures a ZooKeeper cluster (adds/removes members)
\x1b[1mSYNOPSIS\x1b[0m
reconfig <add|remove> <arg> [from_config]
\x1b[1mDESCRIPTION\x1b[0m
reconfig add <members> [from_config]
adds the given members (i.e.: 'server.100=10.0.0.10:2889:3888:observer;0.0.0.0:2181').
reconfig remove <members_ids> [from_config]
removes the members with the given ids (i.e.: '2,3,5').
\x1b[1mEXAMPLES\x1b[0m
> reconfig add server.100=0.0.0.0:56954:37866:observer;0.0.0.0:42969
server.1=localhost:20002:20001:participant
server.2=localhost:20012:20011:participant
server.3=localhost:20022:20021:participant
server.100=0.0.0.0:56954:37866:observer;0.0.0.0:42969
version=100000003
> reconfig remove 100
server.1=localhost:20002:20001:participant
server.2=localhost:20012:20011:participant
server.3=localhost:20022:20021:participant
version=100000004
"""
if params.cmd not in ["add", "remove"]:
raise ValueError("Bad command: %s" % params.cmd)
joining, leaving, from_config = None, None, params.from_config
if params.cmd == "add":
joining = params.args
elif params.cmd == "remove":
leaving = params.args
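# only one of joining/leaving is passed, depending on whether members are being added or removed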
try:
value, _ = self._zk.reconfig(
joining=joining, leaving=leaving, new_members=None, from_config=from_config)
self.show_output(value)
except NewConfigNoQuorumError:
self.show_output("No quorum available to perform reconfig.")
except ReconfigInProcessError:
self.show_output("There's a reconfig in process.")
def complete_reconfig(self, cmd_param_text, full_cmd, *rest):
complete_cmd = partial(complete_values, ["add", "remove"])
complete_config = partial(complete_values, ["-1"])
complete_arg = partial(
complete_values, ["server.100=0.0.0.0:2889:3888:observer;0.0.0.0:2181", "1,2,3"])
completers = [complete_cmd, complete_arg, complete_config]
return complete(completers, cmd_param_text, full_cmd, *rest)
@ensure_params(Required("fmtstr"), MultiOptional("cmds"))
def do_echo(self, params):
"""
\x1b[1mNAME\x1b[0m
echo - displays formatted data
\x1b[1mSYNOPSIS\x1b[0m
echo <fmtstr> [cmd1] [cmd2] ... [cmdN]
\x1b[1mEXAMPLES\x1b[0m
> echo hello
hello
> echo 'The value of /foo is %s' 'get /foo'
bar
"""
values = []
with self.output_context() as context:
for cmd in params.cmds:
rv = self.onecmd(cmd)
val = "" if rv is False else context.value.rstrip("\n")
values.append(val)
context.reset()
try:
self.show_output(params.fmtstr, *values)
except TypeError:
self.show_output("Bad format string or missing arguments.")
@ensure_params(Required("hosts"))
def do_connect(self, params):
"""
\x1b[1mNAME\x1b[0m
connect - Connects to a host from a list of hosts given
\x1b[1mSYNOPSIS\x1b[0m
connect <hosts>
\x1b[1mEXAMPLES\x1b[0m
> connect host1:2181,host2:2181
"""
# TODO: we should offer autocomplete based on prev hosts.
self._connect(params.hosts.split(","))
@connected
def do_disconnect(self, args):
"""
\x1b[1mNAME\x1b[0m
disconnect - Disconnects and closes the current session
"""
self._disconnect()
self._hosts = []
self.update_curdir("/")
@connected
def do_reconnect(self, args):
"""
\x1b[1mNAME\x1b[0m
reconnect - Forces a reconnect by shutting down the connected socket
"""
self._zk.reconnect()
self.update_curdir("/")
@connected
def do_pwd(self, args):
"""
\x1b[1mNAME\x1b[0m
pwd - Prints the current path
"""
self.show_output("%s", self.curdir)
def do_EOF(self, *args):
"""
\x1b[1mNAME\x1b[0m
<ctrl-d> - Exits via Ctrl-D
"""
self._exit(True)
def do_quit(self, *args):
"""
\x1b[1mNAME\x1b[0m
quit - Give up on everything and just quit
"""
self._exit(False)
def do_exit(self, *args):
"""
\x1b[1mNAME\x1b[0m
exit - Au revoir
"""
self._exit(False)
@contextmanager
def transitions_disabled(self):
"""
use this when you want to ignore state transitions (i.e.: inside loop)
"""
self.state_transitions_enabled = False
try:
yield
except KeyboardInterrupt:
pass
self.state_transitions_enabled = True
def _disconnect(self):
if self._zk and self.connected:
self._zk.stop()
self._zk.close()
self._zk = None
self.connected = False
def _init_zk_client(self, hosts_list):
"""
        Initialize the ZooKeeper client based on the provided list of hosts.
        In the basic case, hosts_list is a list of hosts like:
```
[10.0.0.2:2181, 10.0.0.3:2181]
```
It might also contain auth info:
```
[digest:foo:[email protected]:2181, 10.0.0.3:2181]
```
"""
auth_data = []
hosts = []
for auth_host in hosts_list:
nl = Netloc.from_string(auth_host)
rhost, rport = hosts_to_endpoints(nl.host)[0]
if self._tunnel is not None:
lhost, lport = TunnelHelper.create_tunnel(rhost, rport, self._tunnel)
hosts.append('{0}:{1}'.format(lhost, lport))
else:
hosts.append(nl.host)
if nl.scheme != "":
auth_data.append((nl.scheme, nl.credential))
return KazooClient(",".join(hosts),
read_only=self._read_only,
timeout=self._connect_timeout,
auth_data=auth_data if len(auth_data) > 0 else None)
def _connect(self, hosts_list=None, zk_client=None):
"""
        In the basic case, hosts_list is a list of hosts like:
```
[10.0.0.2:2181, 10.0.0.3:2181]
```
It might also contain auth info:
```
[digest:foo:[email protected]:2181, 10.0.0.3:2181]
```
"""
self._disconnect()
if not zk_client:
zk_client = self._init_zk_client(hosts_list)
self._zk = XClient(zk_client)
hosts = ['{0}:{1}'.format(*host_port) for host_port in zk_client.hosts]
if self._asynchronous:
self._connect_async(hosts)
else:
self._connect_sync(hosts)
def _connect_async(self, hosts):
def listener(state):
self.connected = state == KazooState.CONNECTED
self._hosts = hosts
self.update_curdir("/")
# hack to restart sys.stdin.readline()
self.show_output("")
os.kill(os.getpid(), signal.SIGUSR2)
self._zk.add_listener(listener)
self._zk.start_async()
self.update_curdir("/")
def _connect_sync(self, hosts):
try:
self._zk.start(timeout=self._connect_timeout)
self.connected = True
except self._zk.handler.timeout_exception as ex:
self.show_output("Failed to connect: %s", ex)
self._hosts = hosts
self.update_curdir("/")
@property
def state(self):
if self._zk and self._zk.client_state != 'CLOSED':
return "(%s) " % ('%s [%s]' % (self._zk.client_state, ','.join(self._hosts)))
else:
return "(DISCONNECTED) "
def do_man(self, *args, **kwargs):
"""
An alias for help.
"""
self.do_help(*args, **kwargs)
def complete_man(self, *args, **kwargs):
return self.complete_help(*args, **kwargs) | zk-shell | /zk_shell-1.3.4.tar.gz/zk_shell-1.3.4/zk_shell/shell.py | shell.py |
ZKWatcher: Watches and Registers Apps in Zookeeper
==================================================
|build_status|_ |doc_status|_ |pypi_download|_
Documentation
-------------
Documentation is hosted at `https://zkwatcher.readthedocs.org <https://zkwatcher.readthedocs.org>`_
.. |build_status| image:: https://travis-ci.org/Nextdoor/zkwatcher.svg?branch=master
.. _build_status: https://travis-ci.org/Nextdoor/zkwatcher
.. |doc_status| image:: https://readthedocs.org/projects/zkwatcher/badge/?version=latest
.. _doc_status: https://zkwatcher.readthedocs.org
.. |pypi_download| image:: https://badge.fury.io/py/zkwatcher.png
.. _pypi_download: https://pypi.python.org/pypi/zkwatcher
| zk-watcher | /zk_watcher-0.4.0.tar.gz/zk_watcher-0.4.0/README.rst | README.rst |
from future import standard_library
standard_library.install_aliases()
from builtins import str # noqa: E402
from builtins import object # noqa: E402
import configparser # noqa: E402
import json # noqa: E402
import logging # noqa: E402
import logging.handlers # noqa: E402
import optparse # noqa: E402
import os # noqa: E402
import signal # noqa: E402
import socket # noqa: E402
import subprocess # noqa: E402
import threading # noqa: E402
import time # noqa: E402
# Get our ServiceRegistry class
from nd_service_registry import KazooServiceRegistry as ServiceRegistry # noqa: E402
from nd_service_registry import exceptions # noqa: E402
# Our default variables
from .version import __version__ as VERSION # noqa: E402
__author__ = '[email protected] (Matt Wise)'
# Defaults
LOG = '/var/log/zk_watcher.log'
ZOOKEEPER_SESSION_TIMEOUT_USEC = 300000 # microseconds
ZOOKEEPER_URL = 'localhost:2181'
# This global variable is used to trigger the service stopping/starting...
RUN_STATE = True
# First handle all of the options passed to us
usage = 'usage: %prog <options>'
parser = optparse.OptionParser(usage=usage, version=VERSION,
add_help_option=True)
parser.set_defaults(verbose=True)
parser.add_option('-c', '--config', dest='config',
default='/etc/zk/config.cfg',
help='override the default config file (/etc/zk/config.cfg)')
parser.add_option('-s', '--server', dest='server', default=ZOOKEEPER_URL,
                  help='server address (default: localhost:2181)')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=False,
help='verbose mode')
parser.add_option('-l', '--syslog', action='store_true', dest='syslog',
default=False,
help='log to syslog')
(options, args) = parser.parse_args()
class WatcherDaemon(threading.Thread):
"""The main daemon process.
This is the main object that defines all of our major functions and
connection information."""
LOGGER = 'WatcherDaemon'
def __init__(self, server, config_file=None, verbose=False):
"""Initilization code for the main WatcherDaemon.
Set up our local logger reference, and pid file locations."""
# Initiate our thread
super(WatcherDaemon, self).__init__()
self.log = logging.getLogger(self.LOGGER)
self.log.info('WatcherDaemon %s' % VERSION)
self._watchers = []
self._sr = None
self._config_file = config_file
self._server = server
self._verbose = verbose
self.done = False
# Set up our threading environment
self._event = threading.Event()
# These threads can die with prejudice. Make sure that any time the
# python interpreter exits, we exit immediately
self.setDaemon(True)
# Watch for any signals
signal.signal(signal.SIGHUP, self._signal_handler)
signal.signal(signal.SIGTERM, self._signal_handler)
# Bring in our configuration options
self._parse_config()
# Create our ServiceRegistry object
self._connect()
# Start up
self.start()
def _signal_handler(self, signum, frame):
"""Watch for certain signals"""
self.log.warning('Received signal: %s' % signum)
if signum == signal.SIGHUP:
self.log.warning('Reloading config.')
self._parse_config()
self._connect()
self._setup_watchers()
if signum == signal.SIGTERM:
self.log.warning('Terminating all node registrations.')
self.stop()
def _parse_config(self):
"""Read in the supplied config file and update our local settings."""
self.log.debug('Loading config...')
self._config = configparser.ConfigParser()
self._config.read(self._config_file)
        # Check if auth data was supplied. If it is, read it in and then remove
        # it from our configuration object so it's not used anywhere else.
try:
self.user = self._config.get('auth', 'user')
self.password = self._config.get('auth', 'password')
self._config.remove_section('auth')
except (configparser.NoOptionError, configparser.NoSectionError):
self.user = None
self.password = None
def _connect(self):
"""Connects to the ServiceRegistry.
If already connected, updates the current connection settings."""
self.log.debug('Checking for ServiceRegistry object...')
if not self._sr:
self.log.debug('Creating new ServiceRegistry object...')
self._sr = ServiceRegistry(server=self._server, lazy=True,
username=self.user,
password=self.password)
else:
self.log.debug('Updating existing object...')
self._sr.set_username(self.user)
self._sr.set_password(self.password)
def _setup_watchers(self):
# For each watcher, see if we already have one for a given path or not.
for service in self._config.sections():
w = self._get_watcher(service)
# Gather up the config data for our section into a few local
# variables so that we can shorten the statements below.
command = self._config.get(service, 'cmd')
service_port = self._config.get(service, 'service_port')
zookeeper_path = self._config.get(service, 'zookeeper_path')
refresh = self._config.get(service, 'refresh')
# Gather our optional parameters. If they don't exist, set
# some reasonable default.
try:
zookeeper_data = self._parse_data(
self._config.get(service, 'zookeeper_data'))
            except (configparser.NoOptionError, configparser.NoSectionError):
zookeeper_data = {}
try:
service_hostname = self._config.get(
service, 'service_hostname')
            except (configparser.NoOptionError, configparser.NoSectionError):
service_hostname = socket.getfqdn()
if w:
# Certain fields cannot be changed without destroying the
# object and its registration with Zookeeper.
if w._service_port != service_port or \
w._service_hostname != service_hostname or \
w._path != zookeeper_path:
w.stop()
w = None
if w:
# We already have a watcher for this service. Update its
# object data, and let it keep running.
w.set(command=command,
data=zookeeper_data,
refresh=refresh)
            # If there's still no 'w' returned (either _get_watcher failed, or
            # we noticed that certain un-updatable fields were changed), then
            # create a new object.
if not w:
w = ServiceWatcher(registry=self._sr,
service=service,
service_port=service_port,
service_hostname=service_hostname,
command=command,
path=zookeeper_path,
data=zookeeper_data,
refresh=refresh)
self._watchers.append(w)
# Check if any watchers need to be destroyed because they're no longer
# in our config.
for w in self._watchers:
if w._service not in list(self._config.sections()):
w.stop()
self._watchers.remove(w)
def _get_watcher(self, service):
"""Returns a watcher based on the service name."""
for watcher in self._watchers:
if watcher._service == service:
return watcher
return None
def _parse_data(self, data):
"""Convert a string of data from ConfigParse into our dict.
The zookeeper_data field supports one of two types of fields. Either
a single key=value string, or a JSON-formatted set of key=value
pairs:
zookeeper_data: foo=bar
zookeeper_data: foo=bar, bar=foo
zookeeper_data: { "foo": "bar", "bar": "foo" }
Args:
data: String representing data above"""
try:
data_dict = json.loads(data)
        except ValueError:
data_dict = {}
            for pair in data.split(','):
                parts = pair.split('=')
                if len(parts) == 2:
                    data_dict[parts[0]] = parts[1]
return data_dict
def run(self):
"""Start up all of the worker threads and keep an eye on them"""
self._setup_watchers()
# Now, loop. Wait for a death signal
        while not self._event.is_set():
self._event.wait(1)
# At this point we must be exiting. Kill off our above threads
self.log.info('Shutting down')
for w in self._watchers:
self.log.info('Stopping watcher: %s' % w._path)
w.stop()
# Finally, mark us as done
self.done = True
def stop(self):
self._event.set()
class ServiceWatcher(threading.Thread):
"""Monitors a particular service definition."""
LOGGER = 'WatcherDaemon.ServiceWatcher'
def __init__(self, registry, service, service_port, command, path, data,
service_hostname, refresh=15):
"""Initialize the object and begin monitoring the service."""
# Initiate our thread
super(ServiceWatcher, self).__init__()
self._sr = registry
self._service = service
self._service_port = service_port
self._service_hostname = service_hostname
self._path = path
self._fullpath = '%s/%s:%s' % (path, service_hostname, service_port)
self.set(command, data, refresh)
self.log = logging.getLogger('%s.%s' % (self.LOGGER, self._service))
self.log.debug('Initializing...')
self._event = threading.Event()
self.setDaemon(True)
self.start()
def set(self, command, data, refresh):
"""Public method for re-configuring our service checks.
NOTE: You cannot re-configure the port or server-name currently.
Args:
command: (String) command to execute
data: (String/Dict) configuration data to pass with registration
refresh: (Int) frequency (in seconds) of check"""
self._command = command
self._refresh = int(refresh)
self._data = data
def run(self):
"""Monitors the supplied service, and keeps it registered.
We loop every second, checking whether or not we need to run our
check. If we do, we run the check. If we don't, we wait until
we need to, or we receive a stop."""
last_checked = 0
self.log.debug('Beginning run() loop')
        while not self._event.is_set():
if time.time() - last_checked > self._refresh:
self.log.debug('[%s] running' % self._command)
# First, run our service check command and see what the
# return code is
c = Command(self._command, self._service)
ret = c.run(timeout=90)
if ret == 0:
                    # If the command was successful...
                    self.log.debug('[%s] returned successfully' % self._command)
self._update(state=True)
else:
# If the command failed...
self.log.warning('[%s] returned a failed exit code [%s]' %
(self._command, ret))
self._update(state=False)
                # Now that our service check is done, update last_checked
                # with the current time, so that we can check how long
                # it's been since the last run.
last_checked = time.time()
            # Sleep for one second just so that we don't run in a crazy loop
# taking up all kinds of resources.
self._event.wait(1)
self._update(False)
self.log.info('Deregistering %s' % self._fullpath)
self._sr.unset(self._fullpath)
self._sr = None
self.log.info('Watcher %s is exiting the run() loop.' % self._service)
def stop(self):
"""Stop the run() loop."""
self._event.set()
self.log.debug("Waiting for run() loop to exit.")
while self._sr is not None:
self._event.wait(1)
def _update(self, state):
# Call ServiceRegistry.set() method with our state, data,
# path information. The ServiceRegistry module will take care of
# updating the data, state, etc.
self.log.debug('Attempting to update service [%s] with '
'data [%s], and state [%s].' %
(self._service, self._data, state))
try:
self._sr.set_node(self._fullpath, self._data, state)
            self.log.debug('[%s] successfully updated path %s with state %s' %
(self._service, self._fullpath, state))
return True
except exceptions.NoConnection as e:
            self.log.warning('[%s] could not update path %s with state %s: %s' %
(self._service, self._fullpath, state, e))
return False
class Command(object):
"""Wrapper to run a command with a timeout for safety."""
LOGGER = 'WatcherDaemon.Command'
def __init__(self, cmd, service):
"""Initialize the Command object.
This object can be created once, and run many times. Each time it
runs we initiate a small thread to run our process, and if that
process times out, we kill it."""
self._cmd = cmd
self._process = None
self.log = logging.getLogger('%s.%s' % (self.LOGGER, service))
def run(self, timeout):
def target():
self.log.debug('[%s] started...' % self._cmd)
# Deliberately do not capture any output. Using PIPEs can
# cause deadlocks according to the Python documentation here
# (http://docs.python.org/library/subprocess.html)
#
# "Warning This will deadlock when using stdout=PIPE and/or
# stderr=PIPE and the child process generates enough output to
# a pipe such that it blocks waiting for the OS pipe buffer to
            # accept more data. Use communicate() to avoid that."
#
# We only care about the exit code of the command anyways...
try:
self._process = subprocess.Popen(
self._cmd.split(' '),
shell=False,
stdout=open('/dev/null', 'w'),
stderr=None,
stdin=None)
self._process.communicate()
except OSError as e:
                self.log.warning('Failed to run: %s' % e)
return 1
self.log.debug('[%s] finished... returning %s' %
(self._cmd, self._process.returncode))
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.log.debug('[%s] taking too long to respond, terminating.' %
self._cmd)
try:
self._process.terminate()
except:
pass
thread.join()
# If the subprocess.Popen() fails for any reason, it returns 1... but
        # because it's in a thread, we never actually see that error code.
if self._process:
return self._process.returncode
else:
return 1
def setup_logger():
"""Configure our main logger object"""
# Get our logger
logger = logging.getLogger()
pid = os.getpid()
format = 'zk_watcher[' + str(pid) + '] [%(name)s] ' \
'[%(funcName)s]: (%(levelname)s) %(message)s'
formatter = logging.Formatter(format)
if options.verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
if options.syslog:
handler = logging.handlers.SysLogHandler('/dev/log', 'syslog')
else:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def main():
logger = setup_logger()
w = WatcherDaemon(
config_file=options.config,
server=options.server,
verbose=options.verbose)
    while not w.done:
try:
time.sleep(1)
except KeyboardInterrupt:
break
time.sleep(1)
logger.info('Exiting')
if __name__ == '__main__':
main() | zk-watcher | /zk_watcher-0.4.0.tar.gz/zk_watcher-0.4.0/zk_watcher/zk_watcher.py | zk_watcher.py |
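# A hypothetical config file for the WatcherDaemon above, written with
# configparser so the sketch stays runnable. Section names become service
# names; [auth] is optional and is stripped by _parse_config(). The service,
# command, port and path values are illustrative only.
import configparser

cfg = configparser.ConfigParser()
cfg["auth"] = {"user": "zkuser", "password": "secret"}
cfg["webserver"] = {
    "cmd": "/usr/bin/curl -sf http://localhost:80/",   # health-check command
    "service_port": "80",
    "zookeeper_path": "/services/production/webserver",
    "refresh": "30",
    # zookeeper_data accepts key=value, comma-separated pairs, or JSON
    "zookeeper_data": '{"weight": "10", "dc": "us-east"}',
}
with open("/tmp/zk_watcher_example.cfg", "w") as fp:
    cfg.write(fp)
# The daemon would then be started with the -c/--config option pointing at this file.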
Changelog
=========
0.8.5 (2021-09-20)
-------------------
- Always send a Trace-Id even if the trace has not been sent to zipkin (slow query logs)
0.8.4 (2021-07-21)
-------------------
- Fix django issue on missing trace.
0.8.3 (2021-06-28)
-------------------
- Fix issue for django on slow request logs. The reset of the stack must
  be done on every request.
0.8.2 (2021-06-28)
-------------------
- Ensure we never raise an exception if we cannot collect a trace,
  avoiding side effects for clients.
0.8.1 (2021-06-25)
-------------------
- Hotfix Pyramid slow query logger that does not cleanup the trace stack.
0.8.0 (2021-06-25)
-------------------
- Rewrite Pyramid binding.
- Add a setting for pyramid to log only slow queries, with a configurable threshold.
- Add a setting to list what library should be traced.
- Make socket timeout configurable and change the default timeout to 1 second.
0.7.2 (2021-06-22)
-------------------
- Make the scribe async/sync socket configurable
- Add a middleware/setting for Django to track only slow queries
0.7.1 (2021-06-22)
-------------------
- Add a Trace context manager for more flexibility
- Add django support using a django middleware and app.
- Add psycopg2 cursor support to trace sql query
- Add an http client (synchronous) to push trace, client transport is
configurable.
0.6.10 (2020-01-28)
-------------------
- update logging level to avoid error logs for SQL queries outside of an HTTP context
- remove deprecated log.warn
0.6.9 (2019-09-26)
------------------
- requests: fixup infinite recursion
0.6.8 (2019-09-23)
------------------
- pyramid: fixup tests if zipkin not configured
0.6.7 (2019-09-23)
------------------
- pyramid: fixup tweenview init
0.6.6 (2019-09-23)
------------------
- pyramid: register trace in a tweenview
0.6.5 (2019-09-17)
------------------
- advertise version in pyramid module
0.6.4 (2019-09-18)
------------------
- do not throw exception when trying to format to thrift
0.6.3 (2019-09-18)
------------------
- ensure zipkin does not raise when trace id is larger than expected
0.6.2 (2019-09-17)
------------------
- do not throw warning on configuration mistakes
0.6.1 (2019-09-17)
------------------
- fixup python2 support
0.6.0 (2019-09-17)
------------------
- refactor pyramid plugin
- fix reporter
0.5 (2019-09-10)
----------------
- Use thriftpy2
0.4 (2015-08-21)
----------------
- Flask bindings
- xmlrpclib client bindings
- Filtered parameters in sqlalchemy binding
- Implement exponential backoff on connection
0.3 (2015-02-16)
----------------
- Make the service name configurable for pyramid application
0.2 (2015-02-16)
----------------
- Keep @trace usable when zipkin is not configured
0.1 (2015-02-16)
----------------
- Initial version
| zk | /zk-0.8.5.tar.gz/zk-0.8.5/CHANGES.rst | CHANGES.rst |
import random
import struct
import socket
from base64 import b64encode
from six import text_type
from thriftpy2.protocol import TBinaryProtocol
from thriftpy2.thrift import TType
from thriftpy2.protocol.binary import write_list_begin
from thriftpy2.transport import TMemoryBuffer
from thriftpy2.thrift import TDecodeException
from .zipkin import zipkincore_thrift as ttypes
def int_or_none(val):
if val is None:
return None
return int(val, 16)
def hex_str(n):
return "%0.16x" % (n,)
def uniq_id():
"""
    Create a random 64-bit unsigned integer appropriate
    for use as trace and span IDs.
@returns C{int}
"""
return random.randint(0, (2 ** 64) - 1)
def base64_thrift(thrift_obj):
trans = TMemoryBuffer()
tbp = TBinaryProtocol(trans)
thrift_obj.write(tbp)
return b64encode(bytes(trans.getvalue())).strip()
def ipv4_to_int(ipv4):
return struct.unpack("!i", socket.inet_aton(ipv4))[0]
def binary_annotation_formatter(annotation, host=None):
annotation_types = {
"string": ttypes.AnnotationType.STRING,
"bytes": ttypes.AnnotationType.BYTES,
}
annotation_type = annotation_types[annotation.annotation_type]
value = annotation.value
if isinstance(value, text_type):
value = value.encode("utf-8")
return ttypes.BinaryAnnotation(annotation.name, value, annotation_type, host)
def base64_thrift_formatter(trace, annotations):
thrift_annotations = []
binary_annotations = []
try:
for annotation in annotations:
host = None
if annotation.endpoint:
host = ttypes.Endpoint(
ipv4=ipv4_to_int(annotation.endpoint.ip),
port=annotation.endpoint.port,
service_name=annotation.endpoint.service_name,
)
if annotation.annotation_type == "timestamp":
thrift_annotations.append(
ttypes.Annotation(
timestamp=annotation.value, value=annotation.name, host=host
)
)
else:
binary_annotations.append(binary_annotation_formatter(annotation, host))
thrift_trace = ttypes.Span(
name=trace.name,
trace_id=u64_as_i64(trace.trace_id),
id=u64_as_i64(trace.span_id),
parent_id=u64_as_i64(trace.parent_span_id),
annotations=thrift_annotations,
binary_annotations=binary_annotations,
)
return base64_thrift(thrift_trace)
except TDecodeException as e:
raise ValueError(e)
def u64_as_i64(value):
if not value:
return value
try:
data = struct.pack(">Q", value)
data = struct.unpack(">q", data)
return data[0]
except struct.error as e:
raise ValueError(e)
def span_to_bytes(thrift_span):
"""
Returns a TBinaryProtocol encoded Thrift span.
:param thrift_span: thrift object to encode.
:returns: thrift object in TBinaryProtocol format bytes.
"""
transport = TMemoryBuffer()
protocol = TBinaryProtocol(transport)
thrift_span.write(protocol)
return bytes(transport.getvalue())
def base64_thrift_formatter_many(parent_trace):
"""
Returns a TBinaryProtocol encoded list of Thrift objects.
    :param parent_trace: parent Trace whose children are encoded as Thrift spans.
    :returns: binary object representing the encoded list.
"""
traces = list(parent_trace.children())
transport = TMemoryBuffer()
write_list_begin(transport, TType.STRUCT, len(traces))
for trace in traces:
thrift_annotations = []
binary_annotations = []
for annotation in trace.annotations:
host = None
if annotation.endpoint:
host = ttypes.Endpoint(
ipv4=ipv4_to_int(annotation.endpoint.ip),
port=annotation.endpoint.port,
service_name=annotation.endpoint.service_name,
)
if annotation.annotation_type == "timestamp":
thrift_annotations.append(
ttypes.Annotation(
timestamp=annotation.value, value=annotation.name, host=host
)
)
else:
binary_annotations.append(binary_annotation_formatter(annotation, host))
thrift_trace = ttypes.Span(
name=trace.name,
trace_id=u64_as_i64(trace.trace_id),
id=u64_as_i64(trace.span_id),
parent_id=u64_as_i64(trace.parent_span_id),
annotations=thrift_annotations,
binary_annotations=binary_annotations,
)
transport.write(span_to_bytes(thrift_trace))
return bytes(transport.getvalue()) | zk | /zk-0.8.5.tar.gz/zk-0.8.5/zipkin/util.py | util.py |
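# A small sketch of how the helpers above fit together: uniq_id() produces a
# 64-bit id, hex_str() renders it the way it is written into X-B3-* headers,
# int_or_none() parses it back on the receiving side, and u64_as_i64() is what
# base64_thrift_formatter() uses to fit the unsigned id into Thrift's signed
# i64 span fields.
from zipkin.util import hex_str, int_or_none, u64_as_i64, uniq_id

span_id = uniq_id()                      # random value in [0, 2**64 - 1]
header_value = hex_str(span_id)          # 16-character zero-padded hex string
assert int_or_none(header_value) == span_id
assert int_or_none(None) is None
assert -(2 ** 63) <= u64_as_i64(span_id) < 2 ** 63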
import math
import six
import time
import socket
from threading import Lock
from .util import uniq_id
from .client import Local
from .zipkin import zipkincore_thrift as constants
if not six.PY2:
long = int
class Id(long):
def __repr__(self):
return "<Id %x>" % self
def __str__(self):
return "%x" % self
class Endpoint(object):
"""
:param ip: C{str} ip address
:param port: C{int} port number
:param service_name: C{str} service_name
"""
def __init__(self, service_name, ip=None, port=0):
try:
if not ip:
if Local.local_ip:
ip = Local.local_ip
else:
ip = socket.gethostbyname_ex(socket.gethostname())[2][0]
except socket.gaierror:
ip = "127.0.0.1"
self.ip = ip
self.port = port
self.service_name = service_name
# TODO
# __eq__, __ne__, __repr__
class TraceStack(object):
def __init__(self):
self.stack = []
self.cur = None
# Locking is required, as stack and cur should mutate at the same time
self.lock = Lock()
def child(self, name, endpoint=None):
assert isinstance(name, six.string_types), "name parameter should be a string"
assert (
isinstance(endpoint, Endpoint) or endpoint is None
), "endpoint parameter should be an Endpoint"
try:
trace = self.cur.child(name, endpoint)
self.lock.acquire()
self.stack.append(trace)
self.cur = trace
return trace
finally:
self.lock.release()
def reset(self):
try:
self.lock.acquire()
self.stack = []
self.cur = None
finally:
self.lock.release()
def replace(self, trace):
assert isinstance(trace, Trace), "trace parameter should be of type Trace"
try:
self.lock.acquire()
self.stack = [trace]
self.cur = trace
finally:
self.lock.release()
def append(self, trace):
assert isinstance(trace, Trace), "trace parameter should be of type Trace"
try:
self.lock.acquire()
self.stack.append(trace)
self.cur = trace
finally:
self.lock.release()
def pop(self):
try:
self.lock.acquire()
if self.cur is None:
raise IndexError("pop from an empty stack")
# pop is safe here, cur is not none, current stack can't be empty
trace = self.stack.pop()
try:
cur = self.stack.pop()
self.stack.append(cur)
self.cur = cur
            except IndexError:
self.cur = None
return trace
finally:
self.lock.release()
@property
def current(self):
return self.cur
class Trace(object):
def __init__(
self, name, trace_id=None, span_id=None, parent_span_id=None, endpoint=None
):
assert isinstance(name, six.string_types), "name parameter should be a string"
self.name = name
self.trace_id = Id(trace_id or uniq_id())
self.span_id = Id(span_id or uniq_id())
self.parent_span_id = parent_span_id
self.annotations = []
self._children = []
self._endpoint = endpoint
def record(self, *annotations):
for a in annotations:
if a.endpoint is None:
a.endpoint = self._endpoint
self.annotations.extend(annotations)
def child_noref(self, name, endpoint=None):
if endpoint is not None:
e = endpoint
else:
e = self._endpoint
trace = self.__class__(
name, trace_id=self.trace_id, parent_span_id=self.span_id, endpoint=e
)
return trace
def child(self, name, endpoint=None):
trace = self.child_noref(name, endpoint)
self._children.append(trace)
return trace
def children(self):
return [y for x in self._children for y in x.children()] + [self]
def __repr__(self):
return "<Trace %s>" % self.trace_id
class Annotation(object):
"""
:param name: C{str} name of this annotation.
:param value: A value of the appropriate type based on
C{annotation_type}.
:param annotation_type: C{str} the expected type of our C{value}.
:param endpoint: An optional L{IEndpoint} provider to associate with
this annotation or C{None}
"""
def __init__(self, name, value, annotation_type, endpoint=None):
self.name = name
self.value = value
self.annotation_type = annotation_type
self.endpoint = endpoint
@classmethod
def timestamp(cls, name, timestamp=None):
if timestamp is None:
timestamp = math.trunc(time.time() * 1000 * 1000)
return cls(name, timestamp, "timestamp")
@classmethod
def server_send(cls, timestamp=None):
return cls.timestamp(constants.SERVER_SEND, timestamp)
@classmethod
def server_recv(cls, timestamp=None):
return cls.timestamp(constants.SERVER_RECV, timestamp)
@classmethod
def client_send(cls, timestamp=None):
return cls.timestamp(constants.CLIENT_SEND, timestamp)
@classmethod
def client_recv(cls, timestamp=None):
return cls.timestamp(constants.CLIENT_RECV, timestamp)
@classmethod
def string(cls, name, value):
return cls(name, value, "string")
@classmethod
def bytes(cls, name, value):
return cls(name, value, "bytes") | zk | /zk-0.8.5.tar.gz/zk-0.8.5/zipkin/models.py | models.py |
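# Minimal usage sketch for the model classes above: build a root trace for a
# fictional "checkout" service, record server-side annotations, then open a
# child span for an outgoing call. Service name, address and span names are
# made up for illustration.
from zipkin.models import Annotation, Endpoint, Trace

endpoint = Endpoint("checkout", ip="127.0.0.1", port=8080)
trace = Trace("GET /checkout", endpoint=endpoint)
trace.record(Annotation.server_recv())
trace.record(Annotation.string("http.path", "/checkout"))

child = trace.child("db-call")           # same trace_id, new span_id
child.record(Annotation.client_send())
child.record(Annotation.client_recv())

trace.record(Annotation.server_send())
# children() yields the child spans followed by the trace itself
assert all(t.trace_id == trace.trace_id for t in trace.children())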
import logging
from six.moves.urllib.parse import urlparse
from zipkin import local
from zipkin.models import Annotation
from zipkin.util import hex_str
log = logging.getLogger(__name__)
def filter_url_path(url):
url = urlparse(url)._replace(path="", query=None, fragment="")
url = url._replace(netloc=url.netloc.split("@")[-1])
return url.geturl()
def pre_request(request):
parent_trace = local().current
if not parent_trace:
return request
url = filter_url_path(request.url)
request.trace = parent_trace.child("requests:%s %s" % (request.method, url))
forwarded_trace = request.trace.child_noref("subservice")
request.headers["X-B3-TraceId"] = hex_str(forwarded_trace.trace_id)
request.headers["X-B3-SpanId"] = hex_str(forwarded_trace.span_id)
if forwarded_trace.parent_span_id is not None:
request.headers["X-B3-ParentSpanId"] = hex_str(forwarded_trace.parent_span_id)
request.trace.record(Annotation.string("http.method", request.method))
request.trace.record(Annotation.string("http.url", request.url))
request.trace.record(Annotation.string("span.kind", "client"))
request.trace.record(Annotation.server_recv())
return request
def pre_response(resp, req=None):
if not req:
req = resp.request
if not hasattr(req, "trace"):
return resp
req.trace.record(
Annotation.string(
"http.status_code", "{0}".format(getattr(resp, "status", None))
)
)
req.trace.record(Annotation.server_send())
return resp
class Proxy(object):
__slots__ = ["_obj", "__weakref__"]
def __init__(self, obj):
object.__setattr__(self, "_obj", obj)
#
# proxying (special cases)
#
def __getattr__(self, name):
return getattr(object.__getattribute__(self, "_obj"), name)
def __delattr__(self, name):
delattr(object.__getattribute__(self, "_obj"), name)
def __setattr__(self, name, value):
setattr(object.__getattribute__(self, "_obj"), name, value)
def __nonzero__(self):
return bool(object.__getattribute__(self, "_obj"))
def __str__(self):
return str(object.__getattribute__(self, "_obj"))
def __repr__(self):
return repr(object.__getattribute__(self, "_obj"))
#
# factories
#
_special_names = [
"__abs__",
"__add__",
"__and__",
"__call__",
"__cmp__",
"__coerce__",
"__contains__",
"__delitem__",
"__delslice__",
"__div__",
"__divmod__",
"__eq__",
"__float__",
"__floordiv__",
"__ge__",
"__getitem__",
"__getslice__",
"__gt__",
"__hash__",
"__hex__",
"__iadd__",
"__iand__",
"__idiv__",
"__idivmod__",
"__ifloordiv__",
"__ilshift__",
"__imod__",
"__imul__",
"__int__",
"__invert__",
"__ior__",
"__ipow__",
"__irshift__",
"__isub__",
"__iter__",
"__itruediv__",
"__ixor__",
"__le__",
"__len__",
"__long__",
"__lshift__",
"__lt__",
"__mod__",
"__mul__",
"__ne__",
"__neg__",
"__oct__",
"__or__",
"__pos__",
"__pow__",
"__radd__",
"__rand__",
"__rdiv__",
"__rdivmod__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__rfloorfiv__",
"__rlshift__",
"__rmod__",
"__rmul__",
"__ror__",
"__rpow__",
"__rrshift__",
"__rshift__",
"__rsub__",
"__rtruediv__",
"__rxor__",
"__setitem__",
"__setslice__",
"__sub__",
"__truediv__",
"__xor__",
"next",
]
@classmethod
def _create_class_proxy(cls, theclass):
"""creates a proxy for the given class"""
def make_method(name):
def method(self, *args, **kw):
return getattr(object.__getattribute__(self, "_obj"), name)(*args, **kw)
return method
namespace = {}
for name in cls._special_names:
if hasattr(theclass, name):
namespace[name] = make_method(name)
return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,), namespace)
def __new__(cls, obj, *args, **kwargs):
"""
        creates a proxy instance referencing `obj`. (obj, *args, **kwargs) are
passed to this class' __init__, so deriving classes can define an
__init__ method of their own.
note: _class_proxy_cache is unique per deriving class (each deriving
class must hold its own cache)
"""
try:
cache = cls.__dict__["_class_proxy_cache"]
except KeyError:
cls._class_proxy_cache = cache = {}
try:
theclass = cache[obj.__class__]
except KeyError:
cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)
ins = object.__new__(theclass)
theclass.__init__(ins, obj, *args, **kwargs)
return ins
try:
from requests.adapters import HTTPAdapter
class ZipkinAdapterProxy(Proxy, HTTPAdapter):
"""This class will proxy all methods or attributes to underlying
HTTPAdapter, it also override the send and build_response to hook
zipkin headers and tracing.
The proxy allows to hook into an existing HTTPAdapter
"""
def send(self, request, *args, **kwargs):
pre_request(request)
return super(ZipkinAdapterProxy, self).send(request, *args, **kwargs)
def build_response(self, req, resp, *args, **kwargs):
pre_response(resp, req)
return super(ZipkinAdapterProxy, self).build_response(
req, resp, *args, **kwargs
)
def request_adapter(adapter):
return ZipkinAdapterProxy(adapter)
except ImportError:
# requests < 1.0.0
def request_adapter(adapter):
return adapter
def session_init(init):
def func(self, *args, **kwargs):
init(self, *args, **kwargs)
if hasattr(self, "mount"):
adapter = ZipkinAdapterProxy(HTTPAdapter())
self.mount("http://", adapter)
self.mount("https://", adapter)
else:
self.hooks["pre_request"] = pre_request
self.hooks["response"] = pre_response
return func | zk | /zk-0.8.5.tar.gz/zk-0.8.5/zipkin/binding/requests/events.py | events.py |
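# Sketch of wiring the requests binding above onto a Session by hand; the
# package's own bind/configure entry point is not shown in this file, so this
# simply mirrors what session_init() does on requests >= 1.0. The target URL
# is a placeholder.
import requests
from requests.adapters import HTTPAdapter
from zipkin.binding.requests.events import request_adapter

session = requests.Session()
adapter = request_adapter(HTTPAdapter())   # ZipkinAdapterProxy wrapping HTTPAdapter
session.mount("http://", adapter)
session.mount("https://", adapter)
# While a trace is on the local stack, session.get("http://api.example.com/")
# records a child span and forwards the X-B3-TraceId / X-B3-SpanId headers.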
import logging
from zipkin import get_current_trace
from zipkin.models import Annotation
log = logging.getLogger(__name__)
endpoints = {}
def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
try:
endpoint = endpoints.get(conn.engine)
parent_trace = get_current_trace()
if not parent_trace:
log.warning("No parent found while tracing SQL")
return
try:
context.trace = parent_trace.child("SQL", endpoint=endpoint)
abstract = context
except AttributeError:
cursor.trace = parent_trace.child("SQL", endpoint=endpoint)
abstract = cursor
abstract.trace.record(Annotation.string("db.statement", statement))
if parameters:
if isinstance(parameters, dict):
parameters = dict(
[
(key, getattr(param, "logged_value", param))
for key, param in parameters.items()
]
)
else:
parameters = [
getattr(param, "logged_value", param) for param in parameters
]
abstract.trace.record(Annotation.string("db.parameters", repr(parameters)))
abstract.trace.record(Annotation.string("span.kind", "client"))
abstract.trace.record(Annotation.server_recv())
except Exception:
log.exception("Unexpected exception while tracing SQL")
def after_cursor_execute(conn, cursor, statement, parameters, context, executemany):
if not hasattr(context, "trace") and not hasattr(cursor, "trace"):
return
abstract = context if hasattr(context, "trace") else cursor
try:
abstract.trace.record(Annotation.string("status", "OK"))
abstract.trace.record(Annotation.server_send())
except Exception:
log.exception("Unexpected exception while tracing SQL")
def dbapi_error(conn, cursor, statement, parameters, context, exception):
if not hasattr(context, "trace") and not hasattr(cursor, "trace"):
return
abstract = context if hasattr(context, "trace") else cursor
try:
abstract.trace.record(Annotation.string("status", "KO"))
abstract.trace.record(Annotation.server_send())
except Exception:
log.exception("Unexpected exception while tracing SQL") | zk | /zk-0.8.5.tar.gz/zk-0.8.5/zipkin/binding/sqlalchemy/events.py | events.py |
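# The registration glue for these handlers lives outside this file; a minimal
# sketch using SQLAlchemy's standard event API could look like this. The
# database URL and service name are placeholders, and the "dbapi_error" hook
# only exists on SQLAlchemy versions that still provide that event.
from sqlalchemy import create_engine, event
from zipkin.binding.sqlalchemy.events import (
    after_cursor_execute, before_cursor_execute, dbapi_error, endpoints)
from zipkin.models import Endpoint

engine = create_engine("postgresql://app:secret@localhost/app")
endpoints[engine] = Endpoint("app-db")    # endpoint attached to SQL spans
event.listen(engine, "before_cursor_execute", before_cursor_execute)
event.listen(engine, "after_cursor_execute", after_cursor_execute)
event.listen(engine, "dbapi_error", dbapi_error)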
import sys
from functools import wraps
from psycopg2.extensions import connection as _connection
from psycopg2.extensions import cursor as _cursor
from zipkin.api import get_current_trace
from zipkin.models import Annotation
class TraceConnection(_connection):
"""A connection that logs all queries to a zipkin."""
def cursor(self, *args, **kwargs):
kwargs.setdefault("cursor_factory", self.cursor_factory or TraceCursor)
return super(TraceConnection, self).cursor(*args, **kwargs)
def trace_req(trace_name):
def wrapper(fn):
@wraps(fn)
def wrapped(cursor, statement, vars=None):
trace = None
parent_trace = get_current_trace()
if parent_trace:
trace = parent_trace.child(trace_name)
trace.record(Annotation.string("db.statement", statement))
if vars:
if isinstance(vars, dict):
params = dict(
[
(key, getattr(param, "logged_value", param))
for key, param in vars.items()
]
)
else:
params = [
getattr(param, "logged_value", param) for param in vars
]
trace.record(Annotation.string("db.parameters", repr(params)))
trace.record(Annotation.string("span.kind", "client"))
trace.record(Annotation.server_send())
try:
fn(cursor, statement, vars)
finally:
if trace:
status = "OK" if sys.exc_info()[0] is None else "KO"
trace.record(Annotation.string("status", status))
trace.record(Annotation.server_recv())
return wrapped
return wrapper
class TraceCursor(_cursor):
"""A cursor that logs queries using its connection logging facilities."""
@trace_req("SQL")
def execute(self, query, vars=None):
return super(TraceCursor, self).execute(query, vars)
@trace_req("STORED PROCEDURE")
def callproc(self, procname, vars=None):
return super(TraceCursor, self).callproc(procname, vars) | zk | /zk-0.8.5.tar.gz/zk-0.8.5/zipkin/binding/psycopg2/tracer.py | tracer.py |
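# Sketch: let psycopg2 build TraceConnection objects so every execute() and
# callproc() opens a child SQL span whenever a trace is active. The DSN is a
# placeholder.
import psycopg2
from zipkin.binding.psycopg2.tracer import TraceConnection

conn = psycopg2.connect(
    "dbname=app user=app host=localhost",
    connection_factory=TraceConnection,
)
with conn.cursor() as cur:                # a TraceCursor, via cursor_factory
    cur.execute("SELECT 1")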
import time
import logging
from django.conf import settings
from zipkin import local
from zipkin.api import get_current_trace, stack_trace
from zipkin.models import Trace, Annotation, Endpoint
from zipkin.util import int_or_none
from zipkin.client import log as zipkin_log
from .apps import ZipkinConfig
log = logging.getLogger(__name__)
def init_trace(request):
headers = request.headers
trace_name = request.method + " " + request.path_info
trace = Trace(
trace_name,
int_or_none(headers.get("X-B3-TraceId", None)),
int_or_none(headers.get("X-B3-SpanId", None)),
int_or_none(headers.get("X-B3-ParentSpanId", None)),
endpoint=Endpoint(ZipkinConfig.service_name),
)
trace.record(Annotation.string("http.path", request.path_info))
trace.record(Annotation.string("span.kind", "client"))
trace.record(Annotation.server_recv())
stack_trace(trace)
return trace
def log_response(trace, response):
trace.record(
Annotation.string("http.responsecode", "{0}".format(response.status_code))
)
trace.record(Annotation.server_send())
try:
zipkin_log(trace)
except Exception as err:
log.error("Error while sending trace: %s", trace)
def add_header_response(response):
trace = get_current_trace()
if trace:
response["Trace-Id"] = str(trace.trace_id)
def zk_middleware(get_response):
"""
Zipkin Middleware to add in the django settings.
Usage:
::
MIDDLEWARE = [
"zipkin.binding.django.middleware.zk_middleware",
...
]
"""
def middleware(request):
# Code to be executed for each request before
# the view (and later middleware) are called.
trace = init_trace(request)
response = get_response(request)
add_header_response(response)
log_response(trace, response)
local().reset()
return response
return middleware
def zk_slow_trace_middleware(get_response):
"""
Zipkin Middleware to trace slow query only added in the django settings.
Usage:
::
# Only send trace of query that take more than 1.5 seconds
ZIPKIN_SLOW_LOG_DURATION_EXCEED = 1.5
MIDDLEWARE = [
"zipkin.binding.django.middleware.zk_slow_trace_middleware",
...
]
"""
def middleware(request):
# Code to be executed for each request before
# the view (and later middleware) are called.
start = time.time()
trace = init_trace(request)
response = get_response(request)
duration = time.time() - start
add_header_response(response)
if duration >= settings.ZIPKIN_SLOW_LOG_DURATION_EXCEED:
log_response(trace, response)
local().reset()
return response
return middleware | zk | /zk-0.8.5.tar.gz/zk-0.8.5/zipkin/binding/django/middleware.py | middleware.py |
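# Hypothetical Django settings fragment combining the middlewares above. Use
# zk_middleware to trace every request, or zk_slow_trace_middleware together
# with ZIPKIN_SLOW_LOG_DURATION_EXCEED to report only requests slower than the
# given number of seconds. The rest of the middleware stack is illustrative.
ZIPKIN_SLOW_LOG_DURATION_EXCEED = 1.5     # seconds, read by the slow variant

MIDDLEWARE = [
    "zipkin.binding.django.middleware.zk_slow_trace_middleware",
    # "zipkin.binding.django.middleware.zk_middleware",   # trace everything instead
    "django.middleware.common.CommonMiddleware",
    # ... the rest of the project's middleware
]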
from __future__ import absolute_import
import time
import logging
from pyramid.tweens import INGRESS
from pyramid.settings import aslist
from zipkin import local
from zipkin.api import stack_trace
from zipkin.models import Trace, Annotation
from zipkin.util import int_or_none
from zipkin.client import log as zipkin_log
from zipkin.config import configure as configure_zk
log = logging.getLogger(__name__)
class AllTraceTweenView(object):
endpoint = None
@classmethod
def configure(cls, settings):
default_name = "Registry" # Keep compat with `registry.__name__` ?
name = settings.get("zipkin.service_name", default_name)
bindings = aslist(settings.get("zipkin.bindings", "requests celery xmlrpclib"))
cls.endpoint = configure_zk(
name,
settings,
use_requests="requests" in bindings,
use_celery="celery" in bindings,
use_xmlrpclib="xmlrpclib" in bindings,
)
def __init__(self, handler, registry):
self.handler = handler
self.trace = None
def track_start_request(self, request):
headers = request.headers
trace_name = request.path_qs
if request.matched_route:
# we only get a matched route if we've gone through the router.
trace_name = request.matched_route.pattern
trace = Trace(
request.method + " " + trace_name,
int_or_none(headers.get("X-B3-TraceId", None)),
int_or_none(headers.get("X-B3-SpanId", None)),
int_or_none(headers.get("X-B3-ParentSpanId", None)),
endpoint=self.endpoint,
)
if "X-B3-TraceId" not in headers:
log.info("no trace info from request: %s", request.path_qs)
if request.matchdict: # matchdict maybe none if no route is registered
for k, v in request.matchdict.items():
trace.record(Annotation.string("route.param.%s" % k, v))
trace.record(Annotation.string("http.path", request.path_qs))
log.info("new trace %r", trace.trace_id)
stack_trace(trace)
trace.record(Annotation.server_recv())
self.trace = trace
def track_end_request(self, request, response):
if self.trace:
self.trace.record(Annotation.server_send())
log.info("reporting trace %s", self.trace.name)
response.headers["Trace-Id"] = str(self.trace.trace_id)
zipkin_log(self.trace)
def __call__(self, request):
self.track_start_request(request)
response = None
try:
response = self.handler(request)
finally:
# request.response in case an exception is raised ?
self.track_end_request(request, response or request.response)
local().reset()
self.trace = None
return response or request.response
class SlowQueryTweenView(AllTraceTweenView):
max_duration = None
@classmethod
def configure(cls, settings):
super(SlowQueryTweenView, cls).configure(settings)
setting = settings.get("zipkin.slow_log_duration_exceed")
if setting is None:
log.error(
"Missing setting 'zipkin.slow_log_duration_exceed' %r",
list(settings.keys()),
)
return
try:
cls.max_duration = float(setting)
except ValueError:
log.error("Invalid setting 'zipkin.slow_log_duration_exceed'")
def __init__(self, handler, registry):
super(SlowQueryTweenView, self).__init__(handler, registry)
self.start = None
def track_start_request(self, request):
self.start = time.time()
super(SlowQueryTweenView, self).track_start_request(request)
def track_end_request(self, request, response):
if self.max_duration is None:
            # unconfigured, we don't care
return
if self.start:
duration = time.time() - self.start
if duration > self.max_duration:
super(SlowQueryTweenView, self).track_end_request(request, response)
else:
response.headers["Trace-Id"] = str(self.trace.trace_id)
def includeme(config):
"""Include the zipkin definitions"""
    # Attach the subscriber a couple of times; this allows logging to start as
    # early as possible. Later calls on the same request add more detail the
    # further we proceed through the stack (after authentication, after router, ...)
settings = config.registry.settings
tween_factory = settings.get("zipkin.tween_factory", "all")
assert tween_factory in ["all", "slow_query"]
if tween_factory == "all":
tween_factory = AllTraceTweenView
elif tween_factory == "slow_query":
tween_factory = SlowQueryTweenView
else:
log.error(
"Invalid value for settings 'zipkin.tween_factory', should be all or slow_query, not %s",
tween_factory,
)
return
tween_factory.configure(settings)
config.add_tween(
"{}.{}".format(tween_factory.__module__, tween_factory.__name__),
under=INGRESS,
) | zk | /zk-0.8.5.tar.gz/zk-0.8.5/zipkin/binding/pyramid/pyramidhook.py | pyramidhook.py |
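# Sketch of enabling the pyramid binding from application setup. The settings
# keys mirror what includeme() and the tween classes above read; the values
# are examples only. If the zipkin.binding.pyramid package __init__ does not
# re-export includeme(), include "zipkin.binding.pyramid.pyramidhook" instead.
def main(global_config, **settings):
    from pyramid.config import Configurator

    settings.setdefault("zipkin.service_name", "my-api")
    settings.setdefault("zipkin.bindings", "requests celery xmlrpclib")
    settings.setdefault("zipkin.tween_factory", "slow_query")        # or "all"
    settings.setdefault("zipkin.slow_log_duration_exceed", "1.0")    # seconds

    config = Configurator(settings=settings)
    config.include("zipkin.binding.pyramid")
    return config.make_wsgi_app()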
from __future__ import absolute_import
import re
import six.moves.xmlrpc_client as xmlrpclib
import logging
from zipkin import local
from zipkin.models import Annotation, Endpoint
from zipkin.util import hex_str
log = logging.getLogger(__name__)
_parse_method_name = re.compile(r"<methodName>([^<]*)</methodName>", re.I)
class MonkeyTransport(xmlrpclib.Transport):
"""Monkey patched version of xmlrpclib.Transport to plug zipkin in"""
__origin = xmlrpclib.Transport
def request(self, host, handler, request_body, verbose=0):
try:
https = isinstance(self, xmlrpclib.SafeTransport)
protocol = "https://" if https else "http://"
target = "%s%s%s" % (protocol, host, handler)
match = _parse_method_name.search(request_body)
method = match.group(1) if match else None
parent_trace = local().current
self._trace = parent_trace.child("xmlrpclib")
self._trace.record(Annotation.string("uri", target))
if method:
self._trace.record(Annotation.string("method", method))
self._trace.record(Annotation.server_recv())
except Exception as exc:
log.error(repr(exc))
try:
return self.__origin.request(self, host, handler, request_body, verbose)
finally:
try:
self._trace.record(Annotation.server_send())
except:
pass
def send_host(self, connection, host):
ret = self.__origin.send_host(self, connection, host)
try:
            forward = self._trace.child_noref("subservice")
connection.putheader("X-B3-TraceId", hex_str(forward.trace_id))
connection.putheader("X-B3-SpanId", hex_str(forward.span_id))
if forward.parent_span_id is not None:
connection.putheader(
"X-B3-ParentSpanId", hex_str(forward.parent_span_id)
)
finally:
return ret
def bind(endpoint=None):
log.info("Binding zipkin to xmlrpclib")
if not endpoint:
endpoint = Endpoint("xmlrpc")
xmlrpclib.Transport = MonkeyTransport
log.info("zipkin bound to xmlrpclib")
def unbind():
    xmlrpclib.Transport = MonkeyTransport._MonkeyTransport__origin  # class attribute is name-mangled
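# Sketch: install the monkey patch above, then use a plain ServerProxy as
# usual. While a trace is active, each call records a child "xmlrpclib" span
# and forwards the X-B3-* headers. The endpoint URL is a placeholder.
import six.moves.xmlrpc_client as xmlrpclib
from zipkin.binding.xmlrpclib.impl import bind

bind()                                    # replaces xmlrpclib.Transport
proxy = xmlrpclib.ServerProxy("http://rpc.example.com/RPC2")
# proxy.some.method(...)                  # traced when a current trace exists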
import errno
import logging
import socket
import struct
from io import BytesIO
from thriftpy2.protocol import TBinaryProtocolFactory
from thriftpy2.protocol.binary import write_message_begin, write_val
from thriftpy2.thrift import TClient, TMessageType, TType
from thriftpy2.transport import TFramedTransportFactory, TSocket, TTransportException
from ..client import Local
from ..util import base64_thrift_formatter
from .scribe import scribe_thrift
logger = logging.getLogger(__name__)
CONNECTION_RETRIES = [1, 10, 20, 50, 100, 200, 400, 1000]
try:
MSG_NOSIGNAL = socket.MSG_NOSIGNAL
except:
MSG_NOSIGNAL = 16384 # python2
class TNonBlockingSocket(TSocket):
def _init_sock(self):
super(TNonBlockingSocket, self)._init_sock()
# 1M sendq buffer
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024 * 1024)
self.sock.setblocking(False)
def open(self):
self._init_sock()
addr = self.unix_socket or (self.host, self.port)
status = self.sock.connect_ex(addr)
try:
Local.local_ip = self.sock.getsockname()[0]
except Exception:
pass
if status not in [errno.EINPROGRESS, errno.EALREADY]:
raise IOError(
"connection attempt on a non-clean socket", errno.errorcode[status]
)
def write(self, buff):
# First ensure our incoming end is always empty
self.read_all()
# Then actually try to write to socket
try:
# We are a library, we can't just set sighandlers. But we don't
# want SIGPIPE if peer has gone away either. We better set
# MSG_NOSIGNAL to avoid that.
            # If the peer has disconnected, an errno.EPIPE will be raised and
            # caught in the upper layer.
self.sock.sendall(buff, MSG_NOSIGNAL)
except socket.error as e:
if e.errno not in [
errno.EINPROGRESS, # Not connected yet
errno.EWOULDBLOCK,
]: # write buffer full
# In all other cases, raise.
raise
# If not yet connected or write buffer is full, silently drop.
def read_all(self):
"""
Flush incoming buffer
"""
try:
receiving = " "
while len(receiving) > 0: # socket.error.errno.EAGAIN will exit this
receiving = self.sock.recv(1024)
except socket.error as e:
# if EAGAIN or EWOULDBLOCK, then there is nothing to read
if e.errno not in [errno.EAGAIN, errno.EWOULDBLOCK]:
# Otherwise that's an error.
raise
return # No more data to read, or connection is not ready
def read(self, _):
"""
Mock response, we don't care about results. We never actually read
them. But we don't want client to wait for server to reply.
"""
buffer = BytesIO()
seq_id = 0 # Sequence id is never compared to message.
write_message_begin(buffer, "Log_result", TMessageType.REPLY, seq_id)
response = scribe_thrift.Scribe.Log_result(success=scribe_thrift.ResultCode.OK)
write_val(buffer, TType.STRUCT, response)
out = buffer.getvalue()
# Framed message, starts with length of message.
return struct.pack("!i", len(out)) + out
def make_client(
service,
host,
port,
proto_factory=TBinaryProtocolFactory(),
trans_factory=TFramedTransportFactory(),
socket_factory=TNonBlockingSocket,
socket_timeout=1000,
):
socket = socket_factory(host, port, socket_timeout=socket_timeout)
transport = trans_factory.get_transport(socket)
protocol = proto_factory.get_protocol(transport)
transport.open()
return TClient(service, protocol)
class Client(object):
host = None
port = 9410
_client = None
_connection_attempts = 0
_socket_factory = TNonBlockingSocket
_socket_timeout = 1000
@classmethod
def configure(cls, settings, prefix):
cls.host = settings.get(prefix + "collector")
if prefix + "collector.port" in settings:
cls.port = int(settings[prefix + "collector.port"])
if prefix + "transport.async" in settings:
if settings[prefix + "transport.async"].lower() == "false":
cls._socket_factory = TSocket
if prefix + "transport.socket_timeout" in settings:
cls._socket_timeout = int(settings[prefix + "transport.socket_timeout"])
@classmethod
def get_connection(cls):
if not cls._client:
cls._connection_attempts += 1
max_retries = CONNECTION_RETRIES[-1]
if (cls._connection_attempts > max_retries) and not (
(cls._connection_attempts % max_retries) == 0
):
return
if (cls._connection_attempts < max_retries) and (
cls._connection_attempts not in CONNECTION_RETRIES
):
return
try:
cls._client = make_client(
scribe_thrift.Scribe,
host=cls.host,
port=cls.port,
socket_factory=cls._socket_factory,
socket_timeout=cls._socket_timeout,
)
cls._connection_attempts = 0
except TTransportException:
cls._client = None
logger.error(
"Can't connect to zipkin collector %s:%d" % (cls.host, cls.port)
)
except Exception:
cls._client = None
logger.exception(
"Can't connect to zipkin collector %s:%d" % (cls.host, cls.port)
)
return cls._client
@classmethod
def log(cls, trace):
if not cls.host:
logger.debug("Zipkin tracing is disabled")
return
logger.info("logging trace %s", trace.trace_id)
unknown = (
"Unknown Exception while logging a trace on "
"zipkin collector %s:%d" % (cls.host, cls.port)
)
client = cls.get_connection()
if client:
try:
messages = [
base64_thrift_formatter(t, t.annotations) for t in trace.children()
]
log_entries = [
scribe_thrift.LogEntry("zipkin", message) for message in messages
]
except ValueError:
logger.exception("Error while serializing trace")
return
try:
client.Log(messages=log_entries)
except EOFError:
cls._client = None
logger.error(
"EOFError while logging a trace on zipkin "
"collector %s:%d" % (cls.host, cls.port)
)
except socket.error as err:
cls._client = None
if err.errno == errno.EPIPE:
logger.error(
"Broken pipe while logging a trace "
"on zipkin collector %s:%d",
cls.host,
cls.port,
)
else:
logger.exception(unknown)
except Exception:
cls._client = None
logger.exception(unknown)
else:
logger.warning("Can't log zipkin trace, not connected")
@classmethod
def disconnect(cls):
if cls._client:
cls._client.close() | zk | /zk-0.8.5.tar.gz/zk-0.8.5/zipkin/transport/scribeclient.py | scribeclient.py |
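# Sketch of configuring the scribe transport by hand. Client.configure() reads
# flat settings keyed by a prefix; the prefix used by the framework glue is not
# shown in this file, so "zipkin." below is only an example.
from zipkin.transport.scribeclient import Client

settings = {
    "zipkin.collector": "127.0.0.1",
    "zipkin.collector.port": "9410",
    "zipkin.transport.async": "true",             # "false" -> blocking TSocket
    "zipkin.transport.socket_timeout": "1000",    # milliseconds
}
Client.configure(settings, prefix="zipkin.")
# Traces can then be pushed with Client.log(trace).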
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev) | zk225-probability | /zk225_probability-0.1.tar.gz/zk225_probability-0.1/zk225_probability/Gaussiandistribution.py | Gaussiandistribution.py |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
"""
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
            string: characteristics of the Binomial
"""
return "mean {}, standard deviation {}, p {}, n {}".\
format(self.mean, self.stdev, self.p, self.n) | zk225-probability | /zk225_probability-0.1.tar.gz/zk225_probability-0.1/zk225_probability/Binomialdistribution.py | Binomialdistribution.py |
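Similarly, a minimal usage sketch for the Binomial class above, not part of the original package. It makes the same import assumption as the Gaussian sketch and assumes the Distribution base class initializes the data attribute; the 0/1 outcomes and the rounded numbers in the comments are illustrative only.

from zk225_probability import Binomial   # assumption: the package re-exports Binomial

binomial = Binomial(prob=0.4, size=20)
print(binomial)               # mean 8.0, standard deviation ~2.19, p 0.4, n 20
print(binomial.pdf(5))        # probability of exactly 5 successes in 20 trials

# Re-estimate p and n from observed 0/1 outcomes.
binomial.data = [0, 1, 1, 0, 1, 0, 1, 1, 0, 1]   # illustrative outcomes
binomial.replace_stats_with_data()
print(binomial.p, binomial.n)   # 0.6 10

# __add__ requires equal p and sums the number of trials.
combined = Binomial(0.25, 20) + Binomial(0.25, 60)
print(combined.n, combined.mean)   # 80 20.0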
__version__ = '1.0.0'
__token__ = ''
__api__ = 'http://www.jianyan360.com/openapi/zkapi'
version = 'zk2jy360 version ' + __version__
onReq = False
import sys
import os
import json
try:
import requests
onReq = True
except ImportError:
import warnings
warnings.warn("Python requests package required for zk2jy360.",ImportWarning)
def __sendToJy360(data):
postdata = {
'token':__token__
}
for record in data:
for i in data[record]:
postdata['%s[%d]'%(record,i)] = data[record][i]
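    # Descriptive note (added): the loops above flatten the per-field dicts into
    # indexed form fields such as postdata['realname[0]'], postdata['job_num[0]'],
    # ..., postdata['record_id[0]'], which appears to be the batch layout the
    # jianyan360 endpoint expects.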
try:
res = requests.post(url=__api__,data=postdata,timeout=60)
status_code = res.status_code
if status_code!=200:
return {'state':False,'msg':'Server response failed','code':status_code}
else:
ret = res.content
try:
ret = json.loads(ret)
if ret['state']==1:
return {'state':True}
else:
return {'state':False,'msg':ret['msg']}
            except Exception as e:
                return {'state':False,'msg':'Server return code exception'}
    except Exception as e:
        return {'state':False,'msg':'Server response failed'}
def submit(attnd=None):
"""
Upload attendance record
"""
    if attnd is None or not isinstance(attnd, list):
return {'state':False,'msg':'Abnormal attendance records'}
    if len(attnd)==0:
return {'state':True}
__data = {
'realname':{},
'job_num':{},
'device_no':{},
'device_name':{},
'add_at':{},
'record_id':{}
}
i = 0
for record in attnd:
if type(record)!=dict:
continue
"""
Verify data validity
"""
record_keys = record.keys()
_verify_keys_ = ['ename','pin','alias','sn','checktime','id']
for _verify_ in _verify_keys_:
if _verify_ not in record_keys:
                return {'state':False,'msg':'Abnormal attendance records, missing field `%s`'%(_verify_)}
"""
Processing data
"""
__data['realname'][i] = record['ename']
__data['job_num'][i] = record['pin']
__data['device_no'][i] = record['sn']
__data['device_name'][i] = record['alias']
__data['add_at'][i] = record['checktime']
__data['record_id'][i] = record['id']
i += 1
return __sendToJy360(__data)
if __name__ == "__main__":
"""
tester = [
{
'ename':'Zhang San',
'pin':'100001',
'alias':'XXXX attendance device',
'sn':'201565894816',
'checktime':'2019-02-13 08:54:53',
'id':1
},
{
'ename':'Li si',
'pin':'100002',
'alias':'XXXX attendance device',
'sn':'201565894816',
'checktime':'2019-02-13 08:55:02',
'id':2
}
]
__token__ = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
result = submit(tester)
    print(result)
"""
pass | zk2jy360 | /zk2jy360-1.0.0.tar.gz/zk2jy360-1.0.0/zk2jy360.py | zk2jy360.py |
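Finally, a hedged sketch of how the submit helper above might be driven from a separate script, mirroring the commented tester in the __main__ block. The token, device, and employee values are placeholders, and it assumes the module is importable as zk2jy360.

import zk2jy360

zk2jy360.__token__ = 'your-jianyan360-open-api-token'   # placeholder token
records = [{
    'ename': 'Zhang San',                     # employee name
    'pin': '100001',                          # job number
    'alias': 'Front gate attendance device',  # device name
    'sn': '201565894816',                     # device serial number
    'checktime': '2019-02-13 08:54:53',       # punch time
    'id': 1,                                  # local record id
}]

result = zk2jy360.submit(records)
if result['state']:
    print('upload ok')
else:
    print('upload failed:', result.get('msg'))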