# File: CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/simulation/renderer.py
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for viewing Physics objects in the DM Control viewer."""
import abc
import enum
import sys
from typing import Dict, Optional
import numpy as np
from dm_control.mujoco import wrapper
from d4rl_alt.kitchen.adept_envs.simulation import module
# Default window dimensions.
DEFAULT_WINDOW_WIDTH = 1024
DEFAULT_WINDOW_HEIGHT = 768
DEFAULT_WINDOW_TITLE = "MuJoCo Viewer"
_MAX_RENDERBUFFER_SIZE = 2048
class RenderMode(enum.Enum):
"""Rendering modes for offscreen rendering."""
RGB = 0
DEPTH = 1
SEGMENTATION = 2
class Renderer(abc.ABC):
"""Base interface for rendering simulations."""
def __init__(self, camera_settings: Optional[Dict] = None):
self._camera_settings = camera_settings
@abc.abstractmethod
def close(self):
"""Cleans up any resources being used by the renderer."""
@abc.abstractmethod
def render_to_window(self):
"""Renders the simulation to a window."""
@abc.abstractmethod
def render_offscreen(
self,
width: int,
height: int,
mode: RenderMode = RenderMode.RGB,
camera_id: int = -1,
) -> np.ndarray:
"""Renders the camera view as a NumPy array of pixels.
Args:
width: The viewport width (pixels).
height: The viewport height (pixels).
mode: The rendering mode.
camera_id: The ID of the camera to render from. By default, uses
the free camera.
Returns:
A NumPy array of the pixels.
"""
def _update_camera(self, camera):
"""Updates the given camera to move to the initial settings."""
if not self._camera_settings:
return
distance = self._camera_settings.get("distance")
azimuth = self._camera_settings.get("azimuth")
elevation = self._camera_settings.get("elevation")
lookat = self._camera_settings.get("lookat")
if distance is not None:
camera.distance = distance
if azimuth is not None:
camera.azimuth = azimuth
if elevation is not None:
camera.elevation = elevation
if lookat is not None:
camera.lookat[:] = lookat
class MjPyRenderer(Renderer):
"""Class for rendering mujoco_py simulations."""
def __init__(self, sim, **kwargs):
assert isinstance(
sim, module.get_mujoco_py().MjSim
), "MjPyRenderer takes a mujoco_py MjSim object."
super().__init__(**kwargs)
self._sim = sim
self._onscreen_renderer = None
self._offscreen_renderer = None
def render_to_window(self):
"""Renders the simulation to a window."""
if not self._onscreen_renderer:
self._onscreen_renderer = module.get_mujoco_py().MjViewer(self._sim)
self._update_camera(self._onscreen_renderer.cam)
self._onscreen_renderer.render()
def render_offscreen(
self,
width: int,
height: int,
mode: RenderMode = RenderMode.RGB,
camera_id: int = -1,
) -> np.ndarray:
"""Renders the camera view as a NumPy array of pixels.
Args:
width: The viewport width (pixels).
height: The viewport height (pixels).
mode: The rendering mode.
camera_id: The ID of the camera to render from. By default, uses
the free camera.
Returns:
A NumPy array of the pixels.
"""
if not self._offscreen_renderer:
self._offscreen_renderer = module.get_mujoco_py().MjRenderContextOffscreen(
self._sim
)
# Update the camera configuration for the free-camera.
if camera_id == -1:
self._update_camera(self._offscreen_renderer.cam)
self._offscreen_renderer.render(width, height, camera_id)
if mode == RenderMode.RGB:
data = self._offscreen_renderer.read_pixels(width, height, depth=False)
# Original image is upside-down, so flip it
return data[::-1, :, :]
elif mode == RenderMode.DEPTH:
data = self._offscreen_renderer.read_pixels(width, height, depth=True)[1]
# Original image is upside-down, so flip it
return data[::-1, :]
else:
raise NotImplementedError(mode)
def close(self):
"""Cleans up any resources being used by the renderer."""
class DMRenderer(Renderer):
"""Class for rendering DM Control Physics objects."""
def __init__(self, physics, clear_geom_group_0=False, camera_select_next=False, **kwargs):
assert isinstance(
physics, module.get_dm_mujoco().Physics
), "DMRenderer takes a DM Control Physics object."
super().__init__(**kwargs)
self._physics = physics
self._window = None
self.clear_geom_group_0 = clear_geom_group_0
self.camera_select_next = camera_select_next
        # Set the camera to look at the center of the geoms (mujoco_py does
        # this automatically).
        if not self._camera_settings:
            self._camera_settings = {}
        if "lookat" not in self._camera_settings:
self._camera_settings["lookat"] = [
np.median(self._physics.data.geom_xpos[:, i]) for i in range(3)
]
def render_to_window(self):
"""Renders the Physics object to a window.
The window continuously renders the Physics in a separate thread.
This function is a no-op if the window was already created.
"""
if not self._window:
self._window = DMRenderWindow(clear_geom_group_0=self.clear_geom_group_0, camera_select_next=self.camera_select_next)
self._window.load_model(self._physics)
self._update_camera(self._window.camera)
            # Useful for tuning camera parameters:
# print(self._window.camera.distance)
# print(self._window.camera.lookat)
# print(self._window.camera.azimuth)
# print(self._window.camera.elevation)
# print()
# print()
# print()
self._window.run_frame()
def render_offscreen(
self,
width: int,
height: int,
mode: RenderMode = RenderMode.RGB,
camera_id: int = -1,
) -> np.ndarray:
"""Renders the camera view as a NumPy array of pixels.
Args:
width: The viewport width (pixels).
height: The viewport height (pixels).
mode: The rendering mode.
camera_id: The ID of the camera to render from. By default, uses
the free camera.
Returns:
A NumPy array of the pixels.
"""
mujoco = module.get_dm_mujoco()
# TODO(michaelahn): Consider caching the camera.
camera = mujoco.Camera(
physics=self._physics, height=height, width=width, camera_id=camera_id
)
# Update the camera configuration for the free-camera.
if camera_id == -1:
self._update_camera(
camera._render_camera, # pylint: disable=protected-access
)
scene_option = wrapper.MjvOption()
if self.clear_geom_group_0:
scene_option.geomgroup[0] = 0
image = camera.render(
depth=(mode == RenderMode.DEPTH),
segmentation=(mode == RenderMode.SEGMENTATION),
scene_option=scene_option,
)
camera._scene.free() # pylint: disable=protected-access
return image
def close(self):
"""Cleans up any resources being used by the renderer."""
if self._window:
self._window.close()
self._window = None
class DMRenderWindow:
"""Class that encapsulates a graphical window."""
def __init__(
self,
width: int = DEFAULT_WINDOW_WIDTH,
height: int = DEFAULT_WINDOW_HEIGHT,
title: str = DEFAULT_WINDOW_TITLE,
clear_geom_group_0: bool = False,
camera_select_next: bool = False,
):
"""Creates a graphical render window.
Args:
width: The width of the window.
height: The height of the window.
title: The title of the window.
"""
dmv = module.get_dm_viewer()
self._viewport = dmv.renderer.Viewport(width, height)
self._window = dmv.gui.RenderWindow(width, height, title)
self._viewer = dmv.viewer.Viewer(
self._viewport, self._window.mouse, self._window.keyboard
)
self._draw_surface = None
self._renderer = dmv.renderer.NullRenderer()
self.camera_select_next = camera_select_next
if clear_geom_group_0:
# for robosuite this gets rid of extraneous collision/visual meshes
self._viewer._render_settings.geom_groups[0] = 0
@property
def camera(self):
return self._viewer._camera._camera
def close(self):
self._viewer.deinitialize()
self._renderer.release()
self._draw_surface.free()
self._window.close()
def load_model(self, physics):
"""Loads the given Physics object to render."""
self._viewer.deinitialize()
self._draw_surface = module.get_dm_render().Renderer(
max_width=_MAX_RENDERBUFFER_SIZE, max_height=_MAX_RENDERBUFFER_SIZE
)
self._renderer = module.get_dm_viewer().renderer.OffScreenRenderer(
physics.model, self._draw_surface
)
self._viewer.initialize(physics, self._renderer, touchpad=False)
if self.camera_select_next:
#for robosuite we want to shift the camera idx by 1
self._viewer._camera_select.select_next()
def run_frame(self):
"""Renders one frame of the simulation.
NOTE: This is extremely slow at the moment.
"""
glfw = module.get_dm_viewer().gui.glfw_gui.glfw
glfw_window = self._window._context.window
if glfw.window_should_close(glfw_window):
sys.exit(0)
self._viewport.set_size(*self._window.shape)
self._viewer.render()
pixels = self._renderer.pixels
with self._window._context.make_current() as ctx:
ctx.call(self._window._update_gui_on_render_thread, glfw_window, pixels)
self._window._mouse.process_events()
self._window._keyboard.process_events()
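# Usage sketch (added for illustration; not part of the original file). A
# hedged example of driving the renderers above, assuming a mujoco_py MjSim
# instance named `sim` already exists:
#
#   renderer = MjPyRenderer(sim, camera_settings={"distance": 2.5, "azimuth": 90.0})
#   rgb = renderer.render_offscreen(640, 480, mode=RenderMode.RGB)
#   depth = renderer.render_offscreen(640, 480, mode=RenderMode.DEPTH)
#   renderer.close()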
# File: CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/simulation/sim_robot.py
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for loading MuJoCo models."""
import os
from typing import Dict, Optional
from d4rl_alt.kitchen.adept_envs.simulation import module
from d4rl_alt.kitchen.adept_envs.simulation.renderer import (
DMRenderer,
MjPyRenderer,
RenderMode,
)
import numpy as np
class MujocoSimRobot:
"""Class that encapsulates a MuJoCo simulation.
This class exposes methods that are agnostic to the simulation backend.
Two backends are supported:
1. mujoco_py - MuJoCo v1.50
2. dm_control - MuJoCo v2.00
"""
def __init__(
self,
model_file: str,
use_dm_backend: bool = False,
camera_settings: Optional[Dict] = None,
):
"""Initializes a new simulation.
Args:
model_file: The MuJoCo XML model file to load.
use_dm_backend: If True, uses DM Control's Physics (MuJoCo v2.0) as
the backend for the simulation. Otherwise, uses mujoco_py (MuJoCo
v1.5) as the backend.
camera_settings: Settings to initialize the renderer's camera. This
can contain the keys `distance`, `azimuth`, and `elevation`.
"""
self._use_dm_backend = use_dm_backend
if not os.path.isfile(model_file):
raise ValueError(
"[MujocoSimRobot] Invalid model file path: {}".format(model_file)
)
if self._use_dm_backend:
dm_mujoco = module.get_dm_mujoco()
if model_file.endswith(".mjb"):
self.sim = dm_mujoco.Physics.from_binary_path(model_file)
else:
self.sim = dm_mujoco.Physics.from_xml_path(model_file)
self.model = self.sim.model
self._patch_mjlib_accessors(self.model, self.sim.data)
self.renderer = DMRenderer(self.sim, camera_settings=camera_settings)
else: # Use mujoco_py
mujoco_py = module.get_mujoco_py()
self.model = mujoco_py.load_model_from_path(model_file)
self.sim = mujoco_py.MjSim(self.model)
self.renderer = MjPyRenderer(self.sim, camera_settings=camera_settings)
self.data = self.sim.data
def close(self):
"""Cleans up any resources being used by the simulation."""
self.renderer.close()
def save_binary(self, path: str):
"""Saves the loaded model to a binary .mjb file."""
if os.path.exists(path):
raise ValueError("[MujocoSimRobot] Path already exists: {}".format(path))
if not path.endswith(".mjb"):
path = path + ".mjb"
if self._use_dm_backend:
self.model.save_binary(path)
else:
with open(path, "wb") as f:
f.write(self.model.get_mjb())
def get_mjlib(self):
"""Returns an object that exposes the low-level MuJoCo API."""
if self._use_dm_backend:
return module.get_dm_mujoco().wrapper.mjbindings.mjlib
else:
return module.get_mujoco_py_mjlib()
def _patch_mjlib_accessors(self, model, data):
"""Adds accessors to the DM Control objects to support mujoco_py API."""
assert self._use_dm_backend
mjlib = self.get_mjlib()
def name2id(type_name, name):
obj_id = mjlib.mj_name2id(
model.ptr, mjlib.mju_str2Type(type_name.encode()), name.encode()
)
if obj_id < 0:
raise ValueError('No {} with name "{}" exists.'.format(type_name, name))
return obj_id
def id2name(type_name, id):
obj_name = mjlib.mj_id2name(
model.ptr, mjlib.mju_str2Type(type_name.encode()), id
)
return obj_name
if not hasattr(model, "body_name2id"):
model.body_name2id = lambda name: name2id("body", name)
if not hasattr(model, "geom_name2id"):
model.geom_name2id = lambda name: name2id("geom", name)
if not hasattr(model, "geom_id2name"):
model.geom_id2name = lambda id: id2name("geom", id)
if not hasattr(model, "site_name2id"):
model.site_name2id = lambda name: name2id("site", name)
if not hasattr(model, "joint_name2id"):
model.joint_name2id = lambda name: name2id("joint", name)
if not hasattr(model, "actuator_name2id"):
model.actuator_name2id = lambda name: name2id("actuator", name)
if not hasattr(model, "camera_name2id"):
model.camera_name2id = lambda name: name2id("camera", name)
if not hasattr(model, "sensor_name2id"):
model.sensor_name2id = lambda name: name2id("sensor", name)
if not hasattr(data, "body_xpos"):
data.body_xpos = data.xpos
if not hasattr(data, "body_xquat"):
data.body_xquat = data.xquat
if not hasattr(data, "get_body_xpos"):
data.get_body_xpos = lambda name: data.body_xpos[model.body_name2id(name)]
if not hasattr(data, "get_body_xquat"):
data.get_body_xquat = lambda name: data.body_xquat[model.body_name2id(name)]
if not hasattr(data, "get_body_xmat"):
data.get_body_xmat = lambda name: data.xmat[
model.body_name2id(name)
].reshape(3, 3)
if not hasattr(data, "get_geom_xpos"):
data.get_geom_xpos = lambda name: data.geom_xpos[model.geom_name2id(name)]
if not hasattr(data, "get_geom_xquat"):
data.get_geom_xquat = lambda name: data.geom_xquat[model.geom_name2id(name)]
if not hasattr(data, "get_joint_qpos"):
data.get_joint_qpos = lambda name: data.qpos[model.joint_name2id(name)]
if not hasattr(data, "set_joint_qpos"):
def set_joint_qpos(name, value):
data.qpos[
model.joint_name2id(name) : model.joint_name2id(name)
+ value.shape[0]
] = value
data.set_joint_qpos = lambda name, value: set_joint_qpos(name, value)
if not hasattr(data, "get_site_xmat"):
data.get_site_xmat = lambda name: data.site_xmat[
model.site_name2id(name)
].reshape(3, 3)
if not hasattr(model, "get_joint_qpos_addr"):
model.get_joint_qpos_addr = lambda name: model.joint_name2id(name)
if not hasattr(model, "get_joint_qvel_addr"):
model.get_joint_qvel_addr = lambda name: model.joint_name2id(name)
if not hasattr(data, "get_geom_xmat"):
data.get_geom_xmat = lambda name: data.geom_xmat[
model.geom_name2id(name)
].reshape(3, 3)
if not hasattr(data, "get_mocap_pos"):
data.get_mocap_pos = lambda name: data.mocap_pos[
model.body_mocapid[model.body_name2id(name)]
]
if not hasattr(data, "get_mocap_quat"):
data.get_mocap_quat = lambda name: data.mocap_quat[
model.body_mocapid[model.body_name2id(name)]
]
if not hasattr(data, "set_mocap_pos"):
def set_mocap_pos(name, value):
data.mocap_pos[model.body_mocapid[model.body_name2id(name)]] = value
data.set_mocap_pos = lambda name, value: set_mocap_pos(name, value)
if not hasattr(data, "set_mocap_quat"):
def set_mocap_quat(name, value):
data.mocap_quat[model.body_mocapid[model.body_name2id(name)]] = value
data.set_mocap_quat = lambda name, value: set_mocap_quat(name, value)
def site_jacp():
jacps = np.zeros((model.nsite, 3 * model.nv))
for i, jacp in enumerate(jacps):
jacp_view = jacp.reshape(3, -1)
mjlib.mj_jacSite(model.ptr, data.ptr, jacp_view, None, i)
return jacps
def site_xvelp():
jacp = site_jacp().reshape((model.nsite, 3, model.nv))
xvelp = np.dot(jacp, data.qvel)
return xvelp
def site_jacr():
jacrs = np.zeros((model.nsite, 3 * model.nv))
for i, jacr in enumerate(jacrs):
jacr_view = jacr.reshape(3, -1)
mjlib.mj_jacSite(model.ptr, data.ptr, None, jacr_view, i)
return jacrs
def site_xvelr():
jacr = site_jacr().reshape((model.nsite, 3, model.nv))
xvelr = np.dot(jacr, data.qvel)
return xvelr
if not hasattr(data, "site_xvelp"):
data.site_xvelp = site_xvelp()
if not hasattr(data, "site_xvelr"):
data.site_xvelr = site_xvelr()
if not hasattr(data, "get_site_jacp"):
data.get_site_jacp = lambda name: site_jacp()[
model.site_name2id(name)
].reshape(3, model.nv)
if not hasattr(data, "get_site_jacr"):
data.get_site_jacr = lambda name: site_jacr()[
model.site_name2id(name)
].reshape(3, model.nv)
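# Usage sketch (added for illustration; the model path is a placeholder).
# MujocoSimRobot hides the backend choice behind one interface:
#
#   robot = MujocoSimRobot("path/to/model.xml", use_dm_backend=True,
#                          camera_settings={"distance": 2.5, "azimuth": 90.0})
#   pixels = robot.renderer.render_offscreen(640, 480, mode=RenderMode.RGB)
#   qpos = robot.data.qpos  # same accessors regardless of backend
#   robot.close()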
# File: CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/utils/__init__.py
# (empty file)
# File: CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/utils/config.py
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
try:
import cElementTree as ET
except ImportError:
try:
        # Python 2.5 needs to import a different module
import xml.etree.cElementTree as ET
except ImportError:
        raise ImportError("Failed to import cElementTree from any known place")
CONFIG_XML_DATA = """
<config name='dClaw1 dClaw2'>
<limits low="1 2" high="2 3"/>
<scale joint="10 20"/>
<data type="test1 test2"/>
</config>
"""
# Read config from root
def read_config_from_node(root_node, parent_name, child_name, dtype=int):
# find parent
parent_node = root_node.find(parent_name)
    if parent_node is None:
quit("Parent %s not found" % parent_name)
# get child data
child_data = parent_node.get(child_name)
    if child_data is None:
quit("Child %s not found" % child_name)
config_val = np.array(child_data.split(), dtype=dtype)
return config_val
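# Worked example (added): with the CONFIG_XML_DATA defined above and
# get_config_root_node (defined below),
#   root, _ = get_config_root_node(config_file_data=CONFIG_XML_DATA)
#   read_config_from_node(root, "limits", "low", float)   # -> array([1., 2.])
#   read_config_from_node(root, "scale", "joint", float)  # -> array([10., 20.])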
# get config from file or string
def get_config_root_node(config_file_name=None, config_file_data=None):
try:
# get root
if config_file_data is None:
config_file_content = open(config_file_name, "r")
config = ET.parse(config_file_content)
root_node = config.getroot()
else:
root_node = ET.fromstring(config_file_data)
# get root data
root_data = root_node.get("name")
root_name = np.array(root_data.split(), dtype=str)
    except Exception:
quit("ERROR: Unable to process config file %s" % config_file_name)
return root_node, root_name
# Read config from config_file
def read_config_from_xml(config_file_name, parent_name, child_name, dtype=int):
root_node, root_name = get_config_root_node(config_file_name=config_file_name)
return read_config_from_node(root_node, parent_name, child_name, dtype)
# tests
if __name__ == "__main__":
print("Read config and parse -------------------------")
root, root_name = get_config_root_node(config_file_data=CONFIG_XML_DATA)
print("Root:name \t", root_name)
print("limit:low \t", read_config_from_node(root, "limits", "low", float))
print("limit:high \t", read_config_from_node(root, "limits", "high", float))
print("scale:joint \t", read_config_from_node(root, "scale", "joint", float))
print("data:type \t", read_config_from_node(root, "data", "type", str))
    # read straight from xml (dump the XML data as duh.xml for this test)
root, root_name = get_config_root_node(config_file_name="duh.xml")
print("Read from xml --------------------------------")
print("limit:low \t", read_config_from_xml("duh.xml", "limits", "low", float))
print("limit:high \t", read_config_from_xml("duh.xml", "limits", "high", float))
print("scale:joint \t", read_config_from_xml("duh.xml", "scale", "joint", float))
print("data:type \t", read_config_from_xml("duh.xml", "data", "type", str))
# File: CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/utils/configurable.py
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import os
from gym.envs.registration import registry as gym_registry
def import_class_from_path(class_path):
"""Given 'path.to.module:object', imports and returns the object."""
module_path, class_name = class_path.split(":")
module = importlib.import_module(module_path)
return getattr(module, class_name)
class ConfigCache(object):
"""Configuration class to store constructor arguments.
This is used to store parameters to pass to Gym environments at init time.
"""
def __init__(self):
self._configs = {}
self._default_config = {}
def set_default_config(self, config):
"""Sets the default configuration used for all RobotEnv envs."""
self._default_config = dict(config)
def set_config(self, cls_or_env_id, config):
"""Sets the configuration for the given environment within a context.
Args:
cls_or_env_id (Class | str): A class type or Gym environment ID to
configure.
config (dict): The configuration parameters.
"""
config_key = self._get_config_key(cls_or_env_id)
self._configs[config_key] = dict(config)
def get_config(self, cls_or_env_id):
"""Returns the configuration for the given env name.
Args:
cls_or_env_id (Class | str): A class type or Gym environment ID to
get the configuration of.
"""
config_key = self._get_config_key(cls_or_env_id)
config = dict(self._default_config)
config.update(self._configs.get(config_key, {}))
return config
def clear_config(self, cls_or_env_id):
"""Clears the configuration for the given ID."""
config_key = self._get_config_key(cls_or_env_id)
if config_key in self._configs:
del self._configs[config_key]
def _get_config_key(self, cls_or_env_id):
if inspect.isclass(cls_or_env_id):
return cls_or_env_id
env_id = cls_or_env_id
assert isinstance(env_id, str)
if env_id not in gym_registry.env_specs:
raise ValueError("Unregistered environment name {}.".format(env_id))
entry_point = gym_registry.env_specs[env_id]._entry_point
if callable(entry_point):
return entry_point
else:
return import_class_from_path(entry_point)
# Global robot config.
global_config = ConfigCache()
def configurable(config_id=None, pickleable=False, config_cache=global_config):
"""Class decorator to allow injection of constructor arguments.
This allows constructor arguments to be passed via ConfigCache.
Example usage:
@configurable()
class A:
        def __init__(self, b=None, c=2, d='Wow'):
...
global_config.set_config(A, {'b': 10, 'c': 20})
a = A() # b=10, c=20, d='Wow'
a = A(b=30) # b=30, c=20, d='Wow'
Args:
config_id: ID of the config to use. This defaults to the class type.
pickleable: Whether this class is pickleable. If true, causes the pickle
state to include the config and constructor arguments.
config_cache: The ConfigCache to use to read config data from. Uses
the global ConfigCache by default.
"""
def cls_decorator(cls):
assert inspect.isclass(cls)
# Overwrite the class constructor to pass arguments from the config.
base_init = cls.__init__
def __init__(self, *args, **kwargs):
config = config_cache.get_config(config_id or type(self))
# Allow kwargs to override the config.
kwargs = {**config, **kwargs}
# print('Initializing {} with params: {}'.format(type(self).__name__,
# kwargs))
if pickleable:
self._pkl_env_args = args
self._pkl_env_kwargs = kwargs
base_init(self, *args, **kwargs)
cls.__init__ = __init__
# If the class is pickleable, overwrite the state methods to save
# the constructor arguments and config.
if pickleable:
# Use same pickle keys as gym.utils.ezpickle for backwards compat.
PKL_ARGS_KEY = "_ezpickle_args"
PKL_KWARGS_KEY = "_ezpickle_kwargs"
def __getstate__(self):
return {
PKL_ARGS_KEY: self._pkl_env_args,
PKL_KWARGS_KEY: self._pkl_env_kwargs,
}
cls.__getstate__ = __getstate__
def __setstate__(self, data):
saved_args = data[PKL_ARGS_KEY]
saved_kwargs = data[PKL_KWARGS_KEY]
# Override the saved state with the current config.
config = config_cache.get_config(config_id or type(self))
# Allow kwargs to override the config.
kwargs = {**saved_kwargs, **config}
inst = type(self)(*saved_args, **kwargs)
self.__dict__.update(inst.__dict__)
cls.__setstate__ = __setstate__
return cls
return cls_decorator
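# Usage sketch (added for illustration; the class and values are hypothetical):
#
#   @configurable(pickleable=True)
#   class MyEnv:
#       def __init__(self, b=None, c=2):
#           self.b, self.c = b, c
#
#   global_config.set_config(MyEnv, {"b": 10})
#   env = MyEnv()      # b=10 injected from the config, c=2 from the default
#   env2 = MyEnv(c=5)  # explicit kwargs still override the config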
# File: CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/utils/constants.py
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
ENVS_ROOT_PATH = os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../")
)
MODELS_PATH = os.path.abspath(os.path.join(ENVS_ROOT_PATH, "../adept_models/"))
# File: CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/utils/parse_demos.py
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import pickle
import time as timer
import adept_envs
import click
import gym
import numpy as np
import skvideo.io
from mjrl.utils.gym_env import GymEnv
from parse_mjl import parse_mjl_logs, viz_parsed_mjl_logs
# headless renderer
render_buffer = [] # rendering buffer
def viewer(
env,
mode="initialize",
filename="video",
frame_size=(640, 480),
camera_id=0,
render=None,
):
if render == "onscreen":
env.mj_render()
elif render == "offscreen":
global render_buffer
if mode == "initialize":
render_buffer = []
mode = "render"
if mode == "render":
curr_frame = env.render(mode="rgb_array")
render_buffer.append(curr_frame)
if mode == "save":
skvideo.io.vwrite(filename, np.asarray(render_buffer))
print("\noffscreen buffer saved", filename)
elif render == "None":
pass
else:
print("unknown render: ", render)
# view demos (physics ignored)
def render_demos(env, data, filename="demo_rendering.mp4", render=None):
FPS = 30
render_skip = max(
1, round(1.0 / (FPS * env.sim.model.opt.timestep * env.frame_skip))
)
t0 = timer.time()
viewer(env, mode="initialize", render=render)
for i_frame in range(data["ctrl"].shape[0]):
env.sim.data.qpos[:] = data["qpos"][i_frame].copy()
env.sim.data.qvel[:] = data["qvel"][i_frame].copy()
env.sim.forward()
if i_frame % render_skip == 0:
viewer(env, mode="render", render=render)
print(i_frame, end=", ", flush=True)
viewer(env, mode="save", filename=filename, render=render)
print("time taken = %f" % (timer.time() - t0))
# playback demos and get data(physics respected)
def gather_training_data(env, data, filename="demo_playback.mp4", render=None):
env = env.env
FPS = 30
render_skip = max(
1, round(1.0 / (FPS * env.sim.model.opt.timestep * env.frame_skip))
)
t0 = timer.time()
# initialize
env.reset()
init_qpos = data["qpos"][0].copy()
init_qvel = data["qvel"][0].copy()
act_mid = env.act_mid
act_rng = env.act_amp
# prepare env
env.sim.data.qpos[:] = init_qpos
env.sim.data.qvel[:] = init_qvel
env.sim.forward()
viewer(env, mode="initialize", render=render)
# step the env and gather data
path_obs = None
for i_frame in range(data["ctrl"].shape[0] - 1):
# Reset every time step
# if i_frame % 1 == 0:
# qp = data['qpos'][i_frame].copy()
# qv = data['qvel'][i_frame].copy()
# env.sim.data.qpos[:] = qp
# env.sim.data.qvel[:] = qv
# env.sim.forward()
obs = env._get_obs()
# Construct the action
# ctrl = (data['qpos'][i_frame + 1][:9] - obs[:9]) / (env.skip * env.model.opt.timestep)
ctrl = (data["ctrl"][i_frame] - obs[:9]) / (env.skip * env.model.opt.timestep)
act = (ctrl - act_mid) / act_rng
act = np.clip(act, -0.999, 0.999)
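        # Note (added): this inverts the environment's action scaling, which
        # (under the usual adept_envs convention) maps a normalized action
        # a in [-1, 1] to ctrl = act_mid + a * act_rng; clipping to +/-0.999
        # keeps the action strictly inside the valid range.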
next_obs, reward, done, env_info = env.step(act)
if path_obs is None:
path_obs = obs
path_act = act
else:
path_obs = np.vstack((path_obs, obs))
path_act = np.vstack((path_act, act))
# render when needed to maintain FPS
if i_frame % render_skip == 0:
viewer(env, mode="render", render=render)
print(i_frame, end=", ", flush=True)
# finalize
if render:
viewer(env, mode="save", filename=filename, render=render)
t1 = timer.time()
print("time taken = %f" % (t1 - t0))
# note that <init_qpos, init_qvel> are one step away from <path_obs[0], path_act[0]>
return path_obs, path_act, init_qpos, init_qvel
# MAIN =========================================================
@click.command(help="parse tele-op demos")
@click.option("--env", "-e", type=str, help="gym env name", required=True)
@click.option(
"--demo_dir", "-d", type=str, help="directory with tele-op logs", required=True
)
@click.option(
"--skip", "-s", type=int, help="number of frames to skip (1:no skip)", default=1
)
@click.option("--graph", "-g", type=bool, help="plot logs", default=False)
@click.option("--save_logs", "-l", type=bool, help="save logs", default=False)
@click.option("--view", "-v", type=str, help="render/playback", default="render")
@click.option("--render", "-r", type=str, help="onscreen/offscreen", default="onscreen")
def main(env, demo_dir, skip, graph, save_logs, view, render):
gym_env = gym.make(env)
paths = []
print("Scanning demo_dir: " + demo_dir + "=========")
for ind, file in enumerate(glob.glob(demo_dir + "*.mjl")):
# process logs
print("processing: " + file, end=": ")
data = parse_mjl_logs(file, skip)
print("log duration %0.2f" % (data["time"][-1] - data["time"][0]))
# plot logs
if graph:
print("plotting: " + file)
viz_parsed_mjl_logs(data)
# save logs
if save_logs:
pickle.dump(data, open(file[:-4] + ".pkl", "wb"))
# render logs to video
if view == "render":
render_demos(
gym_env,
data,
filename=data["logName"][:-4] + "_demo_render.mp4",
render=render,
)
# playback logs and gather data
elif view == "playback":
try:
obs, act, init_qpos, init_qvel = gather_training_data(
gym_env,
data,
filename=data["logName"][:-4] + "_playback.mp4",
render=render,
)
except Exception as e:
print(e)
continue
path = {
"observations": obs,
"actions": act,
"goals": obs,
"init_qpos": init_qpos,
"init_qvel": init_qvel,
}
paths.append(path)
# accept = input('accept demo?')
# if accept == 'n':
# continue
pickle.dump(path, open(demo_dir + env + str(ind) + "_path.pkl", "wb"))
print(demo_dir + env + file + "_path.pkl")
if __name__ == "__main__":
main()
# File: CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/utils/quatmath.py
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
# For testing whether a number is close to zero
_FLOAT_EPS = np.finfo(np.float64).eps
_EPS4 = _FLOAT_EPS * 4.0
def mulQuat(qa, qb):
res = np.zeros(4)
res[0] = qa[0] * qb[0] - qa[1] * qb[1] - qa[2] * qb[2] - qa[3] * qb[3]
res[1] = qa[0] * qb[1] + qa[1] * qb[0] + qa[2] * qb[3] - qa[3] * qb[2]
res[2] = qa[0] * qb[2] - qa[1] * qb[3] + qa[2] * qb[0] + qa[3] * qb[1]
res[3] = qa[0] * qb[3] + qa[1] * qb[2] - qa[2] * qb[1] + qa[3] * qb[0]
return res
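# Worked example (added): the identity quaternion [1, 0, 0, 0] is the neutral
# element of this product, e.g. mulQuat(np.array([1., 0., 0., 0.]), q) == q.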
def negQuat(quat):
return np.array([quat[0], -quat[1], -quat[2], -quat[3]])
def quat2Vel(quat, dt=1):
axis = quat[1:].copy()
sin_a_2 = np.sqrt(np.sum(axis ** 2))
axis = axis / (sin_a_2 + 1e-8)
speed = 2 * np.arctan2(sin_a_2, quat[0]) / dt
return speed, axis
def quatDiff2Vel(quat1, quat2, dt):
neg = negQuat(quat1)
diff = mulQuat(quat2, neg)
return quat2Vel(diff, dt)
def axis_angle2quat(axis, angle):
c = np.cos(angle / 2)
s = np.sin(angle / 2)
return np.array([c, s * axis[0], s * axis[1], s * axis[2]])
def euler2mat(euler):
""" Convert Euler Angles to Rotation Matrix. See rotation.py for notes """
euler = np.asarray(euler, dtype=np.float64)
assert euler.shape[-1] == 3, "Invalid shaped euler {}".format(euler)
ai, aj, ak = -euler[..., 2], -euler[..., 1], -euler[..., 0]
si, sj, sk = np.sin(ai), np.sin(aj), np.sin(ak)
ci, cj, ck = np.cos(ai), np.cos(aj), np.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
mat = np.empty(euler.shape[:-1] + (3, 3), dtype=np.float64)
mat[..., 2, 2] = cj * ck
mat[..., 2, 1] = sj * sc - cs
mat[..., 2, 0] = sj * cc + ss
mat[..., 1, 2] = cj * sk
mat[..., 1, 1] = sj * ss + cc
mat[..., 1, 0] = sj * cs - sc
mat[..., 0, 2] = -sj
mat[..., 0, 1] = cj * si
mat[..., 0, 0] = cj * ci
return mat
def euler2quat(euler):
""" Convert Euler Angles to Quaternions. See rotation.py for notes """
euler = np.asarray(euler, dtype=np.float64)
assert euler.shape[-1] == 3, "Invalid shape euler {}".format(euler)
ai, aj, ak = euler[..., 2] / 2, -euler[..., 1] / 2, euler[..., 0] / 2
si, sj, sk = np.sin(ai), np.sin(aj), np.sin(ak)
ci, cj, ck = np.cos(ai), np.cos(aj), np.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
quat = np.empty(euler.shape[:-1] + (4,), dtype=np.float64)
quat[..., 0] = cj * cc + sj * ss
quat[..., 3] = cj * sc - sj * cs
quat[..., 2] = -(cj * ss + sj * cc)
quat[..., 1] = cj * cs - sj * sc
return quat
def mat2euler(mat):
""" Convert Rotation Matrix to Euler Angles. See rotation.py for notes """
mat = np.asarray(mat, dtype=np.float64)
assert mat.shape[-2:] == (3, 3), "Invalid shape matrix {}".format(mat)
cy = np.sqrt(mat[..., 2, 2] * mat[..., 2, 2] + mat[..., 1, 2] * mat[..., 1, 2])
condition = cy > _EPS4
euler = np.empty(mat.shape[:-1], dtype=np.float64)
euler[..., 2] = np.where(
condition,
-np.arctan2(mat[..., 0, 1], mat[..., 0, 0]),
-np.arctan2(-mat[..., 1, 0], mat[..., 1, 1]),
)
euler[..., 1] = np.where(
condition, -np.arctan2(-mat[..., 0, 2], cy), -np.arctan2(-mat[..., 0, 2], cy)
)
euler[..., 0] = np.where(
condition, -np.arctan2(mat[..., 1, 2], mat[..., 2, 2]), 0.0
)
return euler
def mat2quat(mat):
""" Convert Rotation Matrix to Quaternion. See rotation.py for notes """
mat = np.asarray(mat, dtype=np.float64)
assert mat.shape[-2:] == (3, 3), "Invalid shape matrix {}".format(mat)
Qxx, Qyx, Qzx = mat[..., 0, 0], mat[..., 0, 1], mat[..., 0, 2]
Qxy, Qyy, Qzy = mat[..., 1, 0], mat[..., 1, 1], mat[..., 1, 2]
Qxz, Qyz, Qzz = mat[..., 2, 0], mat[..., 2, 1], mat[..., 2, 2]
# Fill only lower half of symmetric matrix
K = np.zeros(mat.shape[:-2] + (4, 4), dtype=np.float64)
K[..., 0, 0] = Qxx - Qyy - Qzz
K[..., 1, 0] = Qyx + Qxy
K[..., 1, 1] = Qyy - Qxx - Qzz
K[..., 2, 0] = Qzx + Qxz
K[..., 2, 1] = Qzy + Qyz
K[..., 2, 2] = Qzz - Qxx - Qyy
K[..., 3, 0] = Qyz - Qzy
K[..., 3, 1] = Qzx - Qxz
K[..., 3, 2] = Qxy - Qyx
K[..., 3, 3] = Qxx + Qyy + Qzz
K /= 3.0
# TODO: vectorize this -- probably could be made faster
q = np.empty(K.shape[:-2] + (4,))
it = np.nditer(q[..., 0], flags=["multi_index"])
while not it.finished:
# Use Hermitian eigenvectors, values for speed
vals, vecs = np.linalg.eigh(K[it.multi_index])
# Select largest eigenvector, reorder to w,x,y,z quaternion
q[it.multi_index] = vecs[[3, 0, 1, 2], np.argmax(vals)]
# Prefer quaternion with positive w
# (q * -1 corresponds to same rotation as q)
if q[it.multi_index][0] < 0:
q[it.multi_index] *= -1
it.iternext()
return q
def quat2euler(quat):
""" Convert Quaternion to Euler Angles. See rotation.py for notes """
return mat2euler(quat2mat(quat))
def quat2mat(quat):
""" Convert Quaternion to Euler Angles. See rotation.py for notes """
quat = np.asarray(quat, dtype=np.float64)
assert quat.shape[-1] == 4, "Invalid shape quat {}".format(quat)
w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]
Nq = np.sum(quat * quat, axis=-1)
s = 2.0 / Nq
X, Y, Z = x * s, y * s, z * s
wX, wY, wZ = w * X, w * Y, w * Z
xX, xY, xZ = x * X, x * Y, x * Z
yY, yZ, zZ = y * Y, y * Z, z * Z
mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)
mat[..., 0, 0] = 1.0 - (yY + zZ)
mat[..., 0, 1] = xY - wZ
mat[..., 0, 2] = xZ + wY
mat[..., 1, 0] = xY + wZ
mat[..., 1, 1] = 1.0 - (xX + zZ)
mat[..., 1, 2] = yZ - wX
mat[..., 2, 0] = xZ - wY
mat[..., 2, 1] = yZ + wX
mat[..., 2, 2] = 1.0 - (xX + yY)
return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))
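# Sanity check (added, illustrative): for non-degenerate angles these
# conversions round-trip, e.g.
#   e = np.array([0.1, -0.2, 0.3])
#   assert np.allclose(quat2euler(euler2quat(e)), e)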
# File: CSD-manipulation-master/d4rl_alt/kitchen/adept_models/CONTRIBUTING.public.md

# How to Contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.
## Community Guidelines
This project follows
[Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
# File: CSD-manipulation-master/d4rl_alt/kitchen/adept_models/README.public.md

# D'Suite Scenes
This repository is based on a collection of [MuJoCo](http://www.mujoco.org/) simulation
scenes and common assets for D'Suite environments, and on code from the ROBEL suite:
https://github.com/google-research/robel
## Disclaimer
This is not an official Google product.
# File: CSD-manipulation-master/d4rl_alt/kitchen/adept_models/__init__.py
# (empty file)
# File: CSD-manipulation-master/d4rl_alt/kitchen/third_party/franka/README.md

# franka
Franka panda mujoco models
# Environment
franka_panda.xml | coming soon
:-------------------------:|:-------------------------:
 | coming soon
# File: CSD-manipulation-master/d4rl_alt/locomotion/__init__.py
from gym.envs.registration import register
from d4rl_alt.locomotion import ant, maze_env
"""
register(
id='antmaze-umaze-v0',
entry_point='d4rl_alt.locomotion.ant:make_ant_maze_env',
max_episode_steps=700,
kwargs={
'maze_map': maze_env.U_MAZE_TEST,
'reward_type':'sparse',
'dataset_url':'http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_u-maze_noisy_multistart_False_multigoal_False_sparse.hdf5',
'non_zero_reset':False,
'eval':True,
'maze_size_scaling': 4.0,
'ref_min_score': 0.0,
'ref_max_score': 1.0,
}
)
"""
register(
id="antmaze-umaze-v0",
entry_point="d4rl_alt.locomotion.ant:make_ant_maze_env",
max_episode_steps=700,
kwargs={
"maze_map": maze_env.U_MAZE_TEST,
"reward_type": "sparse",
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_u-maze_noisy_multistart_False_multigoal_False_sparse.hdf5",
"non_zero_reset": False,
"eval": True,
"maze_size_scaling": 4.0,
"ref_min_score": 0.0,
"ref_max_score": 1.0,
},
)
register(
id="antmaze-umaze-diverse-v0",
entry_point="d4rl_alt.locomotion.ant:make_ant_maze_env",
max_episode_steps=700,
kwargs={
"maze_map": maze_env.U_MAZE_TEST,
"reward_type": "sparse",
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_u-maze_noisy_multistart_True_multigoal_True_sparse.hdf5",
"non_zero_reset": False,
"eval": True,
"maze_size_scaling": 4.0,
"ref_min_score": 0.0,
"ref_max_score": 1.0,
},
)
register(
id="antmaze-medium-play-v0",
entry_point="d4rl_alt.locomotion.ant:make_ant_maze_env",
max_episode_steps=1000,
kwargs={
"maze_map": maze_env.BIG_MAZE_TEST,
"reward_type": "sparse",
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_big-maze_noisy_multistart_True_multigoal_False_sparse.hdf5",
"non_zero_reset": False,
"eval": True,
"maze_size_scaling": 4.0,
"ref_min_score": 0.0,
"ref_max_score": 1.0,
},
)
register(
id="antmaze-medium-diverse-v0",
entry_point="d4rl_alt.locomotion.ant:make_ant_maze_env",
max_episode_steps=1000,
kwargs={
"maze_map": maze_env.BIG_MAZE_TEST,
"reward_type": "sparse",
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_big-maze_noisy_multistart_True_multigoal_True_sparse.hdf5",
"non_zero_reset": False,
"eval": True,
"maze_size_scaling": 4.0,
"ref_min_score": 0.0,
"ref_max_score": 1.0,
},
)
register(
id="antmaze-large-diverse-v0",
entry_point="d4rl_alt.locomotion.ant:make_ant_maze_env",
max_episode_steps=1000,
kwargs={
"maze_map": maze_env.HARDEST_MAZE_TEST,
"reward_type": "sparse",
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_hardest-maze_noisy_multistart_True_multigoal_True_sparse.hdf5",
"non_zero_reset": False,
"eval": True,
"maze_size_scaling": 4.0,
"ref_min_score": 0.0,
"ref_max_score": 1.0,
},
)
register(
id="antmaze-large-play-v0",
entry_point="d4rl_alt.locomotion.ant:make_ant_maze_env",
max_episode_steps=1000,
kwargs={
"maze_map": maze_env.HARDEST_MAZE_TEST,
"reward_type": "sparse",
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_hardest-maze_noisy_multistart_True_multigoal_False_sparse.hdf5",
"non_zero_reset": False,
"eval": True,
"maze_size_scaling": 4.0,
"ref_min_score": 0.0,
"ref_max_score": 1.0,
},
)
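# Usage sketch (added): importing this module registers the environments
# above with Gym, after which they can be built by ID, e.g.
#   import gym
#   import d4rl_alt.locomotion
#   env = gym.make("antmaze-umaze-v0")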
# File: CSD-manipulation-master/d4rl_alt/locomotion/ant.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for creating the ant environment."""
import math
import os
import mujoco_py
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
from d4rl_alt import offline_env
from d4rl_alt.locomotion import goal_reaching_env, maze_env, mujoco_goal_env, wrappers
GYM_ASSETS_DIR = os.path.join(os.path.dirname(mujoco_goal_env.__file__), "assets")
class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
"""Basic ant locomotion environment."""
FILE = os.path.join(GYM_ASSETS_DIR, "ant.xml")
def __init__(
self,
file_path=None,
expose_all_qpos=False,
expose_body_coms=None,
expose_body_comvels=None,
non_zero_reset=False,
):
if file_path is None:
file_path = self.FILE
self._expose_all_qpos = expose_all_qpos
self._expose_body_coms = expose_body_coms
self._expose_body_comvels = expose_body_comvels
self._body_com_indices = {}
self._body_comvel_indices = {}
self._non_zero_reset = non_zero_reset
mujoco_env.MujocoEnv.__init__(self, file_path, 5)
utils.EzPickle.__init__(self)
@property
def physics(self):
# Check mujoco version is greater than version 1.50 to call correct physics
# model containing PyMjData object for getting and setting position/velocity.
# Check https://github.com/openai/mujoco-py/issues/80 for updates to api.
if mujoco_py.get_version() >= "1.50":
return self.sim
else:
return self.model
def _step(self, a):
return self.step(a)
def step(self, a):
xposbefore = self.get_body_com("torso")[0]
self.do_simulation(a, self.frame_skip)
xposafter = self.get_body_com("torso")[0]
forward_reward = (xposafter - xposbefore) / self.dt
ctrl_cost = 0.5 * np.square(a).sum()
contact_cost = (
0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
)
survive_reward = 1.0
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self.state_vector()
notdone = np.isfinite(state).all() and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
ob = self._get_obs()
return (
ob,
reward,
done,
dict(
reward_forward=forward_reward,
reward_ctrl=-ctrl_cost,
reward_contact=-contact_cost,
reward_survive=survive_reward,
),
)
def _get_obs(self):
# No cfrc observation.
if self._expose_all_qpos:
obs = np.concatenate(
[
self.physics.data.qpos.flat[:15], # Ensures only ant obs.
self.physics.data.qvel.flat[:14],
]
)
else:
obs = np.concatenate(
[
self.physics.data.qpos.flat[2:15],
self.physics.data.qvel.flat[:14],
]
)
if self._expose_body_coms is not None:
for name in self._expose_body_coms:
com = self.get_body_com(name)
if name not in self._body_com_indices:
indices = range(len(obs), len(obs) + len(com))
self._body_com_indices[name] = indices
obs = np.concatenate([obs, com])
if self._expose_body_comvels is not None:
for name in self._expose_body_comvels:
comvel = self.get_body_comvel(name)
if name not in self._body_comvel_indices:
indices = range(len(obs), len(obs) + len(comvel))
self._body_comvel_indices[name] = indices
obs = np.concatenate([obs, comvel])
return obs
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
size=self.model.nq, low=-0.1, high=0.1
)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * 0.1
if self._non_zero_reset:
"""Now the reset is supposed to be to a non-zero location"""
reset_location = self._get_reset_location()
qpos[:2] = reset_location
# Set everything other than ant to original position and 0 velocity.
qpos[15:] = self.init_qpos[15:]
qvel[14:] = 0.0
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.5
def get_xy(self):
return self.physics.data.qpos[:2]
def set_xy(self, xy):
qpos = np.copy(self.physics.data.qpos)
qpos[0] = xy[0]
qpos[1] = xy[1]
qvel = self.physics.data.qvel
self.set_state(qpos, qvel)
class GoalReachingAntEnv(goal_reaching_env.GoalReachingEnv, AntEnv):
"""Ant locomotion rewarded for goal-reaching."""
BASE_ENV = AntEnv
def __init__(
self,
goal_sampler=goal_reaching_env.disk_goal_sampler,
file_path=None,
expose_all_qpos=False,
non_zero_reset=False,
eval=False,
reward_type="dense",
**kwargs
):
goal_reaching_env.GoalReachingEnv.__init__(
self, goal_sampler, eval=eval, reward_type=reward_type
)
AntEnv.__init__(
self,
file_path=file_path,
expose_all_qpos=expose_all_qpos,
expose_body_coms=None,
expose_body_comvels=None,
non_zero_reset=non_zero_reset,
)
class AntMazeEnv(maze_env.MazeEnv, GoalReachingAntEnv, offline_env.OfflineEnv):
"""Ant navigating a maze."""
LOCOMOTION_ENV = GoalReachingAntEnv
def __init__(
self,
goal_sampler=None,
expose_all_qpos=True,
reward_type="dense",
*args,
**kwargs
):
if goal_sampler is None:
goal_sampler = lambda np_rand: maze_env.MazeEnv.goal_sampler(self, np_rand)
maze_env.MazeEnv.__init__(
self,
*args,
manual_collision=False,
goal_sampler=goal_sampler,
expose_all_qpos=expose_all_qpos,
reward_type=reward_type,
**kwargs
)
offline_env.OfflineEnv.__init__(self, **kwargs)
        ## We set the target goal here for evaluation
self.set_target()
def set_target(self, target_location=None):
return self.set_target_goal(target_location)
def seed(self, seed=0):
mujoco_env.MujocoEnv.seed(self, seed)
def make_ant_maze_env(**kwargs):
env = AntMazeEnv(**kwargs)
return wrappers.NormalizedBoxEnv(env)
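# Usage sketch (added, illustrative): building the maze env directly with the
# same style of kwargs used by the registered antmaze-* environments:
#   env = make_ant_maze_env(
#       maze_map=maze_env.U_MAZE_TEST,
#       maze_size_scaling=4.0,
#       reward_type="sparse",
#       non_zero_reset=False,
#   )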
# File: CSD-manipulation-master/d4rl_alt/locomotion/common.py
def run_policy_on_env(policy_fn, env, truncate_episode_at=None, first_obs=None):
if first_obs is None:
obs = env.reset()
else:
obs = first_obs
trajectory = []
step_num = 0
while True:
act = policy_fn(obs)
next_obs, rew, done, _ = env.step(act)
trajectory.append((obs, act, rew, done))
obs = next_obs
step_num += 1
if done or (
truncate_episode_at is not None and step_num >= truncate_episode_at
):
break
return trajectory
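# Usage sketch (added, illustrative): rolling out a random policy for at most
# 100 steps; `trajectory` is a list of (obs, act, rew, done) tuples.
#   policy_fn = lambda obs: env.action_space.sample()
#   trajectory = run_policy_on_env(policy_fn, env, truncate_episode_at=100)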
# File: CSD-manipulation-master/d4rl_alt/locomotion/generate_dataset.py
import argparse
import gzip
import os
import pickle
import h5py
import numpy as np
import skvideo.io
import torch
from PIL import Image
from rlkit.torch.pytorch_util import set_gpu_mode
from d4rl_alt.locomotion import ant, maze_env, swimmer
from d4rl_alt.locomotion.wrappers import NormalizedBoxEnv
def reset_data():
return {
"observations": [],
"actions": [],
"terminals": [],
"rewards": [],
"infos/goal": [],
"infos/qpos": [],
"infos/qvel": [],
}
def append_data(data, s, a, r, tgt, done, env_data):
data["observations"].append(s)
data["actions"].append(a)
data["rewards"].append(r)
data["terminals"].append(done)
data["infos/goal"].append(tgt)
data["infos/qpos"].append(env_data.qpos.ravel().copy())
data["infos/qvel"].append(env_data.qvel.ravel().copy())
def npify(data):
for k in data:
if k == "terminals":
dtype = np.bool_
else:
dtype = np.float32
data[k] = np.array(data[k], dtype=dtype)
def load_policy(policy_file):
data = torch.load(policy_file)
policy = data["exploration/policy"]
env = data["evaluation/env"]
print("Policy loaded")
if True:
set_gpu_mode(True)
policy.cuda()
return policy, env
def save_video(save_dir, file_name, frames, episode_id=0):
filename = os.path.join(save_dir, file_name + "_episode_{}".format(episode_id))
if not os.path.exists(filename):
os.makedirs(filename)
num_frames = frames.shape[0]
for i in range(num_frames):
img = Image.fromarray(np.flipud(frames[i]), "RGB")
img.save(os.path.join(filename, "frame_{}.png".format(i)))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--noisy", action="store_true", help="Noisy actions")
parser.add_argument(
"--maze", type=str, default="u-maze", help="Maze type. small or default"
)
parser.add_argument(
"--num_samples", type=int, default=int(1e6), help="Num samples to collect"
)
parser.add_argument("--env", type=str, default="Ant", help="Environment type")
parser.add_argument(
"--policy_file", type=str, default="policy_file", help="file_name"
)
parser.add_argument("--max_episode_steps", default=1000, type=int)
parser.add_argument("--video", action="store_true")
parser.add_argument("--multi_start", action="store_true")
parser.add_argument("--multigoal", action="store_true")
args = parser.parse_args()
if args.maze == "u-maze":
maze = maze_env.U_MAZE
elif args.maze == "big-maze":
maze = maze_env.BIG_MAZE
elif args.maze == "hardest-maze":
maze = maze_env.HARDEST_MAZE
else:
raise NotImplementedError
if args.env == "Ant":
env = NormalizedBoxEnv(
ant.AntMazeEnv(
maze_map=maze, maze_size_scaling=4.0, non_zero_reset=args.multi_start
)
)
elif args.env == "Swimmer":
env = NormalizedBoxEnv(
swimmer.SwimmerMazeEnv(
                maze_map=maze, maze_size_scaling=4.0, non_zero_reset=args.multi_start
)
)
env.set_target_goal()
s = env.reset()
print(s.shape)
act = env.action_space.sample()
done = False
# Load the policy
policy, train_env = load_policy(args.policy_file)
# Define goal reaching policy fn
def _goal_reaching_policy_fn(obs, goal):
goal_x, goal_y = goal
obs_new = obs[2:-2]
goal_tuple = np.array([goal_x, goal_y])
# normalize the norm of the relative goals to in-distribution values
goal_tuple = goal_tuple / np.linalg.norm(goal_tuple) * 10.0
new_obs = np.concatenate([obs_new, goal_tuple], -1)
return policy.get_action(new_obs)[0], (
goal_tuple[0] + obs[0],
goal_tuple[1] + obs[1],
)
data = reset_data()
# create waypoint generating policy integrated with high level controller
data_collection_policy = env.create_navigation_policy(
_goal_reaching_policy_fn,
)
if args.video:
frames = []
ts = 0
num_episodes = 0
for _ in range(args.num_samples):
act, waypoint_goal = data_collection_policy(s)
if args.noisy:
act = act + np.random.randn(*act.shape) * 0.2
act = np.clip(act, -1.0, 1.0)
ns, r, done, info = env.step(act)
if ts >= args.max_episode_steps:
done = True
append_data(data, s[:-2], act, r, env.target_goal, done, env.physics.data)
if len(data["observations"]) % 10000 == 0:
print(len(data["observations"]))
ts += 1
if done:
done = False
ts = 0
s = env.reset()
env.set_target_goal()
if args.video:
frames = np.array(frames)
save_video("./videos/", args.env + "_navigation", frames, num_episodes)
num_episodes += 1
frames = []
else:
s = ns
if args.video:
curr_frame = env.physics.render(width=500, height=500, depth=False)
frames.append(curr_frame)
if args.noisy:
fname = args.env + "_maze_%s_noisy_multistart_%s_multigoal_%s.hdf5" % (
args.maze,
str(args.multi_start),
str(args.multigoal),
)
else:
fname = args.env + "maze_%s_multistart_%s_multigoal_%s.hdf5" % (
args.maze,
str(args.multi_start),
str(args.multigoal),
)
dataset = h5py.File(fname, "w")
npify(data)
for k in data:
dataset.create_dataset(k, data=data[k], compression="gzip")
if __name__ == "__main__":
main()
# File: CSD-manipulation-master/d4rl_alt/locomotion/goal_reaching_env.py
import numpy as np
def disk_goal_sampler(np_random, goal_region_radius=10.0):
th = 2 * np.pi * np_random.uniform()
radius = goal_region_radius * np_random.uniform()
return radius * np.array([np.cos(th), np.sin(th)])
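# Note (added): because the radius itself is sampled uniformly (rather than
# its square root), samples concentrate near the origin; they cover the disk
# but are not uniform over its area.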
def constant_goal_sampler(np_random, location=10.0 * np.ones([2])):
return location
class GoalReachingEnv(object):
"""General goal-reaching environment."""
BASE_ENV = None # Must be specified by child class.
def __init__(self, goal_sampler, eval=False, reward_type="dense"):
self._goal_sampler = goal_sampler
self._goal = np.ones([2])
self.target_goal = self._goal
        # This flag is used to make sure that, when using this environment
        # for evaluation, no goals are appended to the state.
self.eval = eval
        # This is the reward type fed as input to the goal-conditioned policy
self.reward_type = reward_type
def _get_obs(self):
base_obs = self.BASE_ENV._get_obs(self)
goal_direction = self._goal - self.get_xy()
if not self.eval:
obs = np.concatenate([base_obs, goal_direction])
return obs
else:
return base_obs
def step(self, a):
self.BASE_ENV.step(self, a)
if self.reward_type == "dense":
reward = -np.linalg.norm(self.target_goal - self.get_xy())
elif self.reward_type == "sparse":
reward = (
1.0 if np.linalg.norm(self.get_xy() - self.target_goal) <= 0.5 else 0.0
)
done = False
# Terminate episode when we reach a goal
if self.eval and np.linalg.norm(self.get_xy() - self.target_goal) <= 0.5:
done = True
obs = self._get_obs()
return obs, reward, done, {}
def reset_model(self):
if self.target_goal is not None or self.eval:
self._goal = self.target_goal
else:
self._goal = self._goal_sampler(self.np_random)
return self.BASE_ENV.reset_model(self)
# File: CSD-manipulation-master/d4rl_alt/locomotion/maze_env.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adapted from efficient-hrl maze_env.py."""
import math
import os
import tempfile
import xml.etree.ElementTree as ET
from copy import deepcopy
import gym
import numpy as np
RESET = R = "r" # Reset position.
GOAL = G = "g"
# Maze specifications for dataset generation
U_MAZE = [
[1, 1, 1, 1, 1],
[1, R, 0, 0, 1],
[1, 1, 1, 0, 1],
[1, G, 0, 0, 1],
[1, 1, 1, 1, 1],
]
BIG_MAZE = [
[1, 1, 1, 1, 1, 1, 1, 1],
[1, R, 0, 1, 1, 0, 0, 1],
[1, 0, 0, 1, 0, 0, G, 1],
[1, 1, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0, 0, 1],
[1, G, 1, 0, 0, 1, 0, 1],
[1, 0, 0, 0, 1, G, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
]
HARDEST_MAZE = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, R, 0, 0, 0, 1, G, 0, 0, 0, 0, 1],
[1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, G, 0, 1, 0, 0, G, 1],
[1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1],
[1, 0, G, 1, 0, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1],
[1, 0, 0, 1, G, 0, G, 1, 0, G, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
# Maze specifications for evaluation
U_MAZE_TEST = [
[1, 1, 1, 1, 1],
[1, R, 0, 0, 1],
[1, 1, 1, 0, 1],
[1, G, 0, 0, 1],
[1, 1, 1, 1, 1],
]
BIG_MAZE_TEST = [
[1, 1, 1, 1, 1, 1, 1, 1],
[1, R, 0, 1, 1, 0, 0, 1],
[1, 0, 0, 1, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 0, 0, 1, 0, G, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
]
HARDEST_MAZE_TEST = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, R, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1],
[1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0, 0, 1, 0, G, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
class MazeEnv(gym.Env):
LOCOMOTION_ENV = None # Must be specified by child class.
def __init__(
self,
maze_map,
maze_size_scaling,
maze_height=0.5,
manual_collision=False,
non_zero_reset=False,
reward_type="dense",
*args,
**kwargs
):
if self.LOCOMOTION_ENV is None:
raise ValueError("LOCOMOTION_ENV is unspecified.")
xml_path = self.LOCOMOTION_ENV.FILE
tree = ET.parse(xml_path)
worldbody = tree.find(".//worldbody")
self._maze_map = maze_map
self._maze_height = maze_height
self._maze_size_scaling = maze_size_scaling
self._manual_collision = manual_collision
        # Obtain a numpy-array form of the maze map in case we want to reset
        # to multiple starting states.
temp_maze_map = deepcopy(self._maze_map)
for i in range(len(maze_map)):
for j in range(len(maze_map[0])):
if temp_maze_map[i][j] in [
RESET,
]:
temp_maze_map[i][j] = 0
elif temp_maze_map[i][j] in [
GOAL,
]:
temp_maze_map[i][j] = 1
self._np_maze_map = np.array(temp_maze_map)
torso_x, torso_y = self._find_robot()
self._init_torso_x = torso_x
self._init_torso_y = torso_y
for i in range(len(self._maze_map)):
for j in range(len(self._maze_map[0])):
struct = self._maze_map[i][j]
if struct == 1: # Unmovable block.
# Offset all coordinates so that robot starts at the origin.
ET.SubElement(
worldbody,
"geom",
name="block_%d_%d" % (i, j),
pos="%f %f %f"
% (
j * self._maze_size_scaling - torso_x,
i * self._maze_size_scaling - torso_y,
self._maze_height / 2 * self._maze_size_scaling,
),
size="%f %f %f"
% (
0.5 * self._maze_size_scaling,
0.5 * self._maze_size_scaling,
self._maze_height / 2 * self._maze_size_scaling,
),
type="box",
material="",
contype="1",
conaffinity="1",
rgba="0.7 0.5 0.3 1.0",
)
torso = tree.find(".//body[@name='torso']")
geoms = torso.findall(".//geom")
_, file_path = tempfile.mkstemp(text=True, suffix=".xml")
tree.write(file_path)
self.LOCOMOTION_ENV.__init__(
self,
*args,
file_path=file_path,
non_zero_reset=non_zero_reset,
reward_type=reward_type,
**kwargs
)
self.target_goal = None
def _xy_to_rowcol(self, xy):
size_scaling = self._maze_size_scaling
xy = (max(xy[0], 1e-4), max(xy[1], 1e-4))
return (int(1 + (xy[1]) / size_scaling), int(1 + (xy[0]) / size_scaling))
def _get_reset_location(
self,
):
prob = (1.0 - self._np_maze_map) / np.sum(1.0 - self._np_maze_map)
prob_row = np.sum(prob, 1)
row_sample = np.random.choice(np.arange(self._np_maze_map.shape[0]), p=prob_row)
col_sample = np.random.choice(
np.arange(self._np_maze_map.shape[1]),
p=prob[row_sample] * 1.0 / prob_row[row_sample],
)
reset_location = self._rowcol_to_xy((row_sample, col_sample))
# Add some random noise
random_x = np.random.uniform(low=0, high=0.5) * 0.5 * self._maze_size_scaling
random_y = np.random.uniform(low=0, high=0.5) * 0.5 * self._maze_size_scaling
return (
max(reset_location[0] + random_x, 0),
max(reset_location[1] + random_y, 0),
)
def _rowcol_to_xy(self, rowcol, add_random_noise=False):
row, col = rowcol
x = col * self._maze_size_scaling - self._init_torso_x
y = row * self._maze_size_scaling - self._init_torso_y
if add_random_noise:
x = x + np.random.uniform(low=0, high=self._maze_size_scaling * 0.25)
y = y + np.random.uniform(low=0, high=self._maze_size_scaling * 0.25)
return (x, y)
def goal_sampler(self, np_random, only_free_cells=True, interpolate=True):
valid_cells = []
goal_cells = []
for i in range(len(self._maze_map)):
for j in range(len(self._maze_map[0])):
if self._maze_map[i][j] in [0, RESET, GOAL] or not only_free_cells:
valid_cells.append((i, j))
if self._maze_map[i][j] == GOAL:
goal_cells.append((i, j))
# If there is a 'goal' designated, use that. Otherwise, any valid cell can
# be a goal.
sample_choices = goal_cells if goal_cells else valid_cells
cell = sample_choices[np_random.choice(len(sample_choices))]
xy = self._rowcol_to_xy(cell, add_random_noise=True)
random_x = np.random.uniform(low=0, high=0.5) * 0.25 * self._maze_size_scaling
random_y = np.random.uniform(low=0, high=0.5) * 0.25 * self._maze_size_scaling
xy = (max(xy[0] + random_x, 0), max(xy[1] + random_y, 0))
return xy
def set_target_goal(self, goal_input=None):
if goal_input is None:
self.target_goal = self.goal_sampler(np.random)
else:
self.target_goal = goal_input
print("Target Goal: ", self.target_goal)
## Make sure that the goal used in self._goal is also reset:
self._goal = self.target_goal
def _find_robot(self):
structure = self._maze_map
size_scaling = self._maze_size_scaling
for i in range(len(structure)):
for j in range(len(structure[0])):
if structure[i][j] == RESET:
return j * size_scaling, i * size_scaling
raise ValueError("No robot in maze specification.")
def _is_in_collision(self, pos):
x, y = pos
structure = self._maze_map
size_scaling = self._maze_size_scaling
for i in range(len(structure)):
for j in range(len(structure[0])):
if structure[i][j] == 1:
minx = j * size_scaling - size_scaling * 0.5 - self._init_torso_x
maxx = j * size_scaling + size_scaling * 0.5 - self._init_torso_x
miny = i * size_scaling - size_scaling * 0.5 - self._init_torso_y
maxy = i * size_scaling + size_scaling * 0.5 - self._init_torso_y
if minx <= x <= maxx and miny <= y <= maxy:
return True
return False
def step(self, action):
if self._manual_collision:
old_pos = self.get_xy()
inner_next_obs, inner_reward, done, info = self.LOCOMOTION_ENV.step(
self, action
)
new_pos = self.get_xy()
if self._is_in_collision(new_pos):
self.set_xy(old_pos)
else:
inner_next_obs, inner_reward, done, info = self.LOCOMOTION_ENV.step(
self, action
)
next_obs = self._get_obs()
return next_obs, inner_reward, done, info
def _get_best_next_rowcol(self, current_rowcol, target_rowcol):
"""Runs BFS to find shortest path to target and returns best next rowcol.
Add obstacle avoidance"""
current_rowcol = tuple(current_rowcol)
target_rowcol = tuple(target_rowcol)
if target_rowcol == current_rowcol:
return target_rowcol
visited = {}
to_visit = [target_rowcol]
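        # Search backward from the target: the first cell we expand that is
        # adjacent to the current cell lies on a shortest path and is returned.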
while to_visit:
next_visit = []
for rowcol in to_visit:
visited[rowcol] = True
row, col = rowcol
left = (row, col - 1)
right = (row, col + 1)
down = (row + 1, col)
up = (row - 1, col)
for next_rowcol in [left, right, down, up]:
if next_rowcol == current_rowcol: # Found a shortest path.
return rowcol
next_row, next_col = next_rowcol
if next_row < 0 or next_row >= len(self._maze_map):
continue
if next_col < 0 or next_col >= len(self._maze_map[0]):
continue
if self._maze_map[next_row][next_col] not in [0, RESET, GOAL]:
continue
if next_rowcol in visited:
continue
next_visit.append(next_rowcol)
to_visit = next_visit
raise ValueError("No path found to target.")
def create_navigation_policy(
self,
goal_reaching_policy_fn,
obs_to_robot=lambda obs: obs[:2],
obs_to_target=lambda obs: obs[-2:],
relative=False,
):
"""Creates a navigation policy by guiding a sub-policy to waypoints."""
def policy_fn(obs):
robot_x, robot_y = obs_to_robot(obs)
robot_row, robot_col = self._xy_to_rowcol([robot_x, robot_y])
target_x, target_y = self.target_goal
if relative:
target_x += robot_x # Target is given in relative coordinates.
target_y += robot_y
target_row, target_col = self._xy_to_rowcol([target_x, target_y])
print("Target: ", target_row, target_col, target_x, target_y)
print("Robot: ", robot_row, robot_col, robot_x, robot_y)
waypoint_row, waypoint_col = self._get_best_next_rowcol(
[robot_row, robot_col], [target_row, target_col]
)
if waypoint_row == target_row and waypoint_col == target_col:
waypoint_x = target_x
waypoint_y = target_y
else:
waypoint_x, waypoint_y = self._rowcol_to_xy(
[waypoint_row, waypoint_col], add_random_noise=True
)
goal_x = waypoint_x - robot_x
goal_y = waypoint_y - robot_y
print("Waypoint: ", waypoint_row, waypoint_col, waypoint_x, waypoint_y)
return goal_reaching_policy_fn(obs, (goal_x, goal_y))
return policy_fn
| 13,316 | 34.512 | 88 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/locomotion/mujoco_goal_env.py | import os
from collections import OrderedDict
from os import path
import gym
import numpy as np
from gym import error, spaces
from gym.utils import seeding
try:
import mujoco_py
except ImportError as e:
raise error.DependencyNotInstalled(
"{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(
e
)
)
DEFAULT_SIZE = 500
def convert_observation_to_space(observation):
if isinstance(observation, dict):
space = spaces.Dict(
OrderedDict(
[
(key, convert_observation_to_space(value))
for key, value in observation.items()
]
)
)
elif isinstance(observation, np.ndarray):
low = np.full(observation.shape, -float("inf"), dtype=np.float32)
high = np.full(observation.shape, float("inf"), dtype=np.float32)
space = spaces.Box(low, high, dtype=observation.dtype)
else:
raise NotImplementedError(type(observation), observation)
return space
class MujocoGoalEnv(gym.Env):
"""SuperClass for all MuJoCo goal reaching environments"""
def __init__(self, model_path, frame_skip):
if model_path.startswith("/"):
fullpath = model_path
else:
fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path)
if not path.exists(fullpath):
raise IOError("File %s does not exist" % fullpath)
self.frame_skip = frame_skip
self.model = mujoco_py.load_model_from_path(fullpath)
self.sim = mujoco_py.MjSim(self.model)
self.data = self.sim.data
self.viewer = None
self._viewers = {}
self.metadata = {
"render.modes": ["human", "rgb_array", "depth_array"],
"video.frames_per_second": int(np.round(1.0 / self.dt)),
}
self.init_qpos = self.sim.data.qpos.ravel().copy()
self.init_qvel = self.sim.data.qvel.ravel().copy()
self._set_action_space()
action = self.action_space.sample()
observation, _reward, done, _info = self.step(action)
assert not done
self._set_observation_space(observation["observation"])
self.seed()
def _set_action_space(self):
bounds = self.model.actuator_ctrlrange.copy().astype(np.float32)
low, high = bounds.T
self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)
return self.action_space
def _set_observation_space(self, observation):
temp_observation_space = convert_observation_to_space(observation)
self.observation_space = spaces.Dict(
dict(
observation=temp_observation_space,
desired_goal=spaces.Box(-np.inf, np.inf, shape=(2,), dtype=np.float32),
achieved_goal=spaces.Box(-np.inf, np.inf, shape=(2,), dtype=np.float32),
)
)
return self.observation_space
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
# methods to override:
# ----------------------------
def reset_model(self):
"""
Reset the robot degrees of freedom (qpos and qvel).
Implement this in each subclass.
"""
raise NotImplementedError
def viewer_setup(self):
"""
This method is called when the viewer is initialized.
Optionally implement this method, if you need to tinker with camera position
and so forth.
"""
pass
def reset(self):
self.sim.reset()
ob = self.reset_model()
return ob
def set_state(self, qpos, qvel):
assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)
old_state = self.sim.get_state()
new_state = mujoco_py.MjSimState(
old_state.time, qpos, qvel, old_state.act, old_state.udd_state
)
self.sim.set_state(new_state)
self.sim.forward()
@property
def dt(self):
return self.model.opt.timestep * self.frame_skip
def do_simulation(self, ctrl, n_frames):
self.sim.data.ctrl[:] = ctrl
for _ in range(n_frames):
self.sim.step()
def render(
self,
mode="human",
width=DEFAULT_SIZE,
height=DEFAULT_SIZE,
camera_id=None,
camera_name=None,
):
if mode == "rgb_array":
if camera_id is not None and camera_name is not None:
raise ValueError(
"Both `camera_id` and `camera_name` cannot be"
" specified at the same time."
)
no_camera_specified = camera_name is None and camera_id is None
if no_camera_specified:
camera_name = "track"
if camera_id is None and camera_name in self.model._camera_name2id:
camera_id = self.model.camera_name2id(camera_name)
self._get_viewer(mode).render(width, height, camera_id=camera_id)
# window size used for old mujoco-py:
data = self._get_viewer(mode).read_pixels(width, height, depth=False)
# original image is upside-down, so flip it
return data[::-1, :, :]
elif mode == "depth_array":
self._get_viewer(mode).render(width, height)
# window size used for old mujoco-py:
# Extract depth part of the read_pixels() tuple
data = self._get_viewer(mode).read_pixels(width, height, depth=True)[1]
# original image is upside-down, so flip it
return data[::-1, :]
elif mode == "human":
self._get_viewer(mode).render()
def close(self):
if self.viewer is not None:
# self.viewer.finish()
self.viewer = None
self._viewers = {}
def _get_viewer(self, mode):
self.viewer = self._viewers.get(mode)
if self.viewer is None:
if mode == "human":
self.viewer = mujoco_py.MjViewer(self.sim)
elif mode == "rgb_array" or mode == "depth_array":
self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1)
self.viewer_setup()
self._viewers[mode] = self.viewer
return self.viewer
def get_body_com(self, body_name):
return self.data.get_body_xpos(body_name)
def state_vector(self):
return np.concatenate([self.sim.data.qpos.flat, self.sim.data.qvel.flat])
| 6,822 | 32.446078 | 144 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/locomotion/point.py | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for creating the point environment."""
import math
import os
import mujoco_py
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
from d4rl_alt.locomotion import goal_reaching_env, maze_env, mujoco_goal_env
MY_ASSETS_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "assets")
class PointEnv(mujoco_env.MujocoEnv, utils.EzPickle):
FILE = os.path.join(MY_ASSETS_DIR, "point.xml")
def __init__(self, file_path=None, expose_all_qpos=False):
if file_path is None:
file_path = self.FILE
self._expose_all_qpos = expose_all_qpos
mujoco_env.MujocoEnv.__init__(self, file_path, 1)
# mujoco_goal_env.MujocoGoalEnv.__init__(self, file_path, 1)
utils.EzPickle.__init__(self)
@property
def physics(self):
# Check mujoco version is greater than version 1.50 to call correct physics
# model containing PyMjData object for getting and setting position/velocity.
# Check https://github.com/openai/mujoco-py/issues/80 for updates to api.
if mujoco_py.get_version() >= "1.50":
return self.sim
else:
return self.model
def _step(self, a):
return self.step(a)
def step(self, action):
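        # Scale down the forward-velocity command; note this mutates the
        # caller's action array in place.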
action[0] = 0.2 * action[0]
qpos = np.copy(self.physics.data.qpos)
qpos[2] += action[1]
ori = qpos[2]
# Compute increment in each direction.
dx = math.cos(ori) * action[0]
dy = math.sin(ori) * action[0]
# Ensure that the robot is within reasonable range.
qpos[0] = np.clip(qpos[0] + dx, -100, 100)
qpos[1] = np.clip(qpos[1] + dy, -100, 100)
qvel = self.physics.data.qvel
self.set_state(qpos, qvel)
for _ in range(0, self.frame_skip):
self.physics.step()
next_obs = self._get_obs()
reward = 0
done = False
info = {}
return next_obs, reward, done, info
def _get_obs(self):
if self._expose_all_qpos:
return np.concatenate(
[
self.physics.data.qpos.flat[:3], # Only point-relevant coords.
self.physics.data.qvel.flat[:3],
]
)
return np.concatenate(
[self.physics.data.qpos.flat[2:3], self.physics.data.qvel.flat[:3]]
)
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
size=self.physics.model.nq, low=-0.1, high=0.1
)
qvel = self.init_qvel + self.np_random.randn(self.physics.model.nv) * 0.1
# Set everything other than point to original position and 0 velocity.
qpos[3:] = self.init_qpos[3:]
qvel[3:] = 0.0
self.set_state(qpos, qvel)
return self._get_obs()
def get_xy(self):
return self.physics.data.qpos[:2]
def set_xy(self, xy):
qpos = np.copy(self.physics.data.qpos)
qpos[0] = xy[0]
qpos[1] = xy[1]
qvel = self.physics.data.qvel
self.set_state(qpos, qvel)
class GoalReachingPointEnv(goal_reaching_env.GoalReachingEnv, PointEnv):
"""Point locomotion rewarded for goal-reaching."""
BASE_ENV = PointEnv
def __init__(
self,
goal_sampler=goal_reaching_env.disk_goal_sampler,
file_path=None,
expose_all_qpos=False,
):
goal_reaching_env.GoalReachingEnv.__init__(self, goal_sampler)
PointEnv.__init__(self, file_path=file_path, expose_all_qpos=expose_all_qpos)
class GoalReachingPointDictEnv(goal_reaching_env.GoalReachingDictEnv, PointEnv):
"""Ant locomotion for goal reaching in a disctionary compatible format."""
BASE_ENV = PointEnv
def __init__(
self,
goal_sampler=goal_reaching_env.disk_goal_sampler,
file_path=None,
expose_all_qpos=False,
):
goal_reaching_env.GoalReachingDictEnv.__init__(self, goal_sampler)
PointEnv.__init__(self, file_path=file_path, expose_all_qpos=expose_all_qpos)
class PointMazeEnv(maze_env.MazeEnv, GoalReachingPointEnv):
"""Point navigating a maze."""
LOCOMOTION_ENV = GoalReachingPointEnv
def __init__(self, goal_sampler=None, expose_all_qpos=True, *args, **kwargs):
if goal_sampler is None:
goal_sampler = lambda np_rand: maze_env.MazeEnv.goal_sampler(self, np_rand)
maze_env.MazeEnv.__init__(
self,
*args,
manual_collision=True,
goal_sampler=goal_sampler,
expose_all_qpos=expose_all_qpos,
**kwargs
)
def create_goal_reaching_policy(
obs_to_goal=lambda obs: obs[-2:], obs_to_ori=lambda obs: obs[0]
):
"""A hard-coded policy for reaching a goal position."""
def policy_fn(obs):
goal_x, goal_y = obs_to_goal(obs)
goal_dist = np.linalg.norm([goal_x, goal_y])
goal_ori = np.arctan2(goal_y, goal_x)
ori = obs_to_ori(obs)
ori_diff = (goal_ori - ori) % (2 * np.pi)
radius = goal_dist / 2.0 / max(0.1, np.abs(np.sin(ori_diff)))
rotation_left = (2 * ori_diff) % np.pi
circumference_left = max(goal_dist, radius * rotation_left)
speed = min(circumference_left * 5.0, 1.0)
velocity = speed
if ori_diff > np.pi / 2 and ori_diff < 3 * np.pi / 2:
velocity *= -1
time_left = min(circumference_left / (speed * 0.2), 10.0)
signed_ori_diff = ori_diff
if signed_ori_diff >= 3 * np.pi / 2:
signed_ori_diff = 2 * np.pi - signed_ori_diff
elif signed_ori_diff > np.pi / 2 and signed_ori_diff < 3 * np.pi / 2:
signed_ori_diff = signed_ori_diff - np.pi
angular_velocity = signed_ori_diff / time_left
angular_velocity = np.clip(angular_velocity, -1.0, 1.0)
return np.array([velocity, angular_velocity])
return policy_fn
def create_maze_navigation_policy(maze_env):
"""Creates a hard-coded policy to navigate a maze."""
ori_index = 2 if maze_env._expose_all_qpos else 0
obs_to_ori = lambda obs: obs[ori_index]
goal_reaching_policy = create_goal_reaching_policy(obs_to_ori=obs_to_ori)
goal_reaching_policy_fn = lambda obs, goal: goal_reaching_policy(
np.concatenate([obs, goal])
)
return maze_env.create_navigation_policy(goal_reaching_policy_fn)
| 7,070 | 32.995192 | 87 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/locomotion/swimmer.py | """Wrapper for creating the swimmer environment."""
import math
import os
import mujoco_py
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
from d4rl_alt import offline_env
from d4rl_alt.locomotion import goal_reaching_env, maze_env, mujoco_goal_env
GYM_ASSETS_DIR = os.path.join(os.path.dirname(mujoco_env.__file__), "assets")
class SwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
"""Basic swimmer locomotion environment."""
FILE = os.path.join(GYM_ASSETS_DIR, "swimmer.xml")
def __init__(self, file_path=None, expose_all_qpos=False, non_zero_reset=False):
if file_path is None:
file_path = self.FILE
self._expose_all_qpos = expose_all_qpos
mujoco_env.MujocoEnv.__init__(self, file_path, 5)
utils.EzPickle.__init__(self)
@property
def physics(self):
# Check mujoco version is greater than version 1.50 to call correct physics
# model containing PyMjData object for getting and setting position/velocity.
# Check https://github.com/openai/mujoco-py/issues/80 for updates to api.
if mujoco_py.get_version() >= "1.50":
return self.sim
else:
return self.model
def _step(self, a):
return self.step(a)
def step(self, a):
ctrl_cost_coeff = 0.0001
xposbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
xposafter = self.sim.data.qpos[0]
reward_fwd = (xposafter - xposbefore) / self.dt
reward_ctrl = -ctrl_cost_coeff * np.square(a).sum()
reward = reward_fwd + reward_ctrl
ob = self._get_obs()
return ob, reward, False, dict(reward_fwd=reward_fwd, reward_ctrl=reward_ctrl)
def _get_obs(self):
if self._expose_all_qpos:
obs = np.concatenate(
[
self.physics.data.qpos.flat[:5], # Ensures only swimmer obs.
self.physics.data.qvel.flat[:5],
]
)
else:
obs = np.concatenate(
[
self.physics.data.qpos.flat[2:5],
self.physics.data.qvel.flat[:5],
]
)
return obs
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
size=self.model.nq, low=-0.1, high=0.1
)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * 0.1
# Set everything other than swimmer to original position and 0 velocity.
qpos[5:] = self.init_qpos[5:]
qvel[5:] = 0.0
self.set_state(qpos, qvel)
return self._get_obs()
def get_xy(self):
return self.physics.data.qpos[:2]
def set_xy(self, xy):
qpos = np.copy(self.physics.data.qpos)
qpos[0] = xy[0]
qpos[1] = xy[1]
qvel = self.physics.data.qvel
self.set_state(qpos, qvel)
class GoalReachingSwimmerEnv(goal_reaching_env.GoalReachingEnv, SwimmerEnv):
"""Swimmer locomotion rewarded for goal-reaching."""
BASE_ENV = SwimmerEnv
def __init__(
self,
goal_sampler=goal_reaching_env.disk_goal_sampler,
file_path=None,
expose_all_qpos=False,
non_zero_reset=False,
eval=False,
reward_type="dense",
**kwargs
):
goal_reaching_env.GoalReachingEnv.__init__(
self, goal_sampler, eval=eval, reward_type=reward_type
)
SwimmerEnv.__init__(
self,
file_path=file_path,
expose_all_qpos=expose_all_qpos,
non_zero_reset=non_zero_reset,
)
class SwimmerMazeEnv(maze_env.MazeEnv, GoalReachingSwimmerEnv, offline_env.OfflineEnv):
"""Swimmer navigating a maze."""
LOCOMOTION_ENV = GoalReachingSwimmerEnv
def __init__(
self,
goal_sampler=None,
expose_all_qpos=True,
reward_type="dense",
*args,
**kwargs
):
if goal_sampler is None:
goal_sampler = lambda np_rand: maze_env.MazeEnv.goal_sampler(self, np_rand)
maze_env.MazeEnv.__init__(
self,
*args,
manual_collision=False,
goal_sampler=goal_sampler,
expose_all_qpos=expose_all_qpos,
reward_type=reward_type,
**kwargs
)
offline_env.OfflineEnv.__init__(self, **kwargs)
def set_target(self, target_location=None):
return self.set_target_goal(target_location)
| 4,524 | 29.166667 | 87 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/locomotion/wrappers.py | import itertools
from collections import deque
import numpy as np
from gym import Env
from gym.spaces import Box, Discrete
class ProxyEnv(Env):
def __init__(self, wrapped_env):
self._wrapped_env = wrapped_env
self.action_space = self._wrapped_env.action_space
self.observation_space = self._wrapped_env.observation_space
@property
def wrapped_env(self):
return self._wrapped_env
def reset(self, **kwargs):
return self._wrapped_env.reset(**kwargs)
def step(self, action):
return self._wrapped_env.step(action)
def render(self, *args, **kwargs):
return self._wrapped_env.render(*args, **kwargs)
@property
def horizon(self):
return self._wrapped_env.horizon
def terminate(self):
if hasattr(self.wrapped_env, "terminate"):
self.wrapped_env.terminate()
def __getattr__(self, attr):
if attr == "_wrapped_env":
raise AttributeError()
return getattr(self._wrapped_env, attr)
def __getstate__(self):
"""
This is useful to override in case the wrapped env has some funky
__getstate__ that doesn't play well with overriding __getattr__.
The main problematic case is/was gym's EzPickle serialization scheme.
"""
return self.__dict__
def __setstate__(self, state):
self.__dict__.update(state)
def __str__(self):
return "{}({})".format(type(self).__name__, self.wrapped_env)
class HistoryEnv(ProxyEnv, Env):
def __init__(self, wrapped_env, history_len):
super().__init__(wrapped_env)
self.history_len = history_len
high = np.inf * np.ones(self.history_len * self.observation_space.low.size)
low = -high
self.observation_space = Box(
low=low,
high=high,
)
self.history = deque(maxlen=self.history_len)
def step(self, action):
state, reward, done, info = super().step(action)
self.history.append(state)
flattened_history = self._get_history().flatten()
return flattened_history, reward, done, info
def reset(self, **kwargs):
state = super().reset()
self.history = deque(maxlen=self.history_len)
self.history.append(state)
flattened_history = self._get_history().flatten()
return flattened_history
def _get_history(self):
observations = list(self.history)
obs_count = len(observations)
for _ in range(self.history_len - obs_count):
dummy = np.zeros(self._wrapped_env.observation_space.low.size)
observations.append(dummy)
return np.c_[observations]
class DiscretizeEnv(ProxyEnv, Env):
def __init__(self, wrapped_env, num_bins):
super().__init__(wrapped_env)
low = self.wrapped_env.action_space.low
high = self.wrapped_env.action_space.high
action_ranges = [
np.linspace(low[i], high[i], num_bins) for i in range(len(low))
]
self.idx_to_continuous_action = [
np.array(x) for x in itertools.product(*action_ranges)
]
self.action_space = Discrete(len(self.idx_to_continuous_action))
def step(self, action):
continuous_action = self.idx_to_continuous_action[action]
return super().step(continuous_action)
class NormalizedBoxEnv(ProxyEnv):
"""
    Normalize actions to [-1, 1].
Optionally normalize observations and scale reward.
"""
def __init__(
self,
env,
reward_scale=1.0,
obs_mean=None,
obs_std=None,
):
ProxyEnv.__init__(self, env)
self._should_normalize = not (obs_mean is None and obs_std is None)
if self._should_normalize:
if obs_mean is None:
obs_mean = np.zeros_like(env.observation_space.low)
else:
obs_mean = np.array(obs_mean)
if obs_std is None:
obs_std = np.ones_like(env.observation_space.low)
else:
obs_std = np.array(obs_std)
self._reward_scale = reward_scale
self._obs_mean = obs_mean
self._obs_std = obs_std
ub = np.ones(self._wrapped_env.action_space.shape)
self.action_space = Box(-1 * ub, ub)
def estimate_obs_stats(self, obs_batch, override_values=False):
if self._obs_mean is not None and not override_values:
raise Exception(
"Observation mean and std already set. To "
"override, set override_values to True."
)
self._obs_mean = np.mean(obs_batch, axis=0)
self._obs_std = np.std(obs_batch, axis=0)
def _apply_normalize_obs(self, obs):
return (obs - self._obs_mean) / (self._obs_std + 1e-8)
def step(self, action):
lb = self._wrapped_env.action_space.low
ub = self._wrapped_env.action_space.high
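        # Affinely map the policy's action from [-1, 1] onto [lb, ub], then clip.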
scaled_action = lb + (action + 1.0) * 0.5 * (ub - lb)
scaled_action = np.clip(scaled_action, lb, ub)
wrapped_step = self._wrapped_env.step(scaled_action)
next_obs, reward, done, info = wrapped_step
if self._should_normalize:
next_obs = self._apply_normalize_obs(next_obs)
return next_obs, reward * self._reward_scale, done, info
def __str__(self):
return "Normalized: %s" % self._wrapped_env
| 5,434 | 31.159763 | 83 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/pointmaze/__init__.py | from gym.envs.registration import register
from .maze_model import (
LARGE_MAZE,
LARGE_MAZE_EVAL,
MEDIUM_MAZE,
MEDIUM_MAZE_EVAL,
OPEN,
U_MAZE,
U_MAZE_EVAL,
MazeEnv,
)
register(
id="maze2d-open-v0",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=150,
kwargs={
"maze_spec": OPEN,
"reward_type": "sparse",
"reset_target": False,
"ref_min_score": 0.01,
"ref_max_score": 20.66,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-open-sparse.hdf5",
},
)
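
# After registration, environments are created through gym's registry; a
# minimal sketch (the offline dataset is fetched lazily by
# offline_env.OfflineEnv via `dataset_url`):
#
#   import gym
#   import d4rl_alt.pointmaze
#   env = gym.make("maze2d-open-v0")
#   dataset = env.get_dataset()  # dict of observations/actions/rewards/...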
register(
id="maze2d-umaze-v0",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=150,
kwargs={
"maze_spec": U_MAZE,
"reward_type": "sparse",
"reset_target": False,
"ref_min_score": 0.94,
"ref_max_score": 62.6,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-umaze-sparse.hdf5",
},
)
register(
id="maze2d-medium-v0",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=250,
kwargs={
"maze_spec": MEDIUM_MAZE,
"reward_type": "sparse",
"reset_target": False,
"ref_min_score": 5.77,
"ref_max_score": 85.14,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-medium-sparse.hdf5",
},
)
register(
id="maze2d-large-v0",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=600,
kwargs={
"maze_spec": LARGE_MAZE,
"reward_type": "sparse",
"reset_target": False,
"ref_min_score": 4.83,
"ref_max_score": 191.99,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-large-sparse.hdf5",
},
)
register(
id="maze2d-umaze-v1",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=300,
kwargs={
"maze_spec": U_MAZE,
"reward_type": "sparse",
"reset_target": False,
"ref_min_score": 23.85,
"ref_max_score": 161.86,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-umaze-sparse-v1.hdf5",
},
)
register(
id="maze2d-medium-v1",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=600,
kwargs={
"maze_spec": MEDIUM_MAZE,
"reward_type": "sparse",
"reset_target": False,
"ref_min_score": 13.13,
"ref_max_score": 277.39,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-medium-sparse-v1.hdf5",
},
)
register(
id="maze2d-large-v1",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=800,
kwargs={
"maze_spec": LARGE_MAZE,
"reward_type": "sparse",
"reset_target": False,
"ref_min_score": 6.7,
"ref_max_score": 273.99,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-large-sparse-v1.hdf5",
},
)
register(
id="maze2d-eval-umaze-v1",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=300,
kwargs={
"maze_spec": U_MAZE_EVAL,
"reward_type": "sparse",
"reset_target": False,
"ref_min_score": 36.63,
"ref_max_score": 141.4,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-eval-umaze-sparse-v1.hdf5",
},
)
register(
id="maze2d-eval-medium-v1",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=600,
kwargs={
"maze_spec": MEDIUM_MAZE_EVAL,
"reward_type": "sparse",
"reset_target": False,
"ref_min_score": 13.07,
"ref_max_score": 204.93,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-eval-medium-sparse-v1.hdf5",
},
)
register(
id="maze2d-eval-large-v1",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=800,
kwargs={
"maze_spec": LARGE_MAZE_EVAL,
"reward_type": "sparse",
"reset_target": False,
"ref_min_score": 16.4,
"ref_max_score": 302.22,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-eval-large-sparse-v1.hdf5",
},
)
register(
id="maze2d-open-dense-v0",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=150,
kwargs={
"maze_spec": OPEN,
"reward_type": "dense",
"reset_target": False,
"ref_min_score": 11.17817,
"ref_max_score": 27.166538620695782,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-open-dense.hdf5",
},
)
register(
id="maze2d-umaze-dense-v0",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=150,
kwargs={
"maze_spec": U_MAZE,
"reward_type": "dense",
"reset_target": False,
"ref_min_score": 23.249793,
"ref_max_score": 81.78995240126592,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-umaze-dense.hdf5",
},
)
register(
id="maze2d-medium-dense-v0",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=250,
kwargs={
"maze_spec": MEDIUM_MAZE,
"reward_type": "dense",
"reset_target": False,
"ref_min_score": 19.477620,
"ref_max_score": 96.03474232952358,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-medium-dense.hdf5",
},
)
register(
id="maze2d-large-dense-v0",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=600,
kwargs={
"maze_spec": LARGE_MAZE,
"reward_type": "dense",
"reset_target": False,
"ref_min_score": 27.388310,
"ref_max_score": 215.09965671563742,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-large-dense.hdf5",
},
)
register(
id="maze2d-umaze-dense-v1",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=300,
kwargs={
"maze_spec": U_MAZE,
"reward_type": "dense",
"reset_target": False,
"ref_min_score": 68.537689,
"ref_max_score": 193.66285642381482,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-umaze-dense-v1.hdf5",
},
)
register(
id="maze2d-medium-dense-v1",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=600,
kwargs={
"maze_spec": MEDIUM_MAZE,
"reward_type": "dense",
"reset_target": False,
"ref_min_score": 44.264742,
"ref_max_score": 297.4552547777125,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-medium-dense-v1.hdf5",
},
)
register(
id="maze2d-large-dense-v1",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=800,
kwargs={
"maze_spec": LARGE_MAZE,
"reward_type": "dense",
"reset_target": False,
"ref_min_score": 30.569041,
"ref_max_score": 303.4857382709002,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-large-dense-v1.hdf5",
},
)
register(
id="maze2d-eval-umaze-dense-v1",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=300,
kwargs={
"maze_spec": U_MAZE_EVAL,
"reward_type": "dense",
"reset_target": False,
"ref_min_score": 56.95455,
"ref_max_score": 178.21373133248397,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-eval-umaze-dense-v1.hdf5",
},
)
register(
id="maze2d-eval-medium-dense-v1",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=600,
kwargs={
"maze_spec": MEDIUM_MAZE_EVAL,
"reward_type": "dense",
"reset_target": False,
"ref_min_score": 42.28578,
"ref_max_score": 235.5658957482388,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-eval-medium-dense-v1.hdf5",
},
)
register(
id="maze2d-eval-large-dense-v1",
entry_point="d4rl_alt.pointmaze:MazeEnv",
max_episode_steps=800,
kwargs={
"maze_spec": LARGE_MAZE_EVAL,
"reward_type": "dense",
"reset_target": False,
"ref_min_score": 56.95455,
"ref_max_score": 326.09647655082637,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-eval-large-dense-v1.hdf5",
},
)
| 8,531 | 27.345515 | 116 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/pointmaze/dynamic_mjc.py | """
dynamic_mjc.py
A small library for programmatically building MuJoCo XML files
"""
import tempfile
from contextlib import contextmanager
import numpy as np
def default_model(name):
"""
Get a model with basic settings such as gravity and RK4 integration enabled
"""
model = MJCModel(name)
root = model.root
# Setup
root.compiler(angle="radian", inertiafromgeom="true")
default = root.default()
default.joint(armature=1, damping=1, limited="true")
default.geom(contype=0, friction="1 0.1 0.1", rgba="0.7 0.7 0 1")
root.option(gravity="0 0 -9.81", integrator="RK4", timestep=0.01)
return model
def pointmass_model(name):
"""
Get a model with basic settings such as gravity and Euler integration enabled
"""
model = MJCModel(name)
root = model.root
# Setup
root.compiler(angle="radian", inertiafromgeom="true", coordinate="local")
default = root.default()
default.joint(limited="false", damping=1)
default.geom(
contype=2,
conaffinity="1",
condim="1",
friction=".5 .1 .1",
density="1000",
margin="0.002",
)
root.option(timestep=0.01, gravity="0 0 0", iterations="20", integrator="Euler")
return model
class MJCModel(object):
def __init__(self, name):
self.name = name
self.root = MJCTreeNode("mujoco").add_attr("model", name)
@contextmanager
def asfile(self):
"""
Usage:
model = MJCModel('reacher')
with model.asfile() as f:
            print(f.read())  # prints a dump of the model
"""
with tempfile.NamedTemporaryFile(mode="w+", suffix=".xml", delete=True) as f:
self.root.write(f)
f.seek(0)
yield f
def open(self):
self.file = tempfile.NamedTemporaryFile(mode="w+", suffix=".xml", delete=True)
self.root.write(self.file)
self.file.seek(0)
return self.file
def close(self):
self.file.close()
def find_attr(self, attr, value):
return self.root.find_attr(attr, value)
def __getstate__(self):
return {}
def __setstate__(self, state):
pass
class MJCTreeNode(object):
def __init__(self, name):
self.name = name
self.attrs = {}
self.children = []
def add_attr(self, key, value):
if isinstance(value, str):
pass
elif isinstance(value, list) or isinstance(value, np.ndarray):
value = " ".join([str(val).lower() for val in value])
else:
value = str(value).lower()
self.attrs[key] = value
return self
def __getattr__(self, name):
def wrapper(**kwargs):
newnode = MJCTreeNode(name)
for (k, v) in kwargs.items():
newnode.add_attr(k, v)
self.children.append(newnode)
return newnode
return wrapper
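
    # Any unknown attribute access becomes an element factory, so XML trees are
    # built by plain attribute calls, e.g. (as used in maze_model.point_maze):
    #   particle = worldbody.body(name="particle", pos=[1.2, 1.2, 0])
    #   particle.geom(name="particle_geom", type="sphere", size=0.1)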
def dfs(self):
yield self
if self.children:
for child in self.children:
for node in child.dfs():
yield node
def find_attr(self, attr, value):
""" Run DFS to find a matching attr """
if attr in self.attrs and self.attrs[attr] == value:
return self
for child in self.children:
res = child.find_attr(attr, value)
if res is not None:
return res
return None
def write(self, ostream, tabs=0):
contents = " ".join(['%s="%s"' % (k, v) for (k, v) in self.attrs.items()])
if self.children:
ostream.write("\t" * tabs)
ostream.write("<%s %s>\n" % (self.name, contents))
for child in self.children:
child.write(ostream, tabs=tabs + 1)
ostream.write("\t" * tabs)
ostream.write("</%s>\n" % self.name)
else:
ostream.write("\t" * tabs)
ostream.write("<%s %s/>\n" % (self.name, contents))
def __str__(self):
s = "<" + self.name
s += " ".join(['%s="%s"' % (k, v) for (k, v) in self.attrs.items()])
return s + ">"
| 4,150 | 27.047297 | 86 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/pointmaze/maze_model.py | """ A pointmass maze env."""
import random
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
from d4rl_alt import offline_env
from d4rl_alt.pointmaze.dynamic_mjc import MJCModel
WALL = 10
EMPTY = 11
GOAL = 12
def parse_maze(maze_str):
lines = maze_str.strip().split("\\")
width, height = len(lines), len(lines[0])
maze_arr = np.zeros((width, height), dtype=np.int32)
for w in range(width):
for h in range(height):
tile = lines[w][h]
if tile == "#":
maze_arr[w][h] = WALL
elif tile == "G":
maze_arr[w][h] = GOAL
elif tile == " " or tile == "O" or tile == "0":
maze_arr[w][h] = EMPTY
else:
raise ValueError("Unknown tile type: %s" % tile)
return maze_arr
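

# For example, parse_maze(U_MAZE) (U_MAZE is defined below) yields a 5x5 int32
# array whose outer border is WALL (10), with interior WALL cells, EMPTY (11)
# corridor cells, and a single GOAL (12) cell.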
def point_maze(maze_str):
maze_arr = parse_maze(maze_str)
mjcmodel = MJCModel("point_maze")
mjcmodel.root.compiler(inertiafromgeom="true", angle="radian", coordinate="local")
mjcmodel.root.option(
timestep="0.01", gravity="0 0 0", iterations="20", integrator="Euler"
)
default = mjcmodel.root.default()
default.joint(damping=1, limited="false")
default.geom(
friction=".5 .1 .1",
density="1000",
margin="0.002",
condim="1",
contype="2",
conaffinity="1",
)
asset = mjcmodel.root.asset()
asset.texture(
type="2d",
name="groundplane",
builtin="checker",
rgb1="0.2 0.3 0.4",
rgb2="0.1 0.2 0.3",
width=100,
height=100,
)
asset.texture(
name="skybox",
type="skybox",
builtin="gradient",
rgb1=".4 .6 .8",
rgb2="0 0 0",
width="800",
height="800",
mark="random",
markrgb="1 1 1",
)
asset.material(name="groundplane", texture="groundplane", texrepeat="20 20")
asset.material(name="wall", rgba=".7 .5 .3 1")
asset.material(name="target", rgba=".6 .3 .3 1")
visual = mjcmodel.root.visual()
visual.headlight(ambient=".4 .4 .4", diffuse=".8 .8 .8", specular="0.1 0.1 0.1")
visual.map(znear=0.01)
visual.quality(shadowsize=2048)
worldbody = mjcmodel.root.worldbody()
worldbody.geom(
name="ground",
size="40 40 0.25",
pos="0 0 -0.1",
type="plane",
contype=1,
conaffinity=0,
material="groundplane",
)
particle = worldbody.body(name="particle", pos=[1.2, 1.2, 0])
particle.geom(
name="particle_geom", type="sphere", size=0.1, rgba="0.0 0.0 1.0 0.0", contype=1
)
particle.site(
name="particle_site", pos=[0.0, 0.0, 0], size=0.2, rgba="0.3 0.6 0.3 1"
)
particle.joint(name="ball_x", type="slide", pos=[0, 0, 0], axis=[1, 0, 0])
particle.joint(name="ball_y", type="slide", pos=[0, 0, 0], axis=[0, 1, 0])
worldbody.site(name="target_site", pos=[0.0, 0.0, 0], size=0.2, material="target")
width, height = maze_arr.shape
for w in range(width):
for h in range(height):
if maze_arr[w, h] == WALL:
worldbody.geom(
conaffinity=1,
type="box",
name="wall_%d_%d" % (w, h),
material="wall",
pos=[w + 1.0, h + 1.0, 0],
size=[0.5, 0.5, 0.2],
)
actuator = mjcmodel.root.actuator()
actuator.motor(joint="ball_x", ctrlrange=[-1.0, 1.0], ctrllimited=True, gear=100)
actuator.motor(joint="ball_y", ctrlrange=[-1.0, 1.0], ctrllimited=True, gear=100)
return mjcmodel
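

# A sketch of how this model is consumed (mirrors MazeEnv.__init__ below):
#
#   model = point_maze(U_MAZE)
#   with model.asfile() as f:
#       mujoco_env.MujocoEnv.__init__(self, model_path=f.name, frame_skip=1)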
LARGE_MAZE = (
"############\\"
+ "#OOOO#OOOOO#\\"
+ "#O##O#O#O#O#\\"
+ "#OOOOOO#OOO#\\"
+ "#O####O###O#\\"
+ "#OO#O#OOOOO#\\"
+ "##O#O#O#O###\\"
+ "#OO#OOO#OGO#\\"
+ "############"
)
LARGE_MAZE_EVAL = (
"############\\"
+ "#OO#OOO#OGO#\\"
+ "##O###O#O#O#\\"
+ "#OO#O#OOOOO#\\"
+ "#O##O#OO##O#\\"
+ "#OOOOOO#OOO#\\"
+ "#O##O#O#O###\\"
+ "#OOOO#OOOOO#\\"
+ "############"
)
MEDIUM_MAZE = (
"########\\"
+ "#OO##OO#\\"
+ "#OO#OOO#\\"
+ "##OOO###\\"
+ "#OO#OOO#\\"
+ "#O#OO#O#\\"
+ "#OOO#OG#\\"
+ "########"
)
MEDIUM_MAZE_EVAL = (
"########\\"
+ "#OOOOOG#\\"
+ "#O#O##O#\\"
+ "#OOOO#O#\\"
+ "###OO###\\"
+ "#OOOOOO#\\"
+ "#OO##OO#\\"
+ "########"
)
SMALL_MAZE = "######\\" + "#OOOO#\\" + "#O##O#\\" + "#OOOO#\\" + "######"
U_MAZE = "#####\\" + "#GOO#\\" + "###O#\\" + "#OOO#\\" + "#####"
U_MAZE_EVAL = "#####\\" + "#OOG#\\" + "#O###\\" + "#OOO#\\" + "#####"
OPEN = "#######\\" + "#OOOOO#\\" + "#OOGOO#\\" + "#OOOOO#\\" + "#######"
class MazeEnv(mujoco_env.MujocoEnv, utils.EzPickle, offline_env.OfflineEnv):
def __init__(
self, maze_spec=U_MAZE, reward_type="dense", reset_target=False, **kwargs
):
offline_env.OfflineEnv.__init__(self, **kwargs)
self.reset_target = reset_target
self.str_maze_spec = maze_spec
self.maze_arr = parse_maze(maze_spec)
self.reward_type = reward_type
self.reset_locations = list(zip(*np.where(self.maze_arr == EMPTY)))
self.reset_locations.sort()
self._target = np.array([0.0, 0.0])
model = point_maze(maze_spec)
with model.asfile() as f:
mujoco_env.MujocoEnv.__init__(self, model_path=f.name, frame_skip=1)
utils.EzPickle.__init__(self)
        # Set the default goal (overridden by a call to set_target)
# Try to find a goal if it exists
self.goal_locations = list(zip(*np.where(self.maze_arr == GOAL)))
if len(self.goal_locations) == 1:
self.set_target(self.goal_locations[0])
elif len(self.goal_locations) > 1:
raise ValueError("More than 1 goal specified!")
else:
# If no goal, use the first empty tile
self.set_target(
np.array(self.reset_locations[0]).astype(self.observation_space.dtype)
)
self.empty_and_goal_locations = self.reset_locations + self.goal_locations
def step(self, action):
action = np.clip(action, -1.0, 1.0)
self.clip_velocity()
self.do_simulation(action, self.frame_skip)
self.set_marker()
ob = self._get_obs()
if self.reward_type == "sparse":
reward = 1.0 if np.linalg.norm(ob[0:2] - self._target) <= 0.5 else 0.0
elif self.reward_type == "dense":
reward = np.exp(-np.linalg.norm(ob[0:2] - self._target))
else:
raise ValueError("Unknown reward type %s" % self.reward_type)
done = False
return ob, reward, done, {}
def _get_obs(self):
return np.concatenate([self.sim.data.qpos, self.sim.data.qvel]).ravel()
def get_target(self):
return self._target
def set_target(self, target_location=None):
if target_location is None:
idx = self.np_random.choice(len(self.empty_and_goal_locations))
reset_location = np.array(self.empty_and_goal_locations[idx]).astype(
self.observation_space.dtype
)
target_location = reset_location + self.np_random.uniform(
low=-0.1, high=0.1, size=self.model.nq
)
self._target = target_location
def set_marker(self):
self.data.site_xpos[self.model.site_name2id("target_site")] = np.array(
[self._target[0] + 1, self._target[1] + 1, 0.0]
)
def clip_velocity(self):
qvel = np.clip(self.sim.data.qvel, -5.0, 5.0)
self.set_state(self.sim.data.qpos, qvel)
def reset_model(self):
idx = self.np_random.choice(len(self.empty_and_goal_locations))
reset_location = np.array(self.empty_and_goal_locations[idx]).astype(
self.observation_space.dtype
)
qpos = reset_location + self.np_random.uniform(
low=-0.1, high=0.1, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * 0.1
self.set_state(qpos, qvel)
if self.reset_target:
self.set_target()
return self._get_obs()
def reset_to_location(self, location):
self.sim.reset()
reset_location = np.array(location).astype(self.observation_space.dtype)
qpos = reset_location + self.np_random.uniform(
low=-0.1, high=0.1, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * 0.1
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
pass
| 8,681 | 29.787234 | 88 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/pointmaze/q_iteration.py | """
Use q-iteration to solve for an optimal policy
Usage: softq_iteration(env, discount=<discount factor>, ent_wt=<entropy bonus>)
"""
import numpy as np
from scipy.special import logsumexp as sp_lse
def softmax(q, alpha=1.0):
q = (1.0 / alpha) * q
q = q - np.max(q)
probs = np.exp(q)
probs = probs / np.sum(probs)
return probs
def logsumexp(q, alpha=1.0, axis=1):
if alpha == 0:
return np.max(q, axis=axis)
return alpha * sp_lse((1.0 / alpha) * q, axis=axis)
def get_policy(q_fn, ent_wt=1.0):
v_rew = logsumexp(q_fn, alpha=ent_wt)
adv_rew = q_fn - np.expand_dims(v_rew, axis=1)
if ent_wt == 0:
pol_probs = adv_rew
pol_probs[pol_probs >= 0] = 1.0
pol_probs[pol_probs < 0] = 0.0
else:
pol_probs = np.exp((1.0 / ent_wt) * adv_rew)
pol_probs /= np.sum(pol_probs, axis=1, keepdims=True)
assert np.all(np.isclose(np.sum(pol_probs, axis=1), 1.0)), str(pol_probs)
return pol_probs
def softq_iteration(
env,
transition_matrix=None,
reward_matrix=None,
num_itrs=50,
discount=0.99,
ent_wt=0.1,
warmstart_q=None,
policy=None,
):
"""
Perform tabular soft Q-iteration
"""
dim_obs = env.num_states
dim_act = env.num_actions
if reward_matrix is None:
reward_matrix = env.reward_matrix()
reward_matrix = reward_matrix[:, :, 0]
if warmstart_q is None:
q_fn = np.zeros((dim_obs, dim_act))
else:
q_fn = warmstart_q
if transition_matrix is None:
t_matrix = env.transition_matrix()
else:
t_matrix = transition_matrix
for k in range(num_itrs):
if policy is None:
v_fn = logsumexp(q_fn, alpha=ent_wt)
else:
v_fn = np.sum((q_fn - ent_wt * np.log(policy)) * policy, axis=1)
new_q = reward_matrix + discount * t_matrix.dot(v_fn)
q_fn = new_q
return q_fn
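

# Minimal usage sketch with a gridcraft environment (mirrors the setup in
# pointmaze/waypoint_controller.py):
#
#   from d4rl_alt.pointmaze.gridcraft import grid_env, grid_spec
#   env = grid_env.GridEnv(grid_spec.spec_from_string("SOO\\O#O\\OOR"))
#   q = softq_iteration(env, num_itrs=50, discount=0.99, ent_wt=0.1)
#   pi = get_policy(q, ent_wt=0.1)  # (num_states, num_actions) probabilities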
def q_iteration(env, **kwargs):
return softq_iteration(env, ent_wt=0.0, **kwargs)
def compute_visitation(env, q_fn, ent_wt=1.0, env_time_limit=50, discount=1.0):
pol_probs = get_policy(q_fn, ent_wt=ent_wt)
dim_obs = env.num_states
dim_act = env.num_actions
state_visitation = np.zeros((dim_obs, 1))
for (state, prob) in env.initial_state_distribution.items():
state_visitation[state] = prob
t_matrix = env.transition_matrix() # S x A x S
sa_visit_t = np.zeros((dim_obs, dim_act, env_time_limit))
for i in range(env_time_limit):
sa_visit = state_visitation * pol_probs
# sa_visit_t[:, :, i] = (discount ** i) * sa_visit
sa_visit_t[:, :, i] = sa_visit
# sum-out (SA)S
new_state_visitation = np.einsum("ij,ijk->k", sa_visit, t_matrix)
state_visitation = np.expand_dims(new_state_visitation, axis=1)
return np.sum(sa_visit_t, axis=2) / float(env_time_limit)
def compute_occupancy(env, q_fn, ent_wt=1.0, env_time_limit=50, discount=1.0):
pol_probs = get_policy(q_fn, ent_wt=ent_wt)
dim_obs = env.num_states
dim_act = env.num_actions
state_visitation = np.zeros((dim_obs, 1))
for (state, prob) in env.initial_state_distribution.items():
state_visitation[state] = prob
t_matrix = env.transition_matrix() # S x A x S
sa_visit_t = np.zeros((dim_obs, dim_act, env_time_limit))
for i in range(env_time_limit):
sa_visit = state_visitation * pol_probs
sa_visit_t[:, :, i] = (discount ** i) * sa_visit
# sa_visit_t[:, :, i] = sa_visit
# sum-out (SA)S
new_state_visitation = np.einsum("ij,ijk->k", sa_visit, t_matrix)
state_visitation = np.expand_dims(new_state_visitation, axis=1)
return np.sum(sa_visit_t, axis=2) # / float(env_time_limit)
| 3,734 | 29.867769 | 79 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/pointmaze/waypoint_controller.py | import numpy as np
from d4rl_alt.pointmaze import q_iteration
from d4rl_alt.pointmaze.gridcraft import grid_env, grid_spec
ZEROS = np.zeros((2,), dtype=np.float32)
ONES = np.ones((2,), dtype=np.float32)
class WaypointController(object):
def __init__(self, maze_str, solve_thresh=0.1, p_gain=10.0, d_gain=-1.0):
self.maze_str = maze_str
self._target = -1000 * ONES
self.p_gain = p_gain
self.d_gain = d_gain
self.solve_thresh = solve_thresh
self.vel_thresh = 0.1
self._waypoint_idx = 0
self._waypoints = []
self._waypoint_prev_loc = ZEROS
self.env = grid_env.GridEnv(grid_spec.spec_from_string(maze_str))
def current_waypoint(self):
return self._waypoints[self._waypoint_idx]
def get_action(self, location, velocity, target):
if np.linalg.norm(self._target - np.array(self.gridify_state(target))) > 1e-3:
# print('New target!', target, 'old:', self._target)
self._new_target(location, target)
dist = np.linalg.norm(location - self._target)
vel = self._waypoint_prev_loc - location
vel_norm = np.linalg.norm(vel)
task_not_solved = (dist >= self.solve_thresh) or (vel_norm >= self.vel_thresh)
if task_not_solved:
next_wpnt = self._waypoints[self._waypoint_idx]
else:
next_wpnt = self._target
# Compute control
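        # PD law: the proportional term pulls toward the next waypoint while the
        # (negative) d_gain damps the current velocity.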
prop = next_wpnt - location
action = self.p_gain * prop + self.d_gain * velocity
dist_next_wpnt = np.linalg.norm(location - next_wpnt)
if (
task_not_solved
and (dist_next_wpnt < self.solve_thresh)
and (vel_norm < self.vel_thresh)
):
self._waypoint_idx += 1
if self._waypoint_idx == len(self._waypoints) - 1:
assert (
np.linalg.norm(self._waypoints[self._waypoint_idx] - self._target)
<= self.solve_thresh
)
self._waypoint_prev_loc = location
action = np.clip(action, -1.0, 1.0)
return action, (not task_not_solved)
def gridify_state(self, state):
return (int(round(state[0])), int(round(state[1])))
def _new_target(self, start, target):
# print('Computing waypoints from %s to %s' % (start, target))
start = self.gridify_state(start)
start_idx = self.env.gs.xy_to_idx(start)
target = self.gridify_state(target)
target_idx = self.env.gs.xy_to_idx(target)
self._waypoint_idx = 0
self.env.gs[target] = grid_spec.REWARD
q_values = q_iteration.q_iteration(env=self.env, num_itrs=50, discount=0.99)
# compute waypoints by performing a rollout in the grid
max_ts = 100
s = start_idx
waypoints = []
for i in range(max_ts):
a = np.argmax(q_values[s])
new_s, reward = self.env.step_stateless(s, a)
waypoint = self.env.gs.idx_to_xy(new_s)
if new_s != target_idx:
waypoint = waypoint - np.random.uniform(size=(2,)) * 0.2
waypoints.append(waypoint)
s = new_s
if new_s == target_idx:
break
self.env.gs[target] = grid_spec.EMPTY
self._waypoints = waypoints
self._waypoint_prev_loc = start
self._target = target
if __name__ == "__main__":
print(q_iteration.__file__)
TEST_MAZE = "######\\" + "#OOOO#\\" + "#O##O#\\" + "#OOOO#\\" + "######"
controller = WaypointController(TEST_MAZE)
start = np.array((1, 1), dtype=np.float32)
target = np.array((4, 3), dtype=np.float32)
    act, done = controller.get_action(start, np.zeros((2,), dtype=np.float32), target)
print("wpt:", controller._waypoints)
print(act, done)
import pdb
pdb.set_trace()
pass
| 3,837 | 33.267857 | 86 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/pointmaze/gridcraft/__init__.py | 0 | 0 | 0 | py |
|
CSD-manipulation | CSD-manipulation-master/d4rl_alt/pointmaze/gridcraft/grid_env.py | import sys
import gym
import gym.spaces
import numpy as np
from d4rl_alt.pointmaze.gridcraft.grid_spec import (
LAVA,
RENDER_DICT,
REWARD,
REWARD2,
REWARD3,
REWARD4,
START,
TILES,
WALL,
)
from d4rl_alt.pointmaze.gridcraft.utils import flat_to_one_hot, one_hot_to_flat
ACT_NOOP = 0
ACT_UP = 1
ACT_DOWN = 2
ACT_LEFT = 3
ACT_RIGHT = 4
ACT_DICT = {
ACT_NOOP: [0, 0],
ACT_UP: [0, -1],
ACT_LEFT: [-1, 0],
ACT_RIGHT: [+1, 0],
ACT_DOWN: [0, +1],
}
ACT_TO_STR = {
ACT_NOOP: "NOOP",
ACT_UP: "UP",
ACT_LEFT: "LEFT",
ACT_RIGHT: "RIGHT",
ACT_DOWN: "DOWN",
}
class TransitionModel(object):
def __init__(self, gridspec, eps=0.2):
self.gs = gridspec
self.eps = eps
def get_aprobs(self, s, a):
# TODO: could probably output a matrix over all states...
legal_moves = self.__get_legal_moves(s)
p = np.zeros(len(ACT_DICT))
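        # With probability eps the executed move is uniform over the legal moves;
        # the remaining 1 - eps mass goes to the requested action (or to NOOP if
        # the requested action is illegal).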
p[list(legal_moves)] = self.eps / (len(legal_moves))
if a in legal_moves:
p[a] += 1.0 - self.eps
else:
# p = np.array([1.0,0,0,0,0]) # NOOP
p[ACT_NOOP] += 1.0 - self.eps
return p
def __get_legal_moves(self, s):
xy = np.array(self.gs.idx_to_xy(s))
moves = {
move
for move in ACT_DICT
if not self.gs.out_of_bounds(xy + ACT_DICT[move])
and self.gs[xy + ACT_DICT[move]] != WALL
}
moves.add(ACT_NOOP)
return moves
class RewardFunction(object):
def __init__(self, rew_map=None, default=0):
if rew_map is None:
rew_map = {
REWARD: 1.0,
REWARD2: 2.0,
REWARD3: 4.0,
REWARD4: 8.0,
LAVA: -100.0,
}
self.default = default
self.rew_map = rew_map
def __call__(self, gridspec, s, a, ns):
val = gridspec[gridspec.idx_to_xy(s)]
if val in self.rew_map:
return self.rew_map[val]
return self.default
class GridEnv(gym.Env):
def __init__(
self,
gridspec,
tiles=TILES,
rew_fn=None,
teps=0.0,
max_timesteps=None,
rew_map=None,
terminal_states=None,
default_rew=0,
):
self.num_states = len(gridspec)
self.num_actions = 5
self._env_args = {"teps": teps, "max_timesteps": max_timesteps}
self.gs = gridspec
self.model = TransitionModel(gridspec, eps=teps)
self.terminal_states = terminal_states
if rew_fn is None:
rew_fn = RewardFunction(rew_map=rew_map, default=default_rew)
self.rew_fn = rew_fn
self.possible_tiles = tiles
self.max_timesteps = max_timesteps
self._timestep = 0
self._true_q = None # q_vals for debugging
super(GridEnv, self).__init__()
def get_transitions(self, s, a):
tile_type = self.gs[self.gs.idx_to_xy(s)]
if tile_type == LAVA: # Lava gets you stuck
return {s: 1.0}
aprobs = self.model.get_aprobs(s, a)
t_dict = {}
for sa in range(5):
if aprobs[sa] > 0:
next_s = self.gs.idx_to_xy(s) + ACT_DICT[sa]
next_s_idx = self.gs.xy_to_idx(next_s)
t_dict[next_s_idx] = t_dict.get(next_s_idx, 0.0) + aprobs[sa]
return t_dict
def step_stateless(self, s, a, verbose=False):
aprobs = self.model.get_aprobs(s, a)
samp_a = np.random.choice(range(5), p=aprobs)
next_s = self.gs.idx_to_xy(s) + ACT_DICT[samp_a]
tile_type = self.gs[self.gs.idx_to_xy(s)]
if tile_type == LAVA: # Lava gets you stuck
next_s = self.gs.idx_to_xy(s)
next_s_idx = self.gs.xy_to_idx(next_s)
rew = self.rew_fn(self.gs, s, samp_a, next_s_idx)
if verbose:
print("Act: %s. Act Executed: %s" % (ACT_TO_STR[a], ACT_TO_STR[samp_a]))
return next_s_idx, rew
def step(self, a, verbose=False):
ns, r = self.step_stateless(self.__state, a, verbose=verbose)
traj_infos = {}
self.__state = ns
obs = ns # flat_to_one_hot(ns, len(self.gs))
done = False
self._timestep += 1
if self.max_timesteps is not None:
if self._timestep >= self.max_timesteps:
done = True
return obs, r, done, traj_infos
def reset(self):
start_idxs = np.array(np.where(self.gs.spec == START)).T
start_idx = start_idxs[np.random.randint(0, start_idxs.shape[0])]
start_idx = self.gs.xy_to_idx(start_idx)
self.__state = start_idx
self._timestep = 0
return start_idx # flat_to_one_hot(start_idx, len(self.gs))
def render(self, close=False, ostream=sys.stdout):
if close:
return
state = self.__state
ostream.write("-" * (self.gs.width + 2) + "\n")
for h in range(self.gs.height):
ostream.write("|")
for w in range(self.gs.width):
if self.gs.xy_to_idx((w, h)) == state:
ostream.write("*")
else:
val = self.gs[w, h]
ostream.write(RENDER_DICT[val])
ostream.write("|\n")
ostream.write("-" * (self.gs.width + 2) + "\n")
@property
def action_space(self):
return gym.spaces.Discrete(5)
@property
def observation_space(self):
dO = len(self.gs)
# return gym.spaces.Box(0,1,shape=dO)
return gym.spaces.Discrete(dO)
def transition_matrix(self):
"""Constructs this environment's transition matrix.
Returns:
A dS x dA x dS array where the entry transition_matrix[s, a, ns]
            corresponds to the probability of transitioning into state ns after taking
action a from state s.
"""
ds = self.num_states
da = self.num_actions
transition_matrix = np.zeros((ds, da, ds))
for s in range(ds):
for a in range(da):
transitions = self.get_transitions(s, a)
for next_s in transitions:
transition_matrix[s, a, next_s] = transitions[next_s]
return transition_matrix
def reward_matrix(self):
"""Constructs this environment's reward matrix.
Returns:
            A dS x dA x dS numpy array where the entry reward_matrix[s, a, ns]
            corresponds to the reward given to an agent when transitioning into
            state ns after taking action a from state s.
"""
ds = self.num_states
da = self.num_actions
rew_matrix = np.zeros((ds, da, ds))
for s in range(ds):
for a in range(da):
for ns in range(ds):
rew_matrix[s, a, ns] = self.rew_fn(self.gs, s, a, ns)
return rew_matrix
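# --- Usage sketch ---------------------------------------------------------
# Illustrative example (an addition, not from the original module): build a
# GridSpec from a string, wrap it in a GridEnv, roll out a few random steps,
# and sanity-check the tabular transition model.
if __name__ == "__main__":
    from d4rl_alt.pointmaze.gridcraft.grid_spec import spec_from_string
    demo_gs = spec_from_string("SOOO\\OOOO\\OOOR")
    demo_env = GridEnv(demo_gs, teps=0.1, max_timesteps=20)
    obs = demo_env.reset()
    for _ in range(5):
        obs, rew, done, _ = demo_env.step(demo_env.action_space.sample())
        print(obs, rew, done)
    P = demo_env.transition_matrix()
    assert np.allclose(P.sum(axis=-1), 1.0)  # every row is a distribution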
| 6,899 | 29.131004 | 84 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/pointmaze/gridcraft/grid_spec.py | import numpy as np
EMPTY = 110
WALL = 111
START = 112
REWARD = 113
OUT_OF_BOUNDS = 114
REWARD2 = 115
REWARD3 = 116
REWARD4 = 117
LAVA = 118
GOAL = 119
TILES = {EMPTY, WALL, START, REWARD, REWARD2, REWARD3, REWARD4, LAVA, GOAL}
STR_MAP = {
"O": EMPTY,
"#": WALL,
"S": START,
"R": REWARD,
"2": REWARD2,
"3": REWARD3,
"4": REWARD4,
"G": GOAL,
"L": LAVA,
}
RENDER_DICT = {v: k for k, v in STR_MAP.items()}
RENDER_DICT[EMPTY] = " "
RENDER_DICT[START] = " "
def spec_from_string(s, valmap=STR_MAP):
if s.endswith("\\"):
s = s[:-1]
rows = s.split("\\")
rowlens = np.array([len(row) for row in rows])
assert np.all(rowlens == rowlens[0])
w, h = len(rows), len(rows[0]) # len(rows[0]), len(rows)
gs = GridSpec(w, h)
for i in range(w):
for j in range(h):
gs[i, j] = valmap[rows[i][j]]
return gs
def spec_from_sparse_locations(w, h, tile_to_locs):
"""
Example usage:
>> spec_from_sparse_locations(10, 10, {START: [(0,0)], REWARD: [(7,8), (8,8)]})
"""
gs = GridSpec(w, h)
for tile_type in tile_to_locs:
locs = np.array(tile_to_locs[tile_type])
for i in range(locs.shape[0]):
gs[tuple(locs[i])] = tile_type
return gs
def local_spec(map, xpnt):
"""
>>> local_spec("yOy\\\\Oxy", xpnt=(5,5))
array([[4, 4],
[6, 4],
[6, 5]])
"""
Y = 0
X = 1
O = 2
valmap = {"y": Y, "x": X, "O": O}
gs = spec_from_string(map, valmap=valmap)
ys = gs.find(Y)
x = gs.find(X)
result = ys - x + np.array(xpnt)
return result
class GridSpec(object):
def __init__(self, w, h):
self.__data = np.zeros((w, h), dtype=np.int32)
self.__w = w
self.__h = h
def __setitem__(self, key, val):
self.__data[key] = val
def __getitem__(self, key):
if self.out_of_bounds(key):
raise NotImplementedError("Out of bounds:" + str(key))
return self.__data[tuple(key)]
def out_of_bounds(self, wh):
""" Return true if x, y is out of bounds """
w, h = wh
if w < 0 or w >= self.__w:
return True
if h < 0 or h >= self.__h:
return True
return False
def get_neighbors(self, k, xy=False):
""" Return values of up, down, left, and right tiles """
if not xy:
k = self.idx_to_xy(k)
offsets = [
np.array([0, -1]),
np.array([0, 1]),
np.array([-1, 0]),
np.array([1, 0]),
]
neighbors = [
self[k + offset] if (not self.out_of_bounds(k + offset)) else OUT_OF_BOUNDS
for offset in offsets
]
return neighbors
def get_value(self, k, xy=False):
""" Return values of up, down, left, and right tiles """
if not xy:
k = self.idx_to_xy(k)
return self[k]
def find(self, value):
return np.array(np.where(self.spec == value)).T
@property
def spec(self):
return self.__data
@property
def width(self):
return self.__w
def __len__(self):
return self.__w * self.__h
@property
def height(self):
return self.__h
def idx_to_xy(self, idx):
if hasattr(idx, "__len__"): # array
x = idx % self.__w
y = np.floor(idx / self.__w).astype(np.int32)
xy = np.c_[x, y]
return xy
else:
return np.array([idx % self.__w, int(np.floor(idx / self.__w))])
def xy_to_idx(self, key):
shape = np.array(key).shape
if len(shape) == 1:
return key[0] + key[1] * self.__w
elif len(shape) == 2:
return key[:, 0] + key[:, 1] * self.__w
else:
raise NotImplementedError()
def __hash__(self):
data = (self.__w, self.__h) + tuple(self.__data.reshape([-1]).tolist())
return hash(data)
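# --- Usage sketch ---------------------------------------------------------
# Illustrative example (an addition, not from the original module): round
# trip between a string map and a GridSpec, and between flat indices and
# (x, y) pairs.
if __name__ == "__main__":
    demo_gs = spec_from_string("SOO\\O#R\\")
    print(demo_gs.width, demo_gs.height)  # -> 2 3
    idx = demo_gs.xy_to_idx((1, 1))
    assert np.all(demo_gs.idx_to_xy(idx) == np.array([1, 1]))
    print(demo_gs.find(REWARD))  # coordinates of the 'R' tile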
| 3,978 | 23.115152 | 87 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/pointmaze/gridcraft/utils.py | import numpy as np
def flat_to_one_hot(val, ndim):
"""
>>> flat_to_one_hot(2, ndim=4)
array([ 0., 0., 1., 0.])
>>> flat_to_one_hot(4, ndim=5)
array([ 0., 0., 0., 0., 1.])
>>> flat_to_one_hot(np.array([2, 4, 3]), ndim=5)
array([[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 1.],
[ 0., 0., 0., 1., 0.]])
"""
shape = np.array(val).shape
v = np.zeros(shape + (ndim,))
if len(shape) == 1:
v[np.arange(shape[0]), val] = 1.0
else:
v[val] = 1.0
return v
def one_hot_to_flat(val):
"""
>>> one_hot_to_flat(np.array([0,0,0,0,1]))
4
>>> one_hot_to_flat(np.array([0,0,1,0]))
2
>>> one_hot_to_flat(np.array([[0,0,1,0], [1,0,0,0], [0,1,0,0]]))
array([2, 0, 1])
"""
idxs = np.array(np.where(val == 1.0))[-1]
if len(val.shape) == 1:
return int(idxs)
return idxs
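# The examples in the docstrings above are doctests written against the
# pre-1.14 NumPy array repr; a sketch for running them (the legacy print
# option restores the old spacing so the expected outputs match):
if __name__ == "__main__":
    import doctest
    np.set_printoptions(legacy="1.13")
    doctest.testmod()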
| 906 | 22.868421 | 68 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/pointmaze/gridcraft/wrappers.py | import numpy as np
from gym.spaces import Box
from d4rl_alt.pointmaze.gridcraft.grid_env import REWARD, GridEnv
import gym
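# ObsWrapper is not defined anywhere importable: this module previously tried
# to import it from itself. The base class below is a minimal reconstruction,
# assuming ObsWrapper only rewraps observations on reset()/step(); the
# upstream definition (rlutil's gridcraft) may differ in detail.
class ObsWrapper(gym.Wrapper):
    def wrap_obs(self, obs, info=None):
        raise NotImplementedError()
    def unwrap_obs(self, obs, info=None):
        raise NotImplementedError()
    def reset(self, **kwargs):
        return self.wrap_obs(self.env.reset(**kwargs))
    def step(self, action):
        obs, rew, done, info = self.env.step(action)
        return self.wrap_obs(obs, info=info), rew, done, info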
class GridObsWrapper(ObsWrapper):
def __init__(self, env):
super(GridObsWrapper, self).__init__(env)
def render(self):
self.env.render()
class EyesWrapper(ObsWrapper):
def __init__(self, env, range=4, types=(REWARD,), angle_thresh=0.8):
super(EyesWrapper, self).__init__(env)
self.types = types
self.range = range
self.angle_thresh = angle_thresh
eyes_low = np.ones(5 * len(types))
eyes_high = np.ones(5 * len(types))
low = np.r_[env.observation_space.low, eyes_low]
high = np.r_[env.observation_space.high, eyes_high]
self.__observation_space = Box(low, high)
def wrap_obs(self, obs, info=None):
gs = self.env.gs # grid spec
xy = gs.idx_to_xy(self.env.obs_to_state(obs))
# xy = np.array([x, y])
extra_obs = []
for tile_type in self.types:
idxs = gs.find(tile_type).astype(np.float32) # N x 2
# gather all idxs that are close
diffs = idxs - np.expand_dims(xy, axis=0)
dists = np.linalg.norm(diffs, axis=1)
valid_idxs = np.where(dists <= self.range)[0]
if len(valid_idxs) == 0:
eye_data = np.array([0, 0, 0, 0, 0], dtype=np.float32)
else:
diffs = diffs[valid_idxs, :]
dists = dists[valid_idxs] + 1e-6
cosines = diffs[:, 0] / dists
cosines = np.r_[cosines, 0]
sines = diffs[:, 1] / dists
sines = np.r_[sines, 0]
on_target = 0.0
if np.any(dists <= 1.0):
on_target = 1.0
eye_data = np.abs(
np.array(
[
on_target,
np.max(cosines),
np.min(cosines),
np.max(sines),
np.min(sines),
]
)
)
eye_data[np.where(eye_data <= self.angle_thresh)] = 0
extra_obs.append(eye_data)
extra_obs = np.concatenate(extra_obs)
obs = np.r_[obs, extra_obs]
# if np.any(np.isnan(obs)):
# import pdb; pdb.set_trace()
return obs
def unwrap_obs(self, obs, info=None):
if len(obs.shape) == 1:
return obs[: -5 * len(self.types)]
else:
return obs[:, : -5 * len(self.types)]
@property
def observation_space(self):
return self.__observation_space
"""
class CoordinateWiseWrapper(GridObsWrapper):
def __init__(self, env):
assert isinstance(env, GridEnv)
super(CoordinateWiseWrapper, self).__init__(env)
self.gs = env.gs
self.dO = self.gs.width+self.gs.height
self.__observation_space = Box(0, 1, self.dO)
def wrap_obs(self, obs, info=None):
state = one_hot_to_flat(obs)
xy = self.gs.idx_to_xy(state)
x = flat_to_one_hot(xy[0], self.gs.width)
y = flat_to_one_hot(xy[1], self.gs.height)
obs = np.r_[x, y]
return obs
def unwrap_obs(self, obs, info=None):
if len(obs.shape) == 1:
x = obs[:self.gs.width]
y = obs[self.gs.width:]
x = one_hot_to_flat(x)
y = one_hot_to_flat(y)
state = self.gs.xy_to_idx(np.c_[x,y])
return flat_to_one_hot(state, self.dO)
else:
raise NotImplementedError()
"""
class RandomObsWrapper(GridObsWrapper):
def __init__(self, env, dO):
assert isinstance(env, GridEnv)
super(RandomObsWrapper, self).__init__(env)
self.gs = env.gs
self.dO = dO
self.obs_matrix = np.random.randn(self.dO, len(self.gs))
self.__observation_space = Box(
np.min(self.obs_matrix),
np.max(self.obs_matrix),
shape=(self.dO,),
dtype=np.float32,
)
def wrap_obs(self, obs, info=None):
return np.inner(self.obs_matrix, obs)
def unwrap_obs(self, obs, info=None):
raise NotImplementedError()
| 4,347 | 31.447761 | 72 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/utils/__init__.py | 0 | 0 | 0 | py |
|
CSD-manipulation | CSD-manipulation-master/d4rl_alt/utils/dataset_utils.py | import h5py
import numpy as np
class DatasetWriter(object):
def __init__(self, mujoco=False, goal=False):
self.mujoco = mujoco
self.goal = goal
self.data = self._reset_data()
self._num_samples = 0
def _reset_data(self):
data = {
"observations": [],
"actions": [],
"terminals": [],
"rewards": [],
}
if self.mujoco:
data["infos/qpos"] = []
data["infos/qvel"] = []
if self.goal:
data["infos/goal"] = []
return data
def __len__(self):
return self._num_samples
def append_data(self, s, a, r, done, goal=None, mujoco_env_data=None):
self._num_samples += 1
self.data["observations"].append(s)
self.data["actions"].append(a)
self.data["rewards"].append(r)
self.data["terminals"].append(done)
if self.goal:
self.data["infos/goal"].append(goal)
if self.mujoco:
self.data["infos/qpos"].append(mujoco_env_data.qpos.ravel().copy())
self.data["infos/qvel"].append(mujoco_env_data.qvel.ravel().copy())
def write_dataset(self, fname, max_size=None, compression="gzip"):
np_data = {}
for k in self.data:
if k == "terminals":
dtype = np.bool_
else:
dtype = np.float32
data = np.array(self.data[k], dtype=dtype)
if max_size is not None:
data = data[:max_size]
np_data[k] = data
dataset = h5py.File(fname, "w")
for k in np_data:
dataset.create_dataset(k, data=np_data[k], compression=compression)
dataset.close()
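# --- Usage sketch ---------------------------------------------------------
# Illustrative round trip (an addition; shapes are arbitrary placeholders):
# log a few transitions, then dump them to an HDF5 file.
if __name__ == "__main__":
    writer = DatasetWriter()
    for t in range(3):
        writer.append_data(
            s=np.zeros(4, dtype=np.float32),
            a=np.zeros(2, dtype=np.float32),
            r=0.0,
            done=(t == 2),
        )
    writer.write_dataset("example_dataset.hdf5")
    print(len(writer), "samples written")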
| 1,736 | 29.473684 | 79 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/utils/quatmath.py | import numpy as np
# For testing whether a number is close to zero
_FLOAT_EPS = np.finfo(np.float64).eps
_EPS4 = _FLOAT_EPS * 4.0
def mulQuat(qa, qb):
res = np.zeros(4)
res[0] = qa[0] * qb[0] - qa[1] * qb[1] - qa[2] * qb[2] - qa[3] * qb[3]
res[1] = qa[0] * qb[1] + qa[1] * qb[0] + qa[2] * qb[3] - qa[3] * qb[2]
res[2] = qa[0] * qb[2] - qa[1] * qb[3] + qa[2] * qb[0] + qa[3] * qb[1]
res[3] = qa[0] * qb[3] + qa[1] * qb[2] - qa[2] * qb[1] + qa[3] * qb[0]
return res
def negQuat(quat):
return np.array([quat[0], -quat[1], -quat[2], -quat[3]])
def quat2Vel(quat, dt=1):
axis = quat[1:].copy()
sin_a_2 = np.sqrt(np.sum(axis ** 2))
axis = axis / (sin_a_2 + 1e-8)
speed = 2 * np.arctan2(sin_a_2, quat[0]) / dt
return speed, axis
def quatDiff2Vel(quat1, quat2, dt):
neg = negQuat(quat1)
diff = mulQuat(quat2, neg)
return quat2Vel(diff, dt)
def axis_angle2quat(axis, angle):
c = np.cos(angle / 2)
s = np.sin(angle / 2)
return np.array([c, s * axis[0], s * axis[1], s * axis[2]])
def euler2mat(euler):
""" Convert Euler Angles to Rotation Matrix. See rotation.py for notes """
euler = np.asarray(euler, dtype=np.float64)
assert euler.shape[-1] == 3, "Invalid shaped euler {}".format(euler)
ai, aj, ak = -euler[..., 2], -euler[..., 1], -euler[..., 0]
si, sj, sk = np.sin(ai), np.sin(aj), np.sin(ak)
ci, cj, ck = np.cos(ai), np.cos(aj), np.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
mat = np.empty(euler.shape[:-1] + (3, 3), dtype=np.float64)
mat[..., 2, 2] = cj * ck
mat[..., 2, 1] = sj * sc - cs
mat[..., 2, 0] = sj * cc + ss
mat[..., 1, 2] = cj * sk
mat[..., 1, 1] = sj * ss + cc
mat[..., 1, 0] = sj * cs - sc
mat[..., 0, 2] = -sj
mat[..., 0, 1] = cj * si
mat[..., 0, 0] = cj * ci
return mat
def euler2quat(euler):
""" Convert Euler Angles to Quaternions. See rotation.py for notes """
euler = np.asarray(euler, dtype=np.float64)
assert euler.shape[-1] == 3, "Invalid shape euler {}".format(euler)
ai, aj, ak = euler[..., 2] / 2, -euler[..., 1] / 2, euler[..., 0] / 2
si, sj, sk = np.sin(ai), np.sin(aj), np.sin(ak)
ci, cj, ck = np.cos(ai), np.cos(aj), np.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
quat = np.empty(euler.shape[:-1] + (4,), dtype=np.float64)
quat[..., 0] = cj * cc + sj * ss
quat[..., 3] = cj * sc - sj * cs
quat[..., 2] = -(cj * ss + sj * cc)
quat[..., 1] = cj * cs - sj * sc
return quat
def mat2euler(mat):
""" Convert Rotation Matrix to Euler Angles. See rotation.py for notes """
mat = np.asarray(mat, dtype=np.float64)
assert mat.shape[-2:] == (3, 3), "Invalid shape matrix {}".format(mat)
cy = np.sqrt(mat[..., 2, 2] * mat[..., 2, 2] + mat[..., 1, 2] * mat[..., 1, 2])
condition = cy > _EPS4
euler = np.empty(mat.shape[:-1], dtype=np.float64)
euler[..., 2] = np.where(
condition,
-np.arctan2(mat[..., 0, 1], mat[..., 0, 0]),
-np.arctan2(-mat[..., 1, 0], mat[..., 1, 1]),
)
euler[..., 1] = np.where(
condition, -np.arctan2(-mat[..., 0, 2], cy), -np.arctan2(-mat[..., 0, 2], cy)
)
euler[..., 0] = np.where(
condition, -np.arctan2(mat[..., 1, 2], mat[..., 2, 2]), 0.0
)
return euler
def mat2quat(mat):
""" Convert Rotation Matrix to Quaternion. See rotation.py for notes """
mat = np.asarray(mat, dtype=np.float64)
assert mat.shape[-2:] == (3, 3), "Invalid shape matrix {}".format(mat)
Qxx, Qyx, Qzx = mat[..., 0, 0], mat[..., 0, 1], mat[..., 0, 2]
Qxy, Qyy, Qzy = mat[..., 1, 0], mat[..., 1, 1], mat[..., 1, 2]
Qxz, Qyz, Qzz = mat[..., 2, 0], mat[..., 2, 1], mat[..., 2, 2]
# Fill only lower half of symmetric matrix
K = np.zeros(mat.shape[:-2] + (4, 4), dtype=np.float64)
K[..., 0, 0] = Qxx - Qyy - Qzz
K[..., 1, 0] = Qyx + Qxy
K[..., 1, 1] = Qyy - Qxx - Qzz
K[..., 2, 0] = Qzx + Qxz
K[..., 2, 1] = Qzy + Qyz
K[..., 2, 2] = Qzz - Qxx - Qyy
K[..., 3, 0] = Qyz - Qzy
K[..., 3, 1] = Qzx - Qxz
K[..., 3, 2] = Qxy - Qyx
K[..., 3, 3] = Qxx + Qyy + Qzz
K /= 3.0
# TODO: vectorize this -- probably could be made faster
q = np.empty(K.shape[:-2] + (4,))
it = np.nditer(q[..., 0], flags=["multi_index"])
while not it.finished:
# Use Hermitian eigenvectors, values for speed
vals, vecs = np.linalg.eigh(K[it.multi_index])
# Select largest eigenvector, reorder to w,x,y,z quaternion
q[it.multi_index] = vecs[[3, 0, 1, 2], np.argmax(vals)]
# Prefer quaternion with positive w
# (q * -1 corresponds to same rotation as q)
if q[it.multi_index][0] < 0:
q[it.multi_index] *= -1
it.iternext()
return q
def quat2euler(quat):
""" Convert Quaternion to Euler Angles. See rotation.py for notes """
return mat2euler(quat2mat(quat))
def quat2mat(quat):
""" Convert Quaternion to Euler Angles. See rotation.py for notes """
quat = np.asarray(quat, dtype=np.float64)
assert quat.shape[-1] == 4, "Invalid shape quat {}".format(quat)
w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]
Nq = np.sum(quat * quat, axis=-1)
s = 2.0 / Nq
X, Y, Z = x * s, y * s, z * s
wX, wY, wZ = w * X, w * Y, w * Z
xX, xY, xZ = x * X, x * Y, x * Z
yY, yZ, zZ = y * Y, y * Z, z * Z
mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)
mat[..., 0, 0] = 1.0 - (yY + zZ)
mat[..., 0, 1] = xY - wZ
mat[..., 0, 2] = xZ + wY
mat[..., 1, 0] = xY + wZ
mat[..., 1, 1] = 1.0 - (xX + zZ)
mat[..., 1, 2] = yZ - wX
mat[..., 2, 0] = xZ - wY
mat[..., 2, 1] = yZ + wX
mat[..., 2, 2] = 1.0 - (xX + yY)
return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))
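# --- Consistency sketch ----------------------------------------------------
# Illustrative check (an addition, not from the original file): the
# conversions above should compose to the identity for Euler angles away
# from gimbal lock.
if __name__ == "__main__":
    euler = np.random.uniform(-np.pi / 4, np.pi / 4, size=3)
    assert np.allclose(quat2euler(euler2quat(euler)), euler, atol=1e-6)
    assert np.allclose(mat2euler(euler2mat(euler)), euler, atol=1e-6)
    print("round-trip OK:", euler)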
| 5,881 | 33.197674 | 85 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/utils/visualize_env.py | import os
import pickle
import click
import gym
import numpy as np
from mjrl.utils.gym_env import GymEnv
import d4rl_alt  # noqa: F401 -- imported for its environment-registration side effect
# from mjrl.policies.gaussian_mlp import MLP
DESC = """
Helper script to visualize policy (in mjrl format).\n
USAGE:\n
Visualizes policy on the env\n
$ python visualize_policy.py --env_name door-v0 \n
$ python visualize_policy.py --env_name door-v0 --policy my_policy.pickle --mode evaluation --episodes 10 \n
"""
class RandomPolicy(object):
def __init__(self, env):
self.env = env
def get_action(self, obs):
return [
self.env.action_space.sample(),
{"evaluation": self.env.action_space.sample()},
]
# MAIN =========================================================
@click.command(help=DESC)
@click.option("--env_name", type=str, help="environment to load", required=True)
@click.option(
"--policy", type=str, help="absolute path of the policy file", default=None
)
@click.option(
"--mode",
type=str,
help="exploration or evaluation mode for policy",
default="evaluation",
)
@click.option(
"--seed", type=int, help="seed for generating environment instances", default=123
)
@click.option(
"--episodes", type=int, help="number of episodes to visualize", default=10
)
def main(env_name, policy, mode, seed, episodes):
e = GymEnv(env_name)
e.set_seed(seed)
"""
if policy is not None:
pi = pickle.load(open(policy, 'rb'))
else:
pi = MLP(e.spec, hidden_sizes=(32,32), seed=seed, init_log_std=-1.0)
"""
pi = RandomPolicy(e)
# render policy
e.visualize_policy(pi, num_episodes=episodes, horizon=e.horizon, mode=mode)
if __name__ == "__main__":
main()
| 1,727 | 24.791045 | 112 | py |
CSD-manipulation | CSD-manipulation-master/envs/__init__.py | 0 | 0 | 0 | py |
|
CSD-manipulation | CSD-manipulation-master/envs/maze_env.py | # Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
from collections import defaultdict
import gym
import numpy as np
from envs.mazes import mazes_dict, make_crazy_maze, make_experiment_maze, make_hallway_maze, make_u_maze
class DummyGoal:
pass
class MazeEnv(gym.Env):
def __init__(self, n, maze_type='square', use_antigoal=False, ddiff=True, ignore_reset_start=False,
done_on_success=True, action_range_override=None, start_random_range_override=None,
obs_include_delta=False, keep_direction=False,
action_noise_std=None):
self.n = n
self._max_episode_steps = n
self.env = DummyGoal()
self._mazes = mazes_dict
self.maze_type = maze_type.lower()
self._ignore_reset_start = bool(ignore_reset_start)
self._done_on_success = bool(done_on_success)
self._obs_include_delta = obs_include_delta
self._keep_direction = keep_direction
self._action_noise_std = action_noise_std
self._cur_direction = None
# Generate a crazy maze specified by its size and generation seed
if self.maze_type.startswith('crazy'):
_, size, seed = self.maze_type.split('_')
size = int(size)
seed = int(seed)
self._mazes[self.maze_type] = {'maze': make_crazy_maze(size, seed), 'action_range': 0.95}
# Generate an "experiment" maze specified by its height, half-width, and size of starting section
if self.maze_type.startswith('experiment'):
_, h, half_w, sz0 = self.maze_type.split('_')
h = int(h)
half_w = int(half_w)
sz0 = int(sz0)
self._mazes[self.maze_type] = {'maze': make_experiment_maze(h, half_w, sz0), 'action_range': 0.25}
if self.maze_type.startswith('corridor'):
corridor_length = int(self.maze_type.split('_')[1])
self._mazes[self.maze_type] = {'maze': make_hallway_maze(corridor_length), 'action_range': 0.95}
if self.maze_type.startswith('umaze'):
corridor_length = int(self.maze_type.split('_')[1])
self._mazes[self.maze_type] = {'maze': make_u_maze(corridor_length), 'action_range': 0.95}
assert self.maze_type in self._mazes
self.min_x = self.maze.min_x
self.max_x = self.maze.max_x
self.min_y = self.maze.min_y
self.max_y = self.maze.max_y
self.min_point = np.array([self.min_x, self.min_y], dtype=np.float32)
self.max_point = np.array([self.max_x, self.max_y], dtype=np.float32)
if action_range_override is not None:
self._mazes[self.maze_type]['action_range'] = action_range_override
if start_random_range_override is not None:
self.maze.start_random_range = start_random_range_override
self.use_antigoal = bool(use_antigoal)
self.ddiff = bool(ddiff)
self._state = dict(s0=None, prev_state=None, state=None, goal=None, n=None, done=None, d_goal_0=None,
d_antigoal_0=None)
self.dist_threshold = 0.15
self.trajectory = []
self.observation_space = gym.spaces.Dict({
'observation': gym.spaces.Box(low=-np.inf, high=np.inf, shape=(25,)),
'achieved_goal': gym.spaces.Box(low=-np.inf, high=np.inf, shape=(3,)),
'desired_goal': gym.spaces.Box(low=-np.inf, high=np.inf, shape=(3,)),
})
self.action_space = gym.spaces.Box(low=-self.action_range, high=self.action_range, shape=(2,))
self.reset()
@staticmethod
def dist(goal, outcome):
# return np.sum(np.abs(goal - outcome))
return np.sqrt(np.sum((goal - outcome) ** 2))
@property
def maze(self):
return self._mazes[self.maze_type]['maze']
@property
def action_range(self):
return self._mazes[self.maze_type]['action_range']
@property
def state(self):
return self._state['state'].reshape(-1)
@property
def goal(self):
return self._state['goal'].reshape(-1)
@property
def antigoal(self):
return self._state['antigoal'].reshape(-1)
@property
def reward(self):
# r_sparse = -np.ones(1) + float(self.is_success)
# r_dense = -self.dist(self.goal, self.state)
# if self.use_antigoal:
# r_dense += self.dist(self.antigoal, self.state)
# if not self.ddiff:
# reward = r_sparse + np.clip(r_dense, -np.inf, 0.0)
# else:
# r_dense_prev = -self.dist(self.goal, self._state['prev_state'])
# if self.use_antigoal:
# r_dense_prev += self.dist(self.antigoal, self._state['prev_state'])
# r_dense -= r_dense_prev
# reward = r_sparse + r_dense
reward = self.state[0] - self._state['prev_state'][0]
return reward
@property
def achieved(self):
return self.goal if self.is_success else self.state
@property
def is_done(self):
return bool(self._state['done'])
@property
def is_success(self):
d = self.dist(self.goal, self.state)
return d <= self.dist_threshold
@property
def d_goal_0(self):
return self._state['d_goal_0']
@property
def d_antigoal_0(self):
return self._state['d_antigoal_0']
@property
def next_phase_reset(self):
return {'state': self._state['s0'], 'goal': self.goal, 'antigoal': self.achieved}
@property
def sibling_reset(self):
return {'state': self._state['s0'], 'goal': self.goal}
def _get_mdp_state(self):
observation = np.zeros(25)
achieved_goal = np.zeros(3)
desired_goal = np.zeros(3)
observation[0:2] = self.state
observation[3:5] = self.state
return {
'observation': observation,
'achieved_goal': achieved_goal,
'desired_goal': desired_goal,
}
def reset(self, state=None, goal=None, antigoal=None):
# if state is None or self._ignore_reset_start:
# s_xy = self.maze.sample_start()
# else:
# s_xy = state
s_xy = np.zeros(2)
s_xy = np.array(s_xy)
if goal is None:
if 'square' in self.maze_type:
g_xy = self.maze.sample_goal(min_wall_dist=0.025 + self.dist_threshold)
else:
g_xy = self.maze.sample_goal()
else:
g_xy = goal
g_xy = np.array(g_xy)
if antigoal is None:
ag_xy = g_xy
else:
ag_xy = antigoal
if self._keep_direction:
self._cur_direction = np.random.random() * 2 * np.pi
self._state = {
's0': s_xy,
'prev_state': s_xy * np.ones_like(s_xy),
'state': s_xy * np.ones_like(s_xy),
'goal': g_xy,
'antigoal': ag_xy,
'n': 0,
'done': False,
'd_goal_0': self.dist(g_xy, s_xy),
'd_antigoal_0': self.dist(g_xy, ag_xy),
}
self.trajectory = [self.state]
return self._get_mdp_state()
def step(self, action):
obsbefore = self._get_mdp_state()
action = action.copy() * 0.2
if self._action_noise_std is not None:
action = action + np.random.normal(scale=self._action_noise_std, size=action.shape)
# Clip action
for i in range(len(action)):
action[i] = np.clip(action[i], -self.action_range, self.action_range)
try:
next_state = self._state['state'] + action
# if self._keep_direction:
# r = (action[0] + self.action_range) / 2
# theta = (action[1] + self.action_range) / (2 * self.action_range) * np.pi - np.pi / 2
# self._cur_direction += theta
# x = r * np.cos(self._cur_direction)
# y = r * np.sin(self._cur_direction)
# next_state = self.maze.move(
# self._state['state'],
# np.array([x, y]),
# )
# else:
# next_state = self.maze.move(
# self._state['state'],
# action
# )
next_state = np.array(next_state)
except:
print('state', self._state['state'])
print('action', action)
raise
self._state['prev_state'] = self._state['state']
self._state['state'] = next_state
self._state['n'] += 1
# done = self._state['n'] >= self.n
# if self._done_on_success:
# done = done or self.is_success
done = False
self._state['done'] = done
self.trajectory.append(self.state)
# self.render()
return self._get_mdp_state(), self.reward, self.is_done, {
'coordinates': self._state['prev_state'],
'next_coordinates': self._state['state'],
}
def sample(self):
return self.maze.sample()
def render(self, *args):
self.maze.plot(trajectory=self.trajectory)
def render_trajectories(self, trajectories, colors, plot_axis, ax):
"""Plot multiple trajectories onto ax"""
coordinates_trajectories = self._get_coordinates_trajectories(trajectories)
self.maze.plot_trajectories(coordinates_trajectories, colors, plot_axis, ax)
def _get_coordinates_trajectories(self, trajectories):
coordinates_trajectories = []
for trajectory in trajectories:
if trajectory['env_infos']['coordinates'].ndim == 2:
coordinates_trajectories.append(np.concatenate([
trajectory['env_infos']['coordinates'],
[trajectory['env_infos']['next_coordinates'][-1]]
]))
elif trajectory['env_infos']['coordinates'].ndim > 2:
# Nested array (due to the child policy)
coordinates_trajectories.append(np.concatenate([
trajectory['env_infos']['coordinates'].reshape(-1, 2),
trajectory['env_infos']['next_coordinates'].reshape(-1, 2)[-1:]
]))
return coordinates_trajectories
def calc_eval_metrics(self, trajectories, is_option_trajectories):
trajectory_eval_metrics = defaultdict(list)
coordinates_trajectories = self._get_coordinates_trajectories(trajectories)
for trajectory, coordinates_trajectory in zip(trajectories, coordinates_trajectories):
# terminal distance
trajectory_eval_metrics['TerminalDistance'].append(np.linalg.norm(
coordinates_trajectory[0] - coordinates_trajectory[-1]
))
# smoothed length
smooth_window_size = 5
num_smooth_samples = 6
if len(coordinates_trajectory) >= smooth_window_size:
smoothed_coordinates_trajectory = np.zeros((len(coordinates_trajectory) - smooth_window_size + 1, 2))
for i in range(2):
smoothed_coordinates_trajectory[:, i] = np.convolve(
coordinates_trajectory[:, i], [1 / smooth_window_size] * smooth_window_size, mode='valid'
)
idxs = np.round(np.linspace(0, len(smoothed_coordinates_trajectory) - 1, num_smooth_samples)).astype(int)
smoothed_coordinates_trajectory = smoothed_coordinates_trajectory[idxs]
else:
smoothed_coordinates_trajectory = coordinates_trajectory
sum_distances = 0
for i in range(len(smoothed_coordinates_trajectory) - 1):
sum_distances += np.linalg.norm(
smoothed_coordinates_trajectory[i] - smoothed_coordinates_trajectory[i + 1]
)
trajectory_eval_metrics['SmoothedLength'].append(sum_distances)
# cell percentage
num_grids = 10 # per one side
grid_xs = np.linspace(self.min_x, self.max_x, num_grids + 1)
grid_ys = np.linspace(self.min_y, self.max_y, num_grids + 1)
is_exist = np.zeros((num_grids, num_grids))
for coordinates_trajectory in coordinates_trajectories:
for x, y in coordinates_trajectory:
x_idx = np.searchsorted(grid_xs, x) # binary search
y_idx = np.searchsorted(grid_ys, y)
x_idx = np.clip(x_idx, 1, num_grids) - 1
y_idx = np.clip(y_idx, 1, num_grids) - 1
is_exist[x_idx, y_idx] = 1
is_exist = is_exist.flatten()
cell_percentage = np.sum(is_exist) / len(is_exist)
eval_metrics = {
'MaxTerminalDistance': np.max(trajectory_eval_metrics['TerminalDistance']),
'MeanTerminalDistance': np.mean(trajectory_eval_metrics['TerminalDistance']),
'MaxSmoothedLength': np.max(trajectory_eval_metrics['SmoothedLength']),
'MeanSmoothedLength': np.mean(trajectory_eval_metrics['SmoothedLength']),
'CellPercentage': cell_percentage,
}
if is_option_trajectories:
# option std
option_terminals = defaultdict(list)
for trajectory, coordinates_trajectory in zip(trajectories, coordinates_trajectories):
option_terminals[
tuple(trajectory['agent_infos']['option'][0])
].append(coordinates_trajectory[-1])
mean_option_terminals = [np.mean(terminals, axis=0) for terminals in option_terminals.values()]
intra_option_std = np.mean([np.mean(np.std(terminals, axis=0)) for terminals in option_terminals.values()])
inter_option_std = np.mean(np.std(mean_option_terminals, axis=0))
eval_metrics['IntraOptionStd'] = intra_option_std
eval_metrics['InterOptionStd'] = inter_option_std
eval_metrics['InterIntraOptionStdDiff'] = inter_option_std - intra_option_std
return eval_metrics
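# --- Usage sketch ---------------------------------------------------------
# Illustrative example (an addition, not from the original file): roll a
# random policy through one of the mazes registered in envs/mazes.py.
if __name__ == "__main__":
    env = MazeEnv(n=50, maze_type='square_a')
    obs = env.reset()
    for _ in range(10):
        obs, rew, done, info = env.step(env.action_space.sample())
    print('final coordinates:', info['next_coordinates'])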
| 14,175 | 37.73224 | 121 | py |
CSD-manipulation | CSD-manipulation-master/envs/mazes.py | # Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
import matplotlib.pyplot as plt
import numpy as np
class CircleMaze:
# Deprecated (as of now, especially regarding render())
def __init__(self):
raise NotImplementedError()
self.ring_r = 0.15
self.stop_t = 0.05
self.s_angle = 30
self.mean_s0 = (
float(np.cos(np.pi * self.s_angle / 180)),
float(np.sin(np.pi * self.s_angle / 180))
)
self.mean_g = (
float(np.cos(np.pi * (360 - self.s_angle) / 180)),
float(np.sin(np.pi * (360 - self.s_angle) / 180))
)
def plot(self, ax=None):
        if ax is None:
            _, ax = plt.subplots(1, 1, figsize=(5, 4))
rads = np.linspace(self.stop_t * 2 * np.pi, (1 - self.stop_t) * 2 * np.pi)
xs_i = (1 - self.ring_r) * np.cos(rads)
ys_i = (1 - self.ring_r) * np.sin(rads)
xs_o = (1 + self.ring_r) * np.cos(rads)
ys_o = (1 + self.ring_r) * np.sin(rads)
ax.plot(xs_i, ys_i, 'k', linewidth=3)
ax.plot(xs_o, ys_o, 'k', linewidth=3)
ax.plot([xs_i[0], xs_o[0]], [ys_i[0], ys_o[0]], 'k', linewidth=3)
ax.plot([xs_i[-1], xs_o[-1]], [ys_i[-1], ys_o[-1]], 'k', linewidth=3)
lim = 1.1 + self.ring_r
ax.set_xlim([-lim, lim])
ax.set_ylim([-lim, lim])
def sample_start(self):
STD = 0.1
return self.move(self.mean_s0, (STD * np.random.randn(), STD * np.random.randn()))
def sample_goal(self):
STD = 0.1
return self.move(self.mean_g, (STD * np.random.randn(), STD * np.random.randn()))
@staticmethod
def xy_to_rt(xy):
x = xy[0]
y = xy[1]
r = np.sqrt(x ** 2 + y ** 2)
t = np.arctan2(y, x) % (2 * np.pi)
return r, t
def move(self, coords, action):
xp, yp = coords
rp, tp = self.xy_to_rt(coords)
xy = (coords[0] + action[0], coords[1] + action[1])
r, t = self.xy_to_rt(xy)
t = np.clip(t % (2 * np.pi), (0.001 + self.stop_t) * (2 * np.pi), (1 - (0.001 + self.stop_t)) * (2 * np.pi))
x = np.cos(t) * r
y = np.sin(t) * r
if coords is not None:
if xp > 0:
if (y < 0) and (yp > 0):
t = self.stop_t * 2 * np.pi
elif (y > 0) and (yp < 0):
t = (1 - self.stop_t) * 2 * np.pi
x = np.cos(t) * r
y = np.sin(t) * r
n = 8
xyi = np.array([xp, yp]).astype(np.float32)
dxy = (np.array([x, y]).astype(np.float32) - xyi) / n
new_r = float(rp)
new_t = float(tp)
count = 0
def r_ok(r_):
return (1 - self.ring_r) <= r_ <= (1 + self.ring_r)
def t_ok(t_):
return (self.stop_t * (2 * np.pi)) <= (t_ % (2 * np.pi)) <= ((1 - self.stop_t) * (2 * np.pi))
while r_ok(new_r) and t_ok(new_t) and count < n:
xyi += dxy
new_r, new_t = self.xy_to_rt(xyi)
count += 1
r = np.clip(new_r, 1 - self.ring_r + 0.01, 1 + self.ring_r - 0.01)
t = np.clip(new_t % (2 * np.pi), (0.001 + self.stop_t) * (2 * np.pi), (1 - (0.001 + self.stop_t)) * (2 * np.pi))
x = np.cos(t) * r
y = np.sin(t) * r
return float(x), float(y)
class Maze:
def __init__(self, *segment_dicts, goal_squares=None, start_squares=None,
min_wall_coord=None, walls_to_add=(), walls_to_remove=(), start_random_range=0.5):
self._segments = {'origin': {'loc': (0.0, 0.0), 'connect': set()}}
self._locs = set()
self._locs.add(self._segments['origin']['loc'])
self._walls = set()
for direction in ['up', 'down', 'left', 'right']:
self._walls.add(self._wall_line(self._segments['origin']['loc'], direction))
self._last_segment = 'origin'
self.goal_squares = None
# These allow to implement more complex mazes
self.min_wall_coord = min_wall_coord
self.walls_to_add = walls_to_add
self.walls_to_remove = walls_to_remove
self.start_random_range = start_random_range
if goal_squares is None:
self._goal_squares = None
elif isinstance(goal_squares, str):
self._goal_squares = [goal_squares.lower()]
elif isinstance(goal_squares, (tuple, list)):
self._goal_squares = [gs.lower() for gs in goal_squares]
else:
raise TypeError
if start_squares is None:
self.start_squares = ['origin']
elif isinstance(start_squares, str):
self.start_squares = [start_squares.lower()]
elif isinstance(start_squares, (tuple, list)):
self.start_squares = [ss.lower() for ss in start_squares]
else:
raise TypeError
for segment_dict in segment_dicts:
self._add_segment(**segment_dict)
self._finalize()
self.fig, self.ax = None, None
wall_xs = [wall[0] for walls in self._walls for wall in walls]
wall_ys = [wall[1] for walls in self._walls for wall in walls]
self.min_x = min(wall_xs)
self.max_x = max(wall_xs)
self.min_y = min(wall_ys)
self.max_y = max(wall_ys)
@staticmethod
def _wall_line(coord, direction):
x, y = coord
if direction == 'up':
w = [(x - 0.5, x + 0.5), (y + 0.5, y + 0.5)]
elif direction == 'right':
w = [(x + 0.5, x + 0.5), (y + 0.5, y - 0.5)]
elif direction == 'down':
w = [(x - 0.5, x + 0.5), (y - 0.5, y - 0.5)]
elif direction == 'left':
w = [(x - 0.5, x - 0.5), (y - 0.5, y + 0.5)]
else:
raise ValueError
w = tuple([tuple(sorted(line)) for line in w])
return w
def _add_segment(self, name, anchor, direction, connect=None, times=1):
name = str(name).lower()
original_name = str(name).lower()
if times > 1:
assert connect is None
last_name = str(anchor).lower()
for time in range(times):
this_name = original_name + str(time)
self._add_segment(name=this_name.lower(), anchor=last_name, direction=direction)
last_name = str(this_name)
return
anchor = str(anchor).lower()
assert anchor in self._segments
direction = str(direction).lower()
final_connect = set()
if connect is not None:
if isinstance(connect, str):
connect = str(connect).lower()
assert connect in ['up', 'down', 'left', 'right']
final_connect.add(connect)
elif isinstance(connect, (tuple, list)):
for connect_direction in connect:
connect_direction = str(connect_direction).lower()
assert connect_direction in ['up', 'down', 'left', 'right']
final_connect.add(connect_direction)
sx, sy = self._segments[anchor]['loc']
dx, dy = 0.0, 0.0
if direction == 'left':
dx -= 1
final_connect.add('right')
elif direction == 'right':
dx += 1
final_connect.add('left')
elif direction == 'up':
dy += 1
final_connect.add('down')
elif direction == 'down':
dy -= 1
final_connect.add('up')
else:
raise ValueError
new_loc = (sx + dx, sy + dy)
assert new_loc not in self._locs
self._segments[name] = {'loc': new_loc, 'connect': final_connect}
for direction in ['up', 'down', 'left', 'right']:
self._walls.add(self._wall_line(new_loc, direction))
self._locs.add(new_loc)
self._last_segment = name
def _finalize(self):
bottom_wall_coord = min([min(w[0]) for w in self._walls]) + 0.5
left_wall_coord = min([min(w[1]) for w in self._walls]) + 0.5
def _rm_wall(wall):
coords = wall[0] + wall[1]
# Check if this is the bottom wall
if wall[0][0] < bottom_wall_coord and wall[0][1] < bottom_wall_coord:
return False
# Check if this is the left wall
if wall[1][0] < left_wall_coord and wall[1][1] < left_wall_coord:
return False
# Remove walls in the bottom-left corner
return all(c < self.min_wall_coord for c in coords)
if self.min_wall_coord is not None:
self._walls = set([w for w in self._walls if not _rm_wall(w)])
for wall in self.walls_to_remove:
if wall in self._walls:
self._walls.remove(wall)
for segment in self._segments.values():
for c_dir in list(segment['connect']):
wall = self._wall_line(segment['loc'], c_dir)
if wall in self._walls:
self._walls.remove(wall)
for wall in self.walls_to_add:
self._walls.add(wall)
if self._goal_squares is None:
self.goal_squares = [self._last_segment]
else:
self.goal_squares = []
for gs in self._goal_squares:
assert gs in self._segments
self.goal_squares.append(gs)
def plot_maze(self, ax):
for x, y in self._walls:
ax.plot(x, y, 'k-')
def plot(self, trajectory):
"""Plot trajectory onto the screen."""
if self.fig is None:
self.fig, self.ax = plt.subplots()
self.fig.canvas.draw()
self.ax.cla()
# self.plot_maze(self.ax)
trajectory = np.array(trajectory)
self.ax.plot(trajectory[:, 0], trajectory[:, 1], 'b-', linewidth=0.7)
self.ax.axis('scaled')
self.fig.draw(self.fig.canvas.renderer)
plt.pause(0.0001)
def plot_trajectories(self, trajectories, colors, plot_axis, ax):
"""Plot trajectories onto given ax."""
# self.plot_maze(ax)
for trajectory, color in zip(trajectories, colors):
trajectory = np.array(trajectory)
ax.plot(trajectory[:, 0], trajectory[:, 1], color=color, linewidth=0.7)
if plot_axis is not None:
ax.axis(plot_axis)
else:
ax.axis('scaled')
def sample(self):
segment_keys = list(self._segments.keys())
square_id = segment_keys[np.random.randint(low=0, high=len(segment_keys))]
square_loc = self._segments[square_id]['loc']
shift = np.random.uniform(low=-0.5, high=0.5, size=(2,))
loc = square_loc + shift
return loc[0], loc[1]
def sample_start(self):
min_wall_dist = 0.05
s_square = self.start_squares[np.random.randint(low=0, high=len(self.start_squares))]
s_square_loc = self._segments[s_square]['loc']
while True:
shift = np.random.uniform(low=-self.start_random_range, high=self.start_random_range, size=(2,))
loc = s_square_loc + shift
dist_checker = np.array([min_wall_dist, min_wall_dist]) * np.sign(shift)
stopped_loc = self.move(loc, dist_checker)
if float(np.sum(np.abs((loc + dist_checker) - stopped_loc))) == 0.0:
break
return loc[0], loc[1]
def sample_goal(self, min_wall_dist=None):
if min_wall_dist is None:
min_wall_dist = 0.1
else:
min_wall_dist = min(0.4, max(0.01, min_wall_dist))
g_square = self.goal_squares[np.random.randint(low=0, high=len(self.goal_squares))]
g_square_loc = self._segments[g_square]['loc']
while True:
shift = np.random.uniform(low=-0.5, high=0.5, size=(2,))
loc = g_square_loc + shift
dist_checker = np.array([min_wall_dist, min_wall_dist]) * np.sign(shift)
stopped_loc = self.move(loc, dist_checker)
if float(np.sum(np.abs((loc + dist_checker) - stopped_loc))) == 0.0:
break
return loc[0], loc[1]
def move(self, coord_start, coord_delta, depth=None):
if depth is None:
depth = 0
cx, cy = coord_start
loc_x0 = np.round(cx)
loc_y0 = np.round(cy)
# assert (float(loc_x0), float(loc_y0)) in self._locs
dx, dy = coord_delta
loc_x1 = np.round(cx + dx)
loc_y1 = np.round(cy + dy)
d_loc_x = int(np.abs(loc_x1 - loc_x0))
d_loc_y = int(np.abs(loc_y1 - loc_y0))
xs_crossed = [loc_x0 + (np.sign(dx) * (i + 0.5)) for i in range(d_loc_x)]
ys_crossed = [loc_y0 + (np.sign(dy) * (i + 0.5)) for i in range(d_loc_y)]
rds = []
for x in xs_crossed:
r = (x - cx) / dx
loc_x = np.round(cx + (0.999 * r * dx))
loc_y = np.round(cy + (0.999 * r * dy))
direction = 'right' if dx > 0 else 'left'
crossed_line = self._wall_line((loc_x, loc_y), direction)
if crossed_line in self._walls:
rds.append([r, direction])
for y in ys_crossed:
r = (y - cy) / dy
loc_x = np.round(cx + (0.999 * r * dx))
loc_y = np.round(cy + (0.999 * r * dy))
direction = 'up' if dy > 0 else 'down'
crossed_line = self._wall_line((loc_x, loc_y), direction)
if crossed_line in self._walls:
rds.append([r, direction])
# The wall will only stop the agent in the direction perpendicular to the wall
if rds:
rds = sorted(rds)
r, direction = rds[0]
if depth < 3:
new_dx = r * dx
new_dy = r * dy
repulsion = float(np.abs(np.random.rand() * 0.01))
if direction in ['right', 'left']:
new_dx -= np.sign(dx) * repulsion
partial_coords = cx + new_dx, cy + new_dy
remaining_delta = (0.0, (1 - r) * dy)
else:
new_dy -= np.sign(dy) * repulsion
partial_coords = cx + new_dx, cy + new_dy
remaining_delta = ((1 - r) * dx, 0.0)
return self.move(partial_coords, remaining_delta, depth + 1)
else:
r = 1.0
dx *= r
dy *= r
return cx + dx, cy + dy
def make_crazy_maze(size, seed=None):
np.random.seed(seed)
deltas = [
[(-1, 0), 'right'],
[(1, 0), 'left'],
[(0, -1), 'up'],
[(0, 1), 'down'],
]
empty_locs = []
for x in range(size):
for y in range(size):
empty_locs.append((x, y))
locs = [empty_locs.pop(0)]
dirs = [None]
anchors = [None]
while len(empty_locs) > 0:
still_empty = []
np.random.shuffle(empty_locs)
for empty_x, empty_y in empty_locs:
found_anchor = False
np.random.shuffle(deltas)
for (dx, dy), direction in deltas:
c = (empty_x + dx, empty_y + dy)
if c in locs:
found_anchor = True
locs.append((empty_x, empty_y))
dirs.append(direction)
anchors.append(c)
break
if not found_anchor:
still_empty.append((empty_x, empty_y))
empty_locs = still_empty[:]
locs = [str(x) + ',' + str(y) for x, y in locs[1:]]
dirs = dirs[1:]
anchors = [str(x) + ',' + str(y) for x, y in anchors[1:]]
anchors = ['origin' if a == '0,0' else a for a in anchors]
segments = []
for loc, d, anchor in zip(locs, dirs, anchors):
segments.append(dict(name=loc, anchor=anchor, direction=d))
np.random.seed()
return Maze(*segments, goal_squares='{s},{s}'.format(s=size - 1))
def make_experiment_maze(h, half_w, sz0):
if h < 2:
h = 2
if half_w < 3:
half_w = 3
w = 1 + (2 * half_w)
# Create the starting row
segments = [{'anchor': 'origin', 'direction': 'right', 'name': '0,1'}]
for w_ in range(1, w - 1):
segments.append({'anchor': '0,{}'.format(w_), 'direction': 'right', 'name': '0,{}'.format(w_ + 1)})
# Add each row to create H
for h_ in range(1, h):
segments.append({'anchor': '{},{}'.format(h_ - 1, w - 1), 'direction': 'up', 'name': '{},{}'.format(h_, w - 1)})
c = None if h_ == sz0 else 'down'
for w_ in range(w - 2, -1, -1):
segments.append(
{'anchor': '{},{}'.format(h_, w_ + 1), 'direction': 'left', 'connect': c,
'name': '{},{}'.format(h_, w_)}
)
return Maze(*segments, goal_squares=['{},{}'.format(h - 1, half_w + d) for d in [0]])
def make_hallway_maze(corridor_length):
corridor_length = int(corridor_length)
assert corridor_length >= 1
segments = []
last = 'origin'
for x in range(1, corridor_length + 1):
next_name = '0,{}'.format(x)
segments.append({'anchor': last, 'direction': 'right', 'name': next_name})
last = str(next_name)
return Maze(*segments, goal_squares=last)
def make_u_maze(corridor_length):
corridor_length = int(corridor_length)
assert corridor_length >= 1
segments = []
last = 'origin'
for x in range(1, corridor_length + 1):
next_name = '0,{}'.format(x)
segments.append({'anchor': last, 'direction': 'right', 'name': next_name})
last = str(next_name)
assert last == '0,{}'.format(corridor_length)
up_size = 2
for x in range(1, up_size + 1):
next_name = '{},{}'.format(x, corridor_length)
segments.append({'anchor': last, 'direction': 'up', 'name': next_name})
last = str(next_name)
assert last == '{},{}'.format(up_size, corridor_length)
for x in range(1, corridor_length + 1):
next_name = '{},{}'.format(up_size, corridor_length - x)
segments.append({'anchor': last, 'direction': 'left', 'name': next_name})
last = str(next_name)
assert last == '{},0'.format(up_size)
return Maze(*segments, goal_squares=last)
mazes_dict = dict()
segments_a = [
dict(name='A', anchor='origin', direction='down', times=4),
dict(name='B', anchor='A3', direction='right', times=4),
dict(name='C', anchor='B3', direction='up', times=4),
dict(name='D', anchor='A1', direction='right', times=2),
dict(name='E', anchor='D1', direction='up', times=2),
]
mazes_dict['square_a'] = {'maze': Maze(*segments_a, goal_squares=['c2', 'c3']), 'action_range': 0.95}
segments_b = [
dict(name='A', anchor='origin', direction='down', times=4),
dict(name='B', anchor='A3', direction='right', times=4),
dict(name='C', anchor='B3', direction='up', times=4),
dict(name='D', anchor='B1', direction='up', times=4),
]
mazes_dict['square_b'] = {'maze': Maze(*segments_b, goal_squares=['c2', 'c3']), 'action_range': 0.95}
segments_c = [
dict(name='A', anchor='origin', direction='down', times=4),
dict(name='B', anchor='A3', direction='right', times=2),
dict(name='C', anchor='B1', direction='up', times=4),
dict(name='D', anchor='C3', direction='right', times=2),
dict(name='E', anchor='D1', direction='down', times=4)
]
mazes_dict['square_c'] = {'maze': Maze(*segments_c, goal_squares=['e2', 'e3']), 'action_range': 0.95}
segments_d = [
dict(name='TL', anchor='origin', direction='left', times=3),
dict(name='TLD', anchor='TL2', direction='down', times=3),
dict(name='TLR', anchor='TLD2', direction='right', times=2),
dict(name='TLU', anchor='TLR1', direction='up'),
dict(name='TR', anchor='origin', direction='right', times=3),
dict(name='TRD', anchor='TR2', direction='down', times=3),
dict(name='TRL', anchor='TRD2', direction='left', times=2),
dict(name='TRU', anchor='TRL1', direction='up'),
dict(name='TD', anchor='origin', direction='down', times=3),
]
mazes_dict['square_d'] = {'maze': Maze(*segments_d, goal_squares=['tlu', 'tlr1', 'tru', 'trl1']), 'action_range': 0.95}
segments_crazy = [
{'anchor': 'origin', 'direction': 'right', 'name': '1,0'},
{'anchor': 'origin', 'direction': 'up', 'name': '0,1'},
{'anchor': '1,0', 'direction': 'right', 'name': '2,0'},
{'anchor': '0,1', 'direction': 'up', 'name': '0,2'},
{'anchor': '0,2', 'direction': 'right', 'name': '1,2'},
{'anchor': '2,0', 'direction': 'up', 'name': '2,1'},
{'anchor': '1,2', 'direction': 'right', 'name': '2,2'},
{'anchor': '0,2', 'direction': 'up', 'name': '0,3'},
{'anchor': '2,1', 'direction': 'right', 'name': '3,1'},
{'anchor': '1,2', 'direction': 'down', 'name': '1,1'},
{'anchor': '3,1', 'direction': 'down', 'name': '3,0'},
{'anchor': '1,2', 'direction': 'up', 'name': '1,3'},
{'anchor': '3,1', 'direction': 'right', 'name': '4,1'},
{'anchor': '1,3', 'direction': 'up', 'name': '1,4'},
{'anchor': '4,1', 'direction': 'right', 'name': '5,1'},
{'anchor': '4,1', 'direction': 'up', 'name': '4,2'},
{'anchor': '5,1', 'direction': 'down', 'name': '5,0'},
{'anchor': '3,0', 'direction': 'right', 'name': '4,0'},
{'anchor': '1,4', 'direction': 'right', 'name': '2,4'},
{'anchor': '4,2', 'direction': 'right', 'name': '5,2'},
{'anchor': '2,4', 'direction': 'right', 'name': '3,4'},
{'anchor': '3,4', 'direction': 'up', 'name': '3,5'},
{'anchor': '1,4', 'direction': 'left', 'name': '0,4'},
{'anchor': '1,4', 'direction': 'up', 'name': '1,5'},
{'anchor': '2,2', 'direction': 'up', 'name': '2,3'},
{'anchor': '3,1', 'direction': 'up', 'name': '3,2'},
{'anchor': '5,0', 'direction': 'right', 'name': '6,0'},
{'anchor': '3,2', 'direction': 'up', 'name': '3,3'},
{'anchor': '4,2', 'direction': 'up', 'name': '4,3'},
{'anchor': '6,0', 'direction': 'up', 'name': '6,1'},
{'anchor': '6,0', 'direction': 'right', 'name': '7,0'},
{'anchor': '6,1', 'direction': 'right', 'name': '7,1'},
{'anchor': '3,4', 'direction': 'right', 'name': '4,4'},
{'anchor': '1,5', 'direction': 'right', 'name': '2,5'},
{'anchor': '7,1', 'direction': 'up', 'name': '7,2'},
{'anchor': '1,5', 'direction': 'up', 'name': '1,6'},
{'anchor': '4,4', 'direction': 'right', 'name': '5,4'},
{'anchor': '5,4', 'direction': 'down', 'name': '5,3'},
{'anchor': '0,4', 'direction': 'up', 'name': '0,5'},
{'anchor': '7,2', 'direction': 'left', 'name': '6,2'},
{'anchor': '1,6', 'direction': 'left', 'name': '0,6'},
{'anchor': '7,0', 'direction': 'right', 'name': '8,0'},
{'anchor': '7,2', 'direction': 'right', 'name': '8,2'},
{'anchor': '2,5', 'direction': 'up', 'name': '2,6'},
{'anchor': '8,0', 'direction': 'up', 'name': '8,1'},
{'anchor': '3,5', 'direction': 'up', 'name': '3,6'},
{'anchor': '6,2', 'direction': 'up', 'name': '6,3'},
{'anchor': '6,3', 'direction': 'right', 'name': '7,3'},
{'anchor': '3,5', 'direction': 'right', 'name': '4,5'},
{'anchor': '7,3', 'direction': 'up', 'name': '7,4'},
{'anchor': '6,3', 'direction': 'up', 'name': '6,4'},
{'anchor': '6,4', 'direction': 'up', 'name': '6,5'},
{'anchor': '8,1', 'direction': 'right', 'name': '9,1'},
{'anchor': '8,2', 'direction': 'right', 'name': '9,2'},
{'anchor': '2,6', 'direction': 'up', 'name': '2,7'},
{'anchor': '8,2', 'direction': 'up', 'name': '8,3'},
{'anchor': '6,5', 'direction': 'left', 'name': '5,5'},
{'anchor': '5,5', 'direction': 'up', 'name': '5,6'},
{'anchor': '7,4', 'direction': 'right', 'name': '8,4'},
{'anchor': '8,4', 'direction': 'right', 'name': '9,4'},
{'anchor': '0,6', 'direction': 'up', 'name': '0,7'},
{'anchor': '2,7', 'direction': 'up', 'name': '2,8'},
{'anchor': '7,4', 'direction': 'up', 'name': '7,5'},
{'anchor': '9,4', 'direction': 'down', 'name': '9,3'},
{'anchor': '9,4', 'direction': 'up', 'name': '9,5'},
{'anchor': '2,7', 'direction': 'left', 'name': '1,7'},
{'anchor': '4,5', 'direction': 'up', 'name': '4,6'},
{'anchor': '9,1', 'direction': 'down', 'name': '9,0'},
{'anchor': '6,5', 'direction': 'up', 'name': '6,6'},
{'anchor': '3,6', 'direction': 'up', 'name': '3,7'},
{'anchor': '1,7', 'direction': 'up', 'name': '1,8'},
{'anchor': '3,7', 'direction': 'right', 'name': '4,7'},
{'anchor': '2,8', 'direction': 'up', 'name': '2,9'},
{'anchor': '2,9', 'direction': 'left', 'name': '1,9'},
{'anchor': '7,5', 'direction': 'up', 'name': '7,6'},
{'anchor': '1,8', 'direction': 'left', 'name': '0,8'},
{'anchor': '6,6', 'direction': 'up', 'name': '6,7'},
{'anchor': '0,8', 'direction': 'up', 'name': '0,9'},
{'anchor': '7,5', 'direction': 'right', 'name': '8,5'},
{'anchor': '6,7', 'direction': 'left', 'name': '5,7'},
{'anchor': '2,9', 'direction': 'right', 'name': '3,9'},
{'anchor': '3,9', 'direction': 'right', 'name': '4,9'},
{'anchor': '7,6', 'direction': 'right', 'name': '8,6'},
{'anchor': '3,7', 'direction': 'up', 'name': '3,8'},
{'anchor': '9,5', 'direction': 'up', 'name': '9,6'},
{'anchor': '7,6', 'direction': 'up', 'name': '7,7'},
{'anchor': '5,7', 'direction': 'up', 'name': '5,8'},
{'anchor': '3,8', 'direction': 'right', 'name': '4,8'},
{'anchor': '8,6', 'direction': 'up', 'name': '8,7'},
{'anchor': '5,8', 'direction': 'right', 'name': '6,8'},
{'anchor': '7,7', 'direction': 'up', 'name': '7,8'},
{'anchor': '4,9', 'direction': 'right', 'name': '5,9'},
{'anchor': '8,7', 'direction': 'right', 'name': '9,7'},
{'anchor': '7,8', 'direction': 'right', 'name': '8,8'},
{'anchor': '8,8', 'direction': 'up', 'name': '8,9'},
{'anchor': '5,9', 'direction': 'right', 'name': '6,9'},
{'anchor': '6,9', 'direction': 'right', 'name': '7,9'},
{'anchor': '8,9', 'direction': 'right', 'name': '9,9'},
{'anchor': '9,9', 'direction': 'down', 'name': '9,8'}
]
mazes_dict['square_large'] = {'maze': Maze(*segments_crazy, goal_squares='9,9'), 'action_range': 0.95}
segments_tree = [
dict(name='A', anchor='origin', direction='down', times=2),
dict(name='BR', anchor='A1', direction='right', times=4),
dict(name='BL', anchor='A1', direction='left', times=4),
dict(name='CR', anchor='BR3', direction='down', times=2),
dict(name='CL', anchor='BL3', direction='down', times=2),
dict(name='DLL', anchor='CL1', direction='left', times=2),
dict(name='DLR', anchor='CL1', direction='right', times=2),
dict(name='DRL', anchor='CR1', direction='left', times=2),
dict(name='DRR', anchor='CR1', direction='right', times=2),
dict(name='ELL', anchor='DLL1', direction='down', times=2),
dict(name='ELR', anchor='DLR1', direction='down', times=2),
dict(name='ERL', anchor='DRL1', direction='down', times=2),
dict(name='ERR', anchor='DRR1', direction='down', times=2),
]
mazes_dict['square_tree'] = {'maze': Maze(*segments_tree, goal_squares=['ELL1', 'ERR1']), 'action_range': 0.95}
segments_corridor = [
dict(name='A', anchor='origin', direction='left', times=5),
dict(name='B', anchor='origin', direction='right', times=5)
]
mazes_dict['square_corridor'] = {'maze': Maze(*segments_corridor, goal_squares=['b4']), 'action_range': 0.95}
mazes_dict['square_corridor2'] = {'maze': Maze(*segments_corridor, goal_squares=['b4'], start_squares=['a4']),
'action_range': 0.95}
_walls_to_remove = [
((4.5, 4.5), (7.5, 8.5)),
((-0.5, 0.5), (5.5, 5.5)),
((2.5, 2.5), (4.5, 5.5)),
((3.5, 4.5), (3.5, 3.5)),
((4.5, 4.5), (2.5, 3.5)),
((4.5, 5.5), (2.5, 2.5)),
((3.5, 4.5), (0.5, 0.5)),
((4.5, 5.5), (4.5, 4.5)),
((5.5, 5.5), (0.5, 1.5)),
((8.5, 8.5), (-0.5, 0.5)),
((6.5, 7.5), (2.5, 2.5)),
((7.5, 7.5), (6.5, 7.5)),
((7.5, 8.5), (7.5, 7.5)),
((8.5, 8.5), (7.5, 8.5)),
((7.5, 7.5), (2.5, 3.5)),
((8.5, 9.5), (7.5, 7.5)),
((7.5, 8.5), (4.5, 4.5)),
((8.5, 8.5), (4.5, 5.5)),
((5.5, 6.5), (7.5, 7.5)),
((3.5, 4.5), (7.5, 7.5)),
((4.5, 4.5), (6.5, 7.5)),
((4.5, 4.5), (5.5, 6.5)),
((3.5, 3.5), (5.5, 6.5)),
((5.5, 5.5), (5.5, 6.5)),
((3.5, 4.5), (6.5, 6.5)),
((4.5, 5.5), (6.5, 6.5)),
((1.5, 1.5), (7.5, 8.5)),
((2.5, 2.5), (5.5, 6.5)),
((0.5, 0.5), (4.5, 5.5)),
((1.5, 1.5), (5.5, 6.5)),
((4.5, 4.5), (4.5, 5.5)),
((5.5, 5.5), (1.5, 2.5)),
((5.5, 5.5), (2.5, 3.5)),
((5.5, 5.5), (3.5, 4.5)),
((6.5, 7.5), (8.5, 8.5)),
((7.5, 7.5), (8.5, 9.5)),
((0.5, 0.5), (8.5, 9.5)),
((0.5, 1.5), (8.5, 8.5)),
((-0.5, 0.5), (7.5, 7.5)),
((0.5, 1.5), (6.5, 6.5)),
((0.5, 0.5), (6.5, 7.5)),
((2.5, 2.5), (6.5, 7.5)),
((2.5, 2.5), (7.5, 8.5)),
((2.5, 3.5), (8.5, 8.5)),
((3.5, 4.5), (8.5, 8.5)),
((4.5, 5.5), (8.5, 8.5)),
((5.5, 6.5), (8.5, 8.5)),
((7.5, 8.5), (5.5, 5.5)),
((8.5, 9.5), (6.5, 6.5)),
((8.5, 8.5), (5.5, 6.5)),
((7.5, 8.5), (3.5, 3.5)),
((8.5, 9.5), (2.5, 2.5)),
((8.5, 8.5), (2.5, 3.5)),
]
_walls_to_add = [
((-0.5, 0.5), (4.5, 4.5)),
((0.5, 1.5), (4.5, 4.5)),
((2.5, 3.5), (4.5, 4.5)),
((4.5, 4.5), (3.5, 4.5)),
((4.5, 4.5), (2.5, 3.5)),
((4.5, 4.5), (1.5, 2.5)),
((6.5, 6.5), (8.5, 9.5)),
]
mazes_dict['square_bottleneck'] = {'maze': Maze(*segments_crazy, goal_squares='9,9', min_wall_coord=4,
walls_to_remove=_walls_to_remove, walls_to_add=_walls_to_add),
'action_range': 0.95}
mazes_dict['square'] = {'maze': Maze(*segments_crazy, start_squares='4,4', goal_squares='9,9', min_wall_coord=9,
walls_to_remove=_walls_to_remove + [((8.5, 9.5), (1.5, 1.5))]),
'action_range': 0.95}
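# --- Usage sketch ---------------------------------------------------------
# Illustrative smoke test (an addition): each mazes_dict entry pairs a Maze
# with the action range its environment is expected to use.
if __name__ == "__main__":
    demo_maze = mazes_dict['square_a']['maze']
    start = demo_maze.sample_start()
    goal = demo_maze.sample_goal()
    print('start', start, 'goal', goal, 'moved', demo_maze.move(start, (0.3, 0.3)))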
| 29,994 | 38.415243 | 120 | py |
CSD-manipulation | CSD-manipulation-master/envs/kitchen/__init__.py | 0 | 0 | 0 | py |
|
CSD-manipulation | CSD-manipulation-master/envs/kitchen/custom_kitchen.py | from d4rl.kitchen.kitchen_envs import KitchenBase
from dm_control.mujoco import engine
import numpy as np
class KitchenMicrowaveKettleLightSliderV0Custom(KitchenBase):
TASK_ELEMENTS = ['microwave', 'kettle', 'light switch', 'slide cabinet']
def render(self, mode='human', width=None, height=None):
if width is None or height is None:
return []
camera = engine.MovableCamera(self.sim, width, height)
camera.set_pose(distance=2.2, lookat=[-0.2, .5, 2.], azimuth=70, elevation=-35)
img = camera.render()
return img
def _get_obs(self):
t, qp, qv, obj_qp, obj_qv = self.robot.get_obs(
self, robot_noise_ratio=self.robot_noise_ratio)
self.obs_dict = {}
self.obs_dict['t'] = t
self.obs_dict['qp'] = qp
self.obs_dict['qv'] = qv
self.obs_dict['obj_qp'] = obj_qp
self.obs_dict['obj_qv'] = obj_qv
self.obs_dict['goal'] = self.goal
return np.concatenate([self.obs_dict['qp'], self.obs_dict['obj_qp']])
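# --- Usage sketch ---------------------------------------------------------
# Illustrative example (an addition; requires d4rl's kitchen assets and
# MuJoCo): the observation is the concatenation of robot joint positions
# (qp) and object positions (obj_qp) built in _get_obs above.
if __name__ == "__main__":
    env = KitchenMicrowaveKettleLightSliderV0Custom()
    obs = env.reset()
    print(obs.shape)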
| 1,043 | 35 | 87 | py |
energy_ood | energy_ood-master/README.md | # Energy-based Out-of-distribution Detection (Energy OOD)
This repository is the official implementation of [Energy-based Out-of-distribution Detection](https://arxiv.org/abs/2010.03759) by Weitang Liu, Xiaoyun Wang, John Owens and Yixuan Li. This method is an effective and easy-to-use OOD detector, with or without fine-tuning. Our code builds on [Outlier-Exposure](https://github.com/hendrycks/outlier-exposure). If you have any code-related questions, such as [this issue](https://github.com/wetliu/energy_ood/issues/9) and [this issue](https://github.com/wetliu/energy_ood/issues/2), we highly recommend checking the counterpart in [Outlier-Exposure](https://github.com/hendrycks/outlier-exposure).

## Pretrained Models and Datasets
Pretrained models are provided in the folder
```
./CIFAR/snapshots/
```
Please download the datasets into the folder
```
./data/
```
## Testing and Fine-tuning
Run energy score testing for the CIFAR-10 WRN:
```test
bash run.sh energy 0
```
Run energy score testing for the CIFAR-100 WRN:
```test
bash run.sh energy 1
```
Run energy score fine-tuning and testing for the CIFAR-10 WRN:
```train
bash run.sh energy_ft 0
```
run energy score training and testing for cifar100 WRN
```train
bash run.sh energy_ft 1
```
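Energy fine-tuning keeps the standard cross-entropy on in-distribution data and adds a squared hinge penalty that pushes in-distribution energies below `--m_in` and auxiliary-outlier energies above `--m_out` (see `CIFAR/train.py`). A schematic sketch of that regularizer, using the repo's default margins:
```python
# Schematic of the energy-bounded regularizer applied during fine-tuning.
import torch
import torch.nn.functional as F

def energy_reg(logits_in, logits_out, m_in=-25.0, m_out=-7.0):
    Ec_in = -torch.logsumexp(logits_in, dim=1)    # energies of in-distribution samples
    Ec_out = -torch.logsumexp(logits_out, dim=1)  # energies of auxiliary outliers
    return 0.1 * (F.relu(Ec_in - m_in).pow(2).mean()
                  + F.relu(m_out - Ec_out).pow(2).mean())
```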
## Results
Our model achieves the following average performance on 6 OOD datasets:
### 1. MSP vs energy score with and without fine-tuning on [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html)
| Model name | FPR95 |
| ------------------ |---------------- |
| Softmax score | 51.04% |
| Energy score (ours) | 33.01% |
| Softmax score with fine-tune | 8.53% |
| Energy score with fine-tune (ours) | 3.32% |
### 2. CIFAR-10 (in-distribution) vs SVHN (out-of-distribution) Score Distributions

### 3. Performance of different baselines for [WideResNet](https://arxiv.org/abs/1605.07146)
CIFAR-10:
| Model name | FPR95 |
| ------------------ |---------------- |
| [Softmax score](https://arxiv.org/abs/1610.02136) | 51.04% |
| Energy score (ours) | 33.01% |
| [ODIN](https://arxiv.org/abs/1706.02690) | 35.71% |
| [Mahalanobis](https://arxiv.org/abs/1807.03888) | 37.08% |
| [Outlier Exposure](https://arxiv.org/abs/1812.04606)| 8.53% |
| Energy score with fine-tune (ours) | 3.32% |
CIFAR-100:
| Model name | FPR95 |
| ------------------ |---------------- |
| [Softmax score](https://arxiv.org/abs/1610.02136) | 80.41% |
| Energy score (ours) | 73.60% |
| [ODIN](https://arxiv.org/abs/1706.02690) | 74.64% |
| [Mahalanobis](https://arxiv.org/abs/1807.03888) | 54.64% |
| [Outlier Exposure](https://arxiv.org/abs/1812.04606)| 58.10% |
| Energy score with fine-tune (ours) | 47.55% |
## Outlier Datasets
These experiments make use of numerous outlier datasets. Links for the less common datasets are as follows: [80 Million Tiny Images](http://horatio.cs.nyu.edu/mit/tiny/data/tiny_images.bin),
[Textures](https://www.robots.ox.ac.uk/~vgg/data/dtd/), [Places365](http://places2.csail.mit.edu/download.html), [LSUN-C](https://www.dropbox.com/s/fhtsw1m3qxlwj6h/LSUN.tar.gz), [LSUN-R](https://www.dropbox.com/s/moqh2wh8696c3yl/LSUN_resize.tar.gz), [iSUN](https://www.dropbox.com/s/ssz7qxfqae0cca5/iSUN.tar.gz), and SVHN.
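Each outlier set is consumed with the same preprocessing as the in-distribution data (resize/crop to 32x32, CIFAR normalization); for example, the Textures loader from `CIFAR/test.py`:
```python
# Loading one outlier dataset (Textures), as done in CIFAR/test.py.
import torchvision.datasets as dset
import torchvision.transforms as trn
from torch.utils.data import DataLoader

mean = [x / 255 for x in [125.3, 123.0, 113.9]]  # CIFAR channel means
std = [x / 255 for x in [63.0, 62.1, 66.7]]      # CIFAR channel stds

ood_data = dset.ImageFolder(
    root="../data/dtd/images",
    transform=trn.Compose([trn.Resize(32), trn.CenterCrop(32),
                           trn.ToTensor(), trn.Normalize(mean, std)]))
ood_loader = DataLoader(ood_data, batch_size=200, shuffle=True)
```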
## Citation
    @article{liu2020energy,
        title={Energy-based Out-of-distribution Detection},
        author={Liu, Weitang and Wang, Xiaoyun and Owens, John and Li, Yixuan},
        journal={Advances in Neural Information Processing Systems},
        year={2020}
    }
| 3,812 | 39.56383 | 661 | md |
energy_ood | energy_ood-master/CIFAR/run.sh | methods=(pretrained oe_tune)
data_models=(cifar10_wrn cifar100_wrn)
gpu=0
if [ "$1" = "MSP" ]; then
for dm in ${data_models[$2]}; do
for method in ${methods[0]}; do
# MSP with in-distribution samples as pos
echo "-----------"${dm}_${method}" MSP score-----------------"
CUDA_VISIBLE_DEVICES=$gpu python test.py --method_name ${dm}_${method} --num_to_avg 10
done
done
echo "||||||||done with "${dm}_${method}" above |||||||||||||||||||"
elif [ "$1" = "energy" ]; then
for dm in ${data_models[$2]}; do
for method in ${methods[0]}; do
echo "-----------"${dm}_${method}" energy score-----------------"
CUDA_VISIBLE_DEVICES=$gpu python test.py --method_name ${dm}_${method} --num_to_avg 10 --score energy
done
done
echo "||||||||done with "${dm}_${method}" energy score above |||||||||||||||||||"
elif [ "$1" = "M" ]; then
for dm in ${data_models[$2]}; do
for method in ${methods[0]}; do
for noise in 0.0 0.01 0.005 0.002 0.0014 0.001 0.0005; do
echo "-----------"${dm}_${method}_M_noise_${noise}"-----------------"
CUDA_VISIBLE_DEVICES=$gpu python test.py --method_name ${dm}_${method} --num_to_avg 10 --score M --noise $noise -v
done
done
done
echo "||||||||done with "${dm}_${method}_M" noise above|||||||||||||||||||"
elif [ "$1" = "Odin" ]; then
for T in 1000 100 10 1; do
for noise in 0 0.0004 0.0008 0.0014 0.002 0.0024 0.0028 0.0032 0.0038 0.0048; do
echo "-------T="${T}_$2" noise="$noise"--------"
CUDA_VISIBLE_DEVICES=$gpu python test.py --method_name $2 --score Odin --num_to_avg 10 --T $T --noise $noise -v #--test_bs 50
done
echo "||||Odin temperature|||||||||||||||||||||||||||||||||||||||||||"
done
elif [ "$1" = "oe_tune" ] || [ "$1" = "energy_ft" ]; then # fine-tuning
score=OE
if [ "$1" = "energy_ft" ]; then # fine-tuning
score=energy
fi
for dm in ${data_models[$2]}; do
array=(${dm//_/ })
data=${array[0]}
model=${array[1]}
for seed in 1; do
echo "---Training with dataset: "$data"---model used:"$model"---seed: "$seed"---score used:"$score"---------"
if [ "$2" = "0" ]; then
m_out=-5
m_in=-23
elif [ "$2" = "1" ]; then
m_out=-5
m_in=-27
fi
echo "---------------"$m_in"------"$m_out"--------------------"
CUDA_VISIBLE_DEVICES=$gpu python train.py $data --model $model --score $score --seed $seed --m_in $m_in --m_out $m_out
CUDA_VISIBLE_DEVICES=$gpu python test.py --method_name ${dm}_s${seed}_$1 --num_to_avg 10 --score $score
done
done
echo "||||||||done with training above "$1"|||||||||||||||||||"
elif [ "$1" = "T" ]; then
for dm in ${data_models[@]}; do
for method in ${methods[0]}; do
for T in 1 2 5 10 20 50 100 200 500 1000; do
echo "-----------"${dm}_${method}_T_${T}"-----------------"
CUDA_VISIBLE_DEVICES=$gpu python test.py --method_name ${dm}_${method} --num_to_avg 10 --score energy --T $T
done
done
echo "||||||||done with "${dm}_${method}_T" tempearture above|||||||||||||||||||"
done
fi
| 3,282 | 42.197368 | 131 | sh |
energy_ood | energy_ood-master/CIFAR/test.py | import numpy as np
import sys
import os
import pickle
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as trn
import torchvision.datasets as dset
import torch.nn.functional as F
from models.wrn import WideResNet
from skimage.filters import gaussian as gblur
from PIL import Image as PILImage
# go through rigamaroo to do ...utils.display_results import show_performance
if __package__ is None:
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from utils.display_results import show_performance, get_measures, print_measures, print_measures_with_std
import utils.svhn_loader as svhn
import utils.lsun_loader as lsun_loader
import utils.score_calculation as lib
parser = argparse.ArgumentParser(description='Evaluates a CIFAR OOD Detector',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Setup
parser.add_argument('--test_bs', type=int, default=200)
parser.add_argument('--num_to_avg', type=int, default=1, help='Average measures across num_to_avg runs.')
parser.add_argument('--validate', '-v', action='store_true', help='Evaluate performance on validation distributions.')
parser.add_argument('--use_xent', '-x', action='store_true', help='Use cross entropy scoring instead of the MSP.')
parser.add_argument('--method_name', '-m', type=str, default='cifar10_allconv_baseline', help='Method name.')
# Loading details
parser.add_argument('--layers', default=40, type=int, help='total number of layers')
parser.add_argument('--widen-factor', default=2, type=int, help='widen factor')
parser.add_argument('--droprate', default=0.3, type=float, help='dropout probability')
parser.add_argument('--load', '-l', type=str, default='./snapshots', help='Checkpoint path to resume / test.')
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--prefetch', type=int, default=2, help='Pre-fetching threads.')
# EG and benchmark details
parser.add_argument('--out_as_pos', action='store_true', help='OE define OOD data as positive.')
parser.add_argument('--score', default='MSP', type=str, help='score options: MSP|energy')
parser.add_argument('--T', default=1., type=float, help='temperature: energy|Odin')
parser.add_argument('--noise', type=float, default=0, help='noise for Odin')
args = parser.parse_args()
print(args)
# torch.manual_seed(1)
# np.random.seed(1)
# mean and standard deviation of channels of CIFAR-10 images
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
test_transform = trn.Compose([trn.ToTensor(), trn.Normalize(mean, std)])
if 'cifar10_' in args.method_name:
test_data = dset.CIFAR10('../data/cifarpy', train=False, transform=test_transform)
num_classes = 10
else:
test_data = dset.CIFAR100('../data/cifarpy', train=False, transform=test_transform)
num_classes = 100
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
# Create model
net = WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
start_epoch = 0
# Restore model
if args.load != '':
for i in range(1000 - 1, -1, -1):
if 'pretrained' in args.method_name:
subdir = 'pretrained'
elif 'oe_tune' in args.method_name:
subdir = 'oe_tune'
elif 'energy_ft' in args.method_name:
subdir = 'energy_ft'
else:
subdir = 'oe_scratch'
model_name = os.path.join(os.path.join(args.load, subdir), args.method_name + '_epoch_' + str(i) + '.pt')
if os.path.isfile(model_name):
net.load_state_dict(torch.load(model_name))
print('Model restored! Epoch:', i)
start_epoch = i + 1
break
if start_epoch == 0:
assert False, "could not resume "+model_name
net.eval()
if args.ngpu > 1:
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
if args.ngpu > 0:
net.cuda()
# torch.cuda.manual_seed(1)
cudnn.benchmark = True # fire on all cylinders
# /////////////// Detection Prelims ///////////////
ood_num_examples = len(test_data) // 5
expected_ap = ood_num_examples / (ood_num_examples + len(test_data))
concat = lambda x: np.concatenate(x, axis=0)
to_np = lambda x: x.data.cpu().numpy()
def get_ood_scores(loader, in_dist=False):
_score = []
_right_score = []
_wrong_score = []
with torch.no_grad():
for batch_idx, (data, target) in enumerate(loader):
if batch_idx >= ood_num_examples // args.test_bs and in_dist is False:
break
data = data.cuda()
output = net(data)
smax = to_np(F.softmax(output, dim=1))
if args.use_xent:
_score.append(to_np((output.mean(1) - torch.logsumexp(output, dim=1))))
else:
if args.score == 'energy':
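                    # energy score E(x) = -T * logsumexp(f(x)/T); higher values mean more OOD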
_score.append(-to_np((args.T*torch.logsumexp(output / args.T, dim=1))))
else: # original MSP and Mahalanobis (but Mahalanobis won't need this returned)
_score.append(-np.max(smax, axis=1))
if in_dist:
preds = np.argmax(smax, axis=1)
targets = target.numpy().squeeze()
right_indices = preds == targets
wrong_indices = np.invert(right_indices)
if args.use_xent:
_right_score.append(to_np((output.mean(1) - torch.logsumexp(output, dim=1)))[right_indices])
_wrong_score.append(to_np((output.mean(1) - torch.logsumexp(output, dim=1)))[wrong_indices])
else:
_right_score.append(-np.max(smax[right_indices], axis=1))
_wrong_score.append(-np.max(smax[wrong_indices], axis=1))
if in_dist:
return concat(_score).copy(), concat(_right_score).copy(), concat(_wrong_score).copy()
else:
return concat(_score)[:ood_num_examples].copy()
if args.score == 'Odin':
    # handled separately because torch.no_grad() is not applied (ODIN needs input gradients)
in_score, right_score, wrong_score = lib.get_ood_scores_odin(test_loader, net, args.test_bs, ood_num_examples, args.T, args.noise, in_dist=True)
elif args.score == 'M':
from torch.autograd import Variable
_, right_score, wrong_score = get_ood_scores(test_loader, in_dist=True)
if 'cifar10_' in args.method_name:
train_data = dset.CIFAR10('../data/cifarpy', train=True, transform=test_transform)
else:
train_data = dset.CIFAR100('../data/cifarpy', train=True, transform=test_transform)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
num_batches = ood_num_examples // args.test_bs
temp_x = torch.rand(2,3,32,32)
temp_x = Variable(temp_x)
temp_x = temp_x.cuda()
temp_list = net.feature_list(temp_x)[1]
num_output = len(temp_list)
feature_list = np.empty(num_output)
count = 0
for out in temp_list:
feature_list[count] = out.size(1)
count += 1
print('get sample mean and covariance', count)
sample_mean, precision = lib.sample_estimator(net, num_classes, feature_list, train_loader)
in_score = lib.get_Mahalanobis_score(net, test_loader, num_classes, sample_mean, precision, count-1, args.noise, num_batches, in_dist=True)
print(in_score[-3:], in_score[-103:-100])
else:
in_score, right_score, wrong_score = get_ood_scores(test_loader, in_dist=True)
num_right = len(right_score)
num_wrong = len(wrong_score)
print('Error Rate {:.2f}'.format(100 * num_wrong / (num_wrong + num_right)))
# /////////////// End Detection Prelims ///////////////
print('\nUsing CIFAR-10 as typical data') if num_classes == 10 else print('\nUsing CIFAR-100 as typical data')
# /////////////// Error Detection ///////////////
print('\n\nError Detection')
show_performance(wrong_score, right_score, method_name=args.method_name)
# /////////////// OOD Detection ///////////////
auroc_list, aupr_list, fpr_list = [], [], []
def get_and_print_results(ood_loader, num_to_avg=args.num_to_avg):
aurocs, auprs, fprs = [], [], []
for _ in range(num_to_avg):
if args.score == 'Odin':
out_score = lib.get_ood_scores_odin(ood_loader, net, args.test_bs, ood_num_examples, args.T, args.noise)
elif args.score == 'M':
out_score = lib.get_Mahalanobis_score(net, ood_loader, num_classes, sample_mean, precision, count-1, args.noise, num_batches)
else:
out_score = get_ood_scores(ood_loader)
        if args.out_as_pos: # OE defines OOD samples as the positive class
measures = get_measures(out_score, in_score)
else:
measures = get_measures(-in_score, -out_score)
aurocs.append(measures[0]); auprs.append(measures[1]); fprs.append(measures[2])
print(in_score[:3], out_score[:3])
auroc = np.mean(aurocs); aupr = np.mean(auprs); fpr = np.mean(fprs)
auroc_list.append(auroc); aupr_list.append(aupr); fpr_list.append(fpr)
if num_to_avg >= 5:
print_measures_with_std(aurocs, auprs, fprs, args.method_name)
else:
print_measures(auroc, aupr, fpr, args.method_name)
# /////////////// Textures ///////////////
ood_data = dset.ImageFolder(root="../data/dtd/images",
transform=trn.Compose([trn.Resize(32), trn.CenterCrop(32),
trn.ToTensor(), trn.Normalize(mean, std)]))
ood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.test_bs, shuffle=True,
num_workers=4, pin_memory=True)
print('\n\nTexture Detection')
get_and_print_results(ood_loader)
# /////////////// SVHN /////////////// # cropped and no sampling of the test set
ood_data = svhn.SVHN(root='../data/svhn/', split="test",
transform=trn.Compose(
[#trn.Resize(32),
trn.ToTensor(), trn.Normalize(mean, std)]), download=False)
ood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.test_bs, shuffle=True,
num_workers=2, pin_memory=True)
print('\n\nSVHN Detection')
get_and_print_results(ood_loader)
# /////////////// Places365 ///////////////
ood_data = dset.ImageFolder(root="../data/places365/",
transform=trn.Compose([trn.Resize(32), trn.CenterCrop(32),
trn.ToTensor(), trn.Normalize(mean, std)]))
ood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.test_bs, shuffle=True,
num_workers=2, pin_memory=True)
print('\n\nPlaces365 Detection')
get_and_print_results(ood_loader)
# /////////////// LSUN-C ///////////////
ood_data = dset.ImageFolder(root="../data/LSUN_C",
transform=trn.Compose([trn.ToTensor(), trn.Normalize(mean, std)]))
ood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.test_bs, shuffle=True,
num_workers=1, pin_memory=True)
print('\n\nLSUN_C Detection')
get_and_print_results(ood_loader)
# /////////////// LSUN-R ///////////////
ood_data = dset.ImageFolder(root="../data/LSUN_resize",
transform=trn.Compose([trn.ToTensor(), trn.Normalize(mean, std)]))
ood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.test_bs, shuffle=True,
num_workers=1, pin_memory=True)
print('\n\nLSUN_Resize Detection')
get_and_print_results(ood_loader)
# /////////////// iSUN ///////////////
ood_data = dset.ImageFolder(root="../data/iSUN",
transform=trn.Compose([trn.ToTensor(), trn.Normalize(mean, std)]))
ood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.test_bs, shuffle=True,
num_workers=1, pin_memory=True)
print('\n\niSUN Detection')
get_and_print_results(ood_loader)
# /////////////// Mean Results ///////////////
print('\n\nMean Test Results!!!!!')
print_measures(np.mean(auroc_list), np.mean(aupr_list), np.mean(fpr_list), method_name=args.method_name)
# /////////////// OOD Detection of Validation Distributions ///////////////
if args.validate is False:
exit()
auroc_list, aupr_list, fpr_list = [], [], []
# /////////////// Uniform Noise ///////////////
dummy_targets = torch.ones(ood_num_examples * args.num_to_avg)
ood_data = torch.from_numpy(
np.random.uniform(size=(ood_num_examples * args.num_to_avg, 3, 32, 32),
low=-1.0, high=1.0).astype(np.float32))
ood_data = torch.utils.data.TensorDataset(ood_data, dummy_targets)
ood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.test_bs, shuffle=True)
print('\n\nUniform[-1,1] Noise Detection')
get_and_print_results(ood_loader)
# /////////////// Arithmetic Mean of Images ///////////////
if 'cifar10_' in args.method_name:
ood_data = dset.CIFAR100('../data/vision-greg/cifarpy', train=False, transform=test_transform)
else:
ood_data = dset.CIFAR10('../data/vision-greg/cifarpy', train=False, transform=test_transform)
class AvgOfPair(torch.utils.data.Dataset):
def __init__(self, dataset):
self.dataset = dataset
self.shuffle_indices = np.arange(len(dataset))
np.random.shuffle(self.shuffle_indices)
def __getitem__(self, i):
random_idx = np.random.choice(len(self.dataset))
while random_idx == i:
random_idx = np.random.choice(len(self.dataset))
return self.dataset[i][0] / 2. + self.dataset[random_idx][0] / 2., 0
def __len__(self):
return len(self.dataset)
ood_loader = torch.utils.data.DataLoader(AvgOfPair(ood_data),
batch_size=args.test_bs, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
print('\n\nArithmetic Mean of Random Image Pair Detection')
get_and_print_results(ood_loader)
# /////////////// Geometric Mean of Images ///////////////
if 'cifar10_' in args.method_name:
ood_data = dset.CIFAR100('../data/vision-greg/cifarpy', train=False, transform=trn.ToTensor())
else:
ood_data = dset.CIFAR10('../data/vision-greg/cifarpy', train=False, transform=trn.ToTensor())
class GeomMeanOfPair(torch.utils.data.Dataset):
def __init__(self, dataset):
self.dataset = dataset
self.shuffle_indices = np.arange(len(dataset))
np.random.shuffle(self.shuffle_indices)
def __getitem__(self, i):
random_idx = np.random.choice(len(self.dataset))
while random_idx == i:
random_idx = np.random.choice(len(self.dataset))
return trn.Normalize(mean, std)(torch.sqrt(self.dataset[i][0] * self.dataset[random_idx][0])), 0
def __len__(self):
return len(self.dataset)
ood_loader = torch.utils.data.DataLoader(
GeomMeanOfPair(ood_data), batch_size=args.test_bs, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
print('\n\nGeometric Mean of Random Image Pair Detection')
get_and_print_results(ood_loader)
# /////////////// Jigsaw Images ///////////////
ood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.test_bs, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
jigsaw = lambda x: torch.cat((
torch.cat((torch.cat((x[:, 8:16, :16], x[:, :8, :16]), 1),
x[:, 16:, :16]), 2),
torch.cat((x[:, 16:, 16:],
torch.cat((x[:, :16, 24:], x[:, :16, 16:24]), 2)), 2),
), 1)
ood_loader.dataset.transform = trn.Compose([trn.ToTensor(), jigsaw, trn.Normalize(mean, std)])
print('\n\nJigsawed Images Detection')
get_and_print_results(ood_loader)
# /////////////// Speckled Images ///////////////
speckle = lambda x: torch.clamp(x + x * torch.randn_like(x), 0, 1)
ood_loader.dataset.transform = trn.Compose([trn.ToTensor(), speckle, trn.Normalize(mean, std)])
print('\n\nSpeckle Noised Images Detection')
get_and_print_results(ood_loader)
# /////////////// Pixelated Images ///////////////
pixelate = lambda x: x.resize((int(32 * 0.2), int(32 * 0.2)), PILImage.BOX).resize((32, 32), PILImage.BOX)
ood_loader.dataset.transform = trn.Compose([pixelate, trn.ToTensor(), trn.Normalize(mean, std)])
print('\n\nPixelate Detection')
get_and_print_results(ood_loader)
# /////////////// RGB Ghosted/Shifted Images ///////////////
rgb_shift = lambda x: torch.cat((x[1:2].index_select(2, torch.LongTensor([i for i in range(32 - 1, -1, -1)])),
x[2:, :, :], x[0:1, :, :]), 0)
ood_loader.dataset.transform = trn.Compose([trn.ToTensor(), rgb_shift, trn.Normalize(mean, std)])
print('\n\nRGB Ghosted/Shifted Image Detection')
get_and_print_results(ood_loader)
# /////////////// Inverted Images ///////////////
# not applied to all channels, so the image is OOD with higher probability
invert = lambda x: torch.cat((x[0:1, :, :], 1 - x[1:2, :, ], 1 - x[2:, :, :],), 0)
ood_loader.dataset.transform = trn.Compose([trn.ToTensor(), invert, trn.Normalize(mean, std)])
print('\n\nInverted Image Detection')
get_and_print_results(ood_loader)
# /////////////// Mean Results ///////////////
print('\n\nMean Validation Results')
print_measures(np.mean(auroc_list), np.mean(aupr_list), np.mean(fpr_list), method_name=args.method_name)
| 17,632 | 40.006977 | 148 | py |
energy_ood | energy_ood-master/CIFAR/train.py | # -*- coding: utf-8 -*-
import numpy as np
import os
import pickle
import argparse
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as trn
import torchvision.datasets as dset
import torch.nn.functional as F
from tqdm import tqdm
from models.wrn import WideResNet
if __package__ is None:
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from utils.tinyimages_80mn_loader import TinyImages
from utils.validation_dataset import validation_split
parser = argparse.ArgumentParser(description='Tunes a CIFAR Classifier with OE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('dataset', type=str, choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
parser.add_argument('--model', '-m', type=str, default='allconv',
choices=['allconv', 'wrn', 'densenet'], help='Choose architecture.')
parser.add_argument('--calibration', '-c', action='store_true',
help='Train a model to be used for calibration. This holds out some data for validation.')
# Optimization options
parser.add_argument('--epochs', '-e', type=int, default=10, help='Number of epochs to train.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.001, help='The initial learning rate.')
parser.add_argument('--batch_size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--oe_batch_size', type=int, default=256, help='Batch size.')
parser.add_argument('--test_bs', type=int, default=200)
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=0.0005, help='Weight decay (L2 penalty).')
# WRN Architecture
parser.add_argument('--layers', default=40, type=int, help='total number of layers')
parser.add_argument('--widen-factor', default=2, type=int, help='widen factor')
parser.add_argument('--droprate', default=0.3, type=float, help='dropout probability')
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./snapshots/', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='./snapshots/pretrained', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--prefetch', type=int, default=4, help='Pre-fetching threads.')
# EG specific
parser.add_argument('--m_in', type=float, default=-25., help='margin for in-distribution; above this value will be penalized')
parser.add_argument('--m_out', type=float, default=-7., help='margin for out-distribution; below this value will be penalized')
parser.add_argument('--score', type=str, default='OE', help='OE|energy')
parser.add_argument('--seed', type=int, default=1, help='seed for np(tinyimages80M sampling); 1|2|8|100|107')
args = parser.parse_args()
if args.score == 'OE':
save_info = 'oe_tune'
elif args.score == 'energy':
save_info = 'energy_ft'
args.save = args.save+save_info
if not os.path.isdir(args.save):
os.mkdir(args.save)
state = {k: v for k, v in args._get_kwargs()}
print(state)
torch.manual_seed(1)
np.random.seed(args.seed)
# mean and standard deviation of channels of CIFAR-10 images
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
train_transform = trn.Compose([trn.RandomHorizontalFlip(), trn.RandomCrop(32, padding=4),
trn.ToTensor(), trn.Normalize(mean, std)])
test_transform = trn.Compose([trn.ToTensor(), trn.Normalize(mean, std)])
if args.dataset == 'cifar10':
train_data_in = dset.CIFAR10('../data/cifarpy', train=True, transform=train_transform)
test_data = dset.CIFAR10('../data/cifarpy', train=False, transform=test_transform)
num_classes = 10
else:
train_data_in = dset.CIFAR100('../data/cifarpy', train=True, transform=train_transform)
test_data = dset.CIFAR100('../data/cifarpy', train=False, transform=test_transform)
num_classes = 100
calib_indicator = ''
if args.calibration:
train_data_in, val_data = validation_split(train_data_in, val_share=0.1)
calib_indicator = '_calib'
ood_data = TinyImages(transform=trn.Compose(
[trn.ToTensor(), trn.ToPILImage(), trn.RandomCrop(32, padding=4),
trn.RandomHorizontalFlip(), trn.ToTensor(), trn.Normalize(mean, std)]))
train_loader_in = torch.utils.data.DataLoader(
train_data_in,
batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
train_loader_out = torch.utils.data.DataLoader(
ood_data,
batch_size=args.oe_batch_size, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=args.batch_size, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
# Create model
net = WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
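# compatibility helper: patches BatchNorm modules from checkpoints saved with older
# PyTorch versions (defined for convenience; not called in this script)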
def recursion_change_bn(module):
if isinstance(module, torch.nn.BatchNorm2d):
module.track_running_stats = 1
module.num_batches_tracked = 0
else:
for i, (name, module1) in enumerate(module._modules.items()):
module1 = recursion_change_bn(module1)
return module
# Restore model
model_found = False
if args.load != '':
for i in range(1000 - 1, -1, -1):
model_name = os.path.join(args.load, args.dataset + calib_indicator + '_' + args.model +
'_pretrained_epoch_' + str(i) + '.pt')
if os.path.isfile(model_name):
net.load_state_dict(torch.load(model_name))
print('Model restored! Epoch:', i)
model_found = True
break
if not model_found:
assert False, "could not find model to restore"
if args.ngpu > 1:
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
if args.ngpu > 0:
net.cuda()
torch.cuda.manual_seed(1)
cudnn.benchmark = True # fire on all cylinders
optimizer = torch.optim.SGD(
net.parameters(), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
def cosine_annealing(step, total_steps, lr_max, lr_min):
return lr_min + (lr_max - lr_min) * 0.5 * (
1 + np.cos(step / total_steps * np.pi))
scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=lambda step: cosine_annealing(
step,
args.epochs * len(train_loader_in),
1, # since lr_lambda computes multiplicative factor
1e-6 / args.learning_rate))
# /////////////// Training ///////////////
def train():
net.train() # enter train mode
loss_avg = 0.0
# start at a random point of the outlier dataset; this induces more randomness without obliterating locality
train_loader_out.dataset.offset = np.random.randint(len(train_loader_out.dataset))
for in_set, out_set in zip(train_loader_in, train_loader_out):
data = torch.cat((in_set[0], out_set[0]), 0)
target = in_set[1]
data, target = data.cuda(), target.cuda()
# forward
x = net(data)
# backward
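        # note: scheduler.step() before optimizer.step() matches the older PyTorch (<1.1) convention used here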
scheduler.step()
optimizer.zero_grad()
loss = F.cross_entropy(x[:len(in_set[0])], target)
        # auxiliary loss on the outlier batch
        if args.score == 'energy':
            # squared hinge penalties: push in-distribution energies below m_in
            # and outlier energies above m_out
            Ec_out = -torch.logsumexp(x[len(in_set[0]):], dim=1)
            Ec_in = -torch.logsumexp(x[:len(in_set[0])], dim=1)
            loss += 0.1*(torch.pow(F.relu(Ec_in-args.m_in), 2).mean() + torch.pow(F.relu(args.m_out-Ec_out), 2).mean())
        elif args.score == 'OE':
            # OE: cross-entropy from the softmax distribution to the uniform distribution
            loss += 0.5 * -(x[len(in_set[0]):].mean(1) - torch.logsumexp(x[len(in_set[0]):], dim=1)).mean()
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.8 + float(loss) * 0.2
state['train_loss'] = loss_avg
# test function
def test():
net.eval()
loss_avg = 0.0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.cuda(), target.cuda()
# forward
output = net(data)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
correct += pred.eq(target.data).sum().item()
# test loss average
loss_avg += float(loss.data)
state['test_loss'] = loss_avg / len(test_loader)
state['test_accuracy'] = correct / len(test_loader.dataset)
if args.test:
test()
print(state)
exit()
# Make save directory
if not os.path.exists(args.save):
os.makedirs(args.save)
if not os.path.isdir(args.save):
raise Exception('%s is not a dir' % args.save)
with open(os.path.join(args.save, args.dataset + calib_indicator + '_' + args.model + '_s' + str(args.seed) +
'_' + save_info+'_training_results.csv'), 'w') as f:
f.write('epoch,time(s),train_loss,test_loss,test_error(%)\n')
print('Beginning Training\n')
# Main loop
for epoch in range(0, args.epochs):
state['epoch'] = epoch
begin_epoch = time.time()
train()
test()
# Save model
torch.save(net.state_dict(),
os.path.join(args.save, args.dataset + calib_indicator + '_' + args.model + '_s' + str(args.seed) +
'_' + save_info + '_epoch_' + str(epoch) + '.pt'))
# Let us not waste space and delete the previous model
prev_path = os.path.join(args.save, args.dataset + calib_indicator + '_' + args.model + '_s' + str(args.seed) +
'_' + save_info + '_epoch_'+ str(epoch - 1) + '.pt')
if os.path.exists(prev_path): os.remove(prev_path)
# Show results
with open(os.path.join(args.save, args.dataset + calib_indicator + '_' + args.model + '_s' + str(args.seed) +
'_' + save_info + '_training_results.csv'), 'a') as f:
f.write('%03d,%05d,%0.6f,%0.5f,%0.2f\n' % (
(epoch + 1),
time.time() - begin_epoch,
state['train_loss'],
state['test_loss'],
100 - 100. * state['test_accuracy'],
))
# # print state with rounded decimals
# print({k: round(v, 4) if isinstance(v, float) else v for k, v in state.items()})
print('Epoch {0:3d} | Time {1:5d} | Train Loss {2:.4f} | Test Loss {3:.3f} | Test Error {4:.2f}'.format(
(epoch + 1),
int(time.time() - begin_epoch),
state['train_loss'],
state['test_loss'],
100 - 100. * state['test_accuracy'])
)
| 10,867 | 36.605536 | 127 | py |
energy_ood | energy_ood-master/CIFAR/models/wrn.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
if self.equalInOut:
out = self.relu2(self.bn2(self.conv1(out)))
else:
out = self.relu2(self.bn2(self.conv1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
if not self.equalInOut:
return torch.add(self.convShortcut(x), out)
else:
return torch.add(x, out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(nb_layers):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert ((depth - 4) % 6 == 0)
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out)
def intermediate_forward(self, x, layer_index):
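        # layer_index is accepted for interface compatibility but unused; this always
        # returns the post-BN features of the final block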
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
return out
def feature_list(self, x):
out_list = []
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out_list.append(out)
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out), out_list
| 4,514 | 37.262712 | 116 | py |
energy_ood | energy_ood-master/utils/calibration_tools.py | import numpy as np
def calib_err(confidence, correct, p='2', beta=100):
# beta is target bin size
idxs = np.argsort(confidence)
confidence = confidence[idxs]
correct = correct[idxs]
bins = [[i * beta, (i + 1) * beta] for i in range(len(confidence) // beta)]
bins[-1] = [bins[-1][0], len(confidence)]
cerr = 0
total_examples = len(confidence)
for i in range(len(bins) - 1):
bin_confidence = confidence[bins[i][0]:bins[i][1]]
bin_correct = correct[bins[i][0]:bins[i][1]]
num_examples_in_bin = len(bin_confidence)
if num_examples_in_bin > 0:
difference = np.abs(np.nanmean(bin_confidence) - np.nanmean(bin_correct))
if p == '2':
cerr += num_examples_in_bin / total_examples * np.square(difference)
elif p == '1':
cerr += num_examples_in_bin / total_examples * difference
elif p == 'infty' or p == 'infinity' or p == 'max':
cerr = np.maximum(cerr, difference)
else:
assert False, "p must be '1', '2', or 'infty'"
if p == '2':
cerr = np.sqrt(cerr)
return cerr
def soft_f1(confidence, correct):
wrong = 1 - correct
# # the incorrectly classified samples are our interest
# # so they make the positive class
# tp_soft = np.sum((1 - confidence) * wrong)
# fp_soft = np.sum((1 - confidence) * correct)
# fn_soft = np.sum(confidence * wrong)
# return 2 * tp_soft / (2 * tp_soft + fn_soft + fp_soft)
return 2 * ((1 - confidence) * wrong).sum()/(1 - confidence + wrong).sum()
def tune_temp(logits, labels, binary_search=True, lower=0.2, upper=5.0, eps=0.0001):
logits = np.array(logits)
if binary_search:
import torch
import torch.nn.functional as F
logits = torch.FloatTensor(logits)
labels = torch.LongTensor(labels)
t_guess = torch.FloatTensor([0.5*(lower + upper)]).requires_grad_()
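        # bisection on T: step toward the side where the NLL decreases, using the sign of dNLL/dT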
while upper - lower > eps:
if torch.autograd.grad(F.cross_entropy(logits / t_guess, labels), t_guess)[0] > 0:
upper = 0.5 * (lower + upper)
else:
lower = 0.5 * (lower + upper)
t_guess = t_guess * 0 + 0.5 * (lower + upper)
t = min([lower, 0.5 * (lower + upper), upper], key=lambda x: float(F.cross_entropy(logits / x, labels)))
else:
import cvxpy as cx
set_size = np.array(logits).shape[0]
t = cx.Variable()
expr = sum((cx.Minimize(cx.log_sum_exp(logits[i, :] * t) - logits[i, labels[i]] * t)
for i in range(set_size)))
p = cx.Problem(expr, [lower <= t, t <= upper])
p.solve() # p.solve(solver=cx.SCS)
t = 1 / t.value
return t
def get_measures(confidence, correct):
rms = calib_err(confidence, correct, p='2')
mad = calib_err(confidence, correct, p='1')
sf1 = soft_f1(confidence, correct)
return rms, mad, sf1
def print_measures(rms, mad, sf1, method_name='Baseline'):
print('\t\t\t\t\t\t\t' + method_name)
print('RMS Calib Error (%): \t\t{:.2f}'.format(100 * rms))
print('MAD Calib Error (%): \t\t{:.2f}'.format(100 * mad))
print('Soft F1 Score (%): \t\t{:.2f}'.format(100 * sf1))
def print_measures_with_std(rmss, mads, sf1s, method_name='Baseline'):
print('\t\t\t\t\t\t\t' + method_name)
print('RMS Calib Error (%): \t\t{:.2f}\t+/- {:.2f}'.format(100 * np.mean(rmss), 100 * np.std(rmss)))
print('MAD Calib Error (%): \t\t{:.2f}\t+/- {:.2f}'.format(100 * np.mean(mads), 100 * np.std(mads)))
print('Soft F1 Score (%): \t\t{:.2f}\t+/- {:.2f}'.format(100 * np.mean(sf1s), 100 * np.std(sf1s)))
def show_calibration_results(confidence, correct, method_name='Baseline'):
print('\t\t\t\t' + method_name)
print('RMS Calib Error (%): \t\t{:.2f}'.format(
100 * calib_err(confidence, correct, p='2')))
print('MAD Calib Error (%): \t\t{:.2f}'.format(
100 * calib_err(confidence, correct, p='1')))
# print('Max Calib Error (%): \t\t{:.2f}'.format(
# 100 * calib_err(confidence, correct, p='infty')))
print('Soft F1-Score (%): \t\t{:.2f}'.format(
100 * soft_f1(confidence, correct)))
# add error detection measures?
| 4,280 | 33.524194 | 112 | py |
energy_ood | energy_ood-master/utils/cifar_resnet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
if self.equalInOut:
out = self.relu2(self.bn2(self.conv1(out)))
else:
out = self.relu2(self.bn2(self.conv1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
if not self.equalInOut:
return torch.add(self.convShortcut(x), out)
else:
return torch.add(x, out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(nb_layers):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert ((depth - 4) % 6 == 0)
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out)
| 3,908 | 39.298969 | 116 | py |
energy_ood | energy_ood-master/utils/display_results.py | import numpy as np
import sklearn.metrics as sk
recall_level_default = 0.95
def stable_cumsum(arr, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum
Parameters
----------
arr : array-like
To be cumulatively summed as flat
rtol : float
Relative tolerance, see ``np.allclose``
atol : float
Absolute tolerance, see ``np.allclose``
"""
out = np.cumsum(arr, dtype=np.float64)
expected = np.sum(arr, dtype=np.float64)
if not np.allclose(out[-1], expected, rtol=rtol, atol=atol):
raise RuntimeError('cumsum was found to be unstable: '
'its last element does not correspond to sum')
return out
def fpr_and_fdr_at_recall(y_true, y_score, recall_level=recall_level_default, pos_label=None):
classes = np.unique(y_true)
if (pos_label is None and
not (np.array_equal(classes, [0, 1]) or
np.array_equal(classes, [-1, 1]) or
np.array_equal(classes, [0]) or
np.array_equal(classes, [-1]) or
np.array_equal(classes, [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
distinct_value_indices = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = stable_cumsum(y_true)[threshold_idxs]
fps = 1 + threshold_idxs - tps # add one because of zero-based indexing
thresholds = y_score[threshold_idxs]
recall = tps / tps[-1]
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1) # [last_ind::-1]
recall, fps, tps, thresholds = np.r_[recall[sl], 1], np.r_[fps[sl], 0], np.r_[tps[sl], 0], thresholds[sl]
cutoff = np.argmin(np.abs(recall - recall_level))
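    # report the FPR at the threshold whose recall (TPR) is closest to recall_level (FPR95 by default)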
return fps[cutoff] / (np.sum(np.logical_not(y_true))) # , fps[cutoff]/(fps[cutoff] + tps[cutoff])
def get_measures(_pos, _neg, recall_level=recall_level_default):
pos = np.array(_pos[:]).reshape((-1, 1))
neg = np.array(_neg[:]).reshape((-1, 1))
examples = np.squeeze(np.vstack((pos, neg)))
labels = np.zeros(len(examples), dtype=np.int32)
labels[:len(pos)] += 1
auroc = sk.roc_auc_score(labels, examples)
aupr = sk.average_precision_score(labels, examples)
fpr = fpr_and_fdr_at_recall(labels, examples, recall_level)
return auroc, aupr, fpr
def show_performance(pos, neg, method_name='Ours', recall_level=recall_level_default):
'''
:param pos: 1's class, class to detect, outliers, or wrongly predicted
example scores
:param neg: 0's class scores
'''
auroc, aupr, fpr = get_measures(pos[:], neg[:], recall_level)
print('\t\t\t' + method_name)
print('FPR{:d}:\t\t\t{:.2f}'.format(int(100 * recall_level), 100 * fpr))
print('AUROC:\t\t\t{:.2f}'.format(100 * auroc))
print('AUPR:\t\t\t{:.2f}'.format(100 * aupr))
# print('FDR{:d}:\t\t\t{:.2f}'.format(int(100 * recall_level), 100 * fdr))
def print_measures(auroc, aupr, fpr, method_name='Ours', recall_level=recall_level_default):
print('\t\t\t\t' + method_name)
print(' FPR{:d} AUROC AUPR'.format(int(100*recall_level)))
print('& {:.2f} & {:.2f} & {:.2f}'.format(100*fpr, 100*auroc, 100*aupr))
#print('FPR{:d}:\t\t\t{:.2f}'.format(int(100 * recall_level), 100 * fpr))
#print('AUROC: \t\t\t{:.2f}'.format(100 * auroc))
#print('AUPR: \t\t\t{:.2f}'.format(100 * aupr))
def print_measures_with_std(aurocs, auprs, fprs, method_name='Ours', recall_level=recall_level_default):
print('\t\t\t\t' + method_name)
print(' FPR{:d} AUROC AUPR'.format(int(100*recall_level)))
print('& {:.2f} & {:.2f} & {:.2f}'.format(100*np.mean(fprs), 100*np.mean(aurocs), 100*np.mean(auprs)))
print('& {:.2f} & {:.2f} & {:.2f}'.format(100*np.std(fprs), 100*np.std(aurocs), 100*np.std(auprs)))
#print('FPR{:d}:\t\t\t{:.2f}\t+/- {:.2f}'.format(int(100 * recall_level), 100 * np.mean(fprs), 100 * np.std(fprs)))
#print('AUROC: \t\t\t{:.2f}\t+/- {:.2f}'.format(100 * np.mean(aurocs), 100 * np.std(aurocs)))
#print('AUPR: \t\t\t{:.2f}\t+/- {:.2f}'.format(100 * np.mean(auprs), 100 * np.std(auprs)))
def show_performance_comparison(pos_base, neg_base, pos_ours, neg_ours, baseline_name='Baseline',
method_name='Ours', recall_level=recall_level_default):
'''
:param pos_base: 1's class, class to detect, outliers, or wrongly predicted
example scores from the baseline
:param neg_base: 0's class scores generated by the baseline
'''
auroc_base, aupr_base, fpr_base = get_measures(pos_base[:], neg_base[:], recall_level)
auroc_ours, aupr_ours, fpr_ours = get_measures(pos_ours[:], neg_ours[:], recall_level)
print('\t\t\t' + baseline_name + '\t' + method_name)
print('FPR{:d}:\t\t\t{:.2f}\t\t{:.2f}'.format(
int(100 * recall_level), 100 * fpr_base, 100 * fpr_ours))
print('AUROC:\t\t\t{:.2f}\t\t{:.2f}'.format(
100 * auroc_base, 100 * auroc_ours))
print('AUPR:\t\t\t{:.2f}\t\t{:.2f}'.format(
100 * aupr_base, 100 * aupr_ours))
# print('FDR{:d}:\t\t\t{:.2f}\t\t{:.2f}'.format(
# int(100 * recall_level), 100 * fdr_base, 100 * fdr_ours))
| 5,816 | 41.459854 | 119 | py |
energy_ood | energy_ood-master/utils/lsun_loader.py | import torch.utils.data as data
from PIL import Image
import os
import os.path
import six
import string
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
class LSUNClass(data.Dataset):
def __init__(self, db_path, transform=None, target_transform=None):
import lmdb
self.db_path = db_path
self.env = lmdb.open(db_path, max_readers=1, readonly=True, lock=False,
readahead=False, meminit=False)
with self.env.begin(write=False) as txn:
self.length = txn.stat()['entries']
cache_file = '_cache_' + db_path.replace('/', '_')
if os.path.isfile(cache_file):
self.keys = pickle.load(open(cache_file, "rb"))
else:
with self.env.begin(write=False) as txn:
self.keys = [key for key, _ in txn.cursor()]
pickle.dump(self.keys, open(cache_file, "wb"))
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img, target = None, None
env = self.env
with env.begin(write=False) as txn:
imgbuf = txn.get(self.keys[index])
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
img = Image.open(buf).convert('RGB')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return self.length
def __repr__(self):
return self.__class__.__name__ + ' (' + self.db_path + ')'
class LSUN(data.Dataset):
"""
`LSUN <http://lsun.cs.princeton.edu>`_ dataset.
Args:
db_path (string): Root directory for the database files.
classes (string or list): One of {'train', 'val', 'test'} or a list of
            categories to load, e.g. ['bedroom_train', 'church_train'].
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
def __init__(self, db_path, classes='train',
transform=None, target_transform=None):
categories = ['bedroom', 'bridge', 'church_outdoor', 'classroom',
'conference_room', 'dining_room', 'kitchen',
'living_room', 'restaurant', 'tower']
dset_opts = ['train', 'val', 'test']
self.db_path = db_path
if type(classes) == str and classes in dset_opts:
if classes == 'test':
classes = [classes]
else:
classes = [c + '_' + classes for c in categories]
self.classes = classes
        # for each class, create an LSUNClass dataset
self.dbs = []
for c in self.classes:
self.dbs.append(LSUNClass(
db_path=db_path + '/' + c + '_lmdb',
transform=transform))
self.indices = []
count = 0
for db in self.dbs:
count += len(db)
self.indices.append(count)
self.length = count
self.target_transform = target_transform
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, target) where target is the index of the target category.
"""
target = 0
sub = 0
for ind in self.indices:
if index < ind:
break
target += 1
sub = ind
db = self.dbs[target]
index = index - sub
if self.target_transform is not None:
target = self.target_transform(target)
img, _ = db[index]
return img, target
def __len__(self):
return self.length
def __repr__(self):
return self.__class__.__name__ + ' (' + self.db_path + ')'
| 4,099 | 29.827068 | 90 | py |
energy_ood | energy_ood-master/utils/score_calculation.py | from __future__ import print_function
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from scipy import misc
to_np = lambda x: x.data.cpu().numpy()
concat = lambda x: np.concatenate(x, axis=0)
def get_ood_scores_odin(loader, net, bs, ood_num_examples, T, noise, in_dist=False):
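    # ODIN scoring: temperature scaling + input perturbation; gradients w.r.t. the
    # inputs are required, so torch.no_grad() is deliberately not used here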
_score = []
_right_score = []
_wrong_score = []
net.eval()
for batch_idx, (data, target) in enumerate(loader):
if batch_idx >= ood_num_examples // bs and in_dist is False:
break
data = data.cuda()
data = Variable(data, requires_grad = True)
output = net(data)
smax = to_np(F.softmax(output, dim=1))
odin_score = ODIN(data, output,net, T, noise)
_score.append(-np.max(odin_score, 1))
if in_dist:
preds = np.argmax(smax, axis=1)
targets = target.numpy().squeeze()
right_indices = preds == targets
wrong_indices = np.invert(right_indices)
_right_score.append(-np.max(smax[right_indices], axis=1))
_wrong_score.append(-np.max(smax[wrong_indices], axis=1))
if in_dist:
return concat(_score).copy(), concat(_right_score).copy(), concat(_wrong_score).copy()
else:
return concat(_score)[:ood_num_examples].copy()
def ODIN(inputs, outputs, model, temper, noiseMagnitude1):
# Calculating the perturbation we need to add, that is,
# the sign of gradient of cross entropy loss w.r.t. input
criterion = nn.CrossEntropyLoss()
maxIndexTemp = np.argmax(outputs.data.cpu().numpy(), axis=1)
# Using temperature scaling
outputs = outputs / temper
labels = Variable(torch.LongTensor(maxIndexTemp).cuda())
loss = criterion(outputs, labels)
loss.backward()
# Normalizing the gradient to binary in {0, 1}
gradient = torch.ge(inputs.grad.data, 0)
gradient = (gradient.float() - 0.5) * 2
gradient[:,0] = (gradient[:,0] )/(63.0/255.0)
gradient[:,1] = (gradient[:,1] )/(62.1/255.0)
gradient[:,2] = (gradient[:,2] )/(66.7/255.0)
#gradient.index_copy_(1, torch.LongTensor([0]).cuda(), gradient.index_select(1, torch.LongTensor([0]).cuda()) / (63.0/255.0))
#gradient.index_copy_(1, torch.LongTensor([1]).cuda(), gradient.index_select(1, torch.LongTensor([1]).cuda()) / (62.1/255.0))
#gradient.index_copy_(1, torch.LongTensor([2]).cuda(), gradient.index_select(1, torch.LongTensor([2]).cuda()) / (66.7/255.0))
# Adding small perturbations to images
tempInputs = torch.add(inputs.data, -noiseMagnitude1, gradient)
outputs = model(Variable(tempInputs))
outputs = outputs / temper
# Calculating the confidence after adding perturbations
nnOutputs = outputs.data.cpu()
nnOutputs = nnOutputs.numpy()
nnOutputs = nnOutputs - np.max(nnOutputs, axis=1, keepdims=True)
nnOutputs = np.exp(nnOutputs) / np.sum(np.exp(nnOutputs), axis=1, keepdims=True)
return nnOutputs
def get_Mahalanobis_score(model, test_loader, num_classes, sample_mean, precision, layer_index, magnitude, num_batches, in_dist=False):
'''
Compute the proposed Mahalanobis confidence score on input dataset
return: Mahalanobis score from layer_index
'''
model.eval()
Mahalanobis = []
for batch_idx, (data, target) in enumerate(test_loader):
if batch_idx >= num_batches and in_dist is False:
break
data, target = data.cuda(), target.cuda()
data, target = Variable(data, requires_grad = True), Variable(target)
out_features = model.intermediate_forward(data, layer_index)
out_features = out_features.view(out_features.size(0), out_features.size(1), -1)
out_features = torch.mean(out_features, 2)
# compute Mahalanobis score
gaussian_score = 0
for i in range(num_classes):
batch_sample_mean = sample_mean[layer_index][i]
zero_f = out_features.data - batch_sample_mean
term_gau = -0.5*torch.mm(torch.mm(zero_f, precision[layer_index]), zero_f.t()).diag()
if i == 0:
gaussian_score = term_gau.view(-1,1)
else:
gaussian_score = torch.cat((gaussian_score, term_gau.view(-1,1)), 1)
# Input_processing
sample_pred = gaussian_score.max(1)[1]
batch_sample_mean = sample_mean[layer_index].index_select(0, sample_pred)
zero_f = out_features - Variable(batch_sample_mean)
pure_gau = -0.5*torch.mm(torch.mm(zero_f, Variable(precision[layer_index])), zero_f.t()).diag()
loss = torch.mean(-pure_gau)
loss.backward()
gradient = torch.ge(data.grad.data, 0)
gradient = (gradient.float() - 0.5) * 2
gradient.index_copy_(1, torch.LongTensor([0]).cuda(), gradient.index_select(1, torch.LongTensor([0]).cuda()) / (63.0/255.0))
gradient.index_copy_(1, torch.LongTensor([1]).cuda(), gradient.index_select(1, torch.LongTensor([1]).cuda()) / (62.1/255.0))
gradient.index_copy_(1, torch.LongTensor([2]).cuda(), gradient.index_select(1, torch.LongTensor([2]).cuda()) / (66.7/255.0))
tempInputs = torch.add(data.data, -magnitude, gradient)
with torch.no_grad():
noise_out_features = model.intermediate_forward(tempInputs, layer_index)
noise_out_features = noise_out_features.view(noise_out_features.size(0), noise_out_features.size(1), -1)
noise_out_features = torch.mean(noise_out_features, 2)
noise_gaussian_score = 0
for i in range(num_classes):
batch_sample_mean = sample_mean[layer_index][i]
zero_f = noise_out_features.data - batch_sample_mean
term_gau = -0.5*torch.mm(torch.mm(zero_f, precision[layer_index]), zero_f.t()).diag()
if i == 0:
noise_gaussian_score = term_gau.view(-1,1)
else:
noise_gaussian_score = torch.cat((noise_gaussian_score, term_gau.view(-1,1)), 1)
noise_gaussian_score, _ = torch.max(noise_gaussian_score, dim=1)
Mahalanobis.extend(-noise_gaussian_score.cpu().numpy())
return np.asarray(Mahalanobis, dtype=np.float32)
def sample_estimator(model, num_classes, feature_list, train_loader):
"""
compute sample mean and precision (inverse of covariance)
return: sample_class_mean: list of class mean
precision: list of precisions
"""
import sklearn.covariance
model.eval()
group_lasso = sklearn.covariance.EmpiricalCovariance(assume_centered=False)
correct, total = 0, 0
num_output = len(feature_list)
num_sample_per_class = np.empty(num_classes)
num_sample_per_class.fill(0)
list_features = []
for i in range(num_output):
temp_list = []
for j in range(num_classes):
temp_list.append(0)
list_features.append(temp_list)
for data, target in train_loader:
total += data.size(0)
data = data.cuda()
data = Variable(data, volatile=True)
output, out_features = model.feature_list(data)
# get hidden features
for i in range(num_output):
out_features[i] = out_features[i].view(out_features[i].size(0), out_features[i].size(1), -1)
out_features[i] = torch.mean(out_features[i].data, 2)
# compute the accuracy
pred = output.data.max(1)[1]
equal_flag = pred.eq(target.cuda()).cpu()
correct += equal_flag.sum()
# construct the sample matrix
for i in range(data.size(0)):
label = target[i]
if num_sample_per_class[label] == 0:
out_count = 0
for out in out_features:
list_features[out_count][label] = out[i].view(1, -1)
out_count += 1
else:
out_count = 0
for out in out_features:
list_features[out_count][label] \
= torch.cat((list_features[out_count][label], out[i].view(1, -1)), 0)
out_count += 1
num_sample_per_class[label] += 1
sample_class_mean = []
out_count = 0
for num_feature in feature_list:
temp_list = torch.Tensor(num_classes, int(num_feature)).cuda()
for j in range(num_classes):
temp_list[j] = torch.mean(list_features[out_count][j], 0)
sample_class_mean.append(temp_list)
out_count += 1
precision = []
for k in range(num_output):
X = 0
for i in range(num_classes):
if i == 0:
X = list_features[k][i] - sample_class_mean[k][i]
else:
X = torch.cat((X, list_features[k][i] - sample_class_mean[k][i]), 0)
# find inverse
group_lasso.fit(X.cpu().numpy())
temp_precision = group_lasso.precision_
temp_precision = torch.from_numpy(temp_precision).float().cuda()
precision.append(temp_precision)
print('\n Training Accuracy:({:.2f}%)\n'.format(100. * correct / total))
return sample_class_mean, precision
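# Illustrative usage sketch (not part of the original file). `feature_list`
# holds the channel count of each hooked layer; the example values below are
# assumptions, not taken from this repo:
# feature_list = [64, 128, 256]
# sample_mean, precision = sample_estimator(model, num_classes,
#                                           feature_list, train_loader)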
| 9,397 | 39.86087 | 135 | py |
energy_ood | energy_ood-master/utils/svhn_loader.py | import torch.utils.data as data
from PIL import Image
import os
import os.path
import numpy as np
from torchvision.datasets.utils import download_url, check_integrity
class SVHN(data.Dataset):
url = ""
filename = ""
file_md5 = ""
split_list = {
'train': ["http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373"],
'test': ["http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat", "eb5a983be6a315427106f1b164d9cef3"],
'extra': ["http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat", "a93ce644f1a588dc4d68dda5feec44a7"],
'train_and_extra': [
["http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373"],
["http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat", "a93ce644f1a588dc4d68dda5feec44a7"]]}
def __init__(self, root, split='train',
transform=None, target_transform=None, download=False):
self.root = root
self.transform = transform
self.target_transform = target_transform
self.split = split # training set or test set or extra set
if self.split not in self.split_list:
raise ValueError('Wrong split entered! Please use split="train" '
'or split="extra" or split="test" '
'or split="train_and_extra" ')
if self.split == "train_and_extra":
self.url = self.split_list[split][0][0]
self.filename = self.split_list[split][0][1]
self.file_md5 = self.split_list[split][0][2]
else:
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
# import here rather than at top of file because this is
# an optional dependency for torchvision
import scipy.io as sio
# reading(loading) mat file as array
loaded_mat = sio.loadmat(os.path.join(root, self.filename))
if self.split == "test":
self.data = loaded_mat['X']
self.targets = loaded_mat['y']
# Note label 10 == 0 so modulo operator required
self.targets = (self.targets % 10).squeeze() # convert to zero-based indexing
self.data = np.transpose(self.data, (3, 2, 0, 1))
else:
self.data = loaded_mat['X']
self.targets = loaded_mat['y']
if self.split == "train_and_extra":
extra_filename = self.split_list[split][1][1]
loaded_mat = sio.loadmat(os.path.join(root, extra_filename))
self.data = np.concatenate([self.data,
loaded_mat['X']], axis=3)
self.targets = np.vstack((self.targets,
loaded_mat['y']))
# Note label 10 == 0 so modulo operator required
self.targets = (self.targets % 10).squeeze() # convert to zero-based indexing
self.data = np.transpose(self.data, (3, 2, 0, 1))
    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
    def __len__(self):
        return len(self.data)
def _check_integrity(self):
root = self.root
if self.split == "train_and_extra":
md5 = self.split_list[self.split][0][2]
fpath = os.path.join(root, self.filename)
train_integrity = check_integrity(fpath, md5)
extra_filename = self.split_list[self.split][1][1]
md5 = self.split_list[self.split][1][2]
fpath = os.path.join(root, extra_filename)
return check_integrity(fpath, md5) and train_integrity
else:
md5 = self.split_list[self.split][2]
fpath = os.path.join(root, self.filename)
return check_integrity(fpath, md5)
def download(self):
if self.split == "train_and_extra":
md5 = self.split_list[self.split][0][2]
download_url(self.url, self.root, self.filename, md5)
extra_filename = self.split_list[self.split][1][1]
md5 = self.split_list[self.split][1][2]
download_url(self.url, self.root, extra_filename, md5)
else:
md5 = self.split_list[self.split][2]
download_url(self.url, self.root, self.filename, md5)
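# Illustrative usage sketch (not part of the original file; assumes the SVHN
# .mat files already exist under `root`, since __init__ never calls download()):
# from torchvision import transforms
# test_set = SVHN(root='./data/svhn', split='test',
#                 transform=transforms.ToTensor())
# img, label = test_set[0]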
| 5,121 | 40.306452 | 92 | py |
energy_ood | energy_ood-master/utils/tiny_resnet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
if self.equalInOut:
out = self.relu2(self.bn2(self.conv1(out)))
else:
out = self.relu2(self.bn2(self.conv1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
if not self.equalInOut:
return torch.add(self.convShortcut(x), out)
else:
return torch.add(x, out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(nb_layers):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert ((depth - 4) % 6 == 0)
n = (depth - 4) // 6
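        # WRN convention: depth = 6n + 4, i.e. n two-conv basic blocks in each
        # of the three groups below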
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 16)
out = out.view(-1, self.nChannels)
return self.fc(out)
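# Minimal runnable check (illustrative, not part of the original file). The
# fixed avg_pool2d(out, 16) in forward() assumes 64x64 inputs (e.g. Tiny
# ImageNet): the two stride-2 groups reduce 64 -> 16.
if __name__ == '__main__':
    net = WideResNet(depth=40, num_classes=200, widen_factor=2, dropRate=0.3)
    logits = net(torch.randn(8, 3, 64, 64))
    print(logits.shape)  # torch.Size([8, 200])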
| 3,909 | 39.309278 | 116 | py |
energy_ood | energy_ood-master/utils/tinyimages_80mn_loader.py | import numpy as np
import torch
from bisect import bisect_left
class TinyImages(torch.utils.data.Dataset):
def __init__(self, transform=None, exclude_cifar=True):
data_file = open('../data/80million/tiny_images.bin', "rb")
def load_image(idx):
data_file.seek(idx * 3072)
data = data_file.read(3072)
            return np.frombuffer(data, dtype='uint8').reshape(32, 32, 3, order="F")
self.load_image = load_image
self.offset = 0 # offset index
self.transform = transform
self.exclude_cifar = exclude_cifar
if exclude_cifar:
self.cifar_idxs = []
with open('../utils/80mn_cifar_idxs.txt', 'r') as idxs:
for idx in idxs:
# indices in file take the 80mn database to start at 1, hence "- 1"
self.cifar_idxs.append(int(idx) - 1)
# hash table option
self.cifar_idxs = set(self.cifar_idxs)
self.in_cifar = lambda x: x in self.cifar_idxs
# bisection search option
# self.cifar_idxs = tuple(sorted(self.cifar_idxs))
#
# def binary_search(x, hi=len(self.cifar_idxs)):
# pos = bisect_left(self.cifar_idxs, x, 0, hi) # find insertion position
# return True if pos != hi and self.cifar_idxs[pos] == x else False
#
# self.in_cifar = binary_search
def __getitem__(self, index):
        # modulus matches the dataset size (79,302,017) used in __len__
        index = (index + self.offset) % 79302017
if self.exclude_cifar:
while self.in_cifar(index):
index = np.random.randint(79302017)
img = self.load_image(index)
if self.transform is not None:
img = self.transform(img)
return img, 0 # 0 is the class
def __len__(self):
return 79302017
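# Illustrative usage sketch (not part of the original file; requires the large
# binary at ../data/80million/tiny_images.bin and ../utils/80mn_cifar_idxs.txt):
# ood_data = TinyImages(transform=None, exclude_cifar=True)
# img, dummy_label = ood_data[12345]  # the label is always 0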
| 1,862 | 31.12069 | 89 | py |
energy_ood | energy_ood-master/utils/validation_dataset.py | import torch
import numpy as np
class PartialDataset(torch.utils.data.Dataset):
def __init__(self, parent_ds, offset, length):
self.parent_ds = parent_ds
self.offset = offset
self.length = length
assert len(parent_ds) >= offset + length, Exception("Parent Dataset not long enough")
super(PartialDataset, self).__init__()
def __len__(self):
return self.length
def __getitem__(self, i):
return self.parent_ds[i + self.offset]
def validation_split(dataset, val_share=0.1):
"""
    Split a (training and validation combined) dataset into training and validation.
Note that to be statistically sound, the items in the dataset should be statistically
independent (e.g. not sorted by class, not several instances of the same dataset that
could end up in either set).
inputs:
dataset: ("training") dataset to split into training and validation
val_share: fraction of validation data (should be 0<val_share<1, default: 0.1)
    returns: input dataset split into train_ds, val_ds
"""
val_offset = int(len(dataset) * (1 - val_share))
return PartialDataset(dataset, 0, val_offset), PartialDataset(dataset, val_offset, len(dataset) - val_offset)
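# Illustrative usage sketch (not part of the original file):
# train_ds, val_ds = validation_split(full_train_set, val_share=0.1)
# assert len(train_ds) + len(val_ds) == len(full_train_set)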
class PartialFolder(torch.utils.data.Dataset):
def __init__(self, parent_ds, perm, length):
self.parent_ds = parent_ds
self.perm = perm
self.length = length
super(PartialFolder, self).__init__()
def __len__(self):
return self.length
def __getitem__(self, i):
return self.parent_ds[self.perm[i]]
def validation_split_folder(dataset, val_share=0.1):
"""
    Split a (training and validation combined) dataset into training and validation.
Note that to be statistically sound, the items in the dataset should be statistically
independent (e.g. not sorted by class, not several instances of the same dataset that
could end up in either set).
inputs:
dataset: ("training") dataset to split into training and validation
val_share: fraction of validation data (should be 0<val_share<1, default: 0.1)
    returns: input dataset split into train_ds, val_ds
"""
num_train = int(len(dataset) * (1 - val_share))
num_val = len(dataset) - num_train
perm = np.asarray(range(len(dataset)))
np.random.seed(0)
np.random.shuffle(perm)
train_perm, val_perm = perm[:num_train], perm[num_train:]
return PartialFolder(dataset, train_perm, num_train), PartialFolder(dataset, val_perm, num_val)
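# Minimal runnable check (illustrative, not part of the original file):
# both splitters preserve the total item count.
if __name__ == '__main__':
    import torch.utils.data as data
    toy = data.TensorDataset(torch.arange(100.).unsqueeze(1))
    a, b = validation_split(toy, val_share=0.1)
    c, d = validation_split_folder(toy, val_share=0.1)
    print(len(a), len(b), len(c), len(d))  # 90 10 90 10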
| 2,610 | 34.283784 | 113 | py |
pycheops | pycheops-master/setup.py | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from setuptools.command.install import install
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join(here, 'pycheops/VERSION')) as version_file:
version = version_file.read().strip()
setup(
name='pycheops',
# VERSION
version = version,
description='CHEOPS light curve analysis software',
long_description=long_description,
# The project's main homepage.
url='http://cheops.unibe.ch/',
# Author details
author='CHEOPS Science Team',
author_email='[email protected]',
# Choose your license
license='GNU GPLv3',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 1 - Planning',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='astronomy',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'numpy>=1.17.2',
'scipy',
'astropy>=3.2.2',
'emcee>=3.0.0',
'astroquery',
'numba>=0.44.1',
'lmfit>=0.9.14',
'corner',
'photutils',
'matplotlib>3.2',
'celerite2',
'cdspyreadme',
'dace-query>=1.1.0',
'IPython',
'tqdm'],
dependency_links=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'pycheops': ['VERSION', 'data/*/*', 'examples/*/*']
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'calculate_coefficients=pycheops.calculate_coefficients:main',
'make_xml_files=pycheops.make_xml_files:main',
'combine=pycheops.combine:main',
],
},
test_suite='nose.collector',
tests_require=['nose'],
)
| 4,365 | 31.340741 | 94 | py |
pycheops | pycheops-master/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pycheops documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 25 17:29:42 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pycheops'
copyright = '2018, [email protected]'
author = '[email protected]'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.16'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pycheopsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pycheops.tex', 'pycheops Documentation',
'[email protected]', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pycheops', 'pycheops Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pycheops', 'pycheops Documentation',
author, 'pycheops', 'One line description of project.',
'Miscellaneous'),
]
autodoc_mock_imports = ["astropy", "photutils"]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 5,116 | 29.278107 | 79 | py |
pycheops | pycheops-master/docs/_build/html/constants.html |
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
    <title>constants — pycheops 0.0.16 documentation</title>
<link rel="stylesheet" href="_static/alabaster.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/language_data.js"></script>
<script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="next" title="funcs" href="funcs.html" />
<link rel="prev" title="Welcome to pycheops’s documentation!" href="index.html" />
<link rel="stylesheet" href="_static/custom.css" type="text/css" />
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
</head><body>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<div class="toctree-wrapper compound">
</div>
<span class="target" id="module-pycheops.constants"></span><div class="section" id="constants">
<h1>constants<a class="headerlink" href="#constants" title="Permalink to this headline">¶</a></h1>
<p>Nominal values of solar and planetary constants in SI units from IAU
Resolution B3 <a class="footnote-reference brackets" href="#id4" id="id1">1</a> plus related constants</p>
<p>Masses in SI units are derived using the 2014 CODATA value for the
Newtonian constant, <span class="math notranslate nohighlight">\(G=6.67408\times 10^{-11}\,m^3\,kg^{-1}\,s^{-2}\)</span>.</p>
<p>The following conversion constants are defined.</p>
<div class="section" id="solar-conversion-constants">
<h2>Solar conversion constants<a class="headerlink" href="#solar-conversion-constants" title="Permalink to this headline">¶</a></h2>
<ul class="simple">
<li><p>R_SunN - solar radius</p></li>
<li><p>S_SunN - total solar irradiance</p></li>
<li><p>L_SunN - luminosity</p></li>
<li><p>Teff_SunN - solar effective temperature</p></li>
<li><p>GM_SunN - solar mass parameter</p></li>
<li><p>M_SunN - solar mass derived from GM_SunN and G_2014</p></li>
<li><p>V_SunN - solar volume = (4.pi.R_SunN**3/3)</p></li>
</ul>
</div>
<div class="section" id="planetary-conversion-constants">
<h2>Planetary conversion constants<a class="headerlink" href="#planetary-conversion-constants" title="Permalink to this headline">¶</a></h2>
<ul class="simple">
<li><p>R_eEarthN - equatorial radius of the Earth</p></li>
<li><p>R_pEarthN - polar radius of the Earth</p></li>
<li><p>R_eJupN - equatorial radius of Jupiter</p></li>
<li><p>R_pJupN - polar radius of Jupiter</p></li>
<li><p>GM_EarthN - terrestrial mass parameter</p></li>
<li><p>GM_JupN - jovian mass parameter</p></li>
<li><p>M_EarthN - mass of the Earth from GM_EarthN and G_2014</p></li>
<li><p>M_JupN - mass of Jupiter from GM_JupN and G_2014</p></li>
<li><p>V_EarthN - volume of the Earth (4.pi.R_eEarthN^2.R_pEarthN/3)</p></li>
<li><p>V_JupN - volume of Jupiter (4.pi.R_eJupN^2.R_pJupN/3)</p></li>
<li><p>R_EarthN - volume-average radius of the Earth (3.V_EarthN/4.pi)^(1/3)</p></li>
<li><p>R_JupN - volume-average radius of Jupiter (3.V_JupN/4.pi)^(1/3)</p></li>
</ul>
</div>
<div class="section" id="related-constants">
<h2>Related constants<a class="headerlink" href="#related-constants" title="Permalink to this headline">¶</a></h2>
<ul class="simple">
<li><p>G_2014 - 2014 CODATA value for the Newtonian constant</p></li>
<li><p>mean_solar_day - 86,400.002 seconds <a class="footnote-reference brackets" href="#id5" id="id2">2</a></p></li>
<li><p>au - IAU 2009 value for astronomical constant in metres. <a class="footnote-reference brackets" href="#id6" id="id3">3</a></p></li>
<li><p>pc - 1 parsec = 3600*au*180/pi</p></li>
<li><p>c - speed of light = 299,792,458 m / s</p></li>
</ul>
</div>
<div class="section" id="example">
<h2>Example<a class="headerlink" href="#example" title="Permalink to this headline">¶</a></h2>
<p>Calculate the density relative to Jupiter for a planet 1/10 the radius of the
Sun with a mass 1/1000 of a solar mass. Note that we use the volume-average
radius for Jupiter in this case:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="kn">from</span> <span class="nn">pycheops.constants</span> <span class="kn">import</span> <span class="n">M_SunN</span><span class="p">,</span> <span class="n">R_SunN</span><span class="p">,</span> <span class="n">M_JupN</span><span class="p">,</span> <span class="n">R_JupN</span>
<span class="gp">>>> </span><span class="n">M_planet_Jup</span> <span class="o">=</span> <span class="n">M_SunN</span><span class="o">/</span><span class="mi">1000</span> <span class="o">/</span> <span class="n">M_JupN</span>
<span class="gp">>>> </span><span class="n">R_planet_Jup</span> <span class="o">=</span> <span class="n">R_SunN</span><span class="o">/</span><span class="mi">10</span> <span class="o">/</span> <span class="n">R_JupN</span>
<span class="gp">>>> </span><span class="n">rho_planet_Jup</span> <span class="o">=</span> <span class="n">M_planet_Jup</span> <span class="o">/</span> <span class="p">(</span><span class="n">R_planet_Jup</span><span class="o">**</span><span class="mi">3</span><span class="p">)</span>
<span class="gp">>>> </span><span class="nb">print</span> <span class="p">(</span><span class="s2">"Planet mass = </span><span class="si">{:.3f}</span><span class="s2"> M_Jup"</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">M_planet_Jup</span><span class="p">))</span>
<span class="gp">>>> </span><span class="nb">print</span> <span class="p">(</span><span class="s2">"Planet radius = </span><span class="si">{:.3f}</span><span class="s2"> R_Jup"</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">R_planet_Jup</span><span class="p">))</span>
<span class="gp">>>> </span><span class="nb">print</span> <span class="p">(</span><span class="s2">"Planet density = </span><span class="si">{:.3f}</span><span class="s2"> rho_Jup"</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">rho_planet_Jup</span><span class="p">))</span>
<span class="go">Planet mass = 1.048 M_Jup</span>
<span class="go">Planet radius = 0.995 R_Jup</span>
<span class="go">Planet density = 1.063 rho_Jup</span>
</pre></div>
</div>
<p class="rubric">References</p>
<dl class="footnote brackets">
<dt class="label" id="id4"><span class="brackets"><a class="fn-backref" href="#id1">1</a></span></dt>
<dd><p><a class="reference external" href="https://www.iau.org/static/resolutions/IAU2015_English.pdf">https://www.iau.org/static/resolutions/IAU2015_English.pdf</a></p>
</dd>
<dt class="label" id="id5"><span class="brackets"><a class="fn-backref" href="#id2">2</a></span></dt>
<dd><p><a class="reference external" href="http://tycho.usno.navy.mil/leapsec.html">http://tycho.usno.navy.mil/leapsec.html</a></p>
</dd>
<dt class="label" id="id6"><span class="brackets"><a class="fn-backref" href="#id3">3</a></span></dt>
<dd><p>Luzum et al., Celest Mech Dyn Astr (2011) 110:293-304</p>
</dd>
</dl>
</div>
</div>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h1 class="logo"><a href="index.html">pycheops</a></h1>
<h3>Navigation</h3>
<p class="caption"><span class="caption-text">Contents:</span></p>
<ul class="current">
<li class="toctree-l1 current"><a class="current reference internal" href="#">constants</a></li>
<li class="toctree-l1"><a class="reference internal" href="funcs.html">funcs</a></li>
<li class="toctree-l1"><a class="reference internal" href="instrument.html">instrument</a></li>
<li class="toctree-l1"><a class="reference internal" href="ld.html">ld</a></li>
<li class="toctree-l1"><a class="reference internal" href="models.html">models</a></li>
<li class="toctree-l1"><a class="reference internal" href="quantities.html">quantities</a></li>
</ul>
<div class="relations">
<h3>Related Topics</h3>
<ul>
<li><a href="index.html">Documentation overview</a><ul>
<li>Previous: <a href="index.html" title="previous chapter">Welcome to pycheops’s documentation!</a></li>
<li>Next: <a href="funcs.html" title="next chapter">funcs</a></li>
</ul></li>
</ul>
</div>
<div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" />
<input type="submit" value="Go" />
</form>
</div>
</div>
<script>$('#searchbox').show(0);</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
      ©2018, [email protected].
|
Powered by <a href="http://sphinx-doc.org/">Sphinx 2.4.0</a>
& <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
|
<a href="_sources/constants.rst.txt"
rel="nofollow">Page source</a>
</div>
</body>
</html> | 9,892 | 51.068421 | 415 | html |
pycheops | pycheops-master/docs/_build/html/funcs.html |
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
    <title>funcs — pycheops 0.0.16 documentation</title>
<link rel="stylesheet" href="_static/alabaster.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/language_data.js"></script>
<script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="next" title="instrument" href="instrument.html" />
<link rel="prev" title="constants" href="constants.html" />
<link rel="stylesheet" href="_static/custom.css" type="text/css" />
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
</head><body>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<div class="toctree-wrapper compound">
</div>
<span class="target" id="module-pycheops.funcs"></span><div class="section" id="funcs">
<h1>funcs<a class="headerlink" href="#funcs" title="Permalink to this headline">¶</a></h1>
<p>Functions relating observable properties of binary stars and exoplanet
systems to their fundamental properties, and vice versa. Also functions
related to Keplerian orbits.</p>
<div class="section" id="parameters">
<h2>Parameters<a class="headerlink" href="#parameters" title="Permalink to this headline">¶</a></h2>
<p>Functions are defined in terms of the following parameters. <a class="footnote-reference brackets" href="#id2" id="id1">1</a></p>
<ul class="simple">
<li><p>a - orbital semi-major axis in solar radii = a_1 + a_2</p></li>
<li><p>P - orbital period in mean solar days</p></li>
<li><p>Mass - total system mass in solar masses, Mass = m_1 + m_2</p></li>
<li><p>ecc - orbital eccentricity</p></li>
<li><p>omdeg - longitude of periastron of star’s orbit, omega, in _degrees_</p></li>
<li><p>sini - sine of the orbital inclination</p></li>
<li><p>K - 2.pi.a.sini/(P.sqrt(1-e^2)) = K_1 + K_2</p></li>
<li><p>K_1, K_2 - orbital semi-amplitudes in km/s</p></li>
<li><p>q - mass ratio = m_2/m_1 = K_1/K_2 = a_1/a_2</p></li>
<li><dl class="simple">
<dt>f_m - mass function = m_2^3.sini^3/(m_1+m_2)^2 in solar masses </dt><dd><p>= K_1^3.P/(2.pi.G).(1-e^2)^(3/2)</p>
</dd>
</dl>
</li>
<li><p>r_1 - radius of star 1 in units of the semi-major axis, r_1 = R_*/a</p></li>
<li><p>rhostar - mean stellar density = 3.pi/(GP^2(1+q)r_1^3)</p></li>
<li><p>rstar - host star radius/semi-major axis, rstar = R_*/a</p></li>
<li><p>k - planet/star radius ratio, k = R_planet/R_star</p></li>
<li><p>tzero - time of mid-transit (minimum on-sky star-planet separation).</p></li>
<li><p>b - impact parameter, b = a.cos(i)/R_star</p></li>
</ul>
<dl class="footnote brackets">
<dt class="label" id="id2"><span class="brackets"><a class="fn-backref" href="#id1">1</a></span></dt>
<dd><p>Hilditch, R.W., An Introduction to Close Binary Stars, CUP 2001.</p>
</dd>
</dl>
</div>
<div class="section" id="functions">
<h2>Functions<a class="headerlink" href="#functions" title="Permalink to this headline">¶</a></h2>
<dl class="function">
<dt id="pycheops.funcs.a_rsun">
<code class="sig-prename descclassname">pycheops.funcs.</code><code class="sig-name descname">a_rsun</code><span class="sig-paren">(</span><em class="sig-param">P</em>, <em class="sig-param">Mass</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.funcs.a_rsun" title="Permalink to this definition">¶</a></dt>
<dd><p>Semi-major axis in solar radii</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>P</strong> – orbital period in mean solar days</p></li>
<li><p><strong>Mass</strong> – total mass in solar masses, M</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>a = (G.M.P^2/(4.pi^2))^(1/3) in solar radii</p>
</dd>
</dl>
</dd></dl>
<dl class="function">
<dt id="pycheops.funcs.f_m">
<code class="sig-prename descclassname">pycheops.funcs.</code><code class="sig-name descname">f_m</code><span class="sig-paren">(</span><em class="sig-param">P</em>, <em class="sig-param">K</em>, <em class="sig-param">ecc=0</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.funcs.f_m" title="Permalink to this definition">¶</a></dt>
<dd><p>Mass function in solar masses</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>P</strong> – orbital period in mean solar days</p></li>
<li><p><strong>K</strong> – semi-amplitude of the spectroscopic orbit in km/s</p></li>
<li><p><strong>ecc</strong> – orbital eccentricity</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>f_m = m_2^3.sini^3/(m_1+m_2)^2 in solar masses</p>
</dd>
</dl>
</dd></dl>
<dl class="function">
<dt id="pycheops.funcs.m1sin3i">
<code class="sig-prename descclassname">pycheops.funcs.</code><code class="sig-name descname">m1sin3i</code><span class="sig-paren">(</span><em class="sig-param">P</em>, <em class="sig-param">K_1</em>, <em class="sig-param">K_2</em>, <em class="sig-param">ecc=0</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.funcs.m1sin3i" title="Permalink to this definition">¶</a></dt>
<dd><p>Reduced mass of star 1 in solar masses</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>K_1</strong> – semi-amplitude of star 1 in km/s</p></li>
<li><p><strong>K_2</strong> – semi-amplitude of star 2 in km/s</p></li>
<li><p><strong>P</strong> – orbital period in mean solar days</p></li>
<li><p><strong>ecc</strong> – orbital eccentricity</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>m_1.sini^3 in solar masses</p>
</dd>
</dl>
</dd></dl>
<dl class="function">
<dt id="pycheops.funcs.m2sin3i">
<code class="sig-prename descclassname">pycheops.funcs.</code><code class="sig-name descname">m2sin3i</code><span class="sig-paren">(</span><em class="sig-param">P</em>, <em class="sig-param">K_1</em>, <em class="sig-param">K_2</em>, <em class="sig-param">ecc=0</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.funcs.m2sin3i" title="Permalink to this definition">¶</a></dt>
<dd><p>Reduced mass of star 2 in solar masses</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>K_1</strong> – semi-amplitude of star 1 in km/s</p></li>
<li><p><strong>K_2</strong> – semi-amplitude of star 2 in km/s</p></li>
<li><p><strong>P</strong> – orbital period in mean solar days</p></li>
<li><p><strong>ecc</strong> – orbital eccentricity</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>m_2.sini^3 in solar masses</p>
</dd>
</dl>
</dd></dl>
<dl class="function">
<dt id="pycheops.funcs.asini">
<code class="sig-prename descclassname">pycheops.funcs.</code><code class="sig-name descname">asini</code><span class="sig-paren">(</span><em class="sig-param">K</em>, <em class="sig-param">P</em>, <em class="sig-param">ecc=0</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.funcs.asini" title="Permalink to this definition">¶</a></dt>
<dd><p>a.sini in solar radii</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>K</strong> – semi-amplitude of the spectroscopic orbit in km/s</p></li>
<li><p><strong>P</strong> – orbital period in mean solar days</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>a.sin(i) in solar radii</p>
</dd>
</dl>
</dd></dl>
<dl class="function">
<dt id="pycheops.funcs.rhostar">
<code class="sig-prename descclassname">pycheops.funcs.</code><code class="sig-name descname">rhostar</code><span class="sig-paren">(</span><em class="sig-param">r_1</em>, <em class="sig-param">P</em>, <em class="sig-param">q=0</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.funcs.rhostar" title="Permalink to this definition">¶</a></dt>
<dd><p>Mean stellar density from scaled stellar radius.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>r_1</strong> – radius of star in units of the semi-major axis, r_1 = R_*/a</p></li>
<li><p><strong>P</strong> – orbital period in mean solar days</p></li>
<li><p><strong>q</strong> – mass ratio, m_2/m_1</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>Mean stellar density in solar units</p>
</dd>
</dl>
</dd></dl>
<dl class="function">
<dt id="pycheops.funcs.K_kms">
<code class="sig-prename descclassname">pycheops.funcs.</code><code class="sig-name descname">K_kms</code><span class="sig-paren">(</span><em class="sig-param">m_1</em>, <em class="sig-param">m_2</em>, <em class="sig-param">P</em>, <em class="sig-param">sini</em>, <em class="sig-param">ecc</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.funcs.K_kms" title="Permalink to this definition">¶</a></dt>
<dd><dl class="simple">
<dt>Semi-amplitudes of the spectroscopic orbits in km/s</dt><dd><ul class="simple">
<li><p>K = 2.pi.a.sini/(P.sqrt(1-ecc^2))</p></li>
<li><p>K_1 = K * m_2/(m_1+m_2)</p></li>
<li><p>K_2 = K * m_1/(m_1+m_2)</p></li>
</ul>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>m_1</strong> – mass of star 1 in solar masses</p></li>
<li><p><strong>m_2</strong> – mass of star 2 in solar masses</p></li>
<li><p><strong>P</strong> – orbital period in mean solar days</p></li>
<li><p><strong>sini</strong> – sine of the orbital inclination</p></li>
<li><p><strong>ecc</strong> – orbital eccentricity</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>K_1, K_2 – semi-amplitudes in km/s</p>
</dd>
</dl>
</dd></dl>
<dl class="function">
<dt id="pycheops.funcs.m_comp">
<code class="sig-prename descclassname">pycheops.funcs.</code><code class="sig-name descname">m_comp</code><span class="sig-paren">(</span><em class="sig-param">f_m</em>, <em class="sig-param">m_1</em>, <em class="sig-param">sini</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.funcs.m_comp" title="Permalink to this definition">¶</a></dt>
<dd><p>Companion mass in solar masses given mass function and stellar mass</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>f_m</strong> – = K_1^3.P/(2.pi.G).(1-ecc^2)^(3/2) in solar masses</p></li>
<li><p><strong>m_1</strong> – mass of star 1 in solar masses</p></li>
<li><p><strong>sini</strong> – sine of orbital inclination</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>m_2 = mass of companion to star 1 in solar masses</p>
</dd>
</dl>
</dd></dl>
<dl class="function">
<dt id="pycheops.funcs.transit_width">
<code class="sig-prename descclassname">pycheops.funcs.</code><code class="sig-name descname">transit_width</code><span class="sig-paren">(</span><em class="sig-param">r</em>, <em class="sig-param">k</em>, <em class="sig-param">b</em>, <em class="sig-param">P=1</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.funcs.transit_width" title="Permalink to this definition">¶</a></dt>
<dd><p>Total transit duration for a circular orbit.</p>
<p>See equation (3) from Seager and Mallen-Ornelas, 2003ApJ...585.1038S.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>r</strong> – R_star/a</p></li>
<li><p><strong>k</strong> – R_planet/R_star</p></li>
<li><p><strong>b</strong> – impact parameter = a.cos(i)/R_star</p></li>
<li><p><strong>P</strong> – orbital period (optional, default P=1)</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>Total transit duration in the same units as P.</p>
</dd>
</dl>
</dd></dl>
<dl class="function">
<dt id="pycheops.funcs.t2z">
<code class="sig-prename descclassname">pycheops.funcs.</code><code class="sig-name descname">t2z</code><span class="sig-paren">(</span><em class="sig-param">t</em>, <em class="sig-param">tzero</em>, <em class="sig-param">P</em>, <em class="sig-param">sini</em>, <em class="sig-param">rstar</em>, <em class="sig-param">ecc=0</em>, <em class="sig-param">omdeg=90</em>, <em class="sig-param">returnMask=False</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.funcs.t2z" title="Permalink to this definition">¶</a></dt>
<dd><p>Calculate star-planet separation relative to scaled stellar radius, z</p>
<p>Optionally, return a flag/mask to indicate cases where the planet is
further from the observer than the star, i.e., whether phases with z<1 are
transits (mask==True) or eclipses (mask==False)</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>t</strong> – time of observation (scalar or array)</p></li>
<li><p><strong>tzero</strong> – time of inferior conjunction, i.e., mid-transit</p></li>
<li><p><strong>P</strong> – orbital period</p></li>
<li><p><strong>sini</strong> – sine of orbital inclination</p></li>
<li><p><strong>rstar</strong> – scaled stellar radius, R_star/a</p></li>
<li><p><strong>ecc</strong> – eccentricity (optional, default=0)</p></li>
<li><p><strong>omdeg</strong> – longitude of periastron in degrees (optional, default=90)</p></li>
<li><p><strong>returnMask</strong> – return a flag to distinguish transits from eclipses.</p></li>
</ul>
</dd>
</dl>
<p>N.B. omdeg is the longitude of periastron for the star’s orbit</p>
<dl class="field-list simple">
<dt class="field-odd">Returns</dt>
<dd class="field-odd"><p>z [, mask]</p>
</dd>
<dt class="field-even">Example</dt>
<dd class="field-even"><p></p></dd>
</dl>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="kn">from</span> <span class="nn">pycheops.funcs</span> <span class="kn">import</span> <span class="n">t2z</span>
<span class="gp">>>> </span><span class="kn">from</span> <span class="nn">numpy</span> <span class="kn">import</span> <span class="n">linspace</span>
<span class="gp">>>> </span><span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
<span class="gp">>>> </span><span class="n">t</span> <span class="o">=</span> <span class="n">linspace</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span><span class="mi">1</span><span class="p">,</span><span class="mi">1000</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">sini</span> <span class="o">=</span> <span class="mf">0.999</span>
<span class="gp">>>> </span><span class="n">rstar</span> <span class="o">=</span> <span class="mf">0.1</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">plot</span><span class="p">(</span><span class="n">t</span><span class="p">,</span> <span class="n">t2z</span><span class="p">(</span><span class="n">t</span><span class="p">,</span><span class="mi">0</span><span class="p">,</span><span class="mi">1</span><span class="p">,</span><span class="n">sini</span><span class="p">,</span><span class="n">rstar</span><span class="p">))</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">xlim</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span><span class="mi">1</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">ylim</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span><span class="mi">12</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">ecc</span> <span class="o">=</span> <span class="mf">0.1</span>
<span class="gp">>>> </span><span class="k">for</span> <span class="n">omdeg</span> <span class="ow">in</span> <span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">90</span><span class="p">,</span> <span class="mi">180</span><span class="p">,</span> <span class="mi">270</span><span class="p">):</span>
<span class="gp">>>> </span> <span class="n">plt</span><span class="o">.</span><span class="n">plot</span><span class="p">(</span><span class="n">t</span><span class="p">,</span> <span class="n">t2z</span><span class="p">(</span><span class="n">t</span><span class="p">,</span><span class="mi">0</span><span class="p">,</span><span class="mi">1</span><span class="p">,</span><span class="n">sini</span><span class="p">,</span><span class="n">rstar</span><span class="p">,</span><span class="n">ecc</span><span class="p">,</span><span class="n">omdeg</span><span class="p">))</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
</pre></div>
</div>
</dd></dl>
<dl class="function">
<dt id="pycheops.funcs.tzero2tperi">
<code class="sig-prename descclassname">pycheops.funcs.</code><code class="sig-name descname">tzero2tperi</code><span class="sig-paren">(</span><em class="sig-param">tzero</em>, <em class="sig-param">P</em>, <em class="sig-param">sini</em>, <em class="sig-param">ecc</em>, <em class="sig-param">omdeg</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.funcs.tzero2tperi" title="Permalink to this definition">¶</a></dt>
<dd><p>Calculate time of periastron from time of mid-transit</p>
<p>Uses the method by Lacy, 1992AJ....104.2213L</p>
<dl class="field-list">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>tzero</strong> – times of mid-transit</p></li>
<li><p><strong>P</strong> – orbital period</p></li>
<li><p><strong>sini</strong> – sine of orbital inclination</p></li>
<li><p><strong>ecc</strong> – eccentricity</p></li>
<li><p><strong>omdeg</strong> – longitude of periastron in degrees</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>time of periastron prior to tzero</p>
</dd>
<dt class="field-odd">Example</dt>
<dd class="field-odd"><div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="kn">from</span> <span class="nn">pycheops.funcs</span> <span class="kn">import</span> <span class="n">tzero2tperi</span>
<span class="gp">>>> </span><span class="n">tzero</span> <span class="o">=</span> <span class="mf">54321.6789</span>
<span class="gp">>>> </span><span class="n">P</span> <span class="o">=</span> <span class="mf">1.23456</span>
<span class="gp">>>> </span><span class="n">sini</span> <span class="o">=</span> <span class="mf">0.987</span>
<span class="gp">>>> </span><span class="n">ecc</span> <span class="o">=</span> <span class="mf">0.123</span>
<span class="gp">>>> </span><span class="n">omdeg</span> <span class="o">=</span> <span class="mf">89.01</span>
<span class="gp">>>> </span><span class="nb">print</span><span class="p">(</span><span class="n">tzero2tperi</span><span class="p">(</span><span class="n">tzero</span><span class="p">,</span><span class="n">P</span><span class="p">,</span><span class="n">sini</span><span class="p">,</span><span class="n">ecc</span><span class="p">,</span><span class="n">omdeg</span><span class="p">))</span>
<span class="go">54321.6762764</span>
</pre></div>
</div>
</dd>
</dl>
</dd></dl>
<dl class="function">
<dt id="pycheops.funcs.vrad">
<code class="sig-prename descclassname">pycheops.funcs.</code><code class="sig-name descname">vrad</code><span class="sig-paren">(</span><em class="sig-param">t</em>, <em class="sig-param">tzero</em>, <em class="sig-param">P</em>, <em class="sig-param">K</em>, <em class="sig-param">ecc=0</em>, <em class="sig-param">omdeg=90</em>, <em class="sig-param">sini=1</em>, <em class="sig-param">primary=True</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.funcs.vrad" title="Permalink to this definition">¶</a></dt>
<dd><p>Calculate radial velocity, V_r, for body in a Keplerian orbit</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>t</strong> – array of input times</p></li>
<li><p><strong>tzero</strong> – time of inferior conjunction, i.e., mid-transit</p></li>
<li><p><strong>P</strong> – orbital period</p></li>
<li><p><strong>K</strong> – radial velocity semi-amplitude</p></li>
<li><p><strong>ecc</strong> – eccentricity (optional, default=0)</p></li>
<li><p><strong>omdeg</strong> – longitude of periastron in degrees (optional, default=90)</p></li>
<li><p><strong>sini</strong> – sine of orbital inclination (to convert tzero to t_peri)</p></li>
<li><p><strong>primary</strong> – if false calculate V_r for companion</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>V_r in same units as K relative to the barycentre of the binary</p>
</dd>
</dl>
</dd></dl>
<dl class="function">
<dt id="pycheops.funcs.xyz_planet">
<code class="sig-prename descclassname">pycheops.funcs.</code><code class="sig-name descname">xyz_planet</code><span class="sig-paren">(</span><em class="sig-param">t</em>, <em class="sig-param">tzero</em>, <em class="sig-param">P</em>, <em class="sig-param">sini</em>, <em class="sig-param">ecc=0</em>, <em class="sig-param">omdeg=90</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.funcs.xyz_planet" title="Permalink to this definition">¶</a></dt>
<dd><p>Position of the planet in Cartesian coordinates.</p>
<p>The position of the ascending node is taken to be Omega=0 and the
semi-major axis is taken to be a=1.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>t</strong> – time of observation (scalar or array)</p></li>
<li><p><strong>tzero</strong> – time of inferior conjunction, i.e., mid-transit</p></li>
<li><p><strong>P</strong> – orbital period</p></li>
<li><p><strong>sini</strong> – sine of orbital inclination</p></li>
<li><p><strong>ecc</strong> – eccentricity (optional, default=0)</p></li>
<li><p><strong>omdeg</strong> – longitude of periastron in degrees (optional, default=90)</p></li>
</ul>
</dd>
</dl>
<p>N.B. omdeg is the longitude of periastron for the star’s orbit</p>
<dl class="field-list simple">
<dt class="field-odd">Returns</dt>
<dd class="field-odd"><p>(x, y, z)</p>
</dd>
<dt class="field-even">Example</dt>
<dd class="field-even"><p></p></dd>
</dl>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="kn">from</span> <span class="nn">pycheops.funcs</span> <span class="kn">import</span> <span class="n">phase_angle</span>
<span class="gp">>>> </span><span class="kn">from</span> <span class="nn">numpy</span> <span class="kn">import</span> <span class="n">linspace</span>
<span class="gp">>>> </span><span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
<span class="gp">>>> </span><span class="n">t</span> <span class="o">=</span> <span class="n">linspace</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span><span class="mi">1</span><span class="p">,</span><span class="mi">1000</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">sini</span> <span class="o">=</span> <span class="mf">0.9</span>
<span class="gp">>>> </span><span class="n">ecc</span> <span class="o">=</span> <span class="mf">0.1</span>
<span class="gp">>>> </span><span class="n">omdeg</span> <span class="o">=</span> <span class="mi">90</span>
<span class="gp">>>> </span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">z</span> <span class="o">=</span> <span class="n">xyz_planet</span><span class="p">(</span><span class="n">t</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">sini</span><span class="p">,</span> <span class="n">ecc</span><span class="p">,</span> <span class="n">omdeg</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">plot</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">plot</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">z</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
</pre></div>
</div>
</dd></dl>
</div>
</div>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h1 class="logo"><a href="index.html">pycheops</a></h1>
<h3>Navigation</h3>
<p class="caption"><span class="caption-text">Contents:</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="constants.html">constants</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">funcs</a></li>
<li class="toctree-l1"><a class="reference internal" href="instrument.html">instrument</a></li>
<li class="toctree-l1"><a class="reference internal" href="ld.html">ld</a></li>
<li class="toctree-l1"><a class="reference internal" href="models.html">models</a></li>
<li class="toctree-l1"><a class="reference internal" href="quantities.html">quantities</a></li>
</ul>
<div class="relations">
<h3>Related Topics</h3>
<ul>
<li><a href="index.html">Documentation overview</a><ul>
<li>Previous: <a href="constants.html" title="previous chapter">constants</a></li>
<li>Next: <a href="instrument.html" title="next chapter">instrument</a></li>
</ul></li>
</ul>
</div>
<div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" />
<input type="submit" value="Go" />
</form>
</div>
</div>
<script>$('#searchbox').show(0);</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
©2018, [email protected].
|
Powered by <a href="http://sphinx-doc.org/">Sphinx 2.4.0</a>
& <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
|
<a href="_sources/funcs.rst.txt"
rel="nofollow">Page source</a>
</div>
</body>
</html> | 28,032 | 58.771855 | 593 | html |
pycheops | pycheops-master/docs/_build/html/genindex.html |
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title>Index — pycheops 0.0.16 documentation</title>
<link rel="stylesheet" href="_static/alabaster.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/language_data.js"></script>
<script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<link rel="index" title="Index" href="#" />
<link rel="search" title="Search" href="search.html" />
<link rel="stylesheet" href="_static/custom.css" type="text/css" />
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
</head><body>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<h1 id="index">Index</h1>
<div class="genindex-jumpbox">
<a href="#_"><strong>_</strong></a>
| <a href="#A"><strong>A</strong></a>
| <a href="#C"><strong>C</strong></a>
| <a href="#E"><strong>E</strong></a>
| <a href="#F"><strong>F</strong></a>
| <a href="#G"><strong>G</strong></a>
| <a href="#H"><strong>H</strong></a>
| <a href="#K"><strong>K</strong></a>
| <a href="#L"><strong>L</strong></a>
| <a href="#M"><strong>M</strong></a>
| <a href="#P"><strong>P</strong></a>
| <a href="#Q"><strong>Q</strong></a>
| <a href="#R"><strong>R</strong></a>
| <a href="#S"><strong>S</strong></a>
| <a href="#T"><strong>T</strong></a>
| <a href="#U"><strong>U</strong></a>
| <a href="#V"><strong>V</strong></a>
| <a href="#X"><strong>X</strong></a>
</div>
<h2 id="_">_</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="ld.html#pycheops.ld.stagger_power2_interpolator.__call__">__call__() (pycheops.ld.stagger_power2_interpolator method)</a>
</li>
</ul></td>
</tr></table>
<h2 id="A">A</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="funcs.html#pycheops.funcs.a_rsun">a_rsun() (in module pycheops.funcs)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="funcs.html#pycheops.funcs.asini">asini() (in module pycheops.funcs)</a>
</li>
</ul></td>
</tr></table>
<h2 id="C">C</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="ld.html#pycheops.ld.ca_to_h1h2">ca_to_h1h2() (in module pycheops.ld)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="instrument.html#pycheops.instrument.count_rate">count_rate() (in module pycheops.instrument)</a>
</li>
</ul></td>
</tr></table>
<h2 id="E">E</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="models.html#pycheops.models.EBLMModel">EBLMModel (class in pycheops.models)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="models.html#pycheops.models.EclipseModel">EclipseModel (class in pycheops.models)</a>
</li>
<li><a href="instrument.html#pycheops.instrument.exposure_time">exposure_time() (in module pycheops.instrument)</a>
</li>
</ul></td>
</tr></table>
<h2 id="F">F</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="funcs.html#pycheops.funcs.f_m">f_m() (in module pycheops.funcs)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="models.html#pycheops.models.FactorModel">FactorModel (class in pycheops.models)</a>
</li>
</ul></td>
</tr></table>
<h2 id="G">G</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="models.html#pycheops.models.FactorModel.guess">guess() (pycheops.models.FactorModel method)</a>
</li>
</ul></td>
</tr></table>
<h2 id="H">H</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="ld.html#pycheops.ld.h1h2_to_ca">h1h2_to_ca() (in module pycheops.ld)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="ld.html#pycheops.ld.h1h2_to_q1q2">h1h2_to_q1q2() (in module pycheops.ld)</a>
</li>
</ul></td>
</tr></table>
<h2 id="K">K</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="funcs.html#pycheops.funcs.K_kms">K_kms() (in module pycheops.funcs)</a>
</li>
</ul></td>
</tr></table>
<h2 id="L">L</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="ld.html#pycheops.ld.ld_claret">ld_claret() (in module pycheops.ld)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="ld.html#pycheops.ld.ld_power2">ld_power2() (in module pycheops.ld)</a>
</li>
</ul></td>
</tr></table>
<h2 id="M">M</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="funcs.html#pycheops.funcs.m1sin3i">m1sin3i() (in module pycheops.funcs)</a>
</li>
<li><a href="funcs.html#pycheops.funcs.m2sin3i">m2sin3i() (in module pycheops.funcs)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="funcs.html#pycheops.funcs.m_comp">m_comp() (in module pycheops.funcs)</a>
</li>
<li><a href="models.html#pycheops.models.minerr_transit_fit">minerr_transit_fit() (in module pycheops.models)</a>
</li>
</ul></td>
</tr></table>
<h2 id="P">P</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="models.html#pycheops.models.PlanetModel">PlanetModel (class in pycheops.models)</a>
</li>
<li><a href="index.html#module-pycheops">pycheops (module)</a>
</li>
<li><a href="constants.html#module-pycheops.constants">pycheops.constants (module)</a>
</li>
<li><a href="funcs.html#module-pycheops.funcs">pycheops.funcs (module)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="instrument.html#module-pycheops.instrument">pycheops.instrument (module)</a>
</li>
<li><a href="ld.html#module-pycheops.ld">pycheops.ld (module)</a>
</li>
<li><a href="models.html#module-pycheops.models">pycheops.models (module)</a>
</li>
<li><a href="quantities.html#module-pycheops.quantities">pycheops.quantities (module)</a>
</li>
</ul></td>
</tr></table>
<h2 id="Q">Q</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="ld.html#pycheops.ld.q1q2_to_h1h2">q1q2_to_h1h2() (in module pycheops.ld)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="models.html#pycheops.models.qpower2">qpower2 (in module pycheops.models)</a>
</li>
</ul></td>
</tr></table>
<h2 id="R">R</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="models.html#pycheops.models.ReflectionModel">ReflectionModel (class in pycheops.models)</a>
</li>
<li><a href="instrument.html#pycheops.instrument.response">response() (in module pycheops.instrument)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="funcs.html#pycheops.funcs.rhostar">rhostar() (in module pycheops.funcs)</a>
</li>
<li><a href="models.html#pycheops.models.RVCompanion">RVCompanion (class in pycheops.models)</a>
</li>
<li><a href="models.html#pycheops.models.RVModel">RVModel (class in pycheops.models)</a>
</li>
</ul></td>
</tr></table>
<h2 id="S">S</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="models.html#pycheops.models.scaled_transit_fit">scaled_transit_fit (in module pycheops.models)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="ld.html#pycheops.ld.stagger_power2_interpolator">stagger_power2_interpolator (class in pycheops.ld)</a>
</li>
</ul></td>
</tr></table>
<h2 id="T">T</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="funcs.html#pycheops.funcs.t2z">t2z() (in module pycheops.funcs)</a>
</li>
<li><a href="models.html#pycheops.models.ThermalPhaseModel">ThermalPhaseModel (class in pycheops.models)</a>
</li>
<li><a href="instrument.html#pycheops.instrument.transit_noise">transit_noise() (in module pycheops.instrument)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="funcs.html#pycheops.funcs.transit_width">transit_width() (in module pycheops.funcs)</a>
</li>
<li><a href="models.html#pycheops.models.TransitModel">TransitModel (class in pycheops.models)</a>
</li>
<li><a href="funcs.html#pycheops.funcs.tzero2tperi">tzero2tperi() (in module pycheops.funcs)</a>
</li>
</ul></td>
</tr></table>
<h2 id="U">U</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="models.html#pycheops.models.ueclipse">ueclipse (in module pycheops.models)</a>
</li>
</ul></td>
</tr></table>
<h2 id="V">V</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="instrument.html#pycheops.instrument.visibility">visibility() (in module pycheops.instrument)</a>
</li>
</ul></td>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="funcs.html#pycheops.funcs.vrad">vrad() (in module pycheops.funcs)</a>
</li>
</ul></td>
</tr></table>
<h2 id="X">X</h2>
<table style="width: 100%" class="indextable genindextable"><tr>
<td style="width: 33%; vertical-align: top;"><ul>
<li><a href="funcs.html#pycheops.funcs.xyz_planet">xyz_planet() (in module pycheops.funcs)</a>
</li>
</ul></td>
</tr></table>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h1 class="logo"><a href="index.html">pycheops</a></h1>
<h3>Navigation</h3>
<p class="caption"><span class="caption-text">Contents:</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="constants.html">constants</a></li>
<li class="toctree-l1"><a class="reference internal" href="funcs.html">funcs</a></li>
<li class="toctree-l1"><a class="reference internal" href="instrument.html">instrument</a></li>
<li class="toctree-l1"><a class="reference internal" href="ld.html">ld</a></li>
<li class="toctree-l1"><a class="reference internal" href="models.html">models</a></li>
<li class="toctree-l1"><a class="reference internal" href="quantities.html">quantities</a></li>
</ul>
<div class="relations">
<h3>Related Topics</h3>
<ul>
<li><a href="index.html">Documentation overview</a><ul>
</ul></li>
</ul>
</div>
<div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" />
<input type="submit" value="Go" />
</form>
</div>
</div>
<script>$('#searchbox').show(0);</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
©2018, [email protected].
|
Powered by <a href="http://sphinx-doc.org/">Sphinx 2.4.0</a>
& <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
</div>
</body>
</html> | 12,512 | 34.05042 | 140 | html |
pycheops | pycheops-master/docs/_build/html/index.html |
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title>Welcome to pycheops’s documentation! — pycheops 0.0.16 documentation</title>
<link rel="stylesheet" href="_static/alabaster.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/language_data.js"></script>
<script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="next" title="constants" href="constants.html" />
<link rel="stylesheet" href="_static/custom.css" type="text/css" />
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
</head><body>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<div class="section" id="module-pycheops">
<span id="welcome-to-pycheops-s-documentation"></span><h1>Welcome to pycheops’s documentation!<a class="headerlink" href="#module-pycheops" title="Permalink to this headline">¶</a></h1>
<div class="section" id="pycheops">
<h2>pycheops<a class="headerlink" href="#pycheops" title="Permalink to this headline">¶</a></h2>
<p>This package provides tools for the analysis of light curves from the ESA
CHEOPS mission <<a class="reference external" href="http://cheops.unibe.ch/">http://cheops.unibe.ch/</a>>.</p>
</div>
<div class="toctree-wrapper compound">
<p class="caption"><span class="caption-text">Contents:</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="constants.html">constants</a></li>
<li class="toctree-l1"><a class="reference internal" href="funcs.html">funcs</a></li>
<li class="toctree-l1"><a class="reference internal" href="instrument.html">instrument</a></li>
<li class="toctree-l1"><a class="reference internal" href="ld.html">ld</a></li>
<li class="toctree-l1"><a class="reference internal" href="models.html">models</a></li>
<li class="toctree-l1"><a class="reference internal" href="quantities.html">quantities</a></li>
</ul>
</div>
</div>
<div class="section" id="indices-and-tables">
<h1>Indices and tables<a class="headerlink" href="#indices-and-tables" title="Permalink to this headline">¶</a></h1>
<ul class="simple">
<li><p><a class="reference internal" href="genindex.html"><span class="std std-ref">Index</span></a></p></li>
<li><p><a class="reference internal" href="py-modindex.html"><span class="std std-ref">Module Index</span></a></p></li>
<li><p><a class="reference internal" href="search.html"><span class="std std-ref">Search Page</span></a></p></li>
</ul>
</div>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h1 class="logo"><a href="#">pycheops</a></h1>
<h3>Navigation</h3>
<p class="caption"><span class="caption-text">Contents:</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="constants.html">constants</a></li>
<li class="toctree-l1"><a class="reference internal" href="funcs.html">funcs</a></li>
<li class="toctree-l1"><a class="reference internal" href="instrument.html">instrument</a></li>
<li class="toctree-l1"><a class="reference internal" href="ld.html">ld</a></li>
<li class="toctree-l1"><a class="reference internal" href="models.html">models</a></li>
<li class="toctree-l1"><a class="reference internal" href="quantities.html">quantities</a></li>
</ul>
<div class="relations">
<h3>Related Topics</h3>
<ul>
<li><a href="#">Documentation overview</a><ul>
<li>Next: <a href="constants.html" title="next chapter">constants</a></li>
</ul></li>
</ul>
</div>
<div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" />
<input type="submit" value="Go" />
</form>
</div>
</div>
<script>$('#searchbox').show(0);</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
©2018, [email protected].
|
Powered by <a href="http://sphinx-doc.org/">Sphinx 2.4.0</a>
& <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
|
<a href="_sources/index.rst.txt"
rel="nofollow">Page source</a>
</div>
</body>
</html> | 5,004 | 35.801471 | 185 | html |
pycheops | pycheops-master/docs/_build/html/instrument.html |
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title>instrument — pycheops 0.0.16 documentation</title>
<link rel="stylesheet" href="_static/alabaster.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/language_data.js"></script>
<script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="next" title="ld" href="ld.html" />
<link rel="prev" title="funcs" href="funcs.html" />
<link rel="stylesheet" href="_static/custom.css" type="text/css" />
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
</head><body>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<div class="toctree-wrapper compound">
</div>
<span class="target" id="module-pycheops.instrument"></span><div class="section" id="instrument">
<h1>instrument<a class="headerlink" href="#instrument" title="Permalink to this headline">¶</a></h1>
<blockquote>
<div><p>Constants, functions and data related to the CHEOPS instrument.</p>
</div></blockquote>
<div class="section" id="functions">
<h2>Functions<a class="headerlink" href="#functions" title="Permalink to this headline">¶</a></h2>
<dl class="function">
<dt id="pycheops.instrument.response">
<code class="sig-prename descclassname">pycheops.instrument.</code><code class="sig-name descname">response</code><span class="sig-paren">(</span><em class="sig-param">passband='CHEOPS'</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.instrument.response" title="Permalink to this definition">¶</a></dt>
<dd><p>Instrument response functions.</p>
<p>The response functions have been digitized from Fig. 2 of
<a class="reference external" href="https://www.cosmos.esa.int/web/cheops/cheops-performances">https://www.cosmos.esa.int/web/cheops/cheops-performances</a></p>
<p>The available passband names are ‘CHEOPS’, ‘MOST’,
‘Kepler’, ‘CoRoT’, ‘Gaia’, ‘B’, ‘V’, ‘R’, ‘I’,
‘u_’,’g_’,’r_’,’i_’,’z_’, and ‘NGTS’</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><p><strong>passband</strong> – instrument/passband names (case sensitive).</p>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>Instrument response function as an astropy Table object.</p>
</dd>
</dl>
</dd></dl>
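<p>For example, a minimal sketch (illustrative; the column access assumes only that an
astropy Table is returned, as documented above):</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> from pycheops.instrument import response
>>> import matplotlib.pyplot as plt
>>> T = response('CHEOPS') # astropy Table, per the description above
>>> plt.plot(T.columns[0], T.columns[1]) # wavelength vs. response
>>> plt.show()
</pre></div>
</div>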
<dl class="function">
<dt id="pycheops.instrument.visibility">
<code class="sig-prename descclassname">pycheops.instrument.</code><code class="sig-name descname">visibility</code><span class="sig-paren">(</span><em class="sig-param">ra</em>, <em class="sig-param">dec</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.instrument.visibility" title="Permalink to this definition">¶</a></dt>
<dd><p>Estimate of target visibility</p>
<p>The target visibility estimated with this function is approximate. A more
reliable estimate of the observing efficiency can be made with the
Feasibility Checker tool.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>ra</strong> – right ascension in degrees (scalar or array)</p></li>
<li><p><strong>dec</strong> – declination in degrees (scalar or array)</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>target visibility (%)</p>
</dd>
</dl>
</dd></dl>
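<p>A minimal usage sketch (the coordinates are arbitrary illustrative values):</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> from pycheops.instrument import visibility
>>> visibility(290.0, 45.0) # right ascension and declination in degrees
</pre></div>
</div>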
<dl class="function">
<dt id="pycheops.instrument.exposure_time">
<code class="sig-prename descclassname">pycheops.instrument.</code><code class="sig-name descname">exposure_time</code><span class="sig-paren">(</span><em class="sig-param">G</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.instrument.exposure_time" title="Permalink to this definition">¶</a></dt>
<dd><p>Recommended minimum/maximum exposure times</p>
<p>The function returns the exposure times that are estimated to provide
10% and 98% of the detector full well capacity in the brightest image
pixel of the target.</p>
<blockquote>
<div><dl class="field-list simple">
<dt class="field-odd">param G</dt>
<dd class="field-odd"><p>Gaia G-band magnitude</p>
</dd>
<dt class="field-even">returns</dt>
<dd class="field-even"><p>min,max recommended exposure time</p>
</dd>
</dl>
</div></blockquote>
</dd></dl>
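<p>For example (illustrative magnitude):</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> from pycheops.instrument import exposure_time
>>> t_min, t_max = exposure_time(9.0) # Gaia G = 9.0
</pre></div>
</div>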
<dl class="function">
<dt id="pycheops.instrument.transit_noise">
<code class="sig-prename descclassname">pycheops.instrument.</code><code class="sig-name descname">transit_noise</code><span class="sig-paren">(</span><em class="sig-param">time</em>, <em class="sig-param">flux</em>, <em class="sig-param">flux_err</em>, <em class="sig-param">T_0=None</em>, <em class="sig-param">width=3</em>, <em class="sig-param">h_1=0.7224</em>, <em class="sig-param">h_2=0.6713</em>, <em class="sig-param">tol=0.1</em>, <em class="sig-param">method='scaled'</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.instrument.transit_noise" title="Permalink to this definition">¶</a></dt>
<dd><p>Transit noise estimate</p>
<p>The noise is calculated in a window of duration ‘width’ in hours centered
at time T_0 by first dividing out the best-fitting transit (even if this
has a negative depth), and then finding the depth of an injected transit
that gives S/N = 1.</p>
<p>Two methods are available to estimate the transit depth and its standard
error - ‘scaled’ or ‘minerr’.</p>
<p>If method=’scaled’, the transit depth and its standard error are
calculated assuming that the true standard errors on the flux measurements
are a factor f times the nominal standard error(s) provided in flux_err.</p>
<p>If method=’minerr’, the transit depth and its standard error are
calculated assuming that standard error(s) provided in flux_err are a
lower bound to the true standard errors. This tends to be more
conservative than using method=’scaled’.</p>
<p>The transit is calculated for an impact parameter b=0 using power-2 limb
darkening parameters h_1 and h_2. The default values for h_1 and h_2 are solar
values.</p>
<p>If T_0 is not specified, the median value of time is used.</p>
<p>If there are insufficient data for the calculation, the function returns
np.nan, np.nan.</p>
<blockquote>
<div><dl class="field-list simple">
<dt class="field-odd">param time</dt>
<dd class="field-odd"><p>Array of observed times (days)</p>
</dd>
<dt class="field-even">param flux</dt>
<dd class="field-even"><p>Array of normalised flux measurements</p>
</dd>
<dt class="field-odd">param flux_err</dt>
<dd class="field-odd"><p>Standard error estimate(s) for flux - array of scalar</p>
</dd>
<dt class="field-even">param T_0</dt>
<dd class="field-even"><p>Centre of time window for noise estimate</p>
</dd>
<dt class="field-odd">param width</dt>
<dd class="field-odd"><p>Width of time window for noise estimate in hours</p>
</dd>
<dt class="field-even">param h_1</dt>
<dd class="field-even"><p>Limb darkening parameter</p>
</dd>
<dt class="field-odd">param h_2</dt>
<dd class="field-odd"><p>Limb darkening parameter</p>
</dd>
<dt class="field-even">param tol</dt>
<dd class="field-even"><p>Tolerance criterion for convergence (ppm)</p>
</dd>
<dt class="field-odd">param method</dt>
<dd class="field-odd"><p>‘scaled’ or ‘minerr’</p>
</dd>
<dt class="field-even">returns</dt>
<dd class="field-even"><p>noise in ppm and, if method is ‘scaled’, noise scaling factor, f</p>
</dd>
</dl>
</div></blockquote>
</dd></dl>
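<p>A minimal sketch on synthetic data (the data values are arbitrary; with
method='scaled' the function returns the noise and the scaling factor f, as
described above):</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> from pycheops.instrument import transit_noise
>>> import numpy as np
>>> time = np.linspace(0, 0.25, 600) # days
>>> flux = np.random.normal(1.0, 5e-4, time.size) # white noise around 1
>>> flux_err = np.full(time.size, 5e-4)
>>> noise, f = transit_noise(time, flux, flux_err, width=3, method='scaled')
</pre></div>
</div>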
<dl class="function">
<dt id="pycheops.instrument.count_rate">
<code class="sig-prename descclassname">pycheops.instrument.</code><code class="sig-name descname">count_rate</code><span class="sig-paren">(</span><em class="sig-param">gmag</em>, <em class="sig-param">bp_rp</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.instrument.count_rate" title="Permalink to this definition">¶</a></dt>
<dd><p>Predicted count rate</p>
<p>The count rate in e-/s based on the star’s Gaia G magnitude and G_BP-G_RP
colour. The value returned is suitable for use in the CHEOPS exposure
time calculator using the option “Expected flux in CHEOPS passband”.</p>
<p><strong>N.B.</strong> Currently based on stellar models convolved with throughput and QE
curves measured pre-launch.</p>
</dd></dl>
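<p>For example (illustrative values for a solar-type star):</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> from pycheops.instrument import count_rate
>>> count_rate(9.0, 0.82) # Gaia G magnitude and G_BP-G_RP colour
</pre></div>
</div>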
</div>
</div>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h1 class="logo"><a href="index.html">pycheops</a></h1>
<h3>Navigation</h3>
<p class="caption"><span class="caption-text">Contents:</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="constants.html">constants</a></li>
<li class="toctree-l1"><a class="reference internal" href="funcs.html">funcs</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">instrument</a></li>
<li class="toctree-l1"><a class="reference internal" href="ld.html">ld</a></li>
<li class="toctree-l1"><a class="reference internal" href="models.html">models</a></li>
<li class="toctree-l1"><a class="reference internal" href="quantities.html">quantities</a></li>
</ul>
<div class="relations">
<h3>Related Topics</h3>
<ul>
<li><a href="index.html">Documentation overview</a><ul>
<li>Previous: <a href="funcs.html" title="previous chapter">funcs</a></li>
<li>Next: <a href="ld.html" title="next chapter">ld</a></li>
</ul></li>
</ul>
</div>
<div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" />
<input type="submit" value="Go" />
</form>
</div>
</div>
<script>$('#searchbox').show(0);</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
©2018, [email protected].
|
Powered by <a href="http://sphinx-doc.org/">Sphinx 2.4.0</a>
& <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
|
<a href="_sources/instrument.rst.txt"
rel="nofollow">Page source</a>
</div>
</body>
</html> | 10,804 | 42.22 | 626 | html |
pycheops | pycheops-master/docs/_build/html/ld.html |
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title>ld — pycheops 0.0.16 documentation</title>
<link rel="stylesheet" href="_static/alabaster.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/language_data.js"></script>
<script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="next" title="models" href="models.html" />
<link rel="prev" title="instrument" href="instrument.html" />
<link rel="stylesheet" href="_static/custom.css" type="text/css" />
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
</head><body>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<div class="toctree-wrapper compound">
</div>
<span class="target" id="module-pycheops.ld"></span><div class="section" id="ld">
<h1>ld<a class="headerlink" href="#ld" title="Permalink to this headline">¶</a></h1>
<p>Limb darkening functions</p>
<p>The available passband names are:</p>
<ul class="simple">
<li><p>‘CHEOPS’, ‘MOST’, ‘Kepler’, ‘CoRoT’, ‘Gaia’, ‘TESS’</p></li>
<li><p>‘U’, ‘B’, ‘V’, ‘R’, ‘I’ (Bessell/Johnson)</p></li>
<li><p>‘u_’, ‘g_’, ‘r_’, ‘i_’, ‘z_’ (SDSS)</p></li>
<li><p>‘NGTS’</p></li>
</ul>
<p>The power-2 limb-darkening law is described in Maxted (2018) <a class="footnote-reference brackets" href="#id3" id="id1">1</a>.
Uninformative sampling of the parameter space for the power-2 law
is described in Short et al. (2019) <a class="footnote-reference brackets" href="#id4" id="id2">2</a>.</p>
<div class="section" id="examples">
<h2>Examples<a class="headerlink" href="#examples" title="Permalink to this headline">¶</a></h2>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="kn">from</span> <span class="nn">pycheops.ld</span> <span class="kn">import</span> <span class="o">*</span>
<span class="gp">>>> </span><span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
<span class="gp">>>> </span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="gp">>>> </span><span class="n">T_eff</span> <span class="o">=</span> <span class="mi">5560</span>
<span class="gp">>>> </span><span class="n">log_g</span> <span class="o">=</span> <span class="mf">4.3</span>
<span class="gp">>>> </span><span class="n">Fe_H</span> <span class="o">=</span> <span class="o">-</span><span class="mf">0.3</span>
<span class="gp">>>> </span><span class="n">passband</span> <span class="o">=</span> <span class="s1">'Kepler'</span>
<span class="gp">>>> </span><span class="n">p2K</span> <span class="o">=</span> <span class="n">stagger_power2_interpolator</span><span class="p">(</span><span class="n">passband</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">c2</span><span class="p">,</span><span class="n">a2</span><span class="p">,</span><span class="n">h1</span><span class="p">,</span><span class="n">h2</span> <span class="o">=</span> <span class="n">p2K</span><span class="p">(</span><span class="n">T_eff</span><span class="p">,</span> <span class="n">log_g</span><span class="p">,</span> <span class="n">Fe_H</span><span class="p">)</span>
<span class="gp">>>> </span><span class="nb">print</span><span class="p">(</span><span class="s1">'h_1 = </span><span class="si">{:0.3f}</span><span class="s1">, h_2 = </span><span class="si">{:0.3f}</span><span class="s1">'</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">h1</span><span class="p">,</span> <span class="n">h2</span><span class="p">))</span>
<span class="gp">>>> </span><span class="n">mu</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">linspace</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span><span class="mi">1</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">plot</span><span class="p">(</span><span class="n">mu</span><span class="p">,</span> <span class="n">ld_power2</span><span class="p">(</span><span class="n">mu</span><span class="p">,[</span><span class="n">c2</span><span class="p">,</span> <span class="n">a2</span><span class="p">]),</span><span class="n">label</span><span class="o">=</span><span class="s1">'power-2'</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">xlim</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span><span class="mi">1</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">ylim</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span><span class="mi">1</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">xlabel</span><span class="p">(</span><span class="s1">'$\mu$'</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">ylabel</span><span class="p">(</span><span class="s1">'$I_{\lambda}(\mu)$'</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">legend</span><span class="p">()</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
</pre></div>
</div>
<p class="rubric">References</p>
<dl class="footnote brackets">
<dt class="label" id="id3"><span class="brackets"><a class="fn-backref" href="#id1">1</a></span></dt>
<dd><p>Maxted, P.F.L., 2018, A&A, submitted</p>
</dd>
<dt class="label" id="id4"><span class="brackets"><a class="fn-backref" href="#id2">2</a></span></dt>
<dd><p>Short, D.R., et al., 2019, RNAAS, …, …</p>
</dd>
</dl>
<dl class="function">
<dt id="pycheops.ld.ld_power2">
<code class="sig-prename descclassname">pycheops.ld.</code><code class="sig-name descname">ld_power2</code><span class="sig-paren">(</span><em class="sig-param">mu</em>, <em class="sig-param">a</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.ld.ld_power2" title="Permalink to this definition">¶</a></dt>
<dd><p>Evaluate power-2 limb-darkening law</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>mu</strong> – cos of angle between surface normal and line of sight</p></li>
<li><p><strong>a</strong> – array or tuple [c, alpha]</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>1 - c * (1-mu**alpha)</p>
</dd>
</dl>
</dd></dl>
<dl class="function">
<dt id="pycheops.ld.ld_claret">
<code class="sig-prename descclassname">pycheops.ld.</code><code class="sig-name descname">ld_claret</code><span class="sig-paren">(</span><em class="sig-param">mu</em>, <em class="sig-param">a</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.ld.ld_claret" title="Permalink to this definition">¶</a></dt>
<dd><p>Evaluate Claret 4-parameter limb-darkening law</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>mu</strong> – cos of angle between surface normal and line of sight</p></li>
<li><p><strong>a</strong> – array or tuple [a_1, a_2, a_3, a_4]</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>1 - Sum(i=1,4) a_i*(1-mu**(i/2))</p>
</dd>
</dl>
</dd></dl>
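<p>For example (the coefficients are arbitrary illustrative values):</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> from pycheops.ld import ld_claret
>>> import numpy as np
>>> mu = np.linspace(0, 1)
>>> I = ld_claret(mu, [0.6, -0.2, 0.4, -0.1])
</pre></div>
</div>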
<dl class="class">
<dt id="pycheops.ld.stagger_power2_interpolator">
<em class="property">class </em><code class="sig-prename descclassname">pycheops.ld.</code><code class="sig-name descname">stagger_power2_interpolator</code><span class="sig-paren">(</span><em class="sig-param">passband='CHEOPS'</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.ld.stagger_power2_interpolator" title="Permalink to this definition">¶</a></dt>
<dd><p>Parameters of a power-2 limb-darkening law interpolated
from the Stagger grid.</p>
<dl class="simple">
<dt>The power-2 limb darkening law is </dt><dd><p>I_X(mu) = 1 - c * (1-mu**alpha)</p>
</dd>
</dl>
<p>It is often better to use the transformed coefficients</p>
<ul class="simple">
<li><p>h1 = 1 - c*(1-0.5**alpha)</p></li>
</ul>
<p>and</p>
<ul class="simple">
<li><p>h2 = c*0.5**alpha</p></li>
</ul>
<p>as free parameters in a least-squares fit and/or for applying priors.</p>
<p>Returns NaN if interpolation outside the grid range is attempted</p>
<dl class="method">
<dt id="pycheops.ld.stagger_power2_interpolator.__call__">
<code class="sig-name descname">__call__</code><span class="sig-paren">(</span><em class="sig-param">T_eff</em>, <em class="sig-param">log_g</em>, <em class="sig-param">Fe_H</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.ld.stagger_power2_interpolator.__call__" title="Permalink to this definition">¶</a></dt>
<dd><dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>T_eff</strong> – effective temperature in Kelvin</p></li>
<li><p><strong>log_g</strong> – log of the surface gravity in cgs units</p></li>
<li><p><strong>Fe/H</strong> – [Fe/H] in dex</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>c, alpha, h_1, h_2</p>
</dd>
</dl>
</dd></dl>
</dd></dl>
<dl class="function">
<dt id="pycheops.ld.ca_to_h1h2">
<code class="sig-prename descclassname">pycheops.ld.</code><code class="sig-name descname">ca_to_h1h2</code><span class="sig-paren">(</span><em class="sig-param">c</em>, <em class="sig-param">alpha</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.ld.ca_to_h1h2" title="Permalink to this definition">¶</a></dt>
<dd><p>Transform for power-2 law coefficients
h1 = 1 - c*(1-0.5**alpha)
h2 = c*0.5**alpha</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>c</strong> – power-2 law coefficient, c</p></li>
<li><p><strong>alpha</strong> – power-2 law exponent, alpha</p></li>
</ul>
</dd>
</dl>
<p>returns: h1, h2</p>
</dd></dl>
<dl class="function">
<dt id="pycheops.ld.h1h2_to_ca">
<code class="sig-prename descclassname">pycheops.ld.</code><code class="sig-name descname">h1h2_to_ca</code><span class="sig-paren">(</span><em class="sig-param">h1</em>, <em class="sig-param">h2</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.ld.h1h2_to_ca" title="Permalink to this definition">¶</a></dt>
<dd><p>Inverse transform for power-2 law coefficients
c = 1 - h1 + h2
alpha = log2(c/h2)</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>h1</strong> – 1 - c*(1-0.5**alpha)</p></li>
<li><p><strong>h2</strong> – c*0.5**alpha</p></li>
</ul>
</dd>
</dl>
<p>returns: c, alpha</p>
</dd></dl>
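<p>The two transforms are mutual inverses, e.g. (illustrative values):</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> from pycheops.ld import ca_to_h1h2, h1h2_to_ca
>>> h1, h2 = ca_to_h1h2(0.5, 0.7)
>>> c, alpha = h1h2_to_ca(h1, h2) # recovers (0.5, 0.7)
</pre></div>
</div>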
<dl class="function">
<dt id="pycheops.ld.q1q2_to_h1h2">
<code class="sig-prename descclassname">pycheops.ld.</code><code class="sig-name descname">q1q2_to_h1h2</code><span class="sig-paren">(</span><em class="sig-param">q1</em>, <em class="sig-param">q2</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.ld.q1q2_to_h1h2" title="Permalink to this definition">¶</a></dt>
<dd><p>Inverse transform to h1, h2 from uninformative parameters q1, q2</p>
<p>h1 = 1 - sqrt(q1) + q2*sqrt(q1)
h2 = 1 - sqrt(q1)</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>q1</strong> – (1 - h2)**2</p></li>
<li><p><strong>q2</strong> – (h1 - h2)/(1-h2)</p></li>
</ul>
</dd>
</dl>
<p>returns: h1, h2</p>
</dd></dl>
<dl class="function">
<dt id="pycheops.ld.h1h2_to_q1q2">
<code class="sig-prename descclassname">pycheops.ld.</code><code class="sig-name descname">h1h2_to_q1q2</code><span class="sig-paren">(</span><em class="sig-param">h1</em>, <em class="sig-param">h2</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.ld.h1h2_to_q1q2" title="Permalink to this definition">¶</a></dt>
<dd><p>Transform h1, h2 to uninformative parameters q1, q2</p>
<p>q1 = (1 - h2)**2
q2 = (h1 - h2)/(1-h2)</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>h1</strong> – 1 - c*(1-0.5**alpha)</p></li>
<li><p><strong>h2</strong> – c*0.5**alpha</p></li>
</ul>
</dd>
</dl>
<p>returns: q1, q2</p>
</dd></dl>
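<p>Round-trip sketch using illustrative near-solar values of h1 and h2:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> from pycheops.ld import h1h2_to_q1q2, q1q2_to_h1h2
>>> q1, q2 = h1h2_to_q1q2(0.7224, 0.6713)
>>> q1q2_to_h1h2(q1, q2) # recovers (0.7224, 0.6713)
</pre></div>
</div>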
</div>
</div>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h1 class="logo"><a href="index.html">pycheops</a></h1>
<h3>Navigation</h3>
<p class="caption"><span class="caption-text">Contents:</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="constants.html">constants</a></li>
<li class="toctree-l1"><a class="reference internal" href="funcs.html">funcs</a></li>
<li class="toctree-l1"><a class="reference internal" href="instrument.html">instrument</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">ld</a></li>
<li class="toctree-l1"><a class="reference internal" href="models.html">models</a></li>
<li class="toctree-l1"><a class="reference internal" href="quantities.html">quantities</a></li>
</ul>
<div class="relations">
<h3>Related Topics</h3>
<ul>
<li><a href="index.html">Documentation overview</a><ul>
<li>Previous: <a href="instrument.html" title="previous chapter">instrument</a></li>
<li>Next: <a href="models.html" title="next chapter">models</a></li>
</ul></li>
</ul>
</div>
<div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" />
<input type="submit" value="Go" />
</form>
</div>
</div>
<script>$('#searchbox').show(0);</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
©2018, [email protected].
|
Powered by <a href="http://sphinx-doc.org/">Sphinx 2.4.0</a>
& <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
|
<a href="_sources/ld.rst.txt"
rel="nofollow">Page source</a>
</div>
</body>
</html> | 15,572 | 51.083612 | 512 | html |
pycheops | pycheops-master/docs/_build/html/models.html |
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title>models — pycheops 0.0.16 documentation</title>
<link rel="stylesheet" href="_static/alabaster.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/language_data.js"></script>
<script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="next" title="quantities" href="quantities.html" />
<link rel="prev" title="ld" href="ld.html" />
<link rel="stylesheet" href="_static/custom.css" type="text/css" />
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
</head><body>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<div class="toctree-wrapper compound">
</div>
<span class="target" id="module-pycheops.models"></span><div class="section" id="models">
<h1>models<a class="headerlink" href="#models" title="Permalink to this headline">¶</a></h1>
<p>Models and likelihood functions for use with lmfit</p>
<dl class="attribute">
<dt id="pycheops.models.qpower2">
<code class="sig-prename descclassname">pycheops.models.</code><code class="sig-name descname">qpower2</code><a class="headerlink" href="#pycheops.models.qpower2" title="Permalink to this definition">¶</a></dt>
<dd><p>Fast and accurate transit light curves for the power-2 limb-darkening law</p>
<p>The power-2 limb-darkening law is</p>
<div class="math notranslate nohighlight">
\[I(\mu) = 1 - c (1 - \mu^\alpha)\]</div>
<p>Light curves are calculated using the qpower2 approximation <a class="footnote-reference brackets" href="#id2" id="id1">2</a>. The
approximation is accurate to better than 100ppm for radius ratio k < 0.1.</p>
<p><strong>N.B.</strong> qpower2 is untested/inaccurate for values of k > 0.2</p>
<dl class="footnote brackets">
<dt class="label" id="id2"><span class="brackets"><a class="fn-backref" href="#id1">2</a></span></dt>
<dd><p>Maxted, P.F.L. & Gill, S., 2019A&A…622A..33M</p>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>z</strong> – star-planet separation on the sky cf. star radius (array)</p></li>
<li><p><strong>k</strong> – planet-star radius ratio (scalar, k<1)</p></li>
<li><p><strong>c</strong> – power-2 limb darkening coefficient</p></li>
<li><p><strong>a</strong> – power-2 limb darkening exponent</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>light curve (observed flux)</p>
</dd>
<dt class="field-odd">Example</dt>
<dd class="field-odd"><p></p></dd>
</dl>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="kn">from</span> <span class="nn">pycheops.models</span> <span class="kn">import</span> <span class="n">qpower2</span>
<span class="gp">>>> </span><span class="kn">from</span> <span class="nn">pycheops.funcs</span> <span class="kn">import</span> <span class="n">t2z</span>
<span class="gp">>>> </span><span class="kn">from</span> <span class="nn">numpy</span> <span class="kn">import</span> <span class="n">linspace</span>
<span class="gp">>>> </span><span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
<span class="gp">>>> </span><span class="n">t</span> <span class="o">=</span> <span class="n">linspace</span><span class="p">(</span><span class="o">-</span><span class="mf">0.025</span><span class="p">,</span><span class="mf">0.025</span><span class="p">,</span><span class="mi">1000</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">sini</span> <span class="o">=</span> <span class="mf">0.999</span>
<span class="gp">>>> </span><span class="n">rstar</span> <span class="o">=</span> <span class="mf">0.05</span>
<span class="gp">>>> </span><span class="n">ecc</span> <span class="o">=</span> <span class="mf">0.2</span>
<span class="gp">>>> </span><span class="n">om</span> <span class="o">=</span> <span class="mi">120</span>
<span class="gp">>>> </span><span class="n">tzero</span> <span class="o">=</span> <span class="mf">0.0</span>
<span class="gp">>>> </span><span class="n">P</span> <span class="o">=</span> <span class="mf">0.1</span>
<span class="gp">>>> </span><span class="n">z</span><span class="o">=</span><span class="n">t2z</span><span class="p">(</span><span class="n">t</span><span class="p">,</span><span class="n">tzero</span><span class="p">,</span><span class="n">P</span><span class="p">,</span><span class="n">sini</span><span class="p">,</span><span class="n">rstar</span><span class="p">,</span><span class="n">ecc</span><span class="p">,</span><span class="n">om</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">c</span> <span class="o">=</span> <span class="mf">0.5</span>
<span class="gp">>>> </span><span class="n">a</span> <span class="o">=</span> <span class="mf">0.7</span>
<span class="gp">>>> </span><span class="n">k</span> <span class="o">=</span> <span class="mf">0.1</span>
<span class="gp">>>> </span><span class="n">f</span> <span class="o">=</span> <span class="n">qpower2</span><span class="p">(</span><span class="n">z</span><span class="p">,</span><span class="n">k</span><span class="p">,</span><span class="n">c</span><span class="p">,</span><span class="n">a</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">plot</span><span class="p">(</span><span class="n">t</span><span class="p">,</span><span class="n">f</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">plt</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
</pre></div>
</div>
</dd></dl>
<dl class="attribute">
<dt id="pycheops.models.ueclipse">
<code class="sig-prename descclassname">pycheops.models.</code><code class="sig-name descname">ueclipse</code><a class="headerlink" href="#pycheops.models.ueclipse" title="Permalink to this definition">¶</a></dt>
<dd><p>Eclipse light curve for a planet with uniform surface brightness by a star</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>z</strong> – star-planet separation on the sky cf. star radius (array)</p></li>
<li><p><strong>k</strong> – planet-star radius ratio (scalar, k<1)</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>light curve (observed flux from eclipsed source)</p>
</dd>
</dl>
</dd></dl>
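<p>A minimal sketch with a directly constructed separation array (illustrative
values):</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> from pycheops.models import ueclipse
>>> from numpy import linspace
>>> z = linspace(1.2, 0.0, 100) # planet moving behind the star
>>> f = ueclipse(z, 0.1) # k = 0.1
</pre></div>
</div>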
<dl class="class">
<dt id="pycheops.models.TransitModel">
<em class="property">class </em><code class="sig-prename descclassname">pycheops.models.</code><code class="sig-name descname">TransitModel</code><span class="sig-paren">(</span><em class="sig-param">independent_vars=['t'], prefix='', nan_policy='raise', **kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.models.TransitModel" title="Permalink to this definition">¶</a></dt>
<dd><p>Light curve model for the transit of a spherical star by an opaque
spherical body (planet).</p>
<p>Limb-darkening is described by the power-2 law:</p>
<div class="math notranslate nohighlight">
\[I(\mu) = 1 - c (1 - \mu^\alpha)\]</div>
<p>The transit depth, width and shape are parameterised by D, W and b. These
parameters are defined below in terms of the radius of the star and
planet, R_s and R_p, respectively, the semi-major axis, a, and the orbital
inclination, i. The eccentricity and longitude of periastron for the star’s
orbit are e and omega, respectively.</p>
<p>The following parameters are defined for convenience:</p>
<ul class="simple">
<li><p>k = R_p/R_s;</p></li>
<li><p>aR = a/R_s;</p></li>
<li><p>rho = 0.013418*aR**3/(P/d)**2.</p></li>
</ul>
<p><strong>N.B.</strong> the mean stellar density in solar units is rho, but only if the
mass ratio q = M_planet/M_star is q << 1.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>t</strong> – <ul>
<li><p>independent variable (time)</p></li>
</ul>
</p></li>
<li><p><strong>T_0</strong> – <ul>
<li><p>time of mid-transit</p></li>
</ul>
</p></li>
<li><p><strong>P</strong> – <ul>
<li><p>orbital period</p></li>
</ul>
</p></li>
<li><p><strong>D</strong> – <ul>
<li><p>(R_p/R_s)**2 = k**2</p></li>
</ul>
</p></li>
<li><p><strong>W</strong> – <ul>
<li><p>(R_s/a)*sqrt((1+k)**2 - b**2)/pi</p></li>
</ul>
</p></li>
<li><p><strong>b</strong> – <ul>
<li><p>a*cos(i)/R_s</p></li>
</ul>
</p></li>
<li><p><strong>f_c</strong> – <ul>
<li><p>sqrt(ecc)*cos(omega)</p></li>
</ul>
</p></li>
<li><p><strong>f_s</strong> – <ul>
<li><p>sqrt(ecc)*sin(omega)</p></li>
</ul>
</p></li>
<li><p><strong>h_1</strong> – <ul>
<li><p>I(0.5) = 1 - c*(1-0.5**alpha)</p></li>
</ul>
</p></li>
<li><p><strong>h_2</strong> – <ul>
<li><p>I(0.5) - I(0) = c*0.5**alpha</p></li>
</ul>
</p></li>
</ul>
</dd>
</dl>
<p>The flux value outside of transit is 1. The light curve is calculated using
the qpower2 algorithm, which is fast but only accurate for k < ~0.3.</p>
<p>If the input parameters are invalid or k>0.5, the model returns an
array with value 1 everywhere.</p>
</dd></dl>
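<p>A minimal sketch of evaluating the model (illustrative parameter values;
make_params and eval are the standard lmfit.Model methods):</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> from pycheops.models import TransitModel
>>> import numpy as np
>>> model = TransitModel()
>>> pars = model.make_params(T_0=0.0, P=2.5, D=0.01, W=0.025, b=0.3,
... f_c=0.0, f_s=0.0, h_1=0.7224, h_2=0.6713)
>>> t = np.linspace(-0.1, 0.1, 500)
>>> flux = model.eval(pars, t=t)
</pre></div>
</div>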
<dl class="class">
<dt id="pycheops.models.EclipseModel">
<em class="property">class </em><code class="sig-prename descclassname">pycheops.models.</code><code class="sig-name descname">EclipseModel</code><span class="sig-paren">(</span><em class="sig-param">independent_vars=['t'], prefix='', nan_policy='raise', **kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.models.EclipseModel" title="Permalink to this definition">¶</a></dt>
<dd><p>Light curve model for the eclipse by a spherical star of a spherical
body (planet) with no limb darkening.</p>
<p>The transit depth, width and shape are parameterised by D, W and b. These
parameters are defined below in terms of the radius of the star and
planet, R_s and R_p, respectively, the semi-major axis, a, and the orbital
inclination, i. The eccentricity and longitude of periastron for the star’s
orbit are e and omega, respectively. These are the same parameters used in
TransitModel. The flux level outside of eclipse is 1 and inside eclipse is
(1-L). The apparent time of mid-eclipse includes the correction a_c for
the light travel time across the orbit, i.e., for a circular orbit the
time of mid-eclipse is (T_0 + 0.5*P) + a_c.</p>
<p><strong>N.B.</strong> a_c must have the same units as P.</p>
<p>The following parameters are defined for convenience:</p>
<ul class="simple">
<li><p>k = R_p/R_s;</p></li>
<li><p>aR = a/R_s;</p></li>
<li><p>rho = 0.013418*aR**3/(P/d)**2.</p></li>
</ul>
<p><strong>N.B.</strong> the mean stellar density in solar units is rho, but only if the
mass ratio q = M_planet/M_star is q << 1.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>t</strong> – <ul>
<li><p>independent variable (time)</p></li>
</ul>
</p></li>
<li><p><strong>T_0</strong> – <ul>
<li><p>time of mid-transit</p></li>
</ul>
</p></li>
<li><p><strong>P</strong> – <ul>
<li><p>orbital period</p></li>
</ul>
</p></li>
<li><p><strong>D</strong> – <ul>
<li><p>(R_p/R_s)**2 = k**2</p></li>
</ul>
</p></li>
<li><p><strong>W</strong> – <ul>
<li><p>(R_s/a)*sqrt((1+k)**2 - b**2)/pi</p></li>
</ul>
</p></li>
<li><p><strong>b</strong> – <ul>
<li><p>a*cos(i)/R_s</p></li>
</ul>
</p></li>
<li><p><strong>L</strong> – <ul>
<li><p>Depth of eclipse</p></li>
</ul>
</p></li>
<li><p><strong>f_c</strong> – <ul>
<li><p>sqrt(ecc).cos(omega)</p></li>
</ul>
</p></li>
<li><p><strong>f_s</strong> – <ul>
<li><p>sqrt(ecc).sin(omega)</p></li>
</ul>
</p></li>
<li><p><strong>a_c</strong> – <ul>
<li><p>correction for light travel time across the orbit</p></li>
</ul>
</p></li>
</ul>
</dd>
</dl>
</dd></dl>
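<p>For example (illustrative values; for this circular orbit mid-eclipse is near
T_0 + 0.5*P):</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> from pycheops.models import EclipseModel
>>> import numpy as np
>>> model = EclipseModel()
>>> pars = model.make_params(T_0=0.0, P=2.5, D=0.01, W=0.025, b=0.3,
... L=1e-4, f_c=0.0, f_s=0.0, a_c=0.0)
>>> t = np.linspace(1.15, 1.35, 500)
>>> flux = model.eval(pars, t=t)
</pre></div>
</div>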
<dl class="class">
<dt id="pycheops.models.FactorModel">
<em class="property">class </em><code class="sig-prename descclassname">pycheops.models.</code><code class="sig-name descname">FactorModel</code><span class="sig-paren">(</span><em class="sig-param">independent_vars=['t'], prefix='', nan_policy='raise', dx=None, dy=None, sinphi=None, cosphi=None, bg=None, **kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.models.FactorModel" title="Permalink to this definition">¶</a></dt>
<dd><p>Flux scaling and trend factor model</p>
<dl class="simple">
<dt>f = c*(1 + dfdt*dt + d2fdt2*dt**2 + dfdbg*bg(t) + </dt><dd><p>dfdx*dx(t) + dfdy*dy(t) +
d2fdx2*dx(t)**2 + d2f2y2*dy(t)**2 + d2fdxdy*x(t)*dy(t) +
dfdsinphi*sin(phi(t)) + dfdcosphi*cos(phi(t)) +
dfdsin2phi*sin(2.phi(t)) + dfdcos2phi*cos(2.phi(t)) +
dfdsin3phi*sin(3.phi(t)) + dfdcos3phi*cos(3.phi(t)) )</p>
</dd>
</dl>
<p>The detrending coefficients dfdx, etc. are 0 and fixed by default. If
any of the coefficients dfdx, d2fdxdy or d2fdx2 is not 0, a function to
calculate the x-position offset as a function of time, dx(t), must be
passed as a keyword argument, and similarly for the y-position offset,
dy(t). For detrending against the spacecraft roll angle, phi(t), the
functions to be provided as keyword arguments are sinphi(t) and
cosphi(t). The linear trend dfdbg is proportional to the estimated
background flux in the aperture, bg(t). The time trend described by dfdt
and d2fdt2 is calculated using the variable dt = t - median(t).</p>
<dl class="method">
<dt id="pycheops.models.FactorModel.guess">
<code class="sig-name descname">guess</code><span class="sig-paren">(</span><em class="sig-param">data</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.models.FactorModel.guess" title="Permalink to this definition">¶</a></dt>
<dd><p>Estimate initial model parameter values from data.</p>
</dd></dl>
</dd></dl>
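<p>The sketch below shows one way a FactorModel might be evaluated with
detrending against hypothetical x- and y-position offsets. It assumes the
standard lmfit.Model interface (make_params and eval) and the keyword
arguments described above; the offset functions and coefficient values are
illustrative only:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>import numpy as np
from pycheops.models import FactorModel

t = np.linspace(0.0, 1.0, 500)
dx = lambda t: 0.01*np.sin(2*np.pi*t)   # hypothetical x-offset function
dy = lambda t: 0.01*np.cos(2*np.pi*t)   # hypothetical y-offset function

model = FactorModel(dx=dx, dy=dy)
params = model.make_params(c=1.0, dfdx=0.1, dfdy=-0.05)
flux = model.eval(params, t=t)
</pre></div></div>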
<dl class="class">
<dt id="pycheops.models.ThermalPhaseModel">
<em class="property">class </em><code class="sig-prename descclassname">pycheops.models.</code><code class="sig-name descname">ThermalPhaseModel</code><span class="sig-paren">(</span><em class="sig-param">independent_vars=['t'], prefix='', nan_policy='raise', **kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.models.ThermalPhaseModel" title="Permalink to this definition">¶</a></dt>
<dd><p>Thermal phase model for a tidally-locked planet</p>
<div class="math notranslate nohighlight">
\[a_{th}[1-\cos(\phi)]/2 + b_{th}[1+\sin(\phi)]/2 + c_{th},\]</div>
<p>where <span class="math notranslate nohighlight">\(\phi = 2\pi(t-T_0)/P\)</span></p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>t</strong> – <ul>
<li><p>independent variable (time)</p></li>
</ul>
</p></li>
<li><p><strong>T_0</strong> – <ul>
<li><p>time of inferior conjunction (mid-transit)</p></li>
</ul>
</p></li>
<li><p><strong>P</strong> – <ul>
<li><p>orbital period</p></li>
</ul>
</p></li>
<li><p><strong>a_th</strong> – <ul>
<li><p>coefficient of cosine-like term</p></li>
</ul>
</p></li>
<li><p><strong>b_th</strong> – <ul>
<li><p>coefficient of sine-like term</p></li>
</ul>
</p></li>
<li><p><strong>c_th</strong> – <ul>
<li><p>constant term (minimum flux)</p></li>
</ul>
</p></li>
</ul>
</dd>
</dl>
<p>The following parameters are defined for convenience.</p>
<ul class="simple">
<li><p>A = sqrt(a_th**2 + b_th**2), peak-to-trough amplitude of the phase curve</p></li>
<li><p>F = c_th + (a_th + b_th + A)/2, flux at the maximum of the phase curve</p></li>
<li><p>ph_max = arctan2(b_th,-a_th)/(2*pi) = phase at maximum flux</p></li>
</ul>
</dd></dl>
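<p>The model function above is simple enough to reproduce directly. A
self-contained sketch, including the convenience parameters, with assumed
example coefficient values:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>import numpy as np

def thermal_phase(t, T_0, P, a_th, b_th, c_th):
    phi = 2*np.pi*(t - T_0)/P
    return a_th*(1 - np.cos(phi))/2 + b_th*(1 + np.sin(phi))/2 + c_th

a_th, b_th = 1.0e-4, 5.0e-5                 # assumed example values
A = np.hypot(a_th, b_th)                    # peak-to-trough amplitude
ph_max = np.arctan2(b_th, -a_th)/(2*np.pi)  # phase at maximum flux
</pre></div></div>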
<dl class="class">
<dt id="pycheops.models.ReflectionModel">
<em class="property">class </em><code class="sig-prename descclassname">pycheops.models.</code><code class="sig-name descname">ReflectionModel</code><span class="sig-paren">(</span><em class="sig-param">independent_vars=['t'], prefix='', nan_policy='raise', **kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.models.ReflectionModel" title="Permalink to this definition">¶</a></dt>
<dd><p>Reflected stellar light from a planet with a Lambertian phase function.</p>
<p>The fraction of the stellar flux reflected from the planet of radius
<span class="math notranslate nohighlight">\(R_p\)</span> at a distance <span class="math notranslate nohighlight">\(r\)</span> from the star and viewed at phase
angle <span class="math notranslate nohighlight">\(\beta\)</span> is</p>
<div class="math notranslate nohighlight">
\[A_g(R_p/r)^2 \times [\sin(\beta) + (\pi-\beta)*\cos(\beta) ]/\pi\]</div>
<p>The eccentricity and longitude of periastron for the planet’s orbit are
ecc and omega, respectively.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>t</strong> – <ul>
<li><p>independent variable (time)</p></li>
</ul>
</p></li>
<li><p><strong>T_0</strong> – <ul>
<li><p>time of inferior conjunction (mid-transit)</p></li>
</ul>
</p></li>
<li><p><strong>P</strong> – <ul>
<li><p>orbital period</p></li>
</ul>
</p></li>
<li><p><strong>A_g</strong> – <ul>
<li><p>geometric albedo</p></li>
</ul>
</p></li>
<li><p><strong>r_p</strong> – <ul>
<li><p>R_p/a, where a is the semi-major axis.</p></li>
</ul>
</p></li>
<li><p><strong>f_c</strong> – <ul>
<li><p>sqrt(ecc).cos(omega)</p></li>
</ul>
</p></li>
<li><p><strong>f_s</strong> – <ul>
<li><p>sqrt(ecc).sin(omega)</p></li>
</ul>
</p></li>
<li><p><strong>sini</strong> – <ul>
<li><p>sin(inclination)</p></li>
</ul>
</p></li>
</ul>
</dd>
</dl>
</dd></dl>
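<p>A direct sketch of the Lambertian phase-function factor defined above.
The conversion from time to phase angle beta, which depends on the orbital
elements, is omitted here, and the function name is illustrative:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>import numpy as np

def reflection(beta, A_g, rp_over_r):
    # rp_over_r = R_p/r; beta is the phase angle in radians
    return A_g*rp_over_r**2*(np.sin(beta) + (np.pi - beta)*np.cos(beta))/np.pi
</pre></div></div>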
<dl class="class">
<dt id="pycheops.models.RVModel">
<em class="property">class </em><code class="sig-prename descclassname">pycheops.models.</code><code class="sig-name descname">RVModel</code><span class="sig-paren">(</span><em class="sig-param">independent_vars=['t'], prefix='', nan_policy='raise', **kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.models.RVModel" title="Permalink to this definition">¶</a></dt>
<dd><p>Radial velocity in a Keplerian orbit</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>t</strong> – <ul>
<li><p>independent variable (time)</p></li>
</ul>
</p></li>
<li><p><strong>T_0</strong> – <ul>
<li><p>time of inferior conjunction for the companion (mid-transit)</p></li>
</ul>
</p></li>
<li><p><strong>P</strong> – <ul>
<li><p>orbital period</p></li>
</ul>
</p></li>
<li><p><strong>V_0</strong> – <ul>
<li><p>radial velocity of the centre-of-mass</p></li>
</ul>
</p></li>
<li><p><strong>K</strong> – <ul>
<li><p>semi-amplitude of spectroscopic orbit</p></li>
</ul>
</p></li>
<li><p><strong>f_c</strong> – <ul>
<li><p>sqrt(ecc).cos(omega)</p></li>
</ul>
</p></li>
<li><p><strong>f_s</strong> – <ul>
<li><p>sqrt(ecc).sin(omega)</p></li>
</ul>
</p></li>
<li><p><strong>sini</strong> – <ul>
<li><p>sine of the orbital inclination</p></li>
</ul>
</p></li>
</ul>
</dd>
</dl>
</dd></dl>
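<p>A self-contained sketch of a Keplerian radial-velocity curve is shown
below. For simplicity it takes the time of periastron and (ecc, omega)
directly, whereas RVModel uses the time of inferior conjunction and
(f_c, f_s); converting between those conventions needs an extra step not
shown here. This is an illustration, not the pycheops implementation:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>import numpy as np

def kepler_E(M, ecc, n_iter=20):
    # Solve Kepler's equation M = E - ecc*sin(E) by Newton iteration
    E = np.copy(M)
    for _ in range(n_iter):
        E = E - (E - ecc*np.sin(E) - M)/(1 - ecc*np.cos(E))
    return E

def rv_keplerian(t, T_peri, P, V_0, K, ecc, omega):
    M = 2*np.pi*(t - T_peri)/P            # mean anomaly
    E = kepler_E(M, ecc)                  # eccentric anomaly
    nu = 2*np.arctan2(np.sqrt(1 + ecc)*np.sin(E/2),
                      np.sqrt(1 - ecc)*np.cos(E/2))   # true anomaly
    return V_0 + K*(np.cos(nu + omega) + ecc*np.cos(omega))
</pre></div></div>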
<dl class="class">
<dt id="pycheops.models.RVCompanion">
<em class="property">class </em><code class="sig-prename descclassname">pycheops.models.</code><code class="sig-name descname">RVCompanion</code><span class="sig-paren">(</span><em class="sig-param">independent_vars=['t'], prefix='', nan_policy='raise', **kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.models.RVCompanion" title="Permalink to this definition">¶</a></dt>
<dd><p>Radial velocity in a Keplerian orbit for the companion</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>t</strong> – <ul>
<li><p>independent variable (time)</p></li>
</ul>
</p></li>
<li><p><strong>T_0</strong> – <ul>
<li><p>time of inferior conjunction for the companion (mid-transit)</p></li>
</ul>
</p></li>
<li><p><strong>P</strong> – <ul>
<li><p>orbital period</p></li>
</ul>
</p></li>
<li><p><strong>V_0</strong> – <ul>
<li><p>radial velocity of the centre-of-mass</p></li>
</ul>
</p></li>
<li><p><strong>K</strong> – <ul>
<li><p>semi-amplitude of spectroscopic orbit</p></li>
</ul>
</p></li>
<li><p><strong>f_c</strong> – <ul>
<li><p>sqrt(ecc).cos(omega)</p></li>
</ul>
</p></li>
<li><p><strong>f_s</strong> – <ul>
<li><p>sqrt(ecc).sin(omega)</p></li>
</ul>
</p></li>
<li><p><strong>sini</strong> – <ul>
<li><p>sine of the orbital inclination</p></li>
</ul>
</p></li>
</ul>
</dd>
</dl>
</dd></dl>
<dl class="class">
<dt id="pycheops.models.EBLMModel">
<em class="property">class </em><code class="sig-prename descclassname">pycheops.models.</code><code class="sig-name descname">EBLMModel</code><span class="sig-paren">(</span><em class="sig-param">independent_vars=['t'], prefix='', nan_policy='raise', **kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.models.EBLMModel" title="Permalink to this definition">¶</a></dt>
<dd><p>Light curve model for the mutual eclipses by spherical stars in an
eclipsing binary with one low-mass companion, e.g., F/G-star + M-dwarf.</p>
<p>The transit depth, width and shape are parameterised by D, W and b. These
parameters are defined below in terms of the radii of the stars, R_1 and
R_2, the semi-major axis, a, and the orbital inclination, i. This model
assumes R_1 &gt;&gt; R_2, i.e., k=R_2/R_1 &lt;~0.2. The eccentricity and longitude
of periastron for the star’s orbit are e and omega, respectively. These
are the same parameters used in TransitModel. The flux level outside of
eclipse is 1 and inside eclipse is (1-L). The apparent time of mid-eclipse
includes the correction a_c for the light travel time across the orbit,
i.e., for a circular orbit the time of mid-eclipse is (T_0 + 0.5*P) + a_c.</p>
<p><strong>N.B.</strong> a_c must have the same units as P. The power-2 law is used to model
the limb-darkening of star 1. Limb-darkening on star 2 is ignored.</p>
<p>The following parameters are defined for convenience:</p>
<ul class="simple">
<li><p>k = R_2/R_1;</p></li>
<li><p>aR = a/R_1;</p></li>
<li><p>J = L/D (surface brightness ratio).</p></li>
</ul>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>t</strong> – <ul>
<li><p>independent variable (time)</p></li>
</ul>
</p></li>
<li><p><strong>T_0</strong> – <ul>
<li><p>time of mid-transit</p></li>
</ul>
</p></li>
<li><p><strong>P</strong> – <ul>
<li><p>orbital period</p></li>
</ul>
</p></li>
<li><p><strong>D</strong> – <ul>
<li><p>(R_2/R_1)**2 = k**2</p></li>
</ul>
</p></li>
<li><p><strong>W</strong> – <ul>
<li><p>(R_1/a)*sqrt((1+k)**2 - b**2)/pi</p></li>
</ul>
</p></li>
<li><p><strong>b</strong> – <ul>
<li><p>a*cos(i)/R_1</p></li>
</ul>
</p></li>
<li><p><strong>L</strong> – <ul>
<li><p>Depth of eclipse</p></li>
</ul>
</p></li>
<li><p><strong>f_c</strong> – <ul>
<li><p>sqrt(ecc).cos(omega)</p></li>
</ul>
</p></li>
<li><p><strong>f_s</strong> – <ul>
<li><p>sqrt(ecc).sin(omega)</p></li>
</ul>
</p></li>
<li><p><strong>h_1</strong> – <ul>
<li><p>I(0.5) = 1 - c*(1-0.5**alpha)</p></li>
</ul>
</p></li>
<li><p><strong>h_2</strong> – <ul>
<li><p>I(0.5) - I(0) = c*0.5**alpha</p></li>
</ul>
</p></li>
<li><p><strong>a_c</strong> – <ul>
<li><p>correction for light travel time across the orbit</p></li>
</ul>
</p></li>
</ul>
</dd>
</dl>
</dd></dl>
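<p>The limb-darkening parameters h_1 and h_2 can be inverted to recover the
coefficients c and alpha of the power-2 law using the two definitions given
in the parameter list above. A short sketch with assumed example values:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>import numpy as np

h_1, h_2 = 0.7, 0.4      # assumed example values
c = 1 - h_1 + h_2        # from h_1 = 1 - c*(1 - 0.5**alpha)
alpha = np.log2(c/h_2)   # from h_2 = c*0.5**alpha
</pre></div></div>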
<dl class="class">
<dt id="pycheops.models.PlanetModel">
<em class="property">class </em><code class="sig-prename descclassname">pycheops.models.</code><code class="sig-name descname">PlanetModel</code><span class="sig-paren">(</span><em class="sig-param">independent_vars=['t'], prefix='', nan_policy='raise', **kwargs</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.models.PlanetModel" title="Permalink to this definition">¶</a></dt>
<dd><p>Light curve model for a transiting exoplanet including transits,
eclipses, and a thermal phase curve for the planet with an offset.</p>
<p>The flux level from the star is 1 and is assumed to be constant.</p>
<p>The thermal phase curve from the planet is approximated by a cosine
function with amplitude A=F_max-F_min plus the minimum flux, F_min, i.e.,
the maximum flux is F_max = F_min+A, and this occurs at phase (ph_off+0.5)
relative to the time of mid-transit, i.e.,</p>
<div class="math notranslate nohighlight">
\[f_{\rm th} = F_{\rm min} + A[1-\cos(\phi-\phi_{\rm off})]/2\]</div>
<p>where <span class="math notranslate nohighlight">\(\phi = 2\pi(t-T_0)/P\)</span> and
<span class="math notranslate nohighlight">\(\phi_{\rm off} = 2\pi\,{\rm ph\_off}\)</span>.</p>
<p>The transit depth, width and shape are parameterised by D, W and b. These
parameters are defined below in terms of the radius of the star, R_1, the
radius of the planet, R_2, the semi-major axis, a, and the orbital
inclination, i. This model assumes R_1 &gt;&gt; R_2, i.e., k=R_2/R_1 &lt;~0.2. The
eccentricity and longitude of periastron for the star’s orbit are e and
omega, respectively. These
are the same parameters used in TransitModel. The eclipse of the planet
assumes a uniform flux distribution.</p>
<p>The apparent time of mid-eclipse includes the correction a_c for the
light travel time across the orbit, i.e., for a circular orbit the time of
mid-eclipse is (T_0 + 0.5*P) + a_c.</p>
<p><strong>N.B.</strong> a_c must have the same units as P.</p>
<p>Stellar limb-darkening is described by the power-2 law:</p>
<div class="math notranslate nohighlight">
\[I(\mu) = 1 - c (1 - \mu^\alpha)\]</div>
<p>The following parameters are defined for convenience:</p>
<ul class="simple">
<li><p>k = R_2/R_1;</p></li>
<li><p>aR = a/R_1;</p></li>
<li><p>A = F_max - F_min = amplitude of thermal phase effect;</p></li>
<li><p>rho = 0.013418*aR**3/(P/d)**2.</p></li>
</ul>
<p><strong>N.B.</strong> the mean stellar density in solar units is rho, but only if the
mass ratio q = M_planet/M_star is q &lt;&lt; 1.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>t</strong> – <ul>
<li><p>independent variable (time)</p></li>
</ul>
</p></li>
<li><p><strong>T_0</strong> – <ul>
<li><p>time of mid-transit</p></li>
</ul>
</p></li>
<li><p><strong>P</strong> – <ul>
<li><p>orbital period</p></li>
</ul>
</p></li>
<li><p><strong>D</strong> – <ul>
<li><p>(R_2/R_1)**2 = k**2</p></li>
</ul>
</p></li>
<li><p><strong>W</strong> – <ul>
<li><p>(R_1/a)*sqrt((1+k)**2 - b**2)/pi</p></li>
</ul>
</p></li>
<li><p><strong>b</strong> – <ul>
<li><p>a*cos(i)/R_1</p></li>
</ul>
</p></li>
<li><p><strong>F_min</strong> – <ul>
<li><p>minimum flux in the thermal phase model</p></li>
</ul>
</p></li>
<li><p><strong>F_max</strong> – <ul>
<li><p>maximum flux in the thermal phase model</p></li>
</ul>
</p></li>
<li><p><strong>ph_off</strong> – <ul>
<li><p>offset phase in the thermal phase model</p></li>
</ul>
</p></li>
<li><p><strong>f_c</strong> – <ul>
<li><p>sqrt(ecc).cos(omega)</p></li>
</ul>
</p></li>
<li><p><strong>f_s</strong> – <ul>
<li><p>sqrt(ecc).sin(omega)</p></li>
</ul>
</p></li>
<li><p><strong>h_1</strong> – <ul>
<li><p>I(0.5) = 1 - c*(1-0.5**alpha)</p></li>
</ul>
</p></li>
<li><p><strong>h_2</strong> – <ul>
<li><p>I(0.5) - I(0) = c*0.5**alpha</p></li>
</ul>
</p></li>
<li><p><strong>a_c</strong> – <ul>
<li><p>correction for light travel time across the orbit</p></li>
</ul>
</p></li>
</ul>
</dd>
</dl>
</dd></dl>
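<p>A direct sketch of the thermal phase factor defined above; the function
name is illustrative:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>import numpy as np

def planet_thermal(t, T_0, P, F_min, F_max, ph_off):
    phi = 2*np.pi*(t - T_0)/P
    phi_off = 2*np.pi*ph_off
    return F_min + (F_max - F_min)*(1 - np.cos(phi - phi_off))/2
</pre></div></div>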
<dl class="attribute">
<dt id="pycheops.models.scaled_transit_fit">
<code class="sig-prename descclassname">pycheops.models.</code><code class="sig-name descname">scaled_transit_fit</code><a class="headerlink" href="#pycheops.models.scaled_transit_fit" title="Permalink to this definition">¶</a></dt>
<dd><p>Optimum scaled transit depth for data with scaled errors</p>
<p>Find the value of the scaling factor s that provides the best fit of the
model m = 1 + s*(model-1) to the normalised input fluxes. It is assumed
that the true standard errors on the flux measurements are a factor f
times the nominal standard error(s) provided in sigma. Also returns
standard error estimates for s and f, sigma_s and sigma_f, respectively.</p>
<blockquote>
<div><dl class="field-list simple">
<dt class="field-odd">param flux</dt>
<dd class="field-odd"><p>Array of normalised flux measurements</p>
</dd>
<dt class="field-even">param sigma</dt>
<dd class="field-even"><p>Standard error estimate(s) for flux - array or scalar</p>
</dd>
<dt class="field-odd">param model</dt>
<dd class="field-odd"><p>Transit model to be scaled</p>
</dd>
<dt class="field-even">returns</dt>
<dd class="field-even"><p>s, f, sigma_s, sigma_f</p>
</dd>
</dl>
</div></blockquote>
</dd></dl>
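<p>Because the model m = 1 + s*(model-1) is linear in s, the best-fit
scaling factor follows from weighted least squares, with the error-scaling
factor f estimated from the reduced chi-squared. The sketch below shows the
idea; it is an illustration, not the pycheops implementation:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>import numpy as np

def scaled_fit_sketch(flux, sigma, model):
    w = 1/np.asarray(sigma)**2
    x = model - 1                         # basis function for m = 1 + s*x
    s = np.sum(w*(flux - 1)*x)/np.sum(w*x**2)
    chisq = np.sum(w*(flux - 1 - s*x)**2)
    f = np.sqrt(chisq/len(flux))          # error scaling factor estimate
    sigma_s = f/np.sqrt(np.sum(w*x**2))
    return s, f, sigma_s
</pre></div></div>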
<dl class="function">
<dt id="pycheops.models.minerr_transit_fit">
<code class="sig-prename descclassname">pycheops.models.</code><code class="sig-name descname">minerr_transit_fit</code><span class="sig-paren">(</span><em class="sig-param">flux</em>, <em class="sig-param">sigma</em>, <em class="sig-param">model</em><span class="sig-paren">)</span><a class="headerlink" href="#pycheops.models.minerr_transit_fit" title="Permalink to this definition">¶</a></dt>
<dd><blockquote>
<div><p>Optimum scaled transit depth for data with lower bounds on errors</p>
<p>Find the value of the scaling factor s that provides the best fit of the
model m = 1 + s*(model-1) to the normalised input fluxes. It is assumed
that the nominal standard error(s) provided in sigma are lower bounds to
the true standard errors on the flux measurements. <a class="footnote-reference brackets" href="#id4" id="id3">1</a> The probability
distribution for the true standard errors is assumed to be</p>
<div class="math notranslate nohighlight">
\[P(\sigma_{\rm true} | \sigma) = \sigma/\sigma_{\rm true}^2 \]</div>
<dl class="field-list simple">
<dt class="field-odd">param flux</dt>
<dd class="field-odd"><p>Array of normalised flux measurements</p>
</dd>
<dt class="field-even">param sigma</dt>
<dd class="field-even"><p>Lower bound(s) on standard error for flux - array or scalar</p>
</dd>
<dt class="field-odd">param model</dt>
<dd class="field-odd"><p>Transit model to be scaled</p>
</dd>
<dt class="field-even">returns</dt>
<dd class="field-even"><p>s, sigma_s</p>
</dd>
</dl>
</div></blockquote>
<dl class="footnote brackets">
<dt class="label" id="id4"><span class="brackets"><a class="fn-backref" href="#id3">1</a></span></dt>
<dd><p>Sivia, D.S. & Skilling, J., Data Analysis - A Bayesian Tutorial, 2nd
ed., section 8.3.1</p>
</dd>
</dl>
</dd></dl>
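<p>Marginalising over the true errors with the distribution above gives the
modified likelihood discussed by Sivia &amp; Skilling (section 8.3.1), in
which each residual contributes [1 - exp(-R**2/2)]/R**2 with
R = (flux - m)/sigma. A sketch of maximising this likelihood numerically;
an illustration under those assumptions, not the pycheops implementation:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>import numpy as np
from scipy.optimize import minimize_scalar

def neg_log_like(s, flux, sigma, model):
    r2 = ((flux - 1 - s*(model - 1))/sigma)**2 + 1e-30  # guard against r=0
    return -np.sum(np.log((1 - np.exp(-0.5*r2))/r2))

# Usage (flux, sigma, model are numpy arrays of equal length):
# res = minimize_scalar(neg_log_like, args=(flux, sigma, model))
# s_best = res.x
</pre></div></div>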
</div>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h1 class="logo"><a href="index.html">pycheops</a></h1>
<h3>Navigation</h3>
<p class="caption"><span class="caption-text">Contents:</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="constants.html">constants</a></li>
<li class="toctree-l1"><a class="reference internal" href="funcs.html">funcs</a></li>
<li class="toctree-l1"><a class="reference internal" href="instrument.html">instrument</a></li>
<li class="toctree-l1"><a class="reference internal" href="ld.html">ld</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">models</a></li>
<li class="toctree-l1"><a class="reference internal" href="quantities.html">quantities</a></li>
</ul>
<div class="relations">
<h3>Related Topics</h3>
<ul>
<li><a href="index.html">Documentation overview</a><ul>
<li>Previous: <a href="ld.html" title="previous chapter">ld</a></li>
<li>Next: <a href="quantities.html" title="next chapter">quantities</a></li>
</ul></li>
</ul>
</div>
<div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" />
<input type="submit" value="Go" />
</form>
</div>
</div>
<script>$('#searchbox').show(0);</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
©2018, [email protected].
|
Powered by <a href="http://sphinx-doc.org/">Sphinx 2.4.0</a>
& <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
|
<a href="_sources/models.rst.txt"
rel="nofollow">Page source</a>
</div>
</body>
</html> | 33,302 | 41.478316 | 485 | html |
pycheops | pycheops-master/docs/_build/html/py-modindex.html |
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title>Python Module Index — pycheops 0.0.16 documentation</title>
<link rel="stylesheet" href="_static/alabaster.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/language_data.js"></script>
<script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="stylesheet" href="_static/custom.css" type="text/css" />
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
</head><body>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<h1>Python Module Index</h1>
<div class="modindex-jumpbox">
<a href="#cap-p"><strong>p</strong></a>
</div>
<table class="indextable modindextable">
<tr class="pcap"><td></td><td> </td><td></td></tr>
<tr class="cap" id="cap-p"><td></td><td>
<strong>p</strong></td><td></td></tr>
<tr>
<td><img src="_static/minus.png" class="toggler"
id="toggle-1" style="display: none" alt="-" /></td>
<td>
<a href="index.html#module-pycheops"><code class="xref">pycheops</code></a></td><td>
<em></em></td></tr>
<tr class="cg-1">
<td></td>
<td>   
<a href="constants.html#module-pycheops.constants"><code class="xref">pycheops.constants</code></a></td><td>
<em></em></td></tr>
<tr class="cg-1">
<td></td>
<td>   
<a href="funcs.html#module-pycheops.funcs"><code class="xref">pycheops.funcs</code></a></td><td>
<em></em></td></tr>
<tr class="cg-1">
<td></td>
<td>   
<a href="instrument.html#module-pycheops.instrument"><code class="xref">pycheops.instrument</code></a></td><td>
<em></em></td></tr>
<tr class="cg-1">
<td></td>
<td>   
<a href="ld.html#module-pycheops.ld"><code class="xref">pycheops.ld</code></a></td><td>
<em></em></td></tr>
<tr class="cg-1">
<td></td>
<td>   
<a href="models.html#module-pycheops.models"><code class="xref">pycheops.models</code></a></td><td>
<em></em></td></tr>
<tr class="cg-1">
<td></td>
<td>   
<a href="quantities.html#module-pycheops.quantities"><code class="xref">pycheops.quantities</code></a></td><td>
<em></em></td></tr>
</table>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h1 class="logo"><a href="index.html">pycheops</a></h1>
<h3>Navigation</h3>
<p class="caption"><span class="caption-text">Contents:</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="constants.html">constants</a></li>
<li class="toctree-l1"><a class="reference internal" href="funcs.html">funcs</a></li>
<li class="toctree-l1"><a class="reference internal" href="instrument.html">instrument</a></li>
<li class="toctree-l1"><a class="reference internal" href="ld.html">ld</a></li>
<li class="toctree-l1"><a class="reference internal" href="models.html">models</a></li>
<li class="toctree-l1"><a class="reference internal" href="quantities.html">quantities</a></li>
</ul>
<div class="relations">
<h3>Related Topics</h3>
<ul>
<li><a href="index.html">Documentation overview</a><ul>
</ul></li>
</ul>
</div>
<div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" />
<input type="submit" value="Go" />
</form>
</div>
</div>
<script>$('#searchbox').show(0);</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
©2018, [email protected].
|
Powered by <a href="http://sphinx-doc.org/">Sphinx 2.4.0</a>
& <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
</div>
</body>
</html> | 4,834 | 30.193548 | 133 | html |
pycheops | pycheops-master/docs/_build/html/quantities.html |
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title>quantities — pycheops 0.0.16 documentation</title>
<link rel="stylesheet" href="_static/alabaster.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/language_data.js"></script>
<script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="prev" title="models" href="models.html" />
<link rel="stylesheet" href="_static/custom.css" type="text/css" />
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
</head><body>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<div class="toctree-wrapper compound">
</div>
<span class="target" id="module-pycheops.quantities"></span><div class="section" id="quantities">
<h1>quantities<a class="headerlink" href="#quantities" title="Permalink to this headline">¶</a></h1>
<p>Nominal values of solar and planetary constants from IAU Resolution B3 <a class="footnote-reference brackets" href="#id5" id="id1">1</a>
plus related constants as astropy quantities.</p>
<p>Masses in SI units are derived using the 2014 CODATA value for the Newtonian
constant, G=6.67408E-11 m3.kg-1.s-2.</p>
<p>The following conversion constants are defined.</p>
<div class="section" id="solar-conversion-constants">
<h2>Solar conversion constants<a class="headerlink" href="#solar-conversion-constants" title="Permalink to this headline">¶</a></h2>
<ul class="simple">
<li><p>R_SunN - solar radius</p></li>
<li><p>S_SunN - total solar irradiance</p></li>
<li><p>L_SunN - solar luminosity</p></li>
<li><p>Teff_SunN - solar effective temperature</p></li>
<li><p>GM_SunN - solar mass parameter</p></li>
<li><p>M_SunN - solar mass derived from GM_SunN and G_2014</p></li>
<li><p>V_SunN - solar volume = (4.pi.R_SunN**3/3)</p></li>
</ul>
</div>
<div class="section" id="planetary-conversion-constants">
<h2>Planetary conversion constants<a class="headerlink" href="#planetary-conversion-constants" title="Permalink to this headline">¶</a></h2>
<ul class="simple">
<li><p>R_eEarthN - equatorial radius of the Earth</p></li>
<li><p>R_pEarthN - polar radius of the Earth</p></li>
<li><p>R_eJupN - equatorial radius of Jupiter</p></li>
<li><p>R_pJupN - polar radius of Jupiter</p></li>
<li><p>GM_EarthN - terrestrial mass parameter</p></li>
<li><p>GM_JupN - jovian mass parameter</p></li>
<li><p>M_EarthN - mass of the Earth from GM_EarthN and G_2014</p></li>
<li><p>M_JupN - mass of Jupiter from GM_JupN and G_2014</p></li>
<li><p>V_EarthN - volume of the Earth (4.pi.R_eEarthN^2.R_pEarthN/3)</p></li>
<li><p>V_JupN - volume of Jupiter (4.pi.R_eJupN^2.R_pJupN/3)</p></li>
<li><p>R_EarthN - volume-average radius of the Earth (3.V_EarthN/4.pi)^(1/3)</p></li>
<li><p>R_JupN - volume-average radius of Jupiter (3.V_JupN/4.pi)^(1/3)</p></li>
</ul>
</div>
<div class="section" id="related-constants">
<h2>Related constants<a class="headerlink" href="#related-constants" title="Permalink to this headline">¶</a></h2>
<ul class="simple">
<li><p>G_2014 - 2014 CODATA value for the Newtonian constant</p></li>
<li><p>mean_solar_day - 86,400.002 seconds <a class="footnote-reference brackets" href="#id6" id="id2">2</a></p></li>
<li><p>au - IAU 2009 value for astronomical constant in metres. <a class="footnote-reference brackets" href="#id7" id="id3">3</a></p></li>
<li><p>pc - 1 parsec = 3600*au*180/pi</p></li>
</ul>
</div>
<div class="section" id="fundamental-constants">
<h2>Fundamental constants<a class="headerlink" href="#fundamental-constants" title="Permalink to this headline">¶</a></h2>
<ul class="simple">
<li><p>c - speed of light in m.s-1 <a class="footnote-reference brackets" href="#id7" id="id4">3</a></p></li>
</ul>
</div>
<div class="section" id="example">
<h2>Example<a class="headerlink" href="#example" title="Permalink to this headline">¶</a></h2>
<p>Calculate the density relative to Jupiter for a planet 1/10 the radius of
the Sun with a mass 1/1000 of a solar mass. Note that we use the
volume-average radius for Jupiter in this case:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="kn">from</span> <span class="nn">pycheops.quantities</span> <span class="kn">import</span> <span class="n">M_SunN</span><span class="p">,</span> <span class="n">R_SunN</span><span class="p">,</span> <span class="n">M_JupN</span><span class="p">,</span> <span class="n">R_JupN</span>
<span class="gp">>>> </span><span class="n">M_planet_Jup</span> <span class="o">=</span> <span class="n">M_SunN</span><span class="o">/</span><span class="mi">1000</span> <span class="o">/</span> <span class="n">M_JupN</span>
<span class="gp">>>> </span><span class="n">R_planet_Jup</span> <span class="o">=</span> <span class="n">R_SunN</span><span class="o">/</span><span class="mi">10</span> <span class="o">/</span> <span class="n">R_JupN</span>
<span class="gp">>>> </span><span class="n">rho_planet_Jup</span> <span class="o">=</span> <span class="n">M_planet_Jup</span> <span class="o">/</span> <span class="p">(</span><span class="n">R_planet_Jup</span><span class="o">**</span><span class="mi">3</span><span class="p">)</span>
<span class="gp">>>> </span><span class="nb">print</span> <span class="p">(</span><span class="s2">"Planet mass = </span><span class="si">{:.3f}</span><span class="s2"> M_Jup"</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">M_planet_Jup</span><span class="p">))</span>
<span class="gp">>>> </span><span class="nb">print</span> <span class="p">(</span><span class="s2">"Planet radius = </span><span class="si">{:.3f}</span><span class="s2"> R_Jup"</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">R_planet_Jup</span><span class="p">))</span>
<span class="gp">>>> </span><span class="nb">print</span> <span class="p">(</span><span class="s2">"Planet density = </span><span class="si">{:.3f}</span><span class="s2"> rho_Jup"</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">rho_planet_Jup</span><span class="p">))</span>
<span class="go">Planet mass = 1.048 M_Jup</span>
<span class="go">Planet radius = 0.995 R_Jup</span>
<span class="go">Planet density = 1.063 rho_Jup</span>
</pre></div>
</div>
<p class="rubric">References</p>
<dl class="footnote brackets">
<dt class="label" id="id5"><span class="brackets"><a class="fn-backref" href="#id1">1</a></span></dt>
<dd><p><a class="reference external" href="https://www.iau.org/static/resolutions/IAU2015_English.pdf">https://www.iau.org/static/resolutions/IAU2015_English.pdf</a></p>
</dd>
<dt class="label" id="id6"><span class="brackets"><a class="fn-backref" href="#id2">2</a></span></dt>
<dd><p><a class="reference external" href="http://tycho.usno.navy.mil/leapsec.html">http://tycho.usno.navy.mil/leapsec.html</a></p>
</dd>
<dt class="label" id="id7"><span class="brackets">3</span><span class="fn-backref">(<a href="#id3">1</a>,<a href="#id4">2</a>)</span></dt>
<dd><p>Luzum et al., Celest Mech Dyn Astr (2011) 110:293-304</p>
</dd>
</dl>
</div>
</div>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h1 class="logo"><a href="index.html">pycheops</a></h1>
<h3>Navigation</h3>
<p class="caption"><span class="caption-text">Contents:</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="constants.html">constants</a></li>
<li class="toctree-l1"><a class="reference internal" href="funcs.html">funcs</a></li>
<li class="toctree-l1"><a class="reference internal" href="instrument.html">instrument</a></li>
<li class="toctree-l1"><a class="reference internal" href="ld.html">ld</a></li>
<li class="toctree-l1"><a class="reference internal" href="models.html">models</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">quantities</a></li>
</ul>
<div class="relations">
<h3>Related Topics</h3>
<ul>
<li><a href="index.html">Documentation overview</a><ul>
<li>Previous: <a href="models.html" title="previous chapter">models</a></li>
</ul></li>
</ul>
</div>
<div id="searchbox" style="display: none" role="search">
<h3 id="searchlabel">Quick search</h3>
<div class="searchformwrapper">
<form class="search" action="search.html" method="get">
<input type="text" name="q" aria-labelledby="searchlabel" />
<input type="submit" value="Go" />
</form>
</div>
</div>
<script>$('#searchbox').show(0);</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
©2018, [email protected].
|
Powered by <a href="http://sphinx-doc.org/">Sphinx 2.4.0</a>
& <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
|
<a href="_sources/quantities.rst.txt"
rel="nofollow">Page source</a>
</div>
</body>
</html> | 9,950 | 50.559585 | 416 | html |
pycheops | pycheops-master/docs/_build/html/search.html |
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title>Search — pycheops 0.0.16 documentation</title>
<link rel="stylesheet" href="_static/alabaster.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/language_data.js"></script>
<script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<script src="_static/searchtools.js"></script>
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="#" />
<script src="searchindex.js" defer></script>
<link rel="stylesheet" href="_static/custom.css" type="text/css" />
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
</head><body>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<h1 id="search-documentation">Search</h1>
<div id="fallback" class="admonition warning">
<script>$('#fallback').hide();</script>
<p>
Please activate JavaScript to enable the search
functionality.
</p>
</div>
<p>
From here you can search these documents. Enter your search
words into the box below and click "search". Note that the search
function will automatically search for all of the words. Pages
containing fewer words won't appear in the result list.
</p>
<form action="" method="get">
<input type="text" name="q" aria-labelledby="search-documentation" value="" />
<input type="submit" value="search" />
<span id="search-progress" style="padding-left: 10px"></span>
</form>
<div id="search-results">
</div>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h1 class="logo"><a href="index.html">pycheops</a></h1>
<h3>Navigation</h3>
<p class="caption"><span class="caption-text">Contents:</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="constants.html">constants</a></li>
<li class="toctree-l1"><a class="reference internal" href="funcs.html">funcs</a></li>
<li class="toctree-l1"><a class="reference internal" href="instrument.html">instrument</a></li>
<li class="toctree-l1"><a class="reference internal" href="ld.html">ld</a></li>
<li class="toctree-l1"><a class="reference internal" href="models.html">models</a></li>
<li class="toctree-l1"><a class="reference internal" href="quantities.html">quantities</a></li>
</ul>
<div class="relations">
<h3>Related Topics</h3>
<ul>
<li><a href="index.html">Documentation overview</a><ul>
</ul></li>
</ul>
</div>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="footer">
©2018, [email protected].
|
Powered by <a href="http://sphinx-doc.org/">Sphinx 2.4.0</a>
& <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
</div>
</body>
</html> | 3,484 | 27.801653 | 133 | html |
pycheops | pycheops-master/docs/_build/html/_static/alabaster.css | @import url("basic.css");
/* -- page layout ----------------------------------------------------------- */
body {
font-family: Georgia, serif;
font-size: 17px;
background-color: #fff;
color: #000;
margin: 0;
padding: 0;
}
div.document {
width: 940px;
margin: 30px auto 0 auto;
}
div.documentwrapper {
float: left;
width: 100%;
}
div.bodywrapper {
margin: 0 0 0 220px;
}
div.sphinxsidebar {
width: 220px;
font-size: 14px;
line-height: 1.5;
}
hr {
border: 1px solid #B1B4B6;
}
div.body {
background-color: #fff;
color: #3E4349;
padding: 0 30px 0 30px;
}
div.body > .section {
text-align: left;
}
div.footer {
width: 940px;
margin: 20px auto 30px auto;
font-size: 14px;
color: #888;
text-align: right;
}
div.footer a {
color: #888;
}
p.caption {
font-family: inherit;
font-size: inherit;
}
div.relations {
display: none;
}
div.sphinxsidebar a {
color: #444;
text-decoration: none;
border-bottom: 1px dotted #999;
}
div.sphinxsidebar a:hover {
border-bottom: 1px solid #999;
}
div.sphinxsidebarwrapper {
padding: 18px 10px;
}
div.sphinxsidebarwrapper p.logo {
padding: 0;
margin: -10px 0 0 0px;
text-align: center;
}
div.sphinxsidebarwrapper h1.logo {
margin-top: -10px;
text-align: center;
margin-bottom: 5px;
text-align: left;
}
div.sphinxsidebarwrapper h1.logo-name {
margin-top: 0px;
}
div.sphinxsidebarwrapper p.blurb {
margin-top: 0;
font-style: normal;
}
div.sphinxsidebar h3,
div.sphinxsidebar h4 {
font-family: Georgia, serif;
color: #444;
font-size: 24px;
font-weight: normal;
margin: 0 0 5px 0;
padding: 0;
}
div.sphinxsidebar h4 {
font-size: 20px;
}
div.sphinxsidebar h3 a {
color: #444;
}
div.sphinxsidebar p.logo a,
div.sphinxsidebar h3 a,
div.sphinxsidebar p.logo a:hover,
div.sphinxsidebar h3 a:hover {
border: none;
}
div.sphinxsidebar p {
color: #555;
margin: 10px 0;
}
div.sphinxsidebar ul {
margin: 10px 0;
padding: 0;
color: #000;
}
div.sphinxsidebar ul li.toctree-l1 > a {
font-size: 120%;
}
div.sphinxsidebar ul li.toctree-l2 > a {
font-size: 110%;
}
div.sphinxsidebar input {
border: 1px solid #CCC;
font-family: Georgia, serif;
font-size: 1em;
}
div.sphinxsidebar hr {
border: none;
height: 1px;
color: #AAA;
background: #AAA;
text-align: left;
margin-left: 0;
width: 50%;
}
div.sphinxsidebar .badge {
border-bottom: none;
}
div.sphinxsidebar .badge:hover {
border-bottom: none;
}
/* To address an issue with donation coming after search */
div.sphinxsidebar h3.donation {
margin-top: 10px;
}
/* -- body styles ----------------------------------------------------------- */
a {
color: #004B6B;
text-decoration: underline;
}
a:hover {
color: #6D4100;
text-decoration: underline;
}
div.body h1,
div.body h2,
div.body h3,
div.body h4,
div.body h5,
div.body h6 {
font-family: Georgia, serif;
font-weight: normal;
margin: 30px 0px 10px 0px;
padding: 0;
}
div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; }
div.body h2 { font-size: 180%; }
div.body h3 { font-size: 150%; }
div.body h4 { font-size: 130%; }
div.body h5 { font-size: 100%; }
div.body h6 { font-size: 100%; }
a.headerlink {
color: #DDD;
padding: 0 4px;
text-decoration: none;
}
a.headerlink:hover {
color: #444;
background: #EAEAEA;
}
div.body p, div.body dd, div.body li {
line-height: 1.4em;
}
div.admonition {
margin: 20px 0px;
padding: 10px 30px;
background-color: #EEE;
border: 1px solid #CCC;
}
div.admonition tt.xref, div.admonition code.xref, div.admonition a tt {
background-color: #FBFBFB;
border-bottom: 1px solid #fafafa;
}
div.admonition p.admonition-title {
font-family: Georgia, serif;
font-weight: normal;
font-size: 24px;
margin: 0 0 10px 0;
padding: 0;
line-height: 1;
}
div.admonition p.last {
margin-bottom: 0;
}
div.highlight {
background-color: #fff;
}
dt:target, .highlight {
background: #FAF3E8;
}
div.warning {
background-color: #FCC;
border: 1px solid #FAA;
}
div.danger {
background-color: #FCC;
border: 1px solid #FAA;
-moz-box-shadow: 2px 2px 4px #D52C2C;
-webkit-box-shadow: 2px 2px 4px #D52C2C;
box-shadow: 2px 2px 4px #D52C2C;
}
div.error {
background-color: #FCC;
border: 1px solid #FAA;
-moz-box-shadow: 2px 2px 4px #D52C2C;
-webkit-box-shadow: 2px 2px 4px #D52C2C;
box-shadow: 2px 2px 4px #D52C2C;
}
div.caution {
background-color: #FCC;
border: 1px solid #FAA;
}
div.attention {
background-color: #FCC;
border: 1px solid #FAA;
}
div.important {
background-color: #EEE;
border: 1px solid #CCC;
}
div.note {
background-color: #EEE;
border: 1px solid #CCC;
}
div.tip {
background-color: #EEE;
border: 1px solid #CCC;
}
div.hint {
background-color: #EEE;
border: 1px solid #CCC;
}
div.seealso {
background-color: #EEE;
border: 1px solid #CCC;
}
div.topic {
background-color: #EEE;
}
p.admonition-title {
display: inline;
}
p.admonition-title:after {
content: ":";
}
pre, tt, code {
font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
font-size: 0.9em;
}
.hll {
background-color: #FFC;
margin: 0 -12px;
padding: 0 12px;
display: block;
}
img.screenshot {
}
tt.descname, tt.descclassname, code.descname, code.descclassname {
font-size: 0.95em;
}
tt.descname, code.descname {
padding-right: 0.08em;
}
img.screenshot {
-moz-box-shadow: 2px 2px 4px #EEE;
-webkit-box-shadow: 2px 2px 4px #EEE;
box-shadow: 2px 2px 4px #EEE;
}
table.docutils {
border: 1px solid #888;
-moz-box-shadow: 2px 2px 4px #EEE;
-webkit-box-shadow: 2px 2px 4px #EEE;
box-shadow: 2px 2px 4px #EEE;
}
table.docutils td, table.docutils th {
border: 1px solid #888;
padding: 0.25em 0.7em;
}
table.field-list, table.footnote {
border: none;
-moz-box-shadow: none;
-webkit-box-shadow: none;
box-shadow: none;
}
table.footnote {
margin: 15px 0;
width: 100%;
border: 1px solid #EEE;
background: #FDFDFD;
font-size: 0.9em;
}
table.footnote + table.footnote {
margin-top: -15px;
border-top: none;
}
table.field-list th {
padding: 0 0.8em 0 0;
}
table.field-list td {
padding: 0;
}
table.field-list p {
margin-bottom: 0.8em;
}
/* Cloned from
* https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68
*/
.field-name {
-moz-hyphens: manual;
-ms-hyphens: manual;
-webkit-hyphens: manual;
hyphens: manual;
}
table.footnote td.label {
width: .1px;
padding: 0.3em 0 0.3em 0.5em;
}
table.footnote td {
padding: 0.3em 0.5em;
}
dl {
margin: 0;
padding: 0;
}
dl dd {
margin-left: 30px;
}
blockquote {
margin: 0 0 0 30px;
padding: 0;
}
ul, ol {
/* Matches the 30px from the narrow-screen "li > ul" selector below */
margin: 10px 0 10px 30px;
padding: 0;
}
pre {
background: #EEE;
padding: 7px 30px;
margin: 15px 0px;
line-height: 1.3em;
}
div.viewcode-block:target {
background: #ffd;
}
dl pre, blockquote pre, li pre {
margin-left: 0;
padding-left: 30px;
}
tt, code {
background-color: #ecf0f3;
color: #222;
/* padding: 1px 2px; */
}
tt.xref, code.xref, a tt {
background-color: #FBFBFB;
border-bottom: 1px solid #fff;
}
a.reference {
text-decoration: none;
border-bottom: 1px dotted #004B6B;
}
/* Don't put an underline on images */
a.image-reference, a.image-reference:hover {
border-bottom: none;
}
a.reference:hover {
border-bottom: 1px solid #6D4100;
}
a.footnote-reference {
text-decoration: none;
font-size: 0.7em;
vertical-align: top;
border-bottom: 1px dotted #004B6B;
}
a.footnote-reference:hover {
border-bottom: 1px solid #6D4100;
}
a:hover tt, a:hover code {
background: #EEE;
}
@media screen and (max-width: 870px) {
div.sphinxsidebar {
display: none;
}
div.document {
width: 100%;
}
div.documentwrapper {
margin-left: 0;
margin-top: 0;
margin-right: 0;
margin-bottom: 0;
}
div.bodywrapper {
margin-top: 0;
margin-right: 0;
margin-bottom: 0;
margin-left: 0;
}
ul {
margin-left: 0;
}
li > ul {
/* Matches the 30px from the "ul, ol" selector above */
margin-left: 30px;
}
.document {
width: auto;
}
.footer {
width: auto;
}
.bodywrapper {
margin: 0;
}
.footer {
width: auto;
}
.github {
display: none;
}
}
@media screen and (max-width: 875px) {
body {
margin: 0;
padding: 20px 30px;
}
div.documentwrapper {
float: none;
background: #fff;
}
div.sphinxsidebar {
display: block;
float: none;
width: 102.5%;
margin: 50px -30px -20px -30px;
padding: 10px 20px;
background: #333;
color: #FFF;
}
div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p,
div.sphinxsidebar h3 a {
color: #fff;
}
div.sphinxsidebar a {
color: #AAA;
}
div.sphinxsidebar p.logo {
display: none;
}
div.document {
width: 100%;
margin: 0;
}
div.footer {
display: none;
}
div.bodywrapper {
margin: 0;
}
div.body {
min-height: 0;
padding: 0;
}
.rtd_doc_footer {
display: none;
}
.document {
width: auto;
}
.footer {
width: auto;
}
.footer {
width: auto;
}
.github {
display: none;
}
}
/* misc. */
.revsys-inline {
display: none!important;
}
/* Make nested-list/multi-paragraph items look better in Releases changelog
 * pages. Without this, docutils' magical list handling causes inconsistent
* formatting between different release sub-lists.
*/
div#changelog > div.section > ul > li > p:only-child {
margin-bottom: 0;
}
/* Hide fugly table cell borders in ..bibliography:: directive output */
table.docutils.citation, table.docutils.citation td, table.docutils.citation th {
border: none;
/* Below needed in some edge cases; if not applied, bottom shadows appear */
-moz-box-shadow: none;
-webkit-box-shadow: none;
box-shadow: none;
}
/* relbar */
.related {
line-height: 30px;
width: 100%;
font-size: 0.9rem;
}
.related.top {
border-bottom: 1px solid #EEE;
margin-bottom: 20px;
}
.related.bottom {
border-top: 1px solid #EEE;
}
.related ul {
padding: 0;
margin: 0;
list-style: none;
}
.related li {
display: inline;
}
nav#rellinks {
float: right;
}
nav#rellinks li+li:before {
content: "|";
}
nav#breadcrumbs li+li:before {
content: "\00BB";
}
/* Hide certain items when printing */
@media print {
div.related {
display: none;
}
} | 11,185 | 14.957204 | 96 | css |
pycheops | pycheops-master/docs/_build/html/_static/basic.css | /*
* basic.css
* ~~~~~~~~~
*
* Sphinx stylesheet -- basic theme.
*
* :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
/* -- main layout ----------------------------------------------------------- */
div.clearer {
clear: both;
}
/* -- relbar ---------------------------------------------------------------- */
div.related {
width: 100%;
font-size: 90%;
}
div.related h3 {
display: none;
}
div.related ul {
margin: 0;
padding: 0 0 0 10px;
list-style: none;
}
div.related li {
display: inline;
}
div.related li.right {
float: right;
margin-right: 5px;
}
/* -- sidebar --------------------------------------------------------------- */
div.sphinxsidebarwrapper {
padding: 10px 5px 0 10px;
}
div.sphinxsidebar {
float: left;
width: 230px;
margin-left: -100%;
font-size: 90%;
word-wrap: break-word;
overflow-wrap : break-word;
}
div.sphinxsidebar ul {
list-style: none;
}
div.sphinxsidebar ul ul,
div.sphinxsidebar ul.want-points {
margin-left: 20px;
list-style: square;
}
div.sphinxsidebar ul ul {
margin-top: 0;
margin-bottom: 0;
}
div.sphinxsidebar form {
margin-top: 10px;
}
div.sphinxsidebar input {
border: 1px solid #98dbcc;
font-family: sans-serif;
font-size: 1em;
}
div.sphinxsidebar #searchbox form.search {
overflow: hidden;
}
div.sphinxsidebar #searchbox input[type="text"] {
float: left;
width: 80%;
padding: 0.25em;
box-sizing: border-box;
}
div.sphinxsidebar #searchbox input[type="submit"] {
float: left;
width: 20%;
border-left: none;
padding: 0.25em;
box-sizing: border-box;
}
img {
border: 0;
max-width: 100%;
}
/* -- search page ----------------------------------------------------------- */
ul.search {
margin: 10px 0 0 20px;
padding: 0;
}
ul.search li {
padding: 5px 0 5px 20px;
background-image: url(file.png);
background-repeat: no-repeat;
background-position: 0 7px;
}
ul.search li a {
font-weight: bold;
}
ul.search li div.context {
color: #888;
margin: 2px 0 0 30px;
text-align: left;
}
ul.keywordmatches li.goodmatch a {
font-weight: bold;
}
/* -- index page ------------------------------------------------------------ */
table.contentstable {
width: 90%;
margin-left: auto;
margin-right: auto;
}
table.contentstable p.biglink {
line-height: 150%;
}
a.biglink {
font-size: 1.3em;
}
span.linkdescr {
font-style: italic;
padding-top: 5px;
font-size: 90%;
}
/* -- general index --------------------------------------------------------- */
table.indextable {
width: 100%;
}
table.indextable td {
text-align: left;
vertical-align: top;
}
table.indextable ul {
margin-top: 0;
margin-bottom: 0;
list-style-type: none;
}
table.indextable > tbody > tr > td > ul {
padding-left: 0em;
}
table.indextable tr.pcap {
height: 10px;
}
table.indextable tr.cap {
margin-top: 10px;
background-color: #f2f2f2;
}
img.toggler {
margin-right: 3px;
margin-top: 3px;
cursor: pointer;
}
div.modindex-jumpbox {
border-top: 1px solid #ddd;
border-bottom: 1px solid #ddd;
margin: 1em 0 1em 0;
padding: 0.4em;
}
div.genindex-jumpbox {
border-top: 1px solid #ddd;
border-bottom: 1px solid #ddd;
margin: 1em 0 1em 0;
padding: 0.4em;
}
/* -- domain module index --------------------------------------------------- */
table.modindextable td {
padding: 2px;
border-collapse: collapse;
}
/* -- general body styles --------------------------------------------------- */
div.body {
min-width: 450px;
max-width: 800px;
}
div.body p, div.body dd, div.body li, div.body blockquote {
-moz-hyphens: auto;
-ms-hyphens: auto;
-webkit-hyphens: auto;
hyphens: auto;
}
a.headerlink {
visibility: hidden;
}
a.brackets:before,
span.brackets > a:before{
content: "[";
}
a.brackets:after,
span.brackets > a:after {
content: "]";
}
h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
h4:hover > a.headerlink,
h5:hover > a.headerlink,
h6:hover > a.headerlink,
dt:hover > a.headerlink,
caption:hover > a.headerlink,
p.caption:hover > a.headerlink,
div.code-block-caption:hover > a.headerlink {
visibility: visible;
}
div.body p.caption {
text-align: inherit;
}
div.body td {
text-align: left;
}
.first {
margin-top: 0 !important;
}
p.rubric {
margin-top: 30px;
font-weight: bold;
}
img.align-left, .figure.align-left, object.align-left {
clear: left;
float: left;
margin-right: 1em;
}
img.align-right, .figure.align-right, object.align-right {
clear: right;
float: right;
margin-left: 1em;
}
img.align-center, .figure.align-center, object.align-center {
display: block;
margin-left: auto;
margin-right: auto;
}
img.align-default, .figure.align-default {
display: block;
margin-left: auto;
margin-right: auto;
}
.align-left {
text-align: left;
}
.align-center {
text-align: center;
}
.align-default {
text-align: center;
}
.align-right {
text-align: right;
}
/* -- sidebars -------------------------------------------------------------- */
div.sidebar {
margin: 0 0 0.5em 1em;
border: 1px solid #ddb;
padding: 7px 7px 0 7px;
background-color: #ffe;
width: 40%;
float: right;
}
p.sidebar-title {
font-weight: bold;
}
/* -- topics ---------------------------------------------------------------- */
div.topic {
border: 1px solid #ccc;
padding: 7px 7px 0 7px;
margin: 10px 0 10px 0;
}
p.topic-title {
font-size: 1.1em;
font-weight: bold;
margin-top: 10px;
}
/* -- admonitions ----------------------------------------------------------- */
div.admonition {
margin-top: 10px;
margin-bottom: 10px;
padding: 7px;
}
div.admonition dt {
font-weight: bold;
}
div.admonition dl {
margin-bottom: 0;
}
p.admonition-title {
margin: 0px 10px 5px 0px;
font-weight: bold;
}
div.body p.centered {
text-align: center;
margin-top: 25px;
}
/* -- tables ---------------------------------------------------------------- */
table.docutils {
border: 0;
border-collapse: collapse;
}
table.align-center {
margin-left: auto;
margin-right: auto;
}
table.align-default {
margin-left: auto;
margin-right: auto;
}
table caption span.caption-number {
font-style: italic;
}
table caption span.caption-text {
}
table.docutils td, table.docutils th {
padding: 1px 8px 1px 5px;
border-top: 0;
border-left: 0;
border-right: 0;
border-bottom: 1px solid #aaa;
}
table.footnote td, table.footnote th {
border: 0 !important;
}
th {
text-align: left;
padding-right: 5px;
}
table.citation {
border-left: solid 1px gray;
margin-left: 1px;
}
table.citation td {
border-bottom: none;
}
th > p:first-child,
td > p:first-child {
margin-top: 0px;
}
th > p:last-child,
td > p:last-child {
margin-bottom: 0px;
}
/* -- figures --------------------------------------------------------------- */
div.figure {
margin: 0.5em;
padding: 0.5em;
}
div.figure p.caption {
padding: 0.3em;
}
div.figure p.caption span.caption-number {
font-style: italic;
}
div.figure p.caption span.caption-text {
}
/* -- field list styles ----------------------------------------------------- */
table.field-list td, table.field-list th {
border: 0 !important;
}
.field-list ul {
margin: 0;
padding-left: 1em;
}
.field-list p {
margin: 0;
}
.field-name {
-moz-hyphens: manual;
-ms-hyphens: manual;
-webkit-hyphens: manual;
hyphens: manual;
}
/* -- hlist styles ---------------------------------------------------------- */
table.hlist td {
vertical-align: top;
}
/* -- other body styles ----------------------------------------------------- */
ol.arabic {
list-style: decimal;
}
ol.loweralpha {
list-style: lower-alpha;
}
ol.upperalpha {
list-style: upper-alpha;
}
ol.lowerroman {
list-style: lower-roman;
}
ol.upperroman {
list-style: upper-roman;
}
li > p:first-child {
margin-top: 0px;
}
li > p:last-child {
margin-bottom: 0px;
}
dl.footnote > dt,
dl.citation > dt {
float: left;
}
dl.footnote > dd,
dl.citation > dd {
margin-bottom: 0em;
}
dl.footnote > dd:after,
dl.citation > dd:after {
content: "";
clear: both;
}
dl.field-list {
display: grid;
grid-template-columns: fit-content(30%) auto;
}
dl.field-list > dt {
font-weight: bold;
word-break: break-word;
padding-left: 0.5em;
padding-right: 5px;
}
dl.field-list > dt:after {
content: ":";
}
dl.field-list > dd {
padding-left: 0.5em;
margin-top: 0em;
margin-left: 0em;
margin-bottom: 0em;
}
dl {
margin-bottom: 15px;
}
dd > p:first-child {
margin-top: 0px;
}
dd ul, dd table {
margin-bottom: 10px;
}
dd {
margin-top: 3px;
margin-bottom: 10px;
margin-left: 30px;
}
dt:target, span.highlighted {
background-color: #fbe54e;
}
rect.highlighted {
fill: #fbe54e;
}
dl.glossary dt {
font-weight: bold;
font-size: 1.1em;
}
.optional {
font-size: 1.3em;
}
.sig-paren {
font-size: larger;
}
.versionmodified {
font-style: italic;
}
.system-message {
background-color: #fda;
padding: 5px;
border: 3px solid red;
}
.footnote:target {
background-color: #ffa;
}
.line-block {
display: block;
margin-top: 1em;
margin-bottom: 1em;
}
.line-block .line-block {
margin-top: 0;
margin-bottom: 0;
margin-left: 1.5em;
}
.guilabel, .menuselection {
font-family: sans-serif;
}
.accelerator {
text-decoration: underline;
}
.classifier {
font-style: oblique;
}
.classifier:before {
font-style: normal;
margin: 0.5em;
content: ":";
}
abbr, acronym {
border-bottom: dotted 1px;
cursor: help;
}
/* -- code displays --------------------------------------------------------- */
pre {
overflow: auto;
overflow-y: hidden; /* fixes display issues on Chrome browsers */
}
span.pre {
-moz-hyphens: none;
-ms-hyphens: none;
-webkit-hyphens: none;
hyphens: none;
}
td.linenos pre {
padding: 5px 0px;
border: 0;
background-color: transparent;
color: #aaa;
}
table.highlighttable {
margin-left: 0.5em;
}
table.highlighttable td {
padding: 0 0.5em 0 0.5em;
}
div.code-block-caption {
padding: 2px 5px;
font-size: small;
}
div.code-block-caption code {
background-color: transparent;
}
div.code-block-caption + div > div.highlight > pre {
margin-top: 0;
}
div.doctest > div.highlight span.gp { /* gp: Generic.Prompt */
user-select: none;
}
div.code-block-caption span.caption-number {
padding: 0.1em 0.3em;
font-style: italic;
}
div.code-block-caption span.caption-text {
}
div.literal-block-wrapper {
padding: 1em 1em 0;
}
div.literal-block-wrapper div.highlight {
margin: 0;
}
code.descname {
background-color: transparent;
font-weight: bold;
font-size: 1.2em;
}
code.descclassname {
background-color: transparent;
}
code.xref, a code {
background-color: transparent;
font-weight: bold;
}
h1 code, h2 code, h3 code, h4 code, h5 code, h6 code {
background-color: transparent;
}
.viewcode-link {
float: right;
}
.viewcode-back {
float: right;
font-family: sans-serif;
}
div.viewcode-block:target {
margin: -1px -10px;
padding: 0 10px;
}
/* -- math display ---------------------------------------------------------- */
img.math {
vertical-align: middle;
}
div.body div.math p {
text-align: center;
}
span.eqno {
float: right;
}
span.eqno a.headerlink {
position: relative;
left: 0px;
z-index: 1;
}
div.math:hover a.headerlink {
visibility: visible;
}
/* -- printout stylesheet --------------------------------------------------- */
@media print {
div.document,
div.documentwrapper,
div.bodywrapper {
margin: 0 !important;
width: 100%;
}
div.sphinxsidebar,
div.related,
div.footer,
#top-link {
display: none;
}
} | 12,261 | 14.966146 | 80 | css |
pycheops | pycheops-master/docs/_build/html/_static/custom.css | /* This file intentionally left blank. */
| 42 | 20.5 | 41 | css |
pycheops | pycheops-master/docs/_build/html/_static/pygments.css | .highlight .hll { background-color: #ffffcc }
.highlight { background: #eeffcc; }
.highlight .c { color: #408090; font-style: italic } /* Comment */
.highlight .err { border: 1px solid #FF0000 } /* Error */
.highlight .k { color: #007020; font-weight: bold } /* Keyword */
.highlight .o { color: #666666 } /* Operator */
.highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */
.highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */
.highlight .cp { color: #007020 } /* Comment.Preproc */
.highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */
.highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */
.highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */
.highlight .gd { color: #A00000 } /* Generic.Deleted */
.highlight .ge { font-style: italic } /* Generic.Emph */
.highlight .gr { color: #FF0000 } /* Generic.Error */
.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
.highlight .gi { color: #00A000 } /* Generic.Inserted */
.highlight .go { color: #333333 } /* Generic.Output */
.highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */
.highlight .gs { font-weight: bold } /* Generic.Strong */
.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
.highlight .gt { color: #0044DD } /* Generic.Traceback */
.highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */
.highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */
.highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */
.highlight .kp { color: #007020 } /* Keyword.Pseudo */
.highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */
.highlight .kt { color: #902000 } /* Keyword.Type */
.highlight .m { color: #208050 } /* Literal.Number */
.highlight .s { color: #4070a0 } /* Literal.String */
.highlight .na { color: #4070a0 } /* Name.Attribute */
.highlight .nb { color: #007020 } /* Name.Builtin */
.highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */
.highlight .no { color: #60add5 } /* Name.Constant */
.highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */
.highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */
.highlight .ne { color: #007020 } /* Name.Exception */
.highlight .nf { color: #06287e } /* Name.Function */
.highlight .nl { color: #002070; font-weight: bold } /* Name.Label */
.highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */
.highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */
.highlight .nv { color: #bb60d5 } /* Name.Variable */
.highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */
.highlight .w { color: #bbbbbb } /* Text.Whitespace */
.highlight .mb { color: #208050 } /* Literal.Number.Bin */
.highlight .mf { color: #208050 } /* Literal.Number.Float */
.highlight .mh { color: #208050 } /* Literal.Number.Hex */
.highlight .mi { color: #208050 } /* Literal.Number.Integer */
.highlight .mo { color: #208050 } /* Literal.Number.Oct */
.highlight .sa { color: #4070a0 } /* Literal.String.Affix */
.highlight .sb { color: #4070a0 } /* Literal.String.Backtick */
.highlight .sc { color: #4070a0 } /* Literal.String.Char */
.highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */
.highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */
.highlight .s2 { color: #4070a0 } /* Literal.String.Double */
.highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */
.highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */
.highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */
.highlight .sx { color: #c65d09 } /* Literal.String.Other */
.highlight .sr { color: #235388 } /* Literal.String.Regex */
.highlight .s1 { color: #4070a0 } /* Literal.String.Single */
.highlight .ss { color: #517918 } /* Literal.String.Symbol */
.highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */
.highlight .fm { color: #06287e } /* Name.Function.Magic */
.highlight .vc { color: #bb60d5 } /* Name.Variable.Class */
.highlight .vg { color: #bb60d5 } /* Name.Variable.Global */
.highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */
.highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */
.highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ | 4,395 | 62.710145 | 83 | css |
pycheops | pycheops-master/pycheops/__init__.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
********
pycheops
********
This package provides tools for the analysis of light curves from the ESA
CHEOPS mission <http://cheops.unibe.ch/>.
"""
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'VERSION')) as version_file:
__version__ = version_file.read().strip()
"""
Create pickle files for interpolation within various data tables
"""
from scipy.interpolate import interp1d, NearestNDInterpolator
from photutils import CircularAperture, aperture_photometry
import numpy as np
import pickle
from astropy.table import Table
from .core import load_config
try:
config = load_config()
except ValueError:
from .core import setup_config
setup_config()
config = load_config()
data_path = path.join(here,'data','instrument')
cache_path = config['DEFAULT']['data_cache_path']
# Photometric aperture contamination calculation from PSF for make_xml_files
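# The interpolator pickled below maps the separation (in pixels) between
# the target and a contaminating star onto the fraction of the
# contaminant's flux falling in an aperture of radius 33 pixels. It is
# built by moving the aperture around the reference PSF and re-measuring
# the enclosed flux; accuracy is limited by the PSF model used.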
pfile = path.join(cache_path,'Contamination_33arcsec_aperture.p')
try:
psf_path = path.join(data_path, config['psf_file']['psf_file'])
except KeyError:
raise KeyError("Run pycheops.core.setup_config(overwrite=True) to"
" update your config file.")
if not path.isfile(pfile) or (path.getmtime(pfile) < path.getmtime(psf_path)):
radius = 33 # Aperture radius in pixels
psf_x0 = config['psf_file']['x0']
psf_y0 = config['psf_file']['y0']
with open(psf_path) as fp:
data = [[float(digit) for digit in line.split()] for line in fp]
position0 = [psf_x0, psf_y0]
aperture0 = CircularAperture(position0, r=radius)
photTable0 = aperture_photometry(data, aperture0)
target_flux = photTable0['aperture_sum'][0]
rad = np.linspace(0.0,125,25,endpoint=True)
contam = np.zeros_like(rad)
contam[0] = 1.0
for i,r in enumerate(rad[1:]):
nthe = max(4, int(round(r/5)))
the = np.linspace(0,2*np.pi,nthe)
pos= np.array((100+np.array(r*np.cos(the)),
100+np.array(r*np.sin(the)))).T
apertures = CircularAperture(pos, r=radius)
photTable = aperture_photometry(data, apertures)
contam[i+1] = max(photTable['aperture_sum'])/target_flux
contam = np.array(contam) # convert to numpy array else sphinx complains
I = interp1d(rad, contam,fill_value=min(contam),bounds_error=False)
with open(pfile,'wb') as fp:
pickle.dump(I,fp)
# Visibility calculator for instrument.py and make_xml_files
pfile = path.join(cache_path,'visibility_interpolator.p')
if not path.isfile(pfile):
vfile = path.join(data_path,'VisibilityTable.csv')
visTable = Table.read(vfile)
ra_ = visTable['RA']*180/np.pi
dec_ = visTable['Dec']*180/np.pi
vis = visTable['Efficiency']
I = NearestNDInterpolator((np.array([ra_,dec_])).T,vis)
with open(pfile,'wb') as fp:
pickle.dump(I,fp)
# T_eff v. G_BP-G_RP colour from
# http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt
# Version 2019.3.22
pfile = path.join(cache_path,'Teff_BP_RP_interpolator.p')
if not path.isfile(pfile):
fT = path.join(here,'data','EEM_dwarf_UBVIJHK_colors_Teff',
'EEM_dwarf_UBVIJHK_colors_Teff.txt')
T = Table.read(fT,format='ascii',header_start=-1,
fill_values=('...',np.nan))
b_p = np.array(T['Bp-Rp']) # convert to numpy array else sphinx complains
Teff = np.array(T['Teff']) # convert to numpy array else sphinx complains
I = interp1d(b_p,Teff,bounds_error=False,
fill_value='extrapolate')
with open(pfile,'wb') as fp:
pickle.dump(I,fp)
# CHEOPS magnitude - Gaia G magnitude v. T_eff
# From ImageETCc1.4 exposure time calculator spreadsheet
pfile = path.join(cache_path,'C_G_Teff_interpolator.p')
if not path.isfile(pfile):
fT = path.join(here,'data','instrument', 'C_G_Teff.csv')
T = Table.read(fT,format='csv')
Teff = np.array(T['Teff']) # convert to numpy array else sphinx complains
C_G = np.array(T['C-G']) # convert to numpy array else sphinx complains
I = interp1d(Teff,C_G,bounds_error=False, fill_value='extrapolate')
with open(pfile,'wb') as fp:
pickle.dump(I,fp)
from .dataset import Dataset
from .multivisit import MultiVisit
from .starproperties import StarProperties
from .planetproperties import PlanetProperties
| 5,122 | 38.407692 | 78 | py |
pycheops | pycheops-master/pycheops/calculate_coefficients.py | #!/usr/bin/env python
import numpy as np
from scipy.interpolate import pchip_interpolate
from scipy.optimize import minimize
import argparse
import textwrap
from ellc import lc
def q1q2_to_h1h2(q1, q2):
return 1 - np.sqrt(q1) + q2*np.sqrt(q1), 1 - np.sqrt(q1)
def h1h2_to_ca(h1, h2):
return 1 - h1 + h2, np.log2((1 - h1 + h2)/h2)
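# The two functions above implement the power-2 limb-darkening law,
# I(mu) = 1 - c*(1 - mu**alpha), via the decorrelated parameters
# h1 = I(0.5) = 1 - c*(1 - 2**-alpha) and h2 = h1 - I(0) = c*2**-alpha
# (Maxted 2018), sampled through (q1, q2) as in Short et al. (2019).
# Round-trip sketch with illustrative values:
#   h1, h2 = q1q2_to_h1h2(0.3, 0.45)  # -> h1 ~ 0.699, h2 ~ 0.452
#   c, a = h1h2_to_ca(h1, h2)         # then c*2**-a recovers h2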
def transit_width(r, k, b, P=1):
return P*np.arcsin(r*np.sqrt( ((1+k)**2-b**2) / (1-b**2*r**2) ))/np.pi
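# transit_width() returns the total (first-to-fourth contact) transit
# duration for a circular orbit, where r = R_star/a, k is the radius ratio
# and b the impact parameter; cf. eq. (3) of Seager & Mallen-Ornelas (2003).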
def func(c, t, r_1, k, incl, grid_size, lc_mugrid):
h1,h2 = q1q2_to_h1h2(c[0],c[1])
c2,a2 = h1h2_to_ca(h1,h2)
ldc_1 = [c2, a2]
try:
lc_fit = lc(t, radius_1=r_1, radius_2=r_1*k,
sbratio=0, incl=incl, ld_1='power-2', ldc_1=ldc_1,
grid_1=grid_size, grid_2=grid_size)
    except Exception:
        lc_fit = np.zeros_like(t)
return (lc_fit - lc_mugrid).std()
#---------------
def main():
parser = argparse.ArgumentParser(
description='Optimize limb-darkening coefficients',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog = textwrap.dedent('''\
Optimize limb-darkening coefficients by fitting a transit light curve
Input files containing centre-to-limb intensity profile must have three
columns with mu in the _2nd_ column and intensity in the _3rd_column.
N.B. input data must be in ascending order of mu.
Currently only implements power-2 law - others to be added.
The output is a single line with the parameters in the following order
profile c alpha h_1 h_2 q_1 q_2 rms
rms is the root-mean-square residual of the fit to the transit light
curve in ppm.
Notes
~~~~~
- If repeated intensity values with mu=0 are present at the start of
the data file then only the last one in the list is used.
- If the data file does not contain data at mu=0, I(0)=0 will be used.
- If the data file does not contain data at mu=1, I(1)=1 will be used.
--
'''))
parser.add_argument('profile', nargs='+',
help='File with intensity profile, mu in col. 2, I(mu) in col. 3')
parser.add_argument('-r', '--resample', default=101, type=int,
help='''Re-sample input data
Input data are interpolated onto regular grid with specifed number of
points. Set this value to 0 to avoid re-sampling if the input data are
already on a regular grid of mu values.
(default: %(default)d)
''')
parser.add_argument('-n', '--n_lc', default=64, type=int,
help='''Number of points in the simulated light curve.
(default: %(default)d)
''')
parser.add_argument('-b', '--impact', default=0.0, type=float,
help='''Impact parameter for simulated light curve
(default: %(default)f)
''')
parser.add_argument('-k', '--ratio', default=0.1, type=float,
help='''Planet-star radius ratio for simulated light curve
(default: %(default)f)
''')
parser.add_argument('-g', '--grid', default='sparse', type=str,
help='''Density of numerical grid for simulation
Options are 'very_sparse', 'sparse', 'default', 'fine' and 'very_fine'
(default: %(default)s)
''')
args= parser.parse_args()
    # Fixed parameters; these could be made into input options later
r_1 = 0.1 # star radius/a
w = 0.5*transit_width(r_1, args.ratio, args.impact)
t = np.linspace(0,w,args.n_lc,endpoint=False)
for profile in args.profile:
mu,I_mu=np.loadtxt(profile,unpack=True, usecols=[1,2])
# Deal with repeated data at mu=0
if sum(mu == 0) > 1:
j = np.arange(len(mu))[mu ==0].max()
mu = mu[j:]
I_mu = I_mu[j:]
elif sum(mu == 0) == 0:
mu = np.array([0,*mu])
I_mu = np.array([0,*I_mu])
if sum(mu == 1) == 0:
mu = np.array([*mu, 1])
I_mu = np.array([*I_mu, 1])
if args.resample > 0:
mu_1 = np.linspace(0,1,args.resample)
ldc_1 = pchip_interpolate(mu, I_mu, mu_1)
else:
ldc_1 = mu
incl = 180*np.arccos(r_1*args.impact)/np.pi
lc_mugrid = lc(t, radius_1=r_1, radius_2=r_1*args.ratio,
sbratio=0, incl=incl, ld_1='mugrid', ldc_1 = ldc_1,
grid_1=args.grid, grid_2=args.grid)
c = np.array([0.3,0.45]) # q1, q2
smol = np.sqrt(np.finfo(float).eps)
soln = minimize(func, c,
args=(t, r_1, args.ratio, incl, args.grid, lc_mugrid),
method='L-BFGS-B', bounds=((smol, 1-smol),(smol, 1-smol)))
q1,q2 = soln.x
h1,h2 = q1q2_to_h1h2(q1,q2)
c2,a2 = h1h2_to_ca(h1,h2)
print(f"{profile} {c2:0.5f} {a2:0.5f} {h1:0.5f} {h2:0.5f} {q1:0.5f} {q2:0.5f} {soln.fun*1e6:5.2f}")
#-------------------------------
if __name__ == "__main__":
main()
| 4,967 | 32.342282 | 107 | py |
pycheops | pycheops-master/pycheops/combine.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
combine
=======
Calculate weighted mean and its standard error allowing for external noise
Functions
---------
main() - combine
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
from astropy.table import Table
import numpy as np
from . import __version__
from .utils import uprint as up
from uncertainties import ufloat as uf
import emcee
import textwrap
from scipy.special import gammainc
from numba import jit
def combine(y, yerr, walkers=64, steps=256, discard=128):
@jit(nopython=True)
def log_prob(p, y, yvar, mulo, muhi, lnsig_lo, lnsig_hi):
mu = p[0]
lnsig = p[1]
if (mu < mulo) or (mu > muhi): return -np.inf
if (lnsig < lnsig_lo) or (lnsig > lnsig_hi): return -np.inf
sigma2 = yvar + np.exp(2*lnsig)
return -0.5 * np.sum((y - mu)**2/sigma2 + np.log(sigma2))
y = np.array(y)
yerr = np.array(yerr)
mulo = y.min() - yerr.max()
muhi = y.max() + yerr.max()
mu_i = np.random.normal(y.mean(),y.std()+yerr.min(), walkers)
mu_i = np.clip(mu_i, mulo, muhi)
lnsig_0 = np.log(y.std()+yerr.min())
lnsig_lo = lnsig_0 - 15
lnsig_hi = lnsig_0 + 5
lnsig_i = np.random.normal(lnsig_0-5,1,walkers)
lnsig_i = np.clip(lnsig_i, lnsig_lo, lnsig_hi)
pos = np.array( [mu_i, lnsig_i]).T
nwalkers, ndim = pos.shape
yvar = yerr**2
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob,
args=(y, yvar, mulo, muhi, lnsig_lo, lnsig_hi))
sampler.run_mcmc(pos, steps)
chain = sampler.get_chain(flat=True, discard=discard)
mu = chain[:,0].mean()
mu_err = chain[:,0].std()
sigext = np.exp(chain[:,1]).mean()
sigext_err = np.exp(chain[:,1]).std()
return mu, mu_err, sigext, sigext_err, sampler
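# Minimal usage sketch for combine() (the numbers are illustrative only):
#   y = [10.02, 9.95, 10.11, 10.04, 9.87]
#   yerr = [0.05, 0.04, 0.06, 0.05, 0.04]
#   mu, e_mu, sig, e_sig, sampler = combine(y, yerr)
# mu +/- e_mu is the posterior mean and sig +/- e_sig the external scatter
# added in quadrature to the quoted errors; sampler is the emcee sampler,
# returned so the chains can be inspected for convergence.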
def main():
# Set up command line switches
parser = argparse.ArgumentParser(
description='Weighted mean and error allowing for external noise',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog = textwrap.dedent(f'''\
This is version {__version__}
Reads a table of values with standard error estimates and calculates
the weighted mean and error, allowing for the possibility that there
is an additional source of uncertainty.
The input table can be any format suitable for reading with the
command astropy.table.Table.read(), e.g., CSV.
By default, the calculation is done using columns 1 and 2 in the
table. Use the flag --val_col and --err_col to specify alternative
column names or numbers.
        If --val_col is used and --err_col is not specified, the program will ...
- use the column val_col+1 if val_col is an integer
- look for a column named "e_"+val_col
- look for a column named val_col+"_err"
- give up...
'''))
parser.add_argument('table', nargs='?',
help='Table of values and errors'
)
parser.add_argument('-f', '--format',
help='Table format - passed to Table.read()'
)
parser.add_argument('-v', '--val_col',
default=1,
help='''Column with values
(default: %(default)d)
'''
)
parser.add_argument('-e', '--err_col',
default=None,
help='Column with errors'
)
parser.add_argument('-1', '--one_line',
action='store_const',
dest='one_line',
const=True,
default=False,
help='Output results on one line'
)
parser.add_argument('-p', '--precise',
action='store_const',
dest='precise',
const=True,
default=False,
help='More precise (but slower) calculation'
)
args = parser.parse_args()
if args.table is None:
parser.print_usage()
exit(1)
table = Table.read(args.table, format=args.format)
try:
val_col = int(args.val_col) - 1
name = 'y'
except ValueError:
val_col = str(args.val_col)
name = args.val_col
y = table[val_col][:]
if args.err_col is None:
if isinstance(val_col, int):
yerr = table[val_col+1][:]
else:
try:
err_col = 'e_'+str(args.val_col)
yerr = table[err_col][:]
except KeyError:
err_col = str(args.val_col)+'_err'
yerr = table[err_col][:]
else:
yerr = table[args.err_col][:]
if args.precise:
nw, ns, nb, sf = 128, 1500, 500, 2
else:
nw, ns, nb, sf = 64, 150, 50, 1
mu, e_mu, sig, e_sig, sampler = combine(y, yerr,
walkers=nw, steps=ns, discard=nb)
if not args.one_line:
n = len(y)
print (f'\nRead {n} values from {args.table}')
print (up(uf(y.max(), yerr[np.argmax(y)]),
f'Maximum value of {name}', sf=sf))
print (up(uf(y.min(), yerr[np.argmin(y)]),
f'Minimum value of {name}', sf=sf))
wt = 1/yerr**2
wsum = wt.sum()
wmean = (y*wt).sum()/wsum
chisq = ((y-wmean)**2*wt).sum()
e_int = np.sqrt(1/wsum)
e_ext = np.sqrt(chisq/(n-1)/wsum)
print(f' Weighted mean = {wmean:0.4f}')
print (up(uf(wmean, e_int),f'{name}', sf=sf), '(Internal error)')
print (up(uf(wmean, e_ext),f'{name}', sf=sf), '(External error)')
print(f' Chi-square = {chisq:0.2f}')
p = 1-gammainc(0.5*(n-1),0.5*chisq)
print(f' P(chi-sq. > observed chi-sq. if mean is constant) = {p:0.2}')
print ("--")
print(up(uf(mu,e_mu), name, sf=sf) + '; ' +
up(uf(sig, e_sig),'sigma_ext', sf=sf))
| 6,545 | 29.588785 | 80 | py |
pycheops | pycheops-master/pycheops/constants.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
constants
=========
Nominal values of solar and planetary constants in SI units from IAU
Resolution B3 [1]_ plus related constants
Masses in SI units are derived using the 2014 CODATA value for the
Newtonian constant, :math:`G=6.67408\\times 10^{-11}\,m^3\,kg^{-1}\,s^{-2}`.
The following conversion constants are defined.
Solar conversion constants
--------------------------
* R_SunN - solar radius
* S_SunN - total solar irradiance
* L_SunN - luminosity
* Teff_SunN - solar effective temperature
* GM_SunN - solar mass parameter
* M_SunN - solar mass derived from GM_SunN and G_2014
* V_SunN - solar volume = (4.pi.R_SunN**3/3)
Planetary conversion constants
------------------------------
* R_eEarthN - equatorial radius of the Earth
* R_pEarthN - polar radius of the Earth
* R_eJupN - equatorial radius of Jupiter
* R_pJupN - polar radius of Jupiter
* GM_EarthN - terrestrial mass parameter
* GM_JupN - jovian mass parameter
* M_EarthN - mass of the Earth from GM_EarthN and G_2014
* M_JupN - mass of Jupiter from GM_JupN and G_2014
* V_EarthN - volume of the Earth (4.pi.R_eEarthN^2.R_pEarthN/3)
* V_JupN - volume of Jupiter (4.pi.R_eJupN^2.R_pJupN/3)
* R_EarthN - volume-average radius of the Earth (3.V_EarthN/4.pi)^(1/3)
* R_JupN - volume-average radius of Jupiter (3.V_JupN/4.pi)^(1/3)
Related constants
-----------------
* G_2014 - 2014 CODATA value for the Newtonian constant
* mean_solar_day - 86,400.002 seconds [2]_
* au - IAU 2009 value for astronomical constant in metres. [3]_
* pc - 1 parsec = 3600*au*180/pi
* c - speed of light = 299,792,458 m / s
Example
-------
Calculate the density relative to Jupiter for a planet 1/10 the radius of the
Sun with a mass 1/1000 of a solar mass. Note that we use the volume-average
radius for Jupiter in this case::
>>> from pycheops.constants import M_SunN, R_SunN, M_JupN, R_JupN
>>> M_planet_Jup = M_SunN/1000 / M_JupN
>>> R_planet_Jup = R_SunN/10 / R_JupN
>>> rho_planet_Jup = M_planet_Jup / (R_planet_Jup**3)
>>> print ("Planet mass = {:.3f} M_Jup".format(M_planet_Jup))
>>> print ("Planet radius = {:.3f} R_Jup".format(R_planet_Jup))
>>> print ("Planet density = {:.3f} rho_Jup".format(rho_planet_Jup))
Planet mass = 1.048 M_Jup
Planet radius = 0.995 R_Jup
Planet density = 1.063 rho_Jup
.. rubric:: References
.. [1] https://www.iau.org/static/resolutions/IAU2015_English.pdf
.. [2] http://tycho.usno.navy.mil/leapsec.html
.. [3] Luzum et al., Celest Mech Dyn Astr (2011) 110:293-304
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__all__ = [ 'G_2014',
'R_SunN','S_SunN','L_SunN','Teff_SunN','GM_SunN','M_SunN','V_SunN',
'R_eEarthN','R_pEarthN','GM_EarthN','M_EarthN','V_EarthN','R_EarthN',
'R_eJupN','R_pJupN','GM_JupN','M_JupN','V_JupN','R_JupN',
'mean_solar_day','au','pc','c']
from math import pi
G_2014 = 6.67408E-11 # m3.kg-1.s-2, 2014 CODATA value
R_SunN = 6.957E8 # m, solar radius
S_SunN = 1361. # w.m-2 total solar irradiance
L_SunN = 3.828E26 # W, solar luminosity
Teff_SunN = 5772. # K, solar effective temperature
GM_SunN = 1.3271244E20 # m3.s-2, solar mass parameter
M_SunN = GM_SunN/G_2014 # kg, solar mass derived from GM_SunN and G_2014
V_SunN = 4*pi*R_SunN**3/3 # m3, solar volume
R_eEarthN = 6.3781E6 # m, equatorial radius of the Earth
R_pEarthN = 6.3568E6 # m, polar radius of the Earth
R_eJupN = 7.1492E7 # m, equatorial radius of Jupiter
R_pJupN = 6.6854E7 # m, polar radius of Jupiter
GM_EarthN = 3.986004E14 # m3.s-2, terrestrial mass parameter
GM_JupN = 1.2668653E17 # m3.s-2, jovian mass parameter
M_EarthN = GM_EarthN/G_2014 # kg, mass of the Earth from GM_EarthN and G_2014
M_JupN = GM_JupN/G_2014 # kg, mass of Jupiter from GM_JupN and G_2014
V_EarthN = 4*pi*R_eEarthN**2*R_pEarthN/3 # m^3, volume of the Earth
V_JupN = 4*pi*R_eJupN**2*R_pJupN/3 # m^3, volume of Jupiter
R_EarthN = (R_eEarthN**2*R_pEarthN)**(1/3.) # m, mean radius of the Earth
R_JupN = (R_eJupN**2*R_pJupN)**(1/3.) # m, mean radius of Jupiter
mean_solar_day = 86400.002 # seconds
au = 1.49597870700E11 # m, IAU 2009 Astronomical unit
pc = 3600*au*180/pi # m, parsec
c = 299792458 # m.s-1, speed of light
| 5,378 | 41.023438 | 80 | py |
pycheops | pycheops-master/pycheops/core.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
core
====
Core functions for pycheops
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
from configparser import ConfigParser
from sys import platform
import getpass
__all__ = ['load_config', 'setup_config', 'get_cache_path']
def find_config():
r"""
Find pycheops.cfg from a hierarchy of places
First, try `~/pycheops.cfg`
if that fails, path is platform dependent
Linux: `$XDG_CONFIG_HOME/pycheops.cfg` (defaults to `~/.config/pycheops.cfg` if `$XDG_DATA_HOME` is not set)
Windows: `%APPDATA%\pycheops\pycheops.cfg` (usually `C:\Users\user\AppData\Roaming\pycheops\pycheops.cfg`)
Other: `~/pycheops/pycheops.cfg`
"""
dirname='~'
fname='pycheops.cfg'
tryConfigFile = os.path.expanduser(os.path.join(dirname, fname))
if os.path.isfile(tryConfigFile):
configFile = tryConfigFile
return configFile
if platform == "linux" or platform == "linux2":
dirname = os.getenv('XDG_CONFIG_HOME', os.path.expanduser(os.path.join('~', '.config')))
elif platform == "win32":
dirname = os.path.expandvars(os.path.join('%APPDATA%', 'pycheops'))
else:
dirname = os.path.expanduser(os.path.join('~', 'pycheops'))
tryConfigFile = os.path.join(dirname, fname)
if not os.path.isdir(dirname):
os.makedirs(dirname, exist_ok=True)
configFile = tryConfigFile
return configFile
def load_config(configFile=None):
"""
Load module configuration from configFile
If configFile is None, find pycheops.cfg
:param configFile: Full path to configuration file
"""
if configFile is None:
configFile = find_config()
if not os.path.isfile(configFile):
raise ValueError('Configuration file not found - run core.setup_config')
c = ConfigParser()
c.read(configFile)
return c
def setup_config(configFile=None, overwrite=False, mode=0o600,
data_cache_path=None, pdf_cmd=None):
"""
Create module configuration
If configFile is None, find pycheops.cfg
:param configFile: Full path to configuration file
:param overwrite: overwrite values in existing configFile
:param mode: mode (permission) settings for configFile
:param data_cache_path: user is prompted if None, use '' for default
:param pdf_cmd: user is prompted if None, use '' for default
"""
if configFile is None:
configFile = find_config()
print('Creating configuration file {}'.format(configFile))
if os.path.isfile(configFile) and not overwrite:
raise ValueError('Configuration file exists and overwrite is not set')
r"""
`data_cache_default` is platform dependent and not in `~`
Linux: `$XDG_DATA_HOME/pycheops` (defaults to `~/.local/share/pycheops` if `$XDG_DATA_HOME` is not set)
Windows: `%APPDATA%\pycheops\data` (usually `C:\Users\user\AppData\Roaming\pycheops\data`)
Other: `~/pycheops/data`
"""
if platform == "linux" or platform == "linux2":
data_cache_default = os.path.join(os.getenv('XDG_DATA_HOME', os.path.expanduser(os.path.join('~', '.local', 'share'))), 'pycheops')
elif platform == "win32":
data_cache_default = os.path.expandvars(os.path.join('%APPDATA%', 'pycheops', 'data'))
else:
data_cache_default = os.path.expanduser(os.path.join('~', 'pycheops', 'data'))
prompt = "Enter data cache directory [{}] > ".format(data_cache_default)
if data_cache_path is None:
data_cache_path = input(prompt)
if data_cache_path == '':
data_cache_path = data_cache_default
if not os.path.isdir(data_cache_path):
os.makedirs(data_cache_path, exist_ok=True)
if platform == "linux" or platform == "linux2":
pdf_cmd_default = r'okular {} &'
elif platform == "darwin":
pdf_cmd_default = r'open -a preview {}'
elif platform == "win32":
pdf_cmd_default = r'AcroRd32.exe {}'
prompt = ("Enter command to view PDF with {{}} as file name placeholder "
"[{}] > ".format(pdf_cmd_default))
if pdf_cmd is None:
pdf_cmd = input(prompt)
if pdf_cmd == '':
pdf_cmd = pdf_cmd_default
c = ConfigParser()
c['DEFAULT'] = {'data_cache_path': data_cache_path,
'pdf_cmd': pdf_cmd}
# SweetCat location and update interval in seconds
url = 'https://sweetcat.iastro.pt/catalog/SWEETCAT_Dataframe.csv'
c['SWEET-Cat'] = {'update_interval': 86400, 'download_url': url}
# TEPCat location and update interval in seconds
url = 'https://www.astro.keele.ac.uk/jkt/tepcat/allplanets-csv.csv'
c['TEPCat'] = {'update_interval': 86400, 'download_url': url}
url = 'https://www.astro.keele.ac.uk/jkt/tepcat/observables.csv'
c['TEPCatObs'] = {'update_interval': 86400, 'download_url': url}
#N.B. The archive username and password are stored in plain text so the
#default mode value is 0o600 = user read/write permission only.
# Reference PSF file
psf_file = 'CHEOPS_IT_PSFwhite_CH_TU2018-01-01.txt'
c['psf_file'] = {'psf_file': psf_file, 'x0':99.5, 'y0':99.5}
with open(os.open(configFile, os.O_CREAT | os.O_WRONLY, mode), 'w') as cf:
c.write(cf)
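# Non-interactive setup sketch (arguments shown are illustrative); passing
# empty strings accepts the platform defaults without prompting:
#   from pycheops.core import setup_config
#   setup_config(data_cache_path='', pdf_cmd='', overwrite=True)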
| 6,108 | 33.128492 | 139 | py |
pycheops | pycheops-master/pycheops/dataset.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
Dataset
=======
Object class for data access, data caching and data inspection tools
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import tarfile
from zipfile import ZipFile
import re
import logging
from pathlib import Path
from .core import load_config
from astropy.io import fits
from astropy.table import Table, MaskedColumn
import matplotlib.pyplot as plt
from .instrument import transit_noise
from ftplib import FTP
from .models import TransitModel, FactorModel, EclipseModel
from lmfit import Parameter, Parameters, minimize, Minimizer, fit_report
from lmfit import __version__ as _lmfit_version_
from lmfit import Model
from scipy.interpolate import interp1d, LSQUnivariateSpline
import matplotlib.pyplot as plt
from emcee import EnsembleSampler
import corner
from copy import copy, deepcopy
from celerite2 import terms, GaussianProcess
from celerite2.terms import SHOTerm
from sys import stdout
from astropy.coordinates import SkyCoord, get_body, Angle
from lmfit.printfuncs import gformat
from scipy.signal import medfilt
from .utils import lcbin, mode
import astropy.units as u
from uncertainties import ufloat, UFloat
from uncertainties.umath import sqrt as usqrt
from astropy.timeseries import LombScargle
from astropy.time import Time
from astropy.convolution import convolve, Gaussian1DKernel
from .instrument import CHEOPS_ORBIT_MINUTES
from scipy.stats import skewnorm
from scipy.optimize import minimize as scipy_minimize
from . import __version__
from .funcs import rhostar, massradius
from tqdm import tqdm_notebook as tqdm
import matplotlib.animation as animation
import matplotlib.colors as colors
from IPython.display import Image
import subprocess
import pickle
import warnings
from astropy.units import UnitsWarning
from photutils import CircularAperture, aperture_photometry
import cdspyreadme
from textwrap import fill, indent
import os
from contextlib import redirect_stderr
from dace_query.cheops import Cheops
_file_key_re = re.compile(r'CH_PR(\d{2})(\d{4})_TG(\d{4})(\d{2})_V(\d{4})')
_file_key_reT = re.compile(r'TIC_(\d{10})_SEC(\d{4})_V(\d{4})')
_file_key_reP = re.compile(r'PIPE_CH_PR(\d{2})(\d{4})_TG(\d{4})(\d{2})_V(\d{4})')
_file_key_reK = re.compile(r'KIC_(\d{10})_SEC_(\d{4})')
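# Example file_key values matched by the patterns above (illustrative):
#  CH_PR100001_TG000101_V0200       - CHEOPS DRP visit
#  PIPE_CH_PR100001_TG000101_V0200  - PIPE PSF photometry
#  TIC_0123456789_SEC0001_V0001     - TESS light curve
#  KIC_0123456789_SEC_0001          - Kepler light curve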
#---
# Utility function for model fitting
def _kw_to_Parameter(name, kwarg):
if isinstance(kwarg, float):
return Parameter(name=name, value=kwarg, vary=False)
if isinstance(kwarg, int):
return Parameter(name=name, value=float(kwarg), vary=False)
if isinstance(kwarg, list):
return Parameter(name=name, value=np.median(kwarg),
min=min(kwarg), max=max(kwarg))
if isinstance(kwarg, tuple):
if len(kwarg) == 2:
if (min(kwarg) != kwarg[0]) or (max(kwarg) != kwarg[1]):
                raise ValueError('Invalid initial tuple values (max < min)')
return Parameter(name=name, value=np.median(kwarg),
min=kwarg[0], max=kwarg[1])
elif len(kwarg) == 3:
if (min(kwarg) != kwarg[0]) or (max(kwarg) != kwarg[2]):
raise ValueError('Invalid initial tuple values')
return Parameter(name=name, value=kwarg[1],
min=kwarg[0], max=kwarg[2])
else:
raise ValueError('Invalid initial tuple length')
if isinstance(kwarg, UFloat):
return Parameter(name=name, value=kwarg.n, user_data=kwarg)
if isinstance(kwarg, Parameter):
return kwarg
raise ValueError('Unrecognised type for keyword argument {}'.
format(name))
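# Conventions handled above for a keyword 'P' (values are illustrative):
#  P = 3.405            -> parameter fixed at the given value
#  P = (3.3, 3.5)       -> uniform prior, initial value at the mid-point
#  P = (3.3, 3.4, 3.5)  -> uniform prior (3.3, 3.5), initial value 3.4
#  P = ufloat(3.4, 0.1) -> Gaussian prior, stored in user_data
#  P = [3.3, 3.5, ...]  -> uniform prior from min/max, start at the median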
#----
def _make_interp(t,x,scale=None):
if scale == None:
z = x
elif np.ptp(x) == 0:
z = np.zeros_like(x)
elif scale == 'max':
z = (x-min(x))/np.ptp(x)
elif scale == 'range':
z = (x-np.median(x))/np.ptp(x)
else:
raise ValueError('scale must be None, max or range')
# Use copy=False to store time and value arrays by reference rather than
# as copies.
return interp1d(t, z, bounds_error=False, fill_value=(z[0],z[-1]),
copy=False)
#---
def _glint_func(t, glint_scale, f_theta=None, f_glint=None ):
return glint_scale * f_glint(f_theta(t))
#---
def _make_trial_params(pos, params, vn):
# Create a copy of the params object with the parameter values give in
# list vn replaced with trial values from array pos.
# Also returns the contribution to the log-likelihood of the parameter
# values.
# Return value is parcopy, lnprior
# If any of the parameters are out of range, returns None, -inf
parcopy = params.copy()
lnprior = 0
for i, p in enumerate(vn):
v = pos[i]
if (v < parcopy[p].min) or (v > parcopy[p].max):
return None, -np.inf
parcopy[p].value = v
lnprior = _log_prior(parcopy['D'], parcopy['W'], parcopy['b'])
if not np.isfinite(lnprior):
return None, -np.inf
# Also check parameter range here so we catch "derived" parameters
# that are out of range.
for p in parcopy:
v = parcopy[p].value
if (v < parcopy[p].min) or (v > parcopy[p].max):
return None, -np.inf
if np.isnan(v):
return None, -np.inf
u = parcopy[p].user_data
if isinstance(u, UFloat):
lnprior += -0.5*((u.n - v)/u.s)**2
if not np.isfinite(lnprior):
return None, -np.inf
return parcopy, lnprior
#---
# Prior on (D, W, b) for transit/eclipse fitting.
# This prior assumes uniform priors on cos(i), log(k) and log(aR). The
# factor 2kW is the absolute value of the determinant of the Jacobian,
# J = d(D, W, b)/d(cosi, k, aR)
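# In these variables D = k**2, W = sqrt((1+k)**2 - b**2)/(pi*aR) (width in
# phase units) and, for a circular orbit, b = aR*cos(i), so the inverse
# transform used below is analytic; the extra -log(k) - log(aR) terms
# apply the uniform priors on log(k) and log(aR) in this space.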
def _log_prior(D, W, b):
if (D < 2e-6) or (D > 0.25): return -np.inf
k = np.sqrt(D)
if (b < 0) : return -np.inf
if (W < 1e-4): return -np.inf
q = (1+k)**2 - b**2
if (q < 0): return -np.inf
aR = np.sqrt(q)/(np.pi*W)
if (aR < 1): return -np.inf
return -np.log(2*k*W) - np.log(k) - np.log(aR)
#---
# Target functions for emcee
def _log_posterior_jitter(pos, model, time, flux, flux_err, params, vn,
return_fit):
parcopy, lnprior = _make_trial_params(pos, params, vn)
if parcopy == None: return -np.inf, -np.inf
fit = model.eval(parcopy, t=time)
if return_fit:
return fit
if False in np.isfinite(fit):
return -np.inf, -np.inf
jitter = np.exp(parcopy['log_sigma'].value)
s2 =flux_err**2 + jitter**2
lnlike = -0.5*(np.sum((flux-fit)**2/s2 + np.log(2*np.pi*s2)))
return lnlike + lnprior, lnlike
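# N.B. the _log_posterior_* functions return the tuple (log posterior,
# log likelihood) so that emcee stores the likelihood values as blobs.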
#----
def _log_posterior_SHOTerm(pos, model, time, flux, flux_err, params, vn,
return_fit):
parcopy, lnprior = _make_trial_params(pos, params, vn)
if parcopy == None: return -np.inf, -np.inf
fit = model.eval(parcopy, t=time)
if return_fit:
return fit
if False in np.isfinite(fit):
return -np.inf, -np.inf
resid = flux-fit
kernel = SHOTerm(
S0=np.exp(parcopy['log_S0'].value),
Q=np.exp(parcopy['log_Q'].value),
w0=np.exp(parcopy['log_omega0'].value))
gp = GaussianProcess(kernel, mean=0)
yvar = flux_err**2+np.exp(2*parcopy['log_sigma'].value)
gp.compute(time, diag=yvar, quiet=True)
lnlike = gp.log_likelihood(resid)
return lnlike + lnprior, lnlike
#---------------
def _make_labels(plotkeys, bjd_ref, extra_decorr_vectors=None):
labels = []
xbf = {} if extra_decorr_vectors==None else extra_decorr_vectors
for key in plotkeys:
if key == 'T_0':
labels.append(r'T$_0-{}$'.format(bjd_ref))
elif key == 'h_1':
labels.append(r'$h_1$')
elif key == 'h_2':
labels.append(r'$h_2$')
elif key == 'f_c':
labels.append(r'$f_c$')
elif key == 'f_s':
labels.append(r'$f_s$')
elif key == 'l_3':
labels.append(r'$\ell_3$')
elif key == 'dfdbg':
labels.append(r'$df\,/\,d{\rm (bg)}$')
elif key == 'dfdsmear':
labels.append(r'$df\,/\,d{\rm (smear)}$')
elif key == 'dfdcontam':
labels.append(r'$df\,/\,d{\rm (contam)}$')
elif key == 'ramp':
labels.append(r'$df\,/\,d\Delta T$')
elif key == 'dfdx':
labels.append(r'$df\,/\,dx$')
elif key == 'd2fdx2':
labels.append(r'$d^2f\,/\,dx^2$')
elif key == 'dfdy':
labels.append(r'$df\,/\,dy$')
elif key == 'd2fdy2':
labels.append(r'$d^2f\,/\,dy^2$')
elif key == 'dfdt':
labels.append(r'$df\,/\,dt$')
elif key == 'd2fdt2':
labels.append(r'$d^2f\,/\,dt^2$')
elif key == 'dfdsinphi':
labels.append(r'$df\,/\,d\sin(\phi)$')
elif key == 'dfdcosphi':
labels.append(r'$df\,/\,d\cos(\phi)$')
elif key == 'dfdsin2phi':
labels.append(r'$df\,/\,d\sin(2\phi)$')
elif key == 'dfdcos2phi':
labels.append(r'$df\,/\,d\cos(2\phi)$')
elif key == 'dfdsin3phi':
labels.append(r'$df\,/\,d\sin(3\phi)$')
elif key == 'dfdcos3phi':
labels.append(r'$df\,/\,d\cos(3\phi)$')
elif key == 'log_sigma':
labels.append(r'$\log\sigma$')
elif key == 'log_omega0':
labels.append(r'$\log\omega_0$')
elif key == 'log_S0':
labels.append(r'$\log{\rm S}_0$')
elif key == 'log_Q':
labels.append(r'$\log{\rm Q}$')
elif key == 'sigma_w':
labels.append(r'$\sigma_w$ [ppm]')
elif key == 'logrho':
labels.append(r'$\log\rho_{\star}$')
elif key == 'aR':
labels.append(r'${\rm a}\,/\,{\rm R}_{\star}$')
elif key == 'sini':
            labels.append(r'$\sin i$')
# for an extra basis function 'extra', key will be 'dfdextra'
elif key[3:] in xbf:
k = key[3:]
if 'label' in xbf[k]:
labels.append(xbf[k]['label'])
else:
labels.append(key)
else:
labels.append(key)
return labels
#---------------
class Dataset(object):
"""
CHEOPS Dataset object
    :param file_key: visit identifier, e.g. 'CH_PR100001_TG000101_V0200'
    :param force_download: if True, download even if a cached copy exists
    :param download_all: If False, download light curves only
    :param configFile: pycheops configuration file
    :param target: target name (taken from the FITS header if None)
    :param view_report_on_download: view the data reduction report PDF
    :param metadata: True to load meta data
    :param verbose: if True, print dataset information
"""
def __init__(self, file_key, source=None, force_download=False,
download_all=True, configFile=None, target=None,
verbose=True, metadata=True, view_report_on_download=True):
if source == None:
if _file_key_re.search(file_key):
if file_key[21:] == 'V9193':
source = 'PIPE'
else:
source = 'CHEOPS'
elif _file_key_reP.search(file_key):
source = 'PIPE'
elif _file_key_reK.search(file_key):
source = 'Kepler'
elif _file_key_reT.search(file_key):
source = 'TESS'
if source == 'TESS':
m = _file_key_reT.search(file_key)
elif source == 'PIPE':
m = _file_key_reP.search(file_key)
            if m == None: # file_key from from_pipe_file has CHEOPS format
m = _file_key_re.search(file_key)
elif source == 'Kepler' or source == 'K2':
m = _file_key_reK.search(file_key)
else:
m = _file_key_re.search(file_key)
if m == None:
raise ValueError('Invalid file_key {}'.format(file_key))
self.source = source
self.file_key = file_key
l = [int(i) for i in m.groups()]
try:
self.progtype,self.prog_id,self.req_id,self.visitctr,self.ver = l
        except ValueError:
try:
self.ticid, self.sector, self.ver = l
            except ValueError:
self.kicid, self.num = l
config = load_config(configFile)
_cache_path = config['DEFAULT']['data_cache_path']
tgzPath = Path(_cache_path,file_key).with_suffix('.tgz')
self.tgzfile = str(tgzPath)
view_report = view_report_on_download
if tgzPath.is_file() and not force_download:
if verbose:
print('Found archive tgzfile',self.tgzfile)
view_report = False
else:
if download_all:
file_type='all'
else:
file_type='lightcurves'
view_report = False
# Bodge to avoid logging errors in jupyter notebooks
with open(os.devnull,'w+') as devnull:
with redirect_stderr(devnull):
Cheops.download(file_type,
filters={'file_key':{'contains':[file_key]}},
output_directory=str(tgzPath.parent),
output_filename=str(tgzPath.name) )
lisPath = Path(_cache_path,file_key).with_suffix('.lis')
# The file list can be out-of-date if force_download is used
if lisPath.is_file() and not force_download:
self.list = [line.rstrip('\n') for line in open(lisPath)]
else:
if verbose: print('Creating dataset file list')
tar = tarfile.open(self.tgzfile)
self.list = tar.getnames()
tar.close()
with open(str(lisPath), 'w') as fh:
fh.writelines("%s\n" % l for l in self.list)
# Extract light curve data file from .tgz file so we can access the
# FITS file header information.
# V9193 files are generated from PIPE output files and have only one
# aperture called 'PSF'
if self.file_key[-5:] == 'V9193':
aperture = 'PSF'
else:
aperture='DEFAULT'
lcFile = "{}-{}.fits".format(self.file_key,aperture)
lcPath = Path(self.tgzfile).parent/lcFile
if lcPath.is_file():
with fits.open(lcPath) as hdul:
hdr = hdul[1].header
else:
tar = tarfile.open(self.tgzfile)
            s = r'(?!\.)(.*_SCI_COR_Lightcurve-{}_V[0-9]{{4}}.fits)'
r=re.compile(s.format(aperture))
datafile = list(filter(r.match, self.list))
if len(datafile) == 0:
raise Exception('Requested light curve not in this Dataset.')
if len(datafile) > 1:
                raise Exception('Multiple light curve files in dataset')
with tar.extractfile(datafile[0]) as fd:
with warnings.catch_warnings():
warnings.simplefilter("ignore", UnitsWarning)
hdul = fits.open(fd)
hdr = hdul[1].header
table = Table.read(hdul[1])
hdul.writeto(lcPath)
tar.close()
self.pi_name = hdr['PI_NAME']
self.obsid = hdr['OBSID']
if target == None:
self.target = hdr['TARGNAME']
else:
self.target = target
coords = SkyCoord(hdr['RA_TARG'],hdr['DEC_TARG'],unit='degree,degree')
self.ra = coords.ra.to_string(precision=2,unit='hour',sep=':',pad=True)
self.dec = coords.dec.to_string(precision=1,sep=':',unit='degree',
alwayssign=True,pad=True)
if 'MAG_V' in hdr:
self.vmag = hdr['MAG_V']
self.e_vmag = hdr['MAG_VERR']
else:
self.vmag =None
if 'MAG_G' in hdr:
self.gmag = hdr['MAG_G']
self.e_gmag = hdr['MAG_GERR']
else:
self.gmag =None
if 'T_EFF' in hdr:
            self.teff = hdr['T_EFF']
self.spectype = hdr['SPECTYPE']
self.nexp = hdr['NEXP']
self.exptime = hdr['EXPTIME']
self.texptime = hdr['TEXPTIME']
self.pipe_ver = hdr['PIPE_VER']
if verbose:
print(' PI name : {}'.format(self.pi_name))
print(' OBS ID : {}'.format(self.obsid))
print(' Target : {}'.format(self.target))
print(' Coordinates : {} {}'.format(self.ra, self.dec))
print(' Spec. type : {}'.format(self.spectype))
if self.vmag is not None:
print(' V magnitude : {:0.2f} +- {:0.2f}'.
format(self.vmag, self.e_vmag))
if self.gmag is not None:
print(' G magnitude : {:0.2f} +- {:0.2f}'.
format(self.gmag, self.e_gmag))
if metadata:
metaFile = "{}-meta.fits".format(self.file_key)
metaPath = Path(self.tgzfile).parent/metaFile
if metaPath.is_file():
with warnings.catch_warnings():
warnings.simplefilter("ignore", UnitsWarning)
self.metadata = Table.read(metaPath)
else:
tar = tarfile.open(self.tgzfile)
            r=re.compile(r'(?!\.)(.*SCI_RAW_SubArray.*.fits)')
metafile = list(filter(r.match, self.list))
if len(metafile) > 1:
                raise Exception('Multiple metadata files in dataset')
if len(metafile) == 0:
msg = "No metadata in file {}".format(self.tgzfile)
warnings.warn(msg)
else:
with tar.extractfile(metafile[0]) as fd:
with warnings.catch_warnings():
warnings.simplefilter("ignore", UnitsWarning)
hdul = fits.open(fd)
table = Table.read(hdul,hdu='SCI_RAW_ImageMetadata')
table.write(metaPath)
tar.close()
with warnings.catch_warnings():
warnings.simplefilter("ignore", UnitsWarning)
self.metadata = Table.read(metaPath)
if view_report:
self.view_report(configFile=configFile)
#----
@classmethod
def from_pipe_file(self, pipe_file, file_key=None, configFile=None,
metadata=True, verbose=True):
"""
Create a Dataset object from a PIPE output file.
        PIPE is a PSF photometry extraction package for CHEOPS.
https://pipe-cheops.readthedocs.io/
If file_key=None (default) then the DACE archive is queried to find
the file_key value for the observation identification number OBSID
obtained from the header of the PIPE output file. The version number
in the file_key is set to "V9193", e.g. CH_PR100001_TG000101_V9193.
The output is saved in the directory data_cache_path specified in the
pycheops configuration file. It can subsequently be loaded as a normal
Dataset object. The aperture name for dataset_get_lightcurve is 'PSF'.
This is detected automatically by get_lightcurve(), e.g.
        >>> dataset = Dataset('CH_PR100001_TG000101_V9193')
>>> time, flux, flux_err = dataset.get_lightcurve()
:param pipe_file: PIPE output FITS file
:param file_key: (optional) file_key to use for saving data
:param configFile: pycheops configuration file
:param metadata: download metadata
:param verbose: (optional, default=True) verbose output, none if False
"""
config = load_config(configFile)
_cache_path = config['DEFAULT']['data_cache_path']
with warnings.catch_warnings():
warnings.simplefilter("ignore", UnitsWarning)
pipedata = Table.read(pipe_file)
target = pipedata.meta['TARGNAME']
        obs_id = pipedata.meta['OBSID']
        if file_key == None or metadata:
            # query needed to resolve the file_key and/or locate metadata
            db = Cheops.query_database(filters={'obs_id':{'equal':[obs_id]}})
        if file_key == None:
            file_key = db['file_key'][0][:-5]+'V9193'
tgzPath = Path(_cache_path,file_key).with_suffix('.tgz')
tgzfile = str(tgzPath)
file_stats = os.stat(pipe_file)
if metadata:
dblist = list(Cheops.list_data_product(
visit_filepath=str(db.get('file_rootpath', [])[0]))['file'])
_re_meta = re.compile('(.*CH_.*SCI_RAW_SubArray.*.fits)')
dbmetapath = list(filter(_re_meta.match, dblist))
Cheops.download_files(files=dbmetapath, file_type='files',
output_filename=tgzfile)
metaFile = "{}-meta.fits".format(file_key)
metaPath = Path(_cache_path, metaFile)
tar = tarfile.open(tgzfile)
tarmetafile = tar.getnames()[0]
with tar.extractfile(tarmetafile) as fd:
with warnings.catch_warnings():
warnings.simplefilter("ignore", UnitsWarning)
hdul = fits.open(fd)
table = Table.read(hdul,hdu='SCI_RAW_ImageMetadata')
table.write(metaPath, overwrite=True)
tar.close()
lcFile = (file_key[3:20] + '/' + file_key[:-5] +
'TU'+pipedata.meta['DATE'].replace(':','-') +
'_SCI_COR_Lightcurve-PSF_V9193.fits')
with tarfile.open(tgzfile, mode='w:gz') as tgz:
tarinfo = tarfile.TarInfo(name=lcFile)
tarinfo.size = file_stats.st_size
with open(pipe_file,'rb') as fp:
tgz.addfile(tarinfo=tarinfo, fileobj=fp)
if metadata:
tarinfo = tarfile.TarInfo(name=tarmetafile)
tarinfo.size = os.stat(metaPath).st_size
with open(metaPath,'rb') as fp:
tgz.addfile(tarinfo=tarinfo, fileobj=fp)
return self(file_key=file_key, target=target, metadata=metadata,
configFile=configFile, source='PIPE', verbose=verbose)
#----
@classmethod
def from_test_data(self, subdir, target=None, configFile=None,
verbose=True):
config = load_config(configFile)
_cache_path = config['DEFAULT']['data_cache_path']
ftp=FTP('obsftp.unige.ch')
_ = ftp.login()
wd = "pub/cheops/test_data/{}".format(subdir)
ftp.cwd(wd)
filelist = [fl[0] for fl in ftp.mlsd()]
_re = re.compile(r'(CH_PR\d{6}_TG\d{6}).zip')
zipfiles = list(filter(_re.match, filelist))
if len(zipfiles) > 1:
raise ValueError('More than one dataset in ftp directory')
if len(zipfiles) == 0:
raise ValueError('No zip files for datasets in ftp directory')
zipfile = zipfiles[0]
file_key = zipfile[:-4]+'_V0000'
        # Validate the file_key; instance attributes are set in __init__
        m = _file_key_re.search(file_key)
        if m is None:
            raise ValueError('Invalid file_key {}'.format(file_key))
zipPath = Path(_cache_path,zipfile)
if zipPath.is_file():
if verbose: print('{} already downloaded'.format(str(zipPath)))
else:
cmd = 'RETR {}'.format(zipfile)
if verbose: print('Downloading {} ...'.format(zipfile))
ftp.retrbinary(cmd, open(str(zipPath), 'wb').write)
pdfFile = "{}_DataReduction.pdf".format(file_key)
pdfPath = Path(_cache_path,pdfFile)
if pdfPath.is_file():
if verbose: print('{} already downloaded'.format(pdfFile))
else:
_re = re.compile(r'CH_.*RPT_COR_DataReduction.*pdf')
pdffiles = list(filter(_re.match, filelist))
if len(pdffiles) > 0:
cmd = 'RETR {}'.format(pdffiles[0])
if verbose: print('Downloading {} ...'.format(pdfFile))
ftp.retrbinary(cmd, open(str(pdfPath), 'wb').write)
ftp.quit()
tgzPath = Path(_cache_path,file_key).with_suffix('.tgz')
tgzfile = str(tgzPath)
zpf = ZipFile(str(zipPath), mode='r')
ziplist = zpf.namelist()
_re_sa = re.compile('(CH_.*SCI_RAW_SubArray_.*.fits)')
_re_im = re.compile('(CH_.*SCI_RAW_Imagette_.*.fits)')
_re_lc = re.compile('(CH_.*_SCI_COR_Lightcurve-.*fits)')
with tarfile.open(tgzfile, mode='w:gz') as tgz:
subfiles = list(filter(_re_sa.match, ziplist))
if len(subfiles) > 1:
raise ValueError('More than one sub-array file in zip file')
if len(subfiles) == 1:
if verbose: print("Writing sub-array data to .tgz file...")
subfile=subfiles[0]
tarPath = Path('visit')/Path(file_key)/Path(subfile).name
tarinfo = tarfile.TarInfo(name=str(tarPath))
zipinfo = zpf.getinfo(subfile)
tarinfo.size = zipinfo.file_size
zf = zpf.open(subfile)
tgz.addfile(tarinfo=tarinfo, fileobj=zf)
zf.close()
imgfiles = list(filter(_re_im.match, ziplist))
if len(imgfiles) > 1:
raise ValueError('More than one imagette file in zip file')
if len(imgfiles) == 1:
if verbose: print("Writing Imagette data to .tgz file...")
imgfile=imgfiles[0]
tarPath = Path('visit')/Path(file_key)/Path(imgfile).name
tarinfo = tarfile.TarInfo(name=str(tarPath))
zipinfo = zpf.getinfo(imgfile)
tarinfo.size = zipinfo.file_size
zf = zpf.open(imgfile)
tgz.addfile(tarinfo=tarinfo, fileobj=zf)
zf.close()
if verbose: print("Writing Lightcurve data to .tgz file...")
for lcfile in list(filter(_re_lc.match, ziplist)):
tarPath = Path('visit')/Path(file_key)/Path(lcfile).name
tarinfo = tarfile.TarInfo(name=str(tarPath))
zipinfo = zpf.getinfo(lcfile)
tarinfo.size = zipinfo.file_size
zf = zpf.open(lcfile)
tgz.addfile(tarinfo=tarinfo, fileobj=zf)
zf.close()
if verbose: print ('.. {} - done'.format(Path(lcfile).name))
zpf.close()
return self(file_key=file_key, target=target, verbose=verbose)
#----
@classmethod
def from_simulation(self, job, target=None, configFile=None,
version=0, verbose=True):
ftp=FTP('obsftp.unige.ch')
_ = ftp.login()
wd = "pub/cheops/simulated_data/CHEOPSim_job{}".format(job)
ftp.cwd(wd)
filelist = [fl[0] for fl in ftp.mlsd()]
_re = re.compile(r'CH_(PR\d{6}_TG\d{6}).zip')
zipfiles = list(filter(_re.match, filelist))
if len(zipfiles) > 1:
raise ValueError('More than one dataset in ftp directory')
if len(zipfiles) == 0:
raise ValueError('No zip files for datasets in ftp directory')
zipfile = zipfiles[0]
config = load_config(configFile)
_cache_path = config['DEFAULT']['data_cache_path']
zipPath = Path(_cache_path,zipfile)
if zipPath.is_file():
if verbose: print('{} already downloaded'.format(str(zipPath)))
else:
cmd = 'RETR {}'.format(zipfile)
if verbose: print('Downloading {} ...'.format(zipfile))
ftp.retrbinary(cmd, open(str(zipPath), 'wb').write)
file_key = "{}_V{:04d}".format(zipfile[:-4],version)
        # Validate the file_key; instance attributes are set in __init__
        m = _file_key_re.search(file_key)
        if m is None:
            raise ValueError('Invalid file_key {}'.format(file_key))
pdfFile = "{}_DataReduction.pdf".format(file_key)
pdfPath = Path(_cache_path,pdfFile)
if pdfPath.is_file():
if verbose: print('{} already downloaded'.format(pdfFile))
else:
_re = re.compile(r'CH_.*RPT_COR_DataReduction.*pdf')
pdffiles = list(filter(_re.match, filelist))
if len(pdffiles) > 0:
cmd = 'RETR {}'.format(pdffiles[0])
if verbose: print('Downloading {} ...'.format(pdfFile))
ftp.retrbinary(cmd, open(str(pdfPath), 'wb').write)
        ftp.quit()
tgzPath = Path(_cache_path,file_key).with_suffix('.tgz')
tgzfile = str(tgzPath)
zpf = ZipFile(str(zipPath), mode='r')
ziplist = zpf.namelist()
_re_im = re.compile('(CH_.*SCI_RAW_Imagette_.*.fits)')
_re_lc = re.compile('(CH_.*_SCI_COR_Lightcurve-.*fits)')
_re_meta = re.compile('(CH_.*SCI_RAW_HkCe-SubArray_.*.fits)')
with tarfile.open(tgzfile, mode='w:gz') as tgz:
metafiles = list(filter(_re_meta.match, ziplist))
if len(metafiles) > 1:
raise ValueError('More than one metadata file in zip file')
if len(metafiles) == 1:
if verbose: print("Writing metadata to .tgz file...")
metafile=metafiles[0]
tarPath = Path('visit')/Path(file_key)/Path(metafile).name
tarinfo = tarfile.TarInfo(name=str(tarPath))
zipinfo = zpf.getinfo(metafile)
tarinfo.size = zipinfo.file_size
zf = zpf.open(metafile)
tgz.addfile(tarinfo=tarinfo, fileobj=zf)
zf.close()
imgfiles = list(filter(_re_im.match, ziplist))
if len(imgfiles) > 1:
raise ValueError('More than one imagette file in zip file')
if len(imgfiles) == 1:
if verbose: print("Writing Imagette data to .tgz file...")
imgfile=imgfiles[0]
tarPath = Path('visit')/Path(file_key)/Path(imgfile).name
tarinfo = tarfile.TarInfo(name=str(tarPath))
zipinfo = zpf.getinfo(imgfile)
tarinfo.size = zipinfo.file_size
zf = zpf.open(imgfile)
tgz.addfile(tarinfo=tarinfo, fileobj=zf)
zf.close()
if verbose: print("Writing Lightcurve data to .tgz file...")
for lcfile in list(filter(_re_lc.match, ziplist)):
tarPath = Path('visit')/Path(file_key)/Path(lcfile).name
tarinfo = tarfile.TarInfo(name=str(tarPath))
zipinfo = zpf.getinfo(lcfile)
tarinfo.size = zipinfo.file_size
zf = zpf.open(lcfile)
tgz.addfile(tarinfo=tarinfo, fileobj=zf)
zf.close()
if verbose: print ('.. {} - done'.format(Path(lcfile).name))
zpf.close()
return self(file_key=file_key, target=target, verbose=verbose)
#------
def save(self, tag="", overwrite=False):
"""
Save the current Dataset instance as a pickle file
:param tag: string to tag different versions of the same Dataset
:param overwrite: set True to overwrite existing version of file
:returns: pickle file name
"""
fl = self.target.replace(" ","_")+'_'+tag+'_'+self.file_key+'.dataset'
if os.path.isfile(fl) and not overwrite:
msg = f'File {fl} exists. If you mean to replace it then '
msg += 'use the argument "overwrite=True".'
raise OSError(msg)
with open(fl, 'wb') as fp:
pickle.dump(self, fp, pickle.HIGHEST_PROTOCOL)
return fl
#------
@classmethod
def load(self, filename):
"""
Load a dataset from a pickle file
:param filename: pickle file name
:returns: dataset object
"""
with open(filename, 'rb') as fp:
self = pickle.load(fp)
return self
#----
def get_imagettes(self, verbose=True):
imFile = "{}-Imagette.fits".format(self.file_key)
imPath = Path(self.tgzfile).parent / imFile
if imPath.is_file():
with fits.open(imPath) as hdul:
cube = hdul['SCI_RAW_Imagette'].data
hdr = hdul['SCI_RAW_Imagette'].header
with warnings.catch_warnings():
warnings.simplefilter("ignore", UnitsWarning)
meta = Table.read(hdul['SCI_RAW_ImagetteMetadata'])
if verbose: print ('Imagette data loaded from ',imPath)
else:
if verbose: print ('Extracting imagette data from ',self.tgzfile)
            r=re.compile(r'(?!\.)(.*SCI_RAW_Imagette.*.fits)')
datafile = list(filter(r.match, self.list))
if len(datafile) == 0:
                raise Exception('Dataset does not contain imagette data.')
if len(datafile) > 1:
raise Exception('Multiple imagette data files in dataset')
tar = tarfile.open(self.tgzfile)
with tar.extractfile(datafile[0]) as fd:
hdul = fits.open(fd)
cube = hdul['SCI_RAW_Imagette'].data
hdr = hdul['SCI_RAW_Imagette'].header
with warnings.catch_warnings():
warnings.simplefilter("ignore", UnitsWarning)
meta = Table.read(hdul['SCI_RAW_ImagetteMetadata'])
hdul.writeto(imPath)
tar.close()
if verbose: print('Saved imagette data to ',imPath)
        self.imagettes = {'data':cube, 'header':hdr, 'meta':meta}
return cube
#----
def get_subarrays(self, verbose=True):
subFile = "{}-SubArray.fits".format(self.file_key)
subPath = Path(self.tgzfile).parent / subFile
if subPath.is_file():
with fits.open(subPath) as hdul:
cube = hdul['SCI_COR_SubArray'].data
with warnings.catch_warnings():
warnings.simplefilter("ignore", UnitsWarning)
hdr = hdul['SCI_COR_SubArray'].header
meta = Table.read(hdul['SCI_COR_ImageMetadata'])
if verbose: print ('Subarray data loaded from ',subPath)
else:
if verbose: print ('Extracting subarray data from ',self.tgzfile)
            r=re.compile(r'(?!\.)(.*SCI_COR_SubArray.*.fits)')
datafile = list(filter(r.match, self.list))
if len(datafile) == 0:
                r=re.compile(r'(?!\.)(.*SCI_RAW_SubArray.*.fits)')
datafile = list(filter(r.match, self.list))
if len(datafile) == 0:
                raise Exception('Dataset does not contain subarray data.')
if len(datafile) > 1:
raise Exception('Multiple subarray data files in dataset')
tar = tarfile.open(self.tgzfile)
with tar.extractfile(datafile[0]) as fd:
hdul = fits.open(fd)
if 'SCI_COR_SubArray' in hdul:
ext = 'SCI_COR_SubArray'
mext = 'SCI_COR_ImageMetadata'
elif 'SCI_RAW_SubArray' in hdul:
ext = 'SCI_RAW_SubArray'
mext = 'SCI_RAW_ImageMetadata'
else:
raise KeyError('No SubArray extension in file')
cube = hdul[ext].data
hdr = hdul[ext].header
with warnings.catch_warnings():
warnings.simplefilter("ignore", UnitsWarning)
meta = Table.read(hdul[mext])
hdul.writeto(subPath)
tar.close()
if verbose: print('Saved subarray data to ',subPath)
        self.subarrays = {'data':cube, 'header':hdr, 'meta':meta}
return cube
#----
def list_apertures(self):
r=re.compile('.*_SCI_COR_Lightcurve-(.*)_V.*.fits')
apertures = [r.match(f).group(1) for f in filter(r.match, self.list)]
apertures.sort()
return apertures
#----
def _get_table_(self, aperture, verbose):
lcFile = "{}-{}.fits".format(self.file_key, aperture)
lcPath = Path(self.tgzfile).parent / lcFile
if lcPath.is_file():
with fits.open(lcPath) as hdul:
with warnings.catch_warnings():
warnings.simplefilter('ignore', UnitsWarning)
table = Table.read(hdul[1])
hdr = hdul[1].header
if verbose: print ('Light curve data loaded from ',lcPath)
else:
if verbose: print ('Extracting light curve from ',self.tgzfile)
tar = tarfile.open(self.tgzfile)
            s = r'(?!\.)(.*_SCI_COR_Lightcurve-{}_V[0-9]{{4}}.fits)'
r=re.compile(s.format(aperture))
datafile = list(filter(r.match, self.list))
if len(datafile) == 0:
raise Exception('Dataset does not contain light curve data.')
if len(datafile) > 1:
raise Exception('Multiple light curve files in dataset')
with tar.extractfile(datafile[0]) as fd:
hdul = fits.open(fd)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UnitsWarning)
table = Table.read(hdul[1])
hdr = hdul[1].header
hdul.writeto(lcPath)
if verbose: print('Saved lc data to ',lcPath)
return table, hdr
#----
def get_lightcurve(self, aperture=None, decontaminate=None,
returnTable=False, reject_highpoints=False, verbose=True):
"""
Read light curve data for current data set for selected aperture.
By default, return time, flux and flux_error. Use returnTable=True to
return the full table of light curve data and metadata.
        Use reject_highpoints=True to automatically remove positive
        outliers.
:param aperture: use dataset.list_apertures() to list options
:param decontaminate: if True, subtract flux from background stars
:param returnTable:
:param reject_highpoints:
:param verbose:
:returns: time, flux, flux_err
The offset of the telescope tube temperature from its nominal value
(thermFront_2 + 12) is stored in dataset.lc['deltaT']
N.B. for PIPE data (aperture='PSF'), only data with FLAG=0 are used.
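        Example (a minimal sketch; aperture name and options are
        illustrative):
        >>> time, flux, flux_err = dataset.get_lightcurve('DEFAULT',
        ...     decontaminate=False, reject_highpoints=True)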
"""
if self.source == 'PIPE':
aperture = 'PSF'
if aperture not in self.list_apertures():
raise ValueError('Invalid/missing aperture name')
        if decontaminate not in (True, False) and aperture != 'PSF':
raise ValueError('Set decontaminate =True or =False')
table, hdr = self._get_table_(aperture, verbose)
if aperture == 'PSF':
ok = table['FLAG'] == 0
else:
ok = (table['EVENT'] == 0) | (table['EVENT'] == 100)
ok &= table['FLUX'] > 0
m = np.isnan(table['FLUX'])
if sum(m) > 0:
msg = "Light curve contains {} NaN values".format(sum(m))
warnings.warn(msg)
ok = ok & ~m
bjd = np.array(table['BJD_TIME'][ok])
bjd_ref = int(bjd[0])
self.bjd_ref = bjd_ref
time = bjd-bjd_ref
flux = np.array(table['FLUX'][ok])
flux_err = np.array(table['FLUXERR'][ok])
if aperture == 'PSF':
xc = table['XC'][ok]
xoff = np.array(xc - np.mean(xc))
yc = table['YC'][ok]
yoff = np.array(yc - np.mean(yc))
roll_angle = np.array(table['ROLL'][ok])
bg = np.array(table['BG'][ok])
contam = np.zeros_like(bjd)
ap_rad = np.nan
else:
xc = table['CENTROID_X'][ok]
yc = table['CENTROID_Y'][ok]
xoff = np.array(xc - table['LOCATION_X'][ok])
yoff = np.array(yc - table['LOCATION_Y'][ok])
roll_angle = np.array(table['ROLL_ANGLE'][ok])
bg = np.array(table['BACKGROUND'][ok])
contam = np.array(table['CONTA_LC'][ok])
ap_rad = hdr['AP_RADI']
try:
smear = np.array(table['SMEARING_LC'][ok])
        except KeyError:
smear = np.zeros_like(bjd)
try:
deltaT = np.array(self.metadata['thermFront_2'][ok]) + 12
        except Exception:
deltaT = np.zeros_like(bjd)
self.bjd_ref = bjd_ref
self.ap_rad = ap_rad
self.aperture = aperture
if verbose:
print('Time stored relative to BJD = {:0.0f}'.format(bjd_ref))
if self.aperture == 'PSF':
print('Photometry from PSF fitting')
else:
print('Aperture radius used = {:0.1f} arcsec'.format(ap_rad))
if 'V_STRT_U' in table.meta:
print('UTC start: ',table.meta['V_STRT_U'][0:19])
if 'V_STOP_U' in table.meta:
print('UTC end: ',table.meta['V_STOP_U'][0:19])
duration = (table['MJD_TIME'][-1] - table['MJD_TIME'][0])*86400
print('Visit duration: {:0.0f} s'.format(duration))
print('Exposure time: {} x {:0.1f} s'.format(self.nexp,
self.exptime))
xloc = np.median(xc)
yloc = np.median(yc)
print(f'Target location on CCD: ({xloc:0.1f}, {yloc:0.1f})')
            # time = bjd[ok] - bjd_ref, so len(time) is the number of
            # non-flagged points (ok is a mask over all table rows)
            eff = 100*len(time)/(1+duration/self.texptime)
            print('Number of non-flagged data points: {}'.format(len(time)))
print('Efficiency (non-flagged data): {:0.1f} %'.format(eff))
if aperture == 'PSF':
self.decontaminated = False
            if verbose and decontaminate:
                print('Ignored decontaminate=True for PSF photometry.')
elif decontaminate:
flux = flux/(1 + contam)
if verbose:
print('Light curve corrected for flux from background stars')
self.decontaminated = True
else:
if verbose:
print('Correction for flux from background stars not applied')
self.decontaminated = False
if reject_highpoints:
C_cut = (2*np.nanmedian(flux)-np.nanmin(flux))
ok = (flux < C_cut).nonzero()
time = time[ok]
flux = flux[ok]
flux_err = flux_err[ok]
xoff = xoff[ok]
yoff = yoff[ok]
xc = xc[ok]
yc = yc[ok]
roll_angle = roll_angle[ok]
bg = bg[ok]
contam = contam[ok]
smear = smear[ok]
deltaT = deltaT[ok]
N_cut = len(bjd) - len(time)
fluxmed = np.nanmedian(flux)
self.flux_mean = flux.mean()
self.flux_median = fluxmed
self.flux_rms = np.std(flux)
self.flux_mse = np.nanmedian(flux_err)
if verbose:
if reject_highpoints:
print('C_cut = {:0.0f}'.format(C_cut))
print('N(C > C_cut) = {}'.format(N_cut))
print('Mean counts = {:0.1f}'.format(self.flux_mean))
print('Median counts = {:0.1f}'.format(fluxmed))
print('RMS counts = {:0.1f} [{:0.0f} ppm]'.format(np.nanstd(flux),
1e6*np.nanstd(flux)/fluxmed))
print('Median standard error = {:0.1f} [{:0.0f} ppm]'.format(
np.nanmedian(flux_err), 1e6*np.nanmedian(flux_err)/fluxmed))
print('Median background = {:0.0f} e-'.format(np.median(bg)))
print('Mean contamination = {:0.1f} ppm'.format(1e6*contam.mean()))
print('Mean smearing correction = {:0.1f} ppm'.
format(1e6*smear.mean()/fluxmed))
if np.max(np.abs(deltaT)) > 0:
f = interp1d([22.5, 25, 30, 40], [140,200,330,400],
bounds_error=False, fill_value='extrapolate')
ramp = np.ptp(f(ap_rad)*deltaT)
print('Predicted amplitude of ramp = {:0.0f} ppm'.format(ramp))
flux = flux/fluxmed
flux_err = flux_err/fluxmed
smear = smear/fluxmed
bg = bg/fluxmed
self.lc = {'time':time, 'flux':flux, 'flux_err':flux_err,
'bjd_ref':bjd_ref, 'table':table, 'header':hdr,
'xoff':xoff, 'yoff':yoff, 'bg':bg,
'contam':contam, 'smear':smear, 'deltaT':deltaT,
'centroid_x':xc, 'centroid_y':yc,
'roll_angle':roll_angle, 'aperture':aperture}
if returnTable:
return table
else:
return time, flux, flux_err
def view_report(self, pdf_cmd=None, configFile=None):
'''
View the PDF DRP report.
:param pdf_cmd: command to launch PDF viewer with {} as placeholder for
            file name.
        :param configFile: pycheops configuration file (None for default)
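        Example (viewer command is illustrative):
        >>> dataset.view_report(pdf_cmd='evince {} &')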
'''
if pdf_cmd == None:
config = load_config(configFile)
try:
pdf_cmd = config['DEFAULT']['pdf_cmd']
except KeyError:
raise KeyError("Run pycheops.core.setup_config to set your"
" default PDF viewer")
pdfFile = "{}_DataReduction.pdf".format(self.file_key)
pdfPath = Path(self.tgzfile).parent/pdfFile
if not pdfPath.is_file():
tar = tarfile.open(self.tgzfile)
r = re.compile('(?!\.)(.*_RPT_COR_DataReduction_.*.pdf)')
report = list(filter(r.match, self.list))
if len(report) == 0:
raise Exception('Dataset does not contain DRP report.')
if len(report) > 1:
                raise Exception('Multiple reports in dataset')
print('Extracting report from .tgz file ...')
with tar.extractfile(report[0]) as fin:
with open(pdfPath,'wb') as fout:
for line in fin:
fout.write(line)
tar.close()
subprocess.run(pdf_cmd.format(pdfPath),shell=True)
#----
def animate_frames(self, nframes=10, vmin=1., vmax=1., subarray=True,
imagette=False, grid=False, aperture=None, writer='pillow',
figsize=(10,10), fontsize=12, linewidth=3):
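        """
        Display and save animations of the subarray and/or imagette frames.
        A sketch of the main options, as used in the code below:
        :param nframes: use every nframes-th frame from the image cube
        :param vmin: scale factor applied to the minimum frame intensity
        :param vmax: scale factor applied to the maximum frame intensity
        :param subarray: if True, animate the subarray frames
        :param imagette: if True, animate the imagette frames
        :param grid: if True, overlay a grid on the frames
        :param aperture: aperture radius in pixels, aperture name, or None
            to use the radius from the last call to get_lightcurve()
        :param writer: matplotlib animation writer
        :returns: matplotlib animation(s) for the selected frame type(s)
        """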
if aperture == None:
aperture = self.ap_rad
sub_anim, imag_anim = [], []
for hindex, h in enumerate([subarray, imagette]):
if h == True:
if hindex == 0:
if type(aperture) == str:
title = str(self.target) + " - subarray - " + aperture
else:
title = str(self.target) + " - subarray - R = " + str(aperture) + " pix"
try:
frame_cube = self.get_subarrays()[::nframes,:,:]
pltlims = 200
cen_x = self.lc['table']['CENTROID_X'][::nframes]-self.lc['table']['LOCATION_X'][::nframes]+(pltlims/2)
cen_y = self.lc['table']['CENTROID_Y'][::nframes]-self.lc['table']['LOCATION_Y'][::nframes]+(pltlims/2)
                    except Exception:
print("\nNo subarray data.")
continue
if hindex == 1:
if type(aperture) == str:
title = str(self.target) + " - imagette - " + aperture
else:
title = str(self.target) + " - imagette - R = " + str(aperture) + " pix"
try:
pltlims = 50
frame_cube = self.get_imagettes()[::nframes,:,:]
cen_x = self.lc['table']['CENTROID_X'][::nframes]-self.lc['table']['LOCATION_X'][::nframes]+(pltlims/2)
cen_y = self.lc['table']['CENTROID_Y'][::nframes]-self.lc['table']['LOCATION_Y'][::nframes]+(pltlims/2)
                    except Exception:
print("\nNo imagette data.")
continue
else:
continue
fig = plt.figure(figsize=figsize)
plt.rc('font', size=fontsize)
plt.xlabel("Row (pixel)")
plt.ylabel("Column (pixel)")
plt.xlim(-1,pltlims-1)
plt.ylim(-1,pltlims-1)
plt.title(title)
if grid:
ax = plt.gca()
ax.grid(color='w', linestyle='-', linewidth=1)
frames = []
for i in tqdm(range(len(frame_cube))):
ax = plt.gca()
                img_min = np.amin(frame_cube[i,:,:])
                if np.isnan(img_min):
                    img_min = 0
                img_max = np.amax(frame_cube[i,:,:])
                if np.isnan(img_max):
                    img_max = 200000
image = ax.imshow(frame_cube[i,:,:],
norm=colors.Normalize(vmin=vmin*img_min,
vmax=vmax*img_max),
origin="lower")
if aperture:
xpos,ypos = cen_x[i],cen_y[i]
if type(aperture) == int or type(aperture) == float:
circle1 = plt.Circle((xpos,ypos), aperture, color='r', lw=linewidth, fill=False, clip_on=True)
ax.add_patch(circle1)
else:
if aperture == "DEFAULT":
aprad = 25
elif aperture == "RINF":
aprad = 22
elif aperture == "RSUP":
aprad = 30
elif aperture == "OPTIMAL":
lcFile = "{}-{}.fits".format(self.file_key,aperture)
lcPath = Path(self.tgzfile).parent / lcFile
if lcPath.is_file():
with fits.open(lcPath) as hdul:
hdr = hdul[1].header
else:
tar = tarfile.open(self.tgzfile)
                                s = r'(?!\.)(.*_SCI_COR_Lightcurve-{}_.*.fits)'
r=re.compile(s.format(aperture))
datafile = list(filter(r.match, self.list))
with tar.extractfile(datafile[0]) as fd:
hdul = fits.open(fd)
hdr = hdul[1].header
aprad = hdr['AP_RADI']
circle1 = plt.Circle((xpos,ypos), aprad, color='r', lw=linewidth, fill=False, clip_on=True)
ax.add_patch(circle1)
frames.append([image,circle1])
# Suppress annoying logger warnings from animation module
logging.getLogger('matplotlib.animation').setLevel(logging.ERROR)
if hindex == 0:
sub_anim = animation.ArtistAnimation(fig, frames, blit=True)
sub_anim.save(title.replace(" ","")+'.gif', writer=writer)
with open(title.replace(" ","")+'.gif','rb') as file:
display(Image(file.read()))
print("Subarray is saved in the current directory as " +
title.replace(" ","")+'.gif')
elif hindex == 1:
imag_anim = animation.ArtistAnimation(fig, frames, blit=True)
imag_anim.save(title.replace(" ","")+'.gif', writer=writer)
with open(title.replace(" ","")+'.gif','rb') as file:
display(Image(file.read()))
print("Imagette is saved in the current directory as " +
title.replace(" ","")+'.gif')
plt.close()
if subarray and not imagette:
return sub_anim
elif imagette and not subarray:
return imag_anim
elif subarray and imagette:
return sub_anim, imag_anim
#----------------------------------------------------------------------------
# Eclipse and transit fitting
def __make_extra_basis_funcs__(self, extra_decorr_vectors, time, params):
# Also adds parameters 'dfd'+(vector key) to params
if extra_decorr_vectors == None:
return {}
print('Adding extra decorrelation basis vector functions.')
extra_basis_funcs = {}
vectors = extra_decorr_vectors.copy()
if 't' in vectors:
# pop 't' so it gets skipped when we loop over parameters
t = vectors.pop('t')
if (min(t) > max(time)) or (max(t) < min(time)):
raise ValueError('time array for extra basis vectors does'
' not overlap times in light curve')
else:
t = time
for v in vectors:
if not 'init' in vectors[v]:
p_init = (-1, 1)
else:
p_init = vectors[v]['init']
params['dfd'+v] = _kw_to_Parameter('dfd'+v, p_init)
x = vectors[v]['x']
if 'fill_value' in vectors[v]:
fill_value = vectors[v]['fill_value']
else:
fill_value = (x[0], x[-1])
extra_basis_funcs[v] = interp1d(t, x, bounds_error=False,
fill_value=fill_value, copy=False)
print(f'{v:12s}: mean = {x.mean():8.3f}'
f', std. dev. = {x.std():8.3f}'
f', min = {x.min():8.3f}'
f', max = {x.max():8.3f}')
return extra_basis_funcs
#----
def __factor_model__(self, scale, extra_basis_funcs=None):
time = np.array(self.lc['time'])
phi = self.lc['roll_angle']*np.pi/180
# For backwards compatibility
try:
smear = self.lc['smear']
except KeyError:
smear = np.zeros_like(time)
try:
deltaT = self.lc['deltaT']
except KeyError:
deltaT = np.zeros_like(time)
if scale:
F = FactorModel(
dx = _make_interp(time, self.lc['xoff'], scale='range'),
dy = _make_interp(time, self.lc['yoff'], scale='range'),
sinphi = _make_interp(time,np.sin(phi)),
cosphi = _make_interp(time,np.cos(phi)),
bg = _make_interp(time,self.lc['bg'], scale='range'),
contam = _make_interp(time,self.lc['contam'], scale='range'),
smear = _make_interp(time,smear, scale='range'),
deltaT = _make_interp(time,deltaT),
extra_basis_funcs=extra_basis_funcs)
else:
F = FactorModel(
dx = _make_interp(time, self.lc['xoff']),
dy = _make_interp(time, self.lc['yoff']),
sinphi = _make_interp(time,np.sin(phi)),
cosphi = _make_interp(time,np.cos(phi)),
bg = _make_interp(time,self.lc['bg']),
contam = _make_interp(time,self.lc['contam']),
smear = _make_interp(time,smear),
deltaT = _make_interp(time,deltaT),
extra_basis_funcs=extra_basis_funcs)
return F
#---
def lmfit_transit(self,
T_0=None, P=None, D=None, W=None, b=None, f_c=None, f_s=None,
h_1=None, h_2=None, l_3=None, scale=True,
c=None, dfdbg=None, dfdcontam=None, dfdsmear=None, ramp=None,
dfdx=None, dfdy=None, d2fdx2=None, d2fdy2=None,
dfdsinphi=None, dfdcosphi=None, dfdsin2phi=None, dfdcos2phi=None,
dfdsin3phi=None, dfdcos3phi=None, dfdt=None, d2fdt2=None,
glint_scale=None, logrhoprior=None, extra_decorr_vectors=None,
log_sigma=None):
"""
Fit a transit to the light curve in the current dataset.
Parameter values can be specified in one of the following ways:
* fixed value, e.g., P=1.234
* free parameter with uniform prior interval specified as a 2-tuple,
        e.g., dfdx=(-1,1). The initial value is taken as the mid-point of
the allowed interval;
* free parameter with uniform prior interval and initial value
specified as a 3-tuple, e.g., (0.1, 0.2, 1);
* free parameter with a Gaussian prior specified as a ufloat, e.g.,
ufloat(0,1);
* as an lmfit Parameter object.
        To enable decorrelation against a parameter, specify it as a free
parameter, e.g., dfdbg=(-1,1).
If scale=True (default), decorrelation is done against a scaled
version of the quantities xoff, yoff, bg, contam and smear with a
peak-to-peak range of 1. This means the coefficients dfdx, dfdy,
dfdbg, etc. correspond to the amplitude of the flux variation due to
the correlation with the relevant parameter.
Decorrelation against the telescope tube temperature can be included
using the parameter "ramp" which has units of ppm/degree_C. If
correct_ramp has been applied then this parameter should have a value
close to zero (within a few ppm/degree_C).
        The AIC and BIC values reported in the MinimizerResult object returned
        by this method are defined by
        - AIC = 2*k - 2*ln(Lmax)
        - BIC = k*ln(n) - 2*ln(Lmax)
        where
        - k = number of free parameters
        - n = number of data points
        - Lmax = maximum likelihood
A fixed value for the logarithm of the additional Gaussian white noise
        can be added to the flux measurements using the keyword log_sigma.
Arbitrary basis vectors for decorrelation specified by the user, each
with its own linear coefficient, can be included in the model using
the extra_decorr_vectors keyword. Use the keyword extra_decorr_vectors
        to specify these detrending basis vectors in the following format ...
        extra_decorr_vectors = {'t':t, 'a':{'x':a}, 'b':{'x':b}}
The times at which the basis vectors are sampled can be specified
        using the key 't'. Times are specified using the same time scale as
dataset.lc['time'], i.e. BJD_TDB-dataset.bjd_ref. Each basis vector
is then provided by the user using a dict with the value of the basis
function at these times specified as an array-like object provided
using the key 'x'. If 't' is not provided then the basis functions are
assumed to be sampled at the same times as dataset.lc['time']. An
exception is raised if the times specified do not overlap the same
time range as dataset.lc['time'].
The array of values provided for each basis vector are used to create
a linear interpolating function that can be used to evaluate the
basis function at arbitrary times. By default, the first/last value in
the array is used to extrapolate to times before/after the input array
of times. To specify different extrapolated values, use the
'fill_value' key to specify a value of the fill_value keyword to be
used in scipy.interpolate.interp1d, e.g.
extra_decorr_vectors = { 'a':{'x':a, 'fill_value':0},
'b':{'x':b, 'fill_value':np.mean(b)},
'c':{'x':c, 'fill_value':'extrapolate'} }
Summary statistics for each basis vector are printed on initialisation
of FactorModel. It is advisable to use basis vectors with a mean
value of 0 and with a range or standard deviation of about 1. It is
also advisable to avoid basis vectors that are strongly correlated
        with one another or other parameters being used for decorrelation.
By default, the coefficients for each basis vector are labeled in
plots using the key prefixed by 'dfd'. Alternative labels can be
specified using the 'label' key, e.g.
extra_decorr_vectors={'x2':{'x':dx**2,
'label':'$d^2f/d(\Delta x)^2$'}}
Initial values and priors for each linear coefficient can be specified
in the same way as other parameters used in dataset.lmfit_transit() or
dataset.lmfit_eclipse() using the 'init' key, e.g.
        extra_decorr_vectors = { 'a':{'x':a, 'init':(-2,2)},
                                 'b':{'x':b, 'init':ufloat(0,1)},
                                 'c':{'x':c, 'init':0} }
If not specified, the parameter is initialised using (-1, 1), i.e.
initial value = 0, min=-1, max=1.
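        Example (a minimal sketch; all parameter values are illustrative):
        >>> lmfit = dataset.lmfit_transit(T_0=(0.4, 0.6), P=3.21,
        ...     D=(0.001, 0.02), W=(0.01, 0.1), b=(0, 0.9),
        ...     dfdx=(-1, 1), dfdy=(-1, 1), dfdsinphi=(-1, 1))
        >>> print(dataset.lmfit_report())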
"""
def _chisq_prior(params, *args):
r = (flux - model.eval(params, t=time))/flux_err
for p in params:
u = params[p].user_data
if isinstance(u, UFloat):
r = np.append(r, (u.n - params[p].value)/u.s)
return r
try:
time = self.lc['time']
flux = self.lc['flux']
flux_err = self.lc['flux_err']
xoff = self.lc['xoff']
yoff = self.lc['yoff']
phi = self.lc['roll_angle']*np.pi/180
bg = self.lc['bg']
contam = self.lc['contam']
smear = self.lc['smear']
deltaT = self.lc['deltaT']
except AttributeError:
raise AttributeError("Use get_lightcurve() to load data first.")
params = Parameters()
if T_0 == None:
params.add(name='T_0', value=np.nanmedian(time),
min=min(time),max=max(time))
else:
params['T_0'] = _kw_to_Parameter('T_0', T_0)
if P == None:
params.add(name='P', value=1, vary=False)
else:
params['P'] = _kw_to_Parameter('P', P)
_P = params['P'].value
if D == None:
params.add(name='D', value=1-min(flux), min=0,max=0.5)
else:
params['D'] = _kw_to_Parameter('D', D)
k = np.sqrt(params['D'].value)
if W == None:
params.add(name='W', value=np.ptp(time)/2/_P,
min=np.ptp(time)/len(time)/_P, max=np.ptp(time)/_P)
else:
params['W'] = _kw_to_Parameter('W', W)
if b == None:
params.add(name='b', value=0.5, min=0, max=1)
else:
params['b'] = _kw_to_Parameter('b', b)
if f_c == None:
params.add(name='f_c', value=0, vary=False)
else:
params['f_c'] = _kw_to_Parameter('f_c', f_c)
if f_s == None:
params.add(name='f_s', value=0, vary=False)
else:
params['f_s'] = _kw_to_Parameter('f_s', f_s)
if l_3 == None:
params.add(name='l_3', value=0, vary=False)
else:
params['l_3'] = _kw_to_Parameter('l_3', l_3)
if h_1 == None:
params.add(name='h_1', value=0.7224, vary=False)
else:
params['h_1'] = _kw_to_Parameter('h_1', h_1)
if h_2 == None:
params.add(name='h_2', value=0.6713, vary=False)
else:
params['h_2'] = _kw_to_Parameter('h_2', h_2)
if c == None:
params.add(name='c', value=1, min=min(flux)/2,max=2*max(flux))
else:
params['c'] = _kw_to_Parameter('c', c)
# Error message for decorrelation against parameters with 0 range
zero_range_err = "Decorrelation against parameter with zero range - "
if dfdbg is not None:
if np.ptp(bg) == 0:
raise ValueError(zero_range_err+'bg')
params['dfdbg'] = _kw_to_Parameter('dfdbg', dfdbg)
if dfdcontam is not None:
if np.ptp(contam) == 0:
raise ValueError(zero_range_err+'contam')
params['dfdcontam'] = _kw_to_Parameter('dfdcontam', dfdcontam)
if dfdsmear is not None:
if np.ptp(smear) == 0:
raise ValueError(zero_range_err+'smear')
params['dfdsmear'] = _kw_to_Parameter('dfdsmear', dfdsmear)
if ramp is not None:
if np.ptp(deltaT) == 0:
raise ValueError(zero_range_err+'ramp')
params['ramp'] = _kw_to_Parameter('ramp', ramp)
if dfdx is not None:
if np.ptp(xoff) == 0:
raise ValueError(zero_range_err+'x')
params['dfdx'] = _kw_to_Parameter('dfdx', dfdx)
if dfdy is not None:
if np.ptp(yoff) == 0:
raise ValueError(zero_range_err+'y')
params['dfdy'] = _kw_to_Parameter('dfdy', dfdy)
if d2fdx2 is not None:
if np.ptp(xoff) == 0:
raise ValueError(zero_range_err+'x')
params['d2fdx2'] = _kw_to_Parameter('d2fdx2', d2fdx2)
if d2fdy2 is not None:
if np.ptp(yoff) == 0:
raise ValueError(zero_range_err+'y')
params['d2fdy2'] = _kw_to_Parameter('d2fdy2', d2fdy2)
if dfdt is not None:
params['dfdt'] = _kw_to_Parameter('dfdt', dfdt)
if d2fdt2 is not None:
params['d2fdt2'] = _kw_to_Parameter('d2fdt2', d2fdt2)
l = [dfdsinphi, dfdcosphi,dfdsin2phi,dfdcos2phi,dfdsin3phi,dfdcos3phi]
if (l.count(None) < 6) and (np.ptp(phi) == 0):
raise ValueError(zero_range_err+'phi')
if dfdsinphi is not None:
params['dfdsinphi'] = _kw_to_Parameter('dfdsinphi', dfdsinphi)
if dfdcosphi is not None:
params['dfdcosphi'] = _kw_to_Parameter('dfdcosphi', dfdcosphi)
if dfdsin2phi is not None:
params['dfdsin2phi'] = _kw_to_Parameter('dfdsin2phi', dfdsin2phi)
if dfdcos2phi is not None:
params['dfdcos2phi'] = _kw_to_Parameter('dfdcos2phi', dfdcos2phi)
if dfdsin3phi is not None:
params['dfdsin3phi'] = _kw_to_Parameter('dfdsin3phi', dfdsin3phi)
if dfdcos3phi is not None:
params['dfdcos3phi'] = _kw_to_Parameter('dfdcos3phi', dfdcos3phi)
if glint_scale is not None:
params['glint_scale']=_kw_to_Parameter('glint_scale', glint_scale)
# Derived parameters
params.add('k',expr='sqrt(D)',min=0,max=1)
params.add('aR',expr='sqrt((1+k)**2-b**2)/W/pi',min=1)
params.add('sini',expr='sqrt(1 - (b/aR)**2)')
        # Avoid use of aR in this expr for logrho - breaks error propagation.
expr = 'log10(4.3275e-4*((1+k)**2-b**2)**1.5/W**3/P**2)'
params.add('logrho',expr=expr,min=-9,max=6)
params['logrho'].user_data=logrhoprior
params.add('e',min=0,max=1,expr='f_c**2 + f_s**2')
params.add('q_1',min=0,max=1,expr='(1-h_2)**2')
params.add('q_2',min=0,max=1,expr='(h_1-h_2)/(1-h_2)')
# For eccentric orbits only from Winn, arXiv:1001.2010
if (params['e'].value>0) or params['f_c'].vary or params['f_s'].vary:
params.add('esinw',expr='sqrt(e)*f_s')
params.add('ecosw',expr='sqrt(e)*f_c')
params.add('b_tra',expr='b*(1-e**2)/(1+esinw)')
params.add('b_occ',expr='b*(1-e**2)/(1-esinw)')
params.add('T_tot',expr='P*W*sqrt(1-e**2)/(1+esinw)')
l = ['dfdbg','dfdcontam','dfdsmear','dfdx','dfdy','d2fdx2','d2fdy2']
        if any(p in l for p in params):
self.__scale__ = scale
else:
self.__scale__ = None
self.extra_decorr_vectors = extra_decorr_vectors
extra_basis_funcs = self.__make_extra_basis_funcs__(
extra_decorr_vectors, time, params)
self.__extra_basis_funcs__ = extra_basis_funcs
model = TransitModel()*self.__factor_model__(scale, extra_basis_funcs)
if 'glint_scale' in params.valuesdict().keys():
try:
f_theta = self.f_theta
f_glint = self.f_glint
except AttributeError:
raise AttributeError("Use add_glint() to first.")
model += Model(_glint_func, independent_vars=['t'],
f_theta=f_theta, f_glint=f_glint)
# Additional white noise
if log_sigma is not None:
flux_err = np.hypot(flux_err, np.exp(log_sigma))
params.add(name='log_sigma', value=log_sigma, vary=False)
result = minimize(_chisq_prior, params, nan_policy='propagate',
args=(model, time, flux, flux_err))
self.model = model
fit = model.eval(result.params,t=time)
result.bestfit = fit
result.rms = (flux-fit).std()
# Move priors out of result.residual into their own object and update
# result.ndata, result.chisqr, etc.
npriors = len(result.residual) - len(time)
if npriors > 0:
result.prior_residual = result.residual[-npriors:]
result.residual = result.residual[:-npriors]
result.npriors = npriors
result.ndata = len(time)
result.nfree = result.ndata - result.nvarys
result.chisqr = np.sum(result.residual**2)
result.redchi = result.chisqr/(result.ndata-result.nvarys)
# Renormalize AIC and BIC so they are consistent with emcee values
lnlike = -0.5*np.sum(result.residual**2 + np.log(2*np.pi*flux_err**2))
result.lnlike = lnlike
result.aic = 2*result.nvarys-2*lnlike
result.bic = result.nvarys*np.log(result.ndata) - 2*lnlike
self.lmfit = result
self.__lastfit__ = 'lmfit'
return result
# ----------------------------------------------------------------
def correct_ramp(self, beta=None, plot=False, force=False,
figsize=(6,3), fontsize=12):
"""
Linear correction for ramp effect based on telescope tube temperature.
A flux ramp is often observed in the beginning of a visit with an
amplitude of a few hundred ppm (either positive or negative) and
decaying over a time scale of several hours. This ramp is due to a
small scale change in the shape of the PSF. This in turn can be
understood as a slight focus change as a result of a thermal
adaptation of the telescope tube to the new heat load by the thermal
radiation from the Earth. This thermal adaptation (*breathing*) is
monitored by thermal sensors in the tube.
At the time of writing (Dec 2020) several algorithms are being
investigated to correct for this ramp effect. One algorithm that is
simple to implement and seems to work quite well is to correct the
measured flux using the equation
        Flux_corrected = Flux_measured * (1 + beta*deltaT)
where deltaT = T_thermFront_2 + 12
The following values of the coefficient beta have been determined by
Goran Olofsson.
| Aperture | beta |
|:---------|:--------|
| 22.5 | 0.00014 |
| 25.0 | 0.00020 |
| 30.0 | 0.00033 |
| 40.0 | 0.00040 |
This routine uses linear interpolation in this table to predict the
slope beta for the aperture radius of the light curve.
:param beta: user-defined value of beta (None to use value from table)
:param plot: plot flux values before/after correction v. deltaT
:param force: apply ramp correction even if already corrected
:returns: time, flux, flux_err
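        For example (illustrative), for ap_rad = 26 pixels linear
        interpolation in the table above gives
        beta = 0.00020 + (26-25)/(30-25)*(0.00033-0.00020) = 0.000226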
"""
if hasattr(self,'ramp_correction'):
if force:
warnings.warn('Ramp correction already applied')
else:
raise Exception('Ramp correction already applied')
T = self.lc['deltaT']
flux = self.lc['flux']
if beta == None:
f = interp1d([22.5, 25, 30, 40], [0.00014,0.00020,0.00033,0.00040],
bounds_error=False, fill_value='extrapolate')
beta = f(self.ap_rad)
if (self.ap_rad < 22.5) or (self.ap_rad > 40):
warnings.warn("Ramp correction extrapolated")
fcor = flux * (1+beta*T)
self.ramp_correction = True
if plot:
plt.rc('font', size=fontsize)
fig,ax=plt.subplots(figsize=figsize)
ax.plot(T, flux, 'o',c='skyblue',ms=2, label='Measured')
ax.plot(T, 1+beta*T, c='skyblue')
ax.plot(T, fcor, 'o',c='midnightblue',ms=2,label='Corrected')
ax.set_xlabel(r'T$_{\rm thermFront\_2} +12^{\circ}$ C')
ax.set_ylabel(r'Flux')
ax.legend()
self.lc['flux'] = fcor
return self.lc['time'], self.lc['flux'], self.lc['flux_err']
# ----------------------------------------------------------------
def add_glint(self, nspline=8, mask=None, fit_flux=False,
moon=False, angle0=None, gapmax=30,
show_plot=True, binwidth=15, figsize=(6,3), fontsize=11):
"""
Adds a glint model to the current dataset.
The glint model is a smooth function v. roll angle that can be scaled
to account for artefacts in the data caused by internal reflections.
If moon=True the roll angle is measured relative to the apparent
direction of the Moon, i.e. assume that the glint is due to
moonlight.
        To use this model, include the parameter glint_scale in the
lmfit least-squares fit.
* nspline - number of splines in the fit
* mask - fit only data for which mask array is False
* fit_flux - fit flux rather than residuals from previous fit
* moon - use roll-angle relative to apparent Moon direction
        * angle0 - dependent variable is (roll angle - angle0)
        * gapmax - parameter to identify large gaps in data - used to
        calculate angle0 if not specified by the user.
* show_plot - default is to show a plot of the fit
* binwidth - in degrees for binned points on plot (or None to ignore)
* figsize -
* fontsize -
Returns the glint function as a function of roll angle/moon angle.
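        Example (a minimal sketch; assumes an initial lmfit fit has been done
        and the glint_scale prior is illustrative):
        >>> dataset.add_glint(nspline=10, moon=True)
        >>> lmfit = dataset.lmfit_transit(T_0=(0.4, 0.6), P=3.21,
        ...     D=(0.001, 0.02), W=(0.01, 0.1), b=(0, 0.9),
        ...     glint_scale=(0, 2))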
"""
try:
time = np.array(self.lc['time'])
flux = np.array(self.lc['flux'])
angle = np.array(self.lc['roll_angle'])
except AttributeError:
raise AttributeError("Use get_lightcurve() to load data first.")
if moon:
bjd = Time(self.bjd_ref+self.lc['time'],format='jd',scale='tdb')
moon_coo = get_body('moon', bjd)
target_coo = SkyCoord(self.ra,self.dec,unit=('hour','degree'))
ra_m = moon_coo.ra.radian
ra_s = target_coo.ra.radian
dec_m = moon_coo.dec.radian
dec_s = target_coo.dec.radian
            v_moon = np.arccos(
np.cos(ra_m)*np.cos(dec_m)*np.cos(ra_s)*np.cos(dec_s) +
np.sin(ra_m)*np.cos(dec_m)*np.sin(ra_s)*np.cos(dec_s) +
np.sin(dec_m)*np.sin(dec_s))
dv_rot = np.degrees(np.arcsin(np.sin(ra_m-ra_s)*np.cos(dec_m)/
np.sin(v_moon)))
angle -= dv_rot
if fit_flux:
y = flux - 1
else:
l = self.__lastfit__
fit = self.emcee.bestfit if l == 'emcee' else self.lmfit.bestfit
y = flux - fit
if angle0 == None:
x = np.sort(angle)
gap = np.hstack((x[0], x[1:]-x[:-1]))
if max(gap) > gapmax:
angle0 = x[np.argmax(gap)]
else:
angle0 = 0
if abs(angle0) < 0.01:
if moon:
xlab = r'Moon angle [$^{\circ}$]'
else:
xlab = r'Roll angle [$^{\circ}$]'
xlim = (0,360)
theta = angle % 360
else:
if moon:
xlab = r'Moon angle - {:0.0f}$^{{\circ}}$'.format(angle0)
else:
xlab = r'Roll angle - {:0.0f}$^{{\circ}}$'.format(angle0)
theta = (360 + angle - angle0) % 360
xlim = (min(theta),max(theta))
f_theta = _make_interp(time, theta)
if mask is not None:
time = time[~mask]
theta = theta[~mask]
y = y[~mask]
# Copies of data for theta-360 and theta+360 used to make
# interpolating function periodic
y = y - np.nanmedian(y)
y = y[np.argsort(theta)]
x = np.sort(theta)
t = np.linspace(min(x),max(x),1+nspline,endpoint=False)[1:]
x = np.hstack([x-360,x,x+360])
y = np.hstack([y,y,y])
t = np.hstack([t-360,t,t+360])
f_glint = LSQUnivariateSpline(x,y,t,ext='const')
self.glint_moon = moon
self.glint_angle0 = angle0
self.f_theta = f_theta
self.f_glint = f_glint
if show_plot:
plt.rc('font', size=fontsize)
fig,ax=plt.subplots(nrows=1, figsize=figsize, sharex=True)
ax.plot(x, y, 'o',c='skyblue',ms=2)
if binwidth:
r_, f_, e_, n_ = lcbin(x, y, binwidth=binwidth)
ax.errorbar(r_,f_,yerr=e_,fmt='o',c='midnightblue',ms=5,
capsize=2)
ax.set_xlim(xlim)
ylim = np.max(np.abs(y))+0.05*np.ptp(y)
ax.set_ylim(-ylim,ylim)
xt = np.linspace(xlim[0],xlim[1],10001)
yt = f_glint(xt)
ax.plot(xt, yt, color='saddlebrown')
ax.set_xlabel(xlab)
ax.set_ylabel('Glint')
return f_glint(f_theta(time))
# ----------------------------------------------------------------
def lmfit_eclipse(self,
T_0=None, P=None, D=None, W=None, b=None, L=None,
f_c=None, f_s=None, l_3=None, a_c=None, dfdbg=None,
dfdcontam=None, dfdsmear=None, ramp=None, scale=True,
c=None, dfdx=None, dfdy=None, d2fdx2=None, d2fdy2=None,
dfdsinphi=None, dfdcosphi=None, dfdsin2phi=None, dfdcos2phi=None,
dfdsin3phi=None, dfdcos3phi=None, dfdt=None, d2fdt2=None,
glint_scale=None, extra_decorr_vectors=None, log_sigma=None):
"""
See lmfit_transit for options
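        Example (a minimal sketch; all parameter values are illustrative):
        >>> lmfit = dataset.lmfit_eclipse(T_0=0.5, P=3.21, D=0.01,
        ...     W=0.05, b=0.3, L=(0, 0.001))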
"""
def _chisq_prior(params, *args):
r = (flux - model.eval(params, t=time))/flux_err
for p in params:
u = params[p].user_data
if isinstance(u, UFloat):
r = np.append(r, (u.n - params[p].value)/u.s)
return r
try:
time = self.lc['time']
flux = self.lc['flux']
flux_err = self.lc['flux_err']
xoff = self.lc['xoff']
yoff = self.lc['yoff']
phi = self.lc['roll_angle']*np.pi/180
bg = self.lc['bg']
contam = self.lc['contam']
smear = self.lc['smear']
deltaT = self.lc['deltaT']
except AttributeError:
raise AttributeError("Use get_lightcurve() to load data first.")
params = Parameters()
if T_0 == None:
params.add(name='T_0', value=np.nanmedian(time),
min=min(time),max=max(time))
else:
params['T_0'] = _kw_to_Parameter('T_0', T_0)
if P == None:
params.add(name='P', value=1, vary=False)
else:
params['P'] = _kw_to_Parameter('P', P)
_P = params['P'].value
if D == None:
params.add(name='D', value=1-min(flux), min=0,max=0.5)
else:
params['D'] = _kw_to_Parameter('D', D)
k = np.sqrt(params['D'].value)
if W == None:
params.add(name='W', value=np.ptp(time)/2/_P,
min=np.ptp(time)/len(time)/_P, max=np.ptp(time)/_P)
else:
params['W'] = _kw_to_Parameter('W', W)
if b == None:
params.add(name='b', value=0.5, min=0, max=1)
else:
params['b'] = _kw_to_Parameter('b', b)
if L == None:
params.add(name='L', value=0.001, min=0, max=1)
else:
params['L'] = _kw_to_Parameter('L', L)
if f_c == None:
params.add(name='f_c', value=0, vary=False)
else:
params['f_c'] = _kw_to_Parameter('f_c', f_c)
if f_s == None:
params.add(name='f_s', value=0, vary=False)
else:
params['f_s'] = _kw_to_Parameter('f_s', f_s)
if l_3 == None:
params.add(name='l_3', value=0, vary=False)
else:
params['l_3'] = _kw_to_Parameter('l_3', l_3)
if c == None:
params.add(name='c', value=1, min=min(flux)/2,max=2*max(flux))
else:
params['c'] = _kw_to_Parameter('c', c)
if a_c == None:
params.add(name='a_c', value=0, vary=False)
else:
params['a_c'] = _kw_to_Parameter('a_c', a_c)
# Error message for decorrelation against parameters with 0 range
zero_range_err = "Decorrelation against parameter with zero range - "
if dfdbg is not None:
if np.ptp(bg) == 0:
raise ValueError(zero_range_err+'bg')
params['dfdbg'] = _kw_to_Parameter('dfdbg', dfdbg)
if dfdcontam is not None:
if np.ptp(contam) == 0:
raise ValueError(zero_range_err+'contam')
params['dfdcontam'] = _kw_to_Parameter('dfdcontam', dfdcontam)
if dfdsmear is not None:
if np.ptp(smear) == 0:
raise ValueError(zero_range_err+'smear')
params['dfdsmear'] = _kw_to_Parameter('dfdsmear', dfdsmear)
if ramp is not None:
if np.ptp(deltaT) == 0:
raise ValueError(zero_range_err+'ramp')
params['ramp'] = _kw_to_Parameter('ramp', ramp)
if dfdx is not None:
if np.ptp(xoff) == 0:
raise ValueError(zero_range_err+'x')
params['dfdx'] = _kw_to_Parameter('dfdx', dfdx)
if dfdy is not None:
if np.ptp(yoff) == 0:
raise ValueError(zero_range_err+'y')
params['dfdy'] = _kw_to_Parameter('dfdy', dfdy)
if d2fdx2 is not None:
if np.ptp(xoff) == 0:
raise ValueError(zero_range_err+'x')
params['d2fdx2'] = _kw_to_Parameter('d2fdx2', d2fdx2)
if d2fdy2 is not None:
if np.ptp(yoff) == 0:
raise ValueError(zero_range_err+'y')
params['d2fdy2'] = _kw_to_Parameter('d2fdy2', d2fdy2)
if dfdt is not None:
params['dfdt'] = _kw_to_Parameter('dfdt', dfdt)
if d2fdt2 is not None:
params['d2fdt2'] = _kw_to_Parameter('d2fdt2', d2fdt2)
l = [dfdsinphi, dfdcosphi,dfdsin2phi,dfdcos2phi,dfdsin3phi,dfdcos3phi]
if (l.count(None) < 6) and (np.ptp(phi) == 0):
raise ValueError(zero_range_err+'phi')
if dfdsinphi is not None:
params['dfdsinphi'] = _kw_to_Parameter('dfdsinphi', dfdsinphi)
if dfdcosphi is not None:
params['dfdcosphi'] = _kw_to_Parameter('dfdcosphi', dfdcosphi)
if dfdsin2phi is not None:
params['dfdsin2phi'] = _kw_to_Parameter('dfdsin2phi', dfdsin2phi)
if dfdcos2phi is not None:
params['dfdcos2phi'] = _kw_to_Parameter('dfdcos2phi', dfdcos2phi)
if dfdsin3phi is not None:
params['dfdsin3phi'] = _kw_to_Parameter('dfdsin3phi', dfdsin3phi)
if dfdcos3phi is not None:
params['dfdcos3phi'] = _kw_to_Parameter('dfdcos3phi', dfdcos3phi)
if glint_scale is not None:
params['glint_scale']=_kw_to_Parameter('glint_scale', glint_scale)
# Derived parameters
params.add('k',expr='sqrt(D)',min=0,max=1)
params.add('aR',expr='sqrt((1+k)**2-b**2)/W/pi',min=1)
params.add('sini',expr='sqrt(1 - (b/aR)**2)')
params.add('e',min=0,max=1,expr='f_c**2 + f_s**2')
# For eccentric orbits only from Winn, arXiv:1001.2010
if (params['e'].value>0) or params['f_c'].vary or params['f_s'].vary:
params.add('esinw',expr='sqrt(e)*f_s')
params.add('ecosw',expr='sqrt(e)*f_c')
params.add('b_tra',expr='b*(1-e**2)/(1+esinw)')
params.add('b_occ',expr='b*(1-e**2)/(1-esinw)')
params.add('T_tot',expr='P*W*sqrt(1-e**2)/(1-esinw)')
l = ['dfdbg','dfdcontam','dfdsmear','dfdx','dfdy']
        if any(p in l for p in params):
self.__scale__ = scale
else:
self.__scale__ = None
self.extra_decorr_vectors = extra_decorr_vectors
extra_basis_funcs = self.__make_extra_basis_funcs__(
extra_decorr_vectors, time, params)
self.__extra_basis_funcs__ = extra_basis_funcs
model = EclipseModel()*self.__factor_model__(scale, extra_basis_funcs)
if 'glint_scale' in params.valuesdict().keys():
try:
f_theta = self.f_theta
f_glint = self.f_glint
except AttributeError:
raise AttributeError("Use add_glint() to first.")
model += Model(_glint_func, independent_vars=['t'],
f_theta=f_theta, f_glint=f_glint)
# Additional white noise
if log_sigma is not None:
flux_err = np.hypot(flux_err, np.exp(log_sigma))
params.add(name='log_sigma', value=log_sigma, vary=False)
result = minimize(_chisq_prior, params, nan_policy='propagate',
args=(model, time, flux, flux_err))
self.model = model
fit = model.eval(result.params,t=time)
result.bestfit = fit
result.rms = (flux-fit).std()
# Move priors out of result.residual into their own object and update
# result.ndata, result.chisqr, etc.
npriors = len(result.residual) - len(time)
if npriors > 0:
result.prior_residual = result.residual[-npriors:]
result.residual = result.residual[:-npriors]
result.npriors = npriors
result.ndata = len(time)
result.nfree = result.ndata - result.nvarys
result.chisqr = np.sum(result.residual**2)
result.redchi = result.chisqr/(result.ndata-result.nvarys)
# Renormalize AIC and BIC so they are consistent with emcee values
lnlike = -0.5*np.sum(result.residual**2 + np.log(2*np.pi*flux_err**2))
result.lnlike = lnlike
result.aic = 2*result.nvarys-2*lnlike
result.bic = result.nvarys*np.log(result.ndata) - 2*lnlike
self.lmfit = result
self.__lastfit__ = 'lmfit'
return result
# ----------------------------------------------------------------
def lmfit_report(self, **kwargs):
report = fit_report(self.lmfit, **kwargs)
rms = self.lmfit.rms*1e6
s = " RMS residual = {:0.1f} ppm\n".format(rms)
j = report.index('[[Variables]]')
report = report[:j] + s + report[j:]
noPriors = True
params = self.lmfit.params
parnames = list(params.keys())
namelen = max([len(n) for n in parnames])
for p in params:
u = params[p].user_data
if isinstance(u, UFloat):
if noPriors:
report+="\n[[Priors]]"
noPriors = False
report += "\n %s:%s" % (p, ' '*(namelen-len(p)))
report += '%s +/-%s' % (gformat(u.n), gformat(u.s))
# Bayes factors
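        # For a decorrelation parameter with Gaussian prior u and fitted
        # value v +/- s, the Bayes factor in favour of dropping it is
        # estimated as B = exp(-0.5*((v-u.n)/s)**2) * u.s/s
        # (see Maxted et al. 2022MNRAS.514...77M, section 2.7.2).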
noBayes = True
for p in params:
u = params[p].user_data
if (isinstance(u, UFloat) and
(p.startswith('dfd') or p.startswith('d2f') or
(p == 'ramp') or (p == 'glint_scale') ) ):
if noBayes:
report+="\n[[Bayes Factors]] "
report+="(values >~1 => free parameter may not be useful)"
noBayes = False
v = params[p].value
s = params[p].stderr
if s is not None:
B = np.exp(-0.5*((v-u.n)/s)**2) * u.s/s
report += "\n %s:%s" % (p, ' '*(namelen-len(p)))
report += ' %12.3f' % (B)
# Decorrelation parameter scaling
has_notes = False
if self.__scale__ is not None:
has_notes = True
report += '\n[[Notes]]'
if self.__scale__:
report +='\n Decorrelation parameters were scaled'
else:
report +='\n Decorrelation parameters were not scaled'
if params['e'].value > 0:
if not has_notes:
report += '\n[[Notes]]'
has_notes = True
report +='\n T_tot from Winn, arXiv:1001.2010 is approximate'
report += '\n[[Software versions]]'
report += '\n CHEOPS DRP : %s' % self.pipe_ver
report += '\n pycheops : %s' % __version__
report += '\n lmfit : %s' % _lmfit_version_
        return report
# ----------------------------------------------------------------
def select_detrend(self, max_bayes_factor=1, exclude=None,
keep_original=False, dprior=None, tprior=None,
t2prior=None, verbose=True):
"""
Select choice of detrending model coefficients using Bayes factors
See Maxted et al. 2022MNRAS.514...77M section 2.7.2 for an explanation
of how the Bayes factor is calculated for models with/without a given
decorrelation parameter. As suggested, decorrelation parameters are
        added one-by-one, selecting the parameter that has the lowest Bayes
        factor at each step until no parameters have a Bayes factor <
        max_bayes_factor. To avoid overfitting, any parameters that then have
        a Bayes factor > max_bayes_factor are removed one-by-one.
A least-squares fit to the light curve using lmfit_transit() or
lmfit_eclipse() must be run succesfully prior to calling
select_detrend(). Any detrending parameters included in this prior
least-squares fit will be included in the dictionary of detrending
parameters returned by this method, irrespective of their Bayes
factor.
Use exclude=[] to specify a list of decorrelation parameters that
should never be included in the decorrelation model, irrespective of
their Bayes factors.
If dprior=None (default) then the priors on all decorrelation
parameters apart from dfdt and d2fdt2 are Gaussians with mean of 0 and
standard deviation equal to the root mean square residual (rms) of the
prior least-squares fit. Otherwise, the priors on these decorrelation
parameters are Gaussians with mean of 0 and standard deviation
specified by the user using this keyword.
If tprior=None (default) then the prior on dfdt is a Gaussian with
mean of 0 and standard deviation dprior/ptp(time), where ptp(time) is
the length of time (in days) covered by the light curve. Otherwise,
the prior on this decorrelation parameter is a Gaussian with mean of 0
and standard deviation specified by the user using this keyword.
If t2prior=None (default) then the prior on d2fdt2 is a Gaussian with
mean of 0 and standard deviation dprior/ptp(time)**2. Otherwise, the
prior on this decorrelation parameter is a Gaussian with mean of 0 and
standard deviation specified by the user using this keyword.
If keep_original=False (default), detrending parameters from the last
least-squares fit will be removed based on the Bayes factor calculated
with the Gaussian prior specified in that fit for each parameter, if
present. If no Gaussian prior was specified, dprior, tprior or t2prior
is used to calculate the Bayes factor, as appropriate.
N.B. the prior least-squares fit is not affected by running
dataset.select_detrend(). To overwrite the prior least-squares fit,
call lmfit_transit() or lmfit_eclipse() including the
argument "**detrend" in the argument list, where "detrend" in the
python dict returned by dataset.select_detrend().
:param max_bayes_factor: Bayes factor limit for selection
:param exclude: list of coefficients to exclude
:param dprior: default Gaussian prior (ufloat)
:param tprior: Gaussian prior for dfdt (ufloat)
:param t2prior: Gaussian prior for d2fdt2 (ufloat)
:param keep_original: Do not reject parameters from original lmfit
:param verbose: set False to suppress printed output
:returns: python dict of selected detrending coefficients
Example
-------
>>> lmfit0 = dataset.lmfit_transit(P=0.123, T_0=0.654)
>>> x = ['d2fdt2','d2fdx2','d2fdy2']
>>> detrend = dataset.select_detrend(exclude=x, max_bayes_factor=0.5)
        >>> lmfit = dataset.lmfit_transit(P=0.123, T_0=0.654, **detrend)
"""
def _chisq_prior(params, *args):
r = (flux - model.eval(params, t=time))/flux_err
for p in params:
u = params[p].user_data
if isinstance(u, UFloat):
r = np.append(r, (u.n - params[p].value)/u.s)
return r
try:
time = self.lc['time']
flux = self.lc['flux']
flux_err = self.lc['flux_err']
except AttributeError:
raise AttributeError("Use get_lightcurve() to load data first.")
try:
params = self.lmfit.params.copy()
except AttributeError:
raise AttributeError('no valid lmfit result in dataset.')
if verbose:
print('Parameter BF Delta_BIC RMS(ppm)')
allpar = ['dfdsinphi', 'dfdsin2phi', 'dfdsin3phi',
'dfdcos3phi', 'dfdcosphi', 'dfdcos2phi',
'dfdx', 'd2fdx2', 'dfdy', 'd2fdy2',
'dfdsmear', 'dfdbg', 'dfdcontam',
'dfdt', 'd2fdt2']
if keep_original:
keep = [p for p in params]
else:
keep = []
if dprior == None:
dprior = ufloat(0, self.lmfit.rms)
if tprior == None:
tprior = dprior/np.ptp(time)
if t2prior == None:
t2prior = dprior/np.ptp(time)**2
user_priors = {}
for p in params:
if (p in allpar):
u = params[p].user_data
if isinstance(u, UFloat):
user_priors[p] = u
def pprior(p):
if p in user_priors:
return user_priors[p]
elif p == 'dfdt':
return tprior
elif p == 'd2fdt2':
return t2prior
else:
return dprior
detrend = {}
for p in params:
if (p in allpar):
allpar.remove(p)
detrend[p] = pprior(p)
if exclude != None:
for p in exclude:
if (p in allpar):
allpar.remove(p)
model = self.model
result0 = minimize(_chisq_prior, params, nan_policy='propagate',
args=(model, time, flux, flux_err))
bestbf = 0
lastbic = result0.bic
while bestbf < max_bayes_factor:
bestbf = np.inf
for p in allpar:
partmp = params.copy()
partmp[p] = Parameter(p,value=0,user_data=pprior(p))
result = minimize(_chisq_prior, partmp, nan_policy='propagate',
args=(model, time, flux, flux_err))
v = result.params[p].value
s = result.params[p].stderr
if s != None:
bf = np.exp(-0.5*((v-pprior(p).n)/s)**2) * pprior(p).s/s
if bf < bestbf:
bestbf = bf
newpar = p
if bestbf < max_bayes_factor:
p = newpar
detrend[p] = pprior(p)
params[p] = Parameter(p,value=0,user_data=pprior(p))
result = minimize(_chisq_prior, params, nan_policy='propagate',
args=(model, time, flux, flux_err))
if verbose:
dbic = result.bic - lastbic
lastbic = result.bic
rms = (flux-model.eval(result.params,t=time)).std()
print(f'+{newpar:<12s} {bestbf:6.2f} {dbic:8.1f}'
f' {rms*1e6:8.1f}')
allpar.remove(newpar)
worstbf = max_bayes_factor + 1
while worstbf > max_bayes_factor:
worstbf = 0
for p in [p for p in detrend if p not in keep]:
v = result.params[p].value
s = result.params[p].stderr
if s != None:
bf = np.exp(-0.5*((v-pprior(p).n)/s)**2) * pprior(p).s/s
if bf > worstbf:
worstbf = bf
delpar = p
if worstbf > max_bayes_factor:
del params[delpar]
del detrend[delpar]
result = minimize(_chisq_prior, params, nan_policy='propagate',
args=(model, time, flux, flux_err))
if verbose:
dbic = result.bic - lastbic
lastbic = result.bic
rms = (flux-model.eval(result.params,t=time)).std()
                    print(f'-{delpar:<12s} {worstbf:6.2f} {dbic:8.1f}'
                          f' {rms*1e6:8.1f}')
return detrend
# ----------------------------------------------------------------
def aperture_scan(self, xy_detrend_fixed=True, data_match=True,
verbose=True, return_full=False, ramp=None,
extra_decorr_vectors=None, copy_initial=False):
"""
Repeat lmfit fit to light curve for all available apertures
If data_match=True (default), all data that have been removed from the
light curve in the current dataset are excluded from the fits.
If ramp=None (default), ramp correction is applied if and only if ramp
correction has been applied to the light curve in the current dataset.
Set ramp=False or ramp=True to force ramp correction off or on,
respectively.
If xy_detrend_fixed=True (default) then dfdx and dfdy are included in
the fit to the "FIXED" aperture(s), whether or not they were included
in the previous fit to the light curve.
If verbose=True (default), a summary of the results is printed to the
terminal.
If copy_initial=False (default) then the initial parameter values are
taken from the last best-fit values using lmfit_transit() or
lmfit_eclipse(). If copy_initial=True, the initial parameter values
will be the same as the initial values from the last call to
        lmfit_transit() or lmfit_eclipse().
If return_full=True, return a dict that includes the MinimizerResult
objects for each aperture. Default is False, in which case an astropy
Table is returned containing a summary of the fits to each aperture.
N.B. the MinimizerResult object includes any Gaussian priors on
        parameters as part of the data, i.e. n_data = n_obs + n_priors
The signal-to-noise ratio (SNR) given in the output from this method
is (depth)/(standard error on depth) for the depth of the eclipse or
transit, depending on whether the prior least-squares fit the current
light curve was done using lmfit_transit() or lmfit_eclipse().
N.B. the fits to the light curves for each aperture will do the
equivalent of "scale=True", even if the previous least-squares fit to
the light curve used scale=False.
N.B. the existing light curve in the current dataset is not affected
        by running aperture_scan(). Use get_lightcurve() to change the choice
        of aperture for the light curve in the current dataset based on the
results from aperture_scan().
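        Example (a minimal sketch, run after a successful lmfit fit; selects
        the aperture with the lowest rms):
        >>> T = dataset.aperture_scan()
        >>> T.sort('rms')
        >>> time, flux, flux_err = dataset.get_lightcurve(T['aperture'][0],
        ...     decontaminate=False)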
"""
try:
bjd0 = self.lc['time'] + self.bjd_ref
except AttributeError:
raise AttributeError("Use get_lightcurve() to load data first.")
if self.source != 'CHEOPS':
raise TypeError('aperture_scan only available for CHEOPS data')
try:
params = self.lmfit.params.copy()
except AttributeError:
raise AttributeError('no valid lmfit result in dataset.')
aplist = self.list_apertures()
# Re-order apertures so that they are in radius order with
        # DEFAULT ahead of R25 and RINF ahead of R23
if ('DEFAULT' in aplist) and ('R25' in aplist):
aplist.remove('DEFAULT')
aplist.insert(aplist.index('R25'),'DEFAULT')
if ('RINF' in aplist) and ('R23' in aplist) :
aplist.remove('RINF')
aplist.insert(aplist.index('R23'),'RINF')
# For data matching, interpolate BJD to array index
i = np.arange(len(bjd0))
I=interp1d(np.round(bjd0,6),i,bounds_error=False,fill_value=0.5)
def _chisq_prior(params, *args):
r = (flux - model.eval(params, t=time))/flux_err
for p in params:
u = params[p].user_data
if isinstance(u, UFloat):
r = np.append(r, (u.n - params[p].value)/u.s)
return r
results = {}
rad_var = set([])
rad_fix = set([])
if ramp == None:
do_ramp = hasattr(self,'ramp_correction')
else:
do_ramp = ramp
if do_ramp:
beta = interp1d([22.5, 25, 30, 40],
[0.00014,0.00020,0.00033,0.00040],
bounds_error=False, fill_value='extrapolate')
if verbose:
hdr = 'Aperture Type R[pxl] rms[ppm] mad[ppm] chisq/ndf SNR'
hdr += " N_data"
print(hdr)
for ap in aplist:
params = self.lmfit.params.copy()
table, hdr = self._get_table_(ap, False)
rad = hdr['AP_RADI']
ap_type = table.meta['AP_TYPE']
if ap_type == 'Fixed':
if rad in rad_fix:
continue
rad_fix.add(rad)
if xy_detrend_fixed:
if not 'dfdx' in params:
params['dfdx'] = Parameter('dfdx', value=0, vary=True)
if not 'dfdy' in params:
params['dfdy'] = Parameter('dfdy', value=0, vary=True)
else:
if rad in rad_var:
continue
rad_var.add(rad)
ok = (((table['EVENT'] == 0) | (table['EVENT'] == 100))
& (table['FLUX']>0) & np.isfinite(table['FLUX']))
bjd = np.array(table['BJD_TIME'])[ok]
time = bjd-self.bjd_ref
flux = np.array(table['FLUX'][ok])
flux_err = np.array(table['FLUXERR'][ok])
bg = np.array(table['BACKGROUND'][ok])
smear = np.array(table['SMEARING_LC'][ok])
xoff = np.array(table['CENTROID_X'][ok]- table['LOCATION_X'][ok])
yoff = np.array(table['CENTROID_Y'][ok]- table['LOCATION_Y'][ok])
phi = np.array(table['ROLL_ANGLE'][ok])*np.pi/180
contam = np.array(table['CONTA_LC'][ok])
deltaT = np.array(self.metadata['thermFront_2'][ok]) + 12
if self.decontaminated:
flux /= (1 + contam)
if data_match:
j = I(np.round(bjd,6)) % 1 == 0
time =time[j]
flux =flux[j]
flux_err =flux_err[j]
bg =bg[j]
smear =smear[j]
xoff =xoff[j]
yoff =yoff[j]
phi =phi[j]
contam =contam[j]
deltaT =deltaT[j]
fluxmed = np.nanmedian(flux)
flux = flux/fluxmed
flux_err = flux_err/fluxmed
smear = smear/fluxmed
bg = bg/fluxmed
if do_ramp:
flux *= (1+beta(rad)*deltaT)
if '_transit_func' in self.model.__repr__():
model = TransitModel()
else:
model = EclipseModel()
            # Decorrelate against the basis vectors extracted for this
            # aperture, not those stored in self.lc, whose lengths can differ
            model *= FactorModel(
                dx=_make_interp(time, xoff, scale='range'),
                dy=_make_interp(time, yoff, scale='range'),
                sinphi=_make_interp(time,np.sin(phi)),
                cosphi=_make_interp(time,np.cos(phi)),
                bg=_make_interp(time,bg, scale='range'),
                contam=_make_interp(time,contam, scale='range'),
                smear=_make_interp(time,smear, scale='range'),
                deltaT=_make_interp(time,deltaT),
                extra_decorr_vectors=extra_decorr_vectors)
if hasattr(self,'f_theta'):
model += Model(_glint_func, independent_vars=['t'],
f_theta=self.f_theta, f_glint=self.f_glint)
if copy_initial:
for p in params:
if params[p].vary:
params[p].value = params[p].init_value
result = minimize(_chisq_prior, params, nan_policy='propagate',
args=(model, time, flux, flux_err))
fit = model.eval(result.params,t=time)
rad = hdr['AP_RADI']
rms = 1e6*(flux-fit).std()
mad = 1e6*abs(flux-fit).mean()
chisq = np.sum((flux-fit)**2/flux_err**2)
ndf = len(flux)-sum([params[p].vary for p in params])
chisqr = np.sum((flux-fit)**2/flux_err**2)/ndf
try:
if '_transit_func' in self.model.__repr__():
snr = result.params['D']/result.params['D'].stderr
else:
snr = result.params['L']/result.params['L'].stderr
except TypeError:
snr = np.nan
if verbose:
txt = f'{ap:9s} {ap_type:9s} {rad:4.1f} {rms:9.1f} {mad:9.1f}'
txt += f' {chisqr:9.4f} {snr:8.2f} {len(flux):6d}'
print(txt)
results[ap] = {'aperture_radius':rad, 'ap_type':ap_type,
'rms':rms, 'mad':mad, 'ndf':ndf, 'chisq':chisq,
'snr':snr,'ndata':len(flux)}
if return_full:
results[ap]['result'] = result
results[ap]['time'] = time
results[ap]['flux'] = flux
results[ap]['flux_err'] = flux_err
if return_full:
return results
else:
T = Table()
keys = list(results.keys())
T['aperture'] = keys
for f in ['aperture_radius','rms','mad','ndf','chisq','snr']:
T[f] = [round(results[k][f],3) for k in keys]
T['rms'].unit = 'ppm'
T['mad'].unit = 'ppm'
return T
# ----------------------------------------------------------------
def emcee_sampler(self, params=None,
steps=128, nwalkers=64, burn=256, thin=1, log_sigma=None,
add_shoterm=False, log_omega0=None, log_S0=None, log_Q=None,
init_scale=1e-2, progress=True, backend=None):
"""
If you only want to store and yield 1-in-thin samples in the chain, set
        thin to an integer greater than 1. When this is set, thin*steps steps
        will be made and the chains returned will have "steps" values per
        walker.
See https://emcee.readthedocs.io/en/stable/tutorials/monitor/ for use
of the backend keyword.
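        Example (a minimal sketch, run after a successful lmfit fit; chain
        dimensions are illustrative):
        >>> result = dataset.emcee_sampler(burn=256, steps=256, nwalkers=64)
        >>> print(dataset.emcee_report())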
"""
try:
time = np.array(self.lc['time'])
flux = np.array(self.lc['flux'])
flux_err = np.array(self.lc['flux_err'])
except AttributeError:
raise AttributeError("Use get_lightcurve() to load data first.")
try:
model = self.model
except AttributeError:
raise AttributeError(
"Use lmfit_transit() or lmfit_eclipse() first.")
# Make a copy of the lmfit MinimizerResult as a template for the
# output of this method
result = deepcopy(self.lmfit)
result.method ='emcee'
# Remove components on result not relevant for emcee
result.status = None
result.success = None
result.message = None
result.ier = None
result.lmdif_message = None
if params == None:
params = self.lmfit.params.copy()
k = params.valuesdict().keys()
if add_shoterm:
if 'log_S0' in k:
pass
elif log_S0 == None:
params.add('log_S0', value=-12, min=-30, max=0)
else:
params['log_S0'] = _kw_to_Parameter('log_S0', log_S0)
# For time in days, and the default value of Q=1/sqrt(2),
# log_omega0=8 is a correlation length of about 30s and
# -2.3 is about 10 days.
if 'log_omega0' in k:
pass
elif log_omega0 == None:
params.add('log_omega0', value=3, min=-2.3, max=8)
else:
lw0 = _kw_to_Parameter('log_omega0', log_omega0)
params['log_omega0'] = lw0
if 'log_Q' in params:
pass
elif log_Q == None:
params.add('log_Q', value=np.log(1/np.sqrt(2)), vary=False)
else:
params['log_Q'] = _kw_to_Parameter('log_Q', log_Q)
params.add('rho_SHO', expr='2*pi/exp(log_omega0)')
params.add('tau_SHO', expr='2*exp(log_Q)/exp(log_omega0)')
params.add('sigma_SHO', expr='sqrt(exp(log_Q+log_S0+log_omega0))')
if 'log_sigma' in k:
pass
elif log_sigma == None:
if not 'log_sigma' in params:
params.add('log_sigma', value=-10, min=-16,max=-1)
params['log_sigma'].stderr = 1
else:
params['log_sigma'] = _kw_to_Parameter('log_sigma', log_sigma)
params.add('sigma_w',expr='exp(log_sigma)*1e6')
vv, vs, vn = [], [], []
for p in params:
if params[p].vary:
vn.append(p)
vv.append(params[p].value)
if params[p].stderr == None:
if params[p].user_data == None:
vs.append(0.01*(params[p].max-params[p].min))
else:
vs.append(params[p].user_data.s)
else:
if np.isfinite(params[p].stderr):
vs.append(params[p].stderr)
else:
vs.append(0.01*(params[p].max-params[p].min))
result.var_names = vn
result.init_vals = vv
result.init_values = {}
for n,v in zip(vn, vv):
result.init_values[n] = v
vv = np.array(vv)
vs = np.array(vs)
args=(model, time, flux, flux_err, params, vn)
p = list(params.keys())
        if 'log_S0' in p and 'log_omega0' in p and 'log_Q' in p:
log_posterior_func = _log_posterior_SHOTerm
self.gp = True
else:
log_posterior_func = _log_posterior_jitter
self.gp = False
return_fit = False
args += (return_fit, )
# Initialize sampler positions ensuring all walkers produce valid
# function values (or pos=None if restarting from a backend)
n_varys = len(vv)
        if backend is None:
iteration = 0
else:
try:
iteration = backend.iteration
except OSError:
iteration = 0
if iteration > 0:
pos = None
else:
pos = []
for i in range(nwalkers):
params_tmp = params.copy()
lnpost_i = -np.inf
while lnpost_i == -np.inf:
pos_i = vv + vs*np.random.randn(n_varys)*init_scale
lnpost_i, lnlike_i = log_posterior_func(pos_i, *args)
pos.append(pos_i)
sampler = EnsembleSampler(nwalkers, n_varys, log_posterior_func,
args=args, backend=backend)
if progress:
print('Running burn-in ..')
stdout.flush()
pos,_,_,_ = sampler.run_mcmc(pos, burn, store=False,
skip_initial_state_check=True, progress=progress)
sampler.reset()
if progress:
print('Running sampler ..')
stdout.flush()
state = sampler.run_mcmc(pos, steps, thin_by=thin,
skip_initial_state_check=True, progress=progress)
flatchain = sampler.get_chain(flat=True).reshape((-1, len(vn)))
pos_i = flatchain[np.argmax(sampler.get_log_prob()),:]
fit = log_posterior_func(pos_i, model, time, flux, flux_err,
params, vn, return_fit=True)
        # Use scaled residuals for consistency with lmfit
result.residual = (flux - fit)/flux_err
result.bestfit = fit
result.chain = flatchain
        # Store median and standard error of PPD in result.params
# Store best fit in result.parbest
parbest = params.copy()
quantiles = np.percentile(flatchain, [15.87, 50, 84.13], axis=0)
for i, n in enumerate(vn):
std_l, median, std_u = quantiles[:, i]
params[n].value = median
params[n].stderr = 0.5 * (std_u - std_l)
params[n].correl = {}
parbest[n].value = pos_i[i]
parbest[n].stderr = 0.5 * (std_u - std_l)
parbest[n].correl = {}
result.params = params
result.params_best = parbest
corrcoefs = np.corrcoef(flatchain.T)
for i, n in enumerate(vn):
for j, n2 in enumerate(vn):
if i != j:
result.params[n].correl[n2] = corrcoefs[i, j]
result.params_best[n].correl[n2] = corrcoefs[i, j]
result.lnprob = np.copy(sampler.get_log_prob())
result.errorbars = True
result.nvarys = n_varys
af = sampler.acceptance_fraction.mean()
result.acceptance_fraction = af
result.nfev = int(thin*nwalkers*steps/af)
result.thin = thin
result.ndata = len(time)
result.nfree = len(time) - n_varys
result.chisqr = np.sum((flux-fit)**2/flux_err**2)
result.redchi = result.chisqr/(len(time) - n_varys)
loglmax = np.max(sampler.get_blobs())
result.lnlike = loglmax
result.aic = 2*n_varys - 2*loglmax
result.bic = np.log(len(time))*n_varys - 2*loglmax
result.covar = np.cov(flatchain.T)
result.rms = (flux - fit).std()
self.emcee = result
self.sampler = sampler
self.__lastfit__ = 'emcee'
return result
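    # Illustrative usage (a sketch, commented out; `d` denotes an instance
    # of this class and the parameter values are hypothetical):
    #
    #   time, flux, flux_err = d.get_lightcurve()
    #   d.lmfit_transit(T_0=0.5, P=3.45, D=0.01, W=0.1, b=0.3)
    #   result = d.emcee_sampler(burn=256, steps=512, nwalkers=64,
    #                            add_shoterm=True)  # SHO GP for red noise
    #   print(result.params['T_0'])  # median +/- std. error of the PPD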
# ----------------------------------------------------------------
def emcee_report(self, **kwargs):
report = fit_report(self.emcee, **kwargs)
rms = self.emcee.rms*1e6
s = " RMS residual = {:0.1f} ppm\n".format(rms)
j = report.index('[[Variables]]')
report = report[:j] + s + report[j:]
noPriors = True
params = self.emcee.params
parnames = list(params.keys())
namelen = max([len(n) for n in parnames])
for p in params:
u = params[p].user_data
if isinstance(u, UFloat):
if noPriors:
report+="\n[[Priors]]"
noPriors = False
report += "\n %s:%s" % (p, ' '*(namelen-len(p)))
report += '%s +/-%s' % (gformat(u.n), gformat(u.s))
# Decorrelation parameter scaling
has_notes = False
if self.__scale__ is not None:
has_notes = True
report += '\n[[Notes]]'
if self.__scale__:
report +='\n Decorrelation parameters were scaled'
else:
report +='\n Decorrelation parameters were not scaled'
if params['e'].value > 0:
if not has_notes:
report += '\n[[Notes]]'
has_notes = True
report +='\n T_tot from Winn, arXiv:1001.2010 is approximate'
report += '\n[[Software versions]]'
report += '\n CHEOPS DRP : %s' % self.pipe_ver
report += '\n pycheops : %s' % __version__
report += '\n lmfit : %s' % _lmfit_version_
        return report
# ----------------------------------------------------------------
def trail_plot(self, plotkeys=['T_0', 'D', 'W', 'b'],
width=8, height=1.5):
"""
Plot parameter values v. step number for each walker.
These plots are useful for checking the convergence of the sampler.
        The parameters width and height specify the size of the subplot for
        each parameter.
        The parameters to be plotted are specified by the keyword plotkeys, or
        plotkeys='all' to plot every jump parameter.
"""
params = self.emcee.params
samples = self.sampler.get_chain()
varkeys = []
for key in params:
if params[key].vary:
varkeys.append(key)
if plotkeys == 'all':
plotkeys = varkeys
n = len(plotkeys)
fig,ax = plt.subplots(nrows=n, figsize=(width,n*height), sharex=True)
if n == 1: ax = [ax,]
labels = _make_labels(plotkeys, self.bjd_ref, self.extra_decorr_vectors)
for i,key in enumerate(plotkeys):
ax[i].plot(samples[:,:,varkeys.index(key)],'k',alpha=0.1)
ax[i].set_ylabel(labels[i])
ax[i].yaxis.set_label_coords(-0.1, 0.5)
ax[-1].set_xlim(0, len(samples)-1)
ax[-1].set_xlabel("step number");
fig.tight_layout()
return fig
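    # Illustrative usage (a sketch, commented out; `d` denotes an instance
    # of this class with emcee_sampler() already run):
    #
    #   fig = d.trail_plot(plotkeys='all')  # plot every jump parameter
    #   fig.savefig('trail.png')            # hypothetical output file name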
# ----------------------------------------------------------------
def corner_plot(self, plotkeys=['T_0', 'D', 'W', 'b'],
show_priors=True, show_ticklabels=False, kwargs=None):
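        """
        Corner plot of the posterior parameter distributions from
        emcee_sampler.
        The keys in plotkeys may be jump parameters or the derived
        quantities k, aR, sini, logrho and sigma_w computed below.
        Gaussian priors are marked on the diagonal panels with green dashed
        lines at +/- 1 standard deviation if show_priors=True. Additional
        keyword arguments for corner.corner can be passed via the kwargs
        dictionary.
        """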
params = self.emcee.params
varkeys = []
for key in params:
if params[key].vary:
varkeys.append(key)
if plotkeys == 'all':
plotkeys = varkeys
chain = self.sampler.get_chain(flat=True)
xs = []
for key in plotkeys:
if key in varkeys:
xs.append(chain[:,varkeys.index(key)])
if key == 'sigma_w' and params['log_sigma'].vary:
xs.append(np.exp(self.emcee.chain[:,-1])*1e6)
if 'D' in varkeys:
k = np.sqrt(chain[:,varkeys.index('D')])
else:
k = np.sqrt(params['D'].value) # Needed for later calculations
if key == 'k' and 'D' in varkeys:
xs.append(k)
if 'b' in varkeys:
b = chain[:,varkeys.index('b')]
else:
b = params['b'].value # Needed for later calculations
if 'W' in varkeys:
W = chain[:,varkeys.index('W')]
else:
W = params['W'].value
aR = np.sqrt((1+k)**2-b**2)/W/np.pi
if key == 'aR':
xs.append(aR)
sini = np.sqrt(1 - (b/aR)**2)
if key == 'sini':
xs.append(sini)
if 'P' in varkeys:
P = chain[:,varkeys.index('P')]
else:
P = params['P'].value # Needed for later calculations
if key == 'logrho':
logrho = np.log10(4.3275e-4*((1+k)**2-b**2)**1.5/W**3/P**2)
xs.append(logrho)
        kws = {} if kwargs is None else kwargs
xs = np.array(xs).T
labels = _make_labels(plotkeys, self.bjd_ref, self.extra_decorr_vectors)
figure = corner.corner(xs, labels=labels, **kws)
nax = len(labels)
axes = np.array(figure.axes).reshape((nax, nax))
if not show_ticklabels:
for i in range(nax):
ax = axes[-1, i]
ax.set_xticklabels([])
ax.set_xlabel(labels[i])
ax.xaxis.set_label_coords(0.5, -0.1)
for i in range(1,nax):
ax = axes[i,0]
ax.set_yticklabels([])
ax.set_ylabel(labels[i])
ax.yaxis.set_label_coords(-0.1, 0.5)
if show_priors:
for i, key in enumerate(plotkeys):
u = params[key].user_data
if isinstance(u, UFloat):
ax = axes[i, i]
ax.axvline(u.n - u.s, color="g", linestyle='--')
ax.axvline(u.n + u.s, color="g", linestyle='--')
return figure
# ------------------------------------------------------------
def plot_fft(self, star=None, gsmooth=5, logxlim = (1.5,4.5),
title=None, fontsize=12, figsize=(8,5)):
"""
Lomb-Scargle power-spectrum of the residuals.
If the previous fit included a GP then this is _not_ included in the
calculation of the residuals, i.e. the power spectrum includes the
power "fitted-out" using the GP. The assumption here is that the GP
has been used to model stellar variability that we wish to
characterize using the power spectrum.
The red vertical dotted lines show the CHEOPS orbital frequency and
its first two harmonics.
If star is a pycheops starproperties object and
5000 K < star.teff < 7000 K, then the likely range of nu_max is shown
using green dashed lines.
The expected power due to white noise is shown as a horizontal dashed
gray line.
"""
try:
time = np.array(self.lc['time'])
flux = np.array(self.lc['flux'])
flux_err = np.array(self.lc['flux_err'])
except AttributeError:
raise AttributeError("Use get_lightcurve() to load data first.")
try:
l = self.__lastfit__
except AttributeError:
raise AttributeError(
"Use lmfit_transit() to get best-fit parameters first.")
model = self.model
params = self.emcee.params_best if l == 'emcee' else self.lmfit.params
res = flux - self.model.eval(params, t=time)
# print('nu_max = {:0.0f} muHz'.format(nu_max))
t_s = time*86400*u.second
y = (1e6*res)*u.dimensionless_unscaled
ls = LombScargle(t_s, y, normalization='psd')
frequency, power = ls.autopower()
p_smooth = convolve(power, Gaussian1DKernel(gsmooth))
plt.rc('font', size=fontsize)
fig,ax=plt.subplots(figsize=figsize)
# Expected white-noise level based on median error bar
sigma_w = 1e6 * np.nanmedian(flux_err/flux) # Median error in ppm
power_w = 1e-6 * sigma_w**2 # ppm^2/micro-Hz
ax.axhline(power_w, ls='--', c='dimgray')
ax.loglog(frequency*1e6,power/1e6,c='gray',alpha=0.5)
ax.loglog(frequency*1e6,p_smooth/1e6,c='darkcyan')
# nu_max from Campante et al. (2016) eq (20)
if star is not None:
if abs(star.teff-6000) < 1000:
                nu_max = 3090 * 10**(star.logg-4.438)/usqrt(star.teff/5777)
ax.axvline(nu_max.n-nu_max.s,ls='--',c='g')
ax.axvline(nu_max.n+nu_max.s,ls='--',c='g')
f_cheops = 1e6/(CHEOPS_ORBIT_MINUTES*60)
for h in range(1,4):
ax.axvline(h*f_cheops,ls=':',c='darkred')
ax.set_xlim(10**logxlim[0],10**logxlim[1])
ax.set_xlabel(r'Frequency [$\mu$Hz]')
        ax.set_ylabel(r'Power [ppm$^2$ $\mu$Hz$^{-1}$]')
ax.set_title(title)
return fig
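    # Illustrative usage (a sketch, commented out; `d` denotes an instance
    # of this class and `star` a pycheops starproperties object):
    #
    #   fig = d.plot_fft(star=star, gsmooth=10)
    #
    # The green nu_max range is only drawn if 5000 K < star.teff < 7000 K.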
# ------------------------------------------------------------
def plot_lmfit(self, figsize=(6,4), fontsize=11, title=None,
show_model=True, binwidth=0.005, detrend=False,
xlim=None):
"""
Plot the best fit from lmfit_transit / lmfit_eclipse
"""
try:
time = np.array(self.lc['time'])
flux = np.array(self.lc['flux'])
flux_err = np.array(self.lc['flux_err'])
except AttributeError:
raise AttributeError("Use get_lightcurve() to load data first.")
try:
model = self.model
except AttributeError:
raise AttributeError("Use lmfit_transit() to fit a model first.")
try:
params = self.lmfit.params
except AttributeError:
raise AttributeError(
"Use lmfit_transit() to get best-fit parameters first.")
res = flux - self.model.eval(params, t=time)
if xlim is None:
tmin = np.round(np.min(time)-0.05*np.ptp(time),2)
tmax = np.round(np.max(time)+0.05*np.ptp(time),2)
else:
tmin, tmax = xlim
tp = np.linspace(tmin, tmax, 10*len(time))
fp = self.model.eval(params,t=tp)
glint = model.right.name == 'Model(_glint_func)'
if detrend:
if glint:
flux -= model.right.eval(params, t=time) # de-glint
fp -= model.right.eval(params, t=tp) # de-glint
flux /= model.left.right.eval(params, t=time) # de-trend
fp /= model.left.right.eval(params, t=tp) # de-trend
else:
flux /= model.right.eval(params, t=time)
fp /= model.right.eval(params, t=tp)
# Transit model only
if glint:
ft = model.left.left.eval(params, t=tp)
else:
ft = model.left.eval(params, t=tp)
if not detrend:
ft *= params['c'].value
plt.rc('font', size=fontsize)
fig,ax=plt.subplots(nrows=2,sharex=True, figsize=figsize,
gridspec_kw={'height_ratios':[2,1]})
ax[0].plot(time,flux,'o',c='skyblue',ms=2,zorder=0)
ax[0].plot(tp,fp,c='saddlebrown',zorder=2)
if binwidth:
t_, f_, e_, n_ = lcbin(time, flux, binwidth=binwidth)
ax[0].errorbar(t_,f_,yerr=e_,fmt='o',c='midnightblue',ms=5,zorder=2,
capsize=2)
if show_model:
ax[0].plot(tp,ft,c='forestgreen',zorder=1, lw=2)
ax[0].set_xlim(tmin, tmax)
ymin = np.min(flux-flux_err)-0.05*np.ptp(flux)
ymax = np.max(flux+flux_err)+0.05*np.ptp(flux)
ax[0].set_ylim(ymin,ymax)
ax[0].set_title(title)
if detrend:
if glint:
ax[0].set_ylabel('(Flux-glint)/trend')
else:
ax[0].set_ylabel('Flux/trend')
else:
ax[0].set_ylabel('Flux')
ax[1].plot(time,res,'o',c='skyblue',ms=2,zorder=0)
ax[1].plot([tmin,tmax],[0,0],ls=':',c='saddlebrown',zorder=1)
if binwidth:
t_, f_, e_, n_ = lcbin(time, res, binwidth=binwidth)
ax[1].errorbar(t_,f_,yerr=e_,fmt='o',c='midnightblue',ms=5,zorder=2,
capsize=2)
ax[1].set_xlabel('BJD-{}'.format(self.lc['bjd_ref']))
ax[1].set_ylabel('Residual')
ylim = np.max(np.abs(res-flux_err)+0.05*np.ptp(res))
ax[1].set_ylim(-ylim,ylim)
fig.tight_layout()
return fig
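    # Illustrative usage (a sketch, commented out; `d` denotes an instance
    # of this class with lmfit_transit() already run; values hypothetical):
    #
    #   fig = d.plot_lmfit(detrend=True, binwidth=0.01, xlim=(0.4, 0.6))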
# ------------------------------------------------------------
def plot_emcee(self, title=None, nsamples=32, detrend=False,
binwidth=0.005, show_model=True, xlim=None,
figsize=(6,4), fontsize=11):
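        """
        Plot the best fit from emcee_sampler together with nsamples models
        drawn from the posterior sample chain.
        If the fit included an SHO GP noise model, the GP prediction is
        added to each model curve and the best-fit GP prediction is also
        shown in the residuals panel.
        """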
try:
time = np.array(self.lc['time'])
flux = np.array(self.lc['flux'])
flux_err = np.array(self.lc['flux_err'])
except AttributeError:
raise AttributeError("Use get_lightcurve() to load data first.")
try:
model = self.model
except AttributeError:
raise AttributeError("Use lmfit_transit() to get a model first.")
try:
parbest = self.emcee.params_best
except AttributeError:
raise AttributeError(
"Use emcee_transit() or emcee_eclipse() first.")
res = flux - model.eval(parbest, t=time)
if xlim is None:
tmin = np.round(np.min(time)-0.05*np.ptp(time),2)
tmax = np.round(np.max(time)+0.05*np.ptp(time),2)
else:
tmin, tmax = xlim
tp = np.linspace(tmin, tmax, 10*len(time))
fp = model.eval(parbest,t=tp)
glint = model.right.name == 'Model(_glint_func)'
flux0 = copy(flux)
if detrend:
if glint:
flux -= model.right.eval(parbest, t=time) # de-glint
fp -= model.right.eval(parbest, t=tp) # de-glint
flux /= model.left.right.eval(parbest, t=time) # de-trend
fp /= model.left.right.eval(parbest, t=tp) # de-trend
else:
flux /= model.right.eval(parbest, t=time)
fp /= model.right.eval(parbest, t=tp)
# Transit model only
if glint:
ft = model.left.left.eval(parbest, t=tp)
else:
ft = model.left.eval(parbest, t=tp)
if not detrend:
ft *= parbest['c'].value
plt.rc('font', size=fontsize)
fig,ax=plt.subplots(nrows=2,sharex=True, figsize=figsize,
gridspec_kw={'height_ratios':[2,1]})
ax[0].plot(time,flux,'o',c='skyblue',ms=2,zorder=0)
ax[0].plot(tp,fp,c='saddlebrown',zorder=1)
if binwidth:
t_, f_, e_, n_ = lcbin(time, flux, binwidth=binwidth)
ax[0].errorbar(t_,f_,yerr=e_,fmt='o',c='midnightblue',ms=5,
zorder=2, capsize=2)
if show_model:
ax[0].plot(tp,ft,c='forestgreen',zorder=1, lw=2)
nchain = self.emcee.chain.shape[0]
partmp = parbest.copy()
if self.gp:
kernel = SHOTerm(
S0=np.exp(parbest['log_S0'].value),
Q=np.exp(parbest['log_Q'].value),
w0=np.exp(parbest['log_omega0'].value))
gp = GaussianProcess(kernel, mean=0)
yvar = flux_err**2+np.exp(2*parbest['log_sigma'].value)
gp.compute(time, diag=yvar, quiet=True)
mu0 = gp.predict(res,tp,return_cov=False,return_var=False)
pp = mu0 + model.eval(parbest,t=tp)
if detrend:
if glint:
pp -= model.right.eval(parbest, t=tp) # de-glint
pp /= model.left.right.eval(parbest, t=tp) # de-trend
else:
pp /= model.right.eval(parbest, t=tp)
ax[0].plot(tp,pp,c='saddlebrown',zorder=1)
for i in np.linspace(0,nchain,nsamples,endpoint=False,
dtype=int):
for j, n in enumerate(self.emcee.var_names):
partmp[n].value = self.emcee.chain[i,j]
rr = flux0 - model.eval(partmp, t=time)
kernel = SHOTerm(
S0=np.exp(partmp['log_S0'].value),
Q=np.exp(partmp['log_Q'].value),
w0=np.exp(partmp['log_omega0'].value))
gp = GaussianProcess(kernel, mean=0)
yvar = flux_err**2+np.exp(2*partmp['log_sigma'].value)
gp.compute(time, diag=yvar, quiet=True)
mu = gp.predict(rr,tp,return_var=False,return_cov=False)
pp = mu + model.eval(partmp, t=tp)
if detrend:
if glint:
pp -= model.right.eval(partmp, t=tp) # de-glint
pp /= model.left.right.eval(partmp, t=tp) # de-trend
else:
pp /= model.right.eval(partmp, t=tp)
ax[0].plot(tp,pp,c='saddlebrown',zorder=1,alpha=0.1)
else:
for i in np.linspace(0,nchain,nsamples,endpoint=False,
dtype=int):
for j, n in enumerate(self.emcee.var_names):
partmp[n].value = self.emcee.chain[i,j]
fp = model.eval(partmp,t=tp)
if detrend:
if glint:
fp -= model.right.eval(partmp, t=tp)
fp /= model.left.right.eval(partmp, t=tp)
else:
fp /= model.right.eval(partmp, t=tp)
ax[0].plot(tp,fp,c='saddlebrown',zorder=1,alpha=0.1)
ymin = np.min(flux-flux_err)-0.05*np.ptp(flux)
ymax = np.max(flux+flux_err)+0.05*np.ptp(flux)
ax[0].set_xlim(tmin, tmax)
ax[0].set_ylim(ymin,ymax)
ax[0].set_title(title)
if detrend:
if glint:
ax[0].set_ylabel('(Flux-glint)/trend')
else:
ax[0].set_ylabel('Flux/trend')
else:
ax[0].set_ylabel('Flux')
ax[1].plot(time,res,'o',c='skyblue',ms=2,zorder=0)
if self.gp:
ax[1].plot(tp,mu0,c='saddlebrown', zorder=1)
ax[1].plot([tmin,tmax],[0,0],ls=':',c='saddlebrown', zorder=1)
if binwidth:
t_, f_, e_, n_ = lcbin(time, res, binwidth=binwidth)
ax[1].errorbar(t_,f_,yerr=e_,fmt='o',c='midnightblue',ms=5,zorder=2,
capsize=2)
ax[1].set_xlabel('BJD-{}'.format(self.lc['bjd_ref']))
ax[1].set_ylabel('Residual')
ylim = np.max(np.abs(res-flux_err)+0.05*np.ptp(res))
ax[1].set_ylim(-ylim,ylim)
fig.tight_layout()
return fig
# ------------------------------------------------------------
def massradius(self, m_star=None, r_star=None, K=None, q=0,
jovian=True, plot_kws=None, return_samples=False,
verbose=True):
'''
Use the results from the previous emcee/lmfit transit light curve fit
to estimate the mass and/or radius of the planet.
Requires that stellar properties are supplied using the keywords
m_star and/or r_star. If only one parameter is supplied then the other
is estimated using the stellar density derived from the transit light
        curve analysis. The planet mass can only be estimated if the
        semi-amplitude of its orbit (in m/s) is supplied using the keyword
        argument K. See pycheops.funcs.massradius for valid formats to specify
        these parameters.
        N.B. by default, the mean stellar density calculated from the light
        curve fit uses the approximation q->0, where q=m_p/m_star is
        the mass ratio. If this approximation is not valid then supply an
        estimate of the mass ratio using the keyword argument q.
Output units are selected using the keyword argument jovian=True
(Jupiter mass/radius) or jovian=False (Earth mass/radius).
See pycheops.funcs.massradius for options available using the plot_kws
keyword argument.
'''
# Generate value(s) from previous emcee sampler run
def _v(p):
vn = self.emcee.var_names
chain = self.emcee.chain
pars = self.emcee.params
if (p in vn):
v = chain[:,vn.index(p)]
elif p in pars.valuesdict().keys():
v = pars[p].value
else:
raise AttributeError(
'Parameter {} missing from dataset'.format(p))
return v
# Generate ufloat from previous lmfit run
def _u(p):
vn = self.lmfit.var_names
pars = self.lmfit.params
if (p in vn):
u = ufloat(pars[p].value, pars[p].stderr)
elif p in pars.valuesdict().keys():
u = pars[p].value
else:
raise AttributeError(
'Parameter {} missing from dataset'.format(p))
return u
# Generate a sample of values for a parameter
def _s(x, nm=100_000):
if isinstance(x,float) or isinstance(x,int):
return np.full(nm, x, dtype=float)
elif isinstance(x, UFloat):
return np.random.normal(x.n, x.s, nm)
elif isinstance(x, np.ndarray):
if len(x) == nm:
return x
elif len(x) > nm:
return x[random_sample(range(len(x)), nm)]
else:
                    return x[(np.random.random(nm)*len(x)).astype(int)]
elif isinstance(x, tuple):
if len(x) == 2:
return np.random.normal(x[0], x[1], nm)
elif len(x) == 3:
raise NotImplementedError
raise ValueError("Unrecognised type for parameter values")
# If last fit was emcee then generate samples for derived parameters
# not specified by the user from the chain rather than the summary
# statistics
if self.__lastfit__ == 'emcee':
k = np.sqrt(_v('D'))
b = _v('b')
W = _v('W')
P = _v('P')
aR = np.sqrt((1+k)**2-b**2)/W/np.pi
sini = np.sqrt(1 - (b/aR)**2)
f_c = _v('f_c')
f_s = _v('f_s')
ecc = f_c**2 + f_s**2
_q = _s(q, len(self.emcee.chain))
rho_star = rhostar(1/aR,P,_q)
# N.B. use of np.abs to cope with values with large errors
            if r_star is None and m_star is not None:
_m = np.abs(_s(m_star, len(self.emcee.chain)))
r_star = (_m/rho_star)**(1/3)
            if m_star is None and r_star is not None:
_r = np.abs(_s(r_star, len(self.emcee.chain)))
m_star = rho_star*_r**3
# If last fit was lmfit then extract parameter values as ufloats or, for
# fixed parameters, as floats
if self.__lastfit__ == 'lmfit':
k = usqrt(_u('D'))
b = _u('b')
W = _u('W')
P = _u('P')
aR = usqrt((1+k)**2-b**2)/W/np.pi
sini = usqrt(1 - (b/aR)**2)
ecc = _u('e')
_q = ufloat(q[0], q[1]) if isinstance(q, tuple) else q
rho_star = rhostar(1/aR, P, _q)
            if r_star is None and m_star is not None:
if isinstance(m_star, tuple):
_m = ufloat(m_star[0], m_star[1])
else:
_m = m_star
r_star = (_m/rho_star)**(1/3)
            if m_star is None and r_star is not None:
if isinstance(r_star, tuple):
_r = ufloat(r_star[0], r_star[1])
else:
_r = r_star
m_star = rho_star*_r**3
if verbose:
print('[[Mass/radius]]')
        if plot_kws is None:
plot_kws = {}
return massradius(P=P, k=k, sini=sini, ecc=ecc,
m_star=m_star, r_star=r_star, K=K, aR=aR,
jovian=jovian, verbose=verbose,
return_samples=return_samples, **plot_kws)
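    # Illustrative usage (a sketch, commented out; `d` denotes an instance
    # of this class and all values are hypothetical). Stellar properties
    # can be given as (value, error) tuples, ufloats or sample arrays:
    #
    #   result = d.massradius(m_star=(0.95, 0.05), r_star=(0.98, 0.04),
    #                         K=185.0, jovian=True)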
# ------------------------------------------------------------
def bright_star_check(self, vmax=3, sepmax=6):
"""
Check for bright stars near target
Only stars from the Bright Star Catalogue, 5th Revised Ed.
(Hoffleit+, 1991) are checked.
vmax - maximum V magnitude to check
sepmax - maximum separation in degrees to check
Return an astropy table with stars from the bright star catalog
brighter than V magnitude vmax within sepmax degrees from the target
"""
if vmax > 6.5:
warnings.warn('Bright star catalogue only complete to V=6.5')
if sepmax > 24:
warnings.warn('No internal reflections for stars > 24 deg away')
target_coo = SkyCoord(self.ra,self.dec,unit=('hour','degree'))
catpath = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'data','BrightStarCat','bscat.fits')
T = Table.read(catpath)
T.remove_column('recno')
T = T[np.isfinite(T['RAJ2000'])]
bscat = SkyCoord(T['RAJ2000'], T['DEJ2000'],unit='degree, degree')
sep = target_coo.separation(bscat)
T.add_column(sep, name='Separation', index=0)
T['Separation'].info.format = '7.3f'
T['SpType'].info.format = '<18s'
i = (sep.degree < sepmax) & (T['Vmag'] < vmax)
T = T[i]
T.sort('Separation')
return T
# ------------------------------------------------------------
def planet_check(self):
"""
Show target separation from solar system objects at time of observation
"""
visit_mid_time = Time(np.median(self.lc['table']['MJD_TIME']),
format='mjd', scale='utc')
target_coo = SkyCoord(self.ra,self.dec,unit=('hour','degree'))
print(f'UTC = {visit_mid_time.isot}')
print(f'Target coordinates = {target_coo.to_string("hmsdms")}')
print('Body R.A. Declination Sep(deg)')
print('-------------------------------------------')
for p in ('moon','mars','jupiter','saturn','uranus','neptune'):
c = get_body(p, visit_mid_time)
ra = c.ra.to_string(precision=2,unit='hour',sep=':',pad=True)
dec = c.dec.to_string(precision=1,sep=':',unit='degree',
alwayssign=True,pad=True)
sep = c.separation(target_coo).degree
print(f'{p.capitalize():8s} {ra:12s} {dec:12s} {sep:8.1f}')
# ------------------------------------------------------------
def cds_data_export(self, lcfile="lc.dat",title=None, author=None,
authors=None, abstract=None, keywords=None, bibcode=None,
acknowledgements=None):
'''
Save light curve, best fit, etc. to files suitable for CDS upload
Generates ReadMe file and a data file with the following columns..
Format Units Label Explanations
F11.6 d time Time of mid-exposure (BJD_TDB)
F8.6 --- flux Normalized flux
F8.6 --- e_flux Normalized flux error
F8.6 --- flux_d Normalized flux corrected for instrumental trends
F8.4 pix xoff Target position offset in x-direction
F8.4 pix yoff Target position offset in y-direction
F8.4 deg roll Spacecraft roll angle
F9.7 --- contam Fraction of flux in aperture from nearby stars
F9.7 --- smear Fraction of flux in aperture from readout trails
F9.7 --- bg Fraction of flux in aperture from background
F6.3 --- temp_2 thermFront_2 temperature sensor reading
:param lcfile: output file for upload to CDS
:param title: title
:param author: First author
:param authors: Full author list of the paper
:param abstract: Abstract of the paper
:param keywords: list of keywords as in the printed publication
:param bibcode: Bibliography code for the printed publication
:param acknowledgements: list of acknowledgements
See http://cdsarc.u-strasbg.fr/submit/catstd/catstd-3.1.htx for the
correct formatting of title, keywords, etc.
The acknowledgements are normally used to give the name and e-mail
address of the person who generated the table, e.g.
"Pierre Maxted, p.maxted(at)keele.ac.uk"
'''
try:
time = np.array(self.lc['time'])
flux = np.array(self.lc['flux'])
except AttributeError:
raise AttributeError("Use get_lightcurve() to load data first.")
try:
l = self.__lastfit__
except AttributeError:
raise AttributeError(
"Use lmfit_transit() to get best-fit parameters first.")
model = self.model
params = self.lmfit.params if l == 'lmfit' else self.emcee.params_best
if model.right.name == 'Model(_glint_func)':
flux_d = flux - model.right.eval(params, t=time) # de-glint
flux_d /= model.left.right.eval(params, t=time) # de-trend
else:
flux_d = flux/model.right.eval(params, t=time)
tmk = cdspyreadme.CDSTablesMaker()
tmk.title = title if title is not None else ""
tmk.author = author if author is not None else ""
        tmk.authors = authors if authors is not None else ""
tmk.abstract = abstract if abstract is not None else ""
tmk.keywords = keywords if keywords is not None else ""
tmk.bibcode = bibcode if bibcode is not None else ""
tmk.date = Time.now().value.year
T=Table()
T['time'] = time + self.lc['bjd_ref']
T['time'].info.format = '16.6f'
T['time'].description = 'Time of mid-exposure'
        T['time'].unit = u.day
T['flux'] = flux
T['flux'].info.format = '8.6f'
T['flux'].description = 'Normalized flux'
T['e_flux'] = self.lc['flux_err']
T['e_flux'].info.format = '8.6f'
T['e_flux'].description = 'Normalized flux error'
T['flux_d'] = flux_d
T['flux_d'].info.format = '8.6f'
T['flux_d'].description = 'Normalized flux corrected for instrumental trends'
T['xoff'] = self.lc['xoff']
T['xoff'].info.format = '8.4f'
T['xoff'].description = "Target position offset in x-direction"
T['yoff'] = self.lc['yoff']
T['yoff'].info.format = '8.4f'
T['yoff'].description = "Target position offset in y-direction"
T['roll'] = self.lc['roll_angle']
T['roll'].info.format = '8.4f'
T['roll'].description = "Spacecraft roll angle"
        T['roll'].unit = u.degree
T['contam'] = self.lc['contam']
T['contam'].info.format = '9.7f'
T['contam'].description = "Fraction of flux in aperture from nearby stars"
if np.ptp(self.lc['smear']) > 0:
T['smear'] = self.lc['smear']
T['smear'].info.format = '9.7f'
T['smear'].description = "Fraction of flux in aperture from readout trails"
T['bg'] = self.lc['bg']
T['bg'].info.format = '9.7f'
T['bg'].description = "Fraction of flux in aperture from background"
if np.ptp(self.lc['deltaT']) > 0:
T['temp_2'] = self.lc['deltaT'] - 12
T['temp_2'].info.format = '6.3f'
T['temp_2'].description = "thermFront_2 temperature sensor reading"
            T['temp_2'].unit = u.Celsius
table = tmk.addTable(T, lcfile,
description=f"CHEOPS photometry of {self.target}")
# Set output format
for p in T.colnames:
c=table.get_column(p)
c.set_format(f'F{T[p].format[:-1]}')
# Units
c=table.get_column('time'); c.unit = 'd'
c=table.get_column('xoff'); c.unit = 'pix'
c=table.get_column('yoff'); c.unit = 'pix'
c=table.get_column('roll'); c.unit = 'deg'
tmk.writeCDSTables()
templatename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'data','cdspyreadme','ReadMe.template')
coo = SkyCoord(
self.lc['header']['RA_TARG'],
self.lc['header']['DEC_TARG'],unit='deg')
rastr = coo.ra.to_string(unit='hour',sep=' ',precision=1, pad=True)
destr = coo.dec.to_string(unit='deg',sep=' ',precision=0,
alwayssign=True, pad=True)
desc = (indent(fill(
f'Photometry of {self.target} generated from CHEOPS archive '+
f'files with file key {self.file_key} using pycheops version '+
f'{__version__}.', width=78),' ') +
f'\n Aperture radius = {self.ap_rad} pixels.'+
f'\n Exposure time: {self.nexp} x {self.exptime:0.1f} s')
templateValue = {
'object':f'{rastr} {destr} {self.target}',
'description':desc,
'acknowledgements':acknowledgements
}
tmk.setReadmeTemplate(templatename, templateValue)
with open("ReadMe", "w") as fd:
tmk.makeReadMe(out=fd)
# ------------------------------------------------------------
def rollangle_plot(self, binwidth=15, figsize=None, fontsize=11,
title=None):
'''
Plot of residuals from last fit v. roll angle
The upper panel shows the fit to the glint and/or trends v. roll angle
The lower panel shows the residuals from the best fit.
If a glint correction v. moon angle has been applied, this is shown in
the middle panel.
'''
try:
flux = np.array(self.lc['flux'])
time = np.array(self.lc['time'])
angle = np.array(self.lc['roll_angle'])
except AttributeError:
raise AttributeError("Use get_lightcurve() to load data first.")
try:
l = self.__lastfit__
except AttributeError:
raise AttributeError(
"Use lmfit_transit() to get best-fit parameters first.")
# Residuals from last fit and trends due to glint and roll angle
fit = self.emcee.bestfit if l == 'emcee' else self.lmfit.bestfit
res = flux - fit
params = self.emcee.params_best if l == 'emcee' else self.lmfit.params
rolltrend = np.zeros_like(angle)
glint = np.zeros_like(angle)
phi = angle*np.pi/180 # radians for calculation
# Grid of angle values for plotting smooth version of trends
tang = np.linspace(0,360,3600) # degrees
tphi = tang*np.pi/180 # radians for calculation
tr = np.zeros_like(tang) # roll angle trend
tg = np.zeros_like(tang) # glint
vd = params.valuesdict()
vk = vd.keys()
notrend = True
noglint = True
# Roll angle trend
for n in range(1,4):
p = "dfdsinphi" if n==1 else "dfdsin{}phi".format(n)
if p in vk:
notrend = False
rolltrend += vd[p] * np.sin(n*phi)
tr += vd[p] * np.sin(n*tphi)
p = "dfdcosphi" if n==1 else "dfdcos{}phi".format(n)
if p in vk:
notrend = False
rolltrend += vd[p] * np.cos(n*phi)
tr += vd[p] * np.cos(n*tphi)
if 'glint_scale' in vk:
notrend = False
if self.glint_moon:
glint_theta = self.f_theta(time)
glint = vd['glint_scale']*self.f_glint(glint_theta)
tg = vd['glint_scale']*self.f_glint(tang)
noglint = False
else:
glint_theta = (360 + angle - self.glint_angle0) % 360
glint = vd['glint_scale']*self.f_glint(glint_theta)
gt = (360 + tang - self.glint_angle0) % 360
tg = vd['glint_scale']*self.f_glint(gt)
plt.rc('font', size=fontsize)
if notrend:
            figsize = (9,4) if figsize is None else figsize
fig,ax=plt.subplots(nrows=1, figsize=figsize, sharex=True)
ax.plot(angle, res, 'o',c='skyblue',ms=2)
if binwidth:
r_, f_, e_, n_ = lcbin(angle, res, binwidth=binwidth)
ax.errorbar(r_,f_,yerr=e_,fmt='o',c='midnightblue',ms=5,
capsize=2)
ax.set_xlim(0, 360)
ylim = np.max(np.abs(res))+0.05*np.ptp(res)
ax.set_ylim(-ylim,ylim)
ax.axhline(0, color='saddlebrown',ls=':')
ax.set_xlabel(r'Roll angle [$^{\circ}$]')
ax.set_ylabel('Residual')
ax.set_title(title)
elif 'glint_scale' in vk and self.glint_moon:
            figsize = (9,8) if figsize is None else figsize
fig,ax=plt.subplots(nrows=3, figsize=figsize)
y = res + rolltrend
ax[0].plot(angle, y, 'o',c='skyblue',ms=2)
ax[0].plot(tang, tr, c='saddlebrown')
if binwidth:
r_, f_, e_, n_ = lcbin(angle, y, binwidth=binwidth)
ax[0].errorbar(r_,f_,yerr=e_,fmt='o',c='midnightblue',ms=5,
capsize=2)
ax[0].set_xlabel(r'Roll angle [$^{\circ}$] (Sky)')
ax[0].set_ylabel('Roll angle trend')
ylim = np.max(np.abs(y))+0.05*np.ptp(y)
ax[0].set_xlim(0, 360)
ax[0].set_ylim(-ylim,ylim)
ax[0].set_title(title)
y = res + glint
ax[1].plot(glint_theta, y, 'o',c='skyblue',ms=2)
ax[1].plot(tang, tg, c='saddlebrown')
if binwidth:
r_, f_, e_, n_ = lcbin(glint_theta, y, binwidth=binwidth)
ax[1].errorbar(r_,f_,yerr=e_,fmt='o',c='midnightblue',ms=5,
capsize=2)
ylim = np.max(np.abs(y))+0.05*np.ptp(y)
ax[1].set_xlim(0, 360)
ax[1].set_ylim(-ylim,ylim)
ax[1].set_xlabel(r'Roll angle [$^{\circ}$] (Moon)')
ax[1].set_ylabel('Moon glint')
ax[2].plot(angle, res, 'o',c='skyblue',ms=2)
if binwidth:
r_, f_, e_, n_ = lcbin(angle, res, binwidth=binwidth)
ax[2].errorbar(r_,f_,yerr=e_,fmt='o',c='midnightblue',ms=5,
capsize=2)
ax[2].axhline(0, color='saddlebrown',ls=':')
ax[2].set_xlim(0, 360)
ylim = np.max(np.abs(res))+0.05*np.ptp(res)
ax[2].set_ylim(-ylim,ylim)
ax[2].set_xlabel(r'Roll angle [$^{\circ}$] (Sky)')
ax[2].set_ylabel('Residuals')
else:
            figsize = (8,6) if figsize is None else figsize
fig,ax=plt.subplots(nrows=2, figsize=figsize, sharex=True)
y = res + rolltrend + glint
ax[0].plot(angle, y, 'o',c='skyblue',ms=2)
ax[0].plot(tang, tr+tg, c='saddlebrown')
if binwidth:
r_, f_, e_, n_ = lcbin(angle, y, binwidth=binwidth)
ax[0].errorbar(r_,f_,yerr=e_,fmt='o',c='midnightblue',ms=5,
capsize=2)
if noglint:
ax[0].set_ylabel('Roll angle trend')
else:
ax[0].set_ylabel('Roll angle trend + glint')
ylim = np.max(np.abs(y))+0.05*np.ptp(y)
ax[0].set_ylim(-ylim,ylim)
ax[0].set_title(title)
ax[1].plot(angle, res, 'o',c='skyblue',ms=2)
if binwidth:
r_, f_, e_, n_ = lcbin(angle, res, binwidth=binwidth)
ax[1].errorbar(r_,f_,yerr=e_,fmt='o',c='midnightblue',ms=5,
capsize=2)
ax[1].axhline(0, color='saddlebrown',ls=':')
ax[1].set_xlim(0, 360)
ylim = np.max(np.abs(res))+0.05*np.ptp(res)
ax[1].set_ylim(-ylim,ylim)
ax[1].set_xlabel(r'Roll angle [$^{\circ}$]')
ax[1].set_ylabel('Residuals')
fig.tight_layout()
return fig
# ------------------------------------------------------------
# Data display and diagnostics
def transit_noise_plot(self, width=3, steps=500,
fname=None, figsize=(6,4), fontsize=11, return_values=False,
requirement=None, local=False, verbose=True):
"""
Transit noise plot
fname: to specify an output file for the plot
return_values: return a dictionary of statistics - noise in ppm
"""
try:
time = np.array(self.lc['time'])
flux = np.array(self.lc['flux'])
flux_err = np.array(self.lc['flux_err'])
except AttributeError:
raise AttributeError("Use get_lightcurve() to load data first.")
T = np.linspace(np.min(time)+width/48,np.max(time)-width/48 , steps)
Nsc = np.zeros_like(T)
Fsc = np.zeros_like(T)
Nmn = np.zeros_like(T)
for i,_t in enumerate(T):
if local:
j = (np.abs(time-_t) < (width/48)).nonzero()[0]
_n,_f = transit_noise(time[j], flux[j], flux_err[j], T_0=_t,
width=width, method='scaled')
_m = transit_noise(time[j], flux[j], flux_err[j], T_0=_t,
width=width, method='minerr')
else:
_n,_f = transit_noise(time, flux, flux_err, T_0=_t,
width=width, method='scaled')
_m = transit_noise(time, flux, flux_err, T_0=_t,
width=width, method='minerr')
if np.isfinite(_n):
Nsc[i] = _n
Fsc[i] = _f
if np.isfinite(_m):
Nmn[i] = _m
msk = (Nsc > 0)
Tsc = T[msk]
Nsc = Nsc[msk]
Fsc = Fsc[msk]
msk = (Nmn > 0)
Tmn = T[msk]
Nmn = Nmn[msk]
if verbose:
print('Scaled noise method')
print('Mean noise = {:0.1f} ppm'.format(Nsc.mean()))
print('Min. noise = {:0.1f} ppm'.format(Nsc.min()))
print('Max. noise = {:0.1f} ppm'.format(Nsc.max()))
print('Mean noise scaling factor = {:0.3f} '.format(Fsc.mean()))
print('Min. noise scaling factor = {:0.3f} '.format(Fsc.min()))
print('Max. noise scaling factor = {:0.3f} '.format(Fsc.max()))
print('\nMinimum error noise method')
print('Mean noise = {:0.1f} ppm'.format(Nmn.mean()))
print('Min. noise = {:0.1f} ppm'.format(Nmn.min()))
print('Max. noise = {:0.1f} ppm'.format(Nmn.max()))
plt.rc('font', size=fontsize)
fig,ax=plt.subplots(2,1,figsize=figsize,sharex=True)
ax[0].set_xlim(np.min(time),np.max(time))
ax[0].plot(time, flux,'b.',ms=1)
ax[0].set_ylabel("Flux ")
ylo = np.min(flux) - 0.2*np.ptp(flux)
ypl = np.max(flux) + 0.2*np.ptp(flux)
yhi = np.max(flux) + 0.4*np.ptp(flux)
ax[0].set_ylim(ylo, yhi)
ax[0].errorbar(np.median(T),ypl,xerr=width/48,
capsize=5,color='b',ecolor='b')
ax[1].plot(Tsc,Nsc,'b.',ms=1)
ax[1].plot(Tmn,Nmn,'g.',ms=1)
ax[1].set_ylabel("Transit noise [ppm] ")
ax[1].set_xlabel("Time");
if requirement is not None:
ax[1].axhline(requirement, color='darkcyan',ls=':')
fig.tight_layout()
        if fname is None:
plt.show()
else:
plt.savefig(fname)
if return_values:
d = {}
d['Scaled noise, mean noise'] = Nsc.mean()
d['Scaled noise, min. noise'] = Nsc.min()
d['Scaled noise, max. noise'] = Nsc.max()
d['Scaled noise, mean scaling factor'] = Fsc.mean()
d['Scaled noise, min. scaling factor'] = Fsc.min()
d['Scaled noise, max. scaling factor'] = Fsc.max()
d['Minimum error, mean noise'] = Nmn.mean()
d['Minimum error, min. noise'] = Nmn.min()
d['Minimum error, max. noise'] = Nmn.max()
return d
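    # Illustrative usage (a sketch, commented out; `d` denotes an instance
    # of this class; the 20 ppm requirement line is hypothetical):
    #
    #   stats = d.transit_noise_plot(width=3, requirement=20,
    #                                return_values=True)
    #   print(stats['Scaled noise, mean noise'])  # noise in ppm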
#------
def decontaminate(self, Gmag=None, count_rate=None, verbose=True,
configFile=None):
"""
Correction to light curve for contamination by nearby stars.
The parameter count_rate is used to pass the assumed values of the
target counts per exposure relative to 10**(-0.4*Gmag), i.e. assuming
that a star with G-band magnitude Gmag has a count_rate value of 1.
Must have the same number of elements as the observed lightcurve
        currently stored in dataset.lc. Set elements of count_rate to np.nan
        to exclude those observations from the zero-point calculation.
:param Gmag: default is to use value from FITS keyword MAG_G
:param count_rate: Normalised count rate values for light curve
:param verbose:
:param configFile:
:returns: time, flux, flux_err
"""
if self.decontaminated:
raise Exception('Decontamination correction already applied.')
time = self.lc['time']
flux = self.lc['flux']
flux_err = self.lc['flux_err']
contam = self.lc['contam']
config = load_config(configFile)
psf_file = config['psf_file']['psf_file']
psf_x0 = config['psf_file']['x0']
psf_y0 = config['psf_file']['y0']
here = os.path.abspath(os.path.dirname(__file__))
data_path = os.path.join(here,'data','instrument')
try:
psf_path = os.path.join(data_path, psf_file)
except KeyError:
raise KeyError("Run pycheops.core.setup_config(overwrite=True) to"
" update your config file.")
with open(psf_path) as fp:
psf = [[float(digit) for digit in line.split()] for line in fp]
position0 = [psf_x0, psf_y0]
aperture0 = CircularAperture(position0, r=self.ap_rad)
photTable0 = aperture_photometry(psf, aperture0)
target_flux = photTable0['aperture_sum'][0]
flx_frac = target_flux/np.sum(psf)
        if Gmag is None:
Gmag = self.lc['table'].meta['MAG_G']
        if count_rate is None:
count_rate = np.ones_like(time)
k = np.isfinite(count_rate)
nk = sum(k)
G0 = -2.5*np.log10( (contam[k]+count_rate[k])*
flx_frac*10**(-0.4*Gmag)/ flux[k])
G0mean = np.nanmean(G0)
contam_flux = contam*10**(-0.4*(Gmag-G0mean))
flux = (flux - contam_flux)/(1-np.nanmean(contam_flux))
flux_err = flux_err/(1-np.nanmean(contam_flux))
if verbose:
print(f'Fraction of target flux in aperture = {flx_frac:0.4f}')
print(f'Target G magnitude = {Gmag:0.3f}')
mncr = np.nanmedian(count_rate)
print(f'Median normalized count rate = {mncr:0.3f}')
print(f'No. of valid count rate values = {sum(k)}')
if nk>1:
G0err = np.nanstd(G0)/np.sqrt(sum(k))
print(f'G-band zero point = {G0mean:0.4f} +/- {G0err:0.4f}')
else:
print(f'G-band zero point = {G0mean:0.4f}')
self.lc['flux'] = flux
self.lc['flux_err'] = flux_err
self.decontaminated = True
return time, flux, flux_err
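    # Illustrative usage (a sketch, commented out; `d` denotes an instance
    # of this class). Set elements of a user-supplied count_rate array to
    # np.nan to drop those points from the zero-point estimate:
    #
    #   time, flux, flux_err = d.decontaminate()  # Gmag from FITS header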
def flatten(self, mask_centre, mask_width, npoly=2):
"""
Renormalize using a polynomial fit excluding a section of the data
The position and width of the mask to exclude the transit/eclipse is
specified on the same time scale as the light curve data.
:param mask_centre: time at the centre of the mask
:param mask_width: full width of the mask
:param npoly: number of terms in the normalizing polynomial
:returns: time, flux, flux_err
"""
time = self.lc['time']
flux = self.lc['flux']
flux_err = self.lc['flux_err']
mask = abs(time-mask_centre) > mask_width/2
n = np.polyval(np.polyfit(time[mask],flux[mask],npoly-1),time)
self.lc['flux'] /= n
self.lc['flux_err'] /= n
return self.lc['time'], self.lc['flux'], self.lc['flux_err']
#------
def mask_data(self, mask, verbose=True):
"""
Mask light curve data
Replace the light curve in the dataset with a subset of the data for
which the input mask is False.
        The original data are saved in self.lc_unmask
"""
self.lc_unmask = copy(self.lc)
for k in self.lc:
if isinstance(self.lc[k],np.ndarray):
self.lc[k] = self.lc[k][~mask]
if verbose:
print('\nMasked {} points'.format(sum(mask)))
return self.lc['time'], self.lc['flux'], self.lc['flux_err']
#------
def clip_outliers(self, clip=5, width=11, verbose=True):
"""
Remove outliers from the light curve.
Data more than clip*mad from a smoothed version of the light curve are
removed where mad is the mean absolute deviation from the
median-smoothed light curve.
:param clip: tolerance on clipping
:param width: width of window for median-smoothing filter
:returns: time, flux, flux_err
"""
flux = self.lc['flux']
# medfilt pads the array to be filtered with zeros, so edge behaviour
# is better if we filter flux-1 rather than flux.
d = abs(medfilt(flux-1, width)+1-flux)
mad = d.mean()
ok = d < clip*mad
for k in self.lc:
if isinstance(self.lc[k],np.ndarray):
self.lc[k] = self.lc[k][ok]
if verbose:
print('\nRejected {} points more than {:0.1f} x MAD = {:0.0f} '
'ppm from the median'.format(sum(~ok),clip,1e6*mad*clip))
return self.lc['time'], self.lc['flux'], self.lc['flux_err']
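    # Illustrative usage (a sketch, commented out; `d` denotes an instance
    # of this class and the mask values are hypothetical). These editing
    # methods update self.lc in place and return (time, flux, flux_err),
    # so they can be applied in sequence before fitting:
    #
    #   d.clip_outliers(clip=5, width=11)
    #   d.flatten(mask_centre=0.5, mask_width=0.2)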
#----------------------------------
def diagnostic_plot(self, fname=None,
figsize=(8,8), fontsize=10, flagged=None):
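        """
        Multi-panel plot of flux, background and centroid data v. time,
        roll angle, contamination and smear, for diagnosing instrumental
        trends in the light curve.
        If flagged=True, data points flagged as bad by the data reduction
        pipeline are shown in red.
        """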
try:
D = Table(self.lc['table'], masked=True)
except AttributeError:
raise AttributeError("Use get_lightcurve() to load data first.")
EventMask = (D['EVENT'] > 0) & (D['EVENT'] != 100)
D['FLUX'].mask = EventMask
        D['FLUX_BAD'] = MaskedColumn(self.lc['table']['FLUX'],
                mask = ~EventMask)
        D['BACKGROUND'].mask = EventMask
        D['BACKGROUND_BAD'] = MaskedColumn(self.lc['table']['BACKGROUND'],
                mask = ~EventMask)
tjdb_table = D['BJD_TIME']
flux_table = D['FLUX']
flux_err_table = D['FLUXERR']
back_table = D['BACKGROUND']
rollangle_table = D['ROLL_ANGLE']
xcen_table = D['CENTROID_X']
ycen_table = D['CENTROID_Y']
contam_table = D['CONTA_LC']
contam_err_table = D['CONTA_LC_ERR']
try:
smear_table = D['SMEARING_LC']
        except KeyError:
smear_table = np.zeros_like(tjdb_table)
flux_bad_table = D['FLUX_BAD']
back_bad_table = D['BACKGROUND_BAD']
xloc_table = D['LOCATION_X']
yloc_table = D['LOCATION_Y']
time = np.array(self.lc['time'])+self.lc['bjd_ref']
flux = np.array(self.lc['flux'])*np.nanmean(flux_table)
flux_err = np.array(self.lc['flux_err'])*np.nanmean(flux_table)
rollangle = np.array(self.lc['roll_angle'])
xcen = np.array(self.lc['centroid_x'])
ycen = np.array(self.lc['centroid_y'])
xoff = np.array(self.lc['xoff'])
yoff = np.array(self.lc['yoff'])
bg = np.array(self.lc['bg'])
contam = np.array(self.lc['contam'])
try:
smear = np.array(self.lc['smear'])
except KeyError:
smear = np.zeros_like(time)
plt.rc('font', size=fontsize)
fig, ax = plt.subplots(5,2,figsize=figsize)
cgood = 'midnightblue'
cbad = 'xkcd:red'
if flagged:
flux_measure = copy(flux_table)
else:
flux_measure = copy(flux)
ax[0,0].scatter(time,flux,s=2,c=cgood)
if flagged:
ax[0,0].scatter(tjdb_table,flux_bad_table,s=2,c=cbad)
ax[0,0].set_ylim(0.998*np.quantile(flux_measure,0.16),
1.002*np.quantile(flux_measure,0.84))
ax[0,0].set_xlabel('BJD')
ax[0,0].set_ylabel('Flux [e-]')
ax[0,1].scatter(rollangle,flux,s=2,c=cgood)
if flagged:
ax[0,1].scatter(rollangle_table,flux_bad_table,s=2,c=cbad)
ax[0,1].set_ylim(0.998*np.quantile(flux_measure,0.16),
1.002*np.quantile(flux_measure,0.84))
ax[0,1].set_xlabel('Roll angle in degrees')
ax[0,1].set_ylabel('Flux [e-]')
ax[1,0].scatter(time,bg,s=2,c=cgood)
if flagged:
ax[1,0].scatter(tjdb_table,back_bad_table,s=2,c=cbad)
ax[1,0].set_xlabel('BJD')
ax[1,0].set_ylabel('Background [e-]')
ax[1,0].set_ylim(0.9*np.quantile(bg,0.005),
1.1*np.quantile(bg,0.995))
ax[1,1].scatter(rollangle,bg,s=2,c=cgood)
if flagged:
ax[1,1].scatter(rollangle_table,back_bad_table,s=2,c=cbad)
ax[1,1].set_xlabel('Roll angle in degrees')
ax[1,1].set_ylabel('Background [e-]')
ax[1,1].set_ylim(0.9*np.quantile(bg,0.005),
1.1*np.quantile(bg,0.995))
ax[2,0].scatter(xcen,flux,s=2,c=cgood)
if flagged:
ax[2,0].scatter(xcen_table,flux_bad_table,s=2,c=cbad)
ax[2,0].set_ylim(0.998*np.quantile(flux_measure,0.16),
1.002*np.quantile(flux_measure,0.84))
ax[2,0].set_xlabel('Centroid x')
ax[2,0].set_ylabel('Flux [e-]')
ax[2,1].scatter(ycen,flux,s=2,c=cgood)
if flagged:
ax[2,1].scatter(ycen_table,flux_bad_table,s=2,c=cbad)
ax[2,1].set_ylim(0.998*np.quantile(flux_measure,0.16),
1.002*np.quantile(flux_measure,0.84))
ax[2,1].set_xlabel('Centroid y')
ax[2,1].set_ylabel('Flux [e-]')
ax[3,0].scatter(contam,flux,s=2,c=cgood)
if flagged:
ax[3,0].scatter(contam_table,flux_bad_table,s=2,c=cbad)
ax[3,0].set_xlabel('Contamination estimate')
ax[3,0].set_ylabel('Flux [e-]')
ax[3,0].set_xlim(np.min(contam),np.max(contam))
ax[3,0].set_ylim(0.998*np.quantile(flux_measure,0.16),
1.002*np.quantile(flux_measure,0.84))
ax[3,1].scatter(smear,flux,s=2,c=cgood)
if flagged:
ax[3,1].scatter(smear_table,flux_bad_table,s=2,c=cbad)
ax[3,1].set_xlabel('Smear estimate')
ax[3,1].set_ylabel('Flux [e-]')
if np.ptp(smear) > 0:
ax[3,1].set_xlim(np.min(smear),np.max(smear))
else:
ax[3,1].set_xlim(-1,1)
ax[3,1].set_ylim(0.998*np.quantile(flux_measure,0.16),
1.002*np.quantile(flux_measure,0.84))
ax[4,0].scatter(rollangle,xoff,s=2,c=cgood)
#ax[4,0].scatter(rollangle,yoff,s=2,c=cbad)
ax[4,0].set_xlabel('Roll angle in degrees')
ax[4,0].set_ylabel('X centroid offset')
#ax[4,1].scatter(rollangle,xoff,s=2,c=cgood)
ax[4,1].scatter(rollangle,yoff,s=2,c=cbad)
ax[4,1].set_xlabel('Roll angle in degrees')
ax[4,1].set_ylabel('Y centroid offset')
fig.tight_layout()
        if fname is None:
plt.show()
else:
plt.savefig(fname)
#------
def decorr(self, dfdt=False, d2fdt2=False, dfdx=False, d2fdx2=False,
dfdy=False, d2fdy2=False, d2fdxdy=False, dfdsinphi=False,
dfdcosphi=False, dfdsin2phi=False, dfdcos2phi=False,
dfdsin3phi=False, dfdcos3phi=False, dfdbg=False,
dfdcontam=False, dfdsmear=False, scale=True):
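        """
        Fit and divide out a linear trend model built from the selected
        basis vectors (time, centroid offsets, roll-angle harmonics,
        background, contamination and smear).
        The light curve stored in self.lc is updated in place and the
        detrended flux and flux error are returned.
        """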
time = np.array(self.lc['time'])
flux = np.array(self.lc['flux'])
flux_err = np.array(self.lc['flux_err'])
phi = self.lc['roll_angle']*np.pi/180
sinphi = interp1d(time,np.sin(phi), fill_value=0, bounds_error=False)
cosphi = interp1d(time,np.cos(phi), fill_value=0, bounds_error=False)
dx = interp1d(time,self.lc['xoff'], fill_value=0, bounds_error=False)
dy = interp1d(time,self.lc['yoff'], fill_value=0, bounds_error=False)
model = self.__factor_model__(scale)
params = model.make_params()
params.add('dfdt', value=0, vary=dfdt)
params.add('d2fdt2', value=0, vary=d2fdt2)
params.add('dfdx', value=0, vary=dfdx)
params.add('d2fdx2', value=0, vary=d2fdx2)
params.add('dfdy', value=0, vary=dfdy)
params.add('d2fdy2', value=0, vary=d2fdy2)
params.add('d2fdxdy', value=0, vary=d2fdxdy)
params.add('dfdsinphi', value=0, vary=dfdsinphi)
params.add('dfdcosphi', value=0, vary=dfdcosphi)
params.add('dfdsin2phi', value=0, vary=dfdsin2phi)
params.add('dfdcos2phi', value=0, vary=dfdcos2phi)
params.add('dfdsin3phi', value=0, vary=dfdsin3phi)
params.add('dfdcos3phi', value=0, vary=dfdcos3phi)
params.add('dfdbg', value=0, vary=dfdbg)
params.add('dfdcontam', value=0, vary=dfdcontam)
params.add('dfdsmear', value=0, vary=dfdsmear)
result = model.fit(flux, params, t=time)
print("Fit Report")
print(result.fit_report())
result.plot()
print("\nCompare the lightcurve RMS before and after decorrelation")
print('RMS before = {:0.1f} ppm'.format(1e6*self.lc['flux'].std()))
self.lc['flux'] = flux/result.best_fit
self.lc['flux_err'] = flux_err/result.best_fit
print('RMS after = {:0.1f} ppm'.format(1e6*self.lc['flux'].std()))
flux_d = flux/result.best_fit
flux_err_d = flux_err/result.best_fit
fig,ax=plt.subplots(1,2,figsize=(8,4))
y = 1e6*(flux_d-1)
ax[0].plot(time, y,'b.',ms=1)
ax[0].set_xlabel("BJD-{}".format((self.lc['bjd_ref'])),fontsize=12)
ax[0].set_ylabel("Flux-1 [ppm]",fontsize=12)
fig.suptitle('Detrended fluxes')
n, bins, patches = ax[1].hist(y, 50, density=True, stacked=True)
ax[1].set_xlabel("Flux-1 [ppm]",fontsize=12)
v = np.var(y)
ax[1].plot(bins,np.exp(-0.5*bins**2/v)/np.sqrt(2*np.pi*v))
fig.tight_layout()
fig.subplots_adjust(top=0.88)
return flux_d, flux_err_d
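    # Illustrative usage (a sketch, commented out; `d` denotes an instance
    # of this class):
    #
    #   flux_d, flux_err_d = d.decorr(dfdx=True, dfdy=True,
    #                                 dfdsinphi=True, dfdcosphi=True)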
#-----------------------------------
def should_I_decorr(self,mask_centre=0,mask_width=0,scale=True):
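        """
        Search combinations of decorrelation parameters for the combination
        that minimises the Bayes Information Criterion (BIC) of a
        trend-only fit to the light curve.
        A transit or eclipse can be excluded from the fit using the
        mask_centre and mask_width keywords.
        :returns: (min_BIC, decorr_params)
        """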
flux = np.array(self.lc['flux'])
flux_err = np.array(self.lc['flux_err'])
phi = self.lc['roll_angle']*np.pi/180
sinphi = interp1d(np.array(self.lc['time']),np.sin(phi), fill_value=0,
bounds_error=False)
cosphi = interp1d(np.array(self.lc['time']),np.cos(phi), fill_value=0,
bounds_error=False)
bg = interp1d(np.array(self.lc['time']),self.lc['bg'], fill_value=0,
bounds_error=False)
contam = interp1d(np.array(self.lc['time']),self.lc['contam'], fill_value=0,
bounds_error=False)
smear = interp1d(np.array(self.lc['time']),self.lc['smear'], fill_value=0,
bounds_error=False)
dx = interp1d(np.array(self.lc['time']),self.lc['xoff'], fill_value=0,
bounds_error=False)
dy = interp1d(np.array(self.lc['time']),self.lc['yoff'], fill_value=0,
bounds_error=False)
time = np.array(self.lc['time'])
if mask_centre != 0:
flux = flux[(self.lc['time'] < (mask_centre-mask_width/2)) |
(self.lc['time'] > (mask_centre+mask_width/2))]
flux_err = flux_err[(self.lc['time'] < (mask_centre-mask_width/2)) |
(self.lc['time'] > (mask_centre+mask_width/2))]
time_cut = time[(self.lc['time'] < (mask_centre-mask_width/2)) |
(self.lc['time'] > (mask_centre+mask_width/2))]
phi_cut = self.lc['roll_angle'][(self.lc['time'] < (mask_centre-mask_width/2)) |
(self.lc['time'] > (mask_centre+mask_width/2))] *np.pi/180
sinphi = interp1d(time_cut,np.sin(phi_cut), fill_value=0, bounds_error=False)
cosphi = interp1d(time_cut,np.cos(phi_cut), fill_value=0, bounds_error=False)
bg_cut = self.lc['bg'][(self.lc['time'] < (mask_centre-mask_width/2)) |
(self.lc['time'] > (mask_centre+mask_width/2))]
bg = interp1d(time_cut,bg_cut, fill_value=0, bounds_error=False)
contam_cut = self.lc['contam'][(self.lc['time'] < (mask_centre-mask_width/2)) |
(self.lc['time'] > (mask_centre+mask_width/2))]
contam = interp1d(time_cut,contam_cut, fill_value=0, bounds_error=False)
dx_cut = self.lc['xoff'][(self.lc['time'] < (mask_centre-mask_width/2)) |
(self.lc['time'] > (mask_centre+mask_width/2))]
dx = interp1d(time_cut,dx_cut, fill_value=0, bounds_error=False)
dy_cut = self.lc['yoff'][(self.lc['time'] < (mask_centre-mask_width/2)) |
(self.lc['time'] > (mask_centre+mask_width/2))]
dy = interp1d(time_cut,dy_cut, fill_value=0, bounds_error=False)
time = time[(self.lc['time'] < (mask_centre-mask_width/2)) |
(self.lc['time'] > (mask_centre+mask_width/2))]
params_d = ['dfdt', 'dfdx', 'dfdy', 'dfdsinphi', 'dfdcosphi', 'dfdbg', 'dfdcontam',
'dfdsmear', 'd2fdt2', 'd2fdx2', 'd2fdy2', 'dfdsin2phi', 'dfdcos2phi']
boolean = [[False, True]]*len(params_d)
decorr_arr = [[]]*len(params_d)
for kindex, k in enumerate(range(len(params_d))):
temp = []
for jindex, j in enumerate([False, True]):
for index, i in enumerate(range(len(params_d)-kindex)):
temp.append(boolean[index][jindex])
decorr_arr[kindex] = temp
for hindex, h in enumerate(range(kindex)):
decorr_arr[kindex].append(False)
decorr_arr[kindex].append(True)
for index, i in enumerate(decorr_arr[0]):
dfdt=decorr_arr[0][index]
dfdx=decorr_arr[1][index]
dfdy=decorr_arr[2][index]
dfdsinphi=decorr_arr[3][index]
dfdcosphi=decorr_arr[4][index]
dfdbg=decorr_arr[5][index]
dfdcontam=decorr_arr[6][index]
dfdsmear=decorr_arr[7][index]
d2fdt2=decorr_arr[8][index]
d2fdx2=decorr_arr[9][index]
d2fdy2=decorr_arr[10][index]
dfdsin2phi=decorr_arr[11][index]
dfdcos2phi=decorr_arr[12][index]
model = self.__factor_model__(scale)
params = model.make_params()
params.add('dfdt', value=0, vary=dfdt)
params.add('dfdx', value=0, vary=dfdx)
params.add('dfdy', value=0, vary=dfdy)
params.add('dfdsinphi', value=0, vary=dfdsinphi)
params.add('dfdcosphi', value=0, vary=dfdcosphi)
params.add('dfdbg', value=0, vary=dfdbg)
params.add('dfdcontam', value=0, vary=dfdcontam)
params.add('dfdsmear', value=0, vary=dfdsmear)
params.add('d2fdt2', value=0, vary=d2fdt2)
params.add('d2fdx2', value=0, vary=d2fdx2)
params.add('d2fdy2', value=0, vary=d2fdy2)
params.add('dfdsin2phi', value=0, vary=dfdsin2phi)
params.add('dfdcos2phi', value=0, vary=dfdcos2phi)
result = model.fit(flux, params, t=time)
if index == 0:
min_BIC = copy(result.bic)
decorr_params = []
else:
if result.bic < min_BIC:
min_BIC = copy(result.bic)
decorr_params = []
for xindex, x in enumerate([dfdt, dfdx, dfdy, dfdsinphi, dfdcosphi, dfdbg, dfdcontam,
dfdsmear, d2fdt2, d2fdx2, d2fdy2, dfdsin2phi, dfdcos2phi]):
                if x:
if params_d[xindex] == "dfdsinphi":
decorr_params.append("dfdsinphi")
decorr_params.append("dfdcosphi")
elif params_d[xindex] == "dfdcosphi" and "dfdsinphi" not in decorr_params:
decorr_params.append("dfdsinphi")
decorr_params.append("dfdcosphi")
elif params_d[xindex] == "dfdcosphi" and "dfdcosphi" in decorr_params:
continue
elif params_d[xindex] == "dfdsin2phi":
decorr_params.append("dfdsin2phi")
decorr_params.append("dfdcos2phi")
elif params_d[xindex] == "dfdcos2phi" and "dfdsin2phi" not in decorr_params:
decorr_params.append("dfdsin2phi")
decorr_params.append("dfdcos2phi")
elif params_d[xindex] == "dfdcos2phi" and "dfdcos2phi" in decorr_params:
continue
else:
decorr_params.append(params_d[xindex])
if len(decorr_params) == 0:
print("No decorrelation is needed.")
else:
print("Decorrelate in", *decorr_params, "using decorr, lmfit_transt, or lmfit_eclipse functions.")
return(min_BIC, decorr_params)
#---------------------------------
# Pickling
def __getstate__(self):
state = self.__dict__.copy()
# Replace lmfit model with its string representation
if 'model' in state.keys():
model_repr = state['model'].__repr__()
state['model'] = model_repr
else:
state['model'] = ''
# There may also be an instance of an lmfit model buried in
# sampler.log_prob_fn.args - replace with its string representation
if 'sampler' in state.keys():
args = state['sampler'].log_prob_fn.args
model_repr = args[0].__repr__()
state['sampler'].log_prob_fn.args = (model_repr, *args[1:])
return state
#------
def __setstate__(self, state):
# Fix for old saved datasets with no __scale__ attribute
if not hasattr(self, '__scale__'):
self.__scale__ = True
# Fix for old saved datasets with no __extra_basis_funcs__ attribute
if not hasattr(self, '__extra_basis_funcs__'):
self.__extra_basis_funcs__ = {}
def reconstruct_model(model_repr,state):
F = self.__factor_model__(self.__scale__,
self.__extra_basis_funcs__)
if '_transit_func' in model_repr:
model = TransitModel()
model *= self.__factor_model__(self.__scale__,
self.__extra_basis_funcs__)
elif '_eclipse_func' in model_repr:
model = EclipseModel()
model *= self.__factor_model__(self.__scale__,
self.__extra_basis_funcs__)
else:
model = None
if 'glint_func' in model_repr:
model += Model(_glint_func, independent_vars=['t'],
f_theta=state['f_theta'], f_glint=state['f_glint'])
return model
self.__dict__.update(state)
if 'model' in state.keys():
self.model = reconstruct_model(state['model'],state)
if 'sampler' in state.keys():
args = state['sampler'].log_prob_fn.args
model = reconstruct_model(args[0],state)
state['sampler'].log_prob_fn.args = (model, *args[1:])
| 184,733 | 40.531924 | 127 | py |
pycheops | pycheops-master/pycheops/funcs.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
funcs
=====
Functions related to observable properties of stars and exoplanets
Parameters
----------
Functions are defined in terms of the following parameters. [1]_
* a - orbital semi-major axis in solar radii = a_1 + a_2
* P - orbital period in mean solar days
* Mass - total system mass in solar masses, Mass = m_1 + m_2
* ecc - orbital eccentricity
* omdeg - longitude of periastron of star's orbit, omega, in _degrees_
* sini - sine of the orbital inclination
* K - 2.pi.a.sini/(P.sqrt(1-e^2)) = K_1 + K_2
* K_1, K_2 - orbital semi-amplitudes in km/s
* q - mass ratio = m_2/m_1 = K_1/K_2 = a_1/a_2
* f_m - mass function = m_2^3.sini^3/(m_1+m_2)^2 in solar masses
= K_1^3.P/(2.pi.G).(1-e^2)^(3/2)
* r_1 - radius of star 1 in units of the semi-major axis, r_1 = R_*/a
* r_2 - radius of companion in units of the semi-major axis, r_2 = R_2/a
* rhostar - mean stellar density = 3.pi/(GP^2(1+q)r_1^3)
* rstar - host star radius/semi-major axis, rstar = R_*/a
* k - planet/star radius ratio, k = R_planet/R_star
* tzero - time of mid-transit (minimum on-sky star-planet separation).
* b - impact parameter, b=a.cos(i)/R_star (assuming circular orbit)
.. rubric:: References
.. [1] Hilditch, R.W., An Introduction to Close Binary Stars, CUP 2001.
Functions
---------
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .constants import *
from numpy import *
from scipy.optimize import brent
from numba import vectorize
from uncertainties import ufloat, UFloat
import requests
from .utils import mode, parprint, ellpar
from random import sample as random_sample
from .constants import R_SunN, M_SunN, M_JupN, R_JupN, au, M_EarthN, R_EarthN
import matplotlib.pyplot as plt
from os.path import join, dirname, abspath
from astropy.table import Table
from pathlib import Path
from time import localtime, mktime
from os.path import getmtime
from .core import load_config
from matplotlib.patches import Ellipse
from scipy.signal import argrelextrema
import warnings
__all__ = [ 'a_rsun','f_m','m1sin3i','m2sin3i','asini','rhostar','g_2',
'K_kms','m_comp','transit_width','esolve','t2z',
'tperi2tzero','tzero2tperi', 'vrad', 'xyz_planet', 'delta_t_sec']
_arsun = (GM_SunN*mean_solar_day**2/(4*pi**2))**(1/3.)/R_SunN
_f_m = mean_solar_day*1e9/(2*pi)/GM_SunN
_asini = mean_solar_day*1e3/2/pi/R_SunN
_rhostar = 3*pi*V_SunN/(GM_SunN*mean_solar_day**2)
_model_path = join(dirname(abspath(__file__)),'data','models')
_rho_Earth_cgs = M_EarthN/(4/3*pi*R_EarthN**3)/1000
config = load_config()
_cache_path = config['DEFAULT']['data_cache_path']
TEPCatPath = Path(_cache_path,'allplanets-csv.csv')
def a_rsun(P, Mass):
"""
Semi-major axis in solar radii
:param P: orbital period in mean solar days
:param Mass: total mass in solar masses, M
:returns: a = (G.M.P^2/(4.pi^2))^(1/3) in solar radii
"""
return _arsun * P**(2/3.) * Mass**(1/3.)
def f_m(P, K, ecc=0):
"""
Mass function in solar masses
:param P: orbital period in mean solar days
:param K: semi-amplitude of the spectroscopic orbit in km/s
:param ecc: orbital eccentricity
:returns: f_m = m_2^3.sini^3/(m_1+m_2)^2 in solar masses
"""
return _f_m * K**3 * P * (1 - ecc**2)**1.5
def m1sin3i(P, K_1, K_2, ecc=0):
"""
Reduced mass of star 1 in solar masses
:param K_1: semi-amplitude of star 1 in km/s
:param K_2: semi-amplitude of star 2 in km/s
:param P: orbital period in mean solar days
:param ecc: orbital eccentricity
:returns: (m_2.sini)^3/(m_1+m_2)^2 in solar masses
"""
return _f_m * K_2 * (K_1 + K_2)**2 * P * (1 - ecc**2)**1.5
def m2sin3i(P, K_1, K_2, ecc=0):
"""
Reduced mass of star 2 in solar masses
:param K_1: semi-amplitude of star 1 in km/s
:param K_2: semi-amplitude of star 2 in km/s
:param P: orbital period in mean solar days
:param ecc: orbital eccentricity
:returns: m_2.sini^3 in solar masses
"""
return _f_m * K_1 * (K_1 + K_2)**2 * P * (1 - ecc**2)**1.5
def asini(K, P, ecc=0):
"""
a.sini in solar radii
:param K: semi-amplitude of the spectroscopic orbit in km/s
:param P: orbital period in mean solar days
:returns: a.sin(i) in solar radii
"""
return _asini * K * P *sqrt(1-ecc**2)
def r_star(rho, P, q=0):
"""
Scaled stellar radius R_*/a from mean stellar density
:param rho: Mean stellar density in solar units
:param P: orbital period in mean solar days
:param q: mass ratio, m_2/m_1
:returns: radius of star in units of the semi-major axis, R_*/a
"""
return (_rhostar/(rho*P**2/(1+q)))**(1/3.)
def rhostar(r_1, P, q=0):
"""
Mean stellar density from scaled stellar radius.
:param r_1: radius of star in units of the semi-major axis, r_1 = R_*/a
:param P: orbital period in mean solar days
:param q: mass ratio, m_2/m_1
:returns: Mean stellar density in solar units
"""
return _rhostar/(P**2*(1+q)*r_1**3)
def g_2(r_2, P, K, sini=1, ecc=0):
"""
Companion surface gravity g = G.m_2/R_2**2 from P, K and r_2
    Calculated using equation (4) from Southworth et al., MNRAS
    2007MNRAS.379L..11S.
:param r_2: companion radius relative to the semi-major axis, r_2 = R_2/a
:param P: orbital period in mean solar days
    :param K: semi-amplitude of star 1's orbit in km/s
:param sini: sine of the orbital inclination
    :param ecc: orbital eccentricity
:returns: companion surface gravity in m.s-2
"""
return 2*pi*sqrt(1-ecc**2)*K*1e3/(P*mean_solar_day*r_2**2*sini)
def K_kms(m_1, m_2, P, sini, ecc):
"""
Semi-amplitudes of the spectroscopic orbits in km/s
- K = 2.pi.a.sini/(P.sqrt(1-ecc^2))
- K_1 = K * m_2/(m_1+m_2)
- K_2 = K * m_1/(m_1+m_2)
:param m_1: mass of star 1 in solar masses
:param m_2: mass of star 2 in solar masses
:param P: orbital period in mean solar days
:param sini: sine of the orbital inclination
    :param ecc: orbital eccentricity
:returns: K_1, K_2 -- semi-amplitudes in km/s
"""
M = m_1 + m_2
a = a_rsun(P, M)
K = 2*pi*a*R_SunN*sini/(P*mean_solar_day*sqrt(1-ecc**2))/1000
K_1 = K * m_2/M
K_2 = K * m_1/M
return K_1, K_2
#---------------
def m_comp(f_m, m_1, sini):
"""
Companion mass in solar masses given mass function and stellar mass
:param f_m: = K_1^3.P/(2.pi.G).(1-ecc^2)^(3/2) in solar masses
:param m_1: mass of star 1 in solar masses
:param sini: sine of orbital inclination
:returns: m_2 = mass of companion to star 1 in solar masses
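    A round-trip check with illustrative (assumed) values m_1 = 1 M_Sun,
    m_2 = 0.5 M_Sun and sini = 1:
    :Example:
     >>> from pycheops.funcs import m_comp
     >>> fm = 0.5**3/(1 + 0.5)**2  # mass function for the assumed values
     >>> print("{:0.4f}".format(m_comp(fm, 1, 1)))
     0.5000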
"""
DA = -f_m/sini**3
DB = 2*DA*m_1
DC = DA*m_1**2
Q = (DA**2 - 3*DB)/9
R = (2*DA**3 - 9*DA*DB + 27*DC)/54
DAA = -sign(R)*(sqrt(R**2 - Q**3) + abs(R))**(1/3)
DBB = Q/DAA
return DAA + DBB - DA/3
#---------------
def transit_width(r, k, b, P=1):
"""
Total transit duration for a circular orbit.
    See equation (3) from Seager and Mallén-Ornelas, 2003ApJ...585.1038S.
:param r: R_star/a
:param k: R_planet/R_star
:param b: impact parameter = a.cos(i)/R_star
:param P: orbital period (optional, default P=1)
:returns: Total transit duration in the same units as P.
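    :Example:
     >>> from pycheops.funcs import transit_width
     >>> # Illustrative (assumed) values for a hot-Jupiter-like system
     >>> W = transit_width(0.1, 0.1, 0.3, P=3.0)
     >>> print("{:0.2f} hours".format(W*24))
     2.43 hours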
"""
return P*arcsin(r*sqrt( ((1+k)**2-b**2) / (1-b**2*r**2) ))/pi
#---------------
@vectorize(nopython=True)
def esolve(M, ecc):
"""
Solve Kepler's equation M = E - ecc.sin(E)
:param M: mean anomaly (scalar or array)
:param ecc: eccentricity (scalar or array)
:returns: eccentric anomaly, E
Algorithm is from Markley 1995, CeMDA, 63, 101 via pyAstronomy class
keplerOrbit.py
:Example:
Test precision using random values::
>>> from pycheops.funcs import esolve
>>> from numpy import pi, sin, abs, max
>>> from numpy.random import uniform
>>> ecc = uniform(0,1,1000)
>>> M = uniform(-2*pi,4*pi,1000)
>>> E = esolve(M, ecc)
>>> maxerr = max(abs(E - ecc*sin(E) - (M % (2*pi)) ))
>>> print("Maximum error = {:0.2e}".format(maxerr))
Maximum error = 8.88e-16
"""
M = M % (2*pi)
if ecc == 0:
return M
if M > pi:
M = 2*pi - M
flip = True
else:
flip = False
alpha = (3*pi + 1.6*(pi-abs(M))/(1+ecc) )/(pi - 6/pi)
d = 3*(1 - ecc) + alpha*ecc
r = 3*alpha*d * (d-1+ecc)*M + M**3
q = 2*alpha*d*(1-ecc) - M**2
w = (abs(r) + sqrt(q**3 + r**2))**(2/3)
E = (2*r*w/(w**2 + w*q + q**2) + M) / d
f_0 = E - ecc*sin(E) - M
f_1 = 1 - ecc*cos(E)
f_2 = ecc*sin(E)
f_3 = 1-f_1
d_3 = -f_0/(f_1 - 0.5*f_0*f_2/f_1)
d_4 = -f_0/(f_1 + 0.5*d_3*f_2 + (d_3**2)*f_3/6)
E = E -f_0/(f_1 + 0.5*d_4*f_2 + d_4**2*f_3/6 - d_4**3*f_2/24)
if flip:
E = 2*pi - E
return E
#---------------
def t2z(t, tzero, P, sini, rstar, ecc=0, omdeg=90, returnMask=False):
"""
Calculate star-planet separation relative to scaled stellar radius, z
Optionally, return a flag/mask to indicate cases where the planet is
further from the observer than the star, i.e., whether phases with z<1 are
transits (mask==True) or eclipses (mask==False)
:param t: time of observation (scalar or array)
:param tzero: time of inferior conjunction, i.e., mid-transit
:param P: orbital period
:param sini: sine of orbital inclination
:param rstar: scaled stellar radius, R_star/a
:param ecc: eccentricity (optional, default=0)
:param omdeg: longitude of periastron in degrees (optional, default=90)
    :param returnMask: return a flag to distinguish transits from eclipses.
N.B. omdeg is the longitude of periastron for the star's orbit
:returns: z [, mask]
:Example:
>>> from pycheops.funcs import t2z
>>> from numpy import linspace
>>> import matplotlib.pyplot as plt
>>> t = linspace(0,1,1000)
>>> sini = 0.999
>>> rstar = 0.1
>>> plt.plot(t, t2z(t,0,1,sini,rstar))
>>> plt.xlim(0,1)
>>> plt.ylim(0,12)
>>> ecc = 0.1
>>> for omdeg in (0, 90, 180, 270):
>>> plt.plot(t, t2z(t,0,1,sini,rstar,ecc,omdeg))
>>> plt.show()
"""
if ecc == 0:
nu = 2*pi*(t-tzero)/P
omrad = 0.5*pi
z = sqrt(1 - cos(nu)**2*sini**2)/rstar
else:
tp = tzero2tperi(tzero,P,sini,ecc,omdeg,return_nan_on_error=True)
if tp is nan:
if returnMask:
return full_like(t,nan),full_like(t,True,dtype=bool)
else:
return full_like(t,nan)
M = 2*pi*(t-tp)/P
E = esolve(M,ecc)
nu = 2*arctan(sqrt((1+ecc)/(1-ecc))*tan(E/2))
omrad = pi*omdeg/180
# Equation (5.63) from Hilditch
z = (((1-ecc**2)/
(1+ecc*cos(nu))*sqrt(1-sin(omrad+nu)**2*sini**2))/rstar)
if returnMask:
return z, sin(nu + omrad)*sini < 0
else:
return z
#---------
def tzero2tperi(tzero,P,sini,ecc,omdeg,
return_nan_on_error=False):
"""
Calculate time of periastron from time of mid-transit
Uses the method by Lacy, 1992AJ....104.2213L
:param tzero: times of mid-transit
:param P: orbital period
:param sini: sine of orbital inclination
:param ecc: eccentricity
:param omdeg: longitude of periastron in degrees
:returns: time of periastron prior to tzero
:Example:
>>> from pycheops.funcs import tzero2tperi
>>> tzero = 54321.6789
>>> P = 1.23456
>>> sini = 0.987
>>> ecc = 0.654
>>> omdeg = 89.01
>>> print("{:0.4f}".format(tzero2tperi(tzero,P,sini,ecc,omdeg)))
54321.6784
"""
def _delta(th, sin2i, omrad, ecc):
# Equation (4.9) from Hilditch
return (1-ecc**2)*(
sqrt(1-sin2i*sin(th+omrad)**2)/(1+ecc*cos(th)))
omrad = omdeg*pi/180
sin2i = sini**2
theta = 0.5*pi-omrad
if (1-sin2i) > finfo(0.).eps :
ta = theta-0.125*pi
tb = theta
tc = theta+0.125*pi
fa = _delta(ta, sin2i, omrad, ecc)
fb = _delta(tb, sin2i, omrad, ecc)
fc = _delta(tc, sin2i, omrad, ecc)
if ((fb>fa)|(fb>fc)):
t_ = linspace(0,2*pi,1024)
d_ = _delta(t_, sin2i, omrad, ecc)
try:
i_= argrelextrema(d_, less)[0]
t_ = t_[i_]
if len(t_)>1:
i_ = (abs(t_ - tb)).argmin()
t_ = t_[i_]
ta,tb,tc = (t_-0.01, t_, t_+0.01)
except:
if return_nan_on_error: return nan
print(sin2i, omrad, ecc)
print(ta, tb, tc)
print(fa, fb, fc)
raise ValueError('tzero2tperi grid search fail')
try:
theta = brent(_delta, args=(sin2i, omrad, ecc), brack=(ta, tb, tc))
except ValueError:
if return_nan_on_error: return nan
print(sin2i, omrad, ecc)
print(ta, tb, tc)
print(fa, fb, fc)
raise ValueError('Not a bracketing interval.')
if theta == pi:
E = pi
else:
E = 2*arctan(sqrt((1-ecc)/(1+ecc))*tan(theta/2))
return tzero - (E - ecc*sin(E))*P/(2*pi)
#---------
def tperi2tzero(tperi,P,sini,ecc,omdeg,eclipse=False):
"""
    Calculate time of mid-transit or mid-eclipse from time of periastron
:param tperi: times of periastron passage
:param P: orbital period
:param sini: sine of orbital inclination
:param ecc: eccentricity
:param omdeg: longitude of periastron in degrees
:param eclipse: calculate time of mid-eclipse if True, else mid-transit
    :returns: time of mid-transit, or time of mid-eclipse if eclipse=True
:Example:
>>> from pycheops.funcs import tperi2tzero
>>> tperi = 54321.6784
>>> P = 1.23456
>>> sini = 0.987
>>> ecc = 0.654
>>> omdeg = 89.01
>>> t_transit = tperi2tzero(tperi,P,sini,ecc,omdeg)
>>> t_eclipse = tperi2tzero(tperi,P,sini,ecc,omdeg,eclipse=True)
>>> print(f"{t_transit:0.4f}, {t_eclipse:0.4f}")
"""
def _delta(th, sin2i, omrad, ecc):
# Equation (4.9) from Hilditch
return (1-ecc**2)*(
sqrt(1-sin2i*sin(th+omrad)**2)/(1+ecc*cos(th)))
omrad = omdeg*pi/180
sin2i = sini**2
theta = 0.5*pi-omrad + pi*eclipse
if (1-sin2i) > finfo(0.).eps :
ta = theta-0.125*pi
tb = theta
tc = theta+0.125*pi
fa = _delta(ta, sin2i, omrad, ecc)
fb = _delta(tb, sin2i, omrad, ecc)
fc = _delta(tc, sin2i, omrad, ecc)
if ((fb>fa)|(fb>fc)):
t_ = linspace(0,2*pi,1024)
d_ = _delta(t_, sin2i, omrad, ecc)
try:
i_= argrelextrema(d_, less)[0]
t_ = t_[i_]
if len(t_)>1:
i_ = (abs(t_ - tb)).argmin()
t_ = t_[i_]
ta,tb,tc = (t_-0.01, t_, t_+0.01)
except:
print(sin2i, omrad, ecc)
print(ta, tb, tc)
print(fa, fb, fc)
                raise ValueError('tperi2tzero grid search fail')
try:
theta = brent(_delta, args=(sin2i, omrad, ecc), brack=(ta, tb, tc))
except ValueError:
print(sin2i, omrad, ecc)
print(ta, tb, tc)
print(fa, fb, fc)
raise ValueError('Not a bracketing interval.')
if theta == pi:
E = pi
else:
E = 2*arctan(sqrt((1-ecc)/(1+ecc))*tan(theta/2))
return tperi + (E - ecc*sin(E))*P/(2*pi)
#---------------
def eclipse_phase (P,sini,ecc,omdeg):
"""
    Calculate the orbital phase of mid-eclipse
    Uses the method by Lacy, 1992AJ....104.2213L
:param P: orbital period
:param sini: sine of orbital inclination
:param ecc: eccentricity
:param omdeg: longitude of periastron in degrees
:returns: phase of mid-eclipse
:Example:
>>> from pycheops.funcs import eclipse_phase
>>> P = 1.23456
>>> sini = 0.987
>>> ecc = 0.654
>>> omdeg = 89.01
    >>> ph_ecl = eclipse_phase(P,sini,ecc,omdeg)
>>> print(f"Phase of eclipse = {ph_ecl:0.4f}")
"""
t_peri = tzero2tperi(0,P,sini,ecc,omdeg)
t_ecl = tperi2tzero(t_peri,P,sini,ecc,omdeg,eclipse=True)
return t_ecl/P % 1
#---------------
def nu_max(Teff, logg):
"""
Peak frequency in micro-Hz for solar-like oscillations.
From equation (17) of Campante et al., (2016)[2]_.
:param logg: log of the surface gravity in cgs units.
:param Teff: effective temperature in K
:returns: nu_max in micro-Hz
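    :Example:
     >>> from pycheops.funcs import nu_max
     >>> # Solar values recover the solar peak frequency by construction
     >>> print("{:0.0f}".format(nu_max(5777, 4.438)))
     3090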
    .. rubric:: References
.. [2] Campante, 2016, ApJ 830, 138.
"""
return 3090 * 10**(logg-4.438)/sqrt(Teff/5777)
#---------------
def vrad(t,tzero,P,K,ecc=0,omdeg=90,sini=1, primary=True):
"""
Calculate radial velocity, V_r, for body in a Keplerian orbit
:param t: array of input times
:param tzero: time of inferior conjunction, i.e., mid-transit
:param P: orbital period
:param K: radial velocity semi-amplitude
:param ecc: eccentricity (optional, default=0)
:param omdeg: longitude of periastron in degrees (optional, default=90)
:param sini: sine of orbital inclination (to convert tzero to t_peri)
:param primary: if false calculate V_r for companion
:returns: V_r in same units as K relative to the barycentre of the binary
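    A sketch of the radial-velocity curve for an illustrative (assumed)
    circular orbit with K = 10 km/s:
    :Example:
     >>> from pycheops.funcs import vrad
     >>> from numpy import linspace
     >>> t = linspace(0, 1, 100)
     >>> rv = vrad(t, tzero=0, P=1, K=10)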
"""
tp = tzero2tperi(tzero,P,sini,ecc,omdeg)
M = 2*pi*(t-tp)/P
E = esolve(M,ecc)
nu = 2*arctan(sqrt((1+ecc)/(1-ecc))*tan(E/2))
omrad = pi*omdeg/180
if not primary:
omrad = omrad + pi
return K*(cos(nu+omrad)+ecc*cos(omrad))
#---------------
def xyz_planet(t, tzero, P, sini, ecc=0, omdeg=90):
"""
Position of the planet in Cartesian coordinates.
The position of the ascending node is taken to be Omega=0 and the
semi-major axis is taken to be a=1.
:param t: time of observation (scalar or array)
:param tzero: time of inferior conjunction, i.e., mid-transit
:param P: orbital period
:param sini: sine of orbital inclination
:param ecc: eccentricity (optional, default=0)
:param omdeg: longitude of periastron in degrees (optional, default=90)
N.B. omdeg is the longitude of periastron for the star's orbit
:returns: (x, y, z)
:Example:
>>> from pycheops.funcs import phase_angle
>>> from numpy import linspace
>>> import matplotlib.pyplot as plt
>>> t = linspace(0,1,1000)
>>> sini = 0.9
>>> ecc = 0.1
>>> omdeg = 90
>>> x, y, z = xyz_planet(t, 0, 1, sini, ecc, omdeg)
>>> plt.plot(x, y)
>>> plt.plot(x, z)
>>> plt.show()
"""
if ecc == 0:
nu = 2*pi*(t-tzero)/P
r = 1
cosw = 0
sinw = -1
else:
tp = tzero2tperi(tzero,P,sini,ecc,omdeg)
M = 2*pi*(t-tp)/P
E = esolve(M,ecc)
nu = 2*arctan(sqrt((1+ecc)/(1-ecc))*tan(E/2))
r = (1-ecc**2)/(1+ecc*cos(nu))
omrad = pi*omdeg/180
# negative here since om_planet = om_star + pi
cosw = -cos(omrad)
sinw = -sin(omrad)
sinv = sin(nu)
cosv = cos(nu)
cosi = sqrt(1-sini**2)
x = r*(-sinv*sinw + cosv*cosw)
y = r*cosi*(cosv*sinw + sinv*cosw)
z = -r*sini*(cosw*sinv + cosv*sinw)
return x, y, z
#----------------------------------------------------------------------------
def massradius(P=None, k=None, sini=None, ecc=None,
m_star=None, r_star=None, K=None, aR=None,
jovian=False, solar=False, verbose=True, return_samples=False,
plot=True, figsize=(8,6), xlim=None, ylim=None,
errorbar=True, err_kws={'capsize':4, 'color':'darkred', 'fmt':'o'},
logmass=False, logradius=False, title=None,
ellipse=True, ell_kws={'facecolor':'None','edgecolor':'darkblue'},
ell_sigma=[1,2,3], tepcat=True, tepcat_kws={'s':8, 'c':'cadetblue'},
show_legend=True, legend_kws={},
zeng_models=['R100H2O','Rrock'], zeng_kws={},
baraffe_models=['Z0.02_5Gyr','Z0.50_5Gyr'],
baraffe_kws={}, lab_kws={}, tick_kws={}):
"""
Calculate planet mass and/or radius
Stellar mass and/or radius (m_star, r_star) are assumed to have solar
    units. The radial velocity semi-amplitude of the star's orbit, K, is
assumed to have units of m/s. P is assumed to have units of days.
Parameters can be specified in one of the following ways.
- single value (zero error assumed)
- ufloat values, i.e., m_star=ufloat(1.1, 0.05)
- 2-tuple with value and standard deviation, e.g., m_star=(1.1, 0.05)
- a numpy array of values sampled from the parameter's probability
distribution
If input values are numpy arrays of the same size, e.g., outputs from the
same run of an emcee sampler, then they are sampled in the same way to
ensure any correlations between these input parameters are preserved.
If the orbital eccentricity is not given then it is assumed to be e=0
(circular orbit).
In the table below, the input and output quantities are
- k = planet-star radius ratio r_p/r_star
- sini = sine of orbital inclination
- ecc = orbital eccentricity
- K = semi-amplitude of star's spectroscopic orbit in m/s
- aR = a/r_star
- r_pl = planet radius
- m_pl = planet mass
- a = semi-major axis of the planet's orbit in solar radii
- q = mass ratio = m_pl/m_star
- g_p = planet's surface gravity (m.s-2)
- rho_p = planet's mean density
- rho_star = mean stellar density in solar units
    +----------------------------+---------------+
    | Input                      | Output        |
    +============================+===============+
    | r_star, k                  | r_p           |
    | m_star, K, sini, P         | m_p, a, q     |
    | aR, k, sini, P, K          | g_p           |
    | aR, k, sini, P, K, m_star  | rho_p         |
    | aR, P, m_star, K, sini     | rho_star      |
    +----------------------------+---------------+
The planet surface gravity, g_p, is calculated directly from k and aR
using equation (4) from Southworth et al., MNRAS 2007MNRAS.379L..11S. The
mean stellar density, rho_star, is calculated directly from aR using
the equation from section 2.2 of Maxted et al. 2015A&A...575A..36M.
By default, the units for the planet mass, radius and density are Earth
mass, Earth radius and Earth density. Jovian mass, radius and density
units can be selected by setting jovian=True. In both cases, the radius
units are those for a sphere with the same volume as the Earth or Jupiter.
Alternatively, solar units can be selected using solar=True.
The following statistics are calculated for each of the input and output
quantities and are returned as a python dict.
- mean
- stderr (standard error)
- mode (estimated using half-sample method)
- median
    - e_hi (84.1%-ile - median)
    - e_lo (median - 15.9%-ile)
    - c95_up (95% upper confidence limit)
    - c95_lo (95% lower confidence limit)
- sample (sample used to calculate statistics, if return_samples=True)
An output plot showing the planet mass and radius relative to models
and/or other known planets is generated if both the planet mass and
radius can be calculated (unless plot=False is specified). Keyword options
can be sent to ax.tick_params using the tick_kws option and similarly for
ax.set_xlabel and ax.set_ylabel with the lab_kws option. The plot title
can be set with the title keyword.
The following models from Zeng et al. (2016ApJ...819..127Z) can be
selected using the zeng_models keyword.
R100Fe,R50Fe,R30Fe,R25Fe,R20Fe,Rrock,R25H2O,R50H2O,R100H2O
Set zeng_models=None to skip plotting of these models, or 'all' to plot
them all. Keyword argument to the plot command for these models can be
added using the zeng_kws option.
Models from Baraffe et al., (2008A&A...482..315B) are available for
metalicities Z=0.02, 0.10, 0.50 and 0.90, and ages 0.5Gyr, 1Gyr and 5Gyr.
Models can be selected using the baraffe_models option using model names
Z0.02_0.5Gyr, Z0.02_1Gyr, Z0.02_5Gyr, Z0.02_0.5Gyr, etc. Set
baraffe_models=None to skip plotting of these models, or 'all' to plot
them all. Keyword argument to the plot command for these models can be
added using the baraffe_kws option.
The keyword show_legend can be used to include a legend for the models
plotted with keyword arguments legend_kws.
Well-studied planets from TEPCat will also be shown in the plot if
tepcat=True. The appearance of the points can be controlled using
kws_tepcat keyword arguments that are passed to plt.scatter.
If errorbar=True the planet mass and radius are plotted as an error bar
using plt.errorbar with optional keyword arguments err_kws. Logarithmic
scales for the mass and radius axes can be selected with the logmass and
logradius keywords.
If ellipse=True then the planet mass and radius are shown using ellipses
with semi-major axes set by the ell_sigma keyword. The appearance of these
ellipses can be specified using the ell_kws keyword. These options are
sent to the plt.add_patch command.
The return value of this function is "result, fig" or, if plot=False,
"result", where "result" is a python dict containing the statistics for
each parameter and "fig" is a matplotlib Figure object.
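    A minimal usage sketch with illustrative (assumed) parameter values,
    not those of any real planet:
    :Example:
     >>> from pycheops.funcs import massradius
     >>> from uncertainties import ufloat
     >>> result = massradius(P=3.5, k=ufloat(0.1, 0.001), sini=0.99,
     ...     m_star=(1.0, 0.05), r_star=(1.0, 0.05), K=ufloat(150, 5),
     ...     jovian=True, plot=False, verbose=False)
     >>> print(result['m_p']['median'], result['r_p']['median'])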
"""
NM=100_000 # No. of Monte Carlo simulations.
# Generate a sample of values for a parameter
def _s(x, nm=NM):
if isinstance(x,float) or isinstance(x,int):
return full(nm, x, dtype=float)
elif isinstance(x, UFloat):
return random.normal(x.n, x.s, nm)
elif isinstance(x, ndarray):
if len(x) == nm:
return x
elif len(x) > nm:
return x[random_sample(range(len(x)), nm)]
else:
                return x[(random.random(nm)*len(x)).astype(int)]
elif isinstance(x, tuple):
if len(x) == 2:
return random.normal(x[0], x[1], nm)
elif len(x) == 3:
raise NotImplementedError
raise ValueError("Unrecognised type for parameter values")
# Generate dict of parameter statistics
def _d(x):
d = {}
d['mean'] = x.mean()
d['stderr'] = x.std()
d['mode'] = mode(x)
q = percentile(x, [5,15.8655,50,84.1345,95])
d['median'] = q[2]
d['e_hi'] = q[3]-q[2]
d['e_lo'] = q[2]-q[1]
d['c95_up'] = q[4]
d['c95_lo'] = q[0]
return d
result = {}
fig = None
# Use e=0 if input value is none, otherwise sample in the range [0,1)
_e = 0 if ecc is None else clip(abs(_s(ecc)),0,0.999999)
# Look for input values that are numpy arrays of the same length, in which
# case sample these together.
pv = [P, k, sini, _e, m_star, r_star, K, aR]
pn = ['P', 'k', 'sini', 'e', 'm_star', 'r_star', 'K', 'aR']
ps = {} # dictionary of samples for each input parameter
_n = [len(p) if isinstance(p, ndarray) else 0 for p in pv]
_u = unique(_n)
for _m in _u[_u>0]:
_i = where(_n == _m)[0]
if len(_i) > 1:
if _m == NM:
_j = range(_m)
elif _m > NM:
_j = random_sample(range(_m), NM)
else:
_j = (random.random(NM)*_m).astype(int)
for _k in _i:
ps[pn[_k]] = pv[_k][_j]
# Generate samples for input parameters not already sampled
# N.B. All parameters assumed to be strictly positive so use abs() to
# avoid negative values.
for n in set(pn) - set(ps.keys()):
_i = pn.index(n)
ps[n] = None if pv[_i] is None else abs(_s(pv[_i]))
if jovian:
if solar: raise ValueError("Cannot specify both jovian and solar units")
mfac = M_SunN/M_JupN
rfac = R_SunN/R_JupN
mstr = ' M_Jup'
rstr = ' R_Jup'
elif solar:
mfac, rfac = 1, 1
mstr = ' M_Sun'
rstr = ' R_Sun'
else:
mfac = M_SunN/M_EarthN
rfac = R_SunN/R_EarthN
mstr = ' M_Earth'
rstr = ' R_Earth'
if ps['m_star'] is not None:
result['m_star'] = _d(ps['m_star'])
if verbose:
print(parprint(ps['m_star'],'m_star',wn=8,w=10) + ' M_Sun')
if ps['r_star'] is not None:
result['r_star'] = _d(ps['r_star'])
if verbose:
print(parprint(ps['r_star'],'r_star',wn=8,w=10) + ' R_Sun')
result['e'] = _d(ps['e'])
if verbose:
print(parprint(ps['e'],'e',wn=8,w=10))
# Calculations start here. Intermediate variables names in result
# dictionary start with "_" so we can remove/ignore them later.
if ps['k'] is not None and ps['r_star'] is not None:
ps['_rp'] = ps['k']*ps['r_star'] # in solar units
ps['r_p'] = ps['_rp']*rfac # in output units
result['r_p'] = _d(ps['r_p'])
if verbose:
print(parprint(ps['r_p'],'r_p',wn=8,w=10) + rstr)
if not True in [p is None for p in [m_star, sini, P, K]]:
# Mass function in solar mass - careful to use K in km/s here
_K = ps['K']/1000 # K in km/s
ps['_fm'] = f_m(ps['P'], _K, ps['e'])
ps['_mp'] = m_comp(ps['_fm'], ps['m_star'], ps['sini']) # solar units
ps['m_p'] = ps['_mp']*mfac # in output units
result['m_p'] = _d(ps['m_p'])
ps['q'] = ps['_mp']/ps['m_star']
result['q'] = _d(ps['q'])
ps['a'] = asini(_K*(1+1/ps['q']), ps['P'], ps['e']) / ps['sini']
result['a'] = _d(ps['a'])
if verbose:
print(parprint(ps['m_p'],'m_p',wn=8,w=10) + mstr)
print(parprint(ps['q'],'q',wn=8,w=10))
print(parprint(ps['a'],'a',wn=8,w=10) + ' R_Sun')
print(parprint(ps['a']*R_SunN/au,'a',wn=8,w=10) + ' au')
if aR is not None:
ps['rho_star'] = rhostar(1/ps['aR'], ps['P'], ps['q'])
result['rho_star'] = _d(ps['rho_star'])
if verbose:
print(parprint(ps['rho_star'],'rho_star',wn=8,w=10)+' rho_Sun')
if not True in [p is None for p in [k, aR, K, sini, P]]:
_K = ps['K']/1000 # K in km/s
ps['g_p'] = g_2(ps['k']/ps['aR'],ps['P'],_K,ps['sini'],ps['e'])
result['g_p'] = _d(ps['g_p'])
if verbose:
print(parprint(ps['g_p'],'g_p',wn=8,w=10) + ' m.s-2')
_loggp = log10(ps['g_p'])+2
print(parprint(_loggp,'log g_p',wn=8,w=10)+' [cgs]')
if m_star is not None:
_rho = (3 * ps['g_p']**1.5 /
( 4*pi * G_2014**1.5 * (ps['_mp']*M_SunN)**0.5) )
if jovian:
rho_Jup = M_JupN / (4/3*pi*R_JupN**3)
ps['rho_p'] = _rho/rho_Jup
rhostr = ' rho_Jup'
elif solar:
ps['rho_p'] = _rho
rhostr = ' rho_Sun'
else:
rho_Earth = M_EarthN / (4/3*pi*R_EarthN**3)
ps['rho_p'] = _rho/rho_Earth
rhostr = ' rho_Earth'
if verbose:
print(parprint(ps['rho_p'],'rho_p',wn=8,w=10) + rhostr)
print(parprint(_rho*1e-3,'rho_p',wn=8,w=10)+' [g.cm-3]')
result['rho_p'] = _d(ps['rho_p'])
# Include input quantities in result
    for k in ['P', 'k', 'sini', 'e', 'K', 'aR']:
        if ps[k] is not None:
            result[k] = _d(ps[k])
if return_samples:
for k in result.keys():
result[k]['sample'] = ps[k]
if plot is False or not 'm_p' in result or not 'r_p' in result:
return result
# Plotting starts here
_m = result['m_p']['median']
_r = result['r_p']['median']
fig, ax = plt.subplots(figsize=figsize)
ax.tick_params(**tick_kws)
if logmass: ax.set_xscale('log')
if logradius: ax.set_yscale('log')
if xlim:
ax.set_xlim(xlim)
else:
if logmass:
ax.set_xlim(_m/10,_m*10)
else:
ax.set_xlim(0, _m*2)
if ylim:
ax.set_ylim(ylim)
else:
if logradius:
ax.set_ylim(_r/10,_r*10)
else:
ax.set_ylim(0, _r*2)
if jovian:
ax.set_xlabel(r"$M/M_{\rm Jup}$", **lab_kws)
ax.set_ylabel(r"$R/R_{\rm Jup}$", **lab_kws)
elif solar:
ax.set_xlabel(r"$M/M_{\odot}$", **lab_kws)
ax.set_ylabel(r"$R/R_{\odot}$", **lab_kws)
else:
ax.set_xlabel(r"$M/M_{\oplus}$", **lab_kws)
ax.set_ylabel(r"$R/R_{\oplus}$", **lab_kws)
if title is not None:
ax.set_title(title)
if zeng_models is not None:
if jovian:
mfac, rfac = M_EarthN/M_JupN, R_EarthN/R_JupN
elif solar:
mfac, rfac = M_EarthN/M_SunN, R_EarthN/R_SunN
else:
mfac, rfac = 1,1
mfile = join(_model_path,'apj522803t2_mrt.txt')
T = Table.read(mfile, format='ascii.cds')
        if zeng_models == 'all':
            zeng_models = T.colnames[1:]
        for c in zeng_models:
ax.plot(T['Mass']*mfac,T[c]*rfac,**zeng_kws,label=c)
if baraffe_models is not None:
if jovian:
mfac, rfac = M_EarthN/M_JupN, 1
elif solar:
mfac, rfac = M_EarthN/M_SunN, R_JupN/R_SunN
else:
mfac, rfac = 1, R_JupN/R_EarthN
mfile = join(_model_path,'aa9321-07_table4.csv')
T = Table.read(mfile, format='csv')
        if baraffe_models == 'all':
            baraffe_models = T.colnames[1:]
        for c in baraffe_models:
ax.plot(T['Mass']*mfac,T[c]*rfac,**baraffe_kws,label=c)
if show_legend:
ax.legend(**legend_kws)
if tepcat:
if TEPCatPath.is_file():
file_age = mktime(localtime())-getmtime(TEPCatPath)
if file_age > int(config['TEPCat']['update_interval']):
download = True
else:
download = False
else:
download = True
if download:
url = config['TEPCat']['download_url']
try:
req=requests.post(url)
except:
warnings.warn("Failed to update TEPCat data file from server")
else:
with open(TEPCatPath, 'wb') as file:
file.write(req.content)
if verbose:
print('TEPCat data downloaded from \n {}'.format(url))
# Awkward table to deal with because of repeated column names
T = Table.read(TEPCatPath,format='ascii.no_header')
M_b=array(T[T.colnames[list(T[0]).index('M_b')]][1:],dtype=float)
R_b=array(T[T.colnames[list(T[0]).index('R_b')]][1:],dtype=float)
ok = (M_b > 0) & (R_b > 0)
M_b = M_b[ok]
R_b = R_b[ok]
if jovian:
R_b = R_b*R_eJupN/R_JupN
elif solar:
M_b = M_b*M_JupN/M_SunN
R_b = R_b*R_eJupN/R_SunN
else:
M_b = M_b*M_JupN/M_EarthN
R_b = R_b*R_eJupN/R_EarthN
ax.scatter(M_b,R_b, **tepcat_kws)
if errorbar:
ax.errorbar(_m, _r,
xerr=[[result['m_p']['e_lo']],[result['m_p']['e_hi']]],
yerr=[[result['r_p']['e_lo']],[result['r_p']['e_hi']]],
**err_kws)
if ellipse:
for nsig in ell_sigma:
xy, w, h, theta = ellpar(ps['m_p'],ps['r_p'],nsig)
ax.add_patch(Ellipse(xy, w, h, theta, **ell_kws))
return result, fig
#---------------
def delta_t_sec(P, M=1, sini=1, ecc=0, omdeg=90, q=0):
"""
Correction to time of mid-eclipse due to light travel time.
From Borkovits, et al. 2015MNRAS.448..946B, equation (25)
:param P: orbital period in days
    :param M: primary star mass in solar masses (optional, default 1)
:param sini: sine of orbital inclination (optional, default 1)
:param ecc: eccentricity (optional, default=0)
:param omdeg: longitude of periastron in degrees (optional, default=90)
:param q: mass ratio = M_companion/M_star (optional, default 0)
N.B. omdeg is the longitude of periastron for the primary star's orbit
:returns: light travel time correction in seconds
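    :Example:
     >>> from pycheops.funcs import delta_t_sec
     >>> # Illustrative (assumed) 3.5-d hot Jupiter around a solar-mass star
     >>> print("{:0.1f} s".format(delta_t_sec(3.5, q=0.001)))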
"""
asini = a_rsun(P, M*(1+q))*R_SunN*sini
esinw = ecc*sin(radians(omdeg))
return 2*(1-q)/(1+q)*asini*(1-ecc**2)/(1-esinw**2)/c
| 37,655 | 33.139619 | 80 | py |
pycheops | pycheops-master/pycheops/instrument.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
instrument
==========
Constants, functions and data related to the CHEOPS instrument.
Functions
---------
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from os.path import join,abspath,dirname,isfile
import pickle
from astropy.table import Table
from .core import load_config
from .models import TransitModel, scaled_transit_fit, minerr_transit_fit
import warnings
__all__ = [ 'response', 'visibility', 'exposure_time', 'transit_noise',
'count_rate', 'cadence', 'CHEOPS_ORBIT_MINUTES']
_data_path = join(dirname(abspath(__file__)),'data')
config = load_config()
_cache_path = config['DEFAULT']['data_cache_path']
# Parameters from spreadsheet ImageETCv1.4, 2020-04-01
FLUX_0 = 1851840480
PSF_R90 = 16.2
PSF_HP = 0.0046
FWC = 114000
# From Benz et al., 2020
CHEOPS_ORBIT_MINUTES = 98.725
with open(join(_cache_path,'C_G_Teff_interpolator.p'),'rb') as fp:
_C_G_Teff_interpolator = pickle.load(fp)
with open(join(_cache_path,'visibility_interpolator.p'),'rb') as fp:
_visibility_interpolator = pickle.load(fp)
_cadence_Table = Table.read(join(_data_path,'instrument','cadence.csv'),
format='ascii.csv', header_start=1)
#-----------------------------
def count_rate(G, Teff=6000):
"""
Predicted count rates, c_tot, c_av, c_max
The count rates in e-/s based on the star's Gaia G magnitude and effective
temperature, Teff.
* c_tot = total count rate
* c_av = average count rate
* c_max = count rate in the brightest pixel
:param G: Gaia G-band magnitude
:param Teff: target effective temperature in K
:returns: c_tot, c_av, c_max
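    :Example:
     >>> from pycheops.instrument import count_rate
     >>> # Illustrative Sun-like star with an assumed G = 9
     >>> c_tot, c_av, c_max = count_rate(9, Teff=5800)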
"""
c_tot = round(FLUX_0*10**(-0.4*(G+_C_G_Teff_interpolator(Teff))))
c_av = round(0.90*c_tot/(np.pi*PSF_R90**2))
c_max = round(PSF_HP*c_tot)
return c_tot, c_av, c_max
#-----------------------------
def visibility(ra, dec):
"""
Estimate of target visibility
The target visibility estimated with this function is approximate. A more
reliable estimate of the observing efficiency can be made with the
Feasibility Checker tool.
:param ra: right ascension in degrees (scalar or array)
:param dec: declination in degrees (scalar or array)
:returns: target visibility (%)
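    :Example:
     >>> from pycheops.instrument import visibility
     >>> # Approximate visibility for an assumed target position
     >>> print(visibility(100.0, -45.0))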
"""
return (_visibility_interpolator(ra, dec)*100).astype(int)
#-----------------------------
def response(passband='CHEOPS'):
"""
Instrument response functions.
The available passband names are 'CHEOPS', 'MOST',
'Kepler', 'CoRoT', 'Gaia', 'U', 'B', 'V', 'R', 'I',
'u\_','g\_','r\_','i\_','z\_', 'NGTS', 'TESS' and 'PLATO'
:param passband: instrument/passband names (case sensitive).
:returns: Instrument response function as an astropy Table object.
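    :Example:
     >>> from pycheops.instrument import response
     >>> T = response('CHEOPS')
     >>> print(T['Wavelength'][0], T['Response'][0])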
"""
T = Table.read(join(_data_path,'response_functions',
'response_functions.fits'))
T.rename_column(passband,'Response')
return T['Wavelength','Response']
#------------------
def exposure_time(G, Teff=6000, frac=0.85):
"""
Recommended exposure time.
By default, calculates the exposure time required to obtain 85% of the
full-well capacity in the brightest pixel for a star of a given Gaia
G-band magnitude, G, and effective temperature, Teff in Kelvin.
The value returned is restricted to the range 0.1 s < t_exp < 60 s.
The exposure time can be adjusted by selecting a different value of frac,
the fraction of the full-well capacity (FWC) in the brightest pixel. It is
strongly recommended not to exceed frac=0.95 for CHEOPS observations.
:param G: Gaia G-band magnitude
    :param Teff: target effective temperature in K
    :param frac: target fraction of the FWC in the brightest pixel.
    :returns: t_exp
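    :Example:
     >>> from pycheops.instrument import exposure_time
     >>> # Recommended exposure time for an assumed G = 10.5, Teff = 5200 K star
     >>> print(exposure_time(10.5, Teff=5200))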
"""
c_tot, c_av, c_max = count_rate(G, Teff)
t_exp = round(np.clip(frac*FWC/c_max,0.1,60),2)
return t_exp
#------------------
def cadence(exptime, G, Teff=6000):
"""
    Cadence and other observing information for a given exposure time.
For a star of the specified Gaia G-band magnitude and effective
temperature, return the following parameters for an exposure time of the
specified length.
* img = image stacking order
* igt = imagette stacking order
* cad = stacked image cadence (in seconds)
* duty = duty cycle (%)
    * frac = maximum counts as a fraction of the full-well capacity
:param exptime: exposure time in seconds (0.1 .. 60)
:param G: Gaia G-band magnitude
    :param Teff: target effective temperature in Kelvin
:returns: img, igt, cad, duty, frac
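    :Example:
     >>> from pycheops.instrument import cadence
     >>> # Observing parameters for an assumed 30 s exposure of a G = 11 star
     >>> img, igt, cad, duty, frac = cadence(30, 11)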
"""
if exptime < 0.1 or exptime > 60:
        return np.nan, np.nan, np.nan, np.nan, np.nan
R = _cadence_Table[np.searchsorted(_cadence_Table['t_hi'],exptime)]
img = R['img']
igt = R['igt']
w = (R['t_hi']-exptime)/(R['t_hi']-R['t_lo']) # interpolating weight
duty = round(w*R['duty_lo'] + (1-w)*R['duty_hi'],2)
cad = round(w*R['cad_lo'] + (1-w)*R['cad_hi'],2)
c_tot, c_av, c_max = count_rate(G, Teff)
frac = round(exptime*c_max/FWC,2)
return img, igt, cad, duty, frac
#------------------
def transit_noise(time, flux, flux_err, T_0=None, width=3,
h_1=0.7224, h_2=0.6713, tol=0.1,
method='scaled'):
"""
Transit noise estimate
The noise is calculated in a window of duration 'width' in hours centered
at time T_0 by first dividing out the best-fitting transit (even if this
has a negative depth), and then finding the depth of an injected transit
that gives S/N = 1.
Two methods are available to estimate the transit depth and its standard
error - 'scaled' or 'minerr'.
If method='scaled', the transit depth and its standard error are
calculated assuming that the true standard errors on the flux measurements
are a factor f times the nominal standard error(s) provided in flux_err.
If method='minerr', the transit depth and its standard error are
calculated assuming that standard error(s) provided in flux_err are a
lower bound to the true standard errors. This tends to be more
conservative than using method='scaled'.
The transit is calculated from an impact parameter b=0 using power-2 limb
darkening parameters h_1 and h_2. Default values for h_1 and h_2 are solar
values.
    If T_0 is not specified then the median value of time is used.
    If there are insufficient data for the calculation, the value(s)
    returned are np.nan.
:param time: Array of observed times (days)
:param flux: Array of normalised flux measurements
    :param flux_err: Standard error estimate(s) for flux - array or scalar
:param T_0: Centre of time window for noise estimate
:param width: Width of time window for noise estimate in hours
:param h_1: Limb darkening parameter
:param h_2: Limb darkening parameter
:param tol: Tolerance criterion for convergence (ppm)
:param method: 'scaled' or 'minerr'
:returns: noise in ppm and, if method is 'scaled', noise scaling factor, f
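    A minimal usage sketch on simulated white noise (values are
    illustrative only):
    :Example:
     >>> from pycheops.instrument import transit_noise
     >>> import numpy as np
     >>> time = np.linspace(0, 0.3, 300)
     >>> flux = 1 + np.random.normal(0, 0.001, len(time))
     >>> flux_err = np.full_like(time, 0.001)
     >>> noise, f = transit_noise(time, flux, flux_err, width=3)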
"""
assert (method in ('scaled', 'minerr')), "Invalid method value"
mad = np.median(np.abs(flux-np.median(flux)))
if np.abs(np.median(flux)-1) > mad:
warnings.warn ("Input flux values are not normalised")
if T_0 is None:
T_0 = np.median(time)
# Use orbital period = 10* data duration so there is certainly 1 transit
P = 10*(max(time)-min(time))
j = (np.abs(time-T_0) < (width/48)).nonzero()[0]
if len(j) < 4:
if method == 'scaled':
return np.nan, np.nan
else:
return np.nan
ITMAX = 10
it = 1
e_depth = np.median(flux_err[j])/np.sqrt(len(j))
depth_in = 0
W = width/24/P # Transit Width in phase units
tm = TransitModel()
depth_tol = tol*1e-6
while abs(e_depth-depth_in) > depth_tol:
depth_in = e_depth
k = np.clip(np.sqrt(depth_in),1e-6,0.2)
pars = tm.make_params(T_0=T_0, P=P, D=depth_in, W=W, b=0,
h_1=h_1, h_2=h_2)
model = tm.eval(params=pars, t=time)
# Calculate best-fit transit depth
if method == 'scaled':
s0, _, _, _ = scaled_transit_fit(flux,flux_err,model)
if s0 == 0:
s0, _, _, _ = scaled_transit_fit(2-flux,flux_err,model)
s0 = -s0
else:
s0, _ = minerr_transit_fit(flux,flux_err,model)
if s0 == 0:
s0, _ = minerr_transit_fit(2-flux,flux_err,model)
s0 = -s0
# Subtract off best-fit transit depth and inject model transit
_f = flux - (s0-1)*(model-1)
if method == 'scaled':
s, f, sigma_s, sigma_f = scaled_transit_fit(_f,flux_err,model)
else:
s, sigma_s = minerr_transit_fit(_f,flux_err,model)
# If the input depth is too small then error can be 0, so ..
if sigma_s > 0:
e_depth = sigma_s*depth_in
else:
e_depth = depth_in*2
#print(it,s0,s, sigma_s, depth_in, e_depth)
it = it + 1
if it > ITMAX:
warnings.warn ('Algorithm failed to converge.')
break
if method == 'scaled':
return 1e6*depth_in, f
else:
return 1e6*depth_in
| 10,175 | 29.836364 | 78 | py |
pycheops | pycheops-master/pycheops/ld.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
ld
==
Limb darkening functions
The available passband names are:
* 'CHEOPS', 'MOST', 'Kepler', 'CoRoT', 'Gaia', 'TESS', 'PLATO'
* 'U', 'B', 'V', 'R', 'I' (Bessell/Johnson)
* 'u\_', 'g\_', 'r\_', 'i\_', 'z\_' (SDSS)
* 'NGTS'
The power-2 limb-darkening law is described in Maxted (2018) [1]_.
Uninformative sampling of the parameter space for the power-2 law
is described in Short et al. (2019) [2]_.
Examples
--------
>>> from pycheops.ld import *
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> T_eff = 5560
>>> log_g = 4.3
>>> Fe_H = -0.3
>>> passband = 'Kepler'
>>> p2K = stagger_power2_interpolator(passband)
>>> c2,a2,h1,h2 = p2K(T_eff, log_g, Fe_H)
>>> print('h_1 = {:0.3f}, h_2 = {:0.3f}'.format(h1, h2))
>>> mu = np.linspace(0,1)
>>> plt.plot(mu, ld_power2(mu,[c2, a2]),label='power-2')
>>> plt.xlim(0,1)
>>> plt.ylim(0,1)
>>> plt.xlabel('$\mu$')
>>> plt.ylabel('$I_{\lambda}(\mu)$')
>>> plt.legend()
>>> plt.show()
.. rubric:: References
.. [1] Maxted, P.F.L., 2018, A&A, 616, A39
.. [2] Short, D.R., et al., 2019, RNAAS, 3, 117
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import os
from os.path import join,abspath,dirname,isfile
import pickle
from astropy.table import Table
from scipy.interpolate import pchip_interpolate, LinearNDInterpolator
from scipy.optimize import minimize
from .core import load_config
from .funcs import transit_width
try:
from ellc import lc
except:
pass
__all__ = ['ld_power2', 'ld_claret', 'stagger_power2_interpolator',
'atlas_h1h2_interpolator', 'phoenix_h1h2_interpolator',
'ca_to_h1h2', 'h1h2_to_ca' , 'q1q2_to_h1h2', 'h1h2_to_q1q2']
_data_path_ = join(dirname(abspath(__file__)),'data','limbdarkening')
config = load_config()
_cache_path_ = config['DEFAULT']['data_cache_path']
def ld_power2(mu, a):
"""
Evaluate power-2 limb-darkening law
:param mu: cos of angle between surface normal and line of sight
:param a: array or tuple [c, alpha]
:returns: 1 - c * (1-mu**alpha)
"""
c, alpha = a
return 1 - c * (1-mu**alpha)
def ca_to_h1h2(c, alpha):
"""
Transform for power-2 law coefficients
h1 = 1 - c*(1-0.5**alpha)
h2 = c*0.5**alpha
:param c: power-2 law coefficient, c
:param alpha: power-2 law exponent, alpha
returns: h1, h2
"""
return 1 - c*(1-0.5**alpha), c*0.5**alpha
def h1h2_to_ca(h1, h2):
"""
Inverse transform for power-2 law coefficients
c = 1 - h1 + h2
alpha = log2(c/h2)
:param h1: 1 - c*(1-0.5**alpha)
:param h2: c*0.5**alpha
returns: c, alpha
"""
return 1 - h1 + h2, np.log2((1 - h1 + h2)/h2)
def h1h2_to_q1q2(h1, h2):
"""
    Transform h1, h2 to uninformative parameters q1, q2
q1 = (1 - h2)**2
q2 = (h1 - h2)/(1-h2)
:param h1: 1 - c*(1-0.5**alpha)
:param h2: c*0.5**alpha
returns: q1, q2
"""
return (1 - h2)**2, (h1 - h2)/(1-h2)
def q1q2_to_h1h2(q1, q2):
"""
    Inverse transform to h1, h2 from uninformative parameters q1, q2
h1 = 1 - sqrt(q1) + q2*sqrt(q1)
h2 = 1 - sqrt(q1)
:param q1: (1 - h2)**2
:param q2: (h1 - h2)/(1-h2)
returns: h1, h2
"""
return 1 - np.sqrt(q1) + q2*np.sqrt(q1), 1 - np.sqrt(q1)
def ld_claret(mu, a):
"""
Evaluate Claret 4-parameter limb-darkening law
:param mu: cos of angle between surface normal and line of sight
:param a: array or tuple [a_1, a_2, a_3, a_4]
:returns: 1 - Sum(i=1,4) a_i*(1-mu**(i/2))
"""
return 1-a[0]*(1-mu**0.5)-a[1]*(1-mu)-a[2]*(1-mu**1.5)-a[3]*(1-mu**2)
class _coefficient_optimizer:
"""
Optimize coefficients of the limb darkening law specified by fitting a
transit light curve.
Available limb-darkening laws are "lin", "quad", "power-2", "exp",
"sqrt", "log", "sing" and "claret"
"""
def __init__(self, passband='CHEOPS'):
"""
:param passband: instrument/passband names (case sensitive).
"""
pfile = join(_cache_path_,passband+'_stagger_mugrid_interpolator.p')
with open(pfile, 'rb') as fp:
self._interpolator = pickle.load(fp)
def __call__(self, T_eff, log_g, Fe_H, k=0.1, b=0.0,
law='power-2', precision='low'):
"""
:parameter T_eff: effective temperature in Kelvin
:parameter log_g: log of the surface gravity in cgs units
:parameter Fe/H: [Fe/H] in dex
:parameter k: Radius ratio R_pl/R_star
:parameter b: Impact parameter (R_star/a)cos(incl)
:parameter law: Limb darkening law
:param precision: 'low', 'medium' or 'high'
:returns: array of coefficients
"""
self._mu_default = np.array(
[0,0.01,0.05,0.1,0.2,0.3,0.5,0.7,0.8,0.9,1.0])
precision_to_gridsize = {
"low" : "very_sparse",
"medium": "sparse",
"high": "default"
}
self._gridsize = precision_to_gridsize.get(precision,None)
if self._gridsize is None:
raise ValueError("Invalid precision value",precision)
# Fixed parameters
n_mu = 51
n_grid = 32
mu = np.linspace(0,1,n_mu)
I_mu = self._interpolator(T_eff, log_g, Fe_H)
ldc_1 = pchip_interpolate(self._mu_default, I_mu, mu )
w = 0.5*transit_width(0.1, k, b)
# Avoid last contact point - occasional numerical problems
self._t = np.linspace(0,w,n_grid,endpoint=False)
incl = 180*np.arccos(0.1*b)/np.pi
self._lc_mugrid = lc(self._t, radius_1=0.1, radius_2=0.1*k,
sbratio=0, incl=incl, ld_1='mugrid', ldc_1 = ldc_1,
grid_1=self._gridsize, grid_2=self._gridsize)
if law in ("lin"):
c = np.full(1, 0.5)
elif law in ("quad", "log", "sqrt", "exp"):
c = np.full(2, 0.5)
elif law in ("power-2"):
c = np.array([0.3,0.45]) # q1, q2
elif law in ("sing"):
c = np.full(3, 0.5)
elif law in ("claret"):
c = np.full(4, 0.5)
else:
raise Exception("Invalid limb darkening law")
if law in ("power-2"):
smol = np.sqrt(np.finfo(float).eps)
soln = minimize(self._f, c, args=(k, incl, law),
method='L-BFGS-B',
bounds=((smol, 1-smol),(smol, 1-smol)))
h1,h2 = q1q2_to_h1h2(soln.x[0],soln.x[1])
c2,a2 = h1h2_to_ca(h1,h2)
c = np.array([c2, a2])
else:
soln = minimize(self._f, c, args=(k, incl, law))
c = soln.x
self._rms = soln.fun
self._lc_fit = lc(self._t, radius_1=0.1, radius_2=0.1*k,
sbratio=0, incl=incl, ld_1=law, ldc_1 = c,
grid_1=self._gridsize, grid_2=self._gridsize)
return c
def _f(self, c, k, incl, law):
if law in ("power-2"):
h1,h2 = q1q2_to_h1h2(c[0],c[1])
c2,a2 = h1h2_to_ca(h1,h2)
ldc_1 = [c2, a2]
else:
ldc_1 = c
try:
lc_fit = lc(self._t, radius_1=0.1, radius_2=0.1*k,
sbratio=0, incl=incl, ld_1=law, ldc_1 = ldc_1,
grid_1=self._gridsize, grid_2=self._gridsize)
except:
            lc_fit = np.zeros_like(self._t)
rms = np.sqrt(np.mean((lc_fit - self._lc_mugrid)**2))
return rms
class stagger_power2_interpolator:
"""
Parameters of a power-2 limb-darkening law interpolated
from the Stagger grid.
The power-2 limb darkening law is
I_X(mu) = 1 - c * (1-mu**alpha)
It is often better to use the transformed coefficients
* h1 = 1 - c*(1-0.5**alpha)
and
* h2 = c*0.5**alpha
as free parameters in a least-squares fit and/or for applying priors.
Returns NaN if interpolation outside the grid range is attempted
"""
def __init__(self,passband='CHEOPS'):
"""
:param passband: instrument/passband names (case sensitive).
"""
pfile = join(_cache_path_, passband+'_stagger_power2_interpolator.p')
if not isfile(pfile):
datfile = join(_data_path_, 'power2.dat')
Tpower2 = Table.read(datfile,format='ascii',
names=['Tag','T_eff','log_g','Fe_H','c','alpha','h1','h2'])
tag = passband[0:min(len(passband),2)]
T = Tpower2[(Tpower2['Tag'] == tag)]
p = np.array([T['T_eff'],T['log_g'],T['Fe_H']]).T
v = np.array((T.as_array()).tolist())[:,4:]
mLNDI = LinearNDInterpolator(p,v)
with open(os.open(pfile, os.O_CREAT|os.O_WRONLY, 0o644),'wb') as fp:
pickle.dump(mLNDI,fp)
with open(pfile, 'rb') as fp:
self._interpolator = pickle.load(fp)
def __call__(self, T_eff, log_g, Fe_H):
"""
:parameter T_eff: effective temperature in Kelvin
:parameter log_g: log of the surface gravity in cgs units
:parameter Fe/H: [Fe/H] in dex
:returns: c, alpha, h_1, h_2
"""
return self._interpolator(T_eff, log_g, Fe_H)
#-----
class atlas_h1h2_interpolator:
"""
Parameters (h1,h2) of a power-2 limb-darkening law interpolated from
Table 10 of Claret (2019RNAAS...3...17C).
The transformation from the coefficients a1..a4 from Table 10 to h1, h2
was done using least-squares fit to the intensity profile as a function of
r=sqrt(1-mu**2) for r<0.99.
The Gaia G passband is used here as a close approximation to the CHEOPS
band.
"""
def __init__(self):
pfile = join(_cache_path_, 'atlas_h1h2_interpolator.p')
if not isfile(pfile):
csvfile = join(_data_path_, 'atlas_h1h2.csv')
T = Table.read(csvfile,format='csv')
p = np.array([T['T_eff'],T['log_g'],T['Fe_H']]).T
v = np.array((T.as_array()).tolist())[:,3:]
mLNDI = LinearNDInterpolator(p,v)
with open(os.open(pfile, os.O_CREAT|os.O_WRONLY, 0o644),'wb') as fp:
pickle.dump(mLNDI,fp)
with open(pfile, 'rb') as fp:
self._interpolator = pickle.load(fp)
def __call__(self, T_eff, log_g, Fe_H):
"""
:parameter T_eff: effective temperature in Kelvin
:parameter log_g: log of the surface gravity in cgs units
:parameter Fe/H: [Fe/H] in dex
:returns: h_1, h_2
"""
return self._interpolator(T_eff, log_g, Fe_H)
#-----
class phoenix_h1h2_interpolator:
"""
Parameters (h1,h2) of a power-2 limb-darkening law interpolated from
Table 5 of Claret (2019RNAAS...3...17C).
The transformation from the coefficients a1..a4 from Table 10 to h1, h2
was done using least-squares fit to the intensity profile as a function of
r=sqrt(1-mu**2) for r<0.99.
N.B. only solar-metalicity models available in this table.
The Gaia G passband is used here as a close approximation to the CHEOPS
band.
"""
def __init__(self):
pfile = join(_cache_path_, 'phoenix_h1h2_interpolator.p')
if not isfile(pfile):
csvfile = join(_data_path_, 'phoenix_h1h2.csv')
T = Table.read(csvfile,format='csv')
p = np.array([T['T_eff'],T['log_g']]).T
v = np.array((T.as_array()).tolist())[:,2:]
mLNDI = LinearNDInterpolator(p,v)
with open(os.open(pfile, os.O_CREAT|os.O_WRONLY, 0o644),'wb') as fp:
pickle.dump(mLNDI,fp)
with open(pfile, 'rb') as fp:
self._interpolator = pickle.load(fp)
def __call__(self, T_eff, log_g):
"""
:parameter T_eff: effective temperature in Kelvin
:parameter log_g: log of the surface gravity in cgs units
:returns: h_1, h_2
"""
return self._interpolator(T_eff, log_g)
| 12,919 | 27.584071 | 80 | py |
pycheops | pycheops-master/pycheops/make_xml_files.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
make_xml_files
==============
Generate XML files for CHEOPS observing requests
Dictionaries
------------
SpTypeToGminusV - valid keys are A0 to M9
SpTypeToTeff - valid keys are A0 to M9
Functions
---------
main() - make_xml_files
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import textwrap
from astropy.table import Table, Row
from astropy.time import Time
from astropy.coordinates import SkyCoord, Distance
import astropy.units as u
import numpy as np
from warnings import warn
import re
from os.path import join,abspath,dirname,exists,isfile
from os import listdir, getcwd
from shutil import copy
# Suppress output to stdout on import of astroquery.gaia.Gaia
from contextlib import redirect_stdout, redirect_stderr
from io import StringIO
_ = StringIO()
with redirect_stdout(_):
from astroquery.gaia import Gaia
from sys import exit
from .core import load_config
import pickle
from .instrument import visibility, exposure_time, count_rate, cadence
from . import __version__
__all__ = ['SpTypeToGminusV', 'SpTypeToTeff', '_GaiaDR2match']
# G-V and Teff v. spectral type from
# http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt
# version 2019.03.22
SpTypeToGminusV = {
'A0':+0.007, 'A1':+0.000, 'A2':+0.005, 'A3':-0.009, 'A4':-0.020,
'A5':-0.024, 'A6':-0.026, 'A7':-0.036, 'A8':-0.046, 'A9':-0.047,
'F0':-0.060, 'F1':-0.079, 'F2':-0.093, 'F3':-0.100, 'F4':-0.107,
'F5':-0.116, 'F6':-0.129, 'F7':-0.135, 'F8':-0.140, 'F9':-0.146,
'G0':-0.155, 'G1':-0.162, 'G2':-0.167, 'G3':-0.169, 'G4':-0.172,
'G5':-0.174, 'G6':-0.180, 'G7':-0.182, 'G8':-0.188, 'G9':-0.204,
'K0':-0.221, 'K1':-0.232, 'K2':-0.254, 'K3':-0.322, 'K4':-0.412,
'K5':-0.454, 'K6':-0.528, 'K7':-0.595, 'K8':-0.628, 'K9':-0.69 ,
'M0':-0.65 , 'M1':-0.82 , 'M2':-0.92 , 'M3':-1.09 , 'M4':-1.41 ,
'M5':-1.74 , 'M6':-2.14 , 'M7':-2.98 , 'M8':-3.08 , 'M9':-3.00 }
SpTypeToTeff = {
'A0':9700, 'A1':9200, 'A2':8840, 'A3':8550, 'A4':8270,
'A5':8080, 'A6':8000, 'A7':7800, 'A8':7500, 'A9':7440,
'F0':7220, 'F1':7030, 'F2':6810, 'F3':6720, 'F4':6640,
'F5':6510, 'F6':6340, 'F7':6240, 'F8':6170, 'F9':6060,
'G0':5920, 'G1':5880, 'G2':5770, 'G3':5720, 'G4':5680,
'G5':5660, 'G6':5590, 'G7':5530, 'G8':5490, 'G9':5340,
'K0':5280, 'K1':5170, 'K2':5040, 'K3':4830, 'K4':4600,
'K5':4410, 'K6':4230, 'K7':4070, 'K8':4000, 'K9':3940,
'M0':3870, 'M1':3700, 'M2':3550, 'M3':3410, 'M4':3200,
'M5':3030, 'M6':2850, 'M7':2650, 'M8':2500, 'M9':2400 }
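# Example (illustrative) use of these lookup tables for a G2 dwarf with an
# assumed V magnitude of 10.0:
# >>> SpTypeToTeff['G2'] # 5770 K
# >>> 10.0 + SpTypeToGminusV['G2'] # G = 9.833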
# Define a query object for Gaia DR2
_query = """SELECT source_id, ra, dec, parallax, pmra, pmdec, \
phot_g_mean_mag, phot_g_mean_flux_over_error, bp_rp FROM gaiadr2.gaia_source \
WHERE CONTAINS(POINT('ICRS',gaiadr2.gaia_source.ra,gaiadr2.gaia_source.dec), \
CIRCLE('ICRS',{},{},0.0666))=1 AND (phot_g_mean_mag<=16.5); \
"""
# XML strings and formats for input for Feasibility checker and PHT2
_xml_time_critical_fmt = """<?xml version="1.0" encoding="UTF-8"?>
<!-- -->
<!-- This file contains time-critical observations -->
<!-- -->
<!-- Generated by pycheops.make_xml_files -->
<!-- -->
<Earth_Explorer_File xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="ext_app_observation_requests_schema.xsd">
<Earth_Explorer_Header>
<Fixed_Header>
<File_Name>CH_TU2018-03-05T10-00-00_EXT_APP_ObservationRequests_V1234</File_Name>
<File_Description>Observation requests file</File_Description>
<Notes>Template file for CHEOPS observation Request : PHT2, FeasibilityChecker, CHEOPSim</Notes>
<Mission>CHEOPS</Mission>
<File_Class>TEST</File_Class>
<File_Type>EXT_APP_ObservationRequests</File_Type>
<Validity_Period>
<Validity_Start>UTC=2017-01-01T00:00:00</Validity_Start>
<Validity_Stop>UTC=2023-01-01T00:00:00</Validity_Stop>
</Validity_Period>
<File_Version>0001</File_Version>
<Source>
<System>PSO</System>
<Creator>PHT2</Creator>
<Creator_Version>000</Creator_Version>
<Creation_Date>UTC={}</Creation_Date>
</Source>
</Fixed_Header>
<Variable_Header>
<Programme_Type>10</Programme_Type>
</Variable_Header>
</Earth_Explorer_Header>
<Data_Block type="xml">
<!-- TIME-CRITICAL REQUEST -->
<List_of_Time_Critical_Requests count="1">
<Time_Critical_Request>
<Programme_ID>{:d}</Programme_ID>
<Observation_Request_ID>1</Observation_Request_ID>
<Observation_Category>time critical</Observation_Category>
<Proprietary_Period_First_Visit unit="days">{:d}</Proprietary_Period_First_Visit>
<Proprietary_Period_Last_Visit unit="days">{:d}</Proprietary_Period_Last_Visit>
<Target_Name>{}</Target_Name>
<Gaia_ID>GAIA DR2 {}</Gaia_ID>
<Spectral_Type>{}</Spectral_Type>
<Target_Magnitude unit="mag">{:0.2f}</Target_Magnitude>
<Target_Magnitude_Error unit="mag">{:0.2f}</Target_Magnitude_Error>
<Readout_Mode>{}</Readout_Mode>
<Right_Ascension unit="deg">{:0.7f}</Right_Ascension>
<Declination unit="deg">{:0.7f}</Declination>
<RA_Proper_Motion unit="mas/year">{:0.2f}</RA_Proper_Motion>
<DEC_Proper_Motion unit="mas/year">{:0.2f}</DEC_Proper_Motion>
<Parallax unit="mas">{:0.2f}</Parallax>
<T_Eff unit="Kelvin">{:0.0f}</T_Eff>
<Extinction unit="mag">0.00</Extinction>
<Earliest_Start unit="BJD">{:0.3f}</Earliest_Start>
<Latest_End unit="BJD">{:0.3f}</Latest_End>
<Exposure_Time unit="sec">{:0.2f}</Exposure_Time>
<Number_Stacked_Images>{:d}</Number_Stacked_Images>
<Number_Stacked_Imagettes>{:d}</Number_Stacked_Imagettes>
<Transit_Time unit="BJD">{:0.6f}</Transit_Time>
<Transit_Period unit="days">{:0.6f}</Transit_Period>
<Visit_Duration unit="sec">{:0.1f}</Visit_Duration>
<Number_of_Visits>{:d}</Number_of_Visits>
<Continuous_Visits>false</Continuous_Visits> <!-- Irrelevant for nominal science observations -->
<Priority>{:d}</Priority>
<Minimum_Effective_Duration unit="%">{:d}</Minimum_Effective_Duration>
<Earliest_Observation_Start unit="phase">{:0.4f}</Earliest_Observation_Start>
<Latest_Observation_Start unit="phase">{:0.4f}</Latest_Observation_Start>
<Send_Data_Taking_During_SAA>false</Send_Data_Taking_During_SAA>
<Send_Data_Taking_During_Earth_Constraints>false</Send_Data_Taking_During_Earth_Constraints>
<PITL>true</PITL>
</Time_Critical_Request>
</List_of_Time_Critical_Requests>
</Data_Block>
</Earth_Explorer_File>
"""
_xml_non_time_critical_fmt = """<?xml version="1.0" encoding="UTF-8"?>
<!-- -->
<!-- This file contains non-time-critical observations -->
<!-- -->
<!-- Generated by pycheops.make_xml_files -->
<!-- -->
<Earth_Explorer_File xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="ext_app_observation_requests_schema.xsd">
<Earth_Explorer_Header>
<Fixed_Header>
<File_Name>CH_TU2018-03-05T10-00-00_EXT_APP_ObservationRequests_V1234</File_Name>
<File_Description>Observation requests file</File_Description>
<Notes>Template file for CHEOPS observation Request : PHT2, FeasibilityChecker, CHEOPSim</Notes>
<Mission>CHEOPS</Mission>
<File_Class>TEST</File_Class>
<File_Type>EXT_APP_ObservationRequests</File_Type>
<Validity_Period>
<Validity_Start>UTC=2017-01-01T00:00:00</Validity_Start>
<Validity_Stop>UTC=2023-01-01T00:00:00</Validity_Stop>
</Validity_Period>
<File_Version>0001</File_Version>
<Source>
<System>PSO</System>
<Creator>PHT2</Creator>
<Creator_Version>000</Creator_Version>
<Creation_Date>UTC={}</Creation_Date>
</Source>
</Fixed_Header>
<Variable_Header>
<Programme_Type>10</Programme_Type>
</Variable_Header>
</Earth_Explorer_Header>
<Data_Block type="xml">
<!-- NON-TIME-CRITICAL REQUEST -->
<List_of_Non_Time_Critical_Requests count="1">
<Non_Time_Critical_Request>
<Programme_ID>{:d}</Programme_ID>
<Observation_Request_ID>1</Observation_Request_ID>
<Observation_Category>non time critical</Observation_Category>
<Proprietary_Period_First_Visit unit="days">{:d}</Proprietary_Period_First_Visit>
<Proprietary_Period_Last_Visit unit="days">{:d}</Proprietary_Period_Last_Visit>
<Target_Name>{}</Target_Name>
<Gaia_ID>GAIA DR2 {}</Gaia_ID>
<Spectral_Type>{}</Spectral_Type>
<Target_Magnitude unit="mag">{:0.2f}</Target_Magnitude>
<Target_Magnitude_Error unit="mag">{:0.2f}</Target_Magnitude_Error>
<Readout_Mode>{}</Readout_Mode>
<Right_Ascension unit="deg">{:0.5f}</Right_Ascension>
<Declination unit="deg">{:0.5f}</Declination>
<RA_Proper_Motion unit="mas/year">{:0.2f}</RA_Proper_Motion>
<DEC_Proper_Motion unit="mas/year">{:0.2f}</DEC_Proper_Motion>
<Parallax unit="mas">{:0.2f}</Parallax>
<T_Eff unit="Kelvin">{:0.0f}</T_Eff>
<Extinction unit="mag">0.00</Extinction>
<Earliest_Start unit="BJD">{:0.3f}</Earliest_Start>
<Latest_End unit="BJD">{:0.3f}</Latest_End>
<Exposure_Time unit="sec">{:0.2f}</Exposure_Time>
<Number_Stacked_Images>{:d}</Number_Stacked_Images>
<Number_Stacked_Imagettes>{:d}</Number_Stacked_Imagettes>
<Visit_Duration unit="sec">{:0.1f}</Visit_Duration>
<Number_of_Visits>{:d}</Number_of_Visits>
<Continuous_Visits>false</Continuous_Visits> <!-- Irrelevant for nominal science observations -->
<Priority>{:d}</Priority>
<Minimum_Effective_Duration unit="%">{:d}</Minimum_Effective_Duration>
<Send_Data_Taking_During_SAA>false</Send_Data_Taking_During_SAA>
<Send_Data_Taking_During_Earth_Constraints>false</Send_Data_Taking_During_Earth_Constraints>
<PITL>true</PITL>
</Non_Time_Critical_Request>
</List_of_Non_Time_Critical_Requests>
</Data_Block>
</Earth_Explorer_File>
"""
_phase_range_format_1 = """
<List_of_Phase_Ranges count="1">
<Phase_Range>
<Start unit="phase">{:0.4f}</Start>
<End unit="phase">{:0.4f}</End>
<Minimum_Phase_Duration unit="%">{:d}</Minimum_Phase_Duration>
</Phase_Range>
</List_of_Phase_Ranges>
"""
_phase_range_format_2 = """
<List_of_Phase_Ranges count="2">
<Phase_Range>
<Start unit="phase">{:0.4f}</Start>
<End unit="phase">{:0.4f}</End>
<Minimum_Phase_Duration unit="%">{:d}</Minimum_Phase_Duration>
</Phase_Range>
<Phase_Range>
<Start unit="phase">{:0.4f}</Start>
<End unit="phase">{:0.4f}</End>
<Minimum_Phase_Duration unit="%">{:d}</Minimum_Phase_Duration>
</Phase_Range>
</List_of_Phase_Ranges>
<!-- If two critical phase ranges are defined above, this parameter is used to request that both ("true") or -->
<!-- only one of the two phase ranges ("false") are observed. This can be seen as a AND / OR operator, respectively. -->
<!-- ############################# Set the critical phase ranges -->
<Fulfil_all_Phase_Ranges>{}</Fulfil_all_Phase_Ranges>
"""
def _GaiaDR2Match(row, fC, match_radius=1, gaia_mag_tolerance=0.5,
id_check=True):
flags = 0
coo = SkyCoord(row['_RAJ2000'],row['_DEJ2000'],
frame='icrs',unit=(u.hourangle, u.deg))
s = coo.to_string('decimal',precision=5).split()
_ = StringIO()
with redirect_stdout(_), redirect_stderr(_):
job = Gaia.launch_job(_query.format(s[0],s[1]))
DR2Table = job.get_results()
# Replace missing values for pmra, pmdec, parallax
DR2Table['pmra'].fill_value = 0.0
DR2Table['pmdec'].fill_value = 0.0
DR2Table['parallax'].fill_value = 0.0
DR2Table = Table(DR2Table.filled(), masked=True)
# Avoid problems with small/negative parallaxes
DR2Table['parallax'].mask = DR2Table['parallax'] <= 0.1
DR2Table['parallax'].fill_value = 0.0999
DR2Table = DR2Table.filled()
# Fix units for proper motion columns
DR2Table['pmra'].unit = 'mas / yr'
DR2Table['pmdec'].unit = 'mas / yr'
cat = SkyCoord(DR2Table['ra'],DR2Table['dec'],
frame='icrs',
distance=Distance(parallax=DR2Table['parallax'].quantity),
pm_ra_cosdec=DR2Table['pmra'], pm_dec=DR2Table['pmdec'],
obstime=Time(2015.5, format='decimalyear')
).apply_space_motion(new_obstime=Time('2000-01-01 00:00:00.0'))
idx, d2d, _ = coo.match_to_catalog_sky(cat)
if d2d > match_radius*u.arcsec:
raise ValueError('No Gaia DR2 source within specified match radius')
try:
key = re.match('[AFGKM][0-9]', row['SpTy'])[0]
GV = SpTypeToGminusV[key]
except TypeError:
flags += 1024
GV = -0.15
try:
Gmag = float(row['Gmag'])
except ValueError:
        raise ValueError('Invalid Gmag value {}'.format(row['Gmag']))
except KeyError:
Gmag = row['Vmag'] + GV
if abs(Gmag-DR2Table['phot_g_mean_mag'][idx]) > gaia_mag_tolerance:
if 'Gmag' in row.colnames:
print("Input value: G = ", Gmag)
else:
print("Input values: V = {:5.2f}, SpTy = {} -> G_est = {:5.2f}"
.format(row['Vmag'], row['SpTy'], Gmag))
print("Catalogue values: G = {:5.2f}, Source = {}"
.format(DR2Table['phot_g_mean_mag'][idx],
DR2Table['source_id'][idx] ))
raise ValueError('Nearest Gaia source does not match estimated G mag')
if (str(row['Old_Gaia_DR2']) != str(DR2Table['source_id'][idx])):
if id_check:
raise ValueError('Nearest Gaia DR2 source does not match input ID')
flags += 32768
gmag = np.array(DR2Table['phot_g_mean_mag'])
sep = coo.separation(cat)
if any((sep <= 51*u.arcsec) & (gmag < gmag[idx])):
flags += 16384
if any((sep > 51*u.arcsec) & (sep < 180*u.arcsec) & (gmag < gmag[idx])):
flags += 8192
gflx = np.ma.array(10**(-0.4*(gmag-gmag[idx])), mask=False,fill_value=0.0)
gflx.mask[idx] = True
contam = np.nansum(gflx.filled()*fC(cat.separation(cat[idx]).arcsec))
if contam > 1:
flags += 4096
elif contam > 0.1:
flags += 2048
return DR2Table[idx], contam, flags, cat[idx]
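# Usage sketch (illustrative; assumes network access to the Gaia archive,
# that `row` is an astropy table row carrying _RAJ2000, _DEJ2000, SpTy,
# Gmag or Vmag, and Old_Gaia_DR2, and that `fC` is the pickled
# contamination function loaded in main()):
#   DR2data, contam, flags, coords = _GaiaDR2Match(row, fC)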
def _choose_stacking(Texp):
if Texp < 0.1:
return 40, 4
elif Texp < 0.15:
return 39, 3
elif Texp < 0.20:
return 36, 3
elif Texp < 0.40:
return 33, 3
elif Texp < 0.50:
return 30, 3
elif Texp < 0.55:
return 28, 2
elif Texp < 0.65:
return 26, 2
elif Texp < 0.85:
        return 24, 2
elif Texp < 1.05:
return 22, 2
elif Texp < 1.10:
return 44, 4
elif Texp < 1.20:
return 40, 4
elif Texp < 1.25:
return 39, 3
elif Texp < 1.30:
return 36, 3
elif Texp < 1.50:
return 33, 3
elif Texp < 1.60:
return 30, 3
elif Texp < 1.65:
return 28, 2
elif Texp < 1.75:
return 26, 2
elif Texp < 1.95:
return 24, 2
elif Texp < 2.15:
return 22, 2
elif Texp < 2.40:
return 20, 2
elif Texp < 2.70:
return 18, 2
elif Texp < 2.80:
return 16, 2
elif Texp < 2.90:
return 15, 1
elif Texp < 3.05:
return 14, 1
elif Texp < 3.20:
return 13, 1
elif Texp < 3.40:
return 12, 1
elif Texp < 3.65:
return 11, 1
elif Texp < 3.90:
return 10, 1
elif Texp < 4.25:
return 9, 1
elif Texp < 4.70:
return 8, 1
elif Texp < 5.25:
return 7, 1
elif Texp < 6.05:
return 6, 1
elif Texp < 7.25:
return 5, 1
elif Texp < 9.20:
return 4, 1
elif Texp < 12.5:
return 3, 1
elif Texp < 22.65:
return 2, 1
else:
return 1, 0
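# Worked examples of the look-up above: Texp = 1.0 s falls in the
# "Texp < 1.05" branch, so _choose_stacking(1.0) returns (22, 2), i.e. 22
# stacked images and 2 stacked imagettes; for Texp >= 22.65 s no stacking
# is applied, e.g. _choose_stacking(30.0) returns (1, 0).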
def _choose_romode(t_exp):
if t_exp < 1.05:
return 'ultrabright'
if t_exp < 2.226:
return 'bright'
if t_exp < 12:
return 'faint fast'
return 'faint'
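# The read-out mode thresholds above give, for example:
#   _choose_romode(0.5) -> 'ultrabright', _choose_romode(1.5) -> 'bright',
#   _choose_romode(5.0) -> 'faint fast', _choose_romode(30.0) -> 'faint'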
def _creation_time_string():
t = Time.now()
t.precision = 0
return t.isot
def _make_list_of_phase_ranges(Num_Ranges,
BegPh1, EndPh1, Effic1,
BegPh2, EndPh2, Effic2):
if Num_Ranges == 1 :
return _phase_range_format_1.format(BegPh1, EndPh1, Effic1)
if Num_Ranges == 2 :
return _phase_range_format_2.format(BegPh1, EndPh1, Effic1,
BegPh2, EndPh2, Effic2, 'true')
if Num_Ranges == -2 :
return _phase_range_format_2.format(BegPh1, EndPh1, Effic1,
BegPh2, EndPh2, Effic2, 'false')
return ""
def _parcheck_non_time_critical(Priority, MinEffDur,
Earliest_start_date, Latest_end_date):
if not Priority in (1, 2, 3):
return """
The priority has to be set equal to 1, 2, or 3: 1 = A-grade, 2 = B-grade, 3 = C-grade
"""
if (MinEffDur < 0) or (MinEffDur > 100):
return """
The minimum effective duration is in % and it has to be between 0 and 100
"""
if ( (Earliest_start_date > 0) and (Latest_end_date > 0) and
Earliest_start_date >= Latest_end_date) :
return """
The earliest start date must be less than the latest end date
"""
return None
def _parcheck_time_critical(Priority, MinEffDur,
Earliest_start_date, Latest_end_date,
Earliest_start_phase, Latest_start_phase,
Period, Num_Ranges,
BegPh1, EndPh1, Effic1,
BegPh2, EndPh2, Effic2):
if (((Earliest_start_phase > -50) and (Earliest_start_phase < 0)) or
(Earliest_start_phase > 1)) :
return """
The earliest start phase should be a number between 0 and 1, inclusive
"""
if (((Latest_start_phase > -50) and (Latest_start_phase < 0)) or
(Latest_start_phase > 1)) :
return """
The latest start phase should be a number between 0 and 1, inclusive
"""
if not Num_Ranges in (-2,0,1,2):
return """
The number of constrained ranges is invalid (not 0, 1, 2 or -2)
"""
if abs(Num_Ranges) > 0:
if (BegPh1 < 0) or (BegPh1 > 1) or (EndPh1 < 0) or (EndPh1 > 1):
return """
Invalid phase range for phase constraint 1
"""
if (Effic1 < 0) or (Effic1 > 99):
return """
Invalid efficiency for phase constraint 1
"""
if abs(Num_Ranges) > 1:
if (BegPh2 < 0) or (BegPh2 > 1) or (EndPh2 < 0) or (EndPh2 > 1):
return """
Invalid phase range for phase constraint 2
"""
if (Effic2 < 0) or (Effic2 > 99):
return """
Invalid efficiency for phase constraint 2
"""
return _parcheck_non_time_critical(Priority, MinEffDur, Earliest_start_date,
Latest_end_date)
def _target_table_row_to_xml(row, programme_id=0,
proprietary_first=547, proprietary_last=365,
checker=False, user_g_mag=False):
period = row['Period']
t_exp = row['T_exp']
n_stack_image, n_stack_imagettes = _choose_stacking(t_exp)
c = SkyCoord("{} {}".format(row['_RAJ2000'],row['_DEJ2000']),
frame='icrs', obstime='J2000.0',
unit=(u.hourangle, u.deg),
pm_ra_cosdec=row['pmra']*u.mas/u.yr,
pm_dec=row['pmdec']*u.mas/u.yr )
radeg = float(c.to_string(precision=5).split()[0])
dedeg = float(c.to_string(precision=5).split()[1])
if user_g_mag:
gmag = row['Gmag']
e_gmag = row['e_Gmag']
else:
gmag = row['dr2_g_mag']
e_gmag = row['e_dr2_g_mag']
if checker:
try:
key = re.match('[AFGKM][0-9]', row['SpTy'])[0]
GV = SpTypeToGminusV[key]
except TypeError:
GV = -0.15
mag, e_mag = gmag-GV, e_gmag
else:
mag, e_mag = gmag, e_gmag
if period > 0:
error = _parcheck_time_critical(
row['Priority'], row['MinEffDur'],
row['BJD_early'], row['BJD_late'],
row['Ph_early'], row['Ph_late'],
period, row['N_Ranges'],
row["BegPh1"], row["EndPh1"], row["Effic1"],
row["BegPh2"], row["EndPh2"], row["Effic2"])
assert error is None, (
"Failed to process data for observing request {}\n{}\n"
.format(row['ObsReqName'],error))
xml = _xml_time_critical_fmt.format(
_creation_time_string(),
            programme_id, proprietary_first, proprietary_last,
row['Target'], row['Gaia_DR2'], row['SpTy'],
mag, e_mag,
_choose_romode(t_exp),
radeg, dedeg, row['pmra'], row['pmdec'],
row['parallax'], row['T_eff'],
row['BJD_early'], row['BJD_late'], t_exp,
n_stack_image, n_stack_imagettes,
row['BJD_0'], period,
row['T_visit'], row['N_Visits'], row['Priority'],
row['MinEffDur'],
row['Ph_early'], row['Ph_late'],
_make_list_of_phase_ranges(row['N_Ranges'],
row["BegPh1"], row["EndPh1"], row["Effic1"],
row["BegPh2"], row["EndPh2"], row["Effic2"])
)
else:
error = _parcheck_non_time_critical(
row['Priority'], row['MinEffDur'],
row['BJD_early'], row['BJD_late'])
assert error is None, (
"Failed to process data for observing request {}\n{}\n"
.format(row['ObsReqName'],error))
xml = _xml_non_time_critical_fmt.format(
_creation_time_string(),
            programme_id, proprietary_first, proprietary_last,
row['Target'], row['Gaia_DR2'], row['SpTy'],
mag, e_mag,
_choose_romode(t_exp),
radeg, dedeg, row['pmra'], row['pmdec'],
row['parallax'], row['T_eff'],
row['BJD_early'], row['BJD_late'], t_exp,
n_stack_image, n_stack_imagettes,
row['T_visit'], row['N_Visits'], row['Priority'],
row['MinEffDur']
)
if checker:
xml = xml.replace('Target_Magnitude','Target_Vmagnitude')
return xml
def main():
# Set up command line switches
parser = argparse.ArgumentParser(
description='Create xml files for CHEOPS observing requests.',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog = textwrap.dedent('''\
Creates XML files suitable for CHEOPS PHT2, FC, and CHEOPSim from
observation requests given in an input table file.
The target for each observation is defined by the input _RAJ2000 and
_DEJ2000 coordinates. There must be a matching source in Gaia DR2 for
each input coordinate. The G-band magnitude of the source must also
match the G-band magnitude provided in the input table, or estimated
from Vmag and SpTy from the same table if Gmag is not given. The
following table is an abbreviated version of the look-up table used to
estimate the G-band magnitude from Vmag and SpTy.
SpTy     G-V    Teff/K
----------------------
 A0    -0.019    9700
 F5    -0.109    6510
 G5    -0.181    5660
 K0    -0.229    5280
 M0    -0.997    3870
 M9    -3.337    2400
N.B. An estimate of the spectral type is needed in any case for
observation requests because accurate flat-fielding requires an
estimate of the star's effective temperature.
The input table can be any format suitable for reading with the
command astropy.table.Table.read(), e.g., CSV.
The following columns must be defined in the table.
ObsReqName - unique observing request identifier
Target - target name
_RAJ2000 - right ascension, ICRS epoch J2000.0, hh:mm:ss.ss
_DEJ2000 - declination, ICRS epoch J2000.0, +dd:mm:ss.s
SpTy - spectral type (any string starting [AFGKM][0-9])
BJD_early - earliest start date (BJD)
BJD_late - latest start date (BJD)
T_visit - visit duration in seconds
N_Visits - number of requested visits
Priority - 1, 2 or 3
MinEffDur - minimum on-source time, percentage of T_visit (integer)
In addition, the input table must specify either ...
Gmag - G-band magnitude
e_Gmag - error in G-band magnitude
... or ...
Vmag - V-band magnitude
e_Vmag - error in V-band magnitude
If the flag --ignore-gaia-id-check is not specified on the command
line then the following column is also required.
Gaia_DR2 - Gaia DR2 identification number (integer)
If the flag --auto-expose is not specified on the command
line then the following column is also required.
T_exp - exposure time (seconds)
In addition, for time-critical observations the following columns must
also be defined.
BJD_0 - reference time for 0 phase (e.g., mid-transit), BJD
Period - period in days
Ph_early - earliest allowable start phase for visit
Ph_late - latest allowable start phase for visit
The following columns will also be used if available.
N_Ranges - number of phase ranges with extra efficiency constraints
BegPh1 - start of phase range 1
EndPh1 - end of phase range 1
Effic1 - minimum observing efficiency (%), phase range 1 (integer)
BegPh2 - start of phase range 2
EndPh2 - end of phase range 2
Effic2 - minimum observing efficiency (%), phase range 2 (integer)
N.B. If you have 2 phase ranges with extra efficiency constraints but
only require one of them to be satisfied then use N_Ranges = -2
The terminal output includes the following columns
Gaia_DR2_ID - Gaia DR2 ID from Gaia data archive. This must match
the value of Gaia_DR2 in the input file unless the flag
--ignore-gaia-id-check is specified.
** N.B. The PI is responsible for checking that the DR2 ID is correct **
_RAJ2000,_DEJ2000 - ICRS position of matching Gaia source in degrees
Gmag - The target mean G-band magnitude from Gaia DR2 catalogue.
Contam - estimate of the contamination of a 30 arcsec photometric
aperture by nearby stars relative to the target flux.
Vis - estimate of the percentage of the orbit for which the target is
observable by CHEOPS. This estimate is not a substitute for the
detailed scheduling information provided by the CHEOPS
Feasibility Checker.
Texp - the exposure time used in the output XML file.
e-/s - The count rate in e-/s based on the star's Gaia G magnitude and
spectral type. This value returned is suitable for use in the
CHEOPS exposure time calculator using the option "Expected flux
in CHEOPS passband".
duty - duty cycle (%)
frac - maximum counts as a fraction of the full-well capacity (%)
img - image stacking order
igt - imagette stacking order
Flags - sum of the following error/warnings flags.
+ 32768 = Gaia ID error - input/output IDs do not match
+ 16384 = Acquisition error, brighter star within 51"
+ 8192 = Acquisition warning, brighter star within 51"-180"
+ 4096 = Contamination error, Contam > 1
+ 2048 = Contamination warning, Contam = 0.1 - 1
+ 1024 = No spectral type match, assuming G-V = -0.15
+ 512 = Visibility error, efficiency = 0
+ 256 = Visibility warning, efficiency < 50%
+ 128 = Exposure time error - target will be saturated
+ 64 = Exposure time warning - below recommended minimum time
+ 32 = Exposure time error - magnitude out of range, not set
+ 16 = Exposure time warning - magnitude out of range, not checked
The exposure time can be calculated automatically based on the G-band
magnitude and spectral type of the target. The default behaviour is to
use an exposure time that will give 85% of the full-well capacity at
the peak of the PSF, up to the maximum allowed exposure time of 60s. This
percentage can be adjusted using the option --scaling-factor-percent.
See examples/make_xml_files/ReadMe.txt in the source distribution for a
description of example input files included in the same folder.
--
'''))
parser.add_argument('table', nargs='?',
help='Table of observing requests to be processed into XML'
)
parser.add_argument('-p', '--programme_id',
default=1, type=int,
help='''Programme ID
(default: %(default)d)
'''
)
parser.add_argument('-r', '--match_radius',
default=1.0, type=float,
help='''
Tolerance in arcsec for cross-matching with Gaia DR2 (default:
%(default)3.1f)
'''
)
parser.add_argument('-g', '--gaia_mag_tolerance',
default=0.5, type=float,
help= '''
Tolerance in magnitudes for Gaia DR2 cross-match (default:
%(default)3.1f)
'''
)
parser.add_argument('-u', '--use_gaia_mag_from_table',
action='store_const',
dest='user_g_mag',
const=True,
default=False,
help='''
Use Gaia magnitude from the input table instead of Gaia DR2 value for
calculation and in the output XML file.
'''
)
parser.add_argument('--ignore-gaia-id-check',
action='store_const',
dest='id_check',
const=False,
default=True,
help='''
Use Gaia DR2 ID from Gaia data archive
** N.B. The PI is responsible for checking that the DR2 ID is correct **
'''
)
parser.add_argument('-a', '--auto-expose',
action='store_const',
dest='auto_expose',
const=True,
default=False,
help='Calculate exposure time automatically'
)
parser.add_argument('-s', '--scaling-factor-percent',
default=85., type=float,
help='Scaling factor for auto-expose calculation'
)
parser.add_argument('-e', '--example-file-copy',
action='store_const',
dest='copy_examples',
const=True,
default=False,
help='Get a copy of the example files - no other action is performed'
)
parser.add_argument('-f', '--overwrite',
action='store_const',
dest='overwrite',
const=True,
default=False,
help='Overwrite existing output files.'
)
parser.add_argument('-c', '--checker',
action='store_const',
dest='checker',
const=True,
default=False,
help='Output XML suitable for use with Scheduling Feasibility Checker'
)
parser.add_argument('-x', '--suffix',
default='_EXT_APP_ObservationRequests.xml', type=str,
help='''
Output file name suffix
(default: %(default)s)
'''
)
parser.add_argument('-d', '--directory',
default='.', type=str,
help='''
Output directory for xml files
(default: %(default)s)
'''
)
parser.add_argument('--proprietary_last',
default=365, type=int,
        help='Proprietary period after last visit'
)
parser.add_argument('--proprietary_first',
default=547, type=int,
        help='Proprietary period after first visit'
)
args = parser.parse_args()
if args.copy_examples:
src = join(dirname(abspath(__file__)),'examples','make_xml_files')
src_files = listdir(src)
for file_name in src_files:
full_file_name = join(src, file_name)
if (isfile(full_file_name)):
copy(full_file_name, getcwd())
print("Copied examples files from {}".format(src))
exit()
if args.table is None:
parser.print_usage()
exit(1)
table = Table.read(args.table)
if len(set(table['ObsReqName'])) < len(table):
raise ValueError("Duplicate observing request names in {}"
.format(args.table))
try:
table['Old_Gaia_DR2'] = table['Gaia_DR2']
except KeyError as e:
if args.id_check:
message = e.args[0]
message += (" - use flag --ignore-gaia-id-check to insert GAIA"
" DR2 identifiers from Gaia data archive. ** N.B. The PI"
" is responsible to check the DR2 ID is correct ** " )
e.args = (message,)
raise
else:
table['Gaia_DR2'] = -1
table['Old_Gaia_DR2'] = -1
try:
table['Old_T_exp'] = table['T_exp']
except KeyError as e:
if args.auto_expose:
table['T_exp'] = -1.0
table['Old_T_exp'] = -1
else:
message = e.args[0]
message += (" - use flag --auto-expose to use recommended maximum"
" exposure time")
e.args = (message,)
raise
for key in ('T_eff',):
try:
table['Old_{}'.format(key)] = table[key]
except KeyError:
table[key] = -1
for key in ('pmra', 'pmdec', 'parallax', 'dr2_g_mag', 'e_dr2_g_mag'):
try:
table['Old_{}'.format(key)] = table[key]
except KeyError:
table[key] = 0.0
# Create missing optional columns
for key in ( 'Period', 'BJD_0', 'Ph_early', 'Ph_late', 'BegPh1',
'EndPh1', 'Effic1', 'BegPh2', 'EndPh2', 'Effic2'):
if not key in table.columns:
table[key] = 0.0
if not 'N_Ranges' in table.columns:
table['N_Ranges'] = 0
    # Ensure RA and Dec columns are wide enough to accept updated RA/Dec
    # values from the Gaia catalogue to full precision
table['_RAJ2000'] = [s.rjust(11) for s in table['_RAJ2000']]
table['_DEJ2000'] = [s.rjust(11) for s in table['_DEJ2000']]
# Load contamination function from pickle
config = load_config()
cache_path = config['DEFAULT']['data_cache_path']
pfile = join(cache_path,'Contamination_33arcsec_aperture.p')
with open(pfile, 'rb') as fp:
fC= pickle.load(fp)
rtol = args.match_radius
gtol = args.gaia_mag_tolerance
# Header to screen output
print('# Output from: {} version {}'.format(parser.prog, __version__))
print('# Run started: {}'.format(Time(Time.now(),precision=0).iso))
print('# Input file: {}'.format(args.table))
print('# Gaia match radius: {:0.1f} arcsec'.format(rtol))
print('# Gmag tolerance: {:0.1f} mag '.format(gtol))
if args.auto_expose:
print('# Exposure time scaling factor: {:0.1f} %'.
format(args.scaling_factor_percent))
else:
print('# Exposure time from input file')
print('# Output file suffix: {} '.format(args.suffix))
ObsReqNameFieldWidth = max(12, len(max(table['ObsReqName'],key=len)))
ObsReqNameFormat = "{{:{}s}}".format(ObsReqNameFieldWidth)
ObsReqNameHeader = ObsReqNameFormat.format('ObsReqName')
TerminalOutputFormat = (
'{}'.format(ObsReqNameFormat)+
'{:20d} {:5.2f} {:8.4f} {:+8.4f} {:6.3f} {:2d} {:4.1f} {:5d}'+
'{:9.2e} {:4.0f} {:3.0f} {:3d} {:3d}')
print('#')
if not args.id_check:
print('#')
print('# ** WARNING: Gaia ID of target not checked against input **')
        print('# ** The PI is responsible for checking that the DR2 ID is correct **')
print('#')
tstr = 'Gaia_DR2_ID Gmag _RAJ2000 _DEJ2000 Contam Vis Texp Flags'
tstr += ' e-/s frac duty img igt'
print('#{}'.format(ObsReqNameHeader) + tstr.format(ObsReqNameHeader))
# String of coordinates, Vmag/Gmag and SpTy to enable re-use of DR2 data
old_tag = None
for row in table:
coo = SkyCoord(row['_RAJ2000'],row['_DEJ2000'],
frame='icrs',unit=(u.hourangle, u.deg))
if 'Gmag' in row.colnames:
tag = "{}, {}, {}".format(coo.to_string(), row['Gmag'], row['SpTy'])
else:
tag = "{}, {}, {}".format(coo.to_string(), row['Vmag'], row['SpTy'])
if tag != old_tag:
old_tag = tag
DR2data,contam,flags,coords = _GaiaDR2Match(row, fC, rtol, gtol,
args.id_check)
rastr = coords.ra.to_string('hour', precision=2, sep=':', pad=True)
decstr = coords.dec.to_string('deg', precision=1, sep=':', pad=True,
alwayssign=True)
row['Gaia_DR2'] = DR2data['source_id']
row['_RAJ2000'] = rastr
row['_DEJ2000'] = decstr
row['pmra'] = DR2data['pmra']
row['pmdec'] = DR2data['pmdec']
row['parallax'] = DR2data['parallax']
row['dr2_g_mag'] = DR2data['phot_g_mean_mag']
row['e_dr2_g_mag'] = 1.086/DR2data['phot_g_mean_flux_over_error']
try:
if row['Old_T_eff'] <= 0:
raise KeyError
row['T_eff'] = row['Old_T_eff']
except KeyError:
try:
key = re.match('[AFGKM][0-9]', row['SpTy'])[0]
row['T_eff'] = SpTypeToTeff[key]
except KeyError:
warn('# No Teff value for spectral type, using Teff=5999')
row['T_eff'] = 5999
_T = row['T_eff']
if args.user_g_mag:
_G = row['Gmag']
else:
_G = DR2data['phot_g_mean_mag']
if args.auto_expose:
row['T_exp'] = exposure_time(_G, _T,
frac=args.scaling_factor_percent/100)
if row['T_exp'] >60:
raise ValueError("Maximum exposure time 60 s exceeded")
img, igt, cad, duty, frac = cadence(row['T_exp'], _G, _T)
if frac > 0.95:
flags += 128
if frac < 0.1:
flags += 64
xmlfile = "{}{}".format(row['ObsReqName'],args.suffix)
if args.directory is None:
xmlpath = xmlfile
else:
xmlpath = join(args.directory, xmlfile)
if exists(xmlpath) and not args.overwrite:
raise IOError("Output file {} exists, use -f option to overwrite"
.format(xmlpath))
        with open(xmlpath, 'w') as f:
            f.write(_target_table_row_to_xml(row,
                programme_id=args.programme_id,
                proprietary_first=args.proprietary_first,
                proprietary_last=args.proprietary_last,
                checker=args.checker, user_g_mag=args.user_g_mag))
        vis = visibility(coords.ra.degree, coords.dec.degree)
        # vis < 50 adds the 256 warning flag; vis == 0 adds a further 256 so
        # that the total contribution becomes 512, the visibility error flag.
        if vis < 50:
            flags += 256
        if vis == 0:
            flags += 256
c_tot, c_av, c_max = count_rate(_G, row['T_exp'])
print(TerminalOutputFormat.format( row['ObsReqName'],
DR2data['source_id'], DR2data['phot_g_mean_mag'],
coords.ra.degree, coords.dec.degree,
contam, vis, row['T_exp'],flags, c_tot, 100*frac, duty, img, igt))
| 41,091 | 36.595608 | 147 | py |
pycheops | pycheops-master/pycheops/models.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
r"""
models
======
Models and likelihood functions for use with lmfit
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from lmfit.model import Model
from lmfit.models import COMMON_INIT_DOC, COMMON_GUESS_DOC, update_param_vals
from numba import jit
from .funcs import t2z, xyz_planet, vrad, tzero2tperi, esolve
from scipy.optimize import brent, brentq
from collections import OrderedDict
from asteval import Interpreter
from pycheops.constants import c
c_light = c/1000 # km/s
__all__ = ['qpower2', 'ueclipse', 'TransitModel', 'EclipseModel',
'FactorModel', 'ThermalPhaseModel', 'ReflectionModel',
'RVModel', 'RVCompanion','EBLMModel', 'PlanetModel',
'scaled_transit_fit', 'minerr_transit_fit']
@jit(nopython=True)
def qpower2(z,k,c,a):
r"""
Fast and accurate transit light curves for the power-2 limb-darkening law
The power-2 limb-darkening law is
.. math::
I(\mu) = 1 - c (1 - \mu^\alpha)
Light curves are calculated using the qpower2 approximation [2]_. The
approximation is accurate to better than 100ppm for radius ratio k < 0.1.
**N.B.** qpower2 is untested/inaccurate for values of k > 0.2
.. [2] Maxted, P.F.L. & Gill, S., 2019A&A...622A..33M
:param z: star-planet separation on the sky cf. star radius (array)
:param k: planet-star radius ratio (scalar, k<1)
:param c: power-2 limb darkening coefficient
:param a: power-2 limb darkening exponent
:returns: light curve (observed flux)
:Example:
>>> from pycheops.models import qpower2
>>> from pycheops.funcs import t2z
>>> from numpy import linspace
>>> import matplotlib.pyplot as plt
>>> t = linspace(-0.025,0.025,1000)
>>> sini = 0.999
>>> rstar = 0.05
>>> ecc = 0.2
>>> om = 120
>>> tzero = 0.0
>>> P = 0.1
>>> z=t2z(t,tzero,P,sini,rstar,ecc,om)
>>> c = 0.5
>>> a = 0.7
>>> k = 0.1
>>> f = qpower2(z,k,c,a)
>>> plt.plot(t,f)
>>> plt.show()
"""
f = np.ones_like(z)
I_0 = (a+2)/(np.pi*(a-c*a+2))
g = 0.5*a
for i,zi in enumerate(z):
zt = np.abs(zi)
if zt <= (1-k):
s = 1-zt**2
c0 = (1-c+c*s**g)
c2 = 0.5*a*c*s**(g-2)*((a-1)*zt**2-1)
f[i] = 1-I_0*np.pi*k**2*(
c0 + 0.25*k**2*c2 - 0.125*a*c*k**2*s**(g-1) )
elif np.abs(zt-1) < k:
d = (zt**2 - k**2 + 1)/(2*zt)
ra = 0.5*(zt-k+d)
rb = 0.5*(1+d)
sa = 1-ra**2
sb = 1-rb**2
q = min(max(-1.,(zt-d)/k),1.)
w2 = k**2-(d-zt)**2
w = np.sqrt(w2)
b0 = 1 - c + c*sa**g
b1 = -a*c*ra*sa**(g-1)
b2 = 0.5*a*c*sa**(g-2)*((a-1)*ra**2-1)
a0 = b0 + b1*(zt-ra) + b2*(zt-ra)**2
a1 = b1+2*b2*(zt-ra)
aq = np.arccos(q)
J1 = ( (a0*(d-zt)-(2/3)*a1*w2 +
0.25*b2*(d-zt)*(2*(d-zt)**2-k**2))*w
+ (a0*k**2 + 0.25*b2*k**4)*aq )
J2 = a*c*sa**(g-1)*k**4*(
0.125*aq + (1/12)*q*(q**2-2.5)*np.sqrt(max(0.,1-q**2)) )
d0 = 1 - c + c*sb**g
d1 = -a*c*rb*sb**(g-1)
K1 = ((d0-rb*d1)*np.arccos(d) +
((rb*d+(2/3)*(1-d**2))*d1 - d*d0) *
np.sqrt(max(0.,1-d**2)) )
K2 = (1/3)*c*a*sb**(g+0.5)*(1-d)
f[i] = 1 - I_0*(J1 - J2 + K1 - K2)
return f
@jit(nopython=True)
def scaled_transit_fit(flux, sigma, model):
r"""
Optimum scaled transit depth for data with scaled errors
Find the value of the scaling factor s that provides the best fit of the
model m = 1 + s*(model-1) to the normalised input fluxes. It is assumed
that the true standard errors on the flux measurements are a factor b
times the nominal standard error(s) provided in sigma. Also returns
standard error estimates for s and b, sigma_s and sigma_b, respectively.
:param flux: Array of normalised flux measurements
:param sigma: Standard error estimate(s) for flux - array or scalar
:param model: Transit model to be scaled
:returns: s, b, sigma_s, sigma_b
"""
N = len(flux)
if N < 3:
return np.nan, np.nan, np.nan, np.nan
w = 1/sigma**2
_m = np.sum(w*(model-1)**2)
if _m == 0:
return np.nan, np.nan, np.nan, np.nan
s = np.sum(w*(model-1)*(flux-1))/_m
chisq = np.sum(w*((flux-1)-s*(model-1))**2)
b = np.sqrt(chisq/N)
sigma_s = b/np.sqrt(_m)
_t = 3*chisq/b**4 - N/b**2
if _t > 0:
sigma_b = 1/np.sqrt(_t)
else:
return np.nan, np.nan, np.nan, np.nan
return s, b, sigma_s, sigma_b
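# Minimal sketch of scaled_transit_fit on synthetic data (assumed inputs;
# the injected scale factor 0.8 and an error inflation b ~ 1 should be
# recovered since the nominal errors are correct here):
#   >>> t = np.linspace(-0.05, 0.05, 500)
#   >>> model = np.where(np.abs(t) < 0.02, 0.99, 1.0)  # box-shaped transit
#   >>> flux = 1 + 0.8*(model - 1) + np.random.normal(0, 1e-3, t.size)
#   >>> s, b, sigma_s, sigma_b = scaled_transit_fit(flux, 1e-3, model)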
def minerr_transit_fit(flux, sigma, model):
r"""
Optimum scaled transit depth for data with lower bounds on errors
Find the value of the scaling factor s that provides the best fit of the
model m = 1 + s*(model-1) to the normalised input fluxes. It is assumed
that the nominal standard error(s) provided in sigma are lower bounds to
the true standard errors on the flux measurements. [1]_ The probability
distribution for the true standard errors is assumed to be
.. math::
P(\sigma_{\rm true} | \sigma) = \sigma/\sigma_{\rm true}^2
:param flux: Array of normalised flux measurements
:param sigma: Lower bound(s) on standard error for flux - array or scalar
:param model: Transit model to be scaled
:returns: s, sigma_s
.. rubric:: References
.. [1] Sivia, D.S. & Skilling, J., Data Analysis - A Bayesian Tutorial, 2nd
ed., section 8.3.1
"""
N = len(flux)
if N < 3:
return np.nan, np.nan
def _negloglike(s, flux, sigma, model):
model = 1 + s*(model-1)
Rsq = ((model-flux)/sigma)**2
# In the limit Rsq -> 0, log-likelihood -> log(0.5)
x = np.full_like(Rsq,np.log(0.5))
_j = Rsq > np.finfo(0.0).eps
x[_j] = np.log((1-np.exp(-0.5*Rsq[_j]))/Rsq[_j])
return -np.sum(x)
def _loglikediff(s, loglike_0, flux, sigma, model):
return loglike_0 + _negloglike(s, flux, sigma, model)
if np.min(model) == 1:
return 0,0
# Bracket the minimum of _negloglike
s_min = 0
fa = _negloglike(s_min, flux, sigma, model)
s_mid = 1
fb = _negloglike(s_mid, flux, sigma, model)
if fb < fa:
s_max = 2
fc = _negloglike(s_max, flux, sigma, model)
while fc < fb:
s_max = 2*s_max
fc = _negloglike(s_max, flux, sigma, model)
else:
s_max = s_mid
fc = fb
s_mid = 0.5
fb = _negloglike(s_mid, flux, sigma, model)
while fb > fa:
if s_mid < 2**-16:
return 0,0
s_mid = 0.5*s_mid
fb = _negloglike(s_mid, flux, sigma, model)
s_opt, _f, _, _ = brent(_negloglike, args=(flux, sigma, model),
brack=(s_min,s_mid,s_max), full_output=True)
loglike_0 = -_f -0.5
s_hi = s_max
f_hi = _loglikediff(s_hi, loglike_0, flux, sigma, model)
while f_hi < 0:
s_hi = 2*s_hi
f_hi = _loglikediff(s_hi, loglike_0, flux, sigma, model)
s_hi = brentq(_loglikediff, s_opt, s_hi,
args = (loglike_0, flux, sigma, model))
s_err = s_hi - s_opt
return s_opt, s_err
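# Minimal sketch, reusing the synthetic `flux` and `model` arrays from the
# scaled_transit_fit example above; here the nominal error is treated as a
# lower bound on the true error:
#   >>> s, sigma_s = minerr_transit_fit(flux, 1e-3, model)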
@jit(nopython=True)
def ueclipse(z,k):
r"""
Eclipse light curve for a planet with uniform surface brightness by a star
:param z: star-planet separation on the sky cf. star radius (array)
:param k: planet-star radius ratio (scalar, k<1)
:returns: light curve (observed flux from eclipsed source)
"""
if (k > 1):
raise ValueError("ueclipse requires k < 1")
fl = np.ones_like(z)
for i,zi in enumerate(z):
zt = np.abs(zi)
if zt <= (1-k):
fl[i] = 0
elif np.abs(zt-1) < k:
t1 = np.arccos(min(max(-1,(zt**2+k**2-1)/(2*zt*k)),1))
t2 = np.arccos(min(max(-1,(zt**2+1-k**2)/(2*zt)),1))
t3 = 0.5*np.sqrt(max(0,(1+k-zt)*(zt+k-1)*(zt-k+1)*(zt+k+1)))
fl[i] = 1 - (k**2*t1 + t2 - t3)/(np.pi*k**2)
return fl
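# Example: ueclipse(np.array([0.0]), 0.1) returns [0.] (the companion is
# fully eclipsed at zero sky separation), while for z > 1+k the flux is
# unaffected, e.g. ueclipse(np.array([2.0]), 0.1) returns [1.].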
#----------------------
class TransitModel(Model):
r"""Light curve model for the transit of a spherical star by an opaque
spherical body (planet).
:param t: - independent variable (time)
:param T_0: - time of mid-transit
:param P: - orbital period
:param D: - (R_p/R_s)**2 = k**2
:param W: - (R_s/a)*sqrt((1+k)**2 - b**2)/pi
:param b: - a*cos(i)/R_s
:param f_c: - sqrt(ecc)*cos(omega)
:param f_s: - sqrt(ecc)*sin(omega)
:param h_1: - I(0.5) = 1 - c*(1-0.5**alpha)
:param h_2: - I(0.5) - I(0) = c*0.5**alpha
:param l_3: - Third light
Limb-darkening is described by the power-2 law:
.. math::
I(\mu) = 1 - c (1 - \mu^\alpha)
The transit depth, width and shape are parameterised by D, W and b. These
parameters are defined above in terms of the radii of the star and
planet, R_s and R_p, respectively, the semi-major axis, a, and the orbital
inclination, i. The eccentricity and longitude of periastron for the star's
orbit are e and omega, respectively.
The following parameters are defined for convenience:
* k = R_p/R_s;
* aR = a/R_s;
* rho = 0.013418*aR**3/(P/d)**2.
**N.B.** the mean stellar density in solar units is rho, but only if the
mass ratio q = M_planet/M_star is q << 1.
The flux value outside of transit is 1. The light curve is calculated using
the qpower2 algorithm, which is fast but only accurate for k < ~0.3.
If the input parameters are invalid or k>0.5 the model is returned as an
array of value 1 everywhere.
Third light is a constant added to the light curve and the fluxes are
re-normalised, i.e. TransitModel = (light_curve + l_3)/(1+l_3)
"""
def __init__(self, independent_vars=['t'], prefix='', nan_policy='raise',
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
def _transit_func(t, T_0, P, D, W, b, f_c, f_s, h_1, h_2, l_3):
if (D <= 0) or (D > 0.25) or (W <= 0) or (b < 0):
return np.ones_like(t)
if ((1-abs(f_c)) <= 0) or ((1-abs(f_s)) <= 0):
return np.ones_like(t)
q1 = (1-h_2)**2
if (q1 <= 0) or (q1 >=1): return np.ones_like(t)
q2 = (h_1-h_2)/(1-h_2)
if (q2 <= 0) or (q2 >=1): return np.ones_like(t)
k = np.sqrt(D)
q = (1+k)**2 - b**2
if q <= 0: return np.ones_like(t)
r_star = np.pi*W/np.sqrt(q)
q = 1-b**2*r_star**2
if q <= 0: return np.ones_like(t)
sini = np.sqrt(q)
ecc = f_c**2 + f_s**2
if ecc > 0.95 : return np.ones_like(t)
om = np.arctan2(f_s, f_c)*180/np.pi
c2 = 1 - h_1 + h_2
a2 = np.log2(c2/h_2)
z,m = t2z(t, T_0, P, sini, r_star, ecc, om, returnMask = True)
if False in np.isfinite(z): return np.ones_like(t)
# Set z values where planet is behind star to a big nominal value
z[m] = 100
return (qpower2(z, k, c2, a2)+l_3)/(1+l_3)
super(TransitModel, self).__init__(_transit_func, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
p = self.prefix
self.set_param_hint(f'{p}P', value=1, min=1e-15)
self.set_param_hint(f'{p}D', value=0.01, min=0, max=0.25)
self.set_param_hint(f'{p}W', value=0.1, min=0, max=0.3)
self.set_param_hint(f'{p}b', value=0.3, min=0, max=1.0)
self.set_param_hint(f'{p}f_c', value=0, min=-1, max=1, vary=False)
self.set_param_hint(f'{p}f_s', value=0, min=-1, max=1, vary=False)
expr = "{p:s}f_c**2 + {p:s}f_s**2".format(p=self.prefix)
self.set_param_hint(f'{p}e',min=0,max=1,expr=expr)
self.set_param_hint(f'{p}h_1', value=0.7224,min=0,max=1,vary=False)
self.set_param_hint(f'{p}h_2', value=0.6713,min=0,max=1,vary=False)
self.set_param_hint(f'{p}l_3', value=0,min=-0.99,max=1e6,vary=False)
expr = "(1-{p:s}h_2)**2".format(p=self.prefix)
self.set_param_hint(f'{p}q_1',min=0,max=1,expr=expr)
expr = "({p:s}h_1-{p:s}h_2)/(1-{p:s}h_2)".format(p=self.prefix)
self.set_param_hint(f'{p}q_2',min=0,max=1,expr=expr)
expr = "sqrt({p:s}D)".format(p=self.prefix)
        self.set_param_hint(f'{p}k', expr=expr, min=0, max=0.5)
expr ="sqrt((1+{p:s}k)**2-{p:s}b**2)/{p:s}W/pi".format(p=self.prefix)
self.set_param_hint(f'{p}aR',min=1, expr=expr)
expr = "0.013418*{p:s}aR**3/{p:s}P**2".format(p=self.prefix)
self.set_param_hint(f'{p}rho', min=0, expr = expr)
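# Minimal usage sketch for TransitModel (illustrative parameter values; the
# remaining parameters take the defaults set by the parameter hints above):
#   >>> tm = TransitModel()
#   >>> pars = tm.make_params(T_0=0.0, P=3.5, D=0.01, W=0.02, b=0.3)
#   >>> flux = tm.eval(pars, t=np.linspace(-0.05, 0.05, 1000))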
#----------------------
class EclipseModel(Model):
r"""Light curve model for the eclipse by a spherical star of a spherical
body (planet) with no limb darkening.
:param t: - independent variable (time)
:param T_0: - time of mid-transit
:param P: - orbital period
:param D: - (R_p/R_s)**2 = k**2
:param W: - (R_s/a)*sqrt((1+k)**2 - b**2)/pi
:param b: - a*cos(i)/R_s
:param L: - Depth of eclipse
:param f_c: - sqrt(ecc).cos(omega)
:param f_s: - sqrt(ecc).sin(omega)
:param a_c: - correction for light travel time across the orbit
:param l_3: - Third light
The transit depth, width and shape are parameterised by D, W and b. These
parameters are defined above in terms of the radii of the star and
planet, R_s and R_p, respectively, the semi-major axis, a, and the orbital
inclination, i. The eccentricity and longitude of periastron for the star's
orbit are e and omega, respectively. These are the same parameters used in
TransitModel. The flux level outside of eclipse is 1 and inside eclipse is
(1-L), i.e. L = F_planet/(F_star + F_planet), where the planet-star flux
ratio is F_planet/F_star = L/(1-L).
The apparent time of mid-eclipse includes the correction a_c for the light
travel time across the orbit, i.e., for a circular orbit the time of
mid-eclipse is (T_0 + 0.5*P) + a_c. **N.B.** a_c must have the same units
as P.
The following parameters are defined for convenience:
* k = R_p/R_s;
* aR = a/R_s;
* rho = 0.013418*aR**3/(P/d)**2.
**N.B.** the mean stellar density in solar units is rho, but only if the
mass ratio q = M_planet/M_star is q << 1.
Third light is a constant added to the light curve and the fluxes are
re-normalised, i.e. EclipseModel = (light_curve + l_3)/(1+l_3)
"""
def __init__(self, independent_vars=['t'], prefix='', nan_policy='raise',
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
def _eclipse_func(t, T_0, P, D, W, b, L, f_c, f_s, a_c, l_3):
if (D <= 0) or (D > 0.25) or (W <= 0) or (b < 0):
return np.ones_like(t)
if (L <= 0) or (L >= 1):
return np.ones_like(t)
if ((1-abs(f_c)) <= 0) or ((1-abs(f_s)) <= 0):
return np.ones_like(t)
k = np.sqrt(D)
q = (1+k)**2 - b**2
if q <= 0: return np.ones_like(t)
r_star = np.pi*W/np.sqrt(q)
q = 1-b**2*r_star**2
if q <= 0: return np.ones_like(t)
sini = np.sqrt(q)
ecc = f_c**2 + f_s**2
if ecc > 0.95 : return np.ones_like(t)
om = np.arctan2(f_s, f_c)*180/np.pi
z,m = t2z(t-a_c, T_0, P, sini, r_star, ecc, om, returnMask=True)
if False in np.isfinite(z): return np.ones_like(t)
# Set z values where star is behind planet to a large nominal value
z[~m] = 100
return (1 + L*(ueclipse(z, k)-1) + l_3)/(1+l_3)
super(EclipseModel, self).__init__(_eclipse_func, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
p = self.prefix
self.set_param_hint(f'{p}P', value=1, min=1e-15)
self.set_param_hint(f'{p}D', value=0.01, min=0, max=0.25)
self.set_param_hint(f'{p}W', value=0.1, min=0, max=0.3)
self.set_param_hint(f'{p}b', value=0.3, min=0, max=1.0)
self.set_param_hint(f'{p}L', value=0.001, min=0, max=1)
self.set_param_hint(f'{p}f_c', value=0, min=-1, max=1, vary=False)
self.set_param_hint(f'{p}f_s', value=0, min=-1, max=1, vary=False)
expr = "{p:s}f_c**2 + {p:s}f_s**2".format(p=self.prefix)
self.set_param_hint(f'{p}e',min=0,max=1,expr=expr)
self.set_param_hint(f'{p}a_c', value=0, min=0, vary=False)
self.set_param_hint(f'{p}l_3', value=0,min=-0.99,max=1e6,vary=False)
expr = "sqrt({prefix:s}D)".format(prefix=self.prefix)
self.set_param_hint(f'{p}k', expr=expr, min=0, max=1)
expr = "{prefix:s}L/{prefix:s}D".format(prefix=self.prefix)
self.set_param_hint(f'{p}J', expr=expr, min=0)
expr ="sqrt((1+{p:s}k)**2-{p:s}b**2)/{p:s}W/pi".format(p=self.prefix)
self.set_param_hint(f'{p}aR',min=1, expr=expr)
expr ="0.013418*{p:s}aR**3/{p:s}P**2".format(p=self.prefix)
self.set_param_hint(f'{p}rho', min=0, expr = expr)
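# Worked example of the L parameterisation above: a planet-star flux ratio
# F_planet/F_star = 1e-4 corresponds to an eclipse depth
# L = 1e-4/(1 + 1e-4) ~ 9.999e-5, and conversely L/(1-L) recovers the
# flux ratio.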
#----------------------
class FactorModel(Model):
r"""Flux scaling and trend factor model
f = c*(1 + dfdt*dt + d2fdt2*dt**2 + dfdbg*bg(t) +
dfdcontam*contam(t) + dfdsmear*smear(t) +
ramp*deltaT(t)/1e6 +
dfdx*dx(t) + dfdy*dy(t) +
d2fdx2*dx(t)**2 + d2fdy2*dy(t)**2 + d2fdxdy*dx(t)*dy(t) +
dfdsinphi*sin(phi(t)) + dfdcosphi*cos(phi(t)) +
dfdsin2phi*sin(2.phi(t)) + dfdcos2phi*cos(2.phi(t)) +
dfdsin3phi*sin(3.phi(t)) + dfdcos3phi*cos(3.phi(t)) + ..)
The detrending coefficients dfdx, etc. are 0 and fixed by default. If any
of the coefficients dfdx, d2fdxdy or d2fdx2 is not 0, a function to
calculate the x-position offset as a function of time, dx(t), must be
passed as a keyword argument, and similarly for the y-position offset,
dy(t). For detrending against the spacecraft roll angle, phi(t), the
functions to be provided as keyword arguments are sinphi(t) and
cosphi(t). The linear trend dfdbg is proportional to the estimated
background flux in the aperture, bg(t). The linear trend dfdcontam is
proportional to the estimated contamination in the aperture, contam(t).
The linear trend dfdsmear is proportional to the estimated smearing
correction in the aperture, smear(t). The time trend described by dfdt
and d2fdt2 is calculated using the variable dt = t - median(t).
See Dataset.lmfit() for details of the extra_decorr_vectors option that is
used to define extra_basis_funcs.
"""
def __init__(self, independent_vars=['t'], prefix='', nan_policy='raise',
dx=None, dy=None, sinphi=None, cosphi=None, bg=None,
contam=None, smear=None, deltaT=None,
extra_basis_funcs=None, **kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
def factor(t, c=1.0,dfdt=0, d2fdt2=0, dfdbg=0,
dfdcontam=0, dfdsmear=0, ramp=0,
dfdx=0, dfdy=0, d2fdxdy=0, d2fdx2=0, d2fdy2=0,
dfdcosphi=0, dfdsinphi=0, dfdcos2phi=0, dfdsin2phi=0,
dfdcos3phi=0, dfdsin3phi=0, **kwargs):
dt = t - np.median(t)
trend = 1 + dfdt*dt + d2fdt2*dt**2
if dfdbg != 0:
trend += dfdbg*self.bg(t)
if dfdcontam != 0:
trend += dfdcontam*self.contam(t)
if dfdsmear != 0:
trend += dfdsmear*self.smear(t)
if ramp != 0:
trend += ramp*deltaT(t)/1e6
if dfdx != 0 or d2fdx2 != 0:
trend += dfdx*self.dx(t) + d2fdx2*self.dx(t)**2
if dfdy != 0 or d2fdy2 != 0:
trend += dfdy*self.dy(t) + d2fdy2*self.dy(t)**2
if d2fdxdy != 0 :
trend += d2fdxdy*self.dx(t)*self.dy(t)
if (dfdsinphi != 0 or dfdsin2phi != 0 or dfdsin3phi != 0 or
dfdcosphi != 0 or dfdcos2phi != 0 or dfdcos3phi != 0):
sinphit = self.sinphi(t)
cosphit = self.cosphi(t)
trend += dfdsinphi*sinphit + dfdcosphi*cosphit
if dfdsin2phi != 0:
trend += dfdsin2phi*(2*sinphit*cosphit)
if dfdcos2phi != 0:
trend += dfdcos2phi*(2*cosphit**2 - 1)
if dfdsin3phi != 0:
trend += dfdsin3phi*(3*sinphit - 4* sinphit**3)
if dfdcos3phi != 0:
trend += dfdcos3phi*(4*cosphit**3 - 3*cosphit)
for p in self.extra_basis_funcs:
trend += kwargs['dfd'+p]*self.extra_basis_funcs[p](t)
return c*trend
super(FactorModel, self).__init__(factor, **kwargs)
self.bg = bg
self.contam = contam
self.smear = smear
self.dx = dx
self.dy = dy
self.sinphi = sinphi
self.cosphi = cosphi
self.set_param_hint('c', min=0)
for p in ['dfdt', 'd2fdt2', 'dfdbg', 'dfdcontam', 'dfdsmear',
'dfdx', 'dfdy', 'd2fdx2', 'd2fdxdy', 'd2fdy2', 'ramp',
'dfdsinphi', 'dfdcosphi', 'dfdcos2phi', 'dfdsin2phi',
'dfdcos3phi', 'dfdsin3phi']:
self.set_param_hint(p, value=0, vary=False)
# Extra basis functions
if extra_basis_funcs == None:
self.extra_basis_funcs = {}
else:
self.extra_basis_funcs = extra_basis_funcs
for p in self.extra_basis_funcs:
self.set_param_hint('dfd'+p, value=0, vary=False)
def guess(self, data, **kwargs):
r"""Estimate initial model parameter values from data."""
pars = self.make_params()
pars['%sc' % self.prefix].set(value=data.median())
        for p in ['dfdt', 'd2fdt2', 'dfdbg', 'dfdcontam', 'dfdsmear',
'dfdx', 'dfdy', 'd2fdx2', 'd2fdy2',
'dfdsinphi', 'dfdcosphi', 'dfdcos2phi', 'dfdsin2phi',
'dfdcos3phi', 'dfdsin3phi']:
pars['{}{}'.format(self.prefix, p)].set(value = 0.0, vary=False)
return update_param_vals(pars, self.prefix, **kwargs)
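# Sketch: detrending is typically combined multiplicatively with a transit
# model via lmfit model composition (interp_dx and interp_dy are assumed
# callables, e.g. scipy.interpolate.interp1d objects, that return the
# position offsets at time t):
#   >>> model = FactorModel(dx=interp_dx, dy=interp_dy) * TransitModel()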
#----------------------
class ThermalPhaseModel(Model):
r"""Thermal phase model for a tidally-locked planet
.. math::
a_{th}[1-\cos(\phi)]/2 + b_{th}[1+\sin(\phi)]/2 + c_{th},
where :math:`\phi = 2\pi(t-T_0)/P`
:param t: - independent variable (time)
:param T_0: - time of inferior conjunction (mid-transit)
:param P: - orbital period
:param a_th: - coefficient of cosine-like term
:param b_th: - coefficient of sine-like term
:param c_th: - constant term (minimum flux)
The following parameters are defined for convenience.
* A = sqrt(a_th**2 + b_th**2), peak-to-trough amplitude of the phase curve
* Fmax = c_th + (a_th + b_th + A)/2, flux at the maximum of the phase curve
* ph_max = arctan2(b_th,-a_th)/(2*pi) = phase at maximum flux
"""
def __init__(self, independent_vars=['t'], prefix='', nan_policy='raise',
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
def _thermal_phase(t, T_0, P, a_th, b_th, c_th):
phi = 2*np.pi*(t-T_0)/P
return a_th*(1-np.cos(phi))/2 + b_th*(1+np.sin(phi))/2 + c_th
super(ThermalPhaseModel, self).__init__(_thermal_phase, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
p = self.prefix
self.set_param_hint(f'{p}P', min=1e-15)
self.set_param_hint(f'{p}a_th', value=0)
self.set_param_hint(f'{p}b_th', value=0)
self.set_param_hint(f'{p}c_th', value=0, min=0)
expr="hypot({p:s}a_th,{p:s}b_th)".format(p=self.prefix)
self.set_param_hint(f'{p}A', expr=expr)
expr="{p:s}c_th+({p:s}a_th+{p:s}b_th+{p:s}A)/2".format(p=self.prefix)
self.set_param_hint(f'{p}Fmax', expr=expr, min=0)
expr = "{p:s}Fmax - {p:s}A".format(p=self.prefix)
self.set_param_hint(f'{p}Fmin', expr=expr, min=0)
expr = "arctan2({p:s}b_th,-{p:s}a_th)/(2*pi)".format(p=self.prefix)
self.set_param_hint(f'{p}ph_max', expr=expr)
__init__.__doc__ = COMMON_INIT_DOC
#----------------------
class ReflectionModel(Model):
r"""Reflected stellar light from a planet with a Lambertian phase function.
The fraction of the stellar flux reflected from the planet of radius
:math:`R_p` at a distance :math:`r` from the star and viewed at phase
angle :math:`\beta` is
.. math::
A_g (R_p/r)^2 \times [\sin(\beta) + (\pi-\beta)\cos(\beta)]/\pi
The eccentricity and longitude of periastron for the planet's orbit are
ecc and omega, respectively.
:param t: - independent variable (time)
:param T_0: - time of inferior conjunction (mid-transit)
:param P: - orbital period
:param A_g: - geometric albedo
:param r_p: - R_p/a, where a is the semi-major axis.
:param f_c: - sqrt(ecc).cos(omega)
:param f_s: - sqrt(ecc).sin(omega)
:param sini: - sin(inclination)
"""
def __init__(self, independent_vars=['t'], prefix='', nan_policy='raise',
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
def _reflection(t, T_0, P, A_g, r_p, f_c, f_s, sini):
ecc = f_c**2 + f_s**2
if ecc > 0.95 : return np.zeros_like(t)
om = np.arctan2(f_s, f_c)*180/np.pi
x,y,z = xyz_planet(t, T_0, P, sini, ecc, om)
r = np.sqrt(x**2+y**2+z**2)
beta = np.arccos(-z/r)
Phi_L = (np.sin(beta) + (np.pi-beta)*np.cos(beta) )/np.pi
return A_g*(r_p/r)**2*Phi_L
super(ReflectionModel, self).__init__(_reflection, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
p = self.prefix
self.set_param_hint(f'{p}P',value=1, min=1e-15)
self.set_param_hint(f'{p}A_g', value=0.5, min=0, max=1)
self.set_param_hint(f'{p}r_p', min=0, max=1)
self.set_param_hint(f'{p}f_c', value=0, vary=False, min=-1, max=1)
self.set_param_hint(f'{p}f_s', value=0, vary=False, min=-1, max=1)
self.set_param_hint(f'{p}sini', value=1, vary=False, min=0, max=1)
__init__.__doc__ = COMMON_INIT_DOC
#----------------------
class RVModel(Model):
r"""Radial velocity in a Keplerian orbit with post-Newtonion corrections.
The post-Newtonion corrections accounted for in this model are: the light
travel time across the orbit, the transverse Doppler effect, and the
gravitational redshift.
Set the mass ratio q=0 to ignore post-Newtonion corrections.
:param t: - independent variable (time)
:param T_0: - time of inferior conjunction for the companion (mid-transit)
:param P: - orbital period
:param V_0: - radial velocity of the centre-of-mass
:param K: - semi-amplitude of spectroscopic orbit
:param f_c: - sqrt(ecc).cos(omega)
:param f_s: - sqrt(ecc).sin(omega)
:param sini: - sine of the orbital inclination
:param q: - M_companion/M_star (or 0 for pure Keplerian orbit)
The equations for post-Newtonian effects can be found in Konacki et al.
(2010ApJ...719.1293K) or Sybilski et al. (2013MNRAS.431.2024S) but note
that the sin term in the equation for the light travel time should be
squared in these equations.
"""
def __init__(self, independent_vars=['t'], prefix='', nan_policy='raise',
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
def _rv(t, T_0, P, V_0, K, f_c, f_s, sini, q):
ecc = f_c**2 + f_s**2
if ecc > 0.95 : return np.zeros_like(t)
omega = np.arctan2(f_s, f_c)
omdeg = omega*180/np.pi
if q == 0:
return V_0 + vrad(t, T_0, P, K, ecc, omdeg, sini, primary=True)
tp = tzero2tperi(T_0,P,sini,ecc,omdeg)
M = 2*np.pi*(t-tp)/P
E = esolve(M,ecc)
nu = 2*np.arctan(np.sqrt((1+ecc)/(1-ecc))*np.tan(E/2))
vr_nonrel = V_0 + K*(np.cos(nu+omega) + ecc*np.cos(omega))
delta_LT = K**2*np.sin(nu+omega)**2*(1+ecc*np.cos(nu))/c_light
delta_TD = K**2*(1 + ecc*np.cos(nu) - (1-ecc**2)/2)/c_light/sini**2
delta_GR = K**2*(1+1/q)*(1+ecc*np.cos(nu))/c_light/sini**2
return vr_nonrel + delta_LT + delta_TD + delta_GR
super(RVModel, self).__init__(_rv, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
p = self.prefix
self.set_param_hint(f'{p}P', min=1e-15)
self.set_param_hint(f'{p}K', min=1e-15)
self.set_param_hint(f'{p}f_c', value=0, vary=False, min=-1, max=1)
self.set_param_hint(f'{p}f_s', value=0, vary=False, min=-1, max=1)
self.set_param_hint(f'{p}q', value=0, vary=False, min=0)
expr = "{p:s}f_c**2 + {p:s}f_s**2".format(p=self.prefix)
self.set_param_hint(f'{p}e',min=0,max=1,expr=expr)
self.set_param_hint(f'{p}sini', value=1, vary=False, min=0, max=1)
expr = "180*atan2({p:s}f_s, {p:s}f_c)/pi".format(p=self.prefix)
self.set_param_hint(f'{p}omega', expr=expr)
__init__.__doc__ = COMMON_INIT_DOC
#----------------------
class RVCompanion(Model):
r"""Radial velocity in a Keplerian orbit for the companion
The post-Newtonion corrections accounted for in this model are: the light
travel time across the orbit, the transverse Doppler effect, and the
gravitational redshift.
In the definitions of f_c and f_s, omega is the longitude of periastron
for the primary/host star, not the companion.
Set the mass ratio q=0 to ignore post-Newtonion corrections.
:param t: - independent variable (time)
:param T_0: - time of inferior conjunction for the companion (mid-transit)
:param P: - orbital period
:param V_0: - radial velocity of the centre-of-mass
:param K: - semi-amplitude of spectroscopic orbit
:param f_c: - sqrt(ecc).cos(omega)
:param f_s: - sqrt(ecc).sin(omega)
:param sini: - sine of the orbital inclination
:param q: - M_companion/M_star (or 0 for pure Keplerian orbit)
"""
def __init__(self, independent_vars=['t'], prefix='', nan_policy='raise',
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
def _rv(t, T_0, P, V_0, K, f_c, f_s, sini, q):
ecc = f_c**2 + f_s**2
if ecc > 0.95 : return np.zeros_like(t)
omega = np.arctan2(f_s, f_c)
omdeg = omega*180/np.pi
if q == 0:
return V_0 + vrad(t, T_0, P, K, ecc, omdeg, sini, primary=False)
tp = tzero2tperi(T_0,P,sini,ecc,omdeg)
omega += np.pi
M = 2*np.pi*(t-tp)/P
E = esolve(M,ecc)
nu = 2*np.arctan(np.sqrt((1+ecc)/(1-ecc))*np.tan(E/2))
vr_nonrel = V_0 + K*(np.cos(nu+omega) + ecc*np.cos(omega))
delta_LT = K**2*np.sin(nu+omega)**2*(1+ecc*np.cos(nu))/c_light
delta_TD = K**2*(1 + ecc*np.cos(nu) - (1-ecc**2)/2)/c_light/sini**2
delta_GR = K**2*(1+q)*(1+ecc*np.cos(nu))/c_light/sini**2
return vr_nonrel + delta_LT + delta_TD + delta_GR
super(RVCompanion, self).__init__(_rv, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
p = self.prefix
self.set_param_hint(f'{p}P', min=1e-15)
self.set_param_hint(f'{p}K', min=1e-15)
self.set_param_hint(f'{p}q', value=0, vary=False, min=0)
self.set_param_hint(f'{p}f_c', value=0, vary=False, min=-1, max=1)
self.set_param_hint(f'{p}f_s', value=0, vary=False, min=-1, max=1)
self.set_param_hint(f'{p}sini', value=1, vary=False, min=0, max=1)
expr = "{p:s}f_c**2 + {p:s}f_s**2".format(p=self.prefix)
        self.set_param_hint(f'{p}e', expr=expr, min=0, max=1)
        expr = "180*atan2({p:s}f_s, {p:s}f_c)/pi".format(p=self.prefix)
        self.set_param_hint(f'{p}omega', expr=expr)
__init__.__doc__ = COMMON_INIT_DOC
#----------------------
class PlanetModel(Model):
r"""Light curve model for a transiting exoplanet including transits,
eclipses, and a thermal phase curve for the planet with an offset.
:param t: - independent variable (time)
:param T_0: - time of mid-transit
:param P: - orbital period
:param D: - (R_2/R_1)**2 = k**2
:param W: - (R_1/a)*sqrt((1+k)**2 - b**2)/pi
:param b: - a*cos(i)/R_1
:param F_min: - minimum flux in the thermal phase model
:param F_max: - maximum flux in the thermal phase model
:param ph_off: - offset phase in the thermal phase model
:param f_c: - sqrt(ecc).cos(omega)
:param f_s: - sqrt(ecc).sin(omega)
:param h_1: - I(0.5) = 1 - c*(1-0.5**alpha)
:param h_2: - I(0.5) - I(0) = c*0.5**alpha
:param a_c: - correction for light travel time across the orbit
:param l_3: - Third light
The flux level from the star is 1 and is assumed to be constant.
The thermal phase curve from the planet is approximated by a cosine
function with amplitude A=F_max-F_min plus the minimum flux, F_min, i.e.,
the maximum flux is F_max = F_min+A, and this occurs at phase (ph_off+0.5)
relative to the time of mid-transit, i.e.,
.. math::
f_{\rm th} = F_{\rm min} + A[1-\cos(\phi-\phi_{\rm off})]/2
where :math:`\phi = 2\pi(t-T_0)/P` and
:math:`\phi_{\rm off} = 2\pi\,{\rm ph\_off}`.
The transit depth, width and shape are parameterised by D, W and b. These
parameters are defined above in terms of the radii of the star and planet,
R_1 and R_2, the semi-major axis, a, and the orbital inclination, i. This
model assumes R_1 >> R_2, i.e., k=R_2/R_1 <~0.2. The eccentricity and longitude
of periastron for the star's orbit are e and omega, respectively. These
are the same parameters used in TransitModel. The eclipse of the planet
assumes a uniform flux distribution.
The apparent time of mid-eclipse includes the correction a_c for the
light travel time across the orbit, i.e., for a circular orbit the time of
mid-eclipse is (T_0 + 0.5*P) + a_c.
**N.B.** a_c must have the same units as P.
Stellar limb-darkening is described by the power-2 law:
.. math::
I(\mu) = 1 - c (1 - \mu^\alpha)
The following parameters are defined for convenience:
* k = R_2/R_1;
* aR = a/R_1;
* A = F_max - F_min = amplitude of thermal phase effect.
* rho = 0.013418*aR**3/(P/d)**2.
**N.B.** the mean stellar density in solar units is rho, but only if the
mass ratio q = M_planet/M_star is q << 1.
Third light is a constant added to the light curve and the fluxes are
re-normalised, i.e. PlanetModel = (light_curve + l_3)/(1+l_3)
"""
def __init__(self, independent_vars=['t'], prefix='', nan_policy='raise',
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
def _planet_func(t, T_0, P, D, W, b, F_min, F_max, ph_off, f_c, f_s,
h_1, h_2, a_c, l_3):
if (D <= 0) or (D > 0.25) or (W <= 0) or (b < 0):
return np.ones_like(t)
if (F_min < 0):
return np.ones_like(t)
if ((1-abs(f_c)) <= 0) or ((1-abs(f_s)) <= 0):
return np.ones_like(t)
q1 = (1-h_2)**2
if (q1 <= 0) or (q1 >=1): return np.ones_like(t)
q2 = (h_1-h_2)/(1-h_2)
if (q2 <= 0) or (q2 >=1): return np.ones_like(t)
c2 = 1 - h_1 + h_2
a2 = np.log2(c2/h_2)
k = np.sqrt(D)
q = (1+k)**2 - b**2
if q <= 0: return np.ones_like(t)
r_star = np.pi*W/np.sqrt(q)
q = 1-b**2*r_star**2
if q <= 0: return np.ones_like(t)
sini = np.sqrt(q)
ecc = f_c**2 + f_s**2
if ecc > 0.95 : return np.ones_like(t)
om = np.arctan2(f_s, f_c)*180/np.pi
# Star-planet apparent separation and mask eclipses/transits
z,m = t2z(t, T_0, P, sini, r_star, ecc, om, returnMask=True)
if False in np.isfinite(z): return np.ones_like(t)
# Set z values where planet is behind star 1 to a large nominal
# value for calculation of the transit
zt = z + 0 # copy
zt[m] = 100
# Flux from the star including transits
f_star = qpower2(zt, k, c2, a2)
# thermal phase effect
A = F_max - F_min
f_th = F_min + A*(1-np.cos(2*np.pi*((t-T_0)/P-ph_off)))/2
# Flux from planet including eclipses
z[~m] = 100
f_planet = f_th * ueclipse(z, k)
return (f_star + f_planet + l_3)/(1+l_3)
super(PlanetModel, self).__init__(_planet_func, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
p = self.prefix
self.set_param_hint(f'{p}P', value=1, min=1e-15)
self.set_param_hint(f'{p}D', value=0.01, min=0, max=0.25)
self.set_param_hint(f'{p}W', value=0.1, min=0, max=0.3)
self.set_param_hint(f'{p}b', value=0.3, min=0, max=1.0)
self.set_param_hint(f'{p}F_min', value=0, min=0)
self.set_param_hint(f'{p}F_max', value=0, min=0)
self.set_param_hint(f'{p}ph_off', min=-0.5, max=0.5)
self.set_param_hint(f'{p}f_c', value=0, min=-1, max=1, vary=False)
self.set_param_hint(f'{p}f_s', value=0, min=-1, max=1, vary=False)
expr = "{p:s}f_c**2 + {p:s}f_s**2".format(p=self.prefix)
self.set_param_hint(f'{p}e',min=0,max=1,expr=expr)
self.set_param_hint(f'{p}h_1', value=0.7224, min=0, max=1, vary=False)
self.set_param_hint(f'{p}h_2', value=0.6713, min=0, max=1, vary=False)
expr = "(1-{p:s}h_2)**2".format(p=self.prefix)
self.set_param_hint(f'{p}q_1',min=0,max=1,expr=expr)
expr = "({p:s}h_1-{p:s}h_2)/(1-{p:s}h_2)".format(p=self.prefix)
self.set_param_hint(f'{p}q_2',min=0,max=1,expr=expr)
self.set_param_hint(f'{p}a_c', value=0, min=0, vary=False)
self.set_param_hint(f'{p}l_3', value=0,min=-0.99,max=1e6,vary=False)
expr = "sqrt({prefix:s}D)".format(prefix=self.prefix)
self.set_param_hint(f'{p}k', expr=expr, min=0, max=1)
expr ="sqrt((1+{p:s}k)**2-{p:s}b**2)/{p:s}W/pi".format(p=self.prefix)
self.set_param_hint(f'{p}aR',min=1, expr=expr)
expr = "{prefix:s}F_max-{prefix:s}F_min".format(prefix=self.prefix)
self.set_param_hint(f'{p}A', expr=expr)
expr = "0.013418*{p:s}aR**3/{p:s}P**2".format(p=self.prefix)
self.set_param_hint(f'{p}rho', min=0, expr = expr)
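# A minimal usage sketch for PlanetModel via the standard lmfit Model
# interface; the parameter values below are illustrative only.
#
#   import numpy as np
#   t = np.linspace(-0.05, 0.55, 2000)  # days from mid-transit for P=0.5 d
#   m = PlanetModel()
#   pars = m.make_params(T_0=0, P=0.5, D=0.01, W=0.1, b=0.3,
#                        F_min=0, F_max=1e-4, ph_off=0, f_c=0, f_s=0,
#                        h_1=0.7224, h_2=0.6713, a_c=0, l_3=0)
#   flux = m.eval(pars, t=t)  # transits, eclipses and the phase curve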
#----------------------
class EBLMModel(Model):
r"""Light curve model for the mutual eclipses by spherical stars in an
eclipsing binary with one low-mass companion, e.g., F/G-star + M-dwarf.
:param t: - independent variable (time)
:param T_0: - time of mid-transit
:param P: - orbital period
:param D: - (R_2/R_1)**2 = k**2
:param W: - (R_1/a)*sqrt((1+k)**2 - b**2)/pi
:param b: - a*cos(i)/R_1
:param L: - Depth of eclipse
:param f_c: - sqrt(ecc).cos(omega)
:param f_s: - sqrt(ecc).sin(omega)
:param h_1: - I(0.5) = 1 - c*(1-0.5**alpha)
:param h_2: - I(0.5) - I(0) = c*0.5**alpha
:param a_c: - correction for light travel time across the orbit
:param l_3: - Third light
The transit depth, width and shape are parameterised by D, W and b. These
parameters are defined above in terms of the radii of the stars, R_1 and
R_2, the semi-major axis, a, and the orbital inclination, i. This model
assumes R_1 >> R_2, i.e., k=R_2/R_1 <~0.2. The eccentricity and longitude
of periastron for the star's orbit are e and omega, respectively. These
are the same parameters used in TransitModel. The flux level outside of
eclipse is 1 and inside eclipse is (1-L). The apparent time of mid-eclipse
includes the correction a_c for the light travel time across the orbit,
i.e., for a circular orbit the time of mid-eclipse is (T_0 + 0.5*P) + a_c.
**N.B.** a_c must have the same units as P.
The power-2 law is used to model the limb-darkening of star 1.
Limb-darkening on star 2 is ignored.
The following parameters are defined for convenience:
* k = R_2/R_1;
* aR = a/R_1;
* J = L/D (surface brightness ratio).
The flux level outside of eclipse is 1 and inside eclipse is (1-L), i.e.
L = F_2/(F_1 + F_2), where the flux ratio is F_2/F_1 = L/(1-L).
Third light is a constant added to the light curve and the fluxes are
re-normalised, i.e. EBLMModel = (light_curve + l_3)/(1+l_3)
"""
def __init__(self, independent_vars=['t'], prefix='', nan_policy='raise',
**kwargs):
kwargs.update({'prefix': prefix, 'nan_policy': nan_policy,
'independent_vars': independent_vars})
def _eblm_func(t, T_0, P, D, W, b, L, f_c, f_s, h_1, h_2, a_c, l_3):
if (D <= 0) or (D > 0.25) or (W <= 0) or (b < 0):
return np.ones_like(t)
if (L <= 0) or (L >= 1):
return np.ones_like(t)
if ((1-abs(f_c)) <= 0) or ((1-abs(f_s)) <= 0):
return np.ones_like(t)
q1 = (1-h_2)**2
if (q1 <= 0) or (q1 >=1): return np.ones_like(t)
q2 = (h_1-h_2)/(1-h_2)
if (q2 <= 0) or (q2 >=1): return np.ones_like(t)
c2 = 1 - h_1 + h_2
a2 = np.log2(c2/h_2)
k = np.sqrt(D)
q = (1+k)**2 - b**2
if q <= 0: return np.ones_like(t)
r_star = np.pi*W/np.sqrt(q)
q = 1-b**2*r_star**2
if q <= 0: return np.ones_like(t)
sini = np.sqrt(q)
ecc = f_c**2 + f_s**2
if ecc > 0.95 : return np.ones_like(t)
om = np.arctan2(f_s, f_c)*180/np.pi
z,m = t2z(t, T_0, P, sini, r_star, ecc, om, returnMask=True)
if False in np.isfinite(z): return np.ones_like(t)
# Set z values where star 2 is behind star 1 to a large nominal
# value for calculation of the transit
z[m] = 100
lc = qpower2(z, k, c2, a2)
z,m = t2z(t-a_c, T_0, P, sini, r_star, ecc, om, returnMask=True)
if False in np.isfinite(z): return np.ones_like(t)
# Set z values where star 1 is behind star 2 to a large nominal
# value for calculation of the eclipse
z[~m] = 100
return ((lc + L*ueclipse(z, k))/(1+L) + l_3)/(1+l_3)
super(EBLMModel, self).__init__(_eblm_func, **kwargs)
self._set_paramhints_prefix()
def _set_paramhints_prefix(self):
p = self.prefix
self.set_param_hint(f'{p}P', value=1, min=1e-15)
self.set_param_hint(f'{p}D', value=0.01, min=0, max=0.25)
self.set_param_hint(f'{p}W', value=0.1, min=0, max=0.3)
self.set_param_hint(f'{p}b', value=0.3, min=0, max=1.0)
self.set_param_hint(f'{p}L', value=0.001, min=0, max=1)
self.set_param_hint(f'{p}f_c', value=0, min=-1, max=1, vary=False)
self.set_param_hint(f'{p}f_s', value=0, min=-1, max=1, vary=False)
expr = "{p:s}f_c**2 + {p:s}f_s**2".format(p=self.prefix)
self.set_param_hint(f'{p}e',min=0,max=1,expr=expr)
self.set_param_hint(f'{p}h_1', value=0.7224,min=0,max=1,vary=False)
self.set_param_hint(f'{p}h_2', value=0.6713,min=0,max=1,vary=False)
expr = "(1-{p:s}h_2)**2".format(p=self.prefix)
self.set_param_hint(f'{p}q_1',min=0,max=1,expr=expr)
expr = "({p:s}h_1-{p:s}h_2)/(1-{p:s}h_2)".format(p=self.prefix)
self.set_param_hint(f'{p}q_2',min=0,max=1,expr=expr)
self.set_param_hint(f'{p}a_c', value=0, min=0, vary=False)
self.set_param_hint(f'{p}l_3', value=0,min=-0.99,max=1e6,vary=False)
expr = "sqrt({prefix:s}D)".format(prefix=self.prefix)
self.set_param_hint(f'{p}k', expr=expr, min=0, max=1)
expr = "{prefix:s}L/{prefix:s}D".format(prefix=self.prefix)
self.set_param_hint(f'{p}J', expr=expr, min=0)
expr ="sqrt((1+{p:s}k)**2-{p:s}b**2)/{p:s}W/pi".format(p=self.prefix)
self.set_param_hint(f'{p}aR',min=1, expr=expr)
expr ="0.013418*{p:s}aR**3/{p:s}P**2".format(p=self.prefix)
self.set_param_hint(f'{p}rho', min=0, expr = expr)
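# A minimal sketch (illustrative values) showing how the derived parameter
# hints set in _set_paramhints_prefix() propagate through make_params(),
# e.g. J = L/D and k = sqrt(D).
#
#   m = EBLMModel()
#   pars = m.make_params(T_0=0, P=2.5, D=0.04, W=0.02, b=0.2, L=0.002,
#                        f_c=0, f_s=0, h_1=0.7224, h_2=0.6713,
#                        a_c=0, l_3=0)
#   print(pars['J'])  # surface brightness ratio, L/D = 0.05
#   print(pars['k'])  # radius ratio, sqrt(D) = 0.2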
#----------------------
class Priors(OrderedDict):
"""An ordered dictionary of all the Prior objects required to evaluate the
log-value of the prior for Bayesian model fitting.
All values of a Priors() instance must be Prior objects.
A Priors() instance includes an asteval interpreter used for
evaluation of constrained Parameters.
ToDo: copying, pickling and serialization
"""
def __init__(self, usersyms=None):
"""
Arguments
---------
usersyms : dictionary of symbols to add to the
:class:`asteval.Interpreter`.
"""
super(Priors, self).__init__()
self._asteval = Interpreter(usersyms=usersyms)
#----------------------
class Prior(object):
"""A Prior is an object that can be used in the calculation of the
log-likelihood function for Bayesian model fitting methods.
A Prior has a `name` attribute that corresponds to the name of one of the
parameters in the model, i.e., there must be a corresponding Parameter
object in the model with the same name. The log-value of the prior for a
given parameter value is evaluated using the mathematical expression
provided in `expr`, e.g., for a Gaussian with mean mu and standard
deviation sigma, the contribution to the total log-likelihood for a
parameter with value x is -0.5*(mu-x)**2/sigma**2. The constants mu and
sigma are hyper-parameters, i.e., parameters of the prior model, not of
the data model.
"""
def __init__(self, name=None, expr=None, hyper=None):
"""
Parameters
----------
name : str
Name of the Parameter to which the Prior is applied.
expr : str
Mathematical expression used to evaluate the prior log-value
hyper : dict
A dictionary of hyper-parameters.
"""
self.name = name
self.expr = expr
self.hyper = hyper
def __repr__(self):
"""Return printable representation of a Parameter object."""
return "<Prior {}> : {}".format(self.name, self.expr)
# pycheops-master/pycheops/multivisit.py
# -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
MultiVisit
==========
Object class for analysis of multiple data sets
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from glob import glob
from .dataset import Dataset
from .starproperties import StarProperties
import re
import pickle
from warnings import warn
from .dataset import _kw_to_Parameter, _log_prior
from .dataset import _make_interp
from lmfit import Parameters, Parameter
from lmfit import fit_report as lmfit_report
from lmfit import __version__ as _lmfit_version_
from . import __version__
from lmfit.models import ExpressionModel, Model
from lmfit.minimizer import MinimizerResult
from .models import TransitModel, FactorModel, EclipseModel, EBLMModel
from .models import PlanetModel
from celerite2.terms import Term, SHOTerm
from celerite2 import GaussianProcess
from .funcs import rhostar, massradius, eclipse_phase
from uncertainties import UFloat, ufloat
from emcee import EnsembleSampler
import corner
from sys import stdout
import matplotlib.pyplot as plt
from collections import OrderedDict
from lmfit.printfuncs import gformat
from copy import copy, deepcopy
from .utils import phaser, lcbin
from scipy.stats import iqr
from astropy.time import Time
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import SkyCoord
import cdspyreadme
import os
# Iteration limit for initialisation of walkers
_ITMAX_ = 999
#--------
class CosineTerm(Term):
def __init__(self, omega_j, sigma_j):
self.omega_j = omega_j
self.sigma_j = sigma_j
def get_coefficients(self):
ar = np.empty(0)
cr = np.empty(0)
ac = np.array([self.sigma_j])
bc = np.zeros(1)
cc = np.zeros(1)
dc = np.array([self.omega_j])
return (ar, cr, ac, bc, cc, dc)
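# A minimal sketch (illustrative values) of the "unroll" kernel built from
# CosineTerm harmonics of the roll-angle frequency, as constructed per
# visit in MultiVisit.__run_emcee__ below.
#
#   Omega = 2*np.pi/(99/1440)  # roll period of ~99 min, in radians/day
#   fluxrms = 1e-3             # amplitude scale, e.g. the light-curve rms
#   kernel = CosineTerm(omega_j=Omega, sigma_j=fluxrms)
#   for j in range(2, 4):      # harmonics up to nroll=3
#       kernel = kernel + CosineTerm(omega_j=j*Omega, sigma_j=fluxrms)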
#--------
SineModel = ExpressionModel('sin(2*pi*(x-x0)/P)')
#--------
# Parameter delta_t needed here to cope with change of time system between
# Dataset and Multivisit
def _glint_func(t, glint_scale, f_theta=None, f_glint=None, delta_t=None):
glint = f_glint(f_theta(t-delta_t))
return glint_scale * glint
#--------
def _make_labels(plotkeys, d0, extra_labels):
labels = []
r = re.compile('dfd(.*)_([0-9][0-9])')
r2 = re.compile('d2fd(.*)2_([0-9][0-9])')
rt = re.compile('ttv_([0-9][0-9])')
rr = re.compile('ramp_([0-9][0-9])')
rl = re.compile('L_([0-9][0-9])')
rc = re.compile('c_([0-9][0-9])')
for key in plotkeys:
if key in extra_labels.keys():
labels.append(extra_labels[key])
elif key == 'T_0':
labels.append(r'T$_0-{:.0f}$'.format(d0))
elif key == 'h_1':
labels.append(r'$h_1$')
elif key == 'h_2':
labels.append(r'$h_2$')
elif key == 'f_c':
labels.append(r'$f_c$')
elif key == 'f_s':
labels.append(r'$f_s$')
elif key == 'l_3':
labels.append(r'$\ell_3$')
elif r.match(key):
p,n = r.match(key).group(1,2)
p = p.replace('_', r'\_')
labels.append(r'$df\,/\,d{}_{{{}}}$'.format(p,n))
elif r2.match(key):
p,n = r2.match(key).group(1,2)
p = p.replace('_', r'\_')
labels.append(r'$d^2f\,/\,d{}^2_{{{}}}$'.format(p,n))
elif rt.match(key):
n = rt.match(key).group(1)
labels.append(r'$\Delta\,T_{{{}}}$'.format(n))
elif rr.match(key):
n = rr.match(key).group(1)
labels.append(r'$df\,/\,d\Delta\,T_{{{}}}$'.format(n))
elif rl.match(key):
n = rl.match(key).group(1)
labels.append(r'$L_{{{}}}$'.format(n))
elif rc.match(key):
n = rc.match(key).group(1)
labels.append(r'$c_{{{}}}$'.format(n))
elif key == 'log_sigma_w':
labels.append(r'$\log\sigma_w$')
elif key == 'log_omega0':
labels.append(r'$\log\omega_0$')
elif key == 'log_S0':
labels.append(r'$\log{\rm S}_0$')
elif key == 'log_Q':
labels.append(r'$\log{\rm Q}$')
elif key == 'logrho':
labels.append(r'$\log\rho_{\star}$')
elif key == 'aR':
labels.append(r'${\rm a}\,/\,{\rm R}_{\star}$')
elif key == 'sini':
labels.append(r'$\sin i$')
else:
labels.append(key)
return labels
class MultiVisit(object):
"""
CHEOPS MultiVisit object
Specify a target name to initialize from pickled datasets in the current
working directory (or in datadir if datadir is not None).
The target name can include blanks - these are replaced by "_"
automatically before searching for matching file names.
The parameter ident is used to collect star and planet properties from the
relevant tables at DACE. If ident is None (default) then the target name
is used in place of ident. Set ident='none' to disable this feature. See
also StarProperties for other options that can be set using id_kws, e.g.,
id_kws={'dace':False} to use SWEET-Cat instead of DACE.
All dates and times in each of the dataset are stored as BJD-2457000 (same
as TESS).
:param target: target name to identify pickled datasets
:param datadir: directory containing pickled datasets
:param tag: tag used when desired datasets were saved
:param ident: identifier in star properties table. If None use target. If
'none'
:param id_kws: keywords for call to StarProperties.
:param verbose: print dataset names, etc. if True
Notes on fitting routines
~~~~~~~~~~~~~~~~~~~~~~~~~
Transit parameters
~~~~~~~~~~~~~~~~~~
The same values of the transit parameters T_0, P, D, W, b, f_c and f_s are
used for all the datasets in the combined fit. This also applies to h_1
and h_2 when fitting transits.
User-defined parameters can be specified in one of the following ways:
* fixed value, e.g., P=1.234
* free parameter with uniform prior interval specified as a 2-tuple,
e.g., f_c=(-0.5,0.5). The initial value is taken as the mid-point of
the allowed interval;
* free parameter with uniform prior interval and initial value
specified as a 3-tuple, e.g., (0.1, 0.2, 1);
* free parameter with a Gaussian prior specified as a ufloat, e.g.,
ufloat(0,1);
* as an lmfit Parameter object.
A transit parameter will be fixed in the fit to the combined datasets
only if the same parameter was fixed in the last fit to all datasets
and the same parameter is not specified as a free parameter in the
call to this method.
If no user-defined value is provided then the initial value for each
transit parameter is set using the mean value across the individual
datasets. For T_0 an integer number of periods are added or subtracted
from the individual T_0 values so that the mean T_0 value corresponds
to a time of mid-transit near the centre of the datasets.
N.B. The timescale for T_0 is BJD_TDB - 2457000.
Priors on transit parameters are only set if they are specified in the
call to the fitting method using either a ufloat, or as an lmfit Parameter
object that includes a ufloat in its user_data.
Priors on the derived parameters e, q_1, q_2, logrho, etc. can be
specified as a dictionary of ufloat values using the extra_priors
keyword, e.g., extra_priors={'e':ufloat(0.2,0.01)}. Priors on parameters
that apply to individual datasets can also be specified in extra_priors,
e.g., extra_priors = {'dfdt_01':ufloat(0.0,0.001)}. Priors listed in
extra_priors will supersede priors on parameters saved with the individual
datasets.
Noise model
~~~~~~~~~~~
The noise model assumes that the error bars on each data point have
addition white noise with standard deviation log_sigma_w. Optionally,
correlated noise can be included using celerite2 with kernel
SHOTerm(log_omega0, log_S0, log_Q). The same values of log_sigma_w,
log_omega0, log_S0 and log_Q are used for all the datasets in the combined
fit.
The fit to the combined datasets will only include a GP if log_omega0 and
log_S0 are both specified as arguments in the call to the fitting method.
If log_Q is not specified as an argument in the call to the fitting method
then it is fixed at the value log_Q = log(1/sqrt(2)), i.e., Q = 1/sqrt(2).
Gaussian priors on the values of log_omega0, log_S0 and log_Q will
only be applied if the user-specified value includes a Gaussian prior,
e.g., log_omega0=ufloat(6,1), log_S0=ufloat(-24,2).
N.B. Gaussian priors on log_omega0, log_S0 and log_Q specified in the
individual datasets are ignored.
Parameter decorrelation
~~~~~~~~~~~~~~~~~~~~~~~
Decorrelation against roll angle (phi) is handled differently in
MultiVisit compared to Dataset. The decorrelation against cos(phi), sin(phi),
cos(2.phi), sin(2.phi), etc. is done using a combination of the trick
from Rodrigo et al. (2017RNAAS...1....7L) and the celerite model by
Foreman-Mackey et al. (2017AJ....154..220F). This enables the
coefficients of this "linear harmonic instrumental noise model" to be
treated as nuisance parameters that are automatically marginalised
away by adding a suitable term (CosineTerm) to the covariance matrix. This
is all done transparently by setting "unroll=True". The number of harmonic
terms is set by nroll, e.g., setting nroll=3 (default) includes terms
up to sin(3.phi) and cos(3.phi). This requires that phi is a linear
function of time for each dataset, which is a good approximation for
individual CHEOPS visits.
Other decorrelation parameters not derived from the roll angle, e.g. dfdx,
dfdy, etc. are included in the fit to individual datasets only if they
were free parameters in the last fit to that dataset. The decorrelation is
done independently for each dataset. The free parameters are labelled
dfdx_ii, dfdy_ii where ii is the number of the dataset to which each
decorrelation parameter applies, i.e. ii=01, 02, 03, etc.
Glint correction is done independently for each dataset if the glint
correction was included in the last fit to that dataset. The glint
scale factor for dataset ii is labelled glint_scale_ii. The glint
scaling factor for each dataset can either be a fixed or a free
parameter, depending on whether it was a fixed or a free parameter in
the last fit to that dataset.
Note that the "unroll" method implicitly assumes that the rate of change
of roll angle, Omega = d(phi)/dt, is constant. This is a reasonable
approximation but can introduce some extra noise in cases where
instrumental noise correlated with roll angle is large, e.g., observations
of faint stars in crowded fields. In this case it may be better to
include the best-fit trends against roll angle from the last fit stored in
the .dataset file in the fit to each dataset. This can be done using the
keyword argument "unwrap=True". This option can be combined with the
"unroll=True" option, i.e. to use "unroll" as a small correction to the
"unwrap" roll-angle decorrelation from the last fit to each data set.
If you only want to store and yield 1-in-thin samples in the chain, set
thin to an integer greater than 1. When this is set, thin*steps will be
made and the chains returned will have "steps" values per walker.
Fits, models, trends and correlated noise
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The best fit to the light curve in each data set is
f_fit = f_sys x f_fac + f_glint + f_celerite + f_unwrap
- "f_sys" includes all the photometric effects intrinsic to the
star/planet system, i.e. transits and eclipses
- "f_fac" includes all the trends correlated with parameters apart
from spacecraft roll angle
- "f_glint" is an optional function of roll angle scaled by the parameter
glint_scale used to model internal reflections or other features
correlated with roll angle (otherwise f_glint=0).
- "f_celerite" is the maximum-likelihood Gaussian process generated for a
kernel SHOTerm() + CosineTerm(Omega) + CosineTerm(2*Omega) + ..., where
the number of CosineTerm() kernels is specified by nroll and SHOTerm()
is only included if correlated noise is included in the model.
- "f_unwrap" are the trends correlated with spacecraft roll angle removed
if the unwrap=True option is specified (otherwise f_unwrap = 0)
For plotting and data output we require the "detrended flux", i.e.
flux_d = f_sys + f_sho + f_fit - f_obs
where f_obs is the observed flux and f_sho is the maximum-likelihood
Gaussian process generated using only the SHOTerm() kernel, i.e. the
detrended fluxes include the correlated noise modelled by f_sho. The
detrended fluxes for the best fits to each dataset are included in the
output lmfit ModelResult object in the attribute fluxes_det.
Return value
~~~~~~~~~~~~
The fitting routines return lmfit MinimizerResult objects with a few
extra attributes. Samples generated by emcee are returned as a python
array in the attribute flat_chain instead of a pandas.DataFrame object in
the attribute flatchain.
Backends
--------
See https://emcee.readthedocs.io/en/stable/tutorials/monitor/ for use of
the backend keyword.
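Example
-------
A minimal sketch; the target name and parameter values are illustrative
only and assume matching pickled datasets exist in the current directory.
>>> M = MultiVisit('WASP-999')
>>> result = M.fit_transit(
...     P=1.2345,                    # fixed value
...     D=(0.008, 0.012),            # uniform prior (min, max)
...     b=(0.0, 0.3, 0.9),           # uniform prior (min, initial, max)
...     h_1=ufloat(0.7224, 0.011))   # Gaussian prior
>>> print(M.fit_report())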
"""
def __init__(self, target=None, datadir=None, tag="",
ident=None, id_kws={'dace':True},
verbose=True):
self.target = target
self.datadir = datadir
self.datasets = []
if target is None: return
ptn = target.replace(" ","_")+'_'+tag+'_*.dataset'
if datadir is not None:
ptn = os.path.join(datadir,ptn)
datatimes = [Dataset.load(i).bjd_ref for i in glob(ptn)]
g = [x for _,x in sorted(zip(datatimes,glob(ptn)))]
if len(g) == 0:
warn(f'No matching dataset names for target {target}', UserWarning)
return
if ident != 'none':
if ident is None: ident = target
self.star = StarProperties(ident, **id_kws)
if verbose:
print(self.star)
print('''
N file_key Aperture last_ GP Glint Scale pipe_ver extra
--------------------------------------------------------------------------''')
for n,fl in enumerate(g):
d = Dataset.load(fl)
# Make time scales consistent
dBJD = d.bjd_ref - 2457000
d._old_bjd_ref = d.bjd_ref
d.bjd_ref = 2457000
d.lc['time'] += dBJD
d.lc['bjd_ref'] = dBJD
for xbf in d.__extra_basis_funcs__:
d.__extra_basis_funcs__[xbf].x += dBJD
if 'lmfit' in d.__dict__:
p = deepcopy(d.lmfit.params['T_0'])
p._val += dBJD
p.init_value += dBJD
p.min += dBJD
p.max += dBJD
d.lmfit.params['T_0'] = p
if 'T_0' in d.lmfit.var_names:
d.lmfit.init_vals[d.lmfit.var_names.index('T_0')] += dBJD
if 'T_0' in d.lmfit.init_values:
d.lmfit.init_values['T_0'] += dBJD
if 'emcee' in d.__dict__:
p = deepcopy(d.emcee.params['T_0'])
p._val += dBJD
p.init_value += dBJD
p.min += dBJD
p.max += dBJD
d.emcee.params['T_0'] = p
p = deepcopy(d.emcee.params_best['T_0'])
p._val += dBJD
p.init_value += dBJD
p.min += dBJD
p.max += dBJD
d.emcee.params_best['T_0'] = p
if 'T_0' in d.emcee.var_names:
j = d.emcee.var_names.index('T_0')
d.emcee.init_vals[j] += dBJD
d.emcee.chain[:,j] += dBJD
if 'T_0' in d.emcee.init_values:
d.emcee.init_values['T_0'] += dBJD
self.datasets.append(d)
if verbose:
dd = d.__dict__
ap = d.lc['aperture'] if 'lc' in dd else '---'
lf = d.__lastfit__ if '__lastfit__' in dd else '---'
try:
gp = 'Yes' if d.gp else 'No'
except AttributeError:
gp = 'No'
gl = 'Yes' if 'f_glint' in dd else 'No'
if d.__scale__ is None:
sc = 'n/a'
elif d.__scale__:
sc = 'True'
else:
sc = 'False'
pv = d.pipe_ver
nx = len(d.__extra_basis_funcs__)
print(f' {n+1:2} {d.file_key} {ap:8} {lf:5} {gp:3}'
f' {gl:5} {sc:5} {pv} {nx}')
#--------------------------------------------------------------------------
#
# Big slab of code here to run the emcee sampler because almost everything is
# common to all fitting routines. Mostly this is parameter handling and model
# creation.
#
# "params" is an lmfit Parameters object that is used for storing the results,
# initial values, etc. Not passed to the target log-posterior function.
#
# "self.__models__" is a list of lmfit models that get evaluated in the target
# log-posterior function.
#
# "self.__modpars__" is a list of Parameters objects, one for each dataset.
# These parameters are used to evaluate the models in "self.__models__". These
# have the same transit model parameters, but different decorrelation
# parameters sent to FactorModel for each dataset. The values in these
# parameter objects are updated in every call to "self._lnpost_".
#
# "self.__rolls__" is a list of celerite kernels for implicit roll-angle
# decorrelation if unroll=True, else a list of "None" values. A separate
# kernel is needed for each dataset because the average roll angle rate is
# different for each visit.
#
# "self.__noisemodel__" is an lmfit Parameters object used for passing the
# noise model parameters log_sigma_w, log_omega0, etc. to the target
# log-posterior function. The user_data may be a ufloat with the "prior".
#
# "self.__fluxes_unwrap__" is the list of roll-angle corrections computed if
# unwrap=True, or a list of arrays contaning 0 if unwrap=False
#
# "self.__priors__" is a list of priors stored as ufloat values.
#
# "self.__var_names__" is a list of the free parameters in the combined fit.
#
def __run_emcee__(self, **kwargs):
# Dict of initial parameter values for creation of models
# Calculation of mean needs P and W so T_0 is not first in the list
vals = OrderedDict()
fittype = self.__fittype__
klist = ['D', 'W', 'b', 'P', 'T_0', 'f_c', 'f_s', 'l_3']
if fittype in ['transit', 'eblm', 'planet']:
klist.append('h_1')
klist.append('h_2')
if fittype in ['eclipse', 'eblm']:
klist.append('L')
klist.append('a_c')
if fittype in ['planet']:
for k in ['F_max', 'F_min', 'ph_off', 'a_c']:
klist.append(k)
for k in klist:
vals[k] = kwargs[k]
# dicts of parameter limits and step sizes for initialisation
pmin = {'P':0, 'D':0, 'W':0, 'b':0, 'f_c':-1, 'f_s':-1,
'h_1':0, 'h_2':0, 'L':0, 'F_max':0, 'l_3':-0.99}
pmax = {'D':0.3, 'W':0.3, 'b':2.0, 'f_c':1, 'f_s':1,
'h_1':1, 'h_2':1, 'L':1.0, 'F_max':1.0, 'l_3':1e6}
step = {'D':1e-4, 'W':1e-4, 'b':1e-2, 'P':1e-6, 'T_0':1e-4,
'f_c':1e-4, 'f_s':1e-3, 'h_1':1e-3, 'h_2':1e-2,
'L':1e-5, 'F_max':1e-5, 'l_3':1e-3}
# Initial stderr value for list of values that may be np.nan or None
def robust_stderr(vals, stds, default):
varr = np.array([v if v is not None else np.nan for v in vals])
sarr = np.array([s if s is not None else np.nan for s in stds])
vok = np.isfinite(varr)
nv = sum(vok)
sok = vok & np.isfinite(sarr)
ns = sum(sok)
if nv == 0: return default
if nv == 1:
if ns == 1: return sarr[sok][0]
return default
if ns == nv:
t = np.nanmean(sarr)/np.sqrt(nv)
if t > 0: return t
return default
t = np.nanstd(varr)
if t > 0: return t
return default
# Create a Parameters() object with initial values and priors on model
# parameters (including fixed parameters)
extra_priors = kwargs['extra_priors']
priors = {} if extra_priors is None else extra_priors
params = Parameters()
plist = [d.emcee.params if d.__lastfit__ == 'emcee' else
d.lmfit.params for d in self.datasets]
vv,vs,vn = [],[],[] # Free parameters for emcee: value, step size, name
for k in vals:
# For fit_planet, 'L'='F_max', so ...
if (fittype == 'planet') and (k == 'F_max'):
kp,kv = 'L','F_max'
else:
kp,kv = k,k
if vals[k] is None: # No user-defined value
vary = True in [p[kp].vary if kp in p else False for p in plist]
# Use mean of best-fit values from datasets
if kp == 'T_0':
t = np.array([p[kp].value for p in plist])
c = np.round((t-t[0])/params['P'])
c -= c.max()//2
t -= c*params['P']
val = t.mean()
vmin = val - params['W']*params['P']/2
vmax = val + params['W']*params['P']/2
if vary:
stds = [p[kp].stderr for p in plist]
stderr = robust_stderr(t, stds, step['T_0'])
else:
# Not all datasets have all parameters so ...
v = [p[kp].value if kp in p else np.nan for p in plist]
val = np.nanmean(v)
if vary:
stds=[p[kp].stderr if kp in p else None for p in plist]
stderr = robust_stderr(v, stds, step[kv])
v = [p[kp].min if kp in p else np.nan for p in plist]
vmin = np.nanmin(v)
if (kv in pmin) and not np.isfinite(vmin):
vmin = pmin[kv]
v = [p[kp].max if kp in p else np.nan for p in plist]
vmax = np.nanmax(v)
if (kv in pmax) and not np.isfinite(vmax):
vmax = pmax[kv]
params.add(kv, val, vary=vary, min=vmin, max=vmax)
vals[kv] = val
if vary:
params[kv].stderr = stderr
else: # Value for parameter from kwargs
params[kv] = _kw_to_Parameter(kv, vals[kv])
vals[kv] = params[kv].value
if (kv in pmin) and not np.isfinite(params[kv].min):
params[kv].min = pmin[kv]
if (kv in pmax) and not np.isfinite(params[kv].max):
params[kv].max = pmax[kv]
if params[kv].vary:
vn.append(kv)
vv.append(params[kv].value)
if isinstance(params[kv].user_data, UFloat):
priors[kv] = params[kv].user_data
# Step size for setting up initial walker positions
if params[kv].stderr is None:
if params[kv].user_data is None:
vs.append(step[kv])
else:
vs.append(params[kv].user_data.s)
else:
if np.isfinite(params[kv].stderr):
vs.append(params[kv].stderr)
else:
vs.append(step[kv])
else:
# Needed to avoid errors when printing parameters
params[kv].stderr = None
# Derived parameters
params.add('k',expr='sqrt(D)',min=0,max=1)
params.add('aR',expr='sqrt((1+k)**2-b**2)/W/pi',min=1)
params.add('sini',expr='sqrt(1 - (b/aR)**2)')
# Avoid use of aR in this expr for logrho - breaks error propagation.
expr = 'log10(4.3275e-4*((1+k)**2-b**2)**1.5/W**3/P**2)'
params.add('logrho',expr=expr,min=-9,max=6)
params.add('e',min=0,max=1,expr='f_c**2 + f_s**2')
# For eccentric orbits only from Winn, arXiv:1001.2010
if (params['e'].value>0) or params['f_c'].vary or params['f_s'].vary:
params.add('esinw',expr='sqrt(e)*f_s')
params.add('ecosw',expr='sqrt(e)*f_c')
params.add('b_tra',expr='b*(1-e**2)/(1+esinw)')
params.add('b_occ',expr='b*(1-e**2)/(1-esinw)')
params.add('T_tra',expr='P*W*sqrt(1-e**2)/(1+esinw)')
params.add('T_occ',expr='P*W*sqrt(1-e**2)/(1-esinw)')
if 'F_min' in params:
params.add('A',min=0,max=1,expr='F_max-F_min')
if 'h_1' in params:
params.add('q_1',min=0,max=1,expr='(1-h_2)**2')
params.add('q_2',min=0,max=1,expr='(h_1-h_2)/(1-h_2)')
# Priors given in extra_priors overwrite existing priors
if extra_priors is not None:
for k in extra_priors:
if k in params:
params[k].user_data = extra_priors[k]
if fittype == 'transit':
ttv = kwargs['ttv']
ttv_prior = kwargs['ttv_prior']
if ttv and (params['T_0'].vary or params['P'].vary):
raise ValueError('TTV not allowed if P or T_0 are variables')
edv, edv_prior = False, None
if fittype == 'eclipse':
edv = kwargs['edv']
edv_prior = kwargs['edv_prior']
if edv and params['L'].vary:
raise ValueError('L must be a fixed parameter if edv=True.')
ttv, ttv_prior = False, None
if fittype in ['eblm', 'planet']:
ttv = kwargs['ttv']
ttv_prior = kwargs['ttv_prior']
if ttv and (params['T_0'].vary or params['P'].vary):
raise ValueError('TTV not allowed if P or T_0 are variables')
edv = kwargs['edv']
edv_prior = kwargs['edv_prior']
if edv and params['L'].vary:
raise ValueError('L must be a fixed parameter if edv=True.')
# Make an lmfit Parameters() object that defines the noise model
noisemodel = Parameters()
k = 'log_sigma_w'
log_sigma_w = kwargs['log_sigma_w']
if log_sigma_w is None:
noisemodel.add(k, -6, min=-12, max=-2)
else:
noisemodel[k] = _kw_to_Parameter(k, log_sigma_w)
# Avoid crazy-low values that are consistent with sigma_w = 0
if not np.isfinite(noisemodel[k].min):
noisemodel[k].min = np.min([noisemodel[k].value-10, -30])
params[k] = copy(noisemodel[k])
if isinstance(noisemodel[k].user_data, UFloat):
priors[k] = noisemodel[k].user_data
if noisemodel[k].vary:
vn.append(k)
vv.append(noisemodel[k].value)
vs.append(1)
log_S0 = kwargs['log_S0']
log_omega0 = kwargs['log_omega0']
log_Q = kwargs['log_Q']
if log_S0 is not None and log_omega0 is not None:
if log_Q is None: log_Q = np.log(1/np.sqrt(2))
nvals = {'log_S0':log_S0, 'log_omega0':log_omega0, 'log_Q':log_Q}
for k in nvals:
noisemodel[k] = _kw_to_Parameter(k, nvals[k])
params[k] = copy(noisemodel[k])
if isinstance(noisemodel[k].user_data, UFloat):
priors[k] = noisemodel[k].user_data
if noisemodel[k].vary:
vn.append(k)
vv.append(noisemodel[k].value)
vs.append(1)
params.add('rho_SHO',expr='2*pi/exp(log_omega0)')
params.add('tau_SHO',expr='2*exp(log_Q)/exp(log_omega0)')
params.add('sigma_SHO',expr='sqrt(exp(log_Q+log_S0+log_omega0))')
noisemodel.add('rho_SHO',expr='2*pi/exp(log_omega0)')
noisemodel.add('tau_SHO',expr='2*exp(log_Q)/exp(log_omega0)')
noisemodel.add('sigma_SHO',
expr='sqrt(exp(log_Q+log_S0+log_omega0))')
# Lists of model parameters and data for individual datasets
fluxes_unwrap = []
n_unwrap = []
rolls = []
models = []
modpars = []
scales = []
# Cycle over datasets, each with its own set of parameters
for i,(d,p) in enumerate(zip(self.datasets, plist)):
f_unwrap = np.zeros_like(d.lc['time'])
n = 0
if kwargs['unwrap']:
phi = d.lc['roll_angle']*np.pi/180
for j in range(1,4):
k = 'dfdsinphi' if j < 2 else f'dfdsin{j}phi'
if k in p:
f_unwrap += p[k]*np.sin(j*phi)
n = j
k = 'dfdcosphi' if j < 2 else f'dfdcos{j}phi'
if k in p:
f_unwrap += p[k]*np.cos(j*phi)
n = j
n_unwrap.append(n)
fluxes_unwrap.append(f_unwrap)
t = d.lc['time']
try:
smear = d.lc['smear']
except KeyError:
smear = np.zeros_like(t)
try:
deltaT = d.lc['deltaT']
except KeyError:
deltaT = np.zeros_like(t)
if d.__scale__:
factor_model = FactorModel(
dx = _make_interp(t,d.lc['xoff'], scale='range'),
dy = _make_interp(t,d.lc['yoff'], scale='range'),
bg = _make_interp(t,d.lc['bg'], scale='range'),
contam = _make_interp(t,d.lc['contam'], scale='range'),
smear = _make_interp(t,smear, scale='range'),
deltaT = _make_interp(t,deltaT),
extra_basis_funcs=d.__extra_basis_funcs__)
else:
factor_model = FactorModel(
dx = _make_interp(t,d.lc['xoff']),
dy = _make_interp(t,d.lc['yoff']),
bg = _make_interp(t,d.lc['bg']),
contam = _make_interp(t,d.lc['contam']),
smear = _make_interp(t,smear),
deltaT = _make_interp(t,deltaT),
extra_basis_funcs=d.__extra_basis_funcs__)
if fittype == 'transit':
model = TransitModel()*factor_model
elif fittype == 'eclipse':
model = EclipseModel()*factor_model
elif fittype == 'eblm':
model = EBLMModel()*factor_model
elif fittype == 'planet':
model = PlanetModel()*factor_model
l = ['dfdbg','dfdcontam','dfdsmear','dfdx','dfdy']
if any(p_ in l for p_ in p):
scales.append(d.__scale__)
else:
scales.append(None)
if 'glint_scale' in p:
delta_t = d._old_bjd_ref - d.bjd_ref
model += Model(_glint_func, independent_vars=['t'],
f_theta=d.f_theta, f_glint=d.f_glint, delta_t=delta_t)
models.append(model)
modpar = model.make_params(verbose=False, **vals)
# Copy min/max values from params to modpar
for pm in modpar:
if pm in params:
modpar[pm].min = params[pm].min
modpar[pm].max = params[pm].max
if ttv:
modpar['T_0'].init_value = modpar['T_0'].value
modpars.append(modpar)
if ttv:
t = f'ttv_{i+1:02d}'
params.add(t, 0)
params[t].user_data = ufloat(0,ttv_prior)
vn.append(t)
vv.append(0)
vs.append(30)
priors[t] = params[t].user_data
if edv:
t = f'L_{i+1:02d}'
params.add(t, vals['L'])
params[t].user_data = ufloat(vals['L'], edv_prior)
vn.append(t)
vv.append(vals['L'])
vs.append(edv_prior)
priors[t] = params[t].user_data
# Now the decorrelation parameters, including arbitrary
# basis functions, if present
for dfdp in [k for k in p if (k[:3]=='dfd' or k[:4]=='d2fd' or
k=='c' or k=='ramp' or k=='glint_scale') and
k[:6]!='dfdsin' and k[:6]!='dfdcos']:
if p[dfdp].vary:
pj = f'{dfdp}_{i+1:02d}'
params.add(pj, p[dfdp].value,
min=p[dfdp].min, max=p[dfdp].max)
if pj in priors:
params[pj].user_data = priors[pj]
vn.append(pj)
vv.append(p[dfdp].value)
# p[dfdp].stderr is None if no error estimate is available, and
# indexing an existing key cannot raise KeyError, so test explicitly
if p[dfdp].stderr is None:
    if dfdp == 'glint_scale':
        vs.append(0.01)
    elif dfdp == 'ramp':
        vs.append(50)
    else:
        vs.append(1e-6)
else:
    vs.append(p[dfdp].stderr)
if kwargs['unroll']:
sinphi = np.sin(np.radians(d.lc['roll_angle']))
s = SineModel.fit(sinphi, P=99/1440, x0=0, x=d.lc['time'])
Omega= 2*np.pi/s.params['P']
fluxrms = np.nanstd(d.lc['flux'])
roll = CosineTerm(omega_j=Omega, sigma_j=fluxrms)
for j in range(2,kwargs['nroll']+1):
roll = roll + CosineTerm(omega_j=j*Omega, sigma_j=fluxrms)
rolls.append(roll)
else:
rolls.append(None)
# END of for dataset in self.datasets:
# Copy parameters, models, priors, etc. to self.
self.__unwrap__ = kwargs["unwrap"]
self.__unroll__ = kwargs["unroll"]
self.__nroll__ = kwargs["nroll"]
self.__rolls__ = rolls
self.__models__ = models
self.__modpars__ = modpars
self.__noisemodel__ = noisemodel
self.__priors__ = priors
self.__var_names__ = vn # Change of name for consistency with result
self.__fluxes_unwrap__ = fluxes_unwrap
self.__n_unwrap__ = n_unwrap
self.__scales__ = scales
backend = kwargs['backend']
if backend is None:
iteration = 0
else:
try:
iteration = backend.iteration
except OSError:
iteration = 0
# Setup sampler
vv = np.array(vv)
vs = np.array(vs)
n_varys = len(vv)
nwalkers = kwargs['nwalkers']
if iteration > 0:
pos = None
else:
pos = []
for i in range(nwalkers):
lnpost_i = -np.inf
it = 0
while lnpost_i == -np.inf:
pos_i=vv+vs*np.random.randn(n_varys)*kwargs['init_scale']
lnpost_i, lnlike_i = self._lnpost_(pos_i)
it += 1
if it > _ITMAX_:
for n,v,s, in zip(vn, vv, vs):
print(n,v,s)
raise Exception('Failed to initialize walkers')
pos.append(pos_i)
sampler = EnsembleSampler(nwalkers, n_varys, self._lnpost_,
backend=backend)
progress = kwargs['progress']
if progress:
print('Running burn-in ..')
stdout.flush()
if iteration == 0:
pos,_,_,_ = sampler.run_mcmc(pos, kwargs['burn'], store=False,
skip_initial_state_check=True, progress=progress)
sampler.reset()
if progress:
print('Running sampler ..')
stdout.flush()
state = sampler.run_mcmc(pos, kwargs['steps'], thin_by=kwargs['thin'],
skip_initial_state_check=True, progress=progress)
# Run self._lnpost_ with best-fit parameters to obtain
# best-fit light curves, detrended fluxes, etc.
flatchain = sampler.get_chain(flat=True)
pos = flatchain[np.argmax(sampler.get_log_prob()),:]
f_fit,f_sys,f_det,f_sho,f_phi = self._lnpost_(pos,return_fit=True)
self.__fluxes_fit__ = f_fit
self.__fluxes_sys__ = f_sys
self.__fluxes_det__ = f_det
self.__fluxes_sho__ = f_sho
self.__fluxes_phi__ = f_phi
# lmfit MinimizerResult object summary of results for printing and
# plotting. Data/objects required to re-run the analysis go directly
# into self.
result = MinimizerResult()
result.status = 0
result.var_names = vn
result.covar = np.cov(flatchain.T)
result.init_vals = vv
result.init_values = copy(params.valuesdict())
af = sampler.acceptance_fraction.mean()
result.acceptance_fraction = af
steps, nwalkers, ndim = sampler.get_chain().shape
result.thin = kwargs['thin']
result.nfev = int(kwargs['thin']*nwalkers*steps/af)
result.nwalkers = nwalkers
result.nvarys = ndim
result.ndata = sum([len(d.lc['time']) for d in self.datasets])
result.nfree = result.ndata - ndim
result.method = 'emcee'
result.errorbars = True
result.bestfit = f_fit
result.fluxes_det = f_det
z = zip(self.datasets, f_fit)
result.residual = [(d.lc['flux']-ft) for d,ft in z]
z = zip(self.datasets, result.residual)
result.chisqr = np.sum([((r/d.lc['flux_err'])**2).sum() for d,r in z])
result.redchi = result.chisqr/result.nfree
lnlike = np.max(sampler.get_blobs())
result.lnlike = lnlike
result.aic = 2*result.nvarys - 2*lnlike
result.bic = result.nvarys*np.log(result.ndata) - 2*lnlike
result.rms = np.array([r.std() for r in result.residual])
result.npriors = len(self.__priors__)
result.priors = self.__priors__
quantiles = np.percentile(flatchain, [15.87, 50, 84.13], axis=0)
corrcoefs = np.corrcoef(flatchain.T)
parbest = params.copy()
for i, n in enumerate(vn):
std_l, median, std_u = quantiles[:, i]
params[n].value = median
params[n].stderr = 0.5 * (std_u - std_l)
parbest[n].value = pos[i]
parbest[n].stderr = 0.5 * (std_u - std_l)
if n in self.__noisemodel__:
self.__noisemodel__[n].value = median
self.__noisemodel__[n].stderr = 0.5 * (std_u - std_l)
correl = {}
for j, n2 in enumerate(vn):
if i != j:
correl[n2] = corrcoefs[i, j]
params[n].correl = correl
parbest[n].correl = correl
result.params = params
result.parbest = parbest
result.flat_chain = flatchain
self.__parbest__ = parbest
self.__result__ = result
self.__sampler__ = sampler
return result
#--------------------------------------------------------------------------
def _lnpost_(self, pos, return_fit=False):
lnlike = 0
if return_fit:
fluxes_sys = [] # transits and eclipses only
fluxes_fit = [] # lc fit per dataset
fluxes_sho = [] # GP process from SHOTerm() kernel only
fluxes_det = [] # detrended fluxes
fluxes_phi = [] # Roll-angle trends if unroll=True
# Update self.__noisemodel__ parameters
vn = self.__var_names__
noisemodel = self.__noisemodel__
for p in ('log_sigma_w', 'log_omega0', 'log_S0', 'log_Q'):
if p in vn:
v = pos[vn.index(p)]
if (v < noisemodel[p].min) or (v > noisemodel[p].max):
return -np.inf, -np.inf
noisemodel[p].set(value=v)
if 'log_Q' in noisemodel:
sho = SHOTerm(
S0=np.exp(noisemodel['log_S0'].value),
Q=np.exp(noisemodel['log_Q'].value),
w0=np.exp(noisemodel['log_omega0'].value))
else:
sho = False
for i, dataset in enumerate(self.datasets):
lc = dataset.lc
model = self.__models__[i]
modpar = self.__modpars__[i]
roll = self.__rolls__[i]
f_unwrap = self.__fluxes_unwrap__[i]
for p in ('T_0', 'P', 'D', 'W', 'b', 'f_c', 'f_s', 'l_3',
'h_1', 'h_2', 'L', 'F_max', 'F_min', 'ph_off'):
if p in vn:
v = pos[vn.index(p)]
if not np.isfinite(v): return -np.inf, -np.inf
if (v < modpar[p].min) or (v > modpar[p].max):
return -np.inf, -np.inf
modpar[p].value = v
# Check that none of the derived parameters are out of range
for p in ('e', 'q_1', 'q_2', 'k', 'aR', 'rho',):
if p in modpar:
v = modpar[p].value
if not np.isfinite(v): return -np.inf, -np.inf
if (v < modpar[p].min) or (v > modpar[p].max):
return -np.inf, -np.inf
for d in [k for k in modpar if k[:3]=='dfd' or k[:4]=='d2fd' or
k=='c' or k=='ramp' or k=='glint_scale']:
p = f'{d}_{i+1:02d}'
if p in vn:
v = pos[vn.index(p)]
if (v < modpar[d].min) or (v > modpar[d].max):
return -np.inf, -np.inf
modpar[d].value = v
p = f'ttv_{i+1:02d}'
if p in vn:
v = pos[vn.index(p)]
modpar['T_0'].value = modpar['T_0'].init_value + v/86400
p = f'L_{i+1:02d}'
if p in vn:
# Exclude negative eclipse depths
if pos[vn.index(p)] < 0:
return -np.inf, -np.inf
modpar['L'].value = pos[vn.index(p)]
# Evaluate components of the model so that we can extract them
f_model = model.eval(modpar, t=lc['time'])
resid = lc['flux'] - f_unwrap - f_model
yvar = np.exp(2*noisemodel['log_sigma_w']) + lc['flux_err']**2
if roll or sho:
if roll and sho:
kernel = sho + roll
elif sho:
kernel = sho
else:
kernel = roll
gp = GaussianProcess(kernel)
gp.compute(lc['time'], diag=yvar, quiet=True)
if return_fit:
k = f'_{self.__fittype__}_func'
f_sys = model.eval_components(params=modpar,
t=lc['time'])[k]
fluxes_sys.append(f_sys)
f_celerite = gp.predict(resid, include_mean=False)
f_fit = f_model + f_celerite + f_unwrap
fluxes_fit.append(f_fit)
if roll and sho:
f_sho = gp.predict(resid, include_mean=False,
kernel=gp.kernel.terms[0])
f_phi = gp.predict(resid, include_mean=False,
kernel=gp.kernel.terms[1])
elif sho:
f_sho = f_celerite
f_phi = np.zeros_like(resid)
else:
f_sho = np.zeros_like(resid)
f_phi = f_celerite
f_det = f_sys + f_sho + f_fit - lc['flux']
fluxes_det.append(f_det)
fluxes_sho.append(f_sho)
fluxes_phi.append(f_phi)
else:
lnlike += gp.log_likelihood(resid)
else:
if return_fit:
k = f'_{self.__fittype__}_func'
f_sys = model.eval_components(params=modpar,
t=lc['time'])[k]
fluxes_sys.append(f_sys)
f_fit = f_model + f_unwrap
fluxes_fit.append(f_fit)
f_det = f_sys + f_fit - lc['flux']
fluxes_det.append(f_det)
fluxes_sho.append(np.zeros_like(f_sys))
else:
lnlike += -0.5*np.sum(resid**2/yvar+np.log(2*np.pi*yvar))
if return_fit:
return fluxes_fit, fluxes_sys, fluxes_det, fluxes_sho, fluxes_phi
args=[modpar[p] for p in ('D','W','b')]
lnprior = _log_prior(*args) # Priors on D, W and b
if not np.isfinite(lnprior): return -np.inf, -np.inf
for p in self.__priors__:
pn = self.__priors__[p].n
ps = self.__priors__[p].s
if p in vn:
z = (pos[vn.index(p)] - pn)/ps
elif p in ('e', 'q_1', 'q_2', 'k', 'aR', 'rho',):
z = (modpar[p] - pn)/ps
elif p == 'logrho':
z = (np.log10(modpar['rho']) - pn)/ps
else:
z = None
if z is not None:
lnprior += -0.5*(z**2 + np.log(2*np.pi*ps**2))
if np.isnan(lnprior) or np.isnan(lnlike):
return -np.inf, -np.inf
return lnlike + lnprior, lnlike
#--------------------------------------------------------------------------
def fit_transit(self,
steps=128, nwalkers=64, burn=256,
T_0=None, P=None, D=None, W=None, b=None, f_c=None, f_s=None,
h_1=None, h_2=None, l_3=None,
ttv=False, ttv_prior=3600, extra_priors=None,
log_sigma_w=None, log_omega0=None, log_S0=None, log_Q=None,
unroll=True, nroll=3, unwrap=False, thin=1,
init_scale=0.5, progress=True, backend=None):
"""
Use emcee to fit the transits in the current datasets
If T_0 and P are both fixed parameters then ttv=True can be used to
include the free parameters ttv_i, the offset in seconds from the
predicted time of mid-transit for each dataset i = 1, ..., N. The
prior on the values of ttv_i is a Gaussian with a width ttv_prior in
seconds.
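Example (a sketch; the T_0, P and ttv_prior values are illustrative):
>>> result = M.fit_transit(T_0=1234.5678, P=3.14159,
...                        ttv=True, ttv_prior=120)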
"""
# Get a dictionary of all keyword arguments excluding 'self'
kwargs = dict(locals())
del kwargs['self']
self.__fittype__ = 'transit'
return self.__run_emcee__(**kwargs)
#--------------------------------------------------------------------------
def fit_eclipse(self,
steps=128, nwalkers=64, burn=256,
T_0=None, P=None, D=None, W=None, b=None, f_c=None, f_s=None,
L=None, a_c=0, l_3=None, edv=False, edv_prior=1e-3,
extra_priors=None, log_sigma_w=None, log_omega0=None,
log_S0=None, log_Q=None, unroll=True, nroll=3, unwrap=False,
thin=1, init_scale=0.5, progress=True, backend=None):
"""
Use emcee to fit the eclipses in the current datasets
Eclipse depths variations can be included in the fit using the keyword
edv=True. In this case L must be a fixed parameter and the eclipse
depth for dataset i is L_i, i=1, ..., N. The prior on the values of
L_i is a Gaussian with mean value L and width edv_prior.
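Example (a sketch; the L and edv_prior values are illustrative):
>>> result = M.fit_eclipse(L=5e-5, edv=True, edv_prior=2e-5)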
"""
# Get a dictionary of all keyword arguments excluding 'self'
kwargs = dict(locals())
del kwargs['self']
self.__fittype__ = 'eclipse'
return self.__run_emcee__(**kwargs)
#--------------------------------------------------------------------------
def fit_eblm(self, steps=128, nwalkers=64, burn=256,
T_0=None, P=None, D=None, W=None, b=None, f_c=None, f_s=None,
h_1=None, h_2=None, l_3=None, ttv=False, ttv_prior=3600,
L=None, a_c=0, edv=False, edv_prior=1e-3, extra_priors=None,
log_sigma_w=None, log_omega0=None, log_S0=None, log_Q=None,
unroll=True, nroll=3, unwrap=False, thin=1,
init_scale=0.5, progress=True, backend=None):
"""
Use emcee to fit the transits and eclipses in the current datasets
using a model for an eclipsing binary with a low-mass companion.
The model does not account for the thermal/reflected phase effect.
If T_0 and P are both fixed parameters then ttv=True can be used to
include the free parameters ttv_i, the offset in seconds from the
predicted time of mid-transit for each dataset i = 1, ..., N. The
prior on the values of ttv_i is a Gaussian with a width ttv_prior in
seconds.
Eclipse depths variations can be included in the fit using the keyword
edv=True. In this case L must be a fixed parameter and the eclipse
depth for dataset i is L_i, i=1, ..., N. The prior on the values of
L_i is a Gaussian with mean value L and width edv_prior.
"""
# Get a dictionary of all keyword arguments excluding 'self'
kwargs = dict(locals())
del kwargs['self']
self.__fittype__ = 'eblm'
return self.__run_emcee__(**kwargs)
#--------------------------------------------------------------------------
def fit_planet(self, steps=128, nwalkers=64, burn=256,
T_0=None, P=None, D=None, W=None, b=None, f_c=None, f_s=None,
h_1=None, h_2=None, l_3=None, ttv=False, ttv_prior=3600,
F_max=None, F_min=0, ph_off=0,
a_c=0, edv=False, edv_prior=1e-3, extra_priors=None,
log_sigma_w=None, log_omega0=None, log_S0=None, log_Q=None,
unroll=True, nroll=3, unwrap=False, thin=1,
init_scale=0.5, progress=True, backend=None):
"""
Use emcee to fit the transits and eclipses in the current datasets
using the PlanetModel model.
If T_0 and P are both fixed parameters then ttv=True can be used to
include the free parameters ttv_i, the offset in seconds from the
predicted time of mid-transit for each dataset i = 1, ..., N. The
prior on the values of ttv_i is a Gaussian with a width ttv_prior in
seconds.
Eclipse depths variations can be included in the fit using the keyword
edv=True. In this case F_max must be a fixed parameter and the value of
F_max for dataset i is F_max_i, i=1, ..., N. The prior on the values of
F_max_i is a Gaussian with mean value F_max and width edv_prior.
By default, this method assumes ph_off=0 and F_min=0. The initial
value of F_max is calculated from the best-fit values of L in the
input eclipse datasets, if possible.
"""
# Get a dictionary of all keyword arguments excluding 'self'
kwargs = dict(locals())
del kwargs['self']
self.__fittype__ = 'planet'
return self.__run_emcee__(**kwargs)
#--------------------------------------------------------------------------
def fit_report(self, **kwargs):
"""
Return a string summarizing the results of the last emcee fit
"""
result = self.__result__
report = lmfit_report(result, **kwargs)
n = [len(d.lc['time']) for d in self.datasets]
rms = np.sqrt(np.average(result.rms**2,weights=n))*1e6
s = " RMS residual = {:0.1f} ppm\n".format(rms)
j = report.index('[[Variables]]')
report = report[:j] + s + report[j:]
noPriors = True
params = result.params
parnames = list(params.keys())
namelen = max([len(n) for n in parnames])
if result.npriors > 0: report+="\n[[Priors]]"
for p in result.priors:
q = result.priors[p]
report += "\n %s:%s" % (p, ' '*(namelen-len(p)))
report += '%s +/-%s' % (gformat(q.n), gformat(q.s))
report += '\n[[Notes]]'
if self.__unroll__:
report += '\n Implicit roll-angle decorrelation used'
report += f' with nroll={self.__nroll__} terms'
else:
report += f'\n Implicit roll-angle decorrelation not used.'
if self.__unwrap__:
report += '\n Best-fit roll-angle decorrelation was subtracted'
report += ' from light curves (unwrap=True)'
else:
report += '\n Best-fit roll-angle decorrelation was not used'
report += ' (unwrap=False)'
for i,s in enumerate(self.__scales__):
if s is not None:
report += f'\n Dataset {i+1}: '
if s:
report += 'decorrelation parameters were scaled'
else:
report += 'decorrelation parameters were not scaled'
report += '\n[[Software versions]]'
pipe_vers = ""
for s in set([d.pipe_ver for d in self.datasets]):
pipe_vers += f"{s}, "
report += '\n CHEOPS DRP : %s' % pipe_vers[:-2]
report += '\n pycheops : %s' % __version__
report += '\n lmfit : %s' % _lmfit_version_
return(report)
# ----------------------------------------------------------------
def ttv_plot(self, plot_kws=None, figsize=(8,5)):
"""
Plot results of TTV analysis
The keyword plot_kws can be used to set keyword options in the call to
plt.errorbar().
"""
result = self.__result__
if plot_kws is None:
plot_kws={'fmt':'bo', 'capsize':4}
fig,ax = plt.subplots(figsize=figsize)
for j in range(len(self.datasets)):
t = self.datasets[j].lc['time'].mean() - 1900
ttv = result.params[f'ttv_{j+1:02d}'].value
ttv_err = result.params[f'ttv_{j+1:02d}'].stderr
ax.errorbar(t,ttv,yerr=ttv_err, **plot_kws)
plt.axhline(0,c='darkcyan',ls=':')
ax.set_xlabel('BJD - 2458900')
ax.set_ylabel(r'$\Delta T$')
return fig
# ----------------------------------------------------------------
def trail_plot(self, plotkeys=None,
plot_kws={'alpha':0.1}, width=8, height=1.5):
"""
Plot parameter values v. step number for each walker.
These plots are useful for checking the convergence of the sampler.
The parameters width and height specify the size of the subplot for
each parameter.
The parameters to be plotted are specified by the keyword plotkeys, or
plotkeys='all' to plot every jump parameter.
The keyword plot_kws can be used to set keyword options in the plots.
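Example (a sketch):
>>> fig = M.trail_plot(plotkeys=['D', 'W', 'b', 'T_0'])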
"""
result = self.__result__
params = result.params
samples = self.__sampler__.get_chain()
var_names = result.var_names
n = len(self.datasets)
if plotkeys == 'all':
plotkeys = var_names
elif plotkeys is None:
if self.__fittype__ == 'transit':
l = ['D', 'W', 'b', 'T_0', 'P', 'h_1', 'h_2']
elif self.__fittype__ == 'planet':
l = ['D', 'W', 'b', 'T_0', 'P', 'F_max']
elif self.__fittype__ == 'eblm':
l = ['D', 'W', 'b', 'T_0', 'P', 'L']
elif 'L_01' in var_names:
l = ['D','W','b']+[f'L_{j+1:02d}' for j in range(n)]
else:
l = ['L']+[f'c_{j+1:02d}' for j in range(n)]
plotkeys = list(set(var_names).intersection(l))
plotkeys.sort()
n = len(plotkeys)
fig,ax = plt.subplots(nrows=n, figsize=(width,n*height), sharex=True)
if n == 1: ax = [ax,]
d0 = 0
if 'T_0' in plotkeys:
d0 = np.floor(np.nanmedian(samples[:,:,var_names.index('T_0')]))
extra_labels = {}
for i,d in enumerate(self.datasets):
if d.extra_decorr_vectors is not None:
for k in d.extra_decorr_vectors:
if k == 't':
continue
if 'label' in d.extra_decorr_vectors[k].keys():
label = d.extra_decorr_vectors[k]['label']
label += f'$_{{{i+1:02d}}}$'
extra_labels[f'dfd{k}_{i+1:02d}'] = label
labels = _make_labels(plotkeys, d0, extra_labels)
for i,key in enumerate(plotkeys):
if key == 'T_0':
ax[i].plot(samples[:,:,var_names.index(key)]-d0, **plot_kws)
else:
ax[i].plot(samples[:,:,var_names.index(key)], **plot_kws)
ax[i].set_ylabel(labels[i])
ax[i].yaxis.set_label_coords(-0.1, 0.5)
ax[-1].set_xlim(0, len(samples)-1)
ax[-1].set_xlabel("step number")
fig.tight_layout()
return fig
# ----------------------------------------------------------------
def corner_plot(self, plotkeys=None, custom_labels=None,
show_priors=True, show_ticklabels=False, kwargs=None):
"""
Parameter correlation plot
Use custom_labels to change the string used for the axis labels, e.g.
custom_labels={'F_max':r'$F_{\rm pl}/F_{\star}$'}
:param plotkeys: list of variables to include in the corner plot
:param custom_labels: dict of custom labels
:param show_priors: show +-1-sigma limits for Gaussian priors
:param show_ticklabels: Show numerical labels for tick marks
:param kwargs: dict of keywords to pass through to corner.corner
See also https://corner.readthedocs.io/en/latest/
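Example (a sketch; show_titles here is passed through to corner.corner):
>>> fig = M.corner_plot(plotkeys=['D', 'W', 'b'],
...                     kwargs={'show_titles': True})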
"""
result = self.__result__
params = result.params
var_names = result.var_names
n = len(self.datasets)
if plotkeys == 'all':
plotkeys = var_names
if plotkeys is None:
if self.__fittype__ == 'transit':
l = ['D', 'W', 'b', 'T_0', 'P', 'h_1', 'h_2']
elif self.__fittype__ == 'planet':
l = ['D', 'W', 'b', 'T_0', 'P', 'F_max']
elif self.__fittype__ == 'eblm':
l = ['D', 'W', 'b', 'T_0', 'P', 'L']
elif 'L_01' in var_names:
l = ['D','W','b']+[f'L_{j+1:02d}' for j in range(n)]
else:
l = ['L']+[f'c_{j+1:02d}' for j in range(n)]
plotkeys = list(set(var_names).intersection(l))
plotkeys.sort()
chain = self.__sampler__.get_chain(flat=True)
xs = []
if 'T_0' in plotkeys:
d0 = np.floor(np.nanmedian(chain[:,var_names.index('T_0')]))
else:
d0 = 0
for key in plotkeys:
if key in var_names:
if key == 'T_0':
xs.append(chain[:,var_names.index(key)]-d0)
else:
xs.append(chain[:,var_names.index(key)])
if key == 'sigma_w' and params['log_sigma_w'].vary:
xs.append(np.exp(chain[:,var_names.index('log_sigma_w')])*1e6)
if 'D' in var_names:
k = np.sqrt(chain[:,var_names.index('D')])
else:
k = np.sqrt(params['D'].value) # Needed for later calculations
if key == 'k' and 'D' in var_names:
xs.append(k)
if 'b' in var_names:
b = chain[:,var_names.index('b')]
else:
b = params['b'].value # Needed for later calculations
if 'W' in var_names:
W = chain[:,var_names.index('W')]
else:
W = params['W'].value
aR = np.sqrt((1+k)**2-b**2)/W/np.pi
if key == 'aR':
xs.append(aR)
sini = np.sqrt(1 - (b/aR)**2)
if key == 'sini':
xs.append(sini)
if 'P' in var_names:
P = chain[:,var_names.index('P')]
else:
P = params['P'].value # Needed for later calculations
if key == 'logrho':
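                # Mean stellar density in solar units from the transit
                # observables; the numerical constant collects the units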
logrho = np.log10(4.3275e-4*((1+k)**2-b**2)**1.5/W**3/P**2)
xs.append(logrho)
kws = {} if kwargs is None else kwargs
xs = np.array(xs).T
if custom_labels is None:
extra_labels = {}
else:
extra_labels = custom_labels
for i,d in enumerate(self.datasets):
            if d.extra_decorr_vectors is not None:
for k in d.extra_decorr_vectors:
if k == 't':
continue
if 'label' in d.extra_decorr_vectors[k].keys():
label = d.extra_decorr_vectors[k]['label']
label += f'$_{{{i+1:02d}}}$'
extra_labels[f'dfd{k}_{i+1:02d}'] = label
labels = _make_labels(plotkeys, d0, extra_labels)
figure = corner.corner(xs, labels=labels, **kws)
nax = len(labels)
axes = np.array(figure.axes).reshape((nax, nax))
if not show_ticklabels:
for i in range(nax):
ax = axes[-1, i]
ax.set_xticklabels([])
ax.set_xlabel(labels[i])
ax.xaxis.set_label_coords(0.5, -0.1)
for i in range(1,nax):
ax = axes[i,0]
ax.set_yticklabels([])
ax.set_ylabel(labels[i])
ax.yaxis.set_label_coords(-0.1, 0.5)
if show_priors:
for i, key in enumerate(plotkeys):
q = params[key].user_data
if isinstance(q, UFloat):
if key == 'T_0': q -= d0
ax = axes[i, i]
ax.axvline(q.n - q.s, color="g", linestyle='--')
ax.axvline(q.n + q.s, color="g", linestyle='--')
return figure
# ------------------------------------------------------------
def cds_data_export(self, title=None, author=None, authors=None,
abstract=None, keywords=None, bibcode=None,
acknowledgements=None):
'''
Save light curve, best fit, etc. to files suitable for CDS upload
Generates ReadMe file and data files with the following columns..
Format Units Label Explanations
F11.6 d time Time of mid-exposure (BJD_TDB)
F8.6 --- flux Normalized flux
F8.6 --- e_flux Normalized flux error
F8.6 --- flux_d Normalized flux corrected for instrumental trends
F8.4 pix xoff Target position offset in x-direction
F8.4 pix yoff Target position offset in y-direction
F8.4 deg roll Spacecraft roll angle
F9.7 --- contam Fraction of flux in aperture from nearby stars
F9.7 --- smear Fraction of flux in aperture from readout trails
F9.7 --- bg Fraction of flux in aperture from background
F7.3 --- temp_2 thermFront_2 temperature sensor reading
:param title: title
:param author: First author
:param authors: Full author list of the paper
:param abstract: Abstract of the paper
:param keywords: list of keywords as in the printed publication
:param bibcode: Bibliography code for the printed publication
:param acknowledgements: list of acknowledgements
See http://cdsarc.u-strasbg.fr/submit/catstd/catstd-3.1.htx for the
correct formatting of title, keywords, etc.
The acknowledgements are normally used to give the name and e-mail
address of the person who generated the table, e.g.
"Pierre Maxted, p.maxted(at)keele.ac.uk"
'''
cds = cdspyreadme.CDSTablesMaker()
cds.title = title if title is not None else ""
cds.author = author if author is not None else ""
        cds.authors = authors if authors is not None else ""
cds.abstract = abstract if abstract is not None else ""
cds.keywords = keywords if keywords is not None else ""
cds.bibcode = bibcode if bibcode is not None else ""
cds.date = Time.now().value.year
result = self.__result__
par = result.parbest
for j,d in enumerate(self.datasets):
T=Table()
T['time'] = d.lc['time'] + 2457000
T['time'].info.format = '16.6f'
T['time'].description = 'Time of mid-exposure'
T['time'].units = u.day
T['flux'] = d.lc['flux']
T['flux'].info.format = '8.6f'
T['flux'].description = 'Normalized flux'
T['e_flux'] = d.lc['flux_err']
T['e_flux'].info.format = '8.6f'
T['e_flux'].description = 'Normalized flux error'
T['flux_d'] = self.__fluxes_det__[j]
T['flux_d'].info.format = '8.6f'
T['flux_d'].description = (
'Normalized flux corrected for instrumental trends' )
T['xoff'] = d.lc['xoff']
T['xoff'].info.format = '8.4f'
T['xoff'].description = "Target position offset in x-direction"
T['yoff'] = d.lc['yoff']
T['yoff'].info.format = '8.4f'
T['yoff'].description = "Target position offset in y-direction"
T['roll'] = d.lc['roll_angle']
T['roll'].info.format = '8.4f'
T['roll'].description = "Spacecraft roll angle"
T['roll'].units = u.degree
T['contam'] = d.lc['contam']
T['contam'].info.format = '9.7f'
T['contam'].description = (
"Fraction of flux in aperture from nearby stars" )
if np.ptp(d.lc['smear']) > 0:
T['smear'] = d.lc['smear']
T['smear'].info.format = '9.7f'
T['smear'].description = (
"Fraction of flux in aperture from readout trails" )
T['bg'] = d.lc['bg']
T['bg'].info.format = '9.7f'
T['bg'].description = (
"Fraction of flux in aperture from background" )
if np.ptp(d.lc['deltaT']) > 0:
T['temp_2'] = d.lc['deltaT'] - 12
T['temp_2'].info.format = '7.3f'
T['temp_2'].description = (
"thermFront_2 temperature sensor reading" )
T['temp_2'].units = u.Celsius
table = cds.addTable(T, f'lc{j+1:02d}.dat',
description=f"Data from archive file {d.file_key}" )
# Set output format
for p in T.colnames:
c=table.get_column(p)
c.set_format(f'F{T[p].format[:-1]}')
# Units
c=table.get_column('time'); c.unit = 'd'
c=table.get_column('xoff'); c.unit = 'pix'
c=table.get_column('yoff'); c.unit = 'pix'
c=table.get_column('roll'); c.unit = 'deg'
cds.writeCDSTables()
templatename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'data','cdspyreadme','ReadMe.template')
coo = SkyCoord(self.datasets[0].lc['header']['RA_TARG'],
self.datasets[0].lc['header']['DEC_TARG'],unit='deg')
rastr = coo.ra.to_string(unit='hour',sep=' ',precision=1, pad=True)
destr = coo.dec.to_string(unit='deg',sep=' ',precision=0,
alwayssign=True, pad=True)
desc = (f'CHEOPS photometry of {self.target} generated using pycheops '+
f'version {__version__}.')
templateValue = {
'object':f'{rastr} {destr} {self.target}',
'description':desc,
'acknowledgements':acknowledgements
}
cds.setReadmeTemplate(templatename, templateValue)
with open("ReadMe", "w") as fd:
cds.makeReadMe(out=fd)
# ------------------------------------------------------------
def plot_fit(self, title=None, detrend=False,
binwidth=0.005, add_gaps=True, gap_tol=0.005,
data_offset=None, res_offset=None, phase0=None,
xlim=None, data_ylim=None, res_ylim=None, renorm=True,
show_gp=True, figsize=None, fontsize=12):
"""
If there are gaps in the data longer than gap_tol phase units and
add_gaps is True then put a gap in the lines used to plot the fit. The
transit/eclipse model is plotted using a thin line in these gaps.
Binned data are plotted in phase bins of width binwidth. Set
binwidth=False to disable this feature.
The data are plotted in the range phase0 to 1+phase0.
The offsets between the light curves from different datasets can be
set using the data_offset keyword. The offset between the residuals
from different datasets can be set using the res_offset keyword. The
y-axis limits for the data and residuals plots can be set using the
data_ylim and res_ylim keywords, e.g. res_ylim = (-0.001,0.001).
        With renorm=True and detrend=False, each data set is re-scaled by
        the corresponding value of c_01, c_02, etc. for that data set.
For fits to datasets containing a mixture of transits and eclipses,
data_offset and res_offset can be 2-tuples with the offsets for
transits and eclipses, respectively.
For fits to datasets containing a mixture of transits and eclipses,
        the x-axis and y-axis limits for the data plots are specified in the
        form ((min_left,max_left),(min_right,max_right))
For fits that include a Gaussian process (GP), use show_gp=True to
plot residuals that show the GP fit to the residuals, otherwise the
        residuals from the fit including the GP are shown.
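        Example (keyword values are illustrative only):
            fig = M.plot_fit(detrend=True, binwidth=0.002,
                             res_ylim=(-0.005, 0.005))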
"""
n = len(self.datasets)
par = self.__parbest__
result = self.__result__
P = par['P'].value
T_0 = par['T_0'].value
ph_fluxes = [] # Phases for observed/detrended fluxes
fluxes = [] # observed/detrended fluxes
resids = [] # Residuals for plotting (with correlated noise)
ph_fits = [] # For plotting fits with lines - may contain np.nan
fits = [] # Best fit - may contain np.nan to get gaps
rednoise = [] # SHO GP fits (with np.nan for gaps)
# Phases for models same as ph_fits. May contain np.nans for gaps
lcmodels = [] # Model fluxes with transit+/eclipse effects only
ph_grid = [] # Grid of phases across one cycle
        lc_grid = []    # Models evaluated across ph_grid
iqrmax = 0
phmin = np.inf
phmax = -np.inf
if phase0 is None: phase0 = -0.25
for j,dataset in enumerate(self.datasets):
modpar = copy(self.__modpars__[j])
ph = phaser(dataset.lc['time'], P, T_0, phase0)
phmin = min([min(ph), phmin])
phmax = max([max(ph), phmax])
ph_fluxes.append(ph)
if detrend:
flux = self.__fluxes_det__[j]
fit = copy(self.__fluxes_sys__[j] + self.__fluxes_sho__[j])
else:
if renorm:
if f'c_{j+1:02d}' in self.__parbest__:
c = self.__parbest__[f'c_{j+1:02d}'].value
else:
c = 1
else:
c = 1
flux = copy(dataset.lc['flux'])/c
fit = copy(self.__fluxes_fit__[j])/c
fluxes.append(flux)
iqrmax = np.max([iqrmax, iqr(flux)])
f_sho = self.__fluxes_sho__[j]
if show_gp:
resids.append(flux - fit + f_sho)
else:
resids.append(flux - fit)
# Insert np.nan where there are gaps in phase so that the plotted
# lines have a break
lcmodel = copy(self.__fluxes_sys__[j])
g = np.where((ph[1:]-ph[:-1]) > gap_tol)[0]
if add_gaps and len(g) > 0:
phmid = 0.5*(ph[1:]+ph[:-1])
ph = np.insert(ph, g+1, phmid[g])
fit = np.insert(fit, g+1, np.nan)
lcmodel = np.insert(lcmodel, g+1, np.nan)
f_sho = np.insert(f_sho, g+1, np.nan)
ph_fits.append(ph)
fits.append(fit)
lcmodels.append(lcmodel)
rednoise.append(f_sho)
t0 = T_0+phase0*P
tp = np.linspace(t0,t0+P,65536,endpoint=False)
ph_grid.append(phaser(tp,P,T_0,phase0))
model = self.__models__[j]
k = f'_{self.__fittype__}_func'
lc_grid.append(model.eval_components(params=modpar,t=tp)[k])
plt.rc('font', size=fontsize)
if self.__fittype__ in ['eblm', 'planet']:
f_c = par['f_c'].value
f_s = par['f_s'].value
ecc = f_c**2 + f_s**2
omdeg = np.arctan2(f_s, f_c)*180/np.pi
sini = par['sini'].value
ph_sec = eclipse_phase(P,sini,ecc,omdeg)
is_ecl = [min(abs(ph-ph_sec)) < 0.05 for ph in ph_fluxes]
n_ecl = sum(is_ecl)
n_tr = n-n_ecl
if figsize is None:
figsize = (8, 2+1.5*max(n_ecl,n_tr))
fig,axes=plt.subplots(nrows=2,ncols=2, figsize=figsize,
gridspec_kw={'height_ratios':[2,1]})
if data_offset is None:
doff_tr,doff_ecl = 2.5*iqrmax,2.5*iqrmax
else:
if np.isscalar(data_offset):
doff_tr,doff_ecl = data_offset, data_offset
else:
doff_tr,doff_ecl = data_offset
phmin_tr, phmax_tr = np.inf, -np.inf
phmin_ecl, phmax_ecl = np.inf, -np.inf
j_ecl, j_tr = 0, 0
for (ph,flx,i) in zip(ph_fluxes, fluxes, is_ecl):
if i:
off = j_ecl*doff_ecl
j_ecl += 1
ax = axes[0,1]
phmin_ecl = min([min(ph), phmin_ecl])
phmax_ecl = max([max(ph), phmax_ecl])
else:
off = j_tr*doff_tr
j_tr += 1
ax = axes[0,0]
phmin_tr = min([min(ph), phmin_tr])
phmax_tr = max([max(ph), phmax_tr])
ax.plot(ph, flx+off,'o',c='skyblue',ms=2, zorder=1)
if binwidth:
r_, f_, e_, n_ = lcbin(ph, flx, binwidth=binwidth)
ax.errorbar(r_, f_+off, yerr=e_, fmt='o',
c='midnightblue', ms=5, capsize=2, zorder=3)
j_ecl, j_tr = 0, 0
for (ph,fit,lcmod,i) in zip(ph_fits,fits,lcmodels,is_ecl):
if i:
off = j_ecl*doff_ecl
j_ecl += 1
ax = axes[0,1]
else:
off = j_tr*doff_tr
j_tr += 1
ax = axes[0,0]
k = np.argsort(ph)
ax.plot(ph[k],fit[k]+off,c='saddlebrown', lw=2, zorder=4)
if not detrend:
ax.plot(ph[k],lcmod[k]+off,c='forestgreen',zorder=2,lw=2)
j_ecl, j_tr = 0, 0
for (ph, fp, i) in zip(ph_grid, lc_grid, is_ecl):
if i:
off = j_ecl*doff_ecl
j_ecl += 1
ax = axes[0,1]
else:
off = j_tr*doff_tr
j_tr += 1
ax = axes[0,0]
k = np.argsort(ph)
ax.plot(ph[k],fp[k]+off,c='forestgreen', lw=1, zorder=2)
roff = 10*np.max(result.rms)
if res_offset is None:
roff_tr,roff_ecl = roff,roff
else:
if np.isscalar(res_offset):
roff_tr,roff_ecl = res_offset, res_offset
else:
roff_tr,roff_ecl = res_offset
j_ecl = 0
j_tr = 0
for (ph,res,i) in zip(ph_fluxes,resids,is_ecl):
if i:
off = j_ecl*roff_ecl
j_ecl += 1
ax = axes[1,1]
else:
off = j_tr*roff_tr
j_tr += 1
ax = axes[1,0]
ax.plot(ph, res+off,'o',c='skyblue',ms=2)
ax.axhline(off, color='saddlebrown',ls=':')
if binwidth:
r_, f_, e_, n_ = lcbin(ph, res, binwidth=binwidth)
ax.errorbar(r_, f_+off, yerr=e_,
fmt='o', c='midnightblue', ms=5, capsize=2)
if show_gp:
j_ecl, j_tr = 0, 0
for ph,rn,i in zip(ph_fits, rednoise, is_ecl):
if i:
off = j_ecl*roff_ecl
j_ecl += 1
ax = axes[1,1]
else:
off = j_tr*roff_tr
j_tr += 1
ax = axes[1,0]
ax.plot(ph, rn+off, lw=2, c='saddlebrown')
axes[0,0].set_xticklabels([])
axes[0,1].set_xticklabels([])
if xlim is None:
pad = (phmax_tr-phmin_tr)/10
pht = max([abs(phmin_tr), abs(phmax_tr)])
axes[0,0].set_xlim(-pht-pad,pht+pad)
axes[1,0].set_xlim(-pht-pad,pht+pad)
pad = (phmax_ecl-phmin_ecl)/10
axes[0,1].set_xlim(phmin_ecl-pad,phmax_ecl+pad)
axes[1,1].set_xlim(phmin_ecl-pad,phmax_ecl+pad)
else:
axes[0,0].set_xlim(*xlim[0])
axes[1,0].set_xlim(*xlim[0])
axes[0,1].set_xlim(*xlim[1])
axes[1,1].set_xlim(*xlim[1])
if data_ylim is not None:
axes[0,0].set_ylim(*data_ylim[0])
axes[0,1].set_ylim(*data_ylim[1])
if detrend:
axes[0,0].set_ylabel('Flux-trend')
else:
axes[0,0].set_ylabel('Flux')
axes[0,0].set_title(title)
if res_ylim is None:
if roff_tr != 0:
axes[1,0].set_ylim(np.sort([-0.75*roff_tr,
roff_tr*(n_tr-0.25)]))
else:
axes[1,0].set_ylim(-roff, roff)
if roff_ecl != 0:
ax = axes[1,1]
ax.set_ylim(np.sort([-0.75*roff_ecl,roff_ecl*(n_ecl-0.25)]))
else:
axes[1,1].set_ylim(-roff, roff)
else:
axes[1,0].set_ylim(*res_ylim[0])
axes[1,1].set_ylim(*res_ylim[1])
axes[1,0].set_xlabel('Phase')
axes[1,1].set_xlabel('Phase')
axes[1,0].set_ylabel('Residual')
else: # Not EBLM or Planet
if figsize is None:
figsize = (8, 2+1.5*n)
fig,ax=plt.subplots(nrows=2,sharex=True, figsize=figsize,
gridspec_kw={'height_ratios':[2,1]})
doff = 2.5*iqrmax if data_offset is None else data_offset
for j, (ph, flx) in enumerate(zip(ph_fluxes, fluxes)):
off = j*doff
ax[0].plot(ph, flx+off,'o',c='skyblue',ms=2, zorder=1)
if binwidth:
r_, f_, e_, n_ = lcbin(ph, flx, binwidth=binwidth)
ax[0].errorbar(r_, f_+off, yerr=e_, fmt='o',
c='midnightblue', ms=5, capsize=2, zorder=3)
for j, (ph,fit,lcmod) in enumerate(zip(ph_fits,fits,lcmodels)):
off = j*doff
k = np.argsort(ph)
ax[0].plot(ph[k],fit[k]+off,c='saddlebrown', lw=2, zorder=4)
if not detrend:
ax[0].plot(ph[k],lcmod[k]+off,c='forestgreen',zorder=2,lw=2)
for j, (ph, fp) in enumerate(zip(ph_grid, lc_grid)):
off = j*doff
k = np.argsort(ph)
ax[0].plot(ph[k],fp[k]+off,c='forestgreen', lw=1, zorder=2)
roff = 10*np.max(result.rms) if res_offset is None else res_offset
for j, (ph,res) in enumerate(zip(ph_fluxes, resids)):
off=j*roff
ax[1].plot(ph, res+off,'o',c='skyblue',ms=2)
ax[1].axhline(off, color='saddlebrown',ls=':')
if binwidth:
r_, f_, e_, n_ = lcbin(ph, res, binwidth=binwidth)
ax[1].errorbar(r_, f_+off, yerr=e_,
fmt='o', c='midnightblue', ms=5, capsize=2)
if show_gp:
for j, (ph,rn) in enumerate(zip(ph_fits, rednoise)):
off=j*roff
ax[1].plot(ph, rn+off, lw=2, c='saddlebrown')
if xlim is None:
pad = (phmax-phmin)/10
if self.__fittype__ == "transit":
pht = max([abs(phmin), abs(phmax)])
ax[1].set_xlim(-pht-pad,pht+pad)
else:
ax[1].set_xlim(phmin-pad,phmax+pad)
else:
ax[1].set_xlim(*xlim)
if data_ylim is not None: ax[0].set_ylim(*data_ylim)
if detrend:
ax[0].set_ylabel('Flux-trend')
else:
ax[0].set_ylabel('Flux')
ax[0].set_title(title)
if res_ylim is None:
if roff != 0:
ax[1].set_ylim(np.sort([-0.75*roff, roff*(n-0.25)]))
else:
rms = np.max(result.rms)
ax[1].set_ylim(-5*rms, 5*rms)
else:
ax[1].set_ylim(*res_ylim)
ax[1].set_xlabel('Phase')
ax[1].set_ylabel('Residual')
fig.tight_layout()
return fig
# ------------------------------------------------------------
def tzero(self, BJD_0, P):
'''
Return the time of mid-transit closest to the centre of the combined
dataset as BJD-2457000, i.e., on the same time scale as the data.
:param BJD_0: BJD of mid-transit - float or ufloat
:param P: orbital period in days - float or ufloat
        :returns: T_0 - time of mid-transit, BJD-2457000, float or ufloat
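        Example (input values are illustrative only):
            T_0 = M.tzero(ufloat(2455000.1234, 0.0002), ufloat(3.141, 1e-5))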
'''
t = np.mean([d.lc['time'].mean() for d in self.datasets])
c = (t-BJD_0+2457000)/P
if isinstance(c, UFloat): c = c.n
return BJD_0-2457000 + round(c)*P
# ------------------------------------------------------------
def massradius(self, m_star=None, r_star=None, K=None, q=0,
jovian=True, plot_kws=None, return_samples=False,
verbose=True):
'''
Use the results from the previous transit light curve fit to estimate
the mass and/or radius of the planet.
Requires that stellar properties are supplied using the keywords
m_star and/or r_star. If only one parameter is supplied then the other
is estimated using the stellar density derived from the transit light
        curve analysis. The planet mass can only be estimated if the
semi-amplitude of its orbit (in m/s) is supplied using the keyword
argument K. See pycheops.funcs.massradius for valid formats to specify
these parameters.
        N.B. by default, the mean stellar density calculated from the light
        curve fit uses the approximation q->0, where q=m_p/m_star is
        the mass ratio. If this approximation is not valid then supply an
        estimate of the mass ratio using the keyword argument q.
Output units are selected using the keyword argument jovian=True
(Jupiter mass/radius) or jovian=False (Earth mass/radius).
See pycheops.funcs.massradius for options available using the plot_kws
keyword argument.
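        Example (stellar and orbital values are illustrative only):
            M.massradius(m_star=(1.0, 0.05), r_star=(1.0, 0.02),
                         K=ufloat(25.0, 1.5), jovian=False)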
'''
flatchain = self.__sampler__.get_chain(flat=True)
vn = self.__result__.var_names
pars = self.__result__.params
# Generate value(s) from previous emcee sampler run
def _v(p):
if (p in vn):
v = flatchain[:,vn.index(p)]
elif p in pars.valuesdict().keys():
v = pars[p].value
else:
raise AttributeError(
'Parameter {} missing from dataset'.format(p))
return v
# Generate a sample of values for a parameter
def _s(x, nm=100_000):
if isinstance(x,float) or isinstance(x,int):
return np.full(nm, x, dtype=float)
elif isinstance(x, UFloat):
return np.random.normal(x.n, x.s, nm)
elif isinstance(x, np.ndarray):
if len(x) == nm:
return x
elif len(x) > nm:
return x[random_sample(range(len(x)), nm)]
else:
                    return x[(np.random.random(nm)*len(x)).astype(int)]
elif isinstance(x, tuple):
if len(x) == 2:
return np.random.normal(x[0], x[1], nm)
elif len(x) == 3:
raise NotImplementedError
raise ValueError("Unrecognised type for parameter values")
# Generate samples for derived parameters not specified by the user
# from the chain rather than the summary statistics
k = np.sqrt(_v('D'))
b = _v('b')
W = _v('W')
P = _v('P')
aR = np.sqrt((1+k)**2-b**2)/W/np.pi
sini = np.sqrt(1 - (b/aR)**2)
f_c = _v('f_c')
f_s = _v('f_s')
ecc = f_c**2 + f_s**2
_q = _s(q, len(flatchain))
rho_star = rhostar(1/aR,P,_q)
# N.B. use of np.abs to cope with values with large errors
if r_star is None and m_star is not None:
_m = np.abs(_s(m_star, len(flatchain)))
r_star = (_m/rho_star)**(1/3)
if m_star is None and r_star is not None:
_r = np.abs(_s(r_star, len(flatchain)))
m_star = rho_star*_r**3
if verbose:
print('[[Mass/radius]]')
if plot_kws is None:
plot_kws = {}
return massradius(P=P, k=k, sini=sini, ecc=ecc,
m_star=m_star, r_star=r_star, K=K, aR=aR,
jovian=jovian, return_samples=return_samples,
verbose=verbose, **plot_kws)
#------
def save(self, tag="", overwrite=False):
"""
Save the current MultiVisit instance as a pickle file
:param tag: string to tag different versions of the same MultiVisit
:param overwrite: set True to overwrite existing version of file
:returns: pickle file name
"""
fl = self.target.replace(" ","_")+'_'+tag+'.multivisit'
if os.path.isfile(fl) and not overwrite:
msg = f'File {fl} exists. If you mean to replace it then '
msg += 'use the argument "overwrite=True".'
raise OSError(msg)
with open(fl, 'wb') as fp:
pickle.dump(self, fp, pickle.HIGHEST_PROTOCOL)
return fl
#------
@classmethod
def load(self, filename):
"""
Load a MultiVisit from a pickle file
:param filename: pickle file name
:returns: MultiVisit object
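        Example (file name is illustrative):
            M = MultiVisit.load('WASP-12_fit1.multivisit')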
"""
with open(filename, 'rb') as fp:
self = pickle.load(fp)
return self
#------
def __getstate__(self):
state = self.__dict__.copy()
# Replace lmfit models with their string representation
if '__models__' in state.keys():
state['__models__'] = [m.__repr__() for m in state['__models__']]
else:
state['__models__'] = []
return state
#------
def __setstate__(self, state):
self.__dict__.update(state)
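        # __getstate__ stores the lmfit models as strings so that the
        # object can be pickled, so rebuild the model for each dataset
        # from its light curve and detrending basis vectors here.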
models = []
for model_repr,d in zip(self.__models__, self.datasets):
t = d.lc['time']
try:
smear = d.lc['smear']
except KeyError:
smear = np.zeros_like(t)
try:
deltaT = d.lc['deltaT']
except KeyError:
deltaT = np.zeros_like(t)
if d.__scale__:
factor_model = FactorModel(
dx = _make_interp(t,d.lc['xoff'], scale='range'),
dy = _make_interp(t,d.lc['yoff'], scale='range'),
bg = _make_interp(t,d.lc['bg'], scale='range'),
contam = _make_interp(t,d.lc['contam'], scale='range'),
smear = _make_interp(t,smear, scale='range'),
deltaT = _make_interp(t,deltaT),
extra_basis_funcs=d.__extra_basis_funcs__)
else:
factor_model = FactorModel(
dx = _make_interp(t,d.lc['xoff']),
dy = _make_interp(t,d.lc['yoff']),
bg = _make_interp(t,d.lc['bg']),
contam = _make_interp(t,d.lc['contam']),
smear = _make_interp(t,smear),
deltaT = _make_interp(t,deltaT),
extra_basis_funcs=d.__extra_basis_funcs__)
if self.__fittype__ == 'transit':
model = TransitModel()*factor_model
elif self.__fittype__ == 'eclipse':
model = EclipseModel()*factor_model
elif self.__fittype__ == 'eblm':
model = EBLMModel()*factor_model
elif self.__fittype__ == 'planet':
model = PlanetModel()*factor_model
if 'glint_func' in model_repr:
delta_t = d._old_bjd_ref - d.bjd_ref
model += Model(_glint_func, independent_vars=['t'],
f_theta=d.f_theta, f_glint=d.f_glint, delta_t=delta_t)
models.append(model)
self.__models__ = models
| 92,174 | 40.149554 | 80 | py |
pycheops | pycheops-master/pycheops/planetproperties.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
# 2020 Prof Andrew Cameron, University of St Andrews
# 2020 Dr Thomas Wilson, University of St Andrews
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
PlanetProperties
================
Object class to obtain/store observed properties of a planet and to infer
parameters such as surface gravity and density.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from astropy.table import Table
from astropy.coordinates import SkyCoord, match_coordinates_sky
import requests
from pycheops.core import load_config
from pathlib import Path
from time import localtime, mktime
from uncertainties import ufloat, UFloat
from numpy.random import normal
import astropy.units as u
from pycheops import StarProperties
from uncertainties.umath import sqrt as usqrt
from uncertainties.umath import sin as usin
from uncertainties.umath import cos as ucos
from uncertainties.umath import atan2 as uatan2
import os
import warnings
from contextlib import redirect_stderr
from dace_query.cheops import Cheops
class PlanetProperties(object):
"""
CHEOPS PlanetProperties object
The observed properties T0, P, ecosw, esinw, depth (ppm), width (days) and
K (km/s) are obtained from one of the following sources (listed here in
priority order).
- specified by the user
- DACE planet table (unless query_dace=False)
- TEPCat (unless query_tepcat=False)
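    Example (the planet name is illustrative):
        >>> from pycheops.planetproperties import PlanetProperties
        >>> p = PlanetProperties('KELT-11b')
        >>> print(p)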
"""
def __init__(self, identifier, force_download=False, configFile=None,
query_dace=True, query_tepcat=True, T0=None, P=None, ecosw=None,
esinw=None, depth=None, width=None, K=None, verbose=True):
self.identifier = identifier
self.T0 = T0
self.P = P
self.ecosw = ecosw
self.esinw = esinw
self.depth = depth
self.width = width
self.K = K
config = load_config(configFile)
_cache_path = config['DEFAULT']['data_cache_path']
if query_dace:
f = {"obj_id_planet_catname":{"contains":[identifier]}}
planet_data = Cheops.query_catalog('planet', filters=f)
target = planet_data['obj_id_planet_catname']
if len(target) == 1:
if verbose:
                    print('Target', target[0], 'found in DACE-Planets.')
T0_val = planet_data['obj_trans_t0_bjd'][0]
T0_err = planet_data['obj_trans_t0_bjd_err'][0]
P_val = planet_data['obj_trans_period_days'][0]
P_err = planet_data['obj_trans_period_days_err'][0]
ecosw_val = planet_data['obj_trans_ecosw'][0]
ecosw_err = planet_data['obj_trans_ecosw_err'][0]
esinw_val = planet_data['obj_trans_esinw'][0]
esinw_err = planet_data['obj_trans_esinw_err'][0]
depth_val = planet_data['obj_trans_depth_ppm'][0]
depth_err = planet_data['obj_trans_depth_ppm_err'][0]
width_val = planet_data['obj_trans_duration_days'][0]
width_err = planet_data['obj_trans_duration_days_err'][0]
# 'obj_rv_k_mps' is in km/s so need to convert to m/s
# Note use of float() to avoid problems with 'NaN' values
K_val = float(planet_data['obj_rv_k_mps'][0])*1000
K_err = float(planet_data['obj_rv_k_mps_err'][0])*1000
# Still need to get errors on these parameters and replace
# np.nan with None
try:
self.T0 = ufloat(float(T0_val),float(T0_err))
self.T0_note = "DACE-Planets"
except:
self.T0 = None
try:
self.P = ufloat(float(P_val),float(P_err))
self.P_note = "DACE-Planets"
except:
self.P = None
try:
self.ecosw=ufloat(float(ecosw_val),float(ecosw_err))
self.ecosw_note = "DACE-Planets"
except:
self.ecosw = None
try:
self.esinw=ufloat(float(esinw_val),float(esinw_err))
self.esinw_note = "DACE-Planets"
except:
self.esinw = None
try:
self.depth=ufloat(float(depth_val),float(depth_err))
self.depth_note = "DACE-Planets"
except:
self.depth = None
try:
self.width=ufloat(float(width_val),float(width_err))
self.width_note = "DACE-Planets"
except:
self.width = None
try:
self.K=ufloat(float(K_val),float(K_err))
self.K_note = "DACE-Planets"
except:
self.K = None
# Tidy up any missing values stored as NaNs in DACE
                if self.depth is not None and np.isnan(self.depth.n):
                    self.depth = None
                if self.width is not None and np.isnan(self.width.n):
                    self.width = None
                if self.ecosw is not None and np.isnan(self.ecosw.n):
                    self.ecosw = None
                if self.esinw is not None and np.isnan(self.esinw.n):
                    self.esinw = None
                if self.K is not None and np.isnan(self.K.n):
                    self.K = None
elif len(target) < 1:
print('No matching planet in DACE-Planets.')
if verbose:
print('List of valid planet_id keys:')
l = Cheops.query_catalog('planet')['obj_id_planet_catname']
print(l)
else:
                print('Target', identifier, 'not defined uniquely:', target)
if query_tepcat:
TEPCatObsPath = Path(_cache_path,'observables.csv')
download_tepcat = False
if force_download:
download_tepcat = True
elif TEPCatObsPath.is_file():
file_age = mktime(localtime())-os.path.getmtime(TEPCatObsPath)
if file_age > int(config['TEPCatObs']['update_interval']):
download_tepcat = True
else:
download_tepcat = False
else:
download_tepcat = True
if download_tepcat:
try:
url = config['TEPCatObs']['download_url']
except:
raise KeyError("TEPCatObs table not found in config file."
" Run core.setup_config")
try:
req=requests.post(url)
except:
warnings.warn("Failed to update TEPCatObs from server")
else:
with open(TEPCatObsPath, 'wb') as file:
file.write(req.content)
if verbose:
print('TEPCat data downloaded from \n {}'.format(url))
# Awkward table to deal with because of repeated column names
T = Table.read(TEPCatObsPath,format='ascii.no_header')
hdr = list(T[0])
targets=np.array(T[T.colnames[hdr.index('System')]][1:],
dtype=np.str_)
RAh=np.array(T[T.colnames[hdr.index('RAh')]][1:],
dtype=np.str_)
RAm=np.array(T[T.colnames[hdr.index('RAm')]][1:],
dtype=np.str_)
RAs=np.array(T[T.colnames[hdr.index('RAs')]][1:],
dtype=np.str_)
Decd=np.array(T[T.colnames[hdr.index('Decd')]][1:],
dtype=np.str_)
Decm=np.array(T[T.colnames[hdr.index('Decm')]][1:],
dtype=np.str_)
Decs=np.array(T[T.colnames[hdr.index('Decs')]][1:],
dtype=np.str_)
T0_vals=np.array(T[T.colnames[hdr.index('T0(HJDorBJD)')]][1:],
dtype=float)
T0_errs=np.array(T[T.colnames[hdr.index('T0err')]][1:],
dtype=float)
periods=np.array(T[T.colnames[hdr.index('Period(day)')]][1:],
dtype=float)
perrors=np.array(T[T.colnames[hdr.index('Perioderr')]][1:],
dtype=float)
lengths=np.array(T[T.colnames[hdr.index('length')]][1:],
dtype=float)
depths =np.array(T[T.colnames[hdr.index('depth')]][1:],
dtype=float)
ok = [t.startswith(identifier.replace(' ','_')) for t in targets]
if sum(ok) > 1:
print('Matching planet names: ', *targets[ok])
raise ValueError('More than one planet matches identifier.')
elif sum(ok) == 1:
T0_val=T0_vals[ok][0]
T0_err=T0_errs[ok][0]
period=periods[ok][0]
perror=perrors[ok][0]
length=lengths[ok][0]
depth_val=depths[ok][0]*10000
else:
try:
tar_coords = SkyCoord.from_name(identifier)
all_coords = []
for index, i in enumerate(RAh):
all_coords.append(
RAh[index]+":"+RAm[index]+":"+RAs[index]+" "+
Decd[index]+":"+Decm[index]+":"+Decs[index])
TEPCat_coords = SkyCoord(all_coords, frame="icrs",
unit=(u.hourangle, u.deg))
ok = tar_coords.separation(TEPCat_coords) < 5*u.arcsec
if sum(ok) > 1:
print('Matching planets: ', *targets[ok])
raise ValueError(
'More than one planet matches coordinates.')
elif sum(ok)==1:
T0_val=T0_vals[ok][0]
T0_err=T0_errs[ok][0]
period=periods[ok][0]
perror=perrors[ok][0]
length=lengths[ok][0]
depth_val=depths[ok][0]*10000
else:
print('No matching planet in TEPCat.')
except:
print('No coordinate match for planet in TEPCat.')
if sum(ok)==1:
if self.T0 == None:
try:
self.T0 = ufloat(float(T0_val),float(T0_err))
self.T0_note = "TEPCat"
except:
self.T0 = None
if self.P == None:
try:
self.P = ufloat(float(period),float(perror))
self.P_note = "TEPCat"
except:
self.P = None
if self.depth == None:
try:
self.depth=ufloat(float(depth_val),1e2)
self.depth_note = "TEPCat"
except:
self.depth = None
if self.width == None:
try:
self.width=ufloat(float(length),0.01)
self.width_note = "TEPCat"
except:
self.width = None
# User defined values
if T0:
if isinstance(T0, UFloat):
self.T0 = T0
self.T0_note = "User"
else:
raise ValueError("T0 keyword is not ufloat")
if P:
if isinstance(P, UFloat):
self.P = P
self.P_note = "User"
else:
raise ValueError("P keyword is not ufloat")
if ecosw:
if isinstance(ecosw, UFloat):
self.ecosw = ecosw
self.ecosw_note = "User"
else:
raise ValueError("ecosw keyword is not ufloat")
if esinw:
if isinstance(esinw, UFloat):
self.esinw = esinw
self.esinw_note = "User"
else:
raise ValueError("esinw keyword is not ufloat")
if depth:
if isinstance(depth, UFloat):
self.depth = depth
self.depth_note = "User"
else:
raise ValueError("depth keyword is not ufloat")
if width:
if isinstance(width, UFloat):
self.width = width
self.width_note = "User"
else:
raise ValueError("width keyword is not ufloat")
if K:
if isinstance(K, UFloat):
self.K = K
self.K_note = "User"
else:
raise ValueError("K keyword is not ufloat")
# Eccentricity and omega from ecosw and esinw
self.ecc = None
self.omega = None
self.f_s = None
self.f_c = None
if self.ecosw and self.esinw:
ecosw = self.ecosw
esinw = self.esinw
ecc = usqrt(ecosw*ecosw+esinw*esinw)
if ecc.n != 0:
omega = uatan2(esinw,ecosw)
f_s = usqrt(ecc)*usin(omega)
f_c = usqrt(ecc)*ucos(omega)
elif ecc.s != 0:
# Work-around to avoid NaNs for e=0 with finite error.
eps = .0001
ecc = usqrt((ecosw+eps)**2+(esinw+eps)**2)-eps
omega = None
f_s = ufloat(0,np.sqrt(esinw.s))
f_c = ufloat(0,np.sqrt(ecosw.s))
else:
ecc = None
omega = None
f_s = None
f_c = None
self.ecc = ecc
self.ecc_note = "Derived"
self.omega = omega
self.omega_note = "Derived"
self.f_s = f_s
self.f_s_note = "Derived"
self.f_c = f_c
self.f_c_note = "Derived"
##############################################################################################################
### Calculate system properties (b, aR (or just a), and planetary mass, radius, density, surface gravity, and
### equilibrium temperature) when we can import stellar radius/mass from StarProperties
##############################################################################################################
# starproperties = StarProperties(identifier)
# aR = funcs.a_rsun(P, starproperties.M)*astrocon.R_sun.value/starproperties.R ### in stellar radii
# b_pl = np.sqrt((1+np.sqrt(D*1.e-6))**2 - (aR*starproperties.R*astrocon.R_sun.value*W*24*np.pi/P)**2)
# g_pl = funcs.g_2(np.sqrt(D*1.e-6)/aR,P,K/1000,ecc=ecc) ### in m/s^2
# m_pl = funcs.m_comp(funcs.f_m(P,K/1000,ecc=ecc), starproperties.M, sini=usqrt(1-(b_pl/aR)**2))/astrocon.M_sun.value ### in kg
# r_pl = np.sqrt(D*1.e-6)*starproperties.R*astrocon.R_sun.value ### in m
# p_pl = m_pl/((4*np.pi/3)*r_pl**3) ### in kg/m^3
# T_pl = starproperties.T*np.sqrt(1/(2*aR))*(1-A_b)**(1/4)
def __repr__(self):
s = 'Identifier : {}\n'.format(self.identifier)
if self.T0:
s += 'T0 : {:12.4f} +/- {:0.4f} BJD [{}]\n'.format(
self.T0.n, self.T0.s,self.T0_note)
if self.P:
s += 'P : {:13.7f} +/- {:0.7f} days [{}]\n'.format(
self.P.n, self.P.s, self.P_note)
if self.depth:
s += 'depth : {:10.4f} +/- {:0.4f} ppm [{}]\n'.format(
self.depth.n, self.depth.s, self.depth_note)
if self.width:
s += 'width : {:7.4f} +/- {:0.4f} days [{}]\n'.format(
self.width.n, self.width.s, self.width_note)
if self.K:
s += 'K : {:7.4f} +/- {:0.4f} m/s [{}]\n'.format(
self.K.n, self.K.s, self.K_note)
if self.ecosw:
s += 'ecosw : {:+7.4f} +/- {:0.4f} [{}]\n'.format(
self.ecosw.n, self.ecosw.s, self.ecosw_note)
if self.esinw:
s += 'esinw : {:+7.4f} +/- {:0.4f} [{}]\n'.format(
self.esinw.n, self.esinw.s, self.esinw_note)
if self.ecc:
s += 'ecc : {:6.4f} +/- {:0.4f} [{}]\n'.format(
self.ecc.n, self.ecc.s, self.ecc_note)
if self.omega:
s += 'omega : {:+8.5f} +/- {:0.5f} radian [{}]\n'.format(
self.omega.n, self.omega.s, self.omega_note)
if self.f_c:
s += 'f_c : {:+7.4f} +/- {:0.4f} [{}]\n'.format(
self.f_c.n, self.f_c.s, self.f_c_note)
if self.f_s:
s += 'f_s : {:+7.4f} +/- {:0.4f} [{}]\n'.format(
self.f_s.n, self.f_s.s, self.f_s_note)
return s
| 18,041 | 41.652482 | 135 | py |
pycheops | pycheops-master/pycheops/quantities.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
quantities
==========
Nominal values of solar and planetary constants from IAU Resolution B3 [1]_
plus related constants as astropy quantities.
Masses in SI units are derived using the 2014 CODATA value for the Newtonian
constant, G=6.67408E-11 m3.kg-1.s-2.
The following conversion constants are defined.
Solar conversion constants
--------------------------
* R_SunN - solar radius
* S_SunN - total solar irradiance
* L_SunN - solar luminosity
* Teff_SunN - solar effective temperature
* GM_SunN - solar mass parameter
* M_SunN - solar mass derived from GM_SunN and G_2014
* V_SunN - solar volume = (4.pi.R_SunN**3/3)
Planetary conversion constants
------------------------------
* R_eEarthN - equatorial radius of the Earth
* R_pEarthN - polar radius of the Earth
* R_eJupN - equatorial radius of Jupiter
* R_pJupN - polar radius of Jupiter
* GM_EarthN - terrestrial mass parameter
* GM_JupN - jovian mass parameter
* M_EarthN - mass of the Earth from GM_EarthN and G_2014
* M_JupN - mass of Jupiter from GM_JupN and G_2014
* V_EarthN - volume of the Earth (4.pi.R_eEarthN^2.R_pEarthN/3)
* V_JupN - volume of Jupiter (4.pi.R_eJupN^2.R_pJupN/3)
* R_EarthN - volume-average radius of the Earth (3.V_EarthN/4.pi)^(1/3)
* R_JupN - volume-average radius of Jupiter (3.V_JupN/4.pi)^(1/3)
Related constants
-----------------
* G_2014 - 2014 CODATA value for the Newtonian constant
* mean_solar_day - 86,400.002 seconds [2]_
* au - IAU 2009 value for astronomical constant in metres. [3]_
* pc - 1 parsec = 3600*au*180/pi
Fundamental constants
---------------------
* c - speed of light in m.s-1 [3]_
Example
-------
Calculate the density relative to Jupiter for a planet 1/10 the radius of
the Sun with a mass 1/1000 of a solar mass. Note that we use the
volume-average radius for Jupiter in this case::
>>> from pycheops.quantities import M_SunN, R_SunN, M_JupN, R_JupN
>>> M_planet_Jup = M_SunN/1000 / M_JupN
>>> R_planet_Jup = R_SunN/10 / R_JupN
>>> rho_planet_Jup = M_planet_Jup / (R_planet_Jup**3)
>>> print ("Planet mass = {:.3f} M_Jup".format(M_planet_Jup))
>>> print ("Planet radius = {:.3f} R_Jup".format(R_planet_Jup))
>>> print ("Planet density = {:.3f} rho_Jup".format(rho_planet_Jup))
Planet mass = 1.048 M_Jup
Planet radius = 0.995 R_Jup
Planet density = 1.063 rho_Jup
.. rubric:: References
.. [1] https://www.iau.org/static/resolutions/IAU2015_English.pdf
.. [2] http://tycho.usno.navy.mil/leapsec.html
.. [3] Luzum et al., Celest Mech Dyn Astr (2011) 110:293-304
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import astropy.units as u
__all__ = [ 'G_2014',
'R_SunN','S_SunN','L_SunN','Teff_SunN','GM_SunN','M_SunN','V_SunN',
'R_eEarthN','R_pEarthN','GM_EarthN','M_EarthN','V_EarthN','R_EarthN',
'R_eJupN','R_pJupN','GM_JupN','M_JupN','V_JupN','R_JupN',
'mean_solar_day','au','pc']
from math import pi
m = u.m
s = u.s
kg = u.kg
W = u.W
K = u.K
# Work-around for generation of documents using sphinx
try:
_ = 1*m
except:
m = 1
s = 1
kg = 1
W = 1
K = 1
G_2014 = 6.67408E-11 *m**3/kg/s**2 # 2014 CODATA value
R_SunN = 6.957E8 *m # Solar radius
S_SunN = 1361 *W/m**2 # Total solar irradiance
L_SunN = 3.828E26 *W # Solar luminosity
Teff_SunN = 5772 *K # Solar effective temperature
GM_SunN = 1.3271244E20 *m**3/s**2 # Solar mass parameter
M_SunN = GM_SunN/G_2014 # Solar mass
V_SunN = 4*pi*R_SunN**3/3 # Solar volume
R_eEarthN = 6.3781E6 *m # Equatorial radius of the Earth
R_pEarthN = 6.3568E6 *m # Polar radius of the Earth
R_eJupN = 7.1492E7 *m # Equatorial radius of Jupiter
R_pJupN = 6.6854E7 *m # Polar radius of Jupiter
GM_EarthN = 3.986004E14 *m**3/s**2 # Terrestrial mass parameter
GM_JupN = 1.2668653E17 *m**3/s**2 # Jovian mass parameter
M_EarthN = GM_EarthN/G_2014 # Earth mass
M_JupN = GM_JupN/G_2014 # Jupiter mass
V_EarthN = 4*pi*R_eEarthN**2*R_pEarthN/3 # Volume of the Earth
V_JupN = 4*pi*R_eJupN**2*R_pJupN/3 # Volume of Jupiter
R_EarthN = (R_eEarthN**2*R_pEarthN)**(1/3) # Mean radius of the Earth
R_JupN = (R_eJupN**2*R_pJupN)**(1/3) # Mean radius of Jupiter
mean_solar_day = 86400.002 *s # seconds
au = 1.49597870700E11 *m # IAU 2009 Astronomical unit
pc = 3600*au*180/pi # parsec
c = 2.99792458e8 *m/s # Speed of light
| 5,576 | 36.42953 | 77 | py |
pycheops | pycheops-master/pycheops/starproperties.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
StarProperties
==============
Object class to obtain/store observed properties of a star and to infer
parameters such as radius and density.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from astropy.table import Table
from astropy.coordinates import SkyCoord
import requests
from .core import load_config
from pathlib import Path
from time import localtime, mktime
from uncertainties import ufloat, UFloat
from .ld import stagger_power2_interpolator, atlas_h1h2_interpolator
from .ld import phoenix_h1h2_interpolator
from numpy.random import normal
import os
from astropy.io.ascii import convert_numpy
from contextlib import redirect_stderr
from dace_query.cheops import Cheops
class StarProperties(object):
"""
CHEOPS StarProperties object
The observed properties T_eff, log_g and [Fe/H] are obtained from
DACE or SWEET-Cat, or can be specified by the user.
Set match_arcsec=None to skip extraction of parameters from SWEET-Cat.
By default properties are obtained from SWEET-Cat.
Set dace=True to obtain parameters from the stellar properties table at
DACE.
User-defined properties are specified either as a ufloat or as a 2-tuple
(value, error), e.g., teff=(5000,100).
User-defined properties over-write values obtained from SWEET-Cat or DACE.
    The stellar density is estimated using a linear relation between log(rho)
and log(g) derived using the method of Moya et al. (2018ApJS..237...21M)
Limb darkening parameters in the CHEOPS band are interpolated from Table 2
of Maxted (2018A&A...616A..39M). The error on these parameters is
    propagated from the errors in Teff, log_g and [Fe/H] plus an additional
error of 0.01 for h_1 and 0.05 for h_2, as recommended in Maxted (2018).
If [Fe/H] for the star is not specified, the value 0.0 +/- 0.3 is assumed.
If the stellar parameters are outside the range covered by Table 2 of
Maxted (2018), then the results from ATLAS model from Table 10 of Claret
(2019RNAAS...3...17C) are used instead. For stars cooler than 3500K the
PHOENIX models for solar metalicity from Table 5 of Claret (2019) are
    used. The parameters h_1 and h_2 are given nominal errors of 0.1 for
    ATLAS models and 0.15 for PHOENIX models.
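    Example (the identifier is illustrative):
        >>> from pycheops import StarProperties
        >>> s = StarProperties('WASP-12')
        >>> print(s)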
"""
def __init__(self, identifier, force_download=False, dace=False,
match_arcsec=5, configFile=None,
teff=None, logg=None, metal=None,
verbose=True):
self.identifier = identifier
coords = SkyCoord.from_name(identifier)
self.ra = coords.ra.to_string(precision=2,unit='hour',sep=':',pad=True)
self.dec = coords.dec.to_string(precision=1,sep=':',unit='degree',
alwayssign=True,pad=True)
config = load_config(configFile)
_cache_path = config['DEFAULT']['data_cache_path']
sweetCatPath = Path(_cache_path,'sweetcat.csv')
if force_download:
download_sweetcat = True
elif dace:
download_sweetcat = False
elif sweetCatPath.is_file():
file_age = mktime(localtime())-os.path.getmtime(sweetCatPath)
if file_age > int(config['SWEET-Cat']['update_interval']):
download_sweetcat = True
else:
download_sweetcat = False
else:
download_sweetcat = True
if download_sweetcat:
url = config['SWEET-Cat']['download_url']
req=requests.post(url)
with open(sweetCatPath, 'wb') as file:
file.write(req.content)
if verbose:
print('SWEET-Cat data downloaded from \n {}'.format(url))
if dace:
db = Cheops.query_catalog("stellar")
cat_c = SkyCoord(db['obj_pos_ra_deg'], db['obj_pos_dec_deg'],
unit='degree,degree')
idx, sep, _ = coords.match_to_catalog_sky(cat_c)
if sep.arcsec[0] > match_arcsec:
raise ValueError(
'No matching star in DACE stellar properties table')
self.teff = ufloat(db['obj_phys_teff_k'][idx],99)
self.teff_note = "DACE"
self.logg = ufloat(db['obj_phys_logg'][idx],0.09)
self.logg_note = "DACE"
self.metal = ufloat(db['obj_phys_feh'][idx],0.09)
self.metal_note = "DACE"
self.gaiadr2 = db['obj_id_gaiadr2'][idx]
else:
converters={'gaia_dr2': [convert_numpy(np.int64)],
'gaia_dr3': [convert_numpy(np.int64)] }
sweetCat = Table.read(sweetCatPath, encoding='UTF-8',
format='csv', converters=converters)
# Use NaN for masked values
sweetCat = sweetCat.filled(fill_value=np.nan)
if match_arcsec is None:
entry = None
else:
cat_c = SkyCoord(sweetCat['RA'],sweetCat['DEC'],
unit='hour,degree')
idx, sep, _ = coords.match_to_catalog_sky(cat_c)
if sep.arcsec[0] > match_arcsec:
raise ValueError('No matching star in SWEET-Cat')
entry = sweetCat[idx]
try:
self.teff = ufloat(entry['Teff'],entry['eTeff'])
self.teff_note = "SWEET-Cat"
except:
self.teff = None
try:
if entry['Logg_gaia'] > 0:
self.logg = ufloat(entry['Logg_gaia'],entry['eLogg_gaia'])
self.logg_note = "SWEET-Cat (gaia)"
else:
self.logg = ufloat(entry['Logg'],entry['eLogg'])
self.logg_note = "SWEET-Cat (spec)"
except:
self.logg = None
try:
self.metal=ufloat(entry['[Fe/H]'], entry['e[Fe/H]'] )
self.metal_note = "SWEET-Cat"
except:
self.metal = None
# User defined values
if teff:
self.teff = teff if isinstance(teff, UFloat) else ufloat(*teff)
self.teff_note = "User"
if logg:
self.logg = logg if isinstance(logg, UFloat) else ufloat(*logg)
self.logg_note = "User"
if metal:
self.metal = metal if isinstance(metal, UFloat) else ufloat(*metal)
self.metal_note = "User"
# log rho from log g using method of Moya et al.
# (2018ApJS..237...21M). Accuracy is 4.4%
self.logrho = None
if self.logg:
if (self.logg.n > 3.697) and (self.logg.n < 4.65):
logrho = -7.352 + 1.6580*self.logg
self.logrho = ufloat(logrho.n, np.hypot(logrho.s, 0.044))
self.h_1 = None
self.h_2 = None
self.ld_ref = None
if self.teff and self.logg:
metal = self.metal if self.metal else ufloat(0,0.3)
power2 = stagger_power2_interpolator()
_,_,h_1,h_2 = power2(self.teff.n,self.logg.n,metal.n)
if not np.isnan(h_1):
self.ld_ref = 'Stagger'
Xteff = normal(self.teff.n, self.teff.s, 256)
Xlogg = normal(self.logg.n, self.logg.s, 256)
Xmetal = normal(metal.n, metal.s, 256)
X = power2(Xteff,Xlogg,Xmetal)
# Additional error derived in Maxted, 2019
e_h_1 = np.hypot(0.01,np.sqrt(np.nanmean((X[:,2]-h_1)**2)))
e_h_2 = np.hypot(0.05,np.sqrt(np.nanmean((X[:,3]-h_2)**2)))
self.h_1 = ufloat(round(h_1,3),round(e_h_1,3))
self.h_2 = ufloat(round(h_2,3),round(e_h_2,3))
if self.ld_ref is None:
atlas = atlas_h1h2_interpolator()
h_1,h_2 = atlas(self.teff.n,self.logg.n,metal.n)
if not np.isnan(h_1):
self.h_1 = ufloat(round(h_1,3),0.1)
self.h_2 = ufloat(round(h_2,3),0.1)
self.ld_ref = 'ATLAS'
if self.ld_ref is None:
phoenix = phoenix_h1h2_interpolator()
h_1,h_2 = phoenix(self.teff.n,self.logg.n)
if not np.isnan(h_1):
self.h_1 = ufloat(round(h_1,3),0.15)
self.h_2 = ufloat(round(h_2,3),0.15)
self.ld_ref = 'PHOENIX-COND'
def __repr__(self):
s = 'Identifier : {}\n'.format(self.identifier)
s += 'Coordinates: {} {}\n'.format(self.ra, self.dec)
if self.teff:
s += 'T_eff : {:5.0f} +/- {:3.0f} K [{}]\n'.format(
self.teff.n, self.teff.s,self.teff_note)
if self.logg:
s += 'log g : {:5.2f} +/- {:0.2f} [{}]\n'.format(
self.logg.n, self.logg.s, self.logg_note)
if self.metal:
s += '[M/H] : {:+5.2f} +/- {:0.2f} [{}]\n'.format(
self.metal.n, self.metal.s, self.metal_note)
if self.logrho:
s += 'log rho : {:5.2f} +/- {:0.2f} (solar units)\n'.format(
self.logrho.n, self.logrho.s)
if self.ld_ref:
s += 'h_1 : {:5.3f} +/- {:0.3f} [{}]\n'.format(
self.h_1.n, self.h_1.s,self.ld_ref)
s += 'h_2 : {:5.3f} +/- {:0.3f} [{}]\n'.format(
self.h_2.n, self.h_2.s,self.ld_ref)
return s
| 10,320 | 40.785425 | 79 | py |
pycheops | pycheops-master/pycheops/utils.py | # -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
utils
======
Utility functions
Functions
---------
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from uncertainties import ufloat
__all__ = ['uprint', 'parprint', 'lcbin', 'phaser', 'mode']
def uprint(u, n, w=8, sf=2, wn=None, indent=1, short=False, asym=True):
"""
Print the value and error of a ufloat
The number of decimal places in the value and error are set such that the
error has the specified number of significant figures. If the error is 0
the output will have sf decimal places.
:param u: ufloat
:param n: parameter name
:param w: field width for values
:param wn: field width for name
:param sf: number of sig. fig. in the error
:param indent: number of spaces before text
:returns: formatted string
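    Example (exact padding depends on w and wn):
        >>> from uncertainties import ufloat
        >>> print(uprint(ufloat(1.23456, 0.0789), 'x'))  # x = 1.235 +/- 0.079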
"""
if wn is None:
wn = len(n)+1
val = u.n
err = u.s
ndp = sf if err == 0 else sf - int(np.floor(np.log10(err))) - 1
if ndp < 0:
b = 10**(-ndp-1)
val = round(val/b)*b
err = round(err/b)*b
else:
val = round(val,ndp)
err = round(err,ndp)
if ndp > (w-2):
f='{:{wn}s} = {:{w}.{sf}e} +/- {:{w}.1e}'
s = f.format(n, val,err,sf=sf,w=w,wn=wn)
elif (ndp > 0) and short:
if ndp < sf:
b = 10**(sf-ndp)
else:
b = 10**ndp
err = round(err,ndp)*b
if err == 10:
err = 1
ndp -= 1
f='{:{wn}s} = {:{w}.{ndp}f} ({:{sf}.0f})'
s = f.format(n, val,err,ndp=ndp,w=w,wn=wn,sf=sf)
else:
if ndp >= 0:
f='{:{wn}s} = {:{w}.{ndp}f} +/- {:{w}.{ndp}f}'
s = f.format(n, val,err,ndp=ndp,w=w,wn=wn)
elif ndp > -sf-(w-6):
f='{:{wn}s} = {:{w}.0f} +/- {:{w}.0f}'
s = f.format(n, val,err,w=w,wn=wn)
else:
f='{:{wn}s} = {:{w}.{sf}e} +/- {:{w}.1e}'
s = f.format(n, val,err,sf=sf,w=w,wn=wn)
return " "*indent+s
#----------
def parprint(x,n, w=8, sf=2, wn=None, indent=4, short=False, asym=True):
"""
Print the value and error of a parameter based on a sample
The number of decimal places in the value and error are set such that the
error has the specified number of significant figures. If the error is 0
the output will have sf decimal places.
The parameter value is set to the sample median and the error is based on
the 15.87% and 84.13% percentiles of the sample.
:param x: input sample for probability distribution of the parameter
:param n: parameter name
:param w: field width for values
:param wn: field width for name
:param sf: number of sig. fig. in the error
:param indent: number of spaces before text
:param asym: also print asymmetric error bars
:returns: formatted string
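    Example (sample values are illustrative; output is approximate):
        >>> x = np.random.normal(1.234, 0.056, 10000)
        >>> print(parprint(x, 'D'))  # D = 1.234 +/- 0.056 (-0.056,+0.056)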
"""
if wn is None:
wn = len(n)+1
std_l, val, std_u = np.percentile(x, [15.87, 50, 84.13])
err = 0.5*(std_u-std_l)
e_hi = std_u - val
e_lo = val - std_l
ndp = sf if err == 0 else sf - int(np.floor(np.log10(err))) - 1
if ndp < 0:
b = 10**(-ndp-1)
val = round(val/b)*b
err = round(err/b)*b
e_lo = round(e_lo/b)*b
e_hi = round(e_hi/b)*b
else:
val = round(val,ndp)
err = round(err,ndp)
e_lo = round(e_lo,ndp)
e_hi = round(e_hi,ndp)
if ndp > (w-2):
f='{:{wn}s} = {:{w}.{sf}e} +/- {:{w}.1e}'
if asym:
f+=' ({:+{w}.1e},{:+{w}.1e})'
s = f.format(n, val,err,-e_lo,e_hi,sf=sf,w=w,wn=wn)
else:
s = f.format(n, val,err,sf=sf,w=w,wn=wn)
elif (ndp > 0) and short:
if ndp < sf:
b = 10**(sf-ndp)
else:
b = 10**ndp
err = round(err,ndp)*b
e_lo = round(e_lo,ndp)*b
e_hi = round(e_hi,ndp)*b
f='{:{wn}s} = {:{w}.{ndp}f} ({:{sf}.0f})'
if asym:
f+=' (-{:{sf}.0f},+{:{sf}.0f})'
s = f.format(n, val,err,e_lo,e_hi,ndp=ndp,w=w,wn=wn,sf=sf)
else:
s = f.format(n, val,err,ndp=ndp,w=w,wn=wn,sf=sf)
else:
if ndp >= 0:
f='{:{wn}s} = {:{w}.{ndp}f} +/- {:{w}.{ndp}f}'
if asym:
f+=' ({:+{w}.{ndp}f},{:+{w}.{ndp}f})'
s = f.format(n, val,err,-e_lo,e_hi,ndp=ndp,w=w,wn=wn)
else:
s = f.format(n, val,err,ndp=ndp,w=w,wn=wn)
elif ndp > -sf-(w-6):
f='{:{wn}s} = {:{w}.0f} +/- {:{w}.0f}'
if asym:
f+=' ({:+{w}.0f},{:+{w}.0f})'
s = f.format(n, val,err,-e_lo,e_hi,w=w,wn=wn)
else:
s = f.format(n, val,err,w=w,wn=wn)
else:
f='{:{wn}s} = {:{w}.{sf}e} +/- {:{w}.1e}'
if asym:
f+=' ({:+{w}.1e},{:+{w}.1e})'
s = f.format(n, val,err,-e_lo,e_hi,sf=sf,w=w,wn=wn)
else:
s = f.format(n, val,err,sf=sf,w=w,wn=wn)
return " "*indent+s
#----------
def lcbin(time, flux, binwidth=0.06859, nmin=4, time0=None,
robust=False, tmid=False):
"""
Calculate average flux and error in time bins of equal width.
The default bin width is equivalent to one CHEOPS orbit in units of days.
To avoid binning data on either side of the gaps in the light curve due to
the CHEOPS orbit, the algorithm searches for the largest gap in the data
shorter than binwidth and places the bin edges so that they fall at the
centre of this gap. This behaviour can be avoided by setting a value for
the parameter time0.
The time values for the output bins can be either the average time value
of the input points or, if tmid is True, the centre of the time bin.
If robust is True, the output bin values are the median of the flux values
of the bin and the standard error is estimated from their mean absolute
deviation. Otherwise, the mean and standard deviation are used.
The output values are as follows.
* t_bin - average time of binned data points or centre of time bin.
* f_bin - mean or median of the input flux values.
* e_bin - standard error of flux points in the bin.
* n_bin - number of flux points in the bin.
:param time: time
:param flux: flux (or other quantity to be time-binned)
:param binwidth: bin width in the same units as time
:param nmin: minimum number of points for output bins
:param time0: time value at the lower edge of one bin
:param robust: use median and robust estimate of standard deviation
:param tmid: return centre of time bins instead of mean time value
:returns: t_bin, f_bin, e_bin, n_bin
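    Example (synthetic data for illustration):
        >>> t = np.linspace(0, 3, 1000)
        >>> f = 1 + np.random.normal(0, 1e-3, t.size)
        >>> t_bin, f_bin, e_bin, n_bin = lcbin(t, f, binwidth=0.1)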
"""
if time0 is None:
tgap = (time[1:]+time[:-1])/2
gap = time[1:]-time[:-1]
j = gap < binwidth
gap = gap[j]
tgap = tgap[j]
time0 = tgap[np.argmax(gap)]
time0 = time0 - binwidth*np.ceil((time0-min(time))/binwidth)
n = int(1+np.ceil(np.ptp(time)/binwidth))
r = (time0,time0+n*binwidth)
n_in_bin,bin_edges = np.histogram(time,bins=n,range=r)
bin_indices = np.digitize(time,bin_edges)
t_bin = np.zeros(n)
f_bin = np.zeros(n)
e_bin = np.zeros(n)
n_bin = np.zeros(n, dtype=int)
for i,n in enumerate(n_in_bin):
if n >= nmin:
j = bin_indices == i+1
n_bin[i] = n
if tmid:
t_bin[i] = (bin_edges[i]+bin_edges[i+1])/2
else:
t_bin[i] = np.nanmean(time[j])
if robust:
f_bin[i] = np.nanmedian(flux[j])
e_bin[i] = 1.25*np.nanmean(abs(flux[j] - f_bin[i]))/np.sqrt(n)
else:
f_bin[i] = np.nanmean(flux[j])
if n > 1:
e_bin[i] = np.std(flux[j])/np.sqrt(n-1)
else:
e_bin[i] = 0.0
j = (n_bin >= nmin)
return t_bin[j], f_bin[j], e_bin[j], n_bin[j]
#-----------
def ellpar(x, y, nstd=2):
"""
Error ellipse for a joint probability distribution
:param x: input sample x values
:param y: input sample y values
:param nstd: number of standard deviations contained by the ellipse
:returns: xy, w, h, theta as defined for matplotlib.patches.Ellipse
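    Example (x and y are arbitrary joint samples; matplotlib assumed):
        >>> from matplotlib.patches import Ellipse
        >>> xy, w, h, theta = ellpar(x, y)
        >>> patch = Ellipse(xy, w, h, angle=theta, fill=False)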
"""
xy=(np.mean(x), np.mean(y))
cov = np.cov(x, y)
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
vals, vecs = vals[order], vecs[:,order]
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
w, h = 2 * nstd * np.sqrt(vals)
return xy, w, h, theta
#-----------
def mode(x):
"""
Estimate the mode of a sample
This robust estimate of the mode is made using the half-sample method.
Adapted from function provided in robust.py -
"Small collection of robust statistical estimators based on functions from
Henry Freudenriech (Hughes STX) statistics library (called ROBLIB) that have
been incorporated into the AstroIDL User's Library."
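    Example (synthetic sample; the mode estimate is approximate):
        >>> x = np.concatenate([np.random.normal(0, 1, 100),
        ...                     np.random.normal(5, 0.1, 400)])
        >>> mode(x)  # close to 5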
"""
# Create the function that we can use for the half-sample mode
def _hsm(data):
j = None
if data.size == 1:
return data[0]
elif data.size == 2:
return data.mean()
elif data.size == 3:
i1 = data[1] - data[0]
i2 = data[2] - data[1]
if i1 < i2:
return data[:2].mean()
            elif i2 < i1:
return data[1:].mean()
else:
return data[1]
else:
wMin = data[-1] - data[0]
if wMin == 0.0:
return data[0]
N = data.size // 2 + data.size % 2
for i in range(0, N):
w = data[i+N-1] - data[i]
if w < wMin:
wMin = w
j = i
if j is None:
return data[data.size // 2]
return _hsm(data[j:j+N])
# The data need to be sorted for this to work
data = np.sort(x)
# Find the mode
dataMode = _hsm(data)
return dataMode
#-----------
def phaser(time, period=1, time0=0, phase0=-0.25):
"""
Calculate the phase for an array of times
:param time: array of times
:param period: period
:param time0: time for phase=0
:param phase0: minimum output phase value
:returns: ( ( ( (time-time0)/period % 1) - phase0) % 1) + phase0
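    Example (the default phase0=-0.25 wraps phases into [-0.25, 0.75)):
        >>> phaser(np.array([0.0, 0.3, 0.9]))  # approx. [0.0, 0.3, -0.1]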
"""
return ( ( ( (time-time0)/period % 1) - phase0) % 1) + phase0
| 11,431 | 31.293785 | 80 | py |
pycheops | pycheops-master/pycheops/tests/test_constants.py |
from math import pi, sqrt
from unittest import TestCase
from pycheops.constants import *
class TestConstants(TestCase):
def test_pc(self):
assert abs(pc - 180*3600*au/pi)/pc < 1e-12
| 200 | 12.4 | 50 | py |
pycheops | pycheops-master/pycheops/tests/test_funcs.py |
from math import pi, sqrt
from unittest import TestCase
from pycheops.constants import *
from pycheops.funcs import *
class TestConstants(TestCase):
def test_rhostar(self):
m_star = 1.234 * M_SunN
r_star = 0.987 * R_SunN
rho_star = m_star/(4*pi*r_star**3/3.)
m_planet = 5.678 * M_JupN
P = 9.876
M = (m_star+m_planet)/M_SunN
a = a_rsun(P, M)
r_1 = r_star/R_SunN/a
q = m_planet/m_star
rho_Sun = M_SunN/V_SunN
assert abs(rho_star/rho_Sun - rhostar(r_1, P, q=q)) < 1e-9
def test_m_comp(self):
m_1 = 1.23456
m_2 = 0.23456
P = 3.45678
sini = 0.8765
e = 0.45678
K_1,K_2 = K_kms(m_1, m_2, P, sini, e)
fm1 = f_m(P, K_1, e)
fm2 = f_m(P, K_2, e)
t_1 = abs(m_2 - m_comp(fm1,m_1,sini))
t_2 = abs(m_1 - m_comp(fm2,m_2,sini))
assert t_1+t_2 < 1e-9
| 929 | 22.25 | 66 | py |
null | tvPersistence.jl-main/README.md | # tvPersistence.jl
The code is developed in Julia as accompanying code for the Barunik and Vacha (2023) paper. It provides estimation of time-varying persistence using the *localised heterogeneous persistence* approach introduced in
Baruník, J. and Vacha, L. (2023): *The Dynamic Persistence of Economic Shocks*, manuscript [available here for download](https://barunik.github.io)
## Software requirements
Install [Julia](http://julialang.org/) version 1.6.0 or newer. On first use of this code, install the same package versions with which the project was built, and work in the project's environment:
```julia
using Pkg
Pkg.activate(".") # activate the project in its directory
Pkg.instantiate() # install the exact package versions the project was built with
```
# TBD soon
The package is coming soon.
| 819 | 38.047619 | 213 | md |
pyrouge | pyrouge-master/setup.py | from setuptools import setup
import os
from pyrouge.utils.file_utils import list_files
data_files = list_files('pyrouge/tests/data')
data_files = [p.replace('pyrouge/tests/', '') for p in data_files]
script_files = [os.path.join('bin', s) for s in os.listdir('bin')]
setup(
name='pyrouge',
version='0.1.3',
author='Benjamin Heinzerling, Anders Johannsen',
author_email='[email protected]',
packages=['pyrouge', 'pyrouge.utils', 'pyrouge.tests'],
scripts=script_files,
#test_suite='pyrouge.test.suite',
package_data={'pyrouge.tests': data_files},
url='https://github.com/noutenki/pyrouge',
license='LICENSE.txt',
description='A Python wrapper for the ROUGE summarization evaluation'
' package.',
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Text Processing :: Linguistic'],
long_description=open('README.rst').read(),
)
| 1,170 | 33.441176 | 73 | py |
pyrouge | pyrouge-master/pyrouge/Rouge155.py | from __future__ import print_function, unicode_literals, division
import os
import re
import codecs
import platform
from subprocess import check_output
from tempfile import mkdtemp
from functools import partial
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
from pyrouge.utils import log
from pyrouge.utils.file_utils import DirectoryProcessor
from pyrouge.utils.file_utils import verify_dir
class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_ouput)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
        rouge.model_dir = '/PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
            'SL.P.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, log_level=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
if log_level is None:
self.log = log.get_global_console_logger()
else:
self.log = log.get_global_console_logger(log_level)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
        Path of the settings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
        def sent_split_to_string(s):
            return "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
        text: The text to convert, containing one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
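
    # Example of the conversion above with illustrative input:
    #   Rouge155.convert_text_to_rouge_format("First sentence.\nSecond one.")
    # produces an HTML document whose body lists the sentences as:
    #   <a name="1">[1]</a> <a href="#1" id=1>First sentence.</a>
    #   <a name="2">[2]</a> <a href="#2" id=2>Second one.</a>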
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
                doc_id = match.group(1)
                model_filenames = Rouge155.__get_model_filenames_for_id(
                    doc_id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
        if not config_file_path:
            self._config_dir = mkdtemp()
            config_filename = "rouge_conf.xml"
        else:
            # keep the directory of the supplied path on the instance so the
            # os.path.join below uses it rather than a stale or unset value
            self._config_dir, config_filename = os.path.split(config_file_path)
            verify_dir(self._config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
env = os.environ.copy()
if hasattr(self, "_home_dir") and self._home_dir:
env['ROUGE_EVAL_HOME'] = self._home_dir
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command, env=env).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
#0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
        Verify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp()
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
    def __get_model_filenames_for_id(doc_id, model_dir, model_filenames_pattern):
        pattern = re.compile(model_filenames_pattern.replace('#ID#', doc_id))
        model_filenames = [
            f for f in os.listdir(model_dir) if pattern.match(f)]
        if not model_filenames:
            raise Exception(
                "Could not find any model summaries for the system"
                " summary with ID {}. Specified model filename pattern was: "
                "{}".format(doc_id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
'-2',
'-1',
'-U',
'-r', 1000,
'-n', 4,
'-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + ['-m'] + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
if __name__ == "__main__":
import argparse
    from pyrouge.utils.argparsers import rouge_path_parser
parser = argparse.ArgumentParser(parents=[rouge_path_parser])
args = parser.parse_args()
rouge = Rouge155(args.rouge_home)
rouge.save_home_dir()
| 22,251 | 35.359477 | 79 | py |
pyrouge | pyrouge-master/pyrouge/__init__.py | from pyrouge.Rouge155 import Rouge155
| 38 | 18.5 | 37 | py |
pyrouge | pyrouge-master/pyrouge/test.py | import unittest
from pyrouge.tests.Rouge155_test import PyrougeTest
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(PyrougeTest))
unittest.TextTestRunner().run(suite)
| 224 | 24 | 56 | py |
pyrouge | pyrouge-master/pyrouge/tests/Rouge155_test.py | from __future__ import print_function, unicode_literals, division
import unittest
import os
import re
from subprocess import check_output
from tempfile import mkdtemp
from pyrouge import Rouge155
from pyrouge.utils.file_utils import str_from_file, xml_equal
module_path = os.path.dirname(__file__)
os.chdir(module_path)
add_data_path = lambda p: os.path.join('data', p)
check_output_clean = lambda c: check_output(c).decode("UTF-8").strip()
class PyrougeTest(unittest.TestCase):
def test_paths(self):
rouge = Rouge155()
def get_home_from_settings():
with open(rouge.settings_file) as f:
for line in f.readlines():
if line.startswith("home_dir"):
rouge_home = line.split("=")[1].strip()
return rouge_home
self.assertEqual(rouge.home_dir, get_home_from_settings())
self.assertTrue(os.path.exists(rouge.bin_path))
self.assertTrue(os.path.exists(rouge.data_dir))
wrong_path = "/nonexisting/path/rewafafkljaerearjafankwe3"
with self.assertRaises(Exception) as context:
rouge.system_dir = wrong_path
self.assertEqual(
str(context.exception),
"Cannot set {} directory because the path {} does not "
"exist.".format("system", wrong_path))
right_path = add_data_path("systems")
rouge.system_dir = right_path
self.assertEqual(rouge.system_dir, right_path)
with self.assertRaises(Exception) as context:
rouge.model_dir = wrong_path
self.assertEqual(
str(context.exception),
"Cannot set {} directory because the path {} does not "
"exist.".format("model", wrong_path))
right_path = add_data_path("models")
rouge.model_dir = right_path
self.assertEqual(rouge.model_dir, right_path)
def test_wrong_system_pattern(self):
wrong_regexp = "adfdas454fd"
rouge = Rouge155()
rouge.system_dir = add_data_path("systems")
rouge.model_dir = add_data_path("models")
#rouge.system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html"
rouge.system_filename_pattern = wrong_regexp
rouge.model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html"
with self.assertRaises(Exception) as context:
rouge.evaluate()
self.assertEqual(
str(context.exception),
"Did not find any files matching the pattern {} in the system "
"summaries directory {}.".format(wrong_regexp, rouge.system_dir))
def test_wrong_model_pattern(self):
rouge = Rouge155()
rouge.system_dir = add_data_path("systems")
rouge.model_dir = add_data_path("models_plain")
rouge.system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html"
rouge.model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html"
with self.assertRaises(Exception) as context:
rouge.evaluate()
match_string = (
r"Could not find any model summaries for the system "
r"summary with ID " + "(\d+)" + r". Specified model filename "
r"pattern was: " + re.escape(rouge.model_filename_pattern))
try:
assert_regex = self.assertRegex
except AttributeError:
assert_regex = self.assertRegexpMatches
assert_regex(str(context.exception), re.compile(match_string))
def test_text_conversion(self):
rouge = Rouge155()
text = str_from_file(add_data_path("spl_test_doc"))
html = rouge.convert_text_to_rouge_format(text, "D00000.M.100.A.C")
target = str_from_file(add_data_path("spl_test_doc.html"))
self.assertEqual(html, target)
# only run this test if BeautifulSoup is installed
try:
from bs4 import BeautifulSoup
def test_get_plain_text(self):
input_dir = add_data_path("SL2003_models_rouge_format")
output_dir = mkdtemp()
target_dir = add_data_path("SL2003_models_plain_text")
command = (
"pyrouge_convert_rouge_format_to_plain_text "
"-i {} -o {}".format(input_dir, output_dir))
check_output(command.split())
filenames = os.listdir(input_dir)
for filename in filenames:
output_file = os.path.join(output_dir, filename)
output = str_from_file(output_file)
target_file = os.path.join(target_dir, filename)
target = str_from_file(target_file)
self.assertEqual(output, target)
except ImportError:
pass
def test_convert_summaries(self):
input_dir = add_data_path("SL2003_models_plain_text")
output_dir = mkdtemp()
target_dir = add_data_path("SL2003_models_rouge_format")
command = (
"pyrouge_convert_plain_text_to_rouge_format -i {} -o {}".format(
input_dir, output_dir))
check_output(command.split())
filenames = os.listdir(input_dir)
for filename in filenames:
output_file = os.path.join(output_dir, filename)
output = str_from_file(output_file)
target_file = os.path.join(target_dir, filename)
target = str_from_file(target_file)
filename = filename.replace(".html", "")
target = target.replace(filename, "dummy title")
self.assertEqual(output, target, filename)
def test_config_file(self):
rouge = Rouge155()
rouge.system_dir = add_data_path("systems")
rouge.model_dir = add_data_path("models")
rouge.system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html"
rouge.model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html"
rouge.config_file = add_data_path("config_test.xml")
rouge.write_config(system_id=11)
self.assertTrue(xml_equal(
rouge.config_file,
add_data_path("ROUGE-test_11.xml")))
os.remove(rouge.config_file)
def test_evaluation(self):
rouge = Rouge155()
rouge.system_dir = add_data_path("systems")
rouge.model_dir = add_data_path("models")
rouge.system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html"
rouge.model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html"
pyrouge_output = rouge.evaluate(system_id=11).strip()
rouge_command = (
"{bin} -e {data} -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 "
"-a -m {xml}".format(
bin=rouge.bin_path,
data=rouge.data_dir,
xml=add_data_path("ROUGE-test_11.xml")))
orig_rouge_output = check_output_clean(rouge_command.split())
self.assertEqual(pyrouge_output, orig_rouge_output)
def test_rouge_for_plain_text(self):
model_dir = add_data_path("models_plain")
system_dir = add_data_path("systems_plain")
pyrouge_command = (
"pyrouge_evaluate_plain_text_files -m {} -s {} -sfp "
"D(\d+).M.100.T.A -mfp D#ID#.M.100.T.[A-Z] -id 1".format(
model_dir, system_dir))
pyrouge_output = check_output_clean(pyrouge_command.split())
rouge = Rouge155()
config_file = add_data_path("config_test2.xml")
rouge_command = (
"{bin} -e {data} -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 "
"-a -m {xml}".format(
bin=rouge.bin_path,
data=rouge.data_dir,
xml=config_file))
orig_rouge_output = check_output_clean(rouge_command.split())
self.assertEqual(pyrouge_output, orig_rouge_output)
def test_write_config(self):
system_dir = add_data_path("systems")
model_dir = add_data_path("models")
system_filename_pattern = "SL.P.10.R.11.SL062003-(\d+).html"
model_filename_pattern = "SL.P.10.R.[A-D].SL062003-#ID#.html"
config_file = os.path.join(mkdtemp(), "config_test.xml")
command = (
"pyrouge_write_config_file -m {m} -s {s} "
"-mfp {mfp} -sfp {sfp} -c {c}".format(
m=model_dir, s=system_dir,
mfp=model_filename_pattern, sfp=system_filename_pattern,
c=config_file))
check_output(command.split())
target_xml = add_data_path("config_test.xml")
print(config_file, target_xml)
self.assertTrue(xml_equal(config_file, target_xml))
def test_options(self):
rouge = Rouge155()
model_dir = add_data_path("models_plain")
system_dir = add_data_path("systems_plain")
config_file = add_data_path("config_test2.xml")
command_part1 = (
"pyrouge_evaluate_plain_text_files -m {} -s {} -sfp "
"D(\d+).M.100.T.A -mfp D#ID#.M.100.T.[A-Z] -id 1 -rargs".format(
model_dir, system_dir))
command_part2 = [
"\"-e {data} -c 90 -2 -1 -U -r 1000 -n 2 -w 1.2 "
"-a -m {xml}\"".format(
data=rouge.data_dir, xml=config_file)]
pyrouge_command = command_part1.split() + command_part2
pyrouge_output = check_output_clean(pyrouge_command)
rouge_command = (
"{bin} -e {data} -c 90 -2 -1 -U -r 1000 -n 2 -w 1.2 "
"-a -m {xml}".format(
bin=rouge.bin_path, data=rouge.data_dir, xml=config_file))
orig_rouge_output = check_output_clean(rouge_command.split())
self.assertEqual(pyrouge_output, orig_rouge_output)
def main():
unittest.main()
if __name__ == "__main__":
main()
| 9,629 | 40.508621 | 77 | py |
pyrouge | pyrouge-master/pyrouge/tests/__init__.py | 0 | 0 | 0 | py |
|
pyrouge | pyrouge-master/pyrouge/tests/__main__.py | import unittest
import pyrouge.tests
from pyrouge.tests.Rouge155_test import PyrougeTest
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(PyrougeTest))
unittest.TextTestRunner().run(suite)
| 243 | 23.4 | 56 | py |
pyrouge | pyrouge-master/pyrouge/tests/data/spl_test_doc.html | <html>
<head>
<title>D00000.M.100.A.C</title>
</head>
<body bgcolor="white">
<a name="1">[1]</a> <a href="#1" id=1>BritishCCC authoritiesCCC arrestedCCC GeneralCCC AugustoCCC PinochetCCC inCCC LondonCCC forCCC backCCC surgeryCCC onCCC anCCC internationalCCC warrantCCC</a>
<a name="2">[2]</a> <a href="#2" id=2>issuedCCC byCCC SpanishCCC magistrateCCC BaltasarCCC GarzonCCC</a>
<a name="3">[3]</a> <a href="#3" id=3>TheCCC MadridCCC courtCCC chargedCCC PinochetCCC</a>
<a name="4">[4]</a> <a href="#4" id=4>whoCCC ruledCCC ChileCCC asCCC aCCC despotCCC forCCC yearsCCC</a>
<a name="5">[5]</a> <a href="#5" id=5>withCCC crimesCCC againstCCC humanityCCC</a>
<a name="6">[6]</a> <a href="#6" id=6>includingCCC genocideCCC andCCC terrorismCCC involvingCCC theCCC deathsCCC ofCCC moreCCC thanCCC peopleCCC</a>
<a name="7">[7]</a> <a href="#7" id=7>TheCCC ChileanCCC governmentCCC protestedCCC thatCCC PinochetCCC nowCCC aCCC hasCCC legalCCC immunityCCC</a>
<a name="8">[8]</a> <a href="#8" id=8>butCCC fewCCC inCCC ChileanCCC societyCCC protestedCCC theCCC arrestCCC</a>
<a name="9">[9]</a> <a href="#9" id=9>PinochetCCC arrestCCC showsCCC theCCC growingCCC significanceCCC ofCCC internationalCCC lawCCC</a>
<a name="10">[10]</a> <a href="#10" id=10>suggestingCCC thatCCC officialsCCC accusedCCC ofCCC atrocitiesCCC haveCCC fewerCCC placesCCC toCCC hideCCC theseCCC daysCCC</a>
<a name="11">[11]</a> <a href="#11" id=11>evenCCC ifCCC theyCCC areCCC carryingCCC diplomaticCCC passportsCCC</a>
</body>
</html>
| 1,503 | 78.157895 | 195 | html |
pyrouge | pyrouge-master/pyrouge/tests/data/SL2003_models_plain_text/SL.P.10.R.A.SL062003-01.html | Poor nations demand trade subsidies from developed nations. | 59 | 59 | 59 | html |
pyrouge | pyrouge-master/pyrouge/tests/data/SL2003_models_plain_text/SL.P.10.R.A.SL062003-02.html | Indonesia charges Imam Samudra and Amrozi with Bali bombing. | 60 | 60 | 60 | html |