# File: BasicVSR_PlusPlus-master/mmedit/models/common/downsample.py
# Copyright (c) OpenMMLab. All rights reserved.
def pixel_unshuffle(x, scale):
"""Down-sample by pixel unshuffle.
Args:
x (Tensor): Input tensor.
scale (int): Scale factor.
Returns:
Tensor: Output tensor.
"""
b, c, h, w = x.shape
if h % scale != 0 or w % scale != 0:
raise AssertionError(
f'Invalid scale ({scale}) of pixel unshuffle for tensor '
f'with shape: {x.shape}')
h = int(h / scale)
w = int(w / scale)
x = x.view(b, c, h, scale, w, scale)
x = x.permute(0, 1, 3, 5, 2, 4)
return x.reshape(b, -1, h, w)
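

# Illustrative usage sketch (not part of the original file): pixel unshuffle
# trades spatial size for channels, mapping (B, C, H, W) to
# (B, C * scale**2, H / scale, W / scale). Shapes below are arbitrary.
if __name__ == '__main__':
    import torch

    x = torch.rand(2, 3, 8, 8)
    y = pixel_unshuffle(x, scale=2)
    assert y.shape == (2, 12, 4, 4)  # channels x4, spatial halved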

# File: BasicVSR_PlusPlus-master/mmedit/models/common/gca_module.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, xavier_init
from torch.nn import functional as F
class GCAModule(nn.Module):
"""Guided Contextual Attention Module.
From https://arxiv.org/pdf/2001.04069.pdf.
Based on https://github.com/nbei/Deep-Flow-Guided-Video-Inpainting.
    This module uses the image feature map to augment the alpha feature map
    with a guided contextual attention score.
Image feature and alpha feature are unfolded to small patches and later
    used as conv kernels. Thus, we refer to the unfolding size as the kernel
    size.
Image feature patches have a default kernel size 3 while the kernel size of
alpha feature patches could be specified by `rate` (see `rate` below). The
image feature patches are used to convolve with the image feature itself
to calculate the contextual attention. Then the attention feature map is
convolved by alpha feature patches to obtain the attention alpha feature.
At last, the attention alpha feature is added to the input alpha feature.
Args:
in_channels (int): Input channels of the guided contextual attention
module.
out_channels (int): Output channels of the guided contextual attention
module.
kernel_size (int): Kernel size of image feature patches. Default 3.
stride (int): Stride when unfolding the image feature. Default 1.
rate (int): The downsample rate of image feature map. The corresponding
kernel size and stride of alpha feature patches will be `rate x 2`
and `rate`. It could be regarded as the granularity of the gca
module. Default: 2.
pad_args (dict): Parameters of padding when convolve image feature with
image feature patches or alpha feature patches. Allowed keys are
`mode` and `value`. See torch.nn.functional.pad() for more
information. Default: dict(mode='reflect').
interpolation (str): Interpolation method in upsampling and
downsampling.
penalty (float): Punishment hyperparameter to avoid a large correlation
between each unknown patch and itself.
eps (float): A small number to avoid dividing by 0 when calculating
the normed image feature patch. Default: 1e-4.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
stride=1,
rate=2,
pad_args=dict(mode='reflect'),
interpolation='nearest',
penalty=-1e4,
eps=1e-4):
super().__init__()
self.kernel_size = kernel_size
self.stride = stride
self.rate = rate
self.pad_args = pad_args
self.interpolation = interpolation
self.penalty = penalty
self.eps = eps
        # reduce the channels of the input image feature.
self.guidance_conv = nn.Conv2d(in_channels, in_channels // 2, 1)
# convolution after the attention alpha feature
self.out_conv = ConvModule(
out_channels,
out_channels,
1,
norm_cfg=dict(type='BN'),
act_cfg=None)
self.init_weights()
def init_weights(self):
xavier_init(self.guidance_conv, distribution='uniform')
xavier_init(self.out_conv.conv, distribution='uniform')
constant_init(self.out_conv.norm, 1e-3)
def forward(self, img_feat, alpha_feat, unknown=None, softmax_scale=1.):
"""Forward function of GCAModule.
Args:
img_feat (Tensor): Image feature map of shape
(N, ori_c, ori_h, ori_w).
alpha_feat (Tensor): Alpha feature map of shape
(N, alpha_c, ori_h, ori_w).
unknown (Tensor, optional): Unknown area map generated by trimap.
If specified, this tensor should have shape
(N, 1, ori_h, ori_w).
softmax_scale (float, optional): The softmax scale of the attention
if unknown area is not provided in forward. Default: 1.
Returns:
Tensor: The augmented alpha feature.
"""
if alpha_feat.shape[2:4] != img_feat.shape[2:4]:
raise ValueError(
'image feature size does not align with alpha feature size: '
f'image feature size {img_feat.shape[2:4]}, '
f'alpha feature size {alpha_feat.shape[2:4]}')
if unknown is not None and unknown.shape[2:4] != img_feat.shape[2:4]:
raise ValueError(
'image feature size does not align with unknown mask size: '
f'image feature size {img_feat.shape[2:4]}, '
f'unknown mask size {unknown.shape[2:4]}')
# preprocess image feature
img_feat = self.guidance_conv(img_feat)
img_feat = F.interpolate(
img_feat, scale_factor=1 / self.rate, mode=self.interpolation)
# preprocess unknown mask
unknown, softmax_scale = self.process_unknown_mask(
unknown, img_feat, softmax_scale)
img_ps, alpha_ps, unknown_ps = self.extract_feature_maps_patches(
img_feat, alpha_feat, unknown)
# create self correlation mask with shape:
# (N, img_h*img_w, img_h, img_w)
self_mask = self.get_self_correlation_mask(img_feat)
# split tensors by batch dimension; tuple is returned
img_groups = torch.split(img_feat, 1, dim=0)
img_ps_groups = torch.split(img_ps, 1, dim=0)
alpha_ps_groups = torch.split(alpha_ps, 1, dim=0)
unknown_ps_groups = torch.split(unknown_ps, 1, dim=0)
scale_groups = torch.split(softmax_scale, 1, dim=0)
groups = (img_groups, img_ps_groups, alpha_ps_groups,
unknown_ps_groups, scale_groups)
out = []
# i is the virtual index of the sample in the current batch
for img_i, img_ps_i, alpha_ps_i, unknown_ps_i, scale_i in zip(*groups):
similarity_map = self.compute_similarity_map(img_i, img_ps_i)
gca_score = self.compute_guided_attention_score(
similarity_map, unknown_ps_i, scale_i, self_mask)
out_i = self.propagate_alpha_feature(gca_score, alpha_ps_i)
out.append(out_i)
out = torch.cat(out, dim=0)
        out = out.reshape_as(alpha_feat)
out = self.out_conv(out) + alpha_feat
return out
def extract_feature_maps_patches(self, img_feat, alpha_feat, unknown):
"""Extract image feature, alpha feature unknown patches.
Args:
img_feat (Tensor): Image feature map of shape
(N, img_c, img_h, img_w).
alpha_feat (Tensor): Alpha feature map of shape
(N, alpha_c, ori_h, ori_w).
unknown (Tensor, optional): Unknown area map generated by trimap of
shape (N, 1, img_h, img_w).
Returns:
tuple: 3-tuple of
``Tensor``: Image feature patches of shape \
(N, img_h*img_w, img_c, img_ks, img_ks).
                ``Tensor``: Alpha feature patches of shape \
(N, img_h*img_w, alpha_c, alpha_ks, alpha_ks).
``Tensor``: Unknown mask of shape (N, img_h*img_w, 1, 1).
"""
# extract image feature patches with shape:
# (N, img_h*img_w, img_c, img_ks, img_ks)
img_ks = self.kernel_size
img_ps = self.extract_patches(img_feat, img_ks, self.stride)
# extract alpha feature patches with shape:
# (N, img_h*img_w, alpha_c, alpha_ks, alpha_ks)
alpha_ps = self.extract_patches(alpha_feat, self.rate * 2, self.rate)
# extract unknown mask patches with shape: (N, img_h*img_w, 1, 1)
unknown_ps = self.extract_patches(unknown, img_ks, self.stride)
unknown_ps = unknown_ps.squeeze(dim=2) # squeeze channel dimension
unknown_ps = unknown_ps.mean(dim=[2, 3], keepdim=True)
return img_ps, alpha_ps, unknown_ps
def compute_similarity_map(self, img_feat, img_ps):
"""Compute similarity between image feature patches.
Args:
img_feat (Tensor): Image feature map of shape
(1, img_c, img_h, img_w).
img_ps (Tensor): Image feature patches tensor of shape
(1, img_h*img_w, img_c, img_ks, img_ks).
Returns:
Tensor: Similarity map between image feature patches with shape \
(1, img_h*img_w, img_h, img_w).
"""
img_ps = img_ps[0] # squeeze dim 0
# convolve the feature to get correlation (similarity) map
escape_NaN = torch.FloatTensor([self.eps]).to(img_feat)
img_ps_normed = img_ps / torch.max(self.l2_norm(img_ps), escape_NaN)
img_feat = self.pad(img_feat, self.kernel_size, self.stride)
similarity_map = F.conv2d(img_feat, img_ps_normed)
return similarity_map
def compute_guided_attention_score(self, similarity_map, unknown_ps, scale,
self_mask):
"""Compute guided attention score.
Args:
similarity_map (Tensor): Similarity map of image feature with shape
(1, img_h*img_w, img_h, img_w).
unknown_ps (Tensor): Unknown area patches tensor of shape
(1, img_h*img_w, 1, 1).
scale (Tensor): Softmax scale of known and unknown area:
[unknown_scale, known_scale].
self_mask (Tensor): Self correlation mask of shape
                (1, img_h*img_w, img_h, img_w). The entry at (1, i, y, x),
                where i = y * img_w + x, is the penalty value (-1e4 by
                default); all other entries are zero.
Returns:
            Tensor: Guided attention score map of shape \
(1, img_h*img_w, img_h, img_w).
"""
# scale the correlation with predicted scale factor for known and
# unknown area
unknown_scale, known_scale = scale[0]
out = similarity_map * (
unknown_scale * unknown_ps.gt(0.).float() +
known_scale * unknown_ps.le(0.).float())
# mask itself, self-mask only applied to unknown area
out = out + self_mask * unknown_ps
gca_score = F.softmax(out, dim=1)
return gca_score
def propagate_alpha_feature(self, gca_score, alpha_ps):
"""Propagate alpha feature based on guided attention score.
Args:
gca_score (Tensor): Guided attention score map of shape
(1, img_h*img_w, img_h, img_w).
alpha_ps (Tensor): Alpha feature patches tensor of shape
(1, img_h*img_w, alpha_c, alpha_ks, alpha_ks).
Returns:
Tensor: Propagated alpha feature map of shape \
(1, alpha_c, alpha_h, alpha_w).
"""
alpha_ps = alpha_ps[0] # squeeze dim 0
if self.rate == 1:
gca_score = self.pad(gca_score, kernel_size=2, stride=1)
alpha_ps = alpha_ps.permute(1, 0, 2, 3)
out = F.conv2d(gca_score, alpha_ps) / 4.
else:
out = F.conv_transpose2d(
gca_score, alpha_ps, stride=self.rate, padding=1) / 4.
return out
def process_unknown_mask(self, unknown, img_feat, softmax_scale):
"""Process unknown mask.
Args:
unknown (Tensor, optional): Unknown area map generated by trimap of
shape (N, 1, ori_h, ori_w)
img_feat (Tensor): The interpolated image feature map of shape
(N, img_c, img_h, img_w).
softmax_scale (float, optional): The softmax scale of the attention
if unknown area is not provided in forward. Default: 1.
Returns:
tuple: 2-tuple of
``Tensor``: Interpolated unknown area map of shape \
(N, img_h*img_w, img_h, img_w).
``Tensor``: Softmax scale tensor of known and unknown area of \
shape (N, 2).
"""
n, _, h, w = img_feat.shape
if unknown is not None:
unknown = unknown.clone()
unknown = F.interpolate(
unknown, scale_factor=1 / self.rate, mode=self.interpolation)
unknown_mean = unknown.mean(dim=[2, 3])
known_mean = 1 - unknown_mean
unknown_scale = torch.clamp(
torch.sqrt(unknown_mean / known_mean), 0.1, 10).to(img_feat)
known_scale = torch.clamp(
torch.sqrt(known_mean / unknown_mean), 0.1, 10).to(img_feat)
softmax_scale = torch.cat([unknown_scale, known_scale], dim=1)
else:
unknown = torch.ones((n, 1, h, w)).to(img_feat)
softmax_scale = torch.FloatTensor(
[softmax_scale,
softmax_scale]).view(1, 2).repeat(n, 1).to(img_feat)
return unknown, softmax_scale
def extract_patches(self, x, kernel_size, stride):
"""Extract feature patches.
The feature map will be padded automatically to make sure the number of
patches is equal to `(H / stride) * (W / stride)`.
Args:
x (Tensor): Feature map of shape (N, C, H, W).
kernel_size (int): Size of each patches.
stride (int): Stride between patches.
Returns:
Tensor: Extracted patches of shape \
(N, (H / stride) * (W / stride) , C, kernel_size, kernel_size).
"""
n, c, _, _ = x.shape
x = self.pad(x, kernel_size, stride)
x = F.unfold(x, (kernel_size, kernel_size), stride=(stride, stride))
x = x.permute(0, 2, 1)
x = x.reshape(n, -1, c, kernel_size, kernel_size)
return x
def pad(self, x, kernel_size, stride):
left = (kernel_size - stride + 1) // 2
right = (kernel_size - stride) // 2
pad = (left, right, left, right)
return F.pad(x, pad, **self.pad_args)
def get_self_correlation_mask(self, img_feat):
_, _, h, w = img_feat.shape
# As ONNX does not support dynamic num_classes, we have to convert it
# into an integer
self_mask = F.one_hot(
torch.arange(h * w).view(h, w), num_classes=int(h * w))
self_mask = self_mask.permute(2, 0, 1).view(1, h * w, h, w)
# use large negative value to mask out self-correlation before softmax
self_mask = self_mask * self.penalty
return self_mask.to(img_feat)
@staticmethod
def l2_norm(x):
x = x**2
x = x.sum(dim=[1, 2, 3], keepdim=True)
return torch.sqrt(x)
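

# Hedged smoke-test sketch (not part of the original file). The channel and
# spatial sizes are illustrative; `out_channels` must match the alpha feature
# channels because the attended alpha patches feed `out_conv` directly.
if __name__ == '__main__':
    gca = GCAModule(in_channels=128, out_channels=128, rate=2)
    img_feat = torch.rand(1, 128, 64, 64)
    alpha_feat = torch.rand(1, 128, 64, 64)
    unknown = torch.rand(1, 1, 64, 64).gt(0.5).float()
    out = gca(img_feat, alpha_feat, unknown)
    assert out.shape == alpha_feat.shape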

# File: BasicVSR_PlusPlus-master/mmedit/models/common/generation_model_utils.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, kaiming_init, normal_init, xavier_init
from torch.nn import init
def generation_init_weights(module, init_type='normal', init_gain=0.02):
"""Default initialization of network weights for image generation.
By default, we use normal init, but xavier and kaiming might work
better for some applications.
Args:
module (nn.Module): Module to be initialized.
init_type (str): The name of an initialization method:
normal | xavier | kaiming | orthogonal.
init_gain (float): Scaling factor for normal, xavier and
orthogonal.
"""
def init_func(m):
"""Initialization function.
Args:
m (nn.Module): Module to be initialized.
"""
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1
or classname.find('Linear') != -1):
if init_type == 'normal':
normal_init(m, 0.0, init_gain)
elif init_type == 'xavier':
xavier_init(m, gain=init_gain, distribution='normal')
elif init_type == 'kaiming':
kaiming_init(
m,
a=0,
mode='fan_in',
nonlinearity='leaky_relu',
distribution='normal')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight, gain=init_gain)
init.constant_(m.bias.data, 0.0)
else:
raise NotImplementedError(
f"Initialization method '{init_type}' is not implemented")
elif classname.find('BatchNorm2d') != -1:
# BatchNorm Layer's weight is not a matrix;
# only normal distribution applies.
normal_init(m, 1.0, init_gain)
module.apply(init_func)
class GANImageBuffer:
"""This class implements an image buffer that stores previously
generated images.
This buffer allows us to update the discriminator using a history of
generated images rather than the ones produced by the latest generator
to reduce model oscillation.
Args:
buffer_size (int): The size of image buffer. If buffer_size = 0,
no buffer will be created.
        buffer_ratio (float): The probability of returning an image
            previously stored in the buffer.
"""
def __init__(self, buffer_size, buffer_ratio=0.5):
self.buffer_size = buffer_size
# create an empty buffer
if self.buffer_size > 0:
self.img_num = 0
self.image_buffer = []
self.buffer_ratio = buffer_ratio
def query(self, images):
"""Query current image batch using a history of generated images.
Args:
images (Tensor): Current image batch without history information.
"""
if self.buffer_size == 0: # if the buffer size is 0, do nothing
return images
return_images = []
for image in images:
image = torch.unsqueeze(image.data, 0)
# if the buffer is not full, keep inserting current images
if self.img_num < self.buffer_size:
self.img_num = self.img_num + 1
self.image_buffer.append(image)
return_images.append(image)
else:
use_buffer = np.random.random() < self.buffer_ratio
# by self.buffer_ratio, the buffer will return a previously
# stored image, and insert the current image into the buffer
if use_buffer:
random_id = np.random.randint(0, self.buffer_size)
image_tmp = self.image_buffer[random_id].clone()
self.image_buffer[random_id] = image
return_images.append(image_tmp)
# by (1 - self.buffer_ratio), the buffer will return the
# current image
else:
return_images.append(image)
# collect all the images and return
return_images = torch.cat(return_images, 0)
return return_images
class UnetSkipConnectionBlock(nn.Module):
"""Construct a Unet submodule with skip connections, with the following
structure: downsampling - `submodule` - upsampling.
Args:
outer_channels (int): Number of channels at the outer conv layer.
inner_channels (int): Number of channels at the inner conv layer.
        in_channels (int): Number of channels in input images/features. If
            None, it is set to `outer_channels`. Default: None.
submodule (UnetSkipConnectionBlock): Previously constructed submodule.
Default: None.
is_outermost (bool): Whether this module is the outermost module.
Default: False.
is_innermost (bool): Whether this module is the innermost module.
Default: False.
norm_cfg (dict): Config dict to build norm layer. Default:
`dict(type='BN')`.
use_dropout (bool): Whether to use dropout layers. Default: False.
"""
def __init__(self,
outer_channels,
inner_channels,
in_channels=None,
submodule=None,
is_outermost=False,
is_innermost=False,
norm_cfg=dict(type='BN'),
use_dropout=False):
super().__init__()
# cannot be both outermost and innermost
assert not (is_outermost and is_innermost), (
"'is_outermost' and 'is_innermost' cannot be True"
'at the same time.')
self.is_outermost = is_outermost
assert isinstance(norm_cfg, dict), ("'norm_cfg' should be dict, but"
f'got {type(norm_cfg)}')
assert 'type' in norm_cfg, "'norm_cfg' must have key 'type'"
# We use norm layers in the unet skip connection block.
# Only for IN, use bias since it does not have affine parameters.
use_bias = norm_cfg['type'] == 'IN'
kernel_size = 4
stride = 2
padding = 1
if in_channels is None:
in_channels = outer_channels
down_conv_cfg = dict(type='Conv2d')
down_norm_cfg = norm_cfg
down_act_cfg = dict(type='LeakyReLU', negative_slope=0.2)
up_conv_cfg = dict(type='Deconv')
up_norm_cfg = norm_cfg
up_act_cfg = dict(type='ReLU')
up_in_channels = inner_channels * 2
up_bias = use_bias
middle = [submodule]
upper = []
if is_outermost:
down_act_cfg = None
down_norm_cfg = None
up_bias = True
up_norm_cfg = None
upper = [nn.Tanh()]
elif is_innermost:
down_norm_cfg = None
up_in_channels = inner_channels
middle = []
else:
upper = [nn.Dropout(0.5)] if use_dropout else []
down = [
ConvModule(
in_channels=in_channels,
out_channels=inner_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=use_bias,
conv_cfg=down_conv_cfg,
norm_cfg=down_norm_cfg,
act_cfg=down_act_cfg,
order=('act', 'conv', 'norm'))
]
up = [
ConvModule(
in_channels=up_in_channels,
out_channels=outer_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=up_bias,
conv_cfg=up_conv_cfg,
norm_cfg=up_norm_cfg,
act_cfg=up_act_cfg,
order=('act', 'conv', 'norm'))
]
model = down + middle + up + upper
self.model = nn.Sequential(*model)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
if self.is_outermost:
return self.model(x)
# add skip connections
return torch.cat([x, self.model(x)], 1)
class ResidualBlockWithDropout(nn.Module):
"""Define a Residual Block with dropout layers.
Ref:
Deep Residual Learning for Image Recognition
A residual block is a conv block with skip connections. A dropout layer is
added between two common conv modules.
Args:
channels (int): Number of channels in the conv layer.
padding_mode (str): The name of padding layer:
'reflect' | 'replicate' | 'zeros'.
norm_cfg (dict): Config dict to build norm layer. Default:
            `dict(type='BN')`.
use_dropout (bool): Whether to use dropout layers. Default: True.
"""
def __init__(self,
channels,
padding_mode,
norm_cfg=dict(type='BN'),
use_dropout=True):
super().__init__()
assert isinstance(norm_cfg, dict), ("'norm_cfg' should be dict, but"
f'got {type(norm_cfg)}')
assert 'type' in norm_cfg, "'norm_cfg' must have key 'type'"
# We use norm layers in the residual block with dropout layers.
# Only for IN, use bias since it does not have affine parameters.
use_bias = norm_cfg['type'] == 'IN'
block = [
ConvModule(
in_channels=channels,
out_channels=channels,
kernel_size=3,
padding=1,
bias=use_bias,
norm_cfg=norm_cfg,
padding_mode=padding_mode)
]
if use_dropout:
block += [nn.Dropout(0.5)]
block += [
ConvModule(
in_channels=channels,
out_channels=channels,
kernel_size=3,
padding=1,
bias=use_bias,
norm_cfg=norm_cfg,
act_cfg=None,
padding_mode=padding_mode)
]
self.block = nn.Sequential(*block)
def forward(self, x):
"""Forward function. Add skip connections without final ReLU.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
out = x + self.block(x)
return out
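

# Hedged usage sketch (not part of the original file): initialize a small
# module, then push a batch of fake images through a GANImageBuffer. Sizes
# and hyperparameters are illustrative.
if __name__ == '__main__':
    net = nn.Conv2d(3, 3, 3, padding=1)
    generation_init_weights(net, init_type='xavier', init_gain=0.02)
    buffer = GANImageBuffer(buffer_size=50, buffer_ratio=0.5)
    fakes = torch.rand(4, 3, 64, 64)
    history = buffer.query(fakes)  # may mix in previously stored images
    assert history.shape == fakes.shape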

# File: BasicVSR_PlusPlus-master/mmedit/models/common/ensemble.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
class SpatialTemporalEnsemble(nn.Module):
""" Apply spatial and temporal ensemble and compute outputs
Args:
is_temporal_ensemble (bool, optional): Whether to apply ensemble
temporally. If True, the sequence will also be flipped temporally.
If the input is an image, this argument must be set to False.
Default: False.
"""
def __init__(self, is_temporal_ensemble=False):
super().__init__()
self.is_temporal_ensemble = is_temporal_ensemble
def _transform(self, imgs, mode):
"""Apply spatial transform (flip, rotate) to the images.
Args:
            imgs (torch.Tensor): The images to be transformed.
mode (str): The mode of transform. Supported values are 'vertical',
'horizontal', and 'transpose', corresponding to vertical flip,
horizontal flip, and rotation, respectively.
Returns:
            torch.Tensor: The transformed images.
"""
is_single_image = False
if imgs.ndim == 4:
if self.is_temporal_ensemble:
raise ValueError('"is_temporal_ensemble" must be False if '
'the input is an image.')
is_single_image = True
imgs = imgs.unsqueeze(1)
if mode == 'vertical':
imgs = imgs.flip(4).clone()
elif mode == 'horizontal':
imgs = imgs.flip(3).clone()
elif mode == 'transpose':
imgs = imgs.permute(0, 1, 2, 4, 3).clone()
if is_single_image:
imgs = imgs.squeeze(1)
return imgs
def spatial_ensemble(self, imgs, model):
"""Apply spatial ensemble.
Args:
imgs (torch.Tensor): The images to be processed by the model. Its
size should be either (n, t, c, h, w) or (n, c, h, w).
model (nn.Module): The model to process the images.
Returns:
torch.Tensor: Output of the model with spatial ensemble applied.
"""
img_list = [imgs.cpu()]
for mode in ['vertical', 'horizontal', 'transpose']:
img_list.extend([self._transform(t, mode) for t in img_list])
output_list = [model(t.to(imgs.device)).cpu() for t in img_list]
for i in range(len(output_list)):
if i > 3:
output_list[i] = self._transform(output_list[i], 'transpose')
if i % 4 > 1:
output_list[i] = self._transform(output_list[i], 'horizontal')
if (i % 4) % 2 == 1:
output_list[i] = self._transform(output_list[i], 'vertical')
outputs = torch.stack(output_list, dim=0)
outputs = outputs.mean(dim=0, keepdim=False)
return outputs.to(imgs.device)
def forward(self, imgs, model):
"""Apply spatial and temporal ensemble.
Args:
imgs (torch.Tensor): The images to be processed by the model. Its
size should be either (n, t, c, h, w) or (n, c, h, w).
model (nn.Module): The model to process the images.
Returns:
            torch.Tensor: Output of the model with spatial (and, if enabled,
                temporal) ensemble applied.
"""
outputs = self.spatial_ensemble(imgs, model)
if self.is_temporal_ensemble:
outputs += self.spatial_ensemble(imgs.flip(1), model).flip(1)
outputs *= 0.5
return outputs
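

# Hedged sanity-check sketch (not part of the original file): with an
# identity "model", each of the eight spatial transforms is undone exactly,
# so the ensembled output equals the input.
if __name__ == '__main__':
    ensemble = SpatialTemporalEnsemble(is_temporal_ensemble=False)
    model = nn.Identity()
    imgs = torch.rand(1, 3, 16, 16)
    out = ensemble(imgs, model)
    assert torch.allclose(out, imgs, atol=1e-6)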

# File: BasicVSR_PlusPlus-master/mmedit/models/common/upsample.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from .sr_backbone_utils import default_init_weights
class PixelShufflePack(nn.Module):
""" Pixel Shuffle upsample layer.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
scale_factor (int): Upsample ratio.
upsample_kernel (int): Kernel size of Conv layer to expand channels.
Returns:
Upsampled feature map.
"""
def __init__(self, in_channels, out_channels, scale_factor,
upsample_kernel):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.scale_factor = scale_factor
self.upsample_kernel = upsample_kernel
self.upsample_conv = nn.Conv2d(
self.in_channels,
self.out_channels * scale_factor * scale_factor,
self.upsample_kernel,
padding=(self.upsample_kernel - 1) // 2)
self.init_weights()
def init_weights(self):
"""Initialize weights for PixelShufflePack.
"""
default_init_weights(self, 1)
def forward(self, x):
"""Forward function for PixelShufflePack.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
x = self.upsample_conv(x)
x = F.pixel_shuffle(x, self.scale_factor)
return x
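

# Hedged usage sketch (not part of the original file; the relative import
# above means this module is used through the package). A single x2 layer is
# shown; BasicVSR-style decoders typically stack two for x4 upsampling.
#     >>> import torch
#     >>> from mmedit.models.common import PixelShufflePack
#     >>> up = PixelShufflePack(64, 64, scale_factor=2, upsample_kernel=3)
#     >>> up(torch.rand(1, 64, 32, 32)).shape
#     torch.Size([1, 64, 64, 64])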

# File: BasicVSR_PlusPlus-master/mmedit/models/common/img_normalize.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
class ImgNormalize(nn.Conv2d):
"""Normalize images with the given mean and std value.
    Based on a Conv2d layer, so it can run on the GPU.
Args:
pixel_range (float): Pixel range of feature.
img_mean (Tuple[float]): Image mean of each channel.
img_std (Tuple[float]): Image std of each channel.
sign (int): Sign of bias. Default -1.
"""
def __init__(self, pixel_range, img_mean, img_std, sign=-1):
assert len(img_mean) == len(img_std)
num_channels = len(img_mean)
super().__init__(num_channels, num_channels, kernel_size=1)
std = torch.Tensor(img_std)
self.weight.data = torch.eye(num_channels).view(
num_channels, num_channels, 1, 1)
self.weight.data.div_(std.view(num_channels, 1, 1, 1))
self.bias.data = sign * pixel_range * torch.Tensor(img_mean)
self.bias.data.div_(std)
self.weight.requires_grad = False
self.bias.requires_grad = False
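

# Hedged numerical check (not part of the original file): with sign=-1 the
# layer computes (x - pixel_range * mean) / std per channel. The statistics
# below are the usual ImageNet values, used here only as an example.
if __name__ == '__main__':
    mean, std = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
    norm = ImgNormalize(pixel_range=255., img_mean=mean, img_std=std)
    x = torch.rand(1, 3, 4, 4) * 255.
    m = torch.tensor(mean).view(1, 3, 1, 1)
    s = torch.tensor(std).view(1, 3, 1, 1)
    assert torch.allclose(norm(x), (x - 255. * m) / s, atol=1e-3)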

# File: BasicVSR_PlusPlus-master/mmedit/models/common/mask_conv_module.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule
class MaskConvModule(ConvModule):
"""Mask convolution module.
    This is a simple wrapper for mask convolutions such as partial conv.
Convolutions in this module always need a mask as extra input.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): Same as nn.Conv2d.
kernel_size (int or tuple[int]): Same as nn.Conv2d.
stride (int or tuple[int]): Same as nn.Conv2d.
padding (int or tuple[int]): Same as nn.Conv2d.
dilation (int or tuple[int]): Same as nn.Conv2d.
groups (int): Same as nn.Conv2d.
bias (bool or str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
False.
conv_cfg (dict): Config dict for convolution layer.
norm_cfg (dict): Config dict for normalization layer.
act_cfg (dict): Config dict for activation layer, "relu" by default.
inplace (bool): Whether to use inplace mode for activation.
with_spectral_norm (bool): Whether use spectral norm in conv module.
padding_mode (str): If the `padding_mode` has not been supported by
current `Conv2d` in Pytorch, we will use our own padding layer
instead. Currently, we support ['zeros', 'circular'] with official
implementation and ['reflect'] with our own implementation.
Default: 'zeros'.
order (tuple[str]): The order of conv/norm/activation layers. It is a
sequence of "conv", "norm" and "act". Examples are
("conv", "norm", "act") and ("act", "conv", "norm").
"""
supported_conv_list = ['PConv']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.conv_cfg['type'] in self.supported_conv_list
self.init_weights()
def forward(self,
x,
mask=None,
activate=True,
norm=True,
return_mask=True):
"""Forward function for partial conv2d.
Args:
            x (torch.Tensor): Tensor with shape of (n, c, h, w).
mask (torch.Tensor): Tensor with shape of (n, c, h, w) or
(n, 1, h, w). If mask is not given, the function will
work as standard conv2d. Default: None.
activate (bool): Whether use activation layer.
norm (bool): Whether use norm layer.
return_mask (bool): If True and mask is not None, the updated
mask will be returned. Default: True.
Returns:
Tensor or tuple: Result Tensor or 2-tuple of
``Tensor``: Results after partial conv.
``Tensor``: Updated mask will be returned if mask is given \
and `return_mask` is True.
"""
for layer in self.order:
if layer == 'conv':
if self.with_explicit_padding:
x = self.padding_layer(x)
mask = self.padding_layer(mask)
if return_mask:
x, updated_mask = self.conv(
x, mask, return_mask=return_mask)
else:
x = self.conv(x, mask, return_mask=False)
elif layer == 'norm' and norm and self.with_norm:
x = self.norm(x)
elif layer == 'act' and activate and self.with_activation:
x = self.activate(x)
if return_mask:
return x, updated_mask
return x
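

# Hedged usage sketch (not part of the original file): MaskConvModule needs a
# mask-aware conv type registered with mmcv; importing `partial_conv` below
# registers 'PConv'. Assumes mmedit is installed; sizes are illustrative.
if __name__ == '__main__':
    import torch

    import mmedit.models.common.partial_conv  # noqa: F401, registers 'PConv'
    conv = MaskConvModule(
        3, 8, 3, padding=1, conv_cfg=dict(type='PConv'), norm_cfg=None)
    x = torch.rand(1, 3, 16, 16)
    mask = torch.ones(1, 1, 16, 16)
    mask[:, :, 4:12, 4:12] = 0.  # a square hole
    out, updated_mask = conv(x, mask)
    assert out.shape == (1, 8, 16, 16) and updated_mask.shape == mask.shape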

# File: BasicVSR_PlusPlus-master/mmedit/models/common/partial_conv.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import CONV_LAYERS
@CONV_LAYERS.register_module(name='PConv')
class PartialConv2d(nn.Conv2d):
"""Implementation for partial convolution.
Image Inpainting for Irregular Holes Using Partial Convolutions
[https://arxiv.org/abs/1804.07723]
Args:
multi_channel (bool): If True, the mask is multi-channel. Otherwise,
the mask is single-channel.
eps (float): Need to be changed for mixed precision training.
For mixed precision training, you need change 1e-8 to 1e-6.
"""
def __init__(self, *args, multi_channel=False, eps=1e-8, **kwargs):
super().__init__(*args, **kwargs)
# whether the mask is multi-channel or not
self.multi_channel = multi_channel
self.eps = eps
if self.multi_channel:
out_channels, in_channels = self.out_channels, self.in_channels
else:
out_channels, in_channels = 1, 1
self.register_buffer(
'weight_mask_updater',
torch.ones(out_channels, in_channels, self.kernel_size[0],
self.kernel_size[1]))
self.mask_kernel_numel = np.prod(self.weight_mask_updater.shape[1:4])
self.mask_kernel_numel = (self.mask_kernel_numel).item()
def forward(self, input, mask=None, return_mask=True):
"""Forward function for partial conv2d.
Args:
input (torch.Tensor): Tensor with shape of (n, c, h, w).
mask (torch.Tensor): Tensor with shape of (n, c, h, w) or
(n, 1, h, w). If mask is not given, the function will
work as standard conv2d. Default: None.
return_mask (bool): If True and mask is not None, the updated
mask will be returned. Default: True.
Returns:
torch.Tensor : Results after partial conv.\
torch.Tensor : Updated mask will be returned if mask is given and \
``return_mask`` is True.
"""
assert input.dim() == 4
if mask is not None:
assert mask.dim() == 4
if self.multi_channel:
assert mask.shape[1] == input.shape[1]
else:
assert mask.shape[1] == 1
# update mask and compute mask ratio
if mask is not None:
with torch.no_grad():
updated_mask = F.conv2d(
mask,
self.weight_mask_updater,
bias=None,
stride=self.stride,
padding=self.padding,
dilation=self.dilation)
mask_ratio = self.mask_kernel_numel / (updated_mask + self.eps)
updated_mask = torch.clamp(updated_mask, 0, 1)
mask_ratio = mask_ratio * updated_mask
# standard conv2d
if mask is not None:
input = input * mask
raw_out = super().forward(input)
if mask is not None:
if self.bias is None:
output = raw_out * mask_ratio
else:
# compute new bias when mask is given
bias_view = self.bias.view(1, self.out_channels, 1, 1)
output = (raw_out - bias_view) * mask_ratio + bias_view
output = output * updated_mask
else:
output = raw_out
if return_mask and mask is not None:
return output, updated_mask
return output
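

# Hedged behavior sketch (not part of the original file): pixels whose entire
# receptive field is masked stay exactly zero, because the output is scaled
# by the updated mask. Sizes are illustrative.
if __name__ == '__main__':
    pconv = PartialConv2d(3, 8, kernel_size=3, padding=1)
    x = torch.rand(1, 3, 16, 16)
    mask = torch.ones(1, 1, 16, 16)
    mask[:, :, 4:12, 4:12] = 0.  # a square hole
    out, updated_mask = pconv(x, mask)
    # the 3x3 neighborhoods of pixels 6..9 lie fully inside the hole
    assert out[:, :, 6:10, 6:10].abs().sum() == 0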

# File: BasicVSR_PlusPlus-master/mmedit/models/losses/pixelwise_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..registry import LOSSES
from .utils import masked_loss
_reduction_modes = ['none', 'mean', 'sum']
@masked_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated L1 loss.
"""
return F.l1_loss(pred, target, reduction='none')
@masked_loss
def mse_loss(pred, target):
"""MSE loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated MSE loss.
"""
return F.mse_loss(pred, target, reduction='none')
@masked_loss
def charbonnier_loss(pred, target, eps=1e-12):
"""Charbonnier loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
        eps (float): A value used to control the curvature near zero.
            Default: 1e-12.
Returns:
Tensor: Calculated Charbonnier loss.
"""
return torch.sqrt((pred - target)**2 + eps)
@LOSSES.register_module()
class L1Loss(nn.Module):
"""L1 (mean absolute error, MAE) loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
sample_wise (bool): Whether calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
(argument of `forward()`) is not None. It will first reduce loss
with 'mean' per-sample, and then it means over all the samples.
Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(f'Unsupported reduction mode: {reduction}. '
f'Supported ones are: {_reduction_modes}')
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, pred, target, weight=None, **kwargs):
"""Forward Function.
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise
weights. Default: None.
"""
return self.loss_weight * l1_loss(
pred,
target,
weight,
reduction=self.reduction,
sample_wise=self.sample_wise)
@LOSSES.register_module()
class MSELoss(nn.Module):
"""MSE (L2) loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
sample_wise (bool): Whether calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
(argument of `forward()`) is not None. It will first reduces loss
with 'mean' per-sample, and then it means over all the samples.
Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(f'Unsupported reduction mode: {reduction}. '
f'Supported ones are: {_reduction_modes}')
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, pred, target, weight=None, **kwargs):
"""Forward Function.
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise
weights. Default: None.
"""
return self.loss_weight * mse_loss(
pred,
target,
weight,
reduction=self.reduction,
sample_wise=self.sample_wise)
@LOSSES.register_module()
class CharbonnierLoss(nn.Module):
"""Charbonnier loss (one variant of Robust L1Loss, a differentiable
variant of L1Loss).
Described in "Deep Laplacian Pyramid Networks for Fast and Accurate
Super-Resolution".
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
sample_wise (bool): Whether calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
(argument of `forward()`) is not None. It will first reduces loss
with 'mean' per-sample, and then it means over all the samples.
Default: False.
eps (float): A value used to control the curvature near zero.
Default: 1e-12.
"""
def __init__(self,
loss_weight=1.0,
reduction='mean',
sample_wise=False,
eps=1e-12):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(f'Unsupported reduction mode: {reduction}. '
f'Supported ones are: {_reduction_modes}')
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
self.eps = eps
def forward(self, pred, target, weight=None, **kwargs):
"""Forward Function.
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise
weights. Default: None.
"""
return self.loss_weight * charbonnier_loss(
pred,
target,
weight,
eps=self.eps,
reduction=self.reduction,
sample_wise=self.sample_wise)
@LOSSES.register_module()
class MaskedTVLoss(L1Loss):
"""Masked TV loss.
Args:
loss_weight (float, optional): Loss weight. Defaults to 1.0.
"""
def __init__(self, loss_weight=1.0):
super().__init__(loss_weight=loss_weight)
def forward(self, pred, mask=None):
"""Forward function.
Args:
pred (torch.Tensor): Tensor with shape of (n, c, h, w).
mask (torch.Tensor, optional): Tensor with shape of (n, 1, h, w).
Defaults to None.
Returns:
            torch.Tensor: Calculated masked TV loss.
"""
y_diff = super().forward(
pred[:, :, :-1, :], pred[:, :, 1:, :], weight=mask[:, :, :-1, :])
x_diff = super().forward(
pred[:, :, :, :-1], pred[:, :, :, 1:], weight=mask[:, :, :, :-1])
loss = x_diff + y_diff
return loss
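

# Hedged usage sketch (not part of the original file; this module uses
# relative imports, so access it through the package). Inputs are random and
# shown only for the reduction behavior.
#     >>> import torch
#     >>> from mmedit.models.losses import CharbonnierLoss, L1Loss, MSELoss
#     >>> pred, target = torch.rand(2, 3, 8, 8), torch.rand(2, 3, 8, 8)
#     >>> weight = torch.ones(2, 1, 8, 8)  # e.g. a mask of valid pixels
#     >>> for loss_fn in (L1Loss(), MSELoss(), CharbonnierLoss()):
#     ...     assert loss_fn(pred, target, weight=weight).ndim == 0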

# File: BasicVSR_PlusPlus-master/mmedit/models/losses/utils.py
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
if reduction_enum == 1:
return loss.mean()
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
sample_wise (bool): Whether calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
(argument of `forward()`) is not None. It will first reduces loss
with 'mean' per-sample, and then it means over all the samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
# if weight is not specified or reduction is sum, just reduce the loss
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
# if reduction is mean, then compute mean over masked region
elif reduction == 'mean':
# expand weight from N1HW to NCHW
if weight.size(1) == 1:
weight = weight.expand_as(loss)
# small value to prevent division by zero
eps = 1e-12
# perform sample-wise mean
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True) # NCHW to N111
loss = (loss / (weight + eps)).sum() / weight.size(0)
# perform pixel-wise mean
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
    sample_wise=False, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
sample_wise=False,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
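

# Hedged numerical sketch (not part of the original file) showing how
# `sample_wise` changes the masked mean when per-sample masks have very
# different sizes.
if __name__ == '__main__':
    import torch

    loss = torch.ones(2, 1, 4, 4)
    loss[0] *= 2.  # sample 0 has loss 2 everywhere, sample 1 has loss 1
    weight = torch.zeros(2, 1, 4, 4)
    weight[0, :, 0, 0] = 1.  # sample 0: a single valid pixel
    weight[1] = 1.  # sample 1: all 16 pixels valid
    # pixel-wise mean: (2 * 1 + 1 * 16) / 17 ~= 1.06
    print(mask_reduce_loss(loss, weight, 'mean', sample_wise=False))
    # sample-wise mean: (2 / 1 + 16 / 16) / 2 = 1.5
    print(mask_reduce_loss(loss, weight, 'mean', sample_wise=True))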

# File: BasicVSR_PlusPlus-master/mmedit/models/losses/gan_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import conv2d
from ..registry import LOSSES
@LOSSES.register_module()
class GANLoss(nn.Module):
"""Define GAN loss.
Args:
gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.
real_label_val (float): The value for real label. Default: 1.0.
fake_label_val (float): The value for fake label. Default: 0.0.
loss_weight (float): Loss weight. Default: 1.0.
            Note that loss_weight is only for generators; it is always 1.0
for discriminators.
"""
def __init__(self,
gan_type,
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=1.0):
super().__init__()
self.gan_type = gan_type
self.real_label_val = real_label_val
self.fake_label_val = fake_label_val
self.loss_weight = loss_weight
if self.gan_type == 'smgan':
self.gaussian_blur = GaussianBlur()
if self.gan_type == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif self.gan_type == 'lsgan' or self.gan_type == 'smgan':
self.loss = nn.MSELoss()
elif self.gan_type == 'wgan':
self.loss = self._wgan_loss
elif self.gan_type == 'hinge':
self.loss = nn.ReLU()
else:
raise NotImplementedError(
f'GAN type {self.gan_type} is not implemented.')
def _wgan_loss(self, input, target):
"""wgan loss.
Args:
input (Tensor): Input tensor.
target (bool): Target label.
Returns:
Tensor: wgan loss.
"""
return -input.mean() if target else input.mean()
def get_target_label(self, input, target_is_real):
"""Get target label.
Args:
input (Tensor): Input tensor.
target_is_real (bool): Whether the target is real or fake.
Returns:
(bool | Tensor): Target tensor. Return bool for wgan, otherwise,
return Tensor.
"""
if self.gan_type == 'wgan':
return target_is_real
target_val = (
self.real_label_val if target_is_real else self.fake_label_val)
return input.new_ones(input.size()) * target_val
def forward(self, input, target_is_real, is_disc=False, mask=None):
"""
Args:
input (Tensor): The input for the loss module, i.e., the network
prediction.
target_is_real (bool): Whether the target is real or fake.
            is_disc (bool): Whether the loss is for discriminators or not.
                Default: False.
            mask (Tensor, optional): The mask used by the 'smgan' type.
                Default: None.
Returns:
Tensor: GAN loss value.
"""
target_label = self.get_target_label(input, target_is_real)
if self.gan_type == 'hinge':
if is_disc: # for discriminators in hinge-gan
input = -input if target_is_real else input
loss = self.loss(1 + input).mean()
else: # for generators in hinge-gan
loss = -input.mean()
elif self.gan_type == 'smgan':
input_height, input_width = input.shape[2:]
mask_height, mask_width = mask.shape[2:]
# Handle inconsistent size between outputs and masks
if input_height != mask_height or input_width != mask_width:
input = F.interpolate(
input,
size=(mask_height, mask_width),
mode='bilinear',
align_corners=True)
target_label = self.get_target_label(input, target_is_real)
            if is_disc:
                if not target_is_real:
                    # use the blurred mask as a soft target for fake samples
                    target_label = self.gaussian_blur(mask).detach()
                    target_label = (
                        target_label.cuda()
                        if mask.is_cuda else target_label.cpu())
                loss = self.loss(input, target_label)
else:
loss = self.loss(input, target_label) * mask / mask.mean()
loss = loss.mean()
else: # other gan types
loss = self.loss(input, target_label)
# loss_weight is always 1.0 for discriminators
return loss if is_disc else loss * self.loss_weight
@LOSSES.register_module()
class GaussianBlur(nn.Module):
"""A Gaussian filter which blurs a given tensor with a two-dimensional
gaussian kernel by convolving it along each channel. Batch operation
is supported.
This function is modified from kornia.filters.gaussian:
`<https://kornia.readthedocs.io/en/latest/_modules/kornia/filters/gaussian.html>`.
Args:
kernel_size (tuple[int]): The size of the kernel. Default: (71, 71).
sigma (tuple[float]): The standard deviation of the kernel.
            Default: (10.0, 10.0).
Returns:
Tensor: The Gaussian-blurred tensor.
Shape:
- input: Tensor with shape of (n, c, h, w)
- output: Tensor with shape of (n, c, h, w)
"""
def __init__(self, kernel_size=(71, 71), sigma=(10.0, 10.0)):
        super().__init__()
self.kernel_size = kernel_size
self.sigma = sigma
self.padding = self.compute_zero_padding(kernel_size)
self.kernel = self.get_2d_gaussian_kernel(kernel_size, sigma)
@staticmethod
def compute_zero_padding(kernel_size):
"""Compute zero padding tuple."""
padding = [(ks - 1) // 2 for ks in kernel_size]
return padding[0], padding[1]
def get_2d_gaussian_kernel(self, kernel_size, sigma):
"""Get the two-dimensional Gaussian filter matrix coefficients.
Args:
kernel_size (tuple[int]): Kernel filter size in the x and y
direction. The kernel sizes
should be odd and positive.
            sigma (tuple[float]): Gaussian standard deviation in
the x and y direction.
Returns:
kernel_2d (Tensor): A 2D torch tensor with gaussian filter
matrix coefficients.
"""
if not isinstance(kernel_size, tuple) or len(kernel_size) != 2:
raise TypeError(
'kernel_size must be a tuple of length two. Got {}'.format(
kernel_size))
if not isinstance(sigma, tuple) or len(sigma) != 2:
raise TypeError(
'sigma must be a tuple of length two. Got {}'.format(sigma))
kernel_size_x, kernel_size_y = kernel_size
sigma_x, sigma_y = sigma
kernel_x = self.get_1d_gaussian_kernel(kernel_size_x, sigma_x)
kernel_y = self.get_1d_gaussian_kernel(kernel_size_y, sigma_y)
kernel_2d = torch.matmul(
kernel_x.unsqueeze(-1),
kernel_y.unsqueeze(-1).t())
return kernel_2d
def get_1d_gaussian_kernel(self, kernel_size, sigma):
"""Get the Gaussian filter coefficients in one dimension (x or y direction).
Args:
kernel_size (int): Kernel filter size in x or y direction.
Should be odd and positive.
sigma (float): Gaussian standard deviation in x or y direction.
Returns:
kernel_1d (Tensor): A 1D torch tensor with gaussian filter
coefficients in x or y direction.
"""
if not isinstance(kernel_size,
int) or kernel_size % 2 == 0 or kernel_size <= 0:
raise TypeError(
'kernel_size must be an odd positive integer. Got {}'.format(
kernel_size))
kernel_1d = self.gaussian(kernel_size, sigma)
return kernel_1d
def gaussian(self, kernel_size, sigma):
def gauss_arg(x):
return -(x - kernel_size // 2)**2 / float(2 * sigma**2)
gauss = torch.stack([
torch.exp(torch.tensor(gauss_arg(x))) for x in range(kernel_size)
])
return gauss / gauss.sum()
def forward(self, x):
if not torch.is_tensor(x):
raise TypeError(
'Input x type is not a torch.Tensor. Got {}'.format(type(x)))
if not len(x.shape) == 4:
raise ValueError(
'Invalid input shape, we expect BxCxHxW. Got: {}'.format(
x.shape))
_, c, _, _ = x.shape
tmp_kernel = self.kernel.to(x.device).to(x.dtype)
kernel = tmp_kernel.repeat(c, 1, 1, 1)
return conv2d(x, kernel, padding=self.padding, stride=1, groups=c)
def gradient_penalty_loss(discriminator, real_data, fake_data, mask=None):
"""Calculate gradient penalty for wgan-gp.
Args:
discriminator (nn.Module): Network for the discriminator.
real_data (Tensor): Real input data.
fake_data (Tensor): Fake input data.
mask (Tensor): Masks for inpainting. Default: None.
Returns:
Tensor: A tensor for gradient penalty.
"""
batch_size = real_data.size(0)
alpha = torch.rand(batch_size, 1, 1, 1).to(real_data)
# interpolate between real_data and fake_data
interpolates = alpha * real_data + (1. - alpha) * fake_data
interpolates = autograd.Variable(interpolates, requires_grad=True)
disc_interpolates = discriminator(interpolates)
gradients = autograd.grad(
outputs=disc_interpolates,
inputs=interpolates,
grad_outputs=torch.ones_like(disc_interpolates),
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
if mask is not None:
gradients = gradients * mask
gradients_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean()
if mask is not None:
gradients_penalty /= torch.mean(mask)
return gradients_penalty
@LOSSES.register_module()
class GradientPenaltyLoss(nn.Module):
"""Gradient penalty loss for wgan-gp.
Args:
loss_weight (float): Loss weight. Default: 1.0.
"""
def __init__(self, loss_weight=1.):
super().__init__()
self.loss_weight = loss_weight
def forward(self, discriminator, real_data, fake_data, mask=None):
"""Forward function.
Args:
discriminator (nn.Module): Network for the discriminator.
real_data (Tensor): Real input data.
fake_data (Tensor): Fake input data.
mask (Tensor): Masks for inpainting. Default: None.
Returns:
Tensor: Loss.
"""
loss = gradient_penalty_loss(
discriminator, real_data, fake_data, mask=mask)
return loss * self.loss_weight
@LOSSES.register_module()
class DiscShiftLoss(nn.Module):
"""Disc shift loss.
Args:
loss_weight (float, optional): Loss weight. Defaults to 1.0.
"""
def __init__(self, loss_weight=0.1):
super().__init__()
self.loss_weight = loss_weight
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Tensor with shape (n, c, h, w)
Returns:
Tensor: Loss.
"""
loss = torch.mean(x**2)
return loss * self.loss_weight
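

# Hedged usage sketch (not part of the original file; relative imports mean
# this module is accessed through the package). Logits are random
# placeholders.
#     >>> import torch
#     >>> from mmedit.models.losses import GANLoss
#     >>> criterion = GANLoss('vanilla', loss_weight=0.01)
#     >>> fake_logits = torch.randn(4, 1)
#     >>> loss_g = criterion(fake_logits, target_is_real=True, is_disc=False)
#     >>> loss_d = criterion(fake_logits, target_is_real=False, is_disc=True)
#     >>> loss_g.ndim, loss_d.ndim
#     (0, 0)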

# File: BasicVSR_PlusPlus-master/mmedit/models/losses/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .gan_loss import DiscShiftLoss, GANLoss, GaussianBlur, GradientPenaltyLoss
from .perceptual_loss import PerceptualLoss, PerceptualVGG
from .pixelwise_loss import CharbonnierLoss, L1Loss, MaskedTVLoss, MSELoss
from .utils import mask_reduce_loss, reduce_loss
__all__ = [
'L1Loss',
'MSELoss',
'CharbonnierLoss',
'GANLoss',
'GaussianBlur',
'GradientPenaltyLoss',
'PerceptualLoss',
'PerceptualVGG',
'reduce_loss',
'mask_reduce_loss',
'DiscShiftLoss',
'MaskedTVLoss',
]

# File: BasicVSR_PlusPlus-master/mmedit/models/losses/perceptual_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torchvision.models.vgg as vgg
from mmcv.runner import load_checkpoint
from torch.nn import functional as F
from mmedit.utils import get_root_logger
from ..registry import LOSSES
class PerceptualVGG(nn.Module):
"""VGG network used in calculating perceptual loss.
    In this implementation, we allow users to choose whether to use
    normalization in the input feature and the type of vgg network. Note
    that the pretrained path must fit the vgg type.
Args:
layer_name_list (list[str]): According to the name in this list,
forward function will return the corresponding features. This
        list contains the name of each layer in `vgg.features`. An example
of this list is ['4', '10'].
vgg_type (str): Set the type of vgg network. Default: 'vgg19'.
use_input_norm (bool): If True, normalize the input image.
            Importantly, the input feature must be in the range [0, 1].
Default: True.
pretrained (str): Path for pretrained weights. Default:
'torchvision://vgg19'
"""
def __init__(self,
layer_name_list,
vgg_type='vgg19',
use_input_norm=True,
pretrained='torchvision://vgg19'):
super().__init__()
if pretrained.startswith('torchvision://'):
assert vgg_type in pretrained
self.layer_name_list = layer_name_list
self.use_input_norm = use_input_norm
# get vgg model and load pretrained vgg weight
# remove _vgg from attributes to avoid `find_unused_parameters` bug
_vgg = getattr(vgg, vgg_type)()
self.init_weights(_vgg, pretrained)
num_layers = max(map(int, layer_name_list)) + 1
assert len(_vgg.features) >= num_layers
# only borrow layers that will be used from _vgg to avoid unused params
self.vgg_layers = _vgg.features[:num_layers]
if self.use_input_norm:
# the mean is for image with range [0, 1]
self.register_buffer(
'mean',
torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
            # the std is for image with range [0, 1]
self.register_buffer(
'std',
torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
for v in self.vgg_layers.parameters():
v.requires_grad = False
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
if self.use_input_norm:
x = (x - self.mean) / self.std
output = {}
for name, module in self.vgg_layers.named_children():
x = module(x)
if name in self.layer_name_list:
output[name] = x.clone()
return output
def init_weights(self, model, pretrained):
"""Init weights.
Args:
model (nn.Module): Models to be inited.
pretrained (str): Path for pretrained weights.
"""
logger = get_root_logger()
load_checkpoint(model, pretrained, logger=logger)
@LOSSES.register_module()
class PerceptualLoss(nn.Module):
"""Perceptual loss with commonly used style loss.
Args:
        layer_weights (dict): The weight for each layer of vgg feature for
perceptual loss. Here is an example: {'4': 1., '9': 1., '18': 1.},
            which means the 5th, 10th and 19th feature layers will be
extracted with weight 1.0 in calculating losses.
        layer_weights_style (dict): The weight for each layer of vgg feature
for style loss. If set to 'None', the weights are set equal to
the weights for perceptual loss. Default: None.
vgg_type (str): The type of vgg network used as feature extractor.
Default: 'vgg19'.
use_input_norm (bool): If True, normalize the input image in vgg.
Default: True.
perceptual_weight (float): If `perceptual_weight > 0`, the perceptual
            loss will be calculated and the loss will be multiplied by the
weight. Default: 1.0.
style_weight (float): If `style_weight > 0`, the style loss will be
            calculated and the loss will be multiplied by the weight.
Default: 1.0.
norm_img (bool): If True, the image will be normed to [0, 1]. Note that
            this is different from the `use_input_norm`, which norms the
            input in the forward function of vgg according to the dataset
            statistics.
Importantly, the input image must be in range [-1, 1].
pretrained (str): Path for pretrained weights. Default:
'torchvision://vgg19'.
criterion (str): Criterion type. Options are 'l1' and 'mse'.
Default: 'l1'.
"""
def __init__(self,
layer_weights,
layer_weights_style=None,
vgg_type='vgg19',
use_input_norm=True,
perceptual_weight=1.0,
style_weight=1.0,
norm_img=True,
pretrained='torchvision://vgg19',
criterion='l1'):
super().__init__()
self.norm_img = norm_img
self.perceptual_weight = perceptual_weight
self.style_weight = style_weight
self.layer_weights = layer_weights
self.layer_weights_style = layer_weights_style
self.vgg = PerceptualVGG(
layer_name_list=list(self.layer_weights.keys()),
vgg_type=vgg_type,
use_input_norm=use_input_norm,
pretrained=pretrained)
if self.layer_weights_style is not None and \
self.layer_weights_style != self.layer_weights:
self.vgg_style = PerceptualVGG(
layer_name_list=list(self.layer_weights_style.keys()),
vgg_type=vgg_type,
use_input_norm=use_input_norm,
pretrained=pretrained)
else:
self.layer_weights_style = self.layer_weights
self.vgg_style = None
criterion = criterion.lower()
if criterion == 'l1':
self.criterion = torch.nn.L1Loss()
elif criterion == 'mse':
self.criterion = torch.nn.MSELoss()
else:
raise NotImplementedError(
f'{criterion} criterion has not been supported in'
' this version.')
def forward(self, x, gt):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
gt (Tensor): Ground-truth tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
if self.norm_img:
x = (x + 1.) * 0.5
gt = (gt + 1.) * 0.5
# extract vgg features
x_features = self.vgg(x)
gt_features = self.vgg(gt.detach())
# calculate perceptual loss
if self.perceptual_weight > 0:
percep_loss = 0
for k in x_features.keys():
percep_loss += self.criterion(
x_features[k], gt_features[k]) * self.layer_weights[k]
percep_loss *= self.perceptual_weight
else:
percep_loss = None
# calculate style loss
if self.style_weight > 0:
if self.vgg_style is not None:
x_features = self.vgg_style(x)
gt_features = self.vgg_style(gt.detach())
style_loss = 0
for k in x_features.keys():
style_loss += self.criterion(
self._gram_mat(x_features[k]),
self._gram_mat(
gt_features[k])) * self.layer_weights_style[k]
style_loss *= self.style_weight
else:
style_loss = None
return percep_loss, style_loss
def _gram_mat(self, x):
"""Calculate Gram matrix.
Args:
x (torch.Tensor): Tensor with shape of (n, c, h, w).
Returns:
torch.Tensor: Gram matrix.
"""
(n, c, h, w) = x.size()
features = x.view(n, c, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (c * h * w)
return gram
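# A minimal sketch (not part of the original file) illustrating _gram_mat:
# for an all-ones feature map with c=2 and h=w=4, every Gram entry is
# sum(1 * 1 over h*w) / (c * h * w) = 16 / 32 = 0.5.
def _gram_mat_example():  # pragma: no cover
    feat = torch.ones(1, 2, 4, 4)
    features = feat.view(1, 2, 16)
    gram = features.bmm(features.transpose(1, 2)) / (2 * 4 * 4)
    assert torch.allclose(gram, torch.full((1, 2, 2), 0.5))
    return gram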
@LOSSES.register_module()
class TransferalPerceptualLoss(nn.Module):
"""Transferal perceptual loss.
Args:
        loss_weight (float): Loss weight. Default: 1.0.
        use_attention (bool): If True, use soft-attention tensor.
            Default: True.
        criterion (str): Criterion type. Options are 'l1' and 'mse'.
            Default: 'mse'.
"""
def __init__(self, loss_weight=1.0, use_attention=True, criterion='mse'):
super().__init__()
self.use_attention = use_attention
self.loss_weight = loss_weight
criterion = criterion.lower()
if criterion == 'l1':
self.loss_function = torch.nn.L1Loss()
elif criterion == 'mse':
self.loss_function = torch.nn.MSELoss()
else:
raise ValueError(
f"criterion should be 'l1' or 'mse', but got {criterion}")
def forward(self, maps, soft_attention, textures):
"""Forward function.
Args:
maps (Tuple[Tensor]): Input tensors.
soft_attention (Tensor): Soft-attention tensor.
textures (Tuple[Tensor]): Ground-truth tensors.
Returns:
Tensor: Forward results.
"""
if self.use_attention:
h, w = soft_attention.shape[-2:]
softs = [torch.sigmoid(soft_attention)]
for i in range(1, len(maps)):
softs.append(
F.interpolate(
soft_attention,
size=(h * pow(2, i), w * pow(2, i)),
mode='bicubic',
align_corners=False))
else:
softs = [1., 1., 1.]
loss_texture = 0
for map, soft, texture in zip(maps, softs, textures):
loss_texture += self.loss_function(map * soft, texture * soft)
return loss_texture * self.loss_weight
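# A minimal usage sketch (not part of the original file). The tensor sizes
# below are illustrative assumptions; the structural requirement shown is
# that maps[i] doubles in spatial size per level when use_attention=True,
# matching the bicubic upsampling of the soft-attention map above.
def _transferal_loss_example():  # pragma: no cover
    loss = TransferalPerceptualLoss(use_attention=True, criterion='mse')
    maps = (torch.rand(1, 8, 16, 16), torch.rand(1, 8, 32, 32),
            torch.rand(1, 8, 64, 64))
    textures = tuple(torch.rand_like(m) for m in maps)
    soft_attention = torch.rand(1, 1, 16, 16)  # coarsest-scale attention
    return loss(maps, soft_attention, textures)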
| 10,350 | 34.940972 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/backbones/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .sr_backbones import BasicVSRPlusPlus
__all__ = ['BasicVSRPlusPlus']
| 123 | 23.8 | 47 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/backbones/sr_backbones/basicvsr_pp.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import constant_init
from mmcv.ops import ModulatedDeformConv2d, modulated_deform_conv2d
from mmcv.runner import load_checkpoint
from mmedit.models.backbones.sr_backbones.basicvsr_net import (
ResidualBlocksWithInputConv, SPyNet)
from mmedit.models.common import PixelShufflePack, flow_warp
from mmedit.models.registry import BACKBONES
from mmedit.utils import get_root_logger
@BACKBONES.register_module()
class BasicVSRPlusPlus(nn.Module):
"""BasicVSR++ network structure.
Support either x4 upsampling or same size output.
Paper:
BasicVSR++: Improving Video Super-Resolution with Enhanced Propagation
and Alignment
Args:
mid_channels (int, optional): Channel number of the intermediate
features. Default: 64.
num_blocks (int, optional): The number of residual blocks in each
propagation branch. Default: 7.
max_residue_magnitude (int): The maximum magnitude of the offset
residue (Eq. 6 in paper). Default: 10.
is_low_res_input (bool, optional): Whether the input is low-resolution
or not. If False, the output resolution is equal to the input
resolution. Default: True.
spynet_pretrained (str, optional): Pre-trained model path of SPyNet.
Default: None.
cpu_cache_length (int, optional): When the length of sequence is larger
than this value, the intermediate features are sent to CPU. This
saves GPU memory, but slows down the inference speed. You can
increase this number if you have a GPU with large memory.
Default: 100.
"""
def __init__(self,
mid_channels=64,
num_blocks=7,
max_residue_magnitude=10,
is_low_res_input=True,
spynet_pretrained=None,
cpu_cache_length=100):
super().__init__()
self.mid_channels = mid_channels
self.is_low_res_input = is_low_res_input
self.cpu_cache_length = cpu_cache_length
# optical flow
self.spynet = SPyNet(pretrained=spynet_pretrained)
# feature extraction module
if is_low_res_input:
self.feat_extract = ResidualBlocksWithInputConv(3, mid_channels, 5)
else:
self.feat_extract = nn.Sequential(
nn.Conv2d(3, mid_channels, 3, 2, 1),
nn.LeakyReLU(negative_slope=0.1, inplace=True),
nn.Conv2d(mid_channels, mid_channels, 3, 2, 1),
nn.LeakyReLU(negative_slope=0.1, inplace=True),
ResidualBlocksWithInputConv(mid_channels, mid_channels, 5))
# propagation branches
self.deform_align = nn.ModuleDict()
self.backbone = nn.ModuleDict()
modules = ['backward_1', 'forward_1', 'backward_2', 'forward_2']
for i, module in enumerate(modules):
self.deform_align[module] = SecondOrderDeformableAlignment(
2 * mid_channels,
mid_channels,
3,
padding=1,
deform_groups=16,
max_residue_magnitude=max_residue_magnitude)
self.backbone[module] = ResidualBlocksWithInputConv(
(2 + i) * mid_channels, mid_channels, num_blocks)
# upsampling module
self.reconstruction = ResidualBlocksWithInputConv(
5 * mid_channels, mid_channels, 5)
self.upsample1 = PixelShufflePack(
mid_channels, mid_channels, 2, upsample_kernel=3)
self.upsample2 = PixelShufflePack(
mid_channels, 64, 2, upsample_kernel=3)
self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)
self.img_upsample = nn.Upsample(
scale_factor=4, mode='bilinear', align_corners=False)
# activation function
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
# check if the sequence is augmented by flipping
self.is_mirror_extended = False
def check_if_mirror_extended(self, lqs):
"""Check whether the input is a mirror-extended sequence.
If mirror-extended, the i-th (i=0, ..., t-1) frame is equal to the
(t-1-i)-th frame.
Args:
lqs (tensor): Input low quality (LQ) sequence with
shape (n, t, c, h, w).
"""
if lqs.size(1) % 2 == 0:
lqs_1, lqs_2 = torch.chunk(lqs, 2, dim=1)
if torch.norm(lqs_1 - lqs_2.flip(1)) == 0:
self.is_mirror_extended = True
def compute_flow(self, lqs):
"""Compute optical flow using SPyNet for feature alignment.
        Note that if the input is a mirror-extended sequence, 'flows_forward'
is not needed, since it is equal to 'flows_backward.flip(1)'.
Args:
lqs (tensor): Input low quality (LQ) sequence with
shape (n, t, c, h, w).
Return:
tuple(Tensor): Optical flow. 'flows_forward' corresponds to the
flows used for forward-time propagation (current to previous).
'flows_backward' corresponds to the flows used for
backward-time propagation (current to next).
"""
n, t, c, h, w = lqs.size()
lqs_1 = lqs[:, :-1, :, :, :].reshape(-1, c, h, w)
lqs_2 = lqs[:, 1:, :, :, :].reshape(-1, c, h, w)
flows_backward = self.spynet(lqs_1, lqs_2).view(n, t - 1, 2, h, w)
if self.is_mirror_extended: # flows_forward = flows_backward.flip(1)
flows_forward = None
else:
flows_forward = self.spynet(lqs_2, lqs_1).view(n, t - 1, 2, h, w)
if self.cpu_cache:
flows_backward = flows_backward.cpu()
flows_forward = flows_forward.cpu()
return flows_forward, flows_backward
def propagate(self, feats, flows, module_name):
"""Propagate the latent features throughout the sequence.
Args:
            feats (dict[str, list[tensor]]): Features from previous branches.
                Each component is a list of tensors with shape (n, c, h, w).
            flows (tensor): Optical flows with shape (n, t - 1, 2, h, w).
            module_name (str): The name of the propagation branches. Can
                either be 'backward_1', 'forward_1', 'backward_2',
                'forward_2'.
Return:
dict(list[tensor]): A dictionary containing all the propagated
features. Each key in the dictionary corresponds to a
propagation branch, which is represented by a list of tensors.
"""
n, t, _, h, w = flows.size()
frame_idx = range(0, t + 1)
flow_idx = range(-1, t)
mapping_idx = list(range(0, len(feats['spatial'])))
mapping_idx += mapping_idx[::-1]
if 'backward' in module_name:
frame_idx = frame_idx[::-1]
flow_idx = frame_idx
feat_prop = flows.new_zeros(n, self.mid_channels, h, w)
for i, idx in enumerate(frame_idx):
feat_current = feats['spatial'][mapping_idx[idx]]
if self.cpu_cache:
feat_current = feat_current.cuda()
feat_prop = feat_prop.cuda()
# second-order deformable alignment
if i > 0:
flow_n1 = flows[:, flow_idx[i], :, :, :]
if self.cpu_cache:
flow_n1 = flow_n1.cuda()
cond_n1 = flow_warp(feat_prop, flow_n1.permute(0, 2, 3, 1))
# initialize second-order features
feat_n2 = torch.zeros_like(feat_prop)
flow_n2 = torch.zeros_like(flow_n1)
cond_n2 = torch.zeros_like(cond_n1)
if i > 1: # second-order features
feat_n2 = feats[module_name][-2]
if self.cpu_cache:
feat_n2 = feat_n2.cuda()
flow_n2 = flows[:, flow_idx[i - 1], :, :, :]
if self.cpu_cache:
flow_n2 = flow_n2.cuda()
flow_n2 = flow_n1 + flow_warp(flow_n2,
flow_n1.permute(0, 2, 3, 1))
cond_n2 = flow_warp(feat_n2, flow_n2.permute(0, 2, 3, 1))
# flow-guided deformable convolution
cond = torch.cat([cond_n1, feat_current, cond_n2], dim=1)
feat_prop = torch.cat([feat_prop, feat_n2], dim=1)
feat_prop = self.deform_align[module_name](feat_prop, cond,
flow_n1, flow_n2)
# concatenate and residual blocks
feat = [feat_current] + [
feats[k][idx]
for k in feats if k not in ['spatial', module_name]
] + [feat_prop]
if self.cpu_cache:
feat = [f.cuda() for f in feat]
feat = torch.cat(feat, dim=1)
feat_prop = feat_prop + self.backbone[module_name](feat)
feats[module_name].append(feat_prop)
if self.cpu_cache:
feats[module_name][-1] = feats[module_name][-1].cpu()
torch.cuda.empty_cache()
if 'backward' in module_name:
feats[module_name] = feats[module_name][::-1]
return feats
def upsample(self, lqs, feats):
"""Compute the output image given the features.
Args:
lqs (tensor): Input low quality (LQ) sequence with
shape (n, t, c, h, w).
            feats (dict): The features from the propagation branches.
Returns:
Tensor: Output HR sequence with shape (n, t, c, 4h, 4w).
"""
outputs = []
num_outputs = len(feats['spatial'])
mapping_idx = list(range(0, num_outputs))
mapping_idx += mapping_idx[::-1]
for i in range(0, lqs.size(1)):
hr = [feats[k].pop(0) for k in feats if k != 'spatial']
hr.insert(0, feats['spatial'][mapping_idx[i]])
hr = torch.cat(hr, dim=1)
if self.cpu_cache:
hr = hr.cuda()
hr = self.reconstruction(hr)
hr = self.lrelu(self.upsample1(hr))
hr = self.lrelu(self.upsample2(hr))
hr = self.lrelu(self.conv_hr(hr))
hr = self.conv_last(hr)
if self.is_low_res_input:
hr += self.img_upsample(lqs[:, i, :, :, :])
else:
hr += lqs[:, i, :, :, :]
if self.cpu_cache:
hr = hr.cpu()
torch.cuda.empty_cache()
outputs.append(hr)
return torch.stack(outputs, dim=1)
def forward(self, lqs):
"""Forward function for BasicVSR++.
Args:
lqs (tensor): Input low quality (LQ) sequence with
shape (n, t, c, h, w).
Returns:
Tensor: Output HR sequence with shape (n, t, c, 4h, 4w).
"""
n, t, c, h, w = lqs.size()
# whether to cache the features in CPU (no effect if using CPU)
if t > self.cpu_cache_length and lqs.is_cuda:
self.cpu_cache = True
else:
self.cpu_cache = False
if self.is_low_res_input:
lqs_downsample = lqs.clone()
else:
lqs_downsample = F.interpolate(
lqs.view(-1, c, h, w), scale_factor=0.25,
mode='bicubic').view(n, t, c, h // 4, w // 4)
# check whether the input is an extended sequence
self.check_if_mirror_extended(lqs)
feats = {}
# compute spatial features
if self.cpu_cache:
feats['spatial'] = []
for i in range(0, t):
feat = self.feat_extract(lqs[:, i, :, :, :]).cpu()
feats['spatial'].append(feat)
torch.cuda.empty_cache()
else:
feats_ = self.feat_extract(lqs.view(-1, c, h, w))
h, w = feats_.shape[2:]
feats_ = feats_.view(n, t, -1, h, w)
feats['spatial'] = [feats_[:, i, :, :, :] for i in range(0, t)]
# compute optical flow using the low-res inputs
        assert lqs_downsample.size(3) >= 64 and lqs_downsample.size(4) >= 64, (
            'The height and width of low-res inputs must be at least 64, '
            f'but got {lqs_downsample.size(3)} and {lqs_downsample.size(4)}.')
flows_forward, flows_backward = self.compute_flow(lqs_downsample)
        # feature propagation
for iter_ in [1, 2]:
for direction in ['backward', 'forward']:
module = f'{direction}_{iter_}'
feats[module] = []
if direction == 'backward':
flows = flows_backward
elif flows_forward is not None:
flows = flows_forward
else:
flows = flows_backward.flip(1)
feats = self.propagate(feats, flows, module)
if self.cpu_cache:
del flows
torch.cuda.empty_cache()
return self.upsample(lqs, feats)
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Default: None.
strict (bool, optional): Whether strictly load the pretrained
model. Default: True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is not None:
raise TypeError(f'"pretrained" must be a str or None. '
f'But received {type(pretrained)}.')
class SecondOrderDeformableAlignment(ModulatedDeformConv2d):
"""Second-order deformable alignment module.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): Same as nn.Conv2d.
kernel_size (int or tuple[int]): Same as nn.Conv2d.
stride (int or tuple[int]): Same as nn.Conv2d.
padding (int or tuple[int]): Same as nn.Conv2d.
dilation (int or tuple[int]): Same as nn.Conv2d.
groups (int): Same as nn.Conv2d.
bias (bool or str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
False.
max_residue_magnitude (int): The maximum magnitude of the offset
residue (Eq. 6 in paper). Default: 10.
"""
def __init__(self, *args, **kwargs):
self.max_residue_magnitude = kwargs.pop('max_residue_magnitude', 10)
super(SecondOrderDeformableAlignment, self).__init__(*args, **kwargs)
self.conv_offset = nn.Sequential(
nn.Conv2d(3 * self.out_channels + 4, self.out_channels, 3, 1, 1),
nn.LeakyReLU(negative_slope=0.1, inplace=True),
nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1),
nn.LeakyReLU(negative_slope=0.1, inplace=True),
nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1),
nn.LeakyReLU(negative_slope=0.1, inplace=True),
nn.Conv2d(self.out_channels, 27 * self.deform_groups, 3, 1, 1),
)
self.init_offset()
def init_offset(self):
constant_init(self.conv_offset[-1], val=0, bias=0)
def forward(self, x, extra_feat, flow_1, flow_2):
extra_feat = torch.cat([extra_feat, flow_1, flow_2], dim=1)
out = self.conv_offset(extra_feat)
o1, o2, mask = torch.chunk(out, 3, dim=1)
# offset
offset = self.max_residue_magnitude * torch.tanh(
torch.cat((o1, o2), dim=1))
offset_1, offset_2 = torch.chunk(offset, 2, dim=1)
offset_1 = offset_1 + flow_1.flip(1).repeat(1,
offset_1.size(1) // 2, 1,
1)
offset_2 = offset_2 + flow_2.flip(1).repeat(1,
offset_2.size(1) // 2, 1,
1)
offset = torch.cat([offset_1, offset_2], dim=1)
# mask
mask = torch.sigmoid(mask)
return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias,
self.stride, self.padding,
self.dilation, self.groups,
self.deform_groups)
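# A shape-check sketch (not part of the original file). SPyNet weights are
# random here (spynet_pretrained=None), so only the output shape is
# meaningful; the deformable convolution may additionally require a CUDA
# build of mmcv-full.
def _basicvsr_pp_demo():  # pragma: no cover
    model = BasicVSRPlusPlus(mid_channels=64, num_blocks=7)
    lqs = torch.rand(1, 5, 3, 64, 64)  # (n, t, c, h, w) with h, w >= 64
    return model(lqs)  # expected shape: (1, 5, 3, 256, 256)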
| 16,773 | 37.56092 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/backbones/sr_backbones/basicvsr_net.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import load_checkpoint
from mmedit.models.common import (PixelShufflePack, ResidualBlockNoBN,
flow_warp, make_layer)
from mmedit.models.registry import BACKBONES
from mmedit.utils import get_root_logger
@BACKBONES.register_module()
class BasicVSRNet(nn.Module):
"""BasicVSR network structure for video super-resolution.
Support only x4 upsampling.
Paper:
BasicVSR: The Search for Essential Components in Video Super-Resolution
and Beyond, CVPR, 2021
Args:
mid_channels (int): Channel number of the intermediate features.
Default: 64.
num_blocks (int): Number of residual blocks in each propagation branch.
Default: 30.
spynet_pretrained (str): Pre-trained model path of SPyNet.
Default: None.
"""
def __init__(self, mid_channels=64, num_blocks=30, spynet_pretrained=None):
super().__init__()
self.mid_channels = mid_channels
# optical flow network for feature alignment
self.spynet = SPyNet(pretrained=spynet_pretrained)
# propagation branches
self.backward_resblocks = ResidualBlocksWithInputConv(
mid_channels + 3, mid_channels, num_blocks)
self.forward_resblocks = ResidualBlocksWithInputConv(
mid_channels + 3, mid_channels, num_blocks)
# upsample
self.fusion = nn.Conv2d(
mid_channels * 2, mid_channels, 1, 1, 0, bias=True)
self.upsample1 = PixelShufflePack(
mid_channels, mid_channels, 2, upsample_kernel=3)
self.upsample2 = PixelShufflePack(
mid_channels, 64, 2, upsample_kernel=3)
self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)
self.img_upsample = nn.Upsample(
scale_factor=4, mode='bilinear', align_corners=False)
# activation function
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def check_if_mirror_extended(self, lrs):
"""Check whether the input is a mirror-extended sequence.
If mirror-extended, the i-th (i=0, ..., t-1) frame is equal to the
(t-1-i)-th frame.
Args:
lrs (tensor): Input LR images with shape (n, t, c, h, w)
"""
self.is_mirror_extended = False
if lrs.size(1) % 2 == 0:
lrs_1, lrs_2 = torch.chunk(lrs, 2, dim=1)
if torch.norm(lrs_1 - lrs_2.flip(1)) == 0:
self.is_mirror_extended = True
def compute_flow(self, lrs):
"""Compute optical flow using SPyNet for feature warping.
        Note that if the input is a mirror-extended sequence, 'flows_forward'
is not needed, since it is equal to 'flows_backward.flip(1)'.
Args:
lrs (tensor): Input LR images with shape (n, t, c, h, w)
Return:
tuple(Tensor): Optical flow. 'flows_forward' corresponds to the
flows used for forward-time propagation (current to previous).
'flows_backward' corresponds to the flows used for
backward-time propagation (current to next).
"""
n, t, c, h, w = lrs.size()
lrs_1 = lrs[:, :-1, :, :, :].reshape(-1, c, h, w)
lrs_2 = lrs[:, 1:, :, :, :].reshape(-1, c, h, w)
flows_backward = self.spynet(lrs_1, lrs_2).view(n, t - 1, 2, h, w)
if self.is_mirror_extended: # flows_forward = flows_backward.flip(1)
flows_forward = None
else:
flows_forward = self.spynet(lrs_2, lrs_1).view(n, t - 1, 2, h, w)
return flows_forward, flows_backward
def forward(self, lrs):
"""Forward function for BasicVSR.
Args:
lrs (Tensor): Input LR sequence with shape (n, t, c, h, w).
Returns:
Tensor: Output HR sequence with shape (n, t, c, 4h, 4w).
"""
n, t, c, h, w = lrs.size()
assert h >= 64 and w >= 64, (
'The height and width of inputs should be at least 64, '
f'but got {h} and {w}.')
# check whether the input is an extended sequence
self.check_if_mirror_extended(lrs)
# compute optical flow
flows_forward, flows_backward = self.compute_flow(lrs)
        # backward-time propagation
outputs = []
feat_prop = lrs.new_zeros(n, self.mid_channels, h, w)
for i in range(t - 1, -1, -1):
if i < t - 1: # no warping required for the last timestep
flow = flows_backward[:, i, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
feat_prop = torch.cat([lrs[:, i, :, :, :], feat_prop], dim=1)
feat_prop = self.backward_resblocks(feat_prop)
outputs.append(feat_prop)
outputs = outputs[::-1]
# forward-time propagation and upsampling
feat_prop = torch.zeros_like(feat_prop)
for i in range(0, t):
lr_curr = lrs[:, i, :, :, :]
if i > 0: # no warping required for the first timestep
if flows_forward is not None:
flow = flows_forward[:, i - 1, :, :, :]
else:
flow = flows_backward[:, -i, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
feat_prop = torch.cat([lr_curr, feat_prop], dim=1)
feat_prop = self.forward_resblocks(feat_prop)
# upsampling given the backward and forward features
out = torch.cat([outputs[i], feat_prop], dim=1)
out = self.lrelu(self.fusion(out))
out = self.lrelu(self.upsample1(out))
out = self.lrelu(self.upsample2(out))
out = self.lrelu(self.conv_hr(out))
out = self.conv_last(out)
base = self.img_upsample(lr_curr)
out += base
outputs[i] = out
return torch.stack(outputs, dim=1)
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Default: None.
            strict (bool, optional): Whether to strictly load the pretrained
                model. Default: True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is not None:
raise TypeError(f'"pretrained" must be a str or None. '
f'But received {type(pretrained)}.')
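# A shape-check sketch (not part of the original file); SPyNet weights are
# random here (spynet_pretrained=None), so the output is only meaningful in
# shape. This model runs on CPU since it uses no deformable convolution.
def _basicvsr_demo():  # pragma: no cover
    model = BasicVSRNet(mid_channels=64, num_blocks=30)
    lrs = torch.rand(1, 5, 3, 64, 64)  # (n, t, c, h, w) with h, w >= 64
    return model(lrs)  # expected shape: (1, 5, 3, 256, 256)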
class ResidualBlocksWithInputConv(nn.Module):
"""Residual blocks with a convolution in front.
Args:
in_channels (int): Number of input channels of the first conv.
out_channels (int): Number of channels of the residual blocks.
Default: 64.
num_blocks (int): Number of residual blocks. Default: 30.
"""
def __init__(self, in_channels, out_channels=64, num_blocks=30):
super().__init__()
main = []
# a convolution used to match the channels of the residual blocks
main.append(nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=True))
main.append(nn.LeakyReLU(negative_slope=0.1, inplace=True))
# residual blocks
main.append(
make_layer(
ResidualBlockNoBN, num_blocks, mid_channels=out_channels))
self.main = nn.Sequential(*main)
def forward(self, feat):
"""
Forward function for ResidualBlocksWithInputConv.
Args:
feat (Tensor): Input feature with shape (n, in_channels, h, w)
Returns:
Tensor: Output feature with shape (n, out_channels, h, w)
"""
return self.main(feat)
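# A minimal sketch (not part of the original file): the trunk BasicVSR uses
# to fuse a warped 64-channel feature with the 3-channel LR frame back into
# 64 channels; the sizes are illustrative.
def _resblocks_example():  # pragma: no cover
    blocks = ResidualBlocksWithInputConv(64 + 3, 64, num_blocks=30)
    return blocks(torch.rand(1, 67, 32, 32))  # -> (1, 64, 32, 32)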
class SPyNet(nn.Module):
"""SPyNet network structure.
    The differences from the SPyNet in [tof.py] are that
    1. more SPyNetBasicModules are used in this version, and
    2. no batch normalization is used in this version.
Paper:
Optical Flow Estimation using a Spatial Pyramid Network, CVPR, 2017
Args:
pretrained (str): path for pre-trained SPyNet. Default: None.
"""
def __init__(self, pretrained):
super().__init__()
self.basic_module = nn.ModuleList(
[SPyNetBasicModule() for _ in range(6)])
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=True, logger=logger)
elif pretrained is not None:
raise TypeError('[pretrained] should be str or None, '
f'but got {type(pretrained)}.')
self.register_buffer(
'mean',
torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
self.register_buffer(
'std',
torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
def compute_flow(self, ref, supp):
"""Compute flow from ref to supp.
Note that in this function, the images are already resized to a
multiple of 32.
Args:
ref (Tensor): Reference image with shape of (n, 3, h, w).
supp (Tensor): Supporting image with shape of (n, 3, h, w).
Returns:
Tensor: Estimated optical flow: (n, 2, h, w).
"""
n, _, h, w = ref.size()
# normalize the input images
ref = [(ref - self.mean) / self.std]
supp = [(supp - self.mean) / self.std]
# generate downsampled frames
for level in range(5):
ref.append(
F.avg_pool2d(
input=ref[-1],
kernel_size=2,
stride=2,
count_include_pad=False))
supp.append(
F.avg_pool2d(
input=supp[-1],
kernel_size=2,
stride=2,
count_include_pad=False))
ref = ref[::-1]
supp = supp[::-1]
# flow computation
flow = ref[0].new_zeros(n, 2, h // 32, w // 32)
for level in range(len(ref)):
if level == 0:
flow_up = flow
else:
flow_up = F.interpolate(
input=flow,
scale_factor=2,
mode='bilinear',
align_corners=True) * 2.0
# add the residue to the upsampled flow
flow = flow_up + self.basic_module[level](
torch.cat([
ref[level],
flow_warp(
supp[level],
flow_up.permute(0, 2, 3, 1),
padding_mode='border'), flow_up
], 1))
return flow
def forward(self, ref, supp):
"""Forward function of SPyNet.
This function computes the optical flow from ref to supp.
Args:
ref (Tensor): Reference image with shape of (n, 3, h, w).
supp (Tensor): Supporting image with shape of (n, 3, h, w).
Returns:
Tensor: Estimated optical flow: (n, 2, h, w).
"""
# upsize to a multiple of 32
h, w = ref.shape[2:4]
w_up = w if (w % 32) == 0 else 32 * (w // 32 + 1)
h_up = h if (h % 32) == 0 else 32 * (h // 32 + 1)
ref = F.interpolate(
input=ref, size=(h_up, w_up), mode='bilinear', align_corners=False)
supp = F.interpolate(
input=supp,
size=(h_up, w_up),
mode='bilinear',
align_corners=False)
# compute flow, and resize back to the original resolution
flow = F.interpolate(
input=self.compute_flow(ref, supp),
size=(h, w),
mode='bilinear',
align_corners=False)
# adjust the flow values
flow[:, 0, :, :] *= float(w) / float(w_up)
flow[:, 1, :, :] *= float(h) / float(h_up)
return flow
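# A sketch (not part of the original file): inputs need not be multiples of
# 32, since forward() resizes internally and rescales the flow back. The
# input sizes are illustrative; weights are random (pretrained=None).
def _spynet_shape_example():  # pragma: no cover
    net = SPyNet(pretrained=None)
    ref, supp = torch.rand(1, 3, 100, 180), torch.rand(1, 3, 100, 180)
    return net(ref, supp)  # flow of shape (1, 2, 100, 180)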
class SPyNetBasicModule(nn.Module):
"""Basic Module for SPyNet.
Paper:
Optical Flow Estimation using a Spatial Pyramid Network, CVPR, 2017
"""
def __init__(self):
super().__init__()
self.basic_module = nn.Sequential(
ConvModule(
in_channels=8,
out_channels=32,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=None,
act_cfg=dict(type='ReLU')),
ConvModule(
in_channels=32,
out_channels=64,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=None,
act_cfg=dict(type='ReLU')),
ConvModule(
in_channels=64,
out_channels=32,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=None,
act_cfg=dict(type='ReLU')),
ConvModule(
in_channels=32,
out_channels=16,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=None,
act_cfg=dict(type='ReLU')),
ConvModule(
in_channels=16,
out_channels=2,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=None,
act_cfg=None))
def forward(self, tensor_input):
"""
Args:
tensor_input (Tensor): Input tensor with shape (b, 8, h, w).
8 channels contain:
[reference image (3), neighbor image (3), initial flow (2)].
Returns:
Tensor: Refined flow with shape (b, 2, h, w)
"""
return self.basic_module(tensor_input)
| 14,148 | 32.608076 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/backbones/sr_backbones/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .basicvsr_net import BasicVSRNet
from .basicvsr_pp import BasicVSRPlusPlus
__all__ = ['BasicVSRNet', 'BasicVSRPlusPlus']
| 175 | 28.333333 | 47 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/sr_vimeo90k_multiple_gt_dataset.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from .base_sr_dataset import BaseSRDataset
from .registry import DATASETS
@DATASETS.register_module()
class SRVimeo90KMultipleGTDataset(BaseSRDataset):
"""Vimeo90K dataset for video super resolution for recurrent networks.
The dataset loads several LQ (Low-Quality) frames and GT (Ground-Truth)
frames. Then it applies specified transforms and finally returns a dict
containing paired data and other information.
It reads Vimeo90K keys from the txt file. Each line contains:
1. video frame folder
2. image shape
Examples:
::
00001/0266 (256,448,3)
00001/0268 (256,448,3)
Args:
lq_folder (str | :obj:`Path`): Path to a lq folder.
gt_folder (str | :obj:`Path`): Path to a gt folder.
ann_file (str | :obj:`Path`): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transformations.
scale (int): Upsampling scale ratio.
num_input_frames (int): Number of frames in each training sequence.
Default: 7.
test_mode (bool): Store `True` when building test dataset.
Default: `False`.
"""
def __init__(self,
lq_folder,
gt_folder,
ann_file,
pipeline,
scale,
num_input_frames=7,
test_mode=False):
super().__init__(pipeline, scale, test_mode)
self.lq_folder = str(lq_folder)
self.gt_folder = str(gt_folder)
self.ann_file = str(ann_file)
self.num_input_frames = num_input_frames
self.data_infos = self.load_annotations()
def load_annotations(self):
"""Load annoations for Vimeo-90K dataset.
Returns:
list[dict]: A list of dicts for paired paths and other information.
"""
# get keys
with open(self.ann_file, 'r') as fin:
keys = [line.strip().split(' ')[0] for line in fin]
data_infos = []
for key in keys:
key = key.replace('/', os.sep)
lq_paths = [
osp.join(self.lq_folder, key, f'im{i}.png')
for i in range(1, self.num_input_frames + 1)
]
gt_paths = [
osp.join(self.gt_folder, key, f'im{i}.png')
for i in range(1, self.num_input_frames + 1)
]
data_infos.append(
dict(lq_path=lq_paths, gt_path=gt_paths, key=key))
return data_infos
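# A sketch (not part of the original file) of one resulting ``data_infos``
# entry for the annotation line '00001/0266 (256,448,3)'; the folder names
# are assumptions for illustration:
#
#     dict(lq_path=[f'{lq_folder}/00001/0266/im{i}.png' for i in range(1, 8)],
#          gt_path=[f'{gt_folder}/00001/0266/im{i}.png' for i in range(1, 8)],
#          key='00001/0266')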
| 2,608 | 30.059524 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/sr_folder_multiple_gt_dataset.py | # Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import mmcv
from .base_sr_dataset import BaseSRDataset
from .registry import DATASETS
@DATASETS.register_module()
class SRFolderMultipleGTDataset(BaseSRDataset):
"""General dataset for video super resolution, used for recurrent networks.
The dataset loads several LQ (Low-Quality) frames and GT (Ground-Truth)
frames. Then it applies specified transforms and finally returns a dict
containing paired data and other information.
This dataset takes an annotation file specifying the sequences used in
training or test. If no annotation file is provided, it assumes all video
    sequences under the root directory are used for training or test.
In the annotation file (.txt), each line contains:
1. folder name;
2. number of frames in this sequence (in the same folder)
Examples:
::
calendar 41
city 34
foliage 49
walk 47
Args:
lq_folder (str | :obj:`Path`): Path to a lq folder.
gt_folder (str | :obj:`Path`): Path to a gt folder.
pipeline (list[dict | callable]): A sequence of data transformations.
scale (int): Upsampling scale ratio.
ann_file (str): The path to the annotation file. If None, we assume
that all sequences in the folder is used. Default: None
num_input_frames (None | int): The number of frames per iteration.
If None, the whole clip is extracted. If it is a positive integer,
a sequence of 'num_input_frames' frames is extracted from the clip.
Note that non-positive integers are not accepted. Default: None.
test_mode (bool): Store `True` when building test dataset.
Default: `True`.
"""
def __init__(self,
lq_folder,
gt_folder,
pipeline,
scale,
ann_file=None,
num_input_frames=None,
test_mode=True):
super().__init__(pipeline, scale, test_mode)
self.lq_folder = str(lq_folder)
self.gt_folder = str(gt_folder)
self.ann_file = ann_file
if num_input_frames is not None and num_input_frames <= 0:
raise ValueError('"num_input_frames" must be None or positive, '
f'but got {num_input_frames}.')
self.num_input_frames = num_input_frames
self.data_infos = self.load_annotations()
def _load_annotations_from_file(self):
data_infos = []
ann_list = mmcv.list_from_file(self.ann_file)
for ann in ann_list:
key, sequence_length = ann.strip().split(' ')
if self.num_input_frames is None:
num_input_frames = sequence_length
else:
num_input_frames = self.num_input_frames
data_infos.append(
dict(
lq_path=self.lq_folder,
gt_path=self.gt_folder,
key=key,
num_input_frames=int(num_input_frames),
sequence_length=int(sequence_length)))
return data_infos
def load_annotations(self):
"""Load annoations for the dataset.
Returns:
list[dict]: Returned list of dicts for paired paths of LQ and GT.
"""
if self.ann_file:
return self._load_annotations_from_file()
sequences = sorted(glob.glob(osp.join(self.lq_folder, '*')))
data_infos = []
for sequence in sequences:
sequence_length = len(glob.glob(osp.join(sequence, '*.png')))
if self.num_input_frames is None:
num_input_frames = sequence_length
else:
num_input_frames = self.num_input_frames
data_infos.append(
dict(
lq_path=self.lq_folder,
gt_path=self.gt_folder,
key=sequence.replace(f'{self.lq_folder}{os.sep}', ''),
num_input_frames=num_input_frames,
sequence_length=sequence_length))
return data_infos
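# A usage sketch (not part of the original file): without ``ann_file``,
# every sub-folder of ``lq_folder`` becomes one sequence. The paths are
# hypothetical placeholders.
#
#     dataset = SRFolderMultipleGTDataset(
#         lq_folder='data/test/LQ', gt_folder='data/test/GT',
#         pipeline=[], scale=4, num_input_frames=None)
#     dataset.data_infos[0]['sequence_length']  # number of .png frames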
| 4,220 | 33.884298 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/registry.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
| 145 | 23.333333 | 47 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/base_dataset.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from abc import ABCMeta, abstractmethod
from torch.utils.data import Dataset
from .pipelines import Compose
class BaseDataset(Dataset, metaclass=ABCMeta):
"""Base class for datasets.
All datasets should subclass it.
All subclasses should overwrite:
        ``load_annotations``, which loads annotation information and
        generates image lists.
Args:
pipeline (list[dict | callable]): A sequence of data transforms.
test_mode (bool): If True, the dataset will work in test mode.
Otherwise, in train mode.
"""
def __init__(self, pipeline, test_mode=False):
super().__init__()
self.test_mode = test_mode
self.pipeline = Compose(pipeline)
@abstractmethod
def load_annotations(self):
"""Abstract function for loading annotation.
All subclasses should overwrite this function
"""
def prepare_train_data(self, idx):
"""Prepare training data.
Args:
idx (int): Index of the training batch data.
Returns:
dict: Returned training batch.
"""
results = copy.deepcopy(self.data_infos[idx])
return self.pipeline(results)
def prepare_test_data(self, idx):
"""Prepare testing data.
Args:
idx (int): Index for getting each testing batch.
Returns:
Tensor: Returned testing batch.
"""
results = copy.deepcopy(self.data_infos[idx])
return self.pipeline(results)
def __len__(self):
"""Length of the dataset.
Returns:
int: Length of the dataset.
"""
return len(self.data_infos)
def __getitem__(self, idx):
"""Get item at each call.
Args:
idx (int): Index for getting each item.
"""
if self.test_mode:
return self.prepare_test_data(idx)
return self.prepare_train_data(idx)
| 2,006 | 24.405063 | 73 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/dataset_wrappers.py | # Copyright (c) OpenMMLab. All rights reserved.
from .registry import DATASETS
@DATASETS.register_module()
class RepeatDataset:
"""A wrapper of repeated dataset.
The length of repeated dataset will be `times` larger than the original
dataset. This is useful when the data loading time is long but the dataset
is small. Using RepeatDataset can reduce the data loading time between
epochs.
Args:
dataset (:obj:`Dataset`): The dataset to be repeated.
times (int): Repeat times.
"""
def __init__(self, dataset, times):
self.dataset = dataset
self.times = times
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
"""Get item at each call.
Args:
idx (int): Index for getting each item.
"""
return self.dataset[idx % self._ori_len]
def __len__(self):
"""Length of the dataset.
Returns:
int: Length of the dataset.
"""
return self.times * self._ori_len
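# A sketch (not part of the original file): wrapping a 10-sample dataset
# with times=3 yields length 30, and index 10 wraps back to original index
# 0; ``some_dataset`` is a placeholder for any dataset instance.
#
#     repeated = RepeatDataset(some_dataset, times=3)
#     assert len(repeated) == 3 * len(some_dataset)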
| 1,034 | 24.875 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/sr_reds_multiple_gt_dataset.py | # Copyright (c) OpenMMLab. All rights reserved.
from .base_sr_dataset import BaseSRDataset
from .registry import DATASETS
@DATASETS.register_module()
class SRREDSMultipleGTDataset(BaseSRDataset):
"""REDS dataset for video super resolution for recurrent networks.
The dataset loads several LQ (Low-Quality) frames and GT (Ground-Truth)
frames. Then it applies specified transforms and finally returns a dict
containing paired data and other information.
Args:
lq_folder (str | :obj:`Path`): Path to a lq folder.
gt_folder (str | :obj:`Path`): Path to a gt folder.
num_input_frames (int): Number of input frames.
pipeline (list[dict | callable]): A sequence of data transformations.
scale (int): Upsampling scale ratio.
val_partition (str): Validation partition mode. Choices ['official' or
'REDS4']. Default: 'official'.
repeat (int): Number of replication of the validation set. This is used
to allow training REDS4 with more than 4 GPUs. For example, if
8 GPUs are used, this number can be set to 2. Default: 1.
test_mode (bool): Store `True` when building test dataset.
Default: `False`.
"""
def __init__(self,
lq_folder,
gt_folder,
num_input_frames,
pipeline,
scale,
val_partition='official',
repeat=1,
test_mode=False):
self.repeat = repeat
if not isinstance(repeat, int):
raise TypeError('"repeat" must be an integer, but got '
f'{type(repeat)}.')
super().__init__(pipeline, scale, test_mode)
self.lq_folder = str(lq_folder)
self.gt_folder = str(gt_folder)
self.num_input_frames = num_input_frames
self.val_partition = val_partition
self.data_infos = self.load_annotations()
def load_annotations(self):
"""Load annoations for REDS dataset.
Returns:
list[dict]: A list of dicts for paired paths and other information.
"""
# generate keys
keys = [f'{i:03d}' for i in range(0, 270)]
if self.val_partition == 'REDS4':
val_partition = ['000', '011', '015', '020']
elif self.val_partition == 'official':
val_partition = [f'{i:03d}' for i in range(240, 270)]
else:
raise ValueError(
f'Wrong validation partition {self.val_partition}.'
f'Supported ones are ["official", "REDS4"]')
if self.test_mode:
keys = [v for v in keys if v in val_partition]
keys *= self.repeat
else:
keys = [v for v in keys if v not in val_partition]
data_infos = []
for key in keys:
data_infos.append(
dict(
lq_path=self.lq_folder,
gt_path=self.gt_folder,
key=key,
sequence_length=100, # REDS has 100 frames for each clip
num_input_frames=self.num_input_frames))
return data_infos
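# A worked sketch (not part of the original file): with
# val_partition='REDS4' and test_mode=False, clips '000', '011', '015' and
# '020' are held out, leaving 270 - 4 = 266 training clips, each with
# sequence_length=100.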
| 3,194 | 36.151163 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .base_dataset import BaseDataset
from .base_sr_dataset import BaseSRDataset
from .builder import build_dataloader, build_dataset
from .dataset_wrappers import RepeatDataset
from .registry import DATASETS, PIPELINES
from .sr_folder_multiple_gt_dataset import SRFolderMultipleGTDataset
from .sr_reds_multiple_gt_dataset import SRREDSMultipleGTDataset
from .sr_vimeo90k_multiple_gt_dataset import SRVimeo90KMultipleGTDataset
__all__ = [
    'DATASETS', 'PIPELINES', 'build_dataset', 'build_dataloader',
    'BaseDataset', 'BaseSRDataset', 'RepeatDataset',
    'SRFolderMultipleGTDataset', 'SRREDSMultipleGTDataset',
    'SRVimeo90KMultipleGTDataset'
]
| 1,151 | 49.086957 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/base_sr_dataset.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
from collections import defaultdict
from pathlib import Path
from mmcv import scandir
from .base_dataset import BaseDataset
IMG_EXTENSIONS = ('.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm',
'.PPM', '.bmp', '.BMP', '.tif', '.TIF', '.tiff', '.TIFF')
class BaseSRDataset(BaseDataset):
"""Base class for super resolution datasets.
"""
def __init__(self, pipeline, scale, test_mode=False):
super().__init__(pipeline, test_mode)
self.scale = scale
@staticmethod
def scan_folder(path):
"""Obtain image path list (including sub-folders) from a given folder.
Args:
path (str | :obj:`Path`): Folder path.
Returns:
            list[str]: Image list obtained from the given folder.
"""
if isinstance(path, (str, Path)):
path = str(path)
else:
raise TypeError("'path' must be a str or a Path object, "
f'but received {type(path)}.')
images = list(scandir(path, suffix=IMG_EXTENSIONS, recursive=True))
images = [osp.join(path, v) for v in images]
assert images, f'{path} has no valid image file.'
return images
def __getitem__(self, idx):
"""Get item at each call.
Args:
idx (int): Index for getting each item.
"""
results = copy.deepcopy(self.data_infos[idx])
results['scale'] = self.scale
return self.pipeline(results)
def evaluate(self, results, logger=None):
"""Evaluate with different metrics.
Args:
results (list[tuple]): The output of forward_test() of the model.
Return:
dict: Evaluation results dict.
"""
if not isinstance(results, list):
raise TypeError(f'results must be a list, but got {type(results)}')
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: '
f'{len(results)} != {len(self)}')
results = [res['eval_result'] for res in results] # a list of dict
eval_result = defaultdict(list) # a dict of list
for res in results:
for metric, val in res.items():
eval_result[metric].append(val)
for metric, val_list in eval_result.items():
assert len(val_list) == len(self), (
f'Length of evaluation result of {metric} is {len(val_list)}, '
f'should be {len(self)}')
# average the results
eval_result = {
metric: sum(values) / len(self)
for metric, values in eval_result.items()
}
return eval_result
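# A sketch (not part of the original file) of the input ``evaluate``
# expects: one dict per sample, each holding an 'eval_result' that maps
# metric names to values; the return averages each metric over the dataset.
#
#     results = [dict(eval_result=dict(PSNR=30.1, SSIM=0.91))
#                for _ in range(len(dataset))]
#     dataset.evaluate(results)  # -> {'PSNR': 30.1, 'SSIM': 0.91}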
| 2,779 | 30.590909 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/builder.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import random
from functools import partial
import numpy as np
import torch
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import build_from_cfg
from packaging import version
from torch.utils.data import ConcatDataset, DataLoader
from .dataset_wrappers import RepeatDataset
from .registry import DATASETS
from .samplers import DistributedSampler
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
def _concat_dataset(cfg, default_args=None):
"""Concat datasets with different ann_file but the same type.
Args:
cfg (dict): The config of dataset.
default_args (dict, optional): Default initialization arguments.
Default: None.
Returns:
Dataset: The concatenated dataset.
"""
ann_files = cfg['ann_file']
datasets = []
num_dset = len(ann_files)
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
data_cfg['ann_file'] = ann_files[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets)
def build_dataset(cfg, default_args=None):
"""Build a dataset from config dict.
    It supports a variety of dataset config. If ``cfg`` is a Sequential (list
    or tuple), it will be a concatenated dataset of the datasets specified by
the Sequential. If it is a ``RepeatDataset``, then it will repeat the
dataset ``cfg['dataset']`` for ``cfg['times']`` times. If the ``ann_file``
of the dataset is a Sequential, then it will build a concatenated dataset
with the same dataset type but different ``ann_file``.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
default_args (dict, optional): Default initialization arguments.
Default: None.
Returns:
Dataset: The constructed dataset.
"""
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
elif isinstance(cfg.get('ann_file'), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
def build_dataloader(dataset,
samples_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
drop_last=False,
pin_memory=True,
persistent_workers=True,
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (:obj:`Dataset`): A PyTorch dataset.
samples_per_gpu (int): Number of samples on each GPU, i.e.,
batch size of each GPU.
workers_per_gpu (int): How many subprocesses to use for data
loading for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed
training. Default: 1.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
seed (int | None): Seed to be used. Default: None.
drop_last (bool): Whether to drop the last incomplete batch in epoch.
Default: False
pin_memory (bool): Whether to use pin_memory in DataLoader.
Default: True
        persistent_workers (bool): If True, the data loader will not shut
            down the worker processes after a dataset has been consumed
            once. This keeps the worker Dataset instances alive. The
            argument only takes effect in PyTorch >= 1.7.0. Default: True.
kwargs (dict, optional): Any keyword argument to be used to initialize
DataLoader.
Returns:
DataLoader: A PyTorch dataloader.
"""
rank, world_size = get_dist_info()
if dist:
sampler = DistributedSampler(
dataset,
world_size,
rank,
shuffle=shuffle,
samples_per_gpu=samples_per_gpu,
seed=seed)
shuffle = False
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
sampler = None
batch_size = num_gpus * samples_per_gpu
num_workers = num_gpus * workers_per_gpu
init_fn = partial(
worker_init_fn, num_workers=num_workers, rank=rank,
seed=seed) if seed is not None else None
if version.parse(torch.__version__) >= version.parse('1.7.0'):
kwargs['persistent_workers'] = persistent_workers
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=pin_memory,
shuffle=shuffle,
worker_init_fn=init_fn,
drop_last=drop_last,
**kwargs)
return data_loader
def worker_init_fn(worker_id, num_workers, rank, seed):
"""Function to initialize each worker.
    The seed of each worker equals
    ``num_workers * rank + worker_id + user_seed``.
Args:
worker_id (int): Id for each worker.
num_workers (int): Number of workers.
rank (int): Rank in distributed training.
seed (int): Random seed.
"""
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
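# A usage sketch (not part of the original file); the config objects are
# illustrative placeholders, not taken from any specific file in this repo.
#
#     dataset = build_dataset(dict(type='RepeatDataset', times=1000,
#                                  dataset=train_dataset_cfg))
#     loader = build_dataloader(dataset, samples_per_gpu=2, workers_per_gpu=4,
#                               num_gpus=1, dist=False, seed=0)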
| 6,177 | 32.945055 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/samplers/distributed_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
from __future__ import division
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmedit.core.utils import sync_random_seed
class DistributedSampler(_DistributedSampler):
"""DistributedSampler inheriting from `torch.utils.data.DistributedSampler`.
    In PyTorch of lower versions, there is no `shuffle` argument. This child
    class ports one to DistributedSampler.
"""
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
samples_per_gpu=1,
seed=0):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
self.samples_per_gpu = samples_per_gpu
# fix the bug of the official implementation
self.num_samples_per_replica = int(
math.ceil(
len(self.dataset) * 1.0 / self.num_replicas / samples_per_gpu))
self.num_samples = self.num_samples_per_replica * self.samples_per_gpu
self.total_size = self.num_samples * self.num_replicas
# In distributed sampling, different ranks should sample
# non-overlapped data in the dataset. Therefore, this function
# is used to make sure that each rank shuffles the data indices
# in the same order based on the same seed. Then different ranks
# could use different indices to select non-overlapped data from the
# same data list.
self.seed = sync_random_seed(seed)
# to avoid padding bug when meeting too small dataset
if len(dataset) < self.num_replicas * samples_per_gpu:
raise ValueError(
'You may use too small dataset and our distributed '
'sampler cannot pad your dataset correctly. We highly '
'recommend you to use fewer GPUs to finish your work')
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
# When :attr:`shuffle=True`, this ensures all replicas
# use a different random ordering for each epoch.
# Otherwise, the next iteration of this sampler will
# yield the same ordering.
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
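# A worked sketch (not part of the original file): with len(dataset) = 10,
# num_replicas = 4 and samples_per_gpu = 1, num_samples = ceil(10 / 4 / 1)
# = 3 and total_size = 12; the index list is padded from 10 to 12 entries,
# and rank r keeps indices[r:12:4], i.e. exactly 3 non-overlapping samples.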
| 2,892 | 39.180556 | 80 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/samplers/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .distributed_sampler import DistributedSampler
__all__ = ['DistributedSampler']
| 134 | 26 | 51 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/loading.py | # Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
import mmcv
import numpy as np
from mmcv.fileio import FileClient
from mmedit.core.mask import (bbox2mask, brush_stroke_mask, get_irregular_mask,
random_bbox)
from ..registry import PIPELINES
@PIPELINES.register_module()
class LoadImageFromFile:
"""Load image from file.
Args:
io_backend (str): io backend where images are store. Default: 'disk'.
key (str): Keys in results to find corresponding path. Default: 'gt'.
flag (str): Loading flag for images. Default: 'color'.
channel_order (str): Order of channel, candidates are 'bgr' and 'rgb'.
Default: 'bgr'.
convert_to (str | None): The color space of the output image. If None,
no conversion is conducted. Default: None.
save_original_img (bool): If True, maintain a copy of the image in
`results` dict with name of `f'ori_{key}'`. Default: False.
use_cache (bool): If True, load all images at once. Default: False.
        backend (str): The image loading backend type. Options are `cv2`,
            `pillow` and `turbojpeg`. Default: None.
kwargs (dict): Args for file client.
"""
def __init__(self,
io_backend='disk',
key='gt',
flag='color',
channel_order='bgr',
convert_to=None,
save_original_img=False,
use_cache=False,
backend=None,
**kwargs):
self.io_backend = io_backend
self.key = key
self.flag = flag
self.save_original_img = save_original_img
self.channel_order = channel_order
self.convert_to = convert_to
self.kwargs = kwargs
self.file_client = None
self.use_cache = use_cache
self.cache = None
self.backend = backend
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
filepath = str(results[f'{self.key}_path'])
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
if self.use_cache:
if self.cache is None:
self.cache = dict()
if filepath in self.cache:
img = self.cache[filepath]
else:
img_bytes = self.file_client.get(filepath)
img = mmcv.imfrombytes(
img_bytes,
flag=self.flag,
channel_order=self.channel_order,
backend=self.backend) # HWC
self.cache[filepath] = img
else:
img_bytes = self.file_client.get(filepath)
img = mmcv.imfrombytes(
img_bytes,
flag=self.flag,
channel_order=self.channel_order,
backend=self.backend) # HWC
if self.convert_to is not None:
if self.channel_order == 'bgr' and self.convert_to.lower() == 'y':
img = mmcv.bgr2ycbcr(img, y_only=True)
elif self.channel_order == 'rgb':
img = mmcv.rgb2ycbcr(img, y_only=True)
else:
                raise ValueError('Currently only "bgr2ycbcr" and '
                                 '"rgb2ycbcr" are supported.')
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
results[self.key] = img
results[f'{self.key}_path'] = filepath
results[f'{self.key}_ori_shape'] = img.shape
if self.save_original_img:
results[f'ori_{self.key}'] = img.copy()
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f'(io_backend={self.io_backend}, key={self.key}, '
f'flag={self.flag}, save_original_img={self.save_original_img}, '
f'channel_order={self.channel_order}, use_cache={self.use_cache})')
return repr_str
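# A usage sketch (not part of the original file); the path is hypothetical.
#
#     loader = LoadImageFromFile(key='gt', channel_order='rgb',
#                                save_original_img=True)
#     results = loader(dict(gt_path='path/to/img.png'))
#     # results['gt'] is an HWC array; results['ori_gt'] is an untouched copy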
@PIPELINES.register_module()
class LoadImageFromFileList(LoadImageFromFile):
"""Load image from file list.
It accepts a list of path and read each frame from each path. A list
of frames will be returned.
Args:
io_backend (str): io backend where images are store. Default: 'disk'.
key (str): Keys in results to find corresponding path. Default: 'gt'.
flag (str): Loading flag for images. Default: 'color'.
channel_order (str): Order of channel, candidates are 'bgr' and 'rgb'.
Default: 'bgr'.
convert_to (str | None): The color space of the output image. If None,
no conversion is conducted. Default: None.
save_original_img (bool): If True, maintain a copy of the image in
`results` dict with name of `f'ori_{key}'`. Default: False.
kwargs (dict): Args for file client.
"""
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
filepaths = results[f'{self.key}_path']
if not isinstance(filepaths, list):
raise TypeError(
f'filepath should be list, but got {type(filepaths)}')
filepaths = [str(v) for v in filepaths]
imgs = []
shapes = []
if self.save_original_img:
ori_imgs = []
for filepath in filepaths:
img_bytes = self.file_client.get(filepath)
img = mmcv.imfrombytes(
img_bytes, flag=self.flag,
channel_order=self.channel_order) # HWC
# convert to y-channel, if specified
if self.convert_to is not None:
if self.channel_order == 'bgr' and self.convert_to.lower(
) == 'y':
img = mmcv.bgr2ycbcr(img, y_only=True)
elif self.channel_order == 'rgb':
img = mmcv.rgb2ycbcr(img, y_only=True)
else:
                    raise ValueError('Currently only "bgr2ycbcr" and '
                                     '"rgb2ycbcr" are supported.')
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
imgs.append(img)
shapes.append(img.shape)
if self.save_original_img:
ori_imgs.append(img.copy())
results[self.key] = imgs
results[f'{self.key}_path'] = filepaths
results[f'{self.key}_ori_shape'] = shapes
if self.save_original_img:
results[f'ori_{self.key}'] = ori_imgs
return results
@PIPELINES.register_module()
class RandomLoadResizeBg:
"""Randomly load a background image and resize it.
Required key is "fg", added key is "bg".
Args:
bg_dir (str): Path of directory to load background images from.
io_backend (str): io backend where images are store. Default: 'disk'.
flag (str): Loading flag for images. Default: 'color'.
channel_order (str): Order of channel, candidates are 'bgr' and 'rgb'.
Default: 'bgr'.
kwargs (dict): Args for file client.
"""
def __init__(self,
bg_dir,
io_backend='disk',
flag='color',
channel_order='bgr',
**kwargs):
self.bg_dir = bg_dir
self.bg_list = list(mmcv.scandir(bg_dir))
self.io_backend = io_backend
self.flag = flag
self.channel_order = channel_order
self.kwargs = kwargs
self.file_client = None
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
h, w = results['fg'].shape[:2]
idx = np.random.randint(len(self.bg_list))
filepath = Path(self.bg_dir).joinpath(self.bg_list[idx])
img_bytes = self.file_client.get(filepath)
img = mmcv.imfrombytes(
img_bytes, flag=self.flag, channel_order=self.channel_order) # HWC
bg = mmcv.imresize(img, (w, h), interpolation='bicubic')
results['bg'] = bg
return results
def __repr__(self):
return self.__class__.__name__ + f"(bg_dir='{self.bg_dir}')"
@PIPELINES.register_module()
class LoadMask:
"""Load Mask for multiple types.
For different types of mask, users need to provide the corresponding
config dict.
Example config for bbox:
.. code-block:: python
config = dict(img_shape=(256, 256), max_bbox_shape=128)
Example config for irregular:
.. code-block:: python
config = dict(
img_shape=(256, 256),
num_vertices=(4, 12),
max_angle=4.,
length_range=(10, 100),
brush_width=(10, 40),
area_ratio_range=(0.15, 0.5))
Example config for ff:
.. code-block:: python
config = dict(
img_shape=(256, 256),
num_vertices=(4, 12),
mean_angle=1.2,
angle_range=0.4,
brush_width=(12, 40))
Example config for set:
.. code-block:: python
config = dict(
mask_list_file='xxx/xxx/ooxx.txt',
prefix='/xxx/xxx/ooxx/',
io_backend='disk',
flag='unchanged',
file_client_kwargs=dict()
)
    The mask_list_file contains the list of mask file names like this:
test1.jpeg
test2.jpeg
...
...
The prefix gives the data path.
Args:
mask_mode (str): Mask mode in ['bbox', 'irregular', 'ff', 'set',
'file'].
* bbox: square bounding box masks.
* irregular: irregular holes.
* ff: free-form holes from DeepFillv2.
* set: randomly get a mask from a mask set.
* file: get mask from 'mask_path' in results.
mask_config (dict): Params for creating masks. Each type of mask needs
different configs.
"""
def __init__(self, mask_mode='bbox', mask_config=None):
self.mask_mode = mask_mode
self.mask_config = dict() if mask_config is None else mask_config
assert isinstance(self.mask_config, dict)
# set init info if needed in some modes
self._init_info()
def _init_info(self):
if self.mask_mode == 'set':
# get mask list information
self.mask_list = []
mask_list_file = self.mask_config['mask_list_file']
with open(mask_list_file, 'r') as f:
for line in f:
line_split = line.strip().split(' ')
mask_name = line_split[0]
self.mask_list.append(
Path(self.mask_config['prefix']).joinpath(mask_name))
self.mask_set_size = len(self.mask_list)
self.io_backend = self.mask_config['io_backend']
self.flag = self.mask_config['flag']
self.file_client_kwargs = self.mask_config['file_client_kwargs']
self.file_client = None
elif self.mask_mode == 'file':
self.io_backend = 'disk'
self.flag = 'unchanged'
self.file_client_kwargs = dict()
self.file_client = None
def _get_random_mask_from_set(self):
if self.file_client is None:
self.file_client = FileClient(self.io_backend,
**self.file_client_kwargs)
        # np.random.randint excludes the high bound, so the index is in range
mask_idx = np.random.randint(0, self.mask_set_size)
mask_bytes = self.file_client.get(self.mask_list[mask_idx])
mask = mmcv.imfrombytes(mask_bytes, flag=self.flag) # HWC, BGR
if mask.ndim == 2:
mask = np.expand_dims(mask, axis=2)
else:
mask = mask[:, :, 0:1]
mask[mask > 0] = 1.
return mask
def _get_mask_from_file(self, path):
if self.file_client is None:
self.file_client = FileClient(self.io_backend,
**self.file_client_kwargs)
mask_bytes = self.file_client.get(path)
mask = mmcv.imfrombytes(mask_bytes, flag=self.flag) # HWC, BGR
if mask.ndim == 2:
mask = np.expand_dims(mask, axis=2)
else:
mask = mask[:, :, 0:1]
mask[mask > 0] = 1.
return mask
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
if self.mask_mode == 'bbox':
mask_bbox = random_bbox(**self.mask_config)
mask = bbox2mask(self.mask_config['img_shape'], mask_bbox)
results['mask_bbox'] = mask_bbox
elif self.mask_mode == 'irregular':
mask = get_irregular_mask(**self.mask_config)
elif self.mask_mode == 'set':
mask = self._get_random_mask_from_set()
elif self.mask_mode == 'ff':
mask = brush_stroke_mask(**self.mask_config)
elif self.mask_mode == 'file':
mask = self._get_mask_from_file(results['mask_path'])
else:
raise NotImplementedError(
f'Mask mode {self.mask_mode} has not been implemented.')
results['mask'] = mask
return results
def __repr__(self):
return self.__class__.__name__ + f"(mask_mode='{self.mask_mode}')"
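# Illustrative usage (sketch added for clarity, not part of the original
# file), using the bbox config shown in the class docstring above.
# >>> load_mask = LoadMask(
# ...     mask_mode='bbox',
# ...     mask_config=dict(img_shape=(256, 256), max_bbox_shape=128))
# >>> results = load_mask(dict())
# >>> results['mask'].shape   # one-channel hole mask
# (256, 256, 1)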
@PIPELINES.register_module()
class GetSpatialDiscountMask:
"""Get spatial discounting mask constant.
Spatial discounting mask is first introduced in:
Generative Image Inpainting with Contextual Attention.
Args:
gamma (float, optional): Gamma for computing spatial discounting.
Defaults to 0.99.
beta (float, optional): Beta for computing spatial discounting.
Defaults to 1.5.
"""
def __init__(self, gamma=0.99, beta=1.5):
self.gamma = gamma
self.beta = beta
def spatial_discount_mask(self, mask_width, mask_height):
"""Generate spatial discounting mask constant.
Args:
mask_width (int): The width of bbox hole.
            mask_height (int): The height of bbox hole.
Returns:
np.ndarray: Spatial discounting mask.
"""
w, h = np.meshgrid(np.arange(mask_width), np.arange(mask_height))
grid_stack = np.stack([h, w], axis=2)
mask_values = (self.gamma**(np.minimum(
grid_stack, [mask_height - 1, mask_width - 1] - grid_stack) *
self.beta)).max(
axis=2, keepdims=True)
return mask_values
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
mask_bbox = results['mask_bbox']
mask = results['mask']
mask_height, mask_width = mask_bbox[-2:]
discount_hole = self.spatial_discount_mask(mask_width, mask_height)
discount_mask = np.zeros_like(mask)
discount_mask[mask_bbox[0]:mask_bbox[0] + mask_height,
mask_bbox[1]:mask_bbox[1] + mask_width,
...] = discount_hole
results['discount_mask'] = discount_mask
return results
def __repr__(self):
return self.__class__.__name__ + (f'(gamma={self.gamma}, '
f'beta={self.beta})')
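# Worked example (added for clarity, not part of the original file): each
# pixel's value is the max over the two axes of
# gamma ** (min_distance_to_border * beta), so border pixels get 1.0 and the
# values decay towards the hole center.
# >>> dm = GetSpatialDiscountMask()           # gamma=0.99, beta=1.5
# >>> vals = dm.spatial_discount_mask(4, 4)
# >>> vals.shape
# (4, 4, 1)
# >>> float(vals[0, 0, 0])                    # corner pixel: 0.99 ** 0
# 1.0
# >>> round(float(vals[1, 1, 0]), 3)          # 0.99 ** 1.5
# 0.985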
@PIPELINES.register_module()
class LoadPairedImageFromFile(LoadImageFromFile):
"""Load a pair of images from file.
Each sample contains a pair of images, which are concatenated in the w
dimension (a|b). This is a special loading class for generation paired
dataset. It loads a pair of images as the common loader does and crops
it into two images with the same shape in different domains.
Required key is "pair_path". Added or modified keys are "pair",
"pair_ori_shape", "ori_pair", "img_a", "img_b", "img_a_path",
"img_b_path", "img_a_ori_shape", "img_b_ori_shape", "ori_img_a" and
"ori_img_b".
Args:
        io_backend (str): io backend where images are stored. Default: 'disk'.
key (str): Keys in results to find corresponding path. Default: 'gt'.
flag (str): Loading flag for images. Default: 'color'.
channel_order (str): Order of channel, candidates are 'bgr' and 'rgb'.
Default: 'bgr'.
save_original_img (bool): If True, maintain a copy of the image in
`results` dict with name of `f'ori_{key}'`. Default: False.
kwargs (dict): Args for file client.
"""
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
filepath = str(results[f'{self.key}_path'])
img_bytes = self.file_client.get(filepath)
img = mmcv.imfrombytes(
img_bytes, flag=self.flag, channel_order=self.channel_order) # HWC
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
results[self.key] = img
results[f'{self.key}_path'] = filepath
results[f'{self.key}_ori_shape'] = img.shape
if self.save_original_img:
results[f'ori_{self.key}'] = img.copy()
# crop pair into a and b
w = img.shape[1]
if w % 2 != 0:
raise ValueError(
                f'The width of image pair must be an even number, '
                f'but got {w}.')
new_w = w // 2
img_a = img[:, :new_w, :]
img_b = img[:, new_w:, :]
results['img_a'] = img_a
results['img_b'] = img_b
results['img_a_path'] = filepath
results['img_b_path'] = filepath
results['img_a_ori_shape'] = img_a.shape
results['img_b_ori_shape'] = img_b.shape
if self.save_original_img:
results['ori_img_a'] = img_a.copy()
results['ori_img_b'] = img_b.copy()
return results
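# Illustrative usage (sketch added for clarity, not part of the original
# file); the pair path is hypothetical and the constructor is inherited from
# LoadImageFromFile.
# >>> loader = LoadPairedImageFromFile(io_backend='disk', key='pair')
# >>> results = loader(dict(pair_path='data/facades/train/1.jpg'))
# >>> # a 256x512 (a|b) image is split into two 256x256 halves:
# >>> results['img_a'].shape, results['img_b'].shape
# ((256, 256, 3), (256, 256, 3))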
| 19,292 | 34.206204 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/crop.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import random
import mmcv
import numpy as np
from torch.nn.modules.utils import _pair
from ..registry import PIPELINES
from .utils import random_choose_unknown
@PIPELINES.register_module()
class Crop:
"""Crop data to specific size for training.
Args:
keys (Sequence[str]): The images to be cropped.
crop_size (Tuple[int]): Target spatial size (h, w).
random_crop (bool): If set to True, it will random crop
image. Otherwise, it will work as center crop.
is_pad_zeros (bool, optional): Whether to pad the image with 0 if
crop_size is greater than image size. Default: False.
"""
def __init__(self, keys, crop_size, random_crop=True, is_pad_zeros=False):
if not mmcv.is_tuple_of(crop_size, int):
raise TypeError(
'Elements of crop_size must be int and crop_size must be'
f' tuple, but got {type(crop_size[0])} in {type(crop_size)}')
self.keys = keys
self.crop_size = crop_size
self.random_crop = random_crop
self.is_pad_zeros = is_pad_zeros
def _crop(self, data):
if not isinstance(data, list):
data_list = [data]
else:
data_list = data
crop_bbox_list = []
data_list_ = []
for item in data_list:
data_h, data_w = item.shape[:2]
crop_h, crop_w = self.crop_size
if self.is_pad_zeros:
crop_y_offset, crop_x_offset = 0, 0
if crop_h > data_h:
crop_y_offset = (crop_h - data_h) // 2
if crop_w > data_w:
crop_x_offset = (crop_w - data_w) // 2
if crop_y_offset > 0 or crop_x_offset > 0:
pad_width = [(2 * crop_y_offset, 2 * crop_y_offset),
(2 * crop_x_offset, 2 * crop_x_offset)]
if item.ndim == 3:
pad_width.append((0, 0))
item = np.pad(
item,
tuple(pad_width),
mode='constant',
constant_values=0)
data_h, data_w = item.shape[:2]
crop_h = min(data_h, crop_h)
crop_w = min(data_w, crop_w)
if self.random_crop:
x_offset = np.random.randint(0, data_w - crop_w + 1)
y_offset = np.random.randint(0, data_h - crop_h + 1)
else:
x_offset = max(0, (data_w - crop_w)) // 2
y_offset = max(0, (data_h - crop_h)) // 2
crop_bbox = [x_offset, y_offset, crop_w, crop_h]
item_ = item[y_offset:y_offset + crop_h,
x_offset:x_offset + crop_w, ...]
crop_bbox_list.append(crop_bbox)
data_list_.append(item_)
if not isinstance(data, list):
return data_list_[0], crop_bbox_list[0]
return data_list_, crop_bbox_list
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for k in self.keys:
data_, crop_bbox = self._crop(results[k])
results[k] = data_
results[k + '_crop_bbox'] = crop_bbox
results['crop_size'] = self.crop_size
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'keys={self.keys}, crop_size={self.crop_size}, '
f'random_crop={self.random_crop}')
return repr_str
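# Illustrative usage (sketch added for clarity, not part of the original
# file): a deterministic center crop.
# >>> crop = Crop(keys=['gt'], crop_size=(128, 128), random_crop=False)
# >>> results = crop(dict(gt=np.zeros((256, 256, 3), dtype=np.float32)))
# >>> results['gt'].shape
# (128, 128, 3)
# >>> results['gt_crop_bbox']   # [x_offset, y_offset, crop_w, crop_h]
# [64, 64, 128, 128]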
@PIPELINES.register_module()
class RandomResizedCrop(object):
"""Crop data to random size and aspect ratio.
    A crop of a random proportion of the original image, with a random
    aspect ratio relative to the original aspect ratio, is made.
The cropped image is finally resized to a given size specified
by 'crop_size'. Modified keys are the attributes specified in "keys".
This code is partially adopted from
torchvision.transforms.RandomResizedCrop:
[https://pytorch.org/vision/stable/_modules/torchvision/transforms/\
transforms.html#RandomResizedCrop].
Args:
keys (list[str]): The images to be resized and random-cropped.
crop_size (int | tuple[int]): Target spatial size (h, w).
scale (tuple[float], optional): Range of the proportion of the original
image to be cropped. Default: (0.08, 1.0).
ratio (tuple[float], optional): Range of aspect ratio of the crop.
Default: (3. / 4., 4. / 3.).
interpolation (str, optional): Algorithm used for interpolation.
It can be only either one of the following:
"nearest" | "bilinear" | "bicubic" | "area" | "lanczos".
Default: "bilinear".
"""
def __init__(self,
keys,
crop_size,
scale=(0.08, 1.0),
ratio=(3. / 4., 4. / 3.),
interpolation='bilinear'):
assert keys, 'Keys should not be empty.'
if isinstance(crop_size, int):
crop_size = (crop_size, crop_size)
elif not mmcv.is_tuple_of(crop_size, int):
raise TypeError('"crop_size" must be an integer '
'or a tuple of integers, but got '
f'{type(crop_size)}')
if not mmcv.is_tuple_of(scale, float):
raise TypeError('"scale" must be a tuple of float, '
f'but got {type(scale)}')
if not mmcv.is_tuple_of(ratio, float):
raise TypeError('"ratio" must be a tuple of float, '
f'but got {type(ratio)}')
self.keys = keys
self.crop_size = crop_size
self.scale = scale
self.ratio = ratio
self.interpolation = interpolation
def get_params(self, data):
"""Get parameters for a random sized crop.
Args:
data (np.ndarray): Image of type numpy array to be cropped.
Returns:
A tuple containing the coordinates of the top left corner
and the chosen crop size.
"""
data_h, data_w = data.shape[:2]
area = data_h * data_w
for _ in range(10):
target_area = random.uniform(*self.scale) * area
log_ratio = (math.log(self.ratio[0]), math.log(self.ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
crop_w = int(round(math.sqrt(target_area * aspect_ratio)))
crop_h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < crop_w <= data_w and 0 < crop_h <= data_h:
top = random.randint(0, data_h - crop_h)
left = random.randint(0, data_w - crop_w)
return top, left, crop_h, crop_w
# Fall back to center crop
in_ratio = float(data_w) / float(data_h)
if (in_ratio < min(self.ratio)):
crop_w = data_w
crop_h = int(round(crop_w / min(self.ratio)))
elif (in_ratio > max(self.ratio)):
crop_h = data_h
crop_w = int(round(crop_h * max(self.ratio)))
else: # whole image
crop_w = data_w
crop_h = data_h
top = (data_h - crop_h) // 2
left = (data_w - crop_w) // 2
return top, left, crop_h, crop_w
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for k in self.keys:
top, left, crop_h, crop_w = self.get_params(results[k])
crop_bbox = [top, left, crop_w, crop_h]
results[k] = results[k][top:top + crop_h, left:left + crop_w, ...]
results[k] = mmcv.imresize(
results[k],
self.crop_size,
return_scale=False,
interpolation=self.interpolation)
results[k + '_crop_bbox'] = crop_bbox
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, crop_size={self.crop_size}, '
f'scale={self.scale}, ratio={self.ratio}, '
f'interpolation={self.interpolation})')
return repr_str
@PIPELINES.register_module()
class FixedCrop:
"""Crop paired data (at a specific position) to specific size for training.
Args:
keys (Sequence[str]): The images to be cropped.
crop_size (Tuple[int]): Target spatial size (h, w).
crop_pos (Tuple[int]): Specific position (x, y). If set to None,
random initialize the position to crop paired data batch.
"""
def __init__(self, keys, crop_size, crop_pos=None):
if not mmcv.is_tuple_of(crop_size, int):
raise TypeError(
'Elements of crop_size must be int and crop_size must be'
f' tuple, but got {type(crop_size[0])} in {type(crop_size)}')
if not mmcv.is_tuple_of(crop_pos, int) and (crop_pos is not None):
raise TypeError(
'Elements of crop_pos must be int and crop_pos must be'
f' tuple or None, but got {type(crop_pos[0])} in '
f'{type(crop_pos)}')
self.keys = keys
self.crop_size = crop_size
self.crop_pos = crop_pos
def _crop(self, data, x_offset, y_offset, crop_w, crop_h):
crop_bbox = [x_offset, y_offset, crop_w, crop_h]
data_ = data[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w,
...]
return data_, crop_bbox
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
if isinstance(results[self.keys[0]], list):
data_h, data_w = results[self.keys[0]][0].shape[:2]
else:
data_h, data_w = results[self.keys[0]].shape[:2]
crop_h, crop_w = self.crop_size
crop_h = min(data_h, crop_h)
crop_w = min(data_w, crop_w)
if self.crop_pos is None:
x_offset = np.random.randint(0, data_w - crop_w + 1)
y_offset = np.random.randint(0, data_h - crop_h + 1)
else:
x_offset, y_offset = self.crop_pos
crop_w = min(data_w - x_offset, crop_w)
crop_h = min(data_h - y_offset, crop_h)
for k in self.keys:
images = results[k]
is_list = isinstance(images, list)
if not is_list:
images = [images]
cropped_images = []
crop_bbox = None
for image in images:
# In fixed crop for paired images, sizes should be the same
if (image.shape[0] != data_h or image.shape[1] != data_w):
raise ValueError(
'The sizes of paired images should be the same. '
f'Expected ({data_h}, {data_w}), '
f'but got ({image.shape[0]}, '
f'{image.shape[1]}).')
data_, crop_bbox = self._crop(image, x_offset, y_offset,
crop_w, crop_h)
cropped_images.append(data_)
results[k + '_crop_bbox'] = crop_bbox
if not is_list:
cropped_images = cropped_images[0]
results[k] = cropped_images
results['crop_size'] = self.crop_size
results['crop_pos'] = self.crop_pos
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'keys={self.keys}, crop_size={self.crop_size}, '
f'crop_pos={self.crop_pos}')
return repr_str
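# Illustrative usage (sketch added for clarity, not part of the original
# file): both keys are cropped at the same fixed (x, y) position.
# >>> crop = FixedCrop(keys=['img_a', 'img_b'], crop_size=(128, 128),
# ...                  crop_pos=(10, 20))
# >>> imgs = dict(img_a=np.zeros((256, 256, 3)),
# ...             img_b=np.zeros((256, 256, 3)))
# >>> results = crop(imgs)
# >>> results['img_a_crop_bbox']   # [x_offset, y_offset, crop_w, crop_h]
# [10, 20, 128, 128]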
@PIPELINES.register_module()
class PairedRandomCrop:
"""Paried random crop.
It crops a pair of lq and gt images with corresponding locations.
It also supports accepting lq list and gt list.
Required keys are "scale", "lq", and "gt",
added or modified keys are "lq" and "gt".
Args:
gt_patch_size (int): cropped gt patch size.
"""
def __init__(self, gt_patch_size):
self.gt_patch_size = gt_patch_size
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
scale = results['scale']
lq_patch_size = self.gt_patch_size // scale
lq_is_list = isinstance(results['lq'], list)
if not lq_is_list:
results['lq'] = [results['lq']]
gt_is_list = isinstance(results['gt'], list)
if not gt_is_list:
results['gt'] = [results['gt']]
h_lq, w_lq, _ = results['lq'][0].shape
h_gt, w_gt, _ = results['gt'][0].shape
if h_gt != h_lq * scale or w_gt != w_lq * scale:
raise ValueError(
                f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x '
                f'multiplication of LQ ({h_lq}, {w_lq}).')
if h_lq < lq_patch_size or w_lq < lq_patch_size:
raise ValueError(
                f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
f'({lq_patch_size}, {lq_patch_size}). Please check '
f'{results["lq_path"][0]} and {results["gt_path"][0]}.')
# randomly choose top and left coordinates for lq patch
top = np.random.randint(h_lq - lq_patch_size + 1)
left = np.random.randint(w_lq - lq_patch_size + 1)
# crop lq patch
results['lq'] = [
v[top:top + lq_patch_size, left:left + lq_patch_size, ...]
for v in results['lq']
]
# crop corresponding gt patch
top_gt, left_gt = int(top * scale), int(left * scale)
results['gt'] = [
v[top_gt:top_gt + self.gt_patch_size,
left_gt:left_gt + self.gt_patch_size, ...] for v in results['gt']
]
if not lq_is_list:
results['lq'] = results['lq'][0]
if not gt_is_list:
results['gt'] = results['gt'][0]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(gt_patch_size={self.gt_patch_size})'
return repr_str
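# Illustrative usage (sketch added for clarity, not part of the original
# file): with scale 4 and gt_patch_size 128, the lq patch is 32x32.
# >>> crop = PairedRandomCrop(gt_patch_size=128)
# >>> results = dict(scale=4,
# ...                lq=np.zeros((64, 64, 3)),
# ...                gt=np.zeros((256, 256, 3)))
# >>> results = crop(results)
# >>> results['lq'].shape, results['gt'].shape
# ((32, 32, 3), (128, 128, 3))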
@PIPELINES.register_module()
class CropAroundCenter:
"""Randomly crop the images around unknown area in the center 1/4 images.
This cropping strategy is adopted in GCA matting. The `unknown area` is the
same as `semi-transparent area`.
https://arxiv.org/pdf/2001.04069.pdf
It retains the center 1/4 images and resizes the images to 'crop_size'.
Required keys are "fg", "bg", "trimap" and "alpha", added or modified keys
are "crop_bbox", "fg", "bg", "trimap" and "alpha".
Args:
crop_size (int | tuple): Desired output size. If int, square crop is
applied.
"""
def __init__(self, crop_size):
if mmcv.is_tuple_of(crop_size, int):
assert len(crop_size) == 2, 'length of crop_size must be 2.'
elif not isinstance(crop_size, int):
raise TypeError('crop_size must be int or a tuple of int, but got '
f'{type(crop_size)}')
self.crop_size = _pair(crop_size)
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
fg = results['fg']
alpha = results['alpha']
trimap = results['trimap']
bg = results['bg']
h, w = fg.shape[:2]
assert bg.shape == fg.shape, (f'shape of bg {bg.shape} should be the '
f'same as fg {fg.shape}.')
crop_h, crop_w = self.crop_size
# Make sure h >= crop_h, w >= crop_w. If not, rescale imgs
rescale_ratio = max(crop_h / h, crop_w / w)
if rescale_ratio > 1:
new_h = max(int(h * rescale_ratio), crop_h)
new_w = max(int(w * rescale_ratio), crop_w)
fg = mmcv.imresize(fg, (new_w, new_h), interpolation='nearest')
alpha = mmcv.imresize(
alpha, (new_w, new_h), interpolation='nearest')
trimap = mmcv.imresize(
trimap, (new_w, new_h), interpolation='nearest')
bg = mmcv.imresize(bg, (new_w, new_h), interpolation='bicubic')
h, w = new_h, new_w
# resize to 1/4 to ignore small unknown patches
small_trimap = mmcv.imresize(
trimap, (w // 4, h // 4), interpolation='nearest')
# find unknown area in center 1/4 region
margin_h, margin_w = crop_h // 2, crop_w // 2
sample_area = small_trimap[margin_h // 4:(h - margin_h) // 4,
margin_w // 4:(w - margin_w) // 4]
unknown_xs, unknown_ys = np.where(sample_area == 128)
unknown_num = len(unknown_xs)
if unknown_num < 10:
# too few unknown area in the center, crop from the whole image
top = np.random.randint(0, h - crop_h + 1)
left = np.random.randint(0, w - crop_w + 1)
else:
idx = np.random.randint(unknown_num)
top = unknown_xs[idx] * 4
left = unknown_ys[idx] * 4
bottom = top + crop_h
right = left + crop_w
results['fg'] = fg[top:bottom, left:right]
results['alpha'] = alpha[top:bottom, left:right]
results['trimap'] = trimap[top:bottom, left:right]
results['bg'] = bg[top:bottom, left:right]
results['crop_bbox'] = (left, top, right, bottom)
return results
def __repr__(self):
return self.__class__.__name__ + f'(crop_size={self.crop_size})'
@PIPELINES.register_module()
class CropAroundUnknown:
"""Crop around unknown area with a randomly selected scale.
    Randomly select the h and w from a list of (h, w).
Required keys are the keys in argument `keys`, added or
modified keys are "crop_bbox" and the keys in argument `keys`.
This class assumes value of "alpha" ranges from 0 to 255.
Args:
keys (Sequence[str]): The images to be cropped. It must contain
'alpha'. If unknown_source is set to 'trimap', then it must also
contain 'trimap'.
        crop_sizes (list[int | tuple[int]]): List of (h, w) to be selected.
        unknown_source (str, optional): Unknown area to select from. It must be
            'alpha' or 'trimap'. Default to 'alpha'.
interpolations (str | list[str], optional): Interpolation method of
mmcv.imresize. The interpolation operation will be applied when
image size is smaller than the crop_size. If given as a list of
str, it should have the same length as `keys`. Or if given as a
str all the keys will be resized with the same method.
Default to 'bilinear'.
"""
def __init__(self,
keys,
crop_sizes,
unknown_source='alpha',
interpolations='bilinear'):
if 'alpha' not in keys:
raise ValueError(f'"alpha" must be in keys, but got {keys}')
self.keys = keys
if not isinstance(crop_sizes, list):
raise TypeError(
f'Crop sizes must be list, but got {type(crop_sizes)}.')
self.crop_sizes = [_pair(crop_size) for crop_size in crop_sizes]
if not mmcv.is_tuple_of(self.crop_sizes[0], int):
raise TypeError('Elements of crop_sizes must be int or tuple of '
f'int, but got {type(self.crop_sizes[0][0])}.')
if unknown_source not in ['alpha', 'trimap']:
raise ValueError('unknown_source must be "alpha" or "trimap", '
f'but got {unknown_source}')
if unknown_source not in keys:
# it could only be trimap, since alpha is checked before
raise ValueError(
'if unknown_source is "trimap", it must also be set in keys')
self.unknown_source = unknown_source
if isinstance(interpolations, str):
self.interpolations = [interpolations] * len(self.keys)
elif mmcv.is_list_of(interpolations,
str) and len(interpolations) == len(self.keys):
self.interpolations = interpolations
else:
raise TypeError(
'interpolations must be a str or list of str with '
f'the same length as keys, but got {interpolations}')
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
h, w = results[self.keys[0]].shape[:2]
rand_ind = np.random.randint(len(self.crop_sizes))
crop_h, crop_w = self.crop_sizes[rand_ind]
# Make sure h >= crop_h, w >= crop_w. If not, rescale imgs
rescale_ratio = max(crop_h / h, crop_w / w)
if rescale_ratio > 1:
h = max(int(h * rescale_ratio), crop_h)
w = max(int(w * rescale_ratio), crop_w)
for key, interpolation in zip(self.keys, self.interpolations):
results[key] = mmcv.imresize(
results[key], (w, h), interpolation=interpolation)
# Select the cropping top-left point which is an unknown pixel
if self.unknown_source == 'alpha':
unknown = (results['alpha'] > 0) & (results['alpha'] < 255)
else:
unknown = results['trimap'] == 128
top, left = random_choose_unknown(unknown.squeeze(), (crop_h, crop_w))
bottom = top + crop_h
right = left + crop_w
for key in self.keys:
results[key] = results[key][top:bottom, left:right]
results['crop_bbox'] = (left, top, right, bottom)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, crop_sizes={self.crop_sizes}, '
f"unknown_source='{self.unknown_source}', "
f'interpolations={self.interpolations})')
return repr_str
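# Illustrative usage (sketch added for clarity, not part of the original
# file): the crop is anchored on a semi-transparent (0 < alpha < 255) pixel.
# >>> crop = CropAroundUnknown(keys=['alpha', 'fg'], crop_sizes=[(64, 64)])
# >>> alpha = np.zeros((128, 128, 1), dtype=np.uint8)
# >>> alpha[48:80, 48:80] = 128   # unknown region
# >>> fg = np.zeros((128, 128, 3), dtype=np.uint8)
# >>> results = crop(dict(alpha=alpha, fg=fg))
# >>> results['alpha'].shape
# (64, 64, 1)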
@PIPELINES.register_module()
class CropAroundFg:
"""Crop around the whole foreground in the segmentation mask.
Required keys are "seg" and the keys in argument `keys`.
Meanwhile, "seg" must be in argument `keys`. Added or modified keys are
"crop_bbox" and the keys in argument `keys`.
Args:
keys (Sequence[str]): The images to be cropped. It must contain
'seg'.
bd_ratio_range (tuple, optional): The range of the boundary (bd) ratio
to select from. The boundary ratio is the ratio of the boundary to
the minimal bbox that contains the whole foreground given by
segmentation. Default to (0.1, 0.4).
test_mode (bool): Whether use test mode. In test mode, the tight crop
            area of foreground will be extended to a square.
Default to False.
"""
def __init__(self, keys, bd_ratio_range=(0.1, 0.4), test_mode=False):
if 'seg' not in keys:
raise ValueError(f'"seg" must be in keys, but got {keys}')
if (not mmcv.is_tuple_of(bd_ratio_range, float)
or len(bd_ratio_range) != 2):
            raise TypeError('bd_ratio_range must be a tuple of 2 float, '
                            f'but got {bd_ratio_range}')
self.keys = keys
self.bd_ratio_range = bd_ratio_range
self.test_mode = test_mode
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
seg = results['seg']
height, width = seg.shape[:2]
# get foreground bbox
fg_coor = np.array(np.where(seg))
top, left = np.amin(fg_coor, axis=1)
bottom, right = np.amax(fg_coor, axis=1)
# enlarge bbox
long_side = np.maximum(bottom - top, right - left)
if self.test_mode:
bottom = top + long_side
right = left + long_side
boundary_ratio = np.random.uniform(*self.bd_ratio_range)
boundary = int(np.round(boundary_ratio * long_side))
# NOTE: Different from the original repo, we keep track of the four
        # corners of the bbox (left, top, right, bottom) while the original
        # repo uses (top, left, height, width) to represent bbox. This may
        # introduce a difference of 1 pixel.
top = max(top - boundary, 0)
left = max(left - boundary, 0)
bottom = min(bottom + boundary, height)
right = min(right + boundary, width)
for key in self.keys:
results[key] = results[key][top:bottom, left:right]
results['crop_bbox'] = (left, top, right, bottom)
return results
@PIPELINES.register_module()
class ModCrop:
"""Mod crop gt images, used during testing.
Required keys are "scale" and "gt",
added or modified keys are "gt".
"""
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
img = results['gt'].copy()
scale = results['scale']
if img.ndim in [2, 3]:
h, w = img.shape[0], img.shape[1]
h_remainder, w_remainder = h % scale, w % scale
img = img[:h - h_remainder, :w - w_remainder, ...]
else:
raise ValueError(f'Wrong img ndim: {img.ndim}.')
results['gt'] = img
return results
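# Illustrative usage (sketch added for clarity, not part of the original
# file): height/width are trimmed down to multiples of the scale.
# >>> mod_crop = ModCrop()
# >>> results = mod_crop(dict(gt=np.zeros((103, 107, 3)), scale=4))
# >>> results['gt'].shape   # 103 -> 100, 107 -> 104
# (100, 104, 3)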
@PIPELINES.register_module()
class CropLike:
"""Crop/pad the image in the target_key according to the size of image
in the reference_key .
Args:
target_key (str): The key needs to be cropped.
reference_key (str | None): The reference key, need its size.
Default: None.
"""
def __init__(self, target_key, reference_key=None):
assert reference_key and target_key
self.target_key = target_key
self.reference_key = reference_key
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Require self.target_key and self.reference_key.
Returns:
dict: A dict containing the processed data and information.
Modify self.target_key.
"""
size = results[self.reference_key].shape
old_image = results[self.target_key]
old_size = old_image.shape
h, w = old_size[:2]
new_size = size[:2] + old_size[2:]
h_cover, w_cover = min(h, size[0]), min(w, size[1])
format_image = np.zeros(new_size, dtype=old_image.dtype)
format_image[:h_cover, :w_cover] = old_image[:h_cover, :w_cover]
results[self.target_key] = format_image
return results
def __repr__(self):
return (self.__class__.__name__ + f' target_key={self.target_key}, ' +
f'reference_key={self.reference_key}')
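# Illustrative usage (sketch added for clarity, not part of the original
# file): the target is cropped where larger and zero-padded where smaller.
# >>> crop_like = CropLike(target_key='gt', reference_key='lq')
# >>> results = dict(gt=np.ones((100, 120, 3)), lq=np.zeros((96, 128, 3)))
# >>> results = crop_like(results)
# >>> results['gt'].shape   # rows cropped to 96, cols zero-padded to 128
# (96, 128, 3)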
| 28,291 | 36.722667 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/augmentation.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import math
import numbers
import os
import os.path as osp
import random
import cv2
import mmcv
import numpy as np
import torchvision.transforms as transforms
from PIL import Image
from ..registry import PIPELINES
@PIPELINES.register_module()
class Resize:
"""Resize data to a specific size for training or resize the images to fit
the network input regulation for testing.
    When used for resizing images to fit the network input regulation, note
    that a network may have several downsample and then upsample operations,
    so the input height and width should be divisible by the downsample
    factor of the network.
For example, the network would downsample the input for 5 times with
stride 2, then the downsample factor is 2^5 = 32 and the height
and width should be divisible by 32.
Required keys are the keys in attribute "keys", added or modified keys are
"keep_ratio", "scale_factor", "interpolation" and the
keys in attribute "keys".
All keys in "keys" should have the same shape. "test_trans" is used to
record the test transformation to align the input's shape.
Args:
keys (list[str]): The images to be resized.
scale (float | tuple[int]): If scale is tuple[int], target spatial
size (h, w). Otherwise, target spatial size is scaled by input
size.
Note that when it is used, `size_factor` and `max_size` are
useless. Default: None
keep_ratio (bool): If set to True, images will be resized without
changing the aspect ratio. Otherwise, it will resize images to a
given size. Default: False.
            Note that it is used together with `scale`.
size_factor (int): Let the output shape be a multiple of size_factor.
Default:None.
Note that when it is used, `scale` should be set to None and
`keep_ratio` should be set to False.
max_size (int): The maximum size of the longest side of the output.
Default:None.
            Note that it is used together with `size_factor`.
interpolation (str): Algorithm used for interpolation:
"nearest" | "bilinear" | "bicubic" | "area" | "lanczos".
Default: "bilinear".
backend (str | None): The image resize backend type. Options are `cv2`,
`pillow`, `None`. If backend is None, the global imread_backend
specified by ``mmcv.use_backend()`` will be used.
Default: None.
output_keys (list[str] | None): The resized images. Default: None
Note that if it is not `None`, its length should be equal to keys.
"""
def __init__(self,
keys,
scale=None,
keep_ratio=False,
size_factor=None,
max_size=None,
interpolation='bilinear',
backend=None,
output_keys=None):
assert keys, 'Keys should not be empty.'
if output_keys:
assert len(output_keys) == len(keys)
else:
output_keys = keys
if size_factor:
assert scale is None, ('When size_factor is used, scale should ',
f'be None. But received {scale}.')
assert keep_ratio is False, ('When size_factor is used, '
'keep_ratio should be False.')
if max_size:
assert size_factor is not None, (
'When max_size is used, '
f'size_factor should also be set. But received {size_factor}.')
if isinstance(scale, float):
if scale <= 0:
raise ValueError(f'Invalid scale {scale}, must be positive.')
elif mmcv.is_tuple_of(scale, int):
max_long_edge = max(scale)
max_short_edge = min(scale)
if max_short_edge == -1:
# assign np.inf to long edge for rescaling short edge later.
scale = (np.inf, max_long_edge)
elif scale is not None:
raise TypeError(
f'Scale must be None, float or tuple of int, but got '
f'{type(scale)}.')
self.keys = keys
self.output_keys = output_keys
self.scale = scale
self.size_factor = size_factor
self.max_size = max_size
self.keep_ratio = keep_ratio
self.interpolation = interpolation
self.backend = backend
def _resize(self, img):
if self.keep_ratio:
img, self.scale_factor = mmcv.imrescale(
img,
self.scale,
return_scale=True,
interpolation=self.interpolation,
backend=self.backend)
else:
img, w_scale, h_scale = mmcv.imresize(
img,
self.scale,
return_scale=True,
interpolation=self.interpolation,
backend=self.backend)
self.scale_factor = np.array((w_scale, h_scale), dtype=np.float32)
return img
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
if self.size_factor:
h, w = results[self.keys[0]].shape[:2]
new_h = h - (h % self.size_factor)
new_w = w - (w % self.size_factor)
if self.max_size:
new_h = min(self.max_size - (self.max_size % self.size_factor),
new_h)
new_w = min(self.max_size - (self.max_size % self.size_factor),
new_w)
self.scale = (new_w, new_h)
for key, out_key in zip(self.keys, self.output_keys):
results[out_key] = self._resize(results[key])
if len(results[out_key].shape) == 2:
results[out_key] = np.expand_dims(results[out_key], axis=2)
results['scale_factor'] = self.scale_factor
results['keep_ratio'] = self.keep_ratio
results['interpolation'] = self.interpolation
results['backend'] = self.backend
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f'(keys={self.keys}, output_keys={self.output_keys}, '
f'scale={self.scale}, '
f'keep_ratio={self.keep_ratio}, size_factor={self.size_factor}, '
f'max_size={self.max_size}, interpolation={self.interpolation})')
return repr_str
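# Illustrative usage (sketch added for clarity, not part of the original
# file): with size_factor=32, the output shape is the largest multiple of 32
# not exceeding the input shape.
# >>> resize = Resize(keys=['lq'], size_factor=32)
# >>> results = resize(dict(lq=np.zeros((121, 250, 3), dtype=np.float32)))
# >>> results['lq'].shape   # 121 -> 96, 250 -> 224
# (96, 224, 3)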
@PIPELINES.register_module()
class RandomRotation:
"""Rotate the image by a randomly-chosen angle, measured in degree.
Args:
keys (list[str]): The images to be rotated.
degrees (tuple[float] | tuple[int] | float | int): If it is a tuple,
it represents a range (min, max). If it is a float or int,
the range is constructed as (-degrees, degrees).
"""
def __init__(self, keys, degrees):
if isinstance(degrees, (int, float)):
if degrees < 0.0:
raise ValueError('Degrees must be positive if it is a number.')
else:
degrees = (-degrees, degrees)
elif not mmcv.is_tuple_of(degrees, (int, float)):
raise TypeError(f'Degrees must be float | int or tuple of float | '
'int, but got '
f'{type(degrees)}.')
self.keys = keys
self.degrees = degrees
def __call__(self, results):
angle = random.uniform(self.degrees[0], self.degrees[1])
for k in self.keys:
results[k] = mmcv.imrotate(results[k], angle)
if results[k].ndim == 2:
results[k] = np.expand_dims(results[k], axis=2)
results['degrees'] = self.degrees
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, degrees={self.degrees})')
return repr_str
@PIPELINES.register_module()
class Flip:
"""Flip the input data with a probability.
Reverse the order of elements in the given data with a specific direction.
The shape of the data is preserved, but the elements are reordered.
Required keys are the keys in attributes "keys", added or modified keys are
"flip", "flip_direction" and the keys in attributes "keys".
It also supports flipping a list of images with the same flip.
Args:
keys (list[str]): The images to be flipped.
        flip_ratio (float): The probability to flip the images.
direction (str): Flip images horizontally or vertically. Options are
"horizontal" | "vertical". Default: "horizontal".
"""
_directions = ['horizontal', 'vertical']
def __init__(self, keys, flip_ratio=0.5, direction='horizontal'):
if direction not in self._directions:
raise ValueError(f'Direction {direction} is not supported.'
f'Currently support ones are {self._directions}')
self.keys = keys
self.flip_ratio = flip_ratio
self.direction = direction
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
flip = np.random.random() < self.flip_ratio
if flip:
for key in self.keys:
if isinstance(results[key], list):
for v in results[key]:
mmcv.imflip_(v, self.direction)
else:
mmcv.imflip_(results[key], self.direction)
results['flip'] = flip
results['flip_direction'] = self.direction
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, flip_ratio={self.flip_ratio}, '
f'direction={self.direction})')
return repr_str
@PIPELINES.register_module()
class Pad:
"""Pad the images to align with network downsample factor for testing.
See `Reshape` for more explanation. `numpy.pad` is used for the pad
operation.
Required keys are the keys in attribute "keys", added or
modified keys are "test_trans" and the keys in attribute
"keys". All keys in "keys" should have the same shape. "test_trans" is used
to record the test transformation to align the input's shape.
Args:
keys (list[str]): The images to be padded.
ds_factor (int): Downsample factor of the network. The height and
weight will be padded to a multiple of ds_factor. Default: 32.
kwargs (option): any keyword arguments to be passed to `numpy.pad`.
"""
def __init__(self, keys, ds_factor=32, **kwargs):
self.keys = keys
self.ds_factor = ds_factor
self.kwargs = kwargs
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
h, w = results[self.keys[0]].shape[:2]
new_h = self.ds_factor * ((h - 1) // self.ds_factor + 1)
new_w = self.ds_factor * ((w - 1) // self.ds_factor + 1)
pad_h = new_h - h
pad_w = new_w - w
if new_h != h or new_w != w:
pad_width = ((0, pad_h), (0, pad_w), (0, 0))
for key in self.keys:
results[key] = np.pad(results[key],
pad_width[:results[key].ndim],
**self.kwargs)
results['pad'] = (pad_h, pad_w)
return results
def __repr__(self):
repr_str = self.__class__.__name__
kwargs_str = ', '.join(
[f'{key}={val}' for key, val in self.kwargs.items()])
repr_str += (f'(keys={self.keys}, ds_factor={self.ds_factor}, '
f'{kwargs_str})')
return repr_str
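# Illustrative usage (sketch added for clarity, not part of the original
# file): shapes are padded up to the next multiple of ds_factor.
# >>> pad = Pad(keys=['lq'], ds_factor=32, mode='constant')
# >>> results = pad(dict(lq=np.zeros((100, 100, 3))))
# >>> results['lq'].shape
# (128, 128, 3)
# >>> results['pad']   # (pad_h, pad_w)
# (28, 28)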
@PIPELINES.register_module()
class RandomAffine:
"""Apply random affine to input images.
This class is adopted from
https://github.com/pytorch/vision/blob/v0.5.0/torchvision/transforms/
transforms.py#L1015
It should be noted that in
https://github.com/Yaoyi-Li/GCA-Matting/blob/master/dataloader/
data_generator.py#L70
random flip is added. See explanation of `flip_ratio` below.
Required keys are the keys in attribute "keys", modified keys
are keys in attribute "keys".
Args:
keys (Sequence[str]): The images to be affined.
degrees (float | tuple[float]): Range of degrees to select from. If it
is a float instead of a tuple like (min, max), the range of degrees
will be (-degrees, +degrees). Set to 0 to deactivate rotations.
translate (tuple, optional): Tuple of maximum absolute fraction for
horizontal and vertical translations. For example translate=(a, b),
then horizontal shift is randomly sampled in the range
-img_width * a < dx < img_width * a and vertical shift is randomly
sampled in the range -img_height * b < dy < img_height * b.
Default: None.
scale (tuple, optional): Scaling factor interval, e.g (a, b), then
scale is randomly sampled from the range a <= scale <= b.
Default: None.
shear (float | tuple[float], optional): Range of shear degrees to
select from. If shear is a float, a shear parallel to the x axis
and a shear parallel to the y axis in the range (-shear, +shear)
will be applied. Else if shear is a tuple of 2 values, a x-axis
shear and a y-axis shear in (shear[0], shear[1]) will be applied.
Default: None.
flip_ratio (float, optional): Probability of the image being flipped.
The flips in horizontal direction and vertical direction are
independent. The image may be flipped in both directions.
Default: None.
"""
def __init__(self,
keys,
degrees,
translate=None,
scale=None,
shear=None,
flip_ratio=None):
self.keys = keys
if isinstance(degrees, numbers.Number):
assert degrees >= 0, ('If degrees is a single number, '
'it must be positive.')
self.degrees = (-degrees, degrees)
else:
assert isinstance(degrees, tuple) and len(degrees) == 2, \
'degrees should be a tuple and it must be of length 2.'
self.degrees = degrees
if translate is not None:
assert isinstance(translate, tuple) and len(translate) == 2, \
'translate should be a tuple and it must be of length 2.'
for t in translate:
assert 0.0 <= t <= 1.0, ('translation values should be '
'between 0 and 1.')
self.translate = translate
if scale is not None:
assert isinstance(scale, tuple) and len(scale) == 2, \
'scale should be a tuple and it must be of length 2.'
for s in scale:
assert s > 0, 'scale values should be positive.'
self.scale = scale
if shear is not None:
if isinstance(shear, numbers.Number):
assert shear >= 0, ('If shear is a single number, '
'it must be positive.')
self.shear = (-shear, shear)
else:
assert isinstance(shear, tuple) and len(shear) == 2, \
'shear should be a tuple and it must be of length 2.'
# X-Axis and Y-Axis shear with (min, max)
self.shear = shear
else:
self.shear = shear
if flip_ratio is not None:
assert isinstance(flip_ratio,
float), 'flip_ratio should be a float.'
self.flip_ratio = flip_ratio
else:
self.flip_ratio = 0
@staticmethod
def _get_params(degrees, translate, scale_ranges, shears, flip_ratio,
img_size):
"""Get parameters for affine transformation.
Returns:
paras (tuple): Params to be passed to the affine transformation.
"""
angle = np.random.uniform(degrees[0], degrees[1])
if translate is not None:
max_dx = translate[0] * img_size[0]
max_dy = translate[1] * img_size[1]
translations = (np.round(np.random.uniform(-max_dx, max_dx)),
np.round(np.random.uniform(-max_dy, max_dy)))
else:
translations = (0, 0)
if scale_ranges is not None:
scale = (np.random.uniform(scale_ranges[0], scale_ranges[1]),
np.random.uniform(scale_ranges[0], scale_ranges[1]))
else:
scale = (1.0, 1.0)
if shears is not None:
shear = np.random.uniform(shears[0], shears[1])
else:
shear = 0.0
# Because `flip` is used as a multiplier in line 479 and 480,
# so -1 stands for flip and 1 stands for no flip. Thus `flip`
# should be an 'inverse' flag as the result of the comparison.
# See https://github.com/open-mmlab/mmediting/pull/799 for more detail
flip = (np.random.rand(2) > flip_ratio).astype(np.int32) * 2 - 1
return angle, translations, scale, shear, flip
@staticmethod
def _get_inverse_affine_matrix(center, angle, translate, scale, shear,
flip):
"""Helper method to compute inverse matrix for affine transformation.
        As it is explained in PIL.Image.rotate, we need to compute the INVERSE
        of the affine transformation matrix: M = T * C * RSS * C^-1 where
T is translation matrix:
[1, 0, tx | 0, 1, ty | 0, 0, 1];
C is translation matrix to keep center:
[1, 0, cx | 0, 1, cy | 0, 0, 1];
RSS is rotation with scale and shear matrix.
It is different from the original function in torchvision.
1. The order are changed to flip -> scale -> rotation -> shear.
2. x and y have different scale factors.
RSS(shear, a, scale, f) =
[ cos(a + shear)*scale_x*f -sin(a + shear)*scale_y 0]
[ sin(a)*scale_x*f cos(a)*scale_y 0]
[ 0 0 1]
Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1.
"""
angle = math.radians(angle)
shear = math.radians(shear)
scale_x = 1.0 / scale[0] * flip[0]
scale_y = 1.0 / scale[1] * flip[1]
# Inverted rotation matrix with scale and shear
d = math.cos(angle + shear) * math.cos(angle) + math.sin(
angle + shear) * math.sin(angle)
matrix = [
math.cos(angle) * scale_x,
math.sin(angle + shear) * scale_x, 0, -math.sin(angle) * scale_y,
math.cos(angle + shear) * scale_y, 0
]
matrix = [m / d for m in matrix]
# Apply inverse of translation and of center translation:
# RSS^-1 * C^-1 * T^-1
matrix[2] += matrix[0] * (-center[0] - translate[0]) + matrix[1] * (
-center[1] - translate[1])
matrix[5] += matrix[3] * (-center[0] - translate[0]) + matrix[4] * (
-center[1] - translate[1])
# Apply center translation: C * RSS^-1 * C^-1 * T^-1
matrix[2] += center[0]
matrix[5] += center[1]
return matrix
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
h, w = results[self.keys[0]].shape[:2]
# if image is too small, set degree to 0 to reduce introduced dark area
if np.maximum(h, w) < 1024:
params = self._get_params((0, 0), self.translate, self.scale,
self.shear, self.flip_ratio, (h, w))
else:
params = self._get_params(self.degrees, self.translate, self.scale,
self.shear, self.flip_ratio, (h, w))
center = (w * 0.5 - 0.5, h * 0.5 - 0.5)
M = self._get_inverse_affine_matrix(center, *params)
M = np.array(M).reshape((2, 3))
for key in self.keys:
results[key] = cv2.warpAffine(
results[key],
M, (w, h),
flags=cv2.INTER_NEAREST + cv2.WARP_INVERSE_MAP)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, degrees={self.degrees}, '
f'translate={self.translate}, scale={self.scale}, '
f'shear={self.shear}, flip_ratio={self.flip_ratio})')
return repr_str
@PIPELINES.register_module()
class RandomJitter:
"""Randomly jitter the foreground in hsv space.
The jitter range of hue is adjustable while the jitter ranges of saturation
and value are adaptive to the images. Side effect: the "fg" image will be
converted to `np.float32`.
Required keys are "fg" and "alpha", modified key is "fg".
Args:
hue_range (float | tuple[float]): Range of hue jittering. If it is a
float instead of a tuple like (min, max), the range of hue
jittering will be (-hue_range, +hue_range). Default: 40.
"""
def __init__(self, hue_range=40):
if isinstance(hue_range, numbers.Number):
assert hue_range >= 0, ('If hue_range is a single number, '
'it must be positive.')
self.hue_range = (-hue_range, hue_range)
else:
assert isinstance(hue_range, tuple) and len(hue_range) == 2, \
'hue_range should be a tuple and it must be of length 2.'
self.hue_range = hue_range
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
fg, alpha = results['fg'], results['alpha']
# convert to HSV space;
# convert to float32 image to keep precision during space conversion.
fg = mmcv.bgr2hsv(fg.astype(np.float32) / 255)
# Hue noise
hue_jitter = np.random.randint(self.hue_range[0], self.hue_range[1])
fg[:, :, 0] = np.remainder(fg[:, :, 0] + hue_jitter, 360)
# Saturation noise
sat_mean = fg[:, :, 1][alpha > 0].mean()
# jitter saturation within range (1.1 - sat_mean) * [-0.1, 0.1]
sat_jitter = (1.1 - sat_mean) * (np.random.rand() * 0.2 - 0.1)
sat = fg[:, :, 1]
sat = np.abs(sat + sat_jitter)
sat[sat > 1] = 2 - sat[sat > 1]
fg[:, :, 1] = sat
# Value noise
val_mean = fg[:, :, 2][alpha > 0].mean()
# jitter value within range (1.1 - val_mean) * [-0.1, 0.1]
val_jitter = (1.1 - val_mean) * (np.random.rand() * 0.2 - 0.1)
val = fg[:, :, 2]
val = np.abs(val + val_jitter)
val[val > 1] = 2 - val[val > 1]
fg[:, :, 2] = val
# convert back to BGR space
fg = mmcv.hsv2bgr(fg)
results['fg'] = fg * 255
return results
def __repr__(self):
return self.__class__.__name__ + f'hue_range={self.hue_range}'
@PIPELINES.register_module()
class ColorJitter:
"""An interface for torch color jitter so that it can be invoked in
mmediting pipeline.
Randomly change the brightness, contrast and saturation of an image.
Modified keys are the attributes specified in "keys".
Args:
        keys (list[str]): The images to be jittered.
to_rgb (bool): Whether to convert channels from BGR to RGB.
Default: False.
"""
def __init__(self, keys, to_rgb=False, **kwargs):
assert keys, 'Keys should not be empty.'
self.keys = keys
self.to_rgb = to_rgb
self.transform = transforms.ColorJitter(**kwargs)
def __call__(self, results):
for k in self.keys:
if self.to_rgb:
results[k] = results[k][..., ::-1]
results[k] = Image.fromarray(results[k])
results[k] = self.transform(results[k])
results[k] = np.asarray(results[k])
            if self.to_rgb:
                # convert back so the output keeps the input channel order
                results[k] = results[k][..., ::-1]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, to_rgb={self.to_rgb})')
return repr_str
@PIPELINES.register_module()
class BinarizeImage:
"""Binarize image.
Args:
keys (Sequence[str]): The images to be binarized.
binary_thr (float): Threshold for binarization.
to_int (bool): If True, return image as int32, otherwise
return image as float32.
"""
def __init__(self, keys, binary_thr, to_int=False):
self.keys = keys
self.binary_thr = binary_thr
self.to_int = to_int
def _binarize(self, img):
type_ = np.float32 if not self.to_int else np.int32
img = (img[..., :] > self.binary_thr).astype(type_)
return img
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for k in self.keys:
results[k] = self._binarize(results[k])
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, binary_thr={self.binary_thr}, '
f'to_int={self.to_int})')
return repr_str
@PIPELINES.register_module()
class RandomMaskDilation:
"""Randomly dilate binary masks.
Args:
        keys (Sequence[str]): The masks to be dilated.
        binary_thr (float): Threshold for binarizing the dilated mask.
kernel_min (int): Min size of dilation kernel.
kernel_max (int): Max size of dilation kernel.
"""
def __init__(self, keys, binary_thr=0., kernel_min=9, kernel_max=49):
self.keys = keys
self.kernel_min = kernel_min
self.kernel_max = kernel_max
self.binary_thr = binary_thr
def _random_dilate(self, img):
kernel_size = np.random.randint(self.kernel_min, self.kernel_max + 1)
kernel = np.ones((kernel_size, kernel_size), dtype=np.uint8)
dilate_kernel_size = kernel_size
img_ = cv2.dilate(img, kernel, iterations=1)
img_ = (img_ > self.binary_thr).astype(np.float32)
return img_, dilate_kernel_size
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for k in self.keys:
results[k], d_kernel = self._random_dilate(results[k])
if len(results[k].shape) == 2:
results[k] = np.expand_dims(results[k], axis=2)
results[k + '_dilate_kernel_size'] = d_kernel
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, kernel_min={self.kernel_min}, '
f'kernel_max={self.kernel_max})')
return repr_str
@PIPELINES.register_module()
class RandomTransposeHW:
"""Randomly transpose images in H and W dimensions with a probability.
    (TransposeHW = horizontal flip + anti-clockwise rotation by 90 degrees)
When used with horizontal/vertical flips, it serves as a way of rotation
augmentation.
It also supports randomly transposing a list of images.
Required keys are the keys in attributes "keys", added or modified keys are
"transpose" and the keys in attributes "keys".
Args:
keys (list[str]): The images to be transposed.
        transpose_ratio (float): The probability to transpose the images.
"""
def __init__(self, keys, transpose_ratio=0.5):
self.keys = keys
self.transpose_ratio = transpose_ratio
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
transpose = np.random.random() < self.transpose_ratio
if transpose:
for key in self.keys:
if isinstance(results[key], list):
results[key] = [v.transpose(1, 0, 2) for v in results[key]]
else:
results[key] = results[key].transpose(1, 0, 2)
results['transpose'] = transpose
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f'(keys={self.keys}, transpose_ratio={self.transpose_ratio})')
return repr_str
@PIPELINES.register_module()
class GenerateFrameIndiceswithPadding:
"""Generate frame index with padding for REDS dataset and Vid4 dataset
during testing.
Required keys: lq_path, gt_path, key, num_input_frames, max_frame_num
Added or modified keys: lq_path, gt_path
Args:
padding (str): padding mode, one of
'replicate' | 'reflection' | 'reflection_circle' | 'circle'.
Examples: current_idx = 0, num_input_frames = 5
The generated frame indices under different padding mode:
replicate: [0, 0, 0, 1, 2]
reflection: [2, 1, 0, 1, 2]
reflection_circle: [4, 3, 0, 1, 2]
circle: [3, 4, 0, 1, 2]
filename_tmpl (str): Template for file name. Default: '{:08d}'.
"""
def __init__(self, padding, filename_tmpl='{:08d}'):
if padding not in ('replicate', 'reflection', 'reflection_circle',
'circle'):
raise ValueError(f'Wrong padding mode {padding}.'
'Should be "replicate", "reflection", '
'"reflection_circle", "circle"')
self.padding = padding
self.filename_tmpl = filename_tmpl
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
clip_name, frame_name = results['key'].split(os.sep)
current_idx = int(frame_name)
max_frame_num = results['max_frame_num'] - 1 # start from 0
num_input_frames = results['num_input_frames']
num_pad = num_input_frames // 2
frame_list = []
for i in range(current_idx - num_pad, current_idx + num_pad + 1):
if i < 0:
if self.padding == 'replicate':
pad_idx = 0
elif self.padding == 'reflection':
pad_idx = -i
elif self.padding == 'reflection_circle':
pad_idx = current_idx + num_pad - i
else:
pad_idx = num_input_frames + i
elif i > max_frame_num:
if self.padding == 'replicate':
pad_idx = max_frame_num
elif self.padding == 'reflection':
pad_idx = max_frame_num * 2 - i
elif self.padding == 'reflection_circle':
pad_idx = (current_idx - num_pad) - (i - max_frame_num)
else:
pad_idx = i - num_input_frames
else:
pad_idx = i
frame_list.append(pad_idx)
lq_path_root = results['lq_path']
gt_path_root = results['gt_path']
lq_paths = [
osp.join(lq_path_root, clip_name,
f'{self.filename_tmpl.format(idx)}.png')
for idx in frame_list
]
gt_paths = [osp.join(gt_path_root, clip_name, f'{frame_name}.png')]
results['lq_path'] = lq_paths
results['gt_path'] = gt_paths
return results
def __repr__(self):
repr_str = self.__class__.__name__ + f"(padding='{self.padding}')"
return repr_str
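# Illustrative usage (sketch added for clarity, not part of the original
# file; paths are hypothetical and the key uses os.sep as separator):
# >>> gen = GenerateFrameIndiceswithPadding(padding='reflection')
# >>> results = dict(lq_path='data/REDS/lq', gt_path='data/REDS/gt',
# ...                key=os.path.join('000', '00000000'),
# ...                num_input_frames=5, max_frame_num=100)
# >>> results = gen(results)
# >>> [p.split(os.sep)[-1] for p in results['lq_path']]
# ['00000002.png', '00000001.png', '00000000.png', '00000001.png',
#  '00000002.png']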
@PIPELINES.register_module()
class GenerateFrameIndices:
"""Generate frame index for REDS datasets. It also performs
temporal augmention with random interval.
Required keys: lq_path, gt_path, key, num_input_frames
Added or modified keys: lq_path, gt_path, interval, reverse
Args:
interval_list (list[int]): Interval list for temporal augmentation.
It will randomly pick an interval from interval_list and sample
frame index with the interval.
        frames_per_clip (int): Number of frames per clip. Default: 99 for
            REDS dataset.
"""
def __init__(self, interval_list, frames_per_clip=99):
self.interval_list = interval_list
self.frames_per_clip = frames_per_clip
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
clip_name, frame_name = results['key'].split(
os.sep) # key example: 000/00000000
center_frame_idx = int(frame_name)
num_half_frames = results['num_input_frames'] // 2
max_frame_num = results.get('max_frame_num', self.frames_per_clip + 1)
frames_per_clip = min(self.frames_per_clip, max_frame_num - 1)
interval = np.random.choice(self.interval_list)
# ensure not exceeding the borders
start_frame_idx = center_frame_idx - num_half_frames * interval
end_frame_idx = center_frame_idx + num_half_frames * interval
while (start_frame_idx < 0) or (end_frame_idx > frames_per_clip):
center_frame_idx = np.random.randint(0, frames_per_clip + 1)
start_frame_idx = center_frame_idx - num_half_frames * interval
end_frame_idx = center_frame_idx + num_half_frames * interval
frame_name = f'{center_frame_idx:08d}'
neighbor_list = list(
range(center_frame_idx - num_half_frames * interval,
center_frame_idx + num_half_frames * interval + 1, interval))
lq_path_root = results['lq_path']
gt_path_root = results['gt_path']
lq_path = [
osp.join(lq_path_root, clip_name, f'{v:08d}.png')
for v in neighbor_list
]
gt_path = [osp.join(gt_path_root, clip_name, f'{frame_name}.png')]
results['lq_path'] = lq_path
results['gt_path'] = gt_path
results['interval'] = interval
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(interval_list={self.interval_list}, '
f'frames_per_clip={self.frames_per_clip})')
return repr_str
@PIPELINES.register_module()
class TemporalReverse:
"""Reverse frame lists for temporal augmentation.
Required keys are the keys in attributes "lq" and "gt",
added or modified keys are "lq", "gt" and "reverse".
Args:
keys (list[str]): The frame lists to be reversed.
        reverse_ratio (float): The probability to reverse the frame lists.
Default: 0.5.
"""
def __init__(self, keys, reverse_ratio=0.5):
self.keys = keys
self.reverse_ratio = reverse_ratio
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
reverse = np.random.random() < self.reverse_ratio
if reverse:
for key in self.keys:
results[key].reverse()
results['reverse'] = reverse
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(keys={self.keys}, reverse_ratio={self.reverse_ratio})'
return repr_str
@PIPELINES.register_module()
class GenerateSegmentIndices:
"""Generate frame indices for a segment. It also performs temporal
    augmentation with random interval.
Required keys: lq_path, gt_path, key, num_input_frames, sequence_length
Added or modified keys: lq_path, gt_path, interval, reverse
Args:
interval_list (list[int]): Interval list for temporal augmentation.
It will randomly pick an interval from interval_list and sample
frame index with the interval.
start_idx (int): The index corresponds to the first frame in the
sequence. Default: 0.
filename_tmpl (str): Template for file name. Default: '{:08d}.png'.
"""
def __init__(self, interval_list, start_idx=0, filename_tmpl='{:08d}.png'):
self.interval_list = interval_list
self.filename_tmpl = filename_tmpl
self.start_idx = start_idx
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
# key example: '000', 'calendar' (sequence name)
clip_name = results['key']
interval = np.random.choice(self.interval_list)
self.sequence_length = results['sequence_length']
num_input_frames = results.get('num_input_frames',
self.sequence_length)
# randomly select a frame as start
if self.sequence_length - num_input_frames * interval < 0:
raise ValueError('The input sequence is not long enough to '
'support the current choice of [interval] or '
'[num_input_frames].')
start_frame_idx = np.random.randint(
0, self.sequence_length - num_input_frames * interval + 1)
end_frame_idx = start_frame_idx + num_input_frames * interval
neighbor_list = list(range(start_frame_idx, end_frame_idx, interval))
neighbor_list = [v + self.start_idx for v in neighbor_list]
# add the corresponding file paths
lq_path_root = results['lq_path']
gt_path_root = results['gt_path']
lq_path = [
osp.join(lq_path_root, clip_name, self.filename_tmpl.format(v))
for v in neighbor_list
]
gt_path = [
osp.join(gt_path_root, clip_name, self.filename_tmpl.format(v))
for v in neighbor_list
]
results['lq_path'] = lq_path
results['gt_path'] = gt_path
results['interval'] = interval
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(interval_list={self.interval_list})')
return repr_str
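# Illustrative sketch (hypothetical helper, not used by the class above) of
# the sampling rule in GenerateSegmentIndices.__call__: pick a random start
# so that the whole window of `num_input_frames` strided by `interval` fits
# inside the sequence.
def _demo_segment_indices(sequence_length=100,
                          num_input_frames=15,
                          interval=2,
                          start_idx=0):
    if sequence_length - num_input_frames * interval < 0:
        raise ValueError('The sequence is too short for this interval.')
    start = np.random.randint(
        0, sequence_length - num_input_frames * interval + 1)
    return [start_idx + start + i * interval for i in range(num_input_frames)]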
@PIPELINES.register_module()
class MirrorSequence:
"""Extend short sequences (e.g. Vimeo-90K) by mirroring the sequences
Given a sequence with N frames (x1, ..., xN), extend the sequence to
(x1, ..., xN, xN, ..., x1).
Args:
keys (list[str]): The frame lists to be extended.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for key in self.keys:
if isinstance(results[key], list):
results[key] = results[key] + results[key][::-1]
else:
                raise TypeError('The input must be of class list[np.ndarray]. '
f'Got {type(results[key])}.')
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys})')
return repr_str
@PIPELINES.register_module()
class CopyValues:
"""Copy the value of a source key to a destination key.
It does the following: results[dst_key] = results[src_key] for
(src_key, dst_key) in zip(src_keys, dst_keys).
Added keys are the keys in the attribute "dst_keys".
Args:
src_keys (list[str]): The source keys.
dst_keys (list[str]): The destination keys.
"""
def __init__(self, src_keys, dst_keys):
if not isinstance(src_keys, list) or not isinstance(dst_keys, list):
raise AssertionError('"src_keys" and "dst_keys" must be lists.')
if len(src_keys) != len(dst_keys):
raise ValueError('"src_keys" and "dst_keys" should have the same'
'number of elements.')
self.src_keys = src_keys
self.dst_keys = dst_keys
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict with a key added/modified.
"""
for (src_key, dst_key) in zip(self.src_keys, self.dst_keys):
results[dst_key] = copy.deepcopy(results[src_key])
return results
def __repr__(self):
repr_str = self.__class__.__name__
        repr_str += (f'(src_keys={self.src_keys}, '
                     f'dst_keys={self.dst_keys})')
return repr_str
@PIPELINES.register_module()
class Quantize:
"""Quantize and clip the image to [0, 1].
    It is assumed that the input has range [0, 1].
Modified keys are the attributes specified in "keys".
Args:
keys (list[str]): The keys whose values are clipped.
"""
def __init__(self, keys):
self.keys = keys
def _quantize_clip(self, input_):
is_single_image = False
if isinstance(input_, np.ndarray):
is_single_image = True
input_ = [input_]
# quantize and clip
input_ = [np.clip((v * 255.0).round(), 0, 255) / 255. for v in input_]
if is_single_image:
input_ = input_[0]
return input_
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict with the values of the specified keys are rounded
and clipped.
"""
for key in self.keys:
results[key] = self._quantize_clip(results[key])
return results
def __repr__(self):
return self.__class__.__name__
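# A minimal numeric check (illustrative only) of the quantisation above:
# values are scaled to [0, 255], rounded, clipped and scaled back, so every
# output is one of the 256 representable levels k / 255 in [0, 1].
def _demo_quantize():
    x = np.array([0.1234, 0.9999, 1.2, -0.1], dtype=np.float32)
    q = np.clip((x * 255.0).round(), 0, 255) / 255.
    # q -> [31/255, 1.0, 1.0, 0.0]
    return q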
@PIPELINES.register_module()
class UnsharpMasking:
"""Apply unsharp masking to an image or a sequence of images.
Args:
kernel_size (int): The kernel_size of the Gaussian kernel.
sigma (float): The standard deviation of the Gaussian.
weight (float): The weight of the "details" in the final output.
threshold (float): Pixel differences larger than this value are
regarded as "details".
keys (list[str]): The keys whose values are processed.
Added keys are "xxx_unsharp", where "xxx" are the attributes specified
in "keys".
"""
def __init__(self, kernel_size, sigma, weight, threshold, keys):
if kernel_size % 2 == 0:
raise ValueError('kernel_size must be an odd number, but '
f'got {kernel_size}.')
self.kernel_size = kernel_size
self.sigma = sigma
self.weight = weight
self.threshold = threshold
self.keys = keys
kernel = cv2.getGaussianKernel(kernel_size, sigma)
self.kernel = np.matmul(kernel, kernel.transpose())
def _unsharp_masking(self, imgs):
is_single_image = False
if isinstance(imgs, np.ndarray):
is_single_image = True
imgs = [imgs]
outputs = []
for img in imgs:
residue = img - cv2.filter2D(img, -1, self.kernel)
mask = np.float32(np.abs(residue) * 255 > self.threshold)
soft_mask = cv2.filter2D(mask, -1, self.kernel)
sharpened = np.clip(img + self.weight * residue, 0, 1)
outputs.append(soft_mask * sharpened + (1 - soft_mask) * img)
if is_single_image:
outputs = outputs[0]
return outputs
def __call__(self, results):
for key in self.keys:
results[f'{key}_unsharp'] = self._unsharp_masking(results[key])
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, kernel_size={self.kernel_size}, '
f'sigma={self.sigma}, weight={self.weight}, '
f'threshold={self.threshold})')
return repr_str
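# Illustrative sketch of the unsharp-masking formula used above on a single
# image in [0, 1]; the helper name and parameter values are examples only.
def _demo_unsharp(img, kernel_size=5, sigma=1.0, weight=0.5, threshold=10):
    kernel = cv2.getGaussianKernel(kernel_size, sigma)
    kernel = np.matmul(kernel, kernel.transpose())
    residue = img - cv2.filter2D(img, -1, kernel)         # the "details"
    mask = np.float32(np.abs(residue) * 255 > threshold)  # strong details
    soft_mask = cv2.filter2D(mask, -1, kernel)            # feathered mask
    sharpened = np.clip(img + weight * residue, 0, 1)
    return soft_mask * sharpened + (1 - soft_mask) * img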
| 46,436 | 35.081585 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/matting_aug.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import random
import cv2
import mmcv
import numpy as np
from mmcv.fileio import FileClient
from ..registry import PIPELINES
from .utils import adjust_gamma, random_choose_unknown
def add_gaussian_noise(img, mu, sigma):
img = img.astype(np.float32)
gauss_noise = np.random.normal(mu, sigma, img.shape)
noisy_img = img + gauss_noise
noisy_img = np.clip(noisy_img, 0, 255)
return noisy_img
@PIPELINES.register_module()
class MergeFgAndBg:
"""Composite foreground image and background image with alpha.
Required keys are "alpha", "fg" and "bg", added key is "merged".
"""
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
alpha = results['alpha'][..., None].astype(np.float32) / 255.
fg = results['fg']
bg = results['bg']
merged = fg * alpha + (1. - alpha) * bg
results['merged'] = merged
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
return repr_str
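# A minimal sketch (illustrative shapes and values) of the compositing
# equation used above: merged = alpha * fg + (1 - alpha) * bg.
def _demo_merge_fg_and_bg():
    fg = np.full((4, 4, 3), 200, dtype=np.float32)
    bg = np.full((4, 4, 3), 50, dtype=np.float32)
    alpha = np.full((4, 4, 1), 0.25, dtype=np.float32)  # already in [0, 1]
    merged = fg * alpha + (1. - alpha) * bg
    # every pixel equals 0.25 * 200 + 0.75 * 50 = 87.5
    return merged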
@PIPELINES.register_module()
class GenerateTrimap:
"""Using random erode/dilate to generate trimap from alpha matte.
Required key is "alpha", added key is "trimap".
Args:
kernel_size (int | tuple[int]): The range of random kernel_size of
erode/dilate; int indicates a fixed kernel_size. If `random` is set
to False and kernel_size is a tuple of length 2, then it will be
interpreted as (erode kernel_size, dilate kernel_size). It should
be noted that the kernel of the erosion and dilation has the same
height and width.
iterations (int | tuple[int], optional): The range of random iterations
of erode/dilate; int indicates a fixed iterations. If `random` is
set to False and iterations is a tuple of length 2, then it will be
interpreted as (erode iterations, dilate iterations). Default to 1.
random (bool, optional): Whether use random kernel_size and iterations
when generating trimap. See `kernel_size` and `iterations` for more
information.
"""
def __init__(self, kernel_size, iterations=1, random=True):
if isinstance(kernel_size, int):
kernel_size = kernel_size, kernel_size + 1
elif not mmcv.is_tuple_of(kernel_size, int) or len(kernel_size) != 2:
raise ValueError('kernel_size must be an int or a tuple of 2 int, '
f'but got {kernel_size}')
if isinstance(iterations, int):
iterations = iterations, iterations + 1
elif not mmcv.is_tuple_of(iterations, int) or len(iterations) != 2:
raise ValueError('iterations must be an int or a tuple of 2 int, '
f'but got {iterations}')
self.random = random
if self.random:
min_kernel, max_kernel = kernel_size
self.iterations = iterations
self.kernels = [
cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size))
for size in range(min_kernel, max_kernel)
]
else:
erode_ksize, dilate_ksize = kernel_size
self.iterations = iterations
self.kernels = [
cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
(erode_ksize, erode_ksize)),
cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
(dilate_ksize, dilate_ksize))
]
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
alpha = results['alpha']
if self.random:
kernel_num = len(self.kernels)
erode_kernel_idx = np.random.randint(kernel_num)
dilate_kernel_idx = np.random.randint(kernel_num)
min_iter, max_iter = self.iterations
erode_iter = np.random.randint(min_iter, max_iter)
dilate_iter = np.random.randint(min_iter, max_iter)
else:
erode_kernel_idx, dilate_kernel_idx = 0, 1
erode_iter, dilate_iter = self.iterations
eroded = cv2.erode(
alpha, self.kernels[erode_kernel_idx], iterations=erode_iter)
dilated = cv2.dilate(
alpha, self.kernels[dilate_kernel_idx], iterations=dilate_iter)
trimap = np.zeros_like(alpha)
trimap.fill(128)
trimap[eroded >= 255] = 255
trimap[dilated <= 0] = 0
results['trimap'] = trimap.astype(np.float32)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(kernels={self.kernels}, iterations={self.iterations}, '
f'random={self.random})')
return repr_str
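# Illustrative sketch (hypothetical helper and parameters) of the trimap
# rule used above: pixels still 255 after erosion are definite foreground,
# pixels 0 after dilation are definite background, the rest is unknown.
def _demo_trimap(alpha, ksize=5, iterations=2):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (ksize, ksize))
    eroded = cv2.erode(alpha, kernel, iterations=iterations)
    dilated = cv2.dilate(alpha, kernel, iterations=iterations)
    trimap = np.full_like(alpha, 128)
    trimap[eroded >= 255] = 255
    trimap[dilated <= 0] = 0
    return trimap.astype(np.float32)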
@PIPELINES.register_module()
class GenerateTrimapWithDistTransform:
"""Generate trimap with distance transform function.
Args:
dist_thr (int, optional): Distance threshold. Area with alpha value
between (0, 255) will be considered as initial unknown area. Then
area with distance to unknown area smaller than the distance
threshold will also be consider as unknown area. Defaults to 20.
random (bool, optional): If True, use random distance threshold from
[1, dist_thr). If False, use `dist_thr` as the distance threshold
directly. Defaults to True.
"""
def __init__(self, dist_thr=20, random=True):
if not (isinstance(dist_thr, int) and dist_thr >= 1):
            raise ValueError('dist_thr must be an int no less than 1, '
f'but got {dist_thr}')
self.dist_thr = dist_thr
self.random = random
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
alpha = results['alpha']
# image dilation implemented by Euclidean distance transform
known = (alpha == 0) | (alpha == 255)
dist_to_unknown = cv2.distanceTransform(
known.astype(np.uint8), cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
dist_thr = np.random.randint(
1, self.dist_thr) if self.random else self.dist_thr
unknown = dist_to_unknown <= dist_thr
trimap = (alpha == 255) * 255
trimap[unknown] = 128
results['trimap'] = trimap.astype(np.uint8)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(dist_thr={self.dist_thr}, random={self.random})'
return repr_str
@PIPELINES.register_module()
class CompositeFg:
"""Composite foreground with a random foreground.
This class composites the current training sample with additional data
randomly (could be from the same dataset). With probability 0.5, the sample
will be composited with a random sample from the specified directory.
The composition is performed as:
.. math::
fg_{new} = \\alpha_1 * fg_1 + (1 - \\alpha_1) * fg_2
\\alpha_{new} = 1 - (1 - \\alpha_1) * (1 - \\alpha_2)
where :math:`(fg_1, \\alpha_1)` is from the current sample and
:math:`(fg_2, \\alpha_2)` is the randomly loaded sample. With the above
composition, :math:`\\alpha_{new}` is still in `[0, 1]`.
Required keys are "alpha" and "fg". Modified keys are "alpha" and "fg".
Args:
fg_dirs (str | list[str]): Path of directories to load foreground
images from.
alpha_dirs (str | list[str]): Path of directories to load alpha mattes
from.
interpolation (str): Interpolation method of `mmcv.imresize` to resize
the randomly loaded images.
"""
def __init__(self,
fg_dirs,
alpha_dirs,
interpolation='nearest',
io_backend='disk',
**kwargs):
self.fg_dirs = fg_dirs if isinstance(fg_dirs, list) else [fg_dirs]
self.alpha_dirs = alpha_dirs if isinstance(alpha_dirs,
list) else [alpha_dirs]
self.interpolation = interpolation
self.fg_list, self.alpha_list = self._get_file_list(
self.fg_dirs, self.alpha_dirs)
self.io_backend = io_backend
self.file_client = None
self.kwargs = kwargs
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
fg = results['fg']
alpha = results['alpha'].astype(np.float32) / 255.
h, w = results['fg'].shape[:2]
# randomly select fg
if np.random.rand() < 0.5:
idx = np.random.randint(len(self.fg_list))
fg2_bytes = self.file_client.get(self.fg_list[idx])
fg2 = mmcv.imfrombytes(fg2_bytes)
alpha2_bytes = self.file_client.get(self.alpha_list[idx])
alpha2 = mmcv.imfrombytes(alpha2_bytes, flag='grayscale')
alpha2 = alpha2.astype(np.float32) / 255.
fg2 = mmcv.imresize(fg2, (w, h), interpolation=self.interpolation)
alpha2 = mmcv.imresize(
alpha2, (w, h), interpolation=self.interpolation)
# the overlap of two 50% transparency will be 75%
alpha_tmp = 1 - (1 - alpha) * (1 - alpha2)
# if the result alpha is all-one, then we avoid composition
if np.any(alpha_tmp < 1):
# composite fg with fg2
fg = fg.astype(np.float32) * alpha[..., None] \
+ fg2.astype(np.float32) * (1 - alpha[..., None])
alpha = alpha_tmp
                fg = fg.astype(np.uint8)
results['fg'] = fg
results['alpha'] = (alpha * 255).astype(np.uint8)
return results
@staticmethod
def _get_file_list(fg_dirs, alpha_dirs):
all_fg_list = list()
all_alpha_list = list()
for fg_dir, alpha_dir in zip(fg_dirs, alpha_dirs):
fg_list = sorted(mmcv.scandir(fg_dir))
alpha_list = sorted(mmcv.scandir(alpha_dir))
# we assume the file names for fg and alpha are the same
assert len(fg_list) == len(alpha_list), (
f'{fg_dir} and {alpha_dir} should have the same number of '
                f'images ({len(fg_list)} differs from {len(alpha_list)})')
fg_list = [osp.join(fg_dir, fg) for fg in fg_list]
alpha_list = [osp.join(alpha_dir, alpha) for alpha in alpha_list]
all_fg_list.extend(fg_list)
all_alpha_list.extend(alpha_list)
return all_fg_list, all_alpha_list
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(fg_dirs={self.fg_dirs}, alpha_dirs={self.alpha_dirs}, '
f"interpolation='{self.interpolation}')")
return repr_str
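# A small numeric check (illustrative only) of the composition rule in the
# docstring above: alpha_new = 1 - (1 - alpha_1) * (1 - alpha_2). Two
# half-transparent layers overlap to 0.75, and the result stays in [0, 1].
def _demo_alpha_composition(alpha1=0.5, alpha2=0.5):
    alpha_new = 1 - (1 - alpha1) * (1 - alpha2)  # 1 - 0.5 * 0.5 = 0.75
    return alpha_new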
@PIPELINES.register_module()
class GenerateSeg:
"""Generate segmentation mask from alpha matte.
Args:
kernel_size (int, optional): Kernel size for both erosion and
dilation. The kernel will have the same height and width.
Defaults to 5.
erode_iter_range (tuple, optional): Iteration of erosion.
Defaults to (10, 20).
dilate_iter_range (tuple, optional): Iteration of dilation.
Defaults to (15, 30).
num_holes_range (tuple, optional): Range of number of holes to
randomly select from. Defaults to (0, 3).
hole_sizes (list, optional): List of (h, w) to be selected as the
size of the rectangle hole.
Defaults to [(15, 15), (25, 25), (35, 35), (45, 45)].
blur_ksizes (list, optional): List of (h, w) to be selected as the
kernel_size of the gaussian blur.
Defaults to [(21, 21), (31, 31), (41, 41)].
"""
def __init__(self,
kernel_size=5,
erode_iter_range=(10, 20),
dilate_iter_range=(15, 30),
num_holes_range=(0, 3),
hole_sizes=[(15, 15), (25, 25), (35, 35), (45, 45)],
blur_ksizes=[(21, 21), (31, 31), (41, 41)]):
self.kernel_size = kernel_size
self.erode_iter_range = erode_iter_range
self.dilate_iter_range = dilate_iter_range
self.num_holes_range = num_holes_range
self.hole_sizes = hole_sizes
self.blur_ksizes = blur_ksizes
@staticmethod
def _crop_hole(img, start_point, hole_size):
"""Create a all-zero rectangle hole in the image.
Args:
img (np.ndarray): Source image.
start_point (tuple[int]): The top-left point of the rectangle.
hole_size (tuple[int]): The height and width of the rectangle hole.
Return:
np.ndarray: The cropped image.
"""
top, left = start_point
bottom = top + hole_size[0]
right = left + hole_size[1]
        height, width = img.shape[:2]
        if top < 0 or bottom > height or left < 0 or right > width:
            raise ValueError(f'crop area {(left, top, right, bottom)} exceeds '
                             f'image size {(height, width)}')
img[top:bottom, left:right] = 0
return img
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
alpha = results['alpha']
trimap = results['trimap']
        # generate segmentation mask
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
(self.kernel_size,
self.kernel_size))
seg = (alpha > 0.5).astype(np.float32)
seg = cv2.erode(
seg, kernel, iterations=np.random.randint(*self.erode_iter_range))
seg = cv2.dilate(
seg, kernel, iterations=np.random.randint(*self.dilate_iter_range))
# generate some holes in segmentation mask
num_holes = np.random.randint(*self.num_holes_range)
for _ in range(num_holes):
hole_size = random.choice(self.hole_sizes)
unknown = trimap == 128
start_point = random_choose_unknown(unknown, hole_size)
seg = self._crop_hole(seg, start_point, hole_size)
trimap = self._crop_hole(trimap, start_point, hole_size)
# perform gaussian blur to segmentation mask
seg = cv2.GaussianBlur(seg, random.choice(self.blur_ksizes), 0)
results['seg'] = seg.astype(np.uint8)
results['num_holes'] = num_holes
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f'(kernel_size={self.kernel_size}, '
f'erode_iter_range={self.erode_iter_range}, '
f'dilate_iter_range={self.dilate_iter_range}, '
f'num_holes_range={self.num_holes_range}, '
            f'hole_sizes={self.hole_sizes}, blur_ksizes={self.blur_ksizes})')
return repr_str
@PIPELINES.register_module()
class PerturbBg:
"""Randomly add gaussian noise or gamma change to background image.
Required key is "bg", added key is "noisy_bg".
Args:
gamma_ratio (float, optional): The probability to use gamma correction
instead of gaussian noise. Defaults to 0.6.
"""
def __init__(self, gamma_ratio=0.6):
if gamma_ratio < 0 or gamma_ratio > 1:
            raise ValueError('gamma_ratio must be a float in [0, 1], '
f'but got {gamma_ratio}')
self.gamma_ratio = gamma_ratio
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
if np.random.rand() >= self.gamma_ratio:
# generate gaussian noise with random gaussian N([-7, 7), [2, 6))
mu = np.random.randint(-7, 7)
sigma = np.random.randint(2, 6)
results['noisy_bg'] = add_gaussian_noise(results['bg'], mu, sigma)
else:
# adjust gamma in a range of N(1, 0.12)
gamma = np.random.normal(1, 0.12)
results['noisy_bg'] = adjust_gamma(results['bg'], gamma)
return results
def __repr__(self):
return self.__class__.__name__ + f'(gamma_ratio={self.gamma_ratio})'
@PIPELINES.register_module()
class GenerateSoftSeg:
"""Generate soft segmentation mask from input segmentation mask.
Required key is "seg", added key is "soft_seg".
Args:
fg_thr (float, optional): Threshold of the foreground in the normalized
input segmentation mask. Defaults to 0.2.
border_width (int, optional): Width of border to be padded to the
bottom of the mask. Defaults to 25.
erode_ksize (int, optional): Fixed kernel size of the erosion.
Defaults to 5.
dilate_ksize (int, optional): Fixed kernel size of the dilation.
Defaults to 5.
erode_iter_range (tuple, optional): Iteration of erosion.
Defaults to (10, 20).
dilate_iter_range (tuple, optional): Iteration of dilation.
Defaults to (3, 7).
blur_ksizes (list, optional): List of (h, w) to be selected as the
kernel_size of the gaussian blur.
Defaults to [(21, 21), (31, 31), (41, 41)].
"""
def __init__(self,
fg_thr=0.2,
border_width=25,
erode_ksize=3,
dilate_ksize=5,
erode_iter_range=(10, 20),
dilate_iter_range=(3, 7),
blur_ksizes=[(21, 21), (31, 31), (41, 41)]):
if not isinstance(fg_thr, float):
raise TypeError(f'fg_thr must be a float, but got {type(fg_thr)}')
if not isinstance(border_width, int):
raise TypeError(
f'border_width must be an int, but got {type(border_width)}')
if not isinstance(erode_ksize, int):
raise TypeError(
f'erode_ksize must be an int, but got {type(erode_ksize)}')
if not isinstance(dilate_ksize, int):
raise TypeError(
f'dilate_ksize must be an int, but got {type(dilate_ksize)}')
if (not mmcv.is_tuple_of(erode_iter_range, int)
or len(erode_iter_range) != 2):
raise TypeError('erode_iter_range must be a tuple of 2 int, '
f'but got {erode_iter_range}')
if (not mmcv.is_tuple_of(dilate_iter_range, int)
or len(dilate_iter_range) != 2):
raise TypeError('dilate_iter_range must be a tuple of 2 int, '
f'but got {dilate_iter_range}')
if not mmcv.is_list_of(blur_ksizes, tuple):
raise TypeError(
f'blur_ksizes must be a list of tuple, but got {blur_ksizes}')
self.fg_thr = fg_thr
self.border_width = border_width
self.erode_ksize = erode_ksize
self.dilate_ksize = dilate_ksize
self.erode_iter_range = erode_iter_range
self.dilate_iter_range = dilate_iter_range
self.blur_ksizes = blur_ksizes
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
seg = results['seg'].astype(np.float32) / 255
height, _ = seg.shape[:2]
seg[seg > self.fg_thr] = 1
# to align with the original repo, pad the bottom of the mask
seg = cv2.copyMakeBorder(seg, 0, self.border_width, 0, 0,
cv2.BORDER_REPLICATE)
# erode/dilate segmentation mask
erode_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
(self.erode_ksize,
self.erode_ksize))
dilate_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
(self.dilate_ksize,
self.dilate_ksize))
seg = cv2.erode(
seg,
erode_kernel,
iterations=np.random.randint(*self.erode_iter_range))
seg = cv2.dilate(
seg,
dilate_kernel,
iterations=np.random.randint(*self.dilate_iter_range))
# perform gaussian blur to segmentation mask
seg = cv2.GaussianBlur(seg, random.choice(self.blur_ksizes), 0)
# remove the padded rows
seg = (seg * 255).astype(np.uint8)
seg = np.delete(seg, range(height, height + self.border_width), 0)
results['soft_seg'] = seg
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(fg_thr={self.fg_thr}, '
f'border_width={self.border_width}, '
f'erode_ksize={self.erode_ksize}, '
f'dilate_ksize={self.dilate_ksize}, '
f'erode_iter_range={self.erode_iter_range}, '
f'dilate_iter_range={self.dilate_iter_range}, '
f'blur_ksizes={self.blur_ksizes})')
return repr_str
@PIPELINES.register_module()
class TransformTrimap:
"""Transform trimap into two-channel and six-channel.
This class will generate a two-channel trimap composed of definite
foreground and background masks and encode it into a six-channel trimap
using Gaussian blurs of the generated two-channel trimap at three
different scales. The transformed trimap has 6 channels.
Required key is "trimap", added key is "transformed_trimap" and
"two_channel_trimap".
Adopted from the following repository:
https://github.com/MarcoForte/FBA_Matting/blob/master/networks/transforms.py.
"""
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
trimap = results['trimap']
assert len(trimap.shape) == 2
h, w = trimap.shape[:2]
# generate two-channel trimap
trimap2 = np.zeros((h, w, 2), dtype=np.uint8)
trimap2[trimap == 0, 0] = 255
trimap2[trimap == 255, 1] = 255
trimap_trans = np.zeros((h, w, 6), dtype=np.float32)
factor = np.array([[[0.02, 0.08, 0.16]]], dtype=np.float32)
for k in range(2):
if np.any(trimap2[:, :, k]):
dt_mask = -cv2.distanceTransform(255 - trimap2[:, :, k],
cv2.DIST_L2, 0)**2
dt_mask = dt_mask[..., None]
L = 320
trimap_trans[..., 3 * k:3 * k +
3] = np.exp(dt_mask / (2 * ((factor * L)**2)))
results['transformed_trimap'] = trimap_trans
results['two_channel_trimap'] = trimap2
return results
def __repr__(self):
repr_str = self.__class__.__name__
return repr_str
| 24,538 | 37.766193 | 81 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/compose.py | # Copyright (c) OpenMMLab. All rights reserved.
from collections.abc import Sequence
from mmcv.utils import build_from_cfg
from ..registry import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose a data pipeline with a sequence of transforms.
Args:
transforms (list[dict | callable]):
Either config dicts of transforms or transform objects.
"""
def __init__(self, transforms):
assert isinstance(transforms, Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError(f'transform must be callable or a dict, '
f'but got {type(transform)}')
def __call__(self, data):
"""Call function.
Args:
data (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += f' {t}'
format_string += '\n)'
return format_string
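# Illustrative sketch of how a pipeline is typically assembled from config
# dicts; the transform names below are registered elsewhere in this package
# and the keys/values are example settings, not a fixed recipe.
def _demo_build_pipeline():
    pipeline = Compose([
        dict(type='TemporalReverse', keys=['lq', 'gt'], reverse_ratio=0.5),
        dict(type='MirrorSequence', keys=['lq', 'gt']),
    ])
    # `pipeline(results)` applies each transform in order and returns None
    # as soon as any transform returns None.
    return pipeline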
| 1,620 | 29.018519 | 73 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/random_degradations.py | # Copyright (c) OpenMMLab. All rights reserved.
import io
import logging
import random
import cv2
import numpy as np
from mmedit.datasets.pipelines import blur_kernels as blur_kernels
from ..registry import PIPELINES
try:
import av
has_av = True
except ImportError:
has_av = False
@PIPELINES.register_module()
class RandomBlur:
"""Apply random blur to the input.
    Modified keys are the attributes specified in "keys".
Args:
params (dict): A dictionary specifying the degradation settings.
keys (list[str]): A list specifying the keys whose values are
modified.
"""
def __init__(self, params, keys):
self.keys = keys
self.params = params
def get_kernel(self, num_kernels):
kernel_type = np.random.choice(
self.params['kernel_list'], p=self.params['kernel_prob'])
kernel_size = random.choice(self.params['kernel_size'])
sigma_x_range = self.params.get('sigma_x', [0, 0])
sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
sigma_x_step = self.params.get('sigma_x_step', 0)
sigma_y_range = self.params.get('sigma_y', [0, 0])
sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
sigma_y_step = self.params.get('sigma_y_step', 0)
rotate_angle_range = self.params.get('rotate_angle', [-np.pi, np.pi])
rotate_angle = np.random.uniform(rotate_angle_range[0],
rotate_angle_range[1])
rotate_angle_step = self.params.get('rotate_angle_step', 0)
beta_gau_range = self.params.get('beta_gaussian', [0.5, 4])
beta_gau = np.random.uniform(beta_gau_range[0], beta_gau_range[1])
beta_gau_step = self.params.get('beta_gaussian_step', 0)
beta_pla_range = self.params.get('beta_plateau', [1, 2])
beta_pla = np.random.uniform(beta_pla_range[0], beta_pla_range[1])
beta_pla_step = self.params.get('beta_plateau_step', 0)
omega_range = self.params.get('omega', None)
omega_step = self.params.get('omega_step', 0)
if omega_range is None: # follow Real-ESRGAN settings if not specified
if kernel_size < 13:
omega_range = [np.pi / 3., np.pi]
else:
omega_range = [np.pi / 5., np.pi]
omega = np.random.uniform(omega_range[0], omega_range[1])
# determine blurring kernel
kernels = []
for _ in range(0, num_kernels):
kernel = blur_kernels.random_mixed_kernels(
[kernel_type],
[1],
kernel_size,
[sigma_x, sigma_x],
[sigma_y, sigma_y],
[rotate_angle, rotate_angle],
[beta_gau, beta_gau],
[beta_pla, beta_pla],
[omega, omega],
None,
)
kernels.append(kernel)
# update kernel parameters
sigma_x += np.random.uniform(-sigma_x_step, sigma_x_step)
sigma_y += np.random.uniform(-sigma_y_step, sigma_y_step)
rotate_angle += np.random.uniform(-rotate_angle_step,
rotate_angle_step)
beta_gau += np.random.uniform(-beta_gau_step, beta_gau_step)
beta_pla += np.random.uniform(-beta_pla_step, beta_pla_step)
omega += np.random.uniform(-omega_step, omega_step)
sigma_x = np.clip(sigma_x, sigma_x_range[0], sigma_x_range[1])
sigma_y = np.clip(sigma_y, sigma_y_range[0], sigma_y_range[1])
rotate_angle = np.clip(rotate_angle, rotate_angle_range[0],
rotate_angle_range[1])
beta_gau = np.clip(beta_gau, beta_gau_range[0], beta_gau_range[1])
beta_pla = np.clip(beta_pla, beta_pla_range[0], beta_pla_range[1])
omega = np.clip(omega, omega_range[0], omega_range[1])
return kernels
def _apply_random_blur(self, imgs):
is_single_image = False
if isinstance(imgs, np.ndarray):
is_single_image = True
imgs = [imgs]
# get kernel and blur the input
kernels = self.get_kernel(num_kernels=len(imgs))
imgs = [
cv2.filter2D(img, -1, kernel)
for img, kernel in zip(imgs, kernels)
]
if is_single_image:
imgs = imgs[0]
return imgs
def __call__(self, results):
if np.random.uniform() > self.params.get('prob', 1):
return results
for key in self.keys:
results[key] = self._apply_random_blur(results[key])
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(params={self.params}, keys={self.keys})')
return repr_str
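# Illustrative sketch (hypothetical parameter values) of the per-frame
# blurring performed above: one kernel is drawn per frame and applied with
# cv2.filter2D, so the blur drifts slowly across a sequence via the *_step
# parameters.
def _demo_blur_sequence(imgs):
    params = dict(
        kernel_size=[7, 9],
        kernel_list=['iso', 'aniso'],
        kernel_prob=[0.5, 0.5],
        sigma_x=[0.2, 3],
        sigma_y=[0.2, 3],
        rotate_angle=[-np.pi, np.pi])
    blur = RandomBlur(params=params, keys=['lq'])
    kernels = blur.get_kernel(num_kernels=len(imgs))
    return [cv2.filter2D(img, -1, k) for img, k in zip(imgs, kernels)]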
@PIPELINES.register_module()
class RandomResize:
"""Randomly resize the input.
    Modified keys are the attributes specified in "keys".
Args:
params (dict): A dictionary specifying the degradation settings.
keys (list[str]): A list specifying the keys whose values are
modified.
"""
def __init__(self, params, keys):
self.keys = keys
self.params = params
self.resize_dict = dict(
bilinear=cv2.INTER_LINEAR,
bicubic=cv2.INTER_CUBIC,
area=cv2.INTER_AREA,
lanczos=cv2.INTER_LANCZOS4)
def _random_resize(self, imgs):
is_single_image = False
if isinstance(imgs, np.ndarray):
is_single_image = True
imgs = [imgs]
h, w = imgs[0].shape[:2]
resize_opt = self.params['resize_opt']
resize_prob = self.params['resize_prob']
resize_opt = np.random.choice(resize_opt, p=resize_prob).lower()
if resize_opt not in self.resize_dict:
raise NotImplementedError(f'resize_opt [{resize_opt}] is not '
'implemented')
resize_opt = self.resize_dict[resize_opt]
resize_step = self.params.get('resize_step', 0)
# determine the target size, if not provided
target_size = self.params.get('target_size', None)
if target_size is None:
resize_mode = np.random.choice(['up', 'down', 'keep'],
p=self.params['resize_mode_prob'])
resize_scale = self.params['resize_scale']
if resize_mode == 'up':
scale_factor = np.random.uniform(1, resize_scale[1])
elif resize_mode == 'down':
scale_factor = np.random.uniform(resize_scale[0], 1)
else:
scale_factor = 1
# determine output size
h_out, w_out = h * scale_factor, w * scale_factor
if self.params.get('is_size_even', False):
h_out, w_out = 2 * (h_out // 2), 2 * (w_out // 2)
target_size = (int(h_out), int(w_out))
else:
resize_step = 0
# resize the input
if resize_step == 0: # same target_size for all input images
outputs = [
cv2.resize(img, target_size[::-1], interpolation=resize_opt)
for img in imgs
]
else: # different target_size for each input image
outputs = []
for img in imgs:
img = cv2.resize(
img, target_size[::-1], interpolation=resize_opt)
outputs.append(img)
# update scale
scale_factor += np.random.uniform(-resize_step, resize_step)
scale_factor = np.clip(scale_factor, resize_scale[0],
resize_scale[1])
# determine output size
h_out, w_out = h * scale_factor, w * scale_factor
if self.params.get('is_size_even', False):
h_out, w_out = 2 * (h_out // 2), 2 * (w_out // 2)
target_size = (int(h_out), int(w_out))
if is_single_image:
outputs = outputs[0]
return outputs
def __call__(self, results):
if np.random.uniform() > self.params.get('prob', 1):
return results
for key in self.keys:
results[key] = self._random_resize(results[key])
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(params={self.params}, keys={self.keys})')
return repr_str
@PIPELINES.register_module()
class RandomNoise:
"""Apply random noise to the input.
Currently support Gaussian noise and Poisson noise.
    Modified keys are the attributes specified in "keys".
Args:
params (dict): A dictionary specifying the degradation settings.
keys (list[str]): A list specifying the keys whose values are
modified.
"""
def __init__(self, params, keys):
self.keys = keys
self.params = params
def _apply_gaussian_noise(self, imgs):
sigma_range = self.params['gaussian_sigma']
sigma = np.random.uniform(sigma_range[0], sigma_range[1]) / 255.
sigma_step = self.params.get('gaussian_sigma_step', 0)
gray_noise_prob = self.params['gaussian_gray_noise_prob']
is_gray_noise = np.random.uniform() < gray_noise_prob
outputs = []
for img in imgs:
noise = np.float32(np.random.randn(*(img.shape))) * sigma
if is_gray_noise:
noise = noise[:, :, :1]
outputs.append(img + noise)
# update noise level
sigma += np.random.uniform(-sigma_step, sigma_step) / 255.
sigma = np.clip(sigma, sigma_range[0] / 255.,
sigma_range[1] / 255.)
return outputs
def _apply_poisson_noise(self, imgs):
scale_range = self.params['poisson_scale']
scale = np.random.uniform(scale_range[0], scale_range[1])
scale_step = self.params.get('poisson_scale_step', 0)
gray_noise_prob = self.params['poisson_gray_noise_prob']
is_gray_noise = np.random.uniform() < gray_noise_prob
outputs = []
for img in imgs:
noise = img.copy()
if is_gray_noise:
noise = cv2.cvtColor(noise[..., [2, 1, 0]], cv2.COLOR_BGR2GRAY)
noise = noise[..., np.newaxis]
noise = np.clip((noise * 255.0).round(), 0, 255) / 255.
unique_val = 2**np.ceil(np.log2(len(np.unique(noise))))
noise = np.random.poisson(noise * unique_val) / unique_val - noise
outputs.append(img + noise * scale)
# update noise level
scale += np.random.uniform(-scale_step, scale_step)
scale = np.clip(scale, scale_range[0], scale_range[1])
return outputs
def _apply_random_noise(self, imgs):
noise_type = np.random.choice(
self.params['noise_type'], p=self.params['noise_prob'])
is_single_image = False
if isinstance(imgs, np.ndarray):
is_single_image = True
imgs = [imgs]
if noise_type.lower() == 'gaussian':
imgs = self._apply_gaussian_noise(imgs)
elif noise_type.lower() == 'poisson':
imgs = self._apply_poisson_noise(imgs)
else:
raise NotImplementedError(f'"noise_type" [{noise_type}] is '
'not implemented.')
if is_single_image:
imgs = imgs[0]
return imgs
def __call__(self, results):
if np.random.uniform() > self.params.get('prob', 1):
return results
for key in self.keys:
results[key] = self._apply_random_noise(results[key])
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(params={self.params}, keys={self.keys})')
return repr_str
@PIPELINES.register_module()
class RandomJPEGCompression:
"""Apply random JPEG compression to the input.
    Modified keys are the attributes specified in "keys".
Args:
params (dict): A dictionary specifying the degradation settings.
keys (list[str]): A list specifying the keys whose values are
modified.
"""
def __init__(self, params, keys):
self.keys = keys
self.params = params
def _apply_random_compression(self, imgs):
is_single_image = False
if isinstance(imgs, np.ndarray):
is_single_image = True
imgs = [imgs]
# determine initial compression level and the step size
quality = self.params['quality']
quality_step = self.params.get('quality_step', 0)
jpeg_param = round(np.random.uniform(quality[0], quality[1]))
# apply jpeg compression
outputs = []
for img in imgs:
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_param]
_, img_encoded = cv2.imencode('.jpg', img * 255., encode_param)
outputs.append(np.float32(cv2.imdecode(img_encoded, 1)) / 255.)
# update compression level
jpeg_param += np.random.uniform(-quality_step, quality_step)
jpeg_param = round(np.clip(jpeg_param, quality[0], quality[1]))
if is_single_image:
outputs = outputs[0]
return outputs
def __call__(self, results):
if np.random.uniform() > self.params.get('prob', 1):
return results
for key in self.keys:
results[key] = self._apply_random_compression(results[key])
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(params={self.params}, keys={self.keys})')
return repr_str
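# A minimal sketch (illustrative only) of the JPEG round-trip used above on
# a single float image in [0, 1]; quality=50 is an arbitrary example value.
def _demo_jpeg_roundtrip(img, quality=50):
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
    _, img_encoded = cv2.imencode('.jpg', img * 255., encode_param)
    return np.float32(cv2.imdecode(img_encoded, 1)) / 255.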
@PIPELINES.register_module()
class RandomVideoCompression:
"""Apply random video compression to the input.
    Modified keys are the attributes specified in "keys".
Args:
params (dict): A dictionary specifying the degradation settings.
keys (list[str]): A list specifying the keys whose values are
modified.
"""
def __init__(self, params, keys):
assert has_av, 'Please install av to use video compression.'
self.keys = keys
self.params = params
logging.getLogger('libav').setLevel(50)
def _apply_random_compression(self, imgs):
codec = random.choices(self.params['codec'],
self.params['codec_prob'])[0]
bitrate = self.params['bitrate']
bitrate = np.random.randint(bitrate[0], bitrate[1] + 1)
buf = io.BytesIO()
with av.open(buf, 'w', 'mp4') as container:
stream = container.add_stream(codec, rate=1)
stream.height = imgs[0].shape[0]
stream.width = imgs[0].shape[1]
stream.pix_fmt = 'yuv420p'
stream.bit_rate = bitrate
for img in imgs:
img = (255 * img).astype(np.uint8)
frame = av.VideoFrame.from_ndarray(img, format='rgb24')
frame.pict_type = 'NONE'
for packet in stream.encode(frame):
container.mux(packet)
# Flush stream
for packet in stream.encode():
container.mux(packet)
outputs = []
with av.open(buf, 'r', 'mp4') as container:
if container.streams.video:
for frame in container.decode(**{'video': 0}):
outputs.append(
frame.to_rgb().to_ndarray().astype(np.float32) / 255.)
return outputs
def __call__(self, results):
if np.random.uniform() > self.params.get('prob', 1):
return results
for key in self.keys:
results[key] = self._apply_random_compression(results[key])
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(params={self.params}, keys={self.keys})')
return repr_str
allowed_degradations = {
'RandomBlur': RandomBlur,
'RandomResize': RandomResize,
'RandomNoise': RandomNoise,
'RandomJPEGCompression': RandomJPEGCompression,
'RandomVideoCompression': RandomVideoCompression,
}
@PIPELINES.register_module()
class DegradationsWithShuffle:
"""Apply random degradations to input, with degradations being shuffled.
Degradation groups are supported. The order of degradations within the same
group is preserved. For example, if we have degradations = [a, b, [c, d]]
and shuffle_idx = None, then the possible orders are
::
[a, b, [c, d]]
[a, [c, d], b]
[b, a, [c, d]]
[b, [c, d], a]
[[c, d], a, b]
[[c, d], b, a]
    Modified keys are the attributes specified in "keys".
Args:
degradations (list[dict]): The list of degradations.
keys (list[str]): A list specifying the keys whose values are
modified.
shuffle_idx (list | None, optional): The degradations corresponding to
these indices are shuffled. If None, all degradations are shuffled.
"""
def __init__(self, degradations, keys, shuffle_idx=None):
self.keys = keys
self.degradations = self._build_degradations(degradations)
if shuffle_idx is None:
self.shuffle_idx = list(range(0, len(degradations)))
else:
self.shuffle_idx = shuffle_idx
def _build_degradations(self, degradations):
for i, degradation in enumerate(degradations):
if isinstance(degradation, (list, tuple)):
degradations[i] = self._build_degradations(degradation)
else:
degradation_ = allowed_degradations[degradation['type']]
degradations[i] = degradation_(degradation['params'],
self.keys)
return degradations
def __call__(self, results):
# shuffle degradations
if len(self.shuffle_idx) > 0:
shuffle_list = [self.degradations[i] for i in self.shuffle_idx]
np.random.shuffle(shuffle_list)
for i, idx in enumerate(self.shuffle_idx):
self.degradations[idx] = shuffle_list[i]
# apply degradations to input
for degradation in self.degradations:
if isinstance(degradation, (tuple, list)):
                for subdegradation in degradation:
                    results = subdegradation(results)
else:
results = degradation(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(degradations={self.degradations}, '
f'keys={self.keys}, '
f'shuffle_idx={self.shuffle_idx})')
return repr_str
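# Illustrative sketch of the shuffling rule in __call__ above, reduced to
# plain lists: nested groups move as a unit, so their internal order is
# always preserved.
def _demo_group_shuffle():
    degradations = ['a', 'b', ['c', 'd']]  # 'c' and 'd' form one group
    shuffled = list(degradations)
    np.random.shuffle(shuffled)
    # e.g. ['b', ['c', 'd'], 'a'] -- 'c' always directly precedes 'd'
    return shuffled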
| 18,941 | 33.007181 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import logging
import numpy as np
import torch
from mmcv.utils import print_log
_integer_types = (
np.byte,
np.ubyte, # 8 bits
np.short,
np.ushort, # 16 bits
np.intc,
np.uintc, # 16 or 32 or 64 bits
np.int_,
np.uint, # 32 or 64 bits
np.longlong,
np.ulonglong) # 64 bits
_integer_ranges = {
t: (np.iinfo(t).min, np.iinfo(t).max)
for t in _integer_types
}
dtype_range = {
np.bool_: (False, True),
np.bool8: (False, True),
np.float16: (-1, 1),
np.float32: (-1, 1),
np.float64: (-1, 1)
}
dtype_range.update(_integer_ranges)
def dtype_limits(image, clip_negative=False):
"""Return intensity limits, i.e. (min, max) tuple, of the image's dtype.
This function is adopted from skimage:
https://github.com/scikit-image/scikit-image/blob/
7e4840bd9439d1dfb6beaf549998452c99f97fdd/skimage/util/dtype.py#L35
Args:
image (ndarray): Input image.
clip_negative (bool, optional): If True, clip the negative range
(i.e. return 0 for min intensity) even if the image dtype allows
negative values.
    Returns:
tuple: Lower and upper intensity limits.
"""
imin, imax = dtype_range[image.dtype.type]
if clip_negative:
imin = 0
return imin, imax
def adjust_gamma(image, gamma=1, gain=1):
"""Performs Gamma Correction on the input image.
This function is adopted from skimage:
https://github.com/scikit-image/scikit-image/blob/
7e4840bd9439d1dfb6beaf549998452c99f97fdd/skimage/exposure/
exposure.py#L439-L494
Also known as Power Law Transform.
This function transforms the input image pixelwise according to the
equation ``O = I**gamma`` after scaling each pixel to the range 0 to 1.
Args:
image (ndarray): Input image.
gamma (float, optional): Non negative real number. Defaults to 1.
gain (float, optional): The constant multiplier. Defaults to 1.
Returns:
ndarray: Gamma corrected output image.
"""
if np.any(image < 0):
raise ValueError('Image Correction methods work correctly only on '
'images with non-negative values. Use '
'skimage.exposure.rescale_intensity.')
dtype = image.dtype.type
if gamma < 0:
raise ValueError('Gamma should be a non-negative real number.')
scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])
out = ((image / scale)**gamma) * scale * gain
return out.astype(dtype)
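# A small numeric example (illustrative) of the power-law transform above:
# for uint8 input the scale is 255, so with gamma = 2 a mid-grey value of
# 128 maps to 255 * (128 / 255) ** 2, i.e. about 64 (mid-tones darken).
def _demo_adjust_gamma():
    img = np.full((2, 2), 128, dtype=np.uint8)
    return adjust_gamma(img, gamma=2)  # array of 64s, dtype uint8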
def random_choose_unknown(unknown, crop_size):
"""Randomly choose an unknown start (top-left) point for a given crop_size.
Args:
unknown (np.ndarray): The binary unknown mask.
crop_size (tuple[int]): The given crop size.
Returns:
tuple[int]: The top-left point of the chosen bbox.
"""
h, w = unknown.shape
crop_h, crop_w = crop_size
delta_h = center_h = crop_h // 2
delta_w = center_w = crop_w // 2
    # mask out the valid area for selecting the cropping center
mask = np.zeros_like(unknown)
mask[delta_h:h - delta_h, delta_w:w - delta_w] = 1
if np.any(unknown & mask):
center_h_list, center_w_list = np.where(unknown & mask)
elif np.any(unknown):
center_h_list, center_w_list = np.where(unknown)
else:
print_log('No unknown pixels found!', level=logging.WARNING)
center_h_list = [center_h]
center_w_list = [center_w]
num_unknowns = len(center_h_list)
rand_ind = np.random.randint(num_unknowns)
center_h = center_h_list[rand_ind]
center_w = center_w_list[rand_ind]
# make sure the top-left point is valid
top = np.clip(center_h - delta_h, 0, h - crop_h)
left = np.clip(center_w - delta_w, 0, w - crop_w)
return top, left
def make_coord(shape, ranges=None, flatten=True):
""" Make coordinates at grid centers.
Args:
shape (tuple): shape of image.
ranges (tuple): range of coordinate value. Default: None.
flatten (bool): flatten to (n, 2) or Not. Default: True.
return:
coord (Tensor): coordinates.
"""
coord_seqs = []
for i, n in enumerate(shape):
if ranges is None:
v0, v1 = -1, 1
else:
v0, v1 = ranges[i]
r = (v1 - v0) / (2 * n)
seq = v0 + r + (2 * r) * torch.arange(n).float()
coord_seqs.append(seq)
coord = torch.stack(torch.meshgrid(*coord_seqs), dim=-1)
if flatten:
coord = coord.view(-1, coord.shape[-1])
return coord
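# Illustrative example of the grid-centre coordinates produced above: for a
# 2x2 grid over the default range [-1, 1], every cell centre sits half a
# cell away from the border.
def _demo_make_coord():
    coord = make_coord((2, 2))
    # tensor([[-0.5, -0.5], [-0.5, 0.5], [0.5, -0.5], [0.5, 0.5]])
    return coord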
| 4,623 | 28.832258 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/random_down_sampling.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import numpy as np
import torch
from mmcv import imresize
from ..registry import PIPELINES
@PIPELINES.register_module()
class RandomDownSampling:
"""Generate LQ image from GT (and crop), which will randomly pick a scale.
Args:
scale_min (float): The minimum of upsampling scale, inclusive.
Default: 1.0.
scale_max (float): The maximum of upsampling scale, exclusive.
Default: 4.0.
patch_size (int): The cropped lr patch size.
Default: None, means no crop.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
backend, "nearest", "bilinear", "bicubic", "box", "lanczos",
"hamming" for 'pillow' backend.
Default: "bicubic".
backend (str | None): The image resize backend type. Options are `cv2`,
`pillow`, `None`. If backend is None, the global imread_backend
specified by ``mmcv.use_backend()`` will be used.
Default: "pillow".
Scale will be picked in the range of [scale_min, scale_max).
"""
def __init__(self,
scale_min=1.0,
scale_max=4.0,
patch_size=None,
interpolation='bicubic',
backend='pillow'):
assert scale_max >= scale_min
self.scale_min = scale_min
self.scale_max = scale_max
self.patch_size = patch_size
self.interpolation = interpolation
self.backend = backend
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation. 'gt' is required.
Returns:
dict: A dict containing the processed data and information.
modified 'gt', supplement 'lq' and 'scale' to keys.
"""
img = results['gt']
scale = np.random.uniform(self.scale_min, self.scale_max)
if self.patch_size is None:
h_lr = math.floor(img.shape[-3] / scale + 1e-9)
w_lr = math.floor(img.shape[-2] / scale + 1e-9)
img = img[:round(h_lr * scale), :round(w_lr * scale), :]
img_down = resize_fn(img, (w_lr, h_lr), self.interpolation,
self.backend)
crop_lr, crop_hr = img_down, img
else:
w_lr = self.patch_size
w_hr = round(w_lr * scale)
x0 = np.random.randint(0, img.shape[-3] - w_hr)
y0 = np.random.randint(0, img.shape[-2] - w_hr)
crop_hr = img[x0:x0 + w_hr, y0:y0 + w_hr, :]
crop_lr = resize_fn(crop_hr, w_lr, self.interpolation,
self.backend)
results['gt'] = crop_hr
results['lq'] = crop_lr
results['scale'] = scale
return results
def __repr__(self):
repr_str = self.__class__.__name__
        repr_str += (f'(scale_min={self.scale_min}, '
                     f'scale_max={self.scale_max}, '
                     f'patch_size={self.patch_size}, '
                     f'interpolation={self.interpolation}, '
                     f'backend={self.backend})')
return repr_str
def resize_fn(img, size, interpolation='bicubic', backend='pillow'):
"""Resize the given image to a given size.
Args:
img (ndarray | torch.Tensor): The input image.
size (int | tuple[int]): Target size w or (w, h).
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
backend, "nearest", "bilinear", "bicubic", "box", "lanczos",
"hamming" for 'pillow' backend.
Default: "bicubic".
backend (str | None): The image resize backend type. Options are `cv2`,
`pillow`, `None`. If backend is None, the global imread_backend
specified by ``mmcv.use_backend()`` will be used.
Default: "pillow".
Returns:
ndarray | torch.Tensor: `resized_img`, whose type is same as `img`.
"""
if isinstance(size, int):
size = (size, size)
if isinstance(img, np.ndarray):
return imresize(
img, size, interpolation=interpolation, backend=backend)
elif isinstance(img, torch.Tensor):
image = imresize(
img.numpy(), size, interpolation=interpolation, backend=backend)
return torch.from_numpy(image)
else:
        raise TypeError('img should be np.ndarray or torch.Tensor, '
                        f'but got {type(img)}')
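# Illustrative usage (hypothetical sizes) of the helper above: it accepts
# both ndarray and Tensor inputs and returns the same type, and `size` is
# given as w or (w, h).
def _demo_resize_fn():
    img = np.random.rand(32, 48, 3).astype(np.float32)
    small = resize_fn(img, (24, 16))               # shape (16, 24, 3)
    tensor = resize_fn(torch.from_numpy(img), 8)   # shape (8, 8, 3) Tensor
    return small, tensor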
| 4,735 | 36.587302 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/blur_kernels.py | # This code is referenced from BasicSR with modifications.
# Reference: https://github.com/xinntao/BasicSR/blob/master/basicsr/data/degradations.py # noqa
# Original licence: Copyright (c) 2020 xinntao, under the Apache 2.0 license.
import numpy as np
from scipy import special
def get_rotated_sigma_matrix(sig_x, sig_y, theta):
"""Calculate the rotated sigma matrix (two dimensional matrix).
Args:
sig_x (float): Standard deviation along the horizontal direction.
sig_y (float): Standard deviation along the vertical direction.
theta (float): Rotation in radian.
Returns:
ndarray: Rotated sigma matrix.
"""
diag = np.array([[sig_x**2, 0], [0, sig_y**2]]).astype(np.float32)
rot = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]]).astype(np.float32)
return np.matmul(rot, np.matmul(diag, rot.T))
def _mesh_grid(kernel_size):
"""Generate the mesh grid, centering at zero.
Args:
kernel_size (int): The size of the kernel.
Returns:
x_grid (ndarray): x-coordinates with shape (kernel_size, kernel_size).
        y_grid (ndarray): y-coordinates with shape (kernel_size, kernel_size).
xy_grid (ndarray): stacked coordinates with shape
(kernel_size, kernel_size, 2).
"""
range_ = np.arange(-kernel_size // 2 + 1., kernel_size // 2 + 1.)
x_grid, y_grid = np.meshgrid(range_, range_)
xy_grid = np.hstack((x_grid.reshape((kernel_size * kernel_size, 1)),
y_grid.reshape(kernel_size * kernel_size,
1))).reshape(kernel_size, kernel_size,
2)
return xy_grid, x_grid, y_grid
def calculate_gaussian_pdf(sigma_matrix, grid):
"""Calculate PDF of the bivariate Gaussian distribution.
Args:
sigma_matrix (ndarray): The variance matrix with shape (2, 2).
grid (ndarray): Coordinates generated by :func:`_mesh_grid`,
with shape (K, K, 2), where K is the kernel size.
Returns:
        kernel (ndarray): Un-normalized kernel.
"""
inverse_sigma = np.linalg.inv(sigma_matrix)
kernel = np.exp(-0.5 * np.sum(np.matmul(grid, inverse_sigma) * grid, 2))
return kernel
def bivariate_gaussian(kernel_size,
sig_x,
sig_y=None,
theta=None,
grid=None,
is_isotropic=True):
"""Generate a bivariate isotropic or anisotropic Gaussian kernel.
In isotropic mode, only `sig_x` is used. `sig_y` and `theta` are
ignored.
Args:
kernel_size (int): The size of the kernel
sig_x (float): Standard deviation along horizontal direction.
sig_y (float | None, optional): Standard deviation along the vertical
direction. If it is None, 'is_isotropic' must be set to True.
Default: None.
theta (float | None, optional): Rotation in radian. If it is None,
'is_isotropic' must be set to True. Default: None.
grid (ndarray, optional): Coordinates generated by :func:`_mesh_grid`,
with shape (K, K, 2), where K is the kernel size. Default: None
is_isotropic (bool, optional): Whether to use an isotropic kernel.
Default: True.
Returns:
kernel (ndarray): normalized kernel (i.e. sum to 1).
"""
if grid is None:
grid, _, _ = _mesh_grid(kernel_size)
if is_isotropic:
sigma_matrix = np.array([[sig_x**2, 0], [0,
sig_x**2]]).astype(np.float32)
else:
if sig_y is None:
raise ValueError('"sig_y" cannot be None if "is_isotropic" is '
'False.')
sigma_matrix = get_rotated_sigma_matrix(sig_x, sig_y, theta)
kernel = calculate_gaussian_pdf(sigma_matrix, grid)
kernel = kernel / np.sum(kernel)
return kernel
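# A quick sanity sketch (illustrative values) for the kernel above: an
# isotropic Gaussian kernel on the centred grid is symmetric and, after
# normalisation, sums to 1.
def _demo_gaussian_kernel():
    kernel = bivariate_gaussian(kernel_size=13, sig_x=2.0)
    assert np.isclose(kernel.sum(), 1.0)
    assert np.allclose(kernel, kernel.T)  # isotropic => symmetric
    return kernel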
def bivariate_generalized_gaussian(kernel_size,
sig_x,
sig_y=None,
theta=None,
beta=1,
grid=None,
is_isotropic=True):
"""Generate a bivariate generalized Gaussian kernel.
Described in `Parameter Estimation For Multivariate Generalized
    Gaussian Distributions` by Pascal et al. (2013). In isotropic mode,
    only `sig_x` is used. `sig_y` and `theta` are ignored.
Args:
kernel_size (int): The size of the kernel
sig_x (float): Standard deviation along horizontal direction
sig_y (float | None, optional): Standard deviation along the vertical
direction. If it is None, 'is_isotropic' must be set to True.
Default: None.
theta (float | None, optional): Rotation in radian. If it is None,
'is_isotropic' must be set to True. Default: None.
beta (float, optional): Shape parameter, beta = 1 is the normal
distribution. Default: 1.
grid (ndarray, optional): Coordinates generated by :func:`_mesh_grid`,
with shape (K, K, 2), where K is the kernel size. Default: None
is_isotropic (bool, optional): Whether to use an isotropic kernel.
Default: True.
Returns:
kernel (ndarray): normalized kernel.
"""
if grid is None:
grid, _, _ = _mesh_grid(kernel_size)
if is_isotropic:
sigma_matrix = np.array([[sig_x**2, 0], [0,
sig_x**2]]).astype(np.float32)
else:
sigma_matrix = get_rotated_sigma_matrix(sig_x, sig_y, theta)
inverse_sigma = np.linalg.inv(sigma_matrix)
kernel = np.exp(
-0.5 *
np.power(np.sum(np.matmul(grid, inverse_sigma) * grid, 2), beta))
kernel = kernel / np.sum(kernel)
return kernel
def bivariate_plateau(kernel_size,
sig_x,
sig_y,
theta,
beta,
grid=None,
is_isotropic=True):
"""Generate a plateau-like anisotropic kernel.
This kernel has a form of 1 / (1+x^(beta)).
Ref: https://stats.stackexchange.com/questions/203629/is-there-a-plateau-shaped-distribution # noqa
    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
Args:
kernel_size (int): The size of the kernel
sig_x (float): Standard deviation along horizontal direction
sig_y (float): Standard deviation along the vertical direction.
theta (float): Rotation in radian.
beta (float): Shape parameter, beta = 1 is the normal distribution.
grid (ndarray, optional): Coordinates generated by :func:`_mesh_grid`,
with shape (K, K, 2), where K is the kernel size. Default: None
is_isotropic (bool, optional): Whether to use an isotropic kernel.
Default: True.
Returns:
kernel (ndarray): normalized kernel (i.e. sum to 1).
"""
if grid is None:
grid, _, _ = _mesh_grid(kernel_size)
if is_isotropic:
sigma_matrix = np.array([[sig_x**2, 0], [0,
sig_x**2]]).astype(np.float32)
else:
sigma_matrix = get_rotated_sigma_matrix(sig_x, sig_y, theta)
inverse_sigma = np.linalg.inv(sigma_matrix)
kernel = np.reciprocal(
np.power(np.sum(np.matmul(grid, inverse_sigma) * grid, 2), beta) + 1)
kernel = kernel / np.sum(kernel)
return kernel
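# Flat-top sketch (illustrative values): relative to a Gaussian with the
# same sigma, the plateau kernel typically decays more slowly right at the
# center, so the next-to-center/center ratio is closer to 1.
def _demo_plateau_flat_top():
    g = bivariate_gaussian(21, sig_x=2.0)
    p = bivariate_plateau(21, 2.0, 2.0, 0.0, 2.0)
    c = 10  # center index of a 21x21 kernel
    return p[c, c + 1] / p[c, c], g[c, c + 1] / g[c, c]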
def random_bivariate_gaussian_kernel(kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
noise_range=None,
is_isotropic=True):
"""Randomly generate bivariate isotropic or anisotropic Gaussian kernels.
    In the isotropic mode, only `sigma_x_range` is used; `sigma_y_range` and
    `rotation_range` are ignored.
Args:
kernel_size (int): The size of the kernel.
sigma_x_range (tuple): The range of the standard deviation along the
horizontal direction. Default: [0.6, 5]
sigma_y_range (tuple): The range of the standard deviation along the
vertical direction. Default: [0.6, 5]
rotation_range (tuple): Range of rotation in radian.
noise_range (tuple, optional): Multiplicative kernel noise.
Default: None.
is_isotropic (bool, optional): Whether to use an isotropic kernel.
Default: True.
Returns:
kernel (ndarray): The kernel whose parameters are sampled from the
specified range.
"""
assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
assert sigma_x_range[0] <= sigma_x_range[1], 'Wrong sigma_x_range.'
sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
if is_isotropic is False:
assert sigma_y_range[0] <= sigma_y_range[1], 'Wrong sigma_y_range.'
assert rotation_range[0] <= rotation_range[1], 'Wrong rotation_range.'
sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
rotation = np.random.uniform(rotation_range[0], rotation_range[1])
else:
sigma_y = sigma_x
rotation = 0
kernel = bivariate_gaussian(
kernel_size, sigma_x, sigma_y, rotation, is_isotropic=is_isotropic)
# add multiplicative noise
if noise_range is not None:
assert noise_range[0] <= noise_range[1], 'Wrong noise range.'
noise = np.random.uniform(
noise_range[0], noise_range[1], size=kernel.shape)
kernel = kernel * noise
kernel = kernel / np.sum(kernel)
return kernel
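# Sampling sketch (illustrative ranges): draw one anisotropic Gaussian
# kernel with mild multiplicative noise.
def _demo_random_gaussian_kernel():
    return random_bivariate_gaussian_kernel(
        kernel_size=21,
        sigma_x_range=(0.6, 5),
        sigma_y_range=(0.6, 5),
        rotation_range=(-np.pi, np.pi),
        noise_range=(0.9, 1.1),
        is_isotropic=False)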
def random_bivariate_generalized_gaussian_kernel(kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
beta_range,
noise_range=None,
is_isotropic=True):
"""Randomly generate bivariate generalized Gaussian kernels.
    In the isotropic mode, only `sigma_x_range` is used; `sigma_y_range` and
    `rotation_range` are ignored.
Args:
kernel_size (int): The size of the kernel.
sigma_x_range (tuple): The range of the standard deviation along the
horizontal direction. Default: [0.6, 5]
sigma_y_range (tuple): The range of the standard deviation along the
vertical direction. Default: [0.6, 5]
rotation_range (tuple): Range of rotation in radian.
beta_range (float): The range of the shape parameter, beta = 1 is the
normal distribution.
noise_range (tuple, optional): Multiplicative kernel noise.
Default: None.
is_isotropic (bool, optional): Whether to use an isotropic kernel.
Default: True.
    Returns:
        kernel (ndarray): The kernel whose parameters are sampled from the
            specified range.
"""
assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
assert sigma_x_range[0] <= sigma_x_range[1], 'Wrong sigma_x_range.'
sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
if is_isotropic is False:
assert sigma_y_range[0] <= sigma_y_range[1], 'Wrong sigma_y_range.'
assert rotation_range[0] <= rotation_range[1], 'Wrong rotation_range.'
sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
rotation = np.random.uniform(rotation_range[0], rotation_range[1])
else:
sigma_y = sigma_x
rotation = 0
# assume beta_range[0] <= 1 <= beta_range[1]
if np.random.uniform() <= 0.5:
beta = np.random.uniform(beta_range[0], 1)
else:
beta = np.random.uniform(1, beta_range[1])
kernel = bivariate_generalized_gaussian(
kernel_size,
sigma_x,
sigma_y,
rotation,
beta,
is_isotropic=is_isotropic)
# add multiplicative noise
if noise_range is not None:
assert noise_range[0] <= noise_range[1], 'Wrong noise range.'
noise = np.random.uniform(
noise_range[0], noise_range[1], size=kernel.shape)
kernel = kernel * noise
kernel = kernel / np.sum(kernel)
return kernel
def random_bivariate_plateau_kernel(kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
beta_range,
noise_range=None,
is_isotropic=True):
"""Randomly generate bivariate plateau kernels.
    In the isotropic mode, only `sigma_x_range` is used; `sigma_y_range` and
    `rotation_range` are ignored.
Args:
kernel_size (int): The size of the kernel.
sigma_x_range (tuple): The range of the standard deviation along the
horizontal direction. Default: [0.6, 5]
sigma_y_range (tuple): The range of the standard deviation along the
vertical direction. Default: [0.6, 5]
rotation_range (tuple): Range of rotation in radian.
beta_range (float): The range of the shape parameter, beta = 1 is the
normal distribution.
noise_range (tuple, optional): Multiplicative kernel noise.
Default: None.
is_isotropic (bool, optional): Whether to use an isotropic kernel.
Default: True.
    Returns:
        kernel (ndarray): The kernel whose parameters are sampled from the
            specified range.
"""
assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
assert sigma_x_range[0] <= sigma_x_range[1], 'Wrong sigma_x_range.'
sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
if is_isotropic is False:
assert sigma_y_range[0] <= sigma_y_range[1], 'Wrong sigma_y_range.'
assert rotation_range[0] <= rotation_range[1], 'Wrong rotation_range.'
sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
rotation = np.random.uniform(rotation_range[0], rotation_range[1])
else:
sigma_y = sigma_x
rotation = 0
    # TODO: this sampling scheme may not be proper; it assumes
    # beta_range[0] <= 1 <= beta_range[1]
if np.random.uniform() <= 0.5:
beta = np.random.uniform(beta_range[0], 1)
else:
beta = np.random.uniform(1, beta_range[1])
kernel = bivariate_plateau(
kernel_size,
sigma_x,
sigma_y,
rotation,
beta,
is_isotropic=is_isotropic)
# add multiplicative noise
if noise_range is not None:
assert noise_range[0] <= noise_range[1], 'Wrong noise range.'
noise = np.random.uniform(
noise_range[0], noise_range[1], size=kernel.shape)
kernel = kernel * noise
kernel = kernel / np.sum(kernel)
return kernel
def random_circular_lowpass_kernel(omega_range, kernel_size, pad_to=0):
""" Generate a 2D Sinc filter
Reference: https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter # noqa
Args:
omega_range (tuple): The cutoff frequency in radian (pi is max).
kernel_size (int): The size of the kernel. It must be an odd number.
pad_to (int, optional): The size of the padded kernel. It must be odd
or zero. Default: 0.
Returns:
ndarray: The Sinc kernel with specified parameters.
"""
err = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
omega = np.random.uniform(omega_range[0], omega_range[-1])
kernel = np.fromfunction(
lambda x, y: omega * special.j1(omega * np.sqrt(
(x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)) /
(2 * np.pi * np.sqrt((x - (kernel_size - 1) / 2)**2 +
(y - (kernel_size - 1) / 2)**2)),
[kernel_size, kernel_size])
kernel[(kernel_size - 1) // 2,
(kernel_size - 1) // 2] = omega**2 / (4 * np.pi)
kernel = kernel / np.sum(kernel)
if pad_to > kernel_size:
pad_size = (pad_to - kernel_size) // 2
kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
np.seterr(**err)
return kernel
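# Sinc sketch (illustrative values): a small cutoff keeps only low
# frequencies; `pad_to` embeds the kernel in a larger odd-sized support.
def _demo_sinc_kernel():
    return random_circular_lowpass_kernel(
        omega_range=(np.pi / 4, np.pi / 2), kernel_size=13, pad_to=21)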
def random_mixed_kernels(kernel_list,
kernel_prob,
kernel_size,
sigma_x_range=[0.6, 5],
sigma_y_range=[0.6, 5],
rotation_range=[-np.pi, np.pi],
beta_gaussian_range=[0.5, 8],
beta_plateau_range=[1, 2],
omega_range=[0, np.pi],
noise_range=None):
"""Randomly generate a kernel.
Args:
        kernel_list (list): A list of kernel types. Supported choices are
            'iso', 'aniso', 'generalized_iso', 'generalized_aniso',
            'plateau_iso', 'plateau_aniso' and 'sinc'.
        kernel_prob (list): The probability of choosing the corresponding
            kernel.
kernel_size (int): The size of the kernel.
sigma_x_range (list, optional): The range of the standard deviation
along the horizontal direction. Default: (0.6, 5).
sigma_y_range (list, optional): The range of the standard deviation
along the vertical direction. Default: (0.6, 5).
rotation_range (list, optional): Range of rotation in radian.
Default: (-np.pi, np.pi).
beta_gaussian_range (list, optional): The range of the shape parameter
for generalized Gaussian. Default: (0.5, 8).
beta_plateau_range (list, optional): The range of the shape parameter
for plateau kernel. Default: (1, 2).
omega_range (list, optional): The range of omega used in Sinc kernel.
Default: (0, np.pi).
noise_range (list, optional): Multiplicative kernel noise.
Default: None.
Returns:
kernel (ndarray): The kernel whose parameters are sampled from the
specified range.
"""
kernel_type = np.random.choice(kernel_list, p=kernel_prob)
if kernel_type == 'iso':
kernel = random_bivariate_gaussian_kernel(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
noise_range=noise_range,
is_isotropic=True)
elif kernel_type == 'aniso':
kernel = random_bivariate_gaussian_kernel(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
noise_range=noise_range,
is_isotropic=False)
elif kernel_type == 'generalized_iso':
kernel = random_bivariate_generalized_gaussian_kernel(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
beta_gaussian_range,
noise_range=noise_range,
is_isotropic=True)
elif kernel_type == 'generalized_aniso':
kernel = random_bivariate_generalized_gaussian_kernel(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
beta_gaussian_range,
noise_range=noise_range,
is_isotropic=False)
elif kernel_type == 'plateau_iso':
kernel = random_bivariate_plateau_kernel(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
beta_plateau_range,
noise_range=None,
is_isotropic=True)
elif kernel_type == 'plateau_aniso':
kernel = random_bivariate_plateau_kernel(
kernel_size,
sigma_x_range,
sigma_y_range,
rotation_range,
beta_plateau_range,
noise_range=None,
is_isotropic=False)
elif kernel_type == 'sinc':
kernel = random_circular_lowpass_kernel(omega_range, kernel_size)
return kernel
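# Mixed-kernel sketch: the probabilities must sum to 1; the list and the
# weights below are illustrative, not recommended defaults.
def _demo_random_mixed_kernels():
    return random_mixed_kernels(
        kernel_list=['iso', 'aniso', 'generalized_iso', 'sinc'],
        kernel_prob=[0.4, 0.3, 0.2, 0.1],
        kernel_size=21)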
| 20,284 | 36.774674 | 109 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/formating.py | # Copyright (c) OpenMMLab. All rights reserved.
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from torch.nn import functional as F
from ..registry import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
if isinstance(data, np.ndarray):
return torch.from_numpy(data)
if isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
if isinstance(data, int):
return torch.LongTensor([data])
if isinstance(data, float):
return torch.FloatTensor([data])
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
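# Conversion sketch for `to_tensor`: note the dtype conventions for scalars.
def _demo_to_tensor():
    assert to_tensor(np.zeros((2, 2))).shape == (2, 2)
    assert to_tensor(3).dtype == torch.int64  # int -> LongTensor
    assert to_tensor(3.0).dtype == torch.float32  # float -> FloatTensor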
@PIPELINES.register_module()
class ToTensor:
"""Convert some values in results dict to `torch.Tensor` type
in data loader pipeline.
Args:
keys (Sequence[str]): Required keys to be converted.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor:
"""Convert image type to `torch.Tensor` type.
Args:
keys (Sequence[str]): Required keys to be converted.
to_float32 (bool): Whether convert numpy image array to np.float32
before converted to tensor. Default: True.
"""
def __init__(self, keys, to_float32=True):
self.keys = keys
self.to_float32 = to_float32
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for key in self.keys:
# deal with gray scale img: expand a color channel
if len(results[key].shape) == 2:
results[key] = results[key][..., None]
if self.to_float32 and not isinstance(results[key], np.float32):
results[key] = results[key].astype(np.float32)
results[key] = to_tensor(results[key].transpose(2, 0, 1))
return results
def __repr__(self):
return self.__class__.__name__ + (
f'(keys={self.keys}, to_float32={self.to_float32})')
@PIPELINES.register_module()
class FramesToTensor(ImageToTensor):
"""Convert frames type to `torch.Tensor` type.
    It accepts a list of frames, converts each to `torch.Tensor` type and then
    stacks them along a new dimension (dim=0).
Args:
keys (Sequence[str]): Required keys to be converted.
to_float32 (bool): Whether convert numpy image array to np.float32
before converted to tensor. Default: True.
"""
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for key in self.keys:
if not isinstance(results[key], list):
raise TypeError(f'results["{key}"] should be a list, '
f'but got {type(results[key])}')
for idx, v in enumerate(results[key]):
# deal with gray scale img: expand a color channel
if len(v.shape) == 2:
v = v[..., None]
if self.to_float32 and not isinstance(v, np.float32):
v = v.astype(np.float32)
results[key][idx] = to_tensor(v.transpose(2, 0, 1))
results[key] = torch.stack(results[key], dim=0)
if results[key].size(0) == 1:
results[key].squeeze_()
return results
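# Pipeline sketch (illustrative shapes): a list of 5 HxWx3 frames becomes a
# (5, 3, H, W) tensor; a single frame would have its size-1 dims squeezed.
def _demo_frames_to_tensor():
    pipeline = FramesToTensor(keys=['lq'])
    results = {'lq': [np.zeros((4, 4, 3), np.float32) for _ in range(5)]}
    return pipeline(results)['lq'].shape  # torch.Size([5, 3, 4, 4])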
@PIPELINES.register_module()
class GetMaskedImage:
"""Get masked image.
Args:
img_name (str): Key for clean image.
        mask_name (str): Key for mask image. The mask shape should be
            (h, w, 1), where '1' indicates holes and '0' indicates valid
            regions.
"""
def __init__(self, img_name='gt_img', mask_name='mask'):
self.img_name = img_name
self.mask_name = mask_name
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
clean_img = results[self.img_name]
mask = results[self.mask_name]
masked_img = clean_img * (1. - mask)
results['masked_img'] = masked_img
return results
def __repr__(self):
return self.__class__.__name__ + (
f"(img_name='{self.img_name}', mask_name='{self.mask_name}')")
@PIPELINES.register_module()
class FormatTrimap:
"""Convert trimap (tensor) to one-hot representation.
It transforms the trimap label from (0, 128, 255) to (0, 1, 2). If
    ``to_onehot`` is set to True, the trimap will be converted to a one-hot
    tensor of shape (3, H, W). The required key is "trimap"; added or
    modified keys are "trimap" and "to_onehot".
Args:
to_onehot (bool): whether convert trimap to one-hot tensor. Default:
``False``.
"""
def __init__(self, to_onehot=False):
self.to_onehot = to_onehot
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
trimap = results['trimap'].squeeze()
trimap[trimap == 128] = 1
trimap[trimap == 255] = 2
if self.to_onehot:
trimap = F.one_hot(trimap.to(torch.long), num_classes=3)
trimap = trimap.permute(2, 0, 1)
else:
trimap = trimap[None, ...] # expand the channels dimension
results['trimap'] = trimap.float()
results['meta'].data['to_onehot'] = self.to_onehot
return results
def __repr__(self):
return self.__class__.__name__ + f'(to_onehot={self.to_onehot})'
@PIPELINES.register_module()
class Collect:
"""Collect data from the loader relevant to the specific task.
    This is usually the last stage of the data loader pipeline. Typically,
    `keys` is set to some subset of "img" and "gt_labels".
The "img_meta" item is always populated. The contents of the "meta"
dictionary depends on "meta_keys".
Args:
keys (Sequence[str]): Required keys to be collected.
meta_keys (Sequence[str]): Required keys to be collected to "meta".
Default: None.
"""
def __init__(self, keys, meta_keys=None):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
data = {}
img_meta = {}
        for key in self.meta_keys or []:
img_meta[key] = results[key]
data['meta'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return self.__class__.__name__ + (
f'(keys={self.keys}, meta_keys={self.meta_keys})')
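# Collect sketch (illustrative keys): only `keys` stay as top-level entries,
# while `meta_keys` are wrapped in a cpu-only DataContainer under 'meta'.
def _demo_collect():
    collect = Collect(keys=['lq', 'gt'], meta_keys=['lq_path'])
    results = {'lq': 1, 'gt': 2, 'lq_path': 'fake.png'}
    return collect(results)  # {'meta': DC({'lq_path': ...}), 'lq': 1, 'gt': 2}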
| 8,262 | 30.299242 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .augmentation import (BinarizeImage, ColorJitter, CopyValues, Flip,
GenerateFrameIndices,
GenerateFrameIndiceswithPadding,
GenerateSegmentIndices, MirrorSequence, Pad,
Quantize, RandomAffine, RandomJitter,
RandomMaskDilation, RandomTransposeHW, Resize,
TemporalReverse, UnsharpMasking)
from .compose import Compose
from .crop import (Crop, CropAroundCenter, CropAroundFg, CropAroundUnknown,
CropLike, FixedCrop, ModCrop, PairedRandomCrop,
RandomResizedCrop)
from .formating import (Collect, FormatTrimap, GetMaskedImage, ImageToTensor,
ToTensor)
from .generate_assistant import GenerateCoordinateAndCell, GenerateHeatmap
from .loading import (GetSpatialDiscountMask, LoadImageFromFile,
LoadImageFromFileList, LoadMask, LoadPairedImageFromFile,
RandomLoadResizeBg)
from .matlab_like_resize import MATLABLikeResize
from .matting_aug import (CompositeFg, GenerateSeg, GenerateSoftSeg,
GenerateTrimap, GenerateTrimapWithDistTransform,
MergeFgAndBg, PerturbBg, TransformTrimap)
from .normalization import Normalize, RescaleToZeroOne
from .random_degradations import (DegradationsWithShuffle, RandomBlur,
RandomJPEGCompression, RandomNoise,
RandomResize, RandomVideoCompression)
from .random_down_sampling import RandomDownSampling
__all__ = [
'Collect', 'FormatTrimap', 'LoadImageFromFile', 'LoadMask',
'RandomLoadResizeBg', 'Compose', 'ImageToTensor', 'ToTensor',
'GetMaskedImage', 'BinarizeImage', 'Flip', 'Pad', 'RandomAffine',
'RandomJitter', 'ColorJitter', 'RandomMaskDilation', 'RandomTransposeHW',
    'Resize', 'RandomResizedCrop', 'Crop', 'CropAroundCenter',
'CropAroundUnknown', 'ModCrop', 'PairedRandomCrop', 'Normalize',
'RescaleToZeroOne', 'GenerateTrimap', 'MergeFgAndBg', 'CompositeFg',
'TemporalReverse', 'LoadImageFromFileList', 'GenerateFrameIndices',
'GenerateFrameIndiceswithPadding', 'FixedCrop', 'LoadPairedImageFromFile',
'GenerateSoftSeg', 'GenerateSeg', 'PerturbBg', 'CropAroundFg',
'GetSpatialDiscountMask', 'RandomDownSampling',
'GenerateTrimapWithDistTransform', 'TransformTrimap',
'GenerateCoordinateAndCell', 'GenerateSegmentIndices', 'MirrorSequence',
'CropLike', 'GenerateHeatmap', 'MATLABLikeResize', 'CopyValues',
'Quantize', 'RandomBlur', 'RandomJPEGCompression', 'RandomNoise',
'DegradationsWithShuffle', 'RandomResize', 'UnsharpMasking',
    'RandomVideoCompression'
]
| 2,833 | 58.041667 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/matlab_like_resize.py | # This code is referenced from matlab_imresize with modifications
# Reference: https://github.com/fatheral/matlab_imresize/blob/master/imresize.py # noqa
# Original licence: Copyright (c) 2020 fatheral, under the MIT License.
import numpy as np
from ..registry import PIPELINES
def get_size_from_scale(input_size, scale_factor):
"""Get the output size given input size and scale factor.
Args:
input_size (tuple): The size of the input image.
        scale_factor (tuple(float)): The resize factor of each dimension.
Returns:
list[int]: The size of the output image.
"""
output_shape = [
int(np.ceil(scale * shape))
for (scale, shape) in zip(scale_factor, input_size)
]
return output_shape
def get_scale_from_size(input_size, output_size):
"""Get the scale factor given input size and output size.
Args:
input_size (tuple(int)): The size of the input image.
output_size (tuple(int)): The size of the output image.
Returns:
list[float]: The scale factor of each dimension.
"""
scale = [
1.0 * output_shape / input_shape
for (input_shape, output_shape) in zip(input_size, output_size)
]
return scale
def _cubic(x):
""" Cubic function.
Args:
x (ndarray): The distance from the center position.
Returns:
ndarray: The weight corresponding to a particular distance.
"""
x = np.array(x, dtype=np.float32)
x_abs = np.abs(x)
x_abs_sq = x_abs**2
x_abs_cu = x_abs_sq * x_abs
    # if |x| <= 1:     y = 1.5|x|^3 - 2.5|x|^2 + 1
    # if 1 < |x| <= 2: y = -0.5|x|^3 + 2.5|x|^2 - 4|x| + 2
f = (1.5 * x_abs_cu - 2.5 * x_abs_sq + 1) * (x_abs <= 1) + (
-0.5 * x_abs_cu + 2.5 * x_abs_sq - 4 * x_abs + 2) * ((1 < x_abs) &
(x_abs <= 2))
return f
def get_weights_indices(input_length, output_length, scale, kernel,
kernel_width):
"""Get weights and indices for interpolation.
Args:
input_length (int): Length of the input sequence.
output_length (int): Length of the output sequence.
scale (float): Scale factor.
kernel (func): The kernel used for resizing.
kernel_width (int): The width of the kernel.
Returns:
list[ndarray]: The weights and the indices for interpolation.
"""
    if scale < 1:  # modified kernel for antialiasing
        def h(x):
            return scale * kernel(scale * x)
        kernel_width = 1.0 * kernel_width / scale
    else:
        h = kernel
# coordinates of output
x = np.arange(1, output_length + 1).astype(np.float32)
# coordinates of input
u = x / scale + 0.5 * (1 - 1 / scale)
left = np.floor(u - kernel_width / 2) # leftmost pixel
p = int(np.ceil(kernel_width)) + 2 # maximum number of pixels
# indices of input pixels
ind = left[:, np.newaxis, ...] + np.arange(p)
indices = ind.astype(np.int32)
# weights of input pixels
weights = h(u[:, np.newaxis, ...] - indices - 1)
weights = weights / np.sum(weights, axis=1)[:, np.newaxis, ...]
# remove all-zero columns
aux = np.concatenate(
(np.arange(input_length), np.arange(input_length - 1, -1,
step=-1))).astype(np.int32)
indices = aux[np.mod(indices, aux.size)]
ind2store = np.nonzero(np.any(weights, axis=0))
weights = weights[:, ind2store]
indices = indices[:, ind2store]
return weights, indices
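# Weights/indices sketch: a 1-D downsample from length 8 to 4 at scale 0.5
# with the (antialiased) cubic kernel; one row per output pixel.
def _demo_weights_indices():
    weights, indices = get_weights_indices(8, 4, 0.5, _cubic, 4.0)
    return weights.shape, indices.shape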
def resize_along_dim(img_in, weights, indices, dim):
"""Resize along a specific dimension.
Args:
img_in (ndarray): The input image.
weights (ndarray): The weights used for interpolation, computed from
[get_weights_indices].
indices (ndarray): The indices used for interpolation, computed from
[get_weights_indices].
dim (int): Which dimension to undergo interpolation.
Returns:
ndarray: Interpolated (along one dimension) image.
"""
    is_uint8 = img_in.dtype == np.uint8  # remember the dtype before casting
    img_in = img_in.astype(np.float32)
w_shape = weights.shape
output_shape = list(img_in.shape)
output_shape[dim] = w_shape[0]
img_out = np.zeros(output_shape)
if dim == 0:
for i in range(w_shape[0]):
w = weights[i, :][np.newaxis, ...]
ind = indices[i, :]
img_slice = img_in[ind, :]
img_out[i] = np.sum(np.squeeze(img_slice, axis=0) * w.T, axis=0)
elif dim == 1:
for i in range(w_shape[0]):
w = weights[i, :][:, :, np.newaxis]
ind = indices[i, :]
img_slice = img_in[:, ind]
img_out[:, i] = np.sum(np.squeeze(img_slice, axis=1) * w.T, axis=1)
    if is_uint8:
        img_out = np.clip(img_out, 0, 255)
        return np.around(img_out).astype(np.uint8)
    else:
        return img_out
@PIPELINES.register_module()
class MATLABLikeResize:
"""Resize the input image using MATLAB-like downsampling.
    Currently supports bicubic interpolation only. Note that the output of
    this class is slightly different from that of the official MATLAB function.
Required keys are the keys in attribute "keys". Added or modified keys
are "scale" and "output_shape", and the keys in attribute "keys".
Args:
keys (list[str]): A list of keys whose values are modified.
scale (float | None, optional): The scale factor of the resize
operation. If None, it will be determined by output_shape.
Default: None.
output_shape (tuple(int) | None, optional): The size of the output
image. If None, it will be determined by scale. Note that if
scale is provided, output_shape will not be used.
Default: None.
kernel (str, optional): The kernel for the resize operation.
Currently support 'bicubic' only. Default: 'bicubic'.
kernel_width (float): The kernel width. Currently support 4.0 only.
Default: 4.0.
"""
def __init__(self,
keys,
scale=None,
output_shape=None,
kernel='bicubic',
kernel_width=4.0):
if kernel.lower() != 'bicubic':
raise ValueError('Currently support bicubic kernel only.')
        if float(kernel_width) != 4.0:
            raise ValueError('Currently support kernel_width=4.0 only.')
if scale is None and output_shape is None:
raise ValueError('"scale" and "output_shape" cannot be both None')
self.kernel_func = _cubic
self.keys = keys
self.scale = scale
self.output_shape = output_shape
self.kernel = kernel
self.kernel_width = kernel_width
def _resize(self, img):
weights = {}
indices = {}
# compute scale and output_size
if self.scale is not None:
scale = float(self.scale)
scale = [scale, scale]
output_size = get_size_from_scale(img.shape, scale)
else:
scale = get_scale_from_size(img.shape, self.output_shape)
output_size = list(self.output_shape)
# apply cubic interpolation along two dimensions
order = np.argsort(np.array(scale))
for k in range(2):
key = (img.shape[k], output_size[k], scale[k], self.kernel_func,
self.kernel_width)
weight, index = get_weights_indices(img.shape[k], output_size[k],
scale[k], self.kernel_func,
self.kernel_width)
weights[key] = weight
indices[key] = index
output = np.copy(img)
if output.ndim == 2: # grayscale image
output = output[:, :, np.newaxis]
for k in range(2):
dim = order[k]
key = (img.shape[dim], output_size[dim], scale[dim],
self.kernel_func, self.kernel_width)
output = resize_along_dim(output, weights[key], indices[key], dim)
return output
def __call__(self, results):
for key in self.keys:
is_single_image = False
if isinstance(results[key], np.ndarray):
is_single_image = True
results[key] = [results[key]]
results[key] = [self._resize(img) for img in results[key]]
if is_single_image:
results[key] = results[key][0]
results['scale'] = self.scale
results['output_shape'] = self.output_shape
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f'(keys={self.keys}, scale={self.scale}, '
f'output_shape={self.output_shape}, '
f'kernel={self.kernel}, kernel_width={self.kernel_width})')
return repr_str
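# Resize sketch (illustrative shapes): scale=0.25 maps a 64x64 image to
# 16x16; alternatively pass output_shape instead of scale.
def _demo_matlab_like_resize():
    resize = MATLABLikeResize(keys=['gt'], scale=0.25)
    results = {'gt': np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)}
    return resize(results)['gt'].shape  # (16, 16, 3)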
| 9,022 | 31.692029 | 88 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/generate_assistant.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from ..registry import PIPELINES
from .utils import make_coord
@PIPELINES.register_module()
class GenerateHeatmap:
"""Generate heatmap from keypoint.
Args:
keypoint (str): Key of keypoint in dict.
ori_size (int | Tuple[int]): Original image size of keypoint.
target_size (int | Tuple[int]): Target size of heatmap.
sigma (float): Sigma parameter of heatmap. Default: 1.0
"""
def __init__(self, keypoint, ori_size, target_size, sigma=1.0):
if isinstance(ori_size, int):
ori_size = (ori_size, ori_size)
else:
ori_size = ori_size[:2]
if isinstance(target_size, int):
target_size = (target_size, target_size)
else:
target_size = target_size[:2]
self.size_ratio = (target_size[0] / ori_size[0],
target_size[1] / ori_size[1])
self.keypoint = keypoint
self.sigma = sigma
self.target_size = target_size
self.ori_size = ori_size
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation. Require keypoint.
Returns:
dict: A dict containing the processed data and information.
Add 'heatmap'.
"""
keypoint_list = [(keypoint[0] * self.size_ratio[0],
keypoint[1] * self.size_ratio[1])
for keypoint in results[self.keypoint]]
heatmap_list = [
self._generate_one_heatmap(keypoint) for keypoint in keypoint_list
]
results['heatmap'] = np.stack(heatmap_list, axis=2)
return results
def _generate_one_heatmap(self, keypoint):
"""Generate One Heatmap.
Args:
landmark (Tuple[float]): Location of a landmark.
results:
heatmap (np.ndarray): A heatmap of landmark.
"""
w, h = self.target_size
x_range = np.arange(start=0, stop=w, dtype=int)
y_range = np.arange(start=0, stop=h, dtype=int)
grid_x, grid_y = np.meshgrid(x_range, y_range)
dist2 = (grid_x - keypoint[0])**2 + (grid_y - keypoint[1])**2
exponent = dist2 / 2.0 / self.sigma / self.sigma
heatmap = np.exp(-exponent)
return heatmap
def __repr__(self):
return (f'{self.__class__.__name__}, '
f'keypoint={self.keypoint}, '
f'ori_size={self.ori_size}, '
f'target_size={self.target_size}, '
f'sigma={self.sigma}')
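# Heatmap sketch (illustrative sizes): two keypoints in a 256x256 frame
# become two Gaussian channels on a 64x64 grid.
def _demo_generate_heatmap():
    gen = GenerateHeatmap('landmark', ori_size=256, target_size=64, sigma=2)
    results = {'landmark': [(128.0, 128.0), (10.0, 40.0)]}
    return gen(results)['heatmap'].shape  # (64, 64, 2)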
@PIPELINES.register_module()
class GenerateCoordinateAndCell:
"""Generate coordinate and cell.
Generate coordinate from the desired size of SR image.
Train or val:
1. Generate coordinate from GT.
2. Reshape GT image to (HgWg, 3) and transpose to (3, HgWg).
where `Hg` and `Wg` represent the height and width of GT.
Test:
Generate coordinate from LQ and scale or target_size.
Then generate cell from coordinate.
Args:
        sample_quantity (int): The quantity of samples in coordinates, used
            to ensure that the GT tensors in a batch have the same
            dimensions. Default: None.
scale (float): Scale of upsampling.
Default: None.
target_size (tuple[int]): Size of target image.
Default: None.
    The priority for determining the size of the target image is:
        1. results['gt'].shape[-2:]
        2. results['lq'].shape[-2:] * scale
        3. target_size
"""
def __init__(self, sample_quantity=None, scale=None, target_size=None):
self.sample_quantity = sample_quantity
self.scale = scale
self.target_size = target_size
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Require either in results:
1. 'lq' (tensor), whose shape is similar as (3, H, W).
2. 'gt' (tensor), whose shape is similar as (3, H, W).
3. None, the premise is
self.target_size and len(self.target_size) >= 2.
Returns:
dict: A dict containing the processed data and information.
Reshape 'gt' to (-1, 3) and transpose to (3, -1) if 'gt'
in results.
Add 'coord' and 'cell'.
"""
# generate hr_coord (and hr_rgb)
if 'gt' in results:
crop_hr = results['gt']
self.target_size = crop_hr.shape
hr_rgb = crop_hr.contiguous().view(3, -1).permute(1, 0)
results['gt'] = hr_rgb
elif self.scale is not None and 'lq' in results:
_, h_lr, w_lr = results['lq'].shape
self.target_size = (round(h_lr * self.scale),
round(w_lr * self.scale))
else:
assert self.target_size is not None
assert len(self.target_size) >= 2
hr_coord = make_coord(self.target_size[-2:])
if self.sample_quantity is not None and 'gt' in results:
sample_lst = np.random.choice(
len(hr_coord), self.sample_quantity, replace=False)
hr_coord = hr_coord[sample_lst]
results['gt'] = results['gt'][sample_lst]
# Preparations for cell decoding
cell = torch.ones_like(hr_coord)
cell[:, 0] *= 2 / self.target_size[-2]
cell[:, 1] *= 2 / self.target_size[-1]
results['coord'] = hr_coord
results['cell'] = cell
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'sample_quantity={self.sample_quantity}, '
f'scale={self.scale}, target_size={self.target_size}')
return repr_str
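# Test-time sketch (illustrative shapes): with only 'lq' and scale=2, an
# 8x8 input yields coordinates and cells for the 16x16 target grid,
# assuming make_coord returns flattened (H*W, 2) coordinates (as in LIIF).
def _demo_generate_coordinate_and_cell():
    gen = GenerateCoordinateAndCell(scale=2)
    out = gen({'lq': torch.zeros(3, 8, 8)})
    return out['coord'].shape, out['cell'].shape  # both (256, 2)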
| 6,056 | 34.629412 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/normalization.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
from ..registry import PIPELINES
@PIPELINES.register_module()
class Normalize:
"""Normalize images with the given mean and std value.
    Required keys are the keys in attribute "keys"; added or modified keys
    are the keys in attribute "keys" plus "img_norm_cfg".
    It also supports normalizing a list of images.
Args:
keys (Sequence[str]): The images to be normalized.
mean (np.ndarray): Mean values of different channels.
std (np.ndarray): Std values of different channels.
        to_rgb (bool): Whether to convert channels from BGR to RGB.
        save_original (bool): Whether to keep a copy of the unnormalized
            images in the results dict under the key '<key>_unnormalised'.
            Default: False.
    """
def __init__(self, keys, mean, std, to_rgb=False, save_original=False):
self.keys = keys
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_rgb = to_rgb
self.save_original = save_original
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for key in self.keys:
if isinstance(results[key], list):
if self.save_original:
results[key + '_unnormalised'] = [
v.copy() for v in results[key]
]
results[key] = [
mmcv.imnormalize(v, self.mean, self.std, self.to_rgb)
for v in results[key]
]
else:
if self.save_original:
results[key + '_unnormalised'] = results[key].copy()
results[key] = mmcv.imnormalize(results[key], self.mean,
self.std, self.to_rgb)
results['img_norm_cfg'] = dict(
mean=self.mean, std=self.std, to_rgb=self.to_rgb)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, mean={self.mean}, std={self.std}, '
f'to_rgb={self.to_rgb})')
return repr_str
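# Normalization sketch (illustrative values): mean 0 and std 255 rescale a
# uint8-range image to roughly [0, 1].
def _demo_normalize():
    norm = Normalize(keys=['lq'], mean=[0, 0, 0], std=[255, 255, 255])
    results = {'lq': np.full((4, 4, 3), 255, np.float32)}
    return norm(results)['lq'].mean()  # ~1.0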
@PIPELINES.register_module()
class RescaleToZeroOne:
"""Transform the images into a range between 0 and 1.
Required keys are the keys in attribute "keys", added or modified keys are
the keys in attribute "keys".
It also supports rescaling a list of images.
Args:
keys (Sequence[str]): The images to be transformed.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for key in self.keys:
if isinstance(results[key], list):
results[key] = [
v.astype(np.float32) / 255. for v in results[key]
]
else:
results[key] = results[key].astype(np.float32) / 255.
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
| 3,396 | 31.663462 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/utils/setup_env.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import platform
import warnings
import cv2
import torch.multiprocessing as mp
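# Usage sketch (illustrative config path):
#   cfg = mmcv.Config.fromfile('configs/example_config.py')
#   setup_multi_processes(cfg)
# Call this once, before building the dataloaders, so the start method and
# thread limits apply to every worker process.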
def setup_multi_processes(cfg):
"""Setup multi-processing environment variables."""
# set multi-process start method as `fork` to speed up the training
if platform.system() != 'Windows':
mp_start_method = cfg.get('mp_start_method', 'fork')
current_method = mp.get_start_method(allow_none=True)
if current_method is not None and current_method != mp_start_method:
warnings.warn(
f'Multi-processing start method `{mp_start_method}` is '
f'different from the previous setting `{current_method}`.'
f'It will be force set to `{mp_start_method}`. You can change '
f'this behavior by changing `mp_start_method` in your config.')
mp.set_start_method(mp_start_method, force=True)
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = cfg.get('opencv_num_threads', 0)
cv2.setNumThreads(opencv_num_threads)
# setup OMP threads
    # This code is adapted from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa
if 'OMP_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
omp_num_threads = 1
warnings.warn(
f'Setting OMP_NUM_THREADS environment variable for each process '
f'to be {omp_num_threads} in default, to avoid your system being '
f'overloaded, please further tune the variable for optimal '
f'performance in your application as needed.')
os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)
# setup MKL threads
if 'MKL_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
mkl_num_threads = 1
warnings.warn(
f'Setting MKL_NUM_THREADS environment variable for each process '
f'to be {mkl_num_threads} in default, to avoid your system being '
f'overloaded, please further tune the variable for optimal '
f'performance in your application as needed.')
os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
| 2,219 | 45.25 | 112 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/utils/logger.py | # Copyright (c) OpenMMLab. All rights reserved.
import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
e.g., "mmedit".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
# root logger name: mmedit
logger = get_logger(__name__.split('.')[0], log_file, log_level)
return logger
| 968 | 33.607143 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/utils/collect_env.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash
import mmedit
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
env_info['MMEditing'] = f'{mmedit.__version__}+{get_git_hash()[:7]}'
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print('{}: {}'.format(name, val))
| 481 | 24.368421 | 72 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/utils/cli.py | # Copyright (c) OpenMMLab. All rights reserved.
import re
import sys
import warnings
def modify_args():
for i, v in enumerate(sys.argv):
if i == 0:
assert v.endswith('.py')
elif re.match(r'--\w+_.*', v):
new_arg = v.replace('_', '-')
warnings.warn(
f'command line argument {v} is deprecated, '
f'please use {new_arg} instead.',
category=DeprecationWarning,
)
sys.argv[i] = new_arg
| 511 | 25.947368 | 60 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/utils/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .cli import modify_args
from .logger import get_root_logger
from .setup_env import setup_multi_processes
__all__ = ['get_root_logger', 'setup_multi_processes', 'modify_args']
| 229 | 31.857143 | 69 | py |
SLOPpy | SLOPpy-main/SLOPpy_Run.py | import SLOPpy
if __name__ == '__main__':
SLOPpy.sloppy_run()
| 123 | 11.4 | 26 | py |
SLOPpy | SLOPpy-main/setup.py | from setuptools import setup
# Inspired from here:
# https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/
# https://realpython.com/pypi-publish-python-package/#prepare-your-package-for-publication
# read the contents of your README file
from pathlib import Path
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
setup(
name="SLOPpy-package",
version='1.2',
author="Daniela Sicilia, Luca Malavolta, et al.",
author_email = '[email protected], [email protected]',
url = 'https://github.com/LucaMalavolta/SLOPpy',
packages =['SLOPpy', 'SLOPpy.subroutines', 'SLOPpy.instruments'],
license = 'MIT License',
description ='SLOPpy: Spectral Lines Of Planets with python',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3'
],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'SLOPpy_run=SLOPpy.sloppy_run:sloppy_run',
]
},
zip_safe=False,
install_requires=[
'numpy>=1.22',
'numba>=0.55.2',
'scipy>=1.8.1',
'matplotlib>=3.5.2',
'astropy>=5.1',
'astroquery>=0.4',
'pyerfa>=2.0',
'argparse>=1.4',
'oyaml>=1.0',
'emcee>=3.1.2',
'pyyaml',
'h5py>=3.7.0',
'tqdm>=4.60',
'pygtc>=0.4.1',
'tinygp>=0.2.2',
'PyAstronomy>=0.18',
'sphinx-book-theme',
'myst-parser',
'myst-nb',
],
setup_requires=['setuptools']
)
| 1,994 | 31.177419 | 90 | py |
SLOPpy | SLOPpy-main/scripts/planetary_velocity_plot.py | """from classes.kepler_exo import *
# Mass of the star HD189733 (in Solar masses)
#Ms = 0.823
Ms = 1.148
# Mass of the planet (in Solar masses)
#Mp = 1.138 / 1.047348644e3
Mp = 0.69 / 1.047348644e3
K1 = kepler_K1(Mp,Ms,3.52474854657,86.59,0.0082)
print K1
## update
"""
import matplotlib.pyplot as plt
import numpy as np
from SLOPpy.subroutines.constants import *
import argparse
from scipy.optimize import fsolve
from SLOPpy.subroutines.kepler_exo import *
def get_mass(M_star2, M_star1, Period, K1, e0):
# M_star1, M_star2 in solar masses
# P in days -> Period is converted in seconds in the routine
# inclination assumed to be 90 degrees
# Gravitational constant is given in m^3 kg^-1 s^-2
# output in m/s
    output = K1 - (2. * np.pi * G_grav * M_sun / 86400.0) ** (1.0 / 3.0) \
        * (1.0 / np.sqrt(1.0 - e0 ** 2.0)) * Period ** (-1.0 / 3.0) \
        * (M_star2 * (M_star1 + M_star2) ** (-2.0 / 3.0))
return output
star_mass = 0.500
P = 5.
i = 90.
e = 0.00
planet_mass = np.arange(0,20, 0.1)
planet_K = planet_mass*0.
for i_val, m_val in enumerate(planet_mass):
    planet_K[i_val] = kepler_K1(m_val * Mjups, star_mass, P, i, e) / 1000.
plt.plot(planet_mass, planet_K)
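# Axis labels for the sketch above; K_p is in km/s because of the /1000
# factor in the loop.
plt.xlabel('planet mass [M_Jup]')
plt.ylabel('K_p [km/s]')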
plt.show()
| 1,531 | 26.357143 | 131 | py |
SLOPpy | SLOPpy-main/scripts/absorption_depths.py | import numpy as np
import matplotlib.pyplot as plt
import pickle
#w,f1,f2,f3,fall,sigma=np.loadtxt('/home/sicilia/Astro/Wyttenbach/HD189733_589289_1200_transmission_spectrum_TransSpec_atmcorr.rdb',unpack=True,skiprows=2)
#w,t1,sigma=np.loadtxt('/home/sicilia/Astro/Wyttenbach/transmission_spectrum_1.rdb',unpack=True)
#w,t2,sigma=np.loadtxt('/home/sicilia/Astro/Wyttenbach/transmission_spectrum_2.rdb',unpack=True)
#w,t3,sigma=np.loadtxt('/home/sicilia/Astro/Wyttenbach/transmission_spectrum_3.rdb',unpack=True)
#w,t,sigma=np.loadtxt('/home/sicilia/Astro/HD189733/list/average_transmission_spectrum2.txt',unpack=True)
#w,t,sigma=np.loadtxt('/home/sicilia/Astro/Wyttenbach/test.rdb',unpack=True)
#n, w, t, sigma = np.loadtxt('/home/sicilia/Astro/Wyttenbach/Wyttenbach_by_Casasayas.txt',unpack=True)
#our_average = pickle.load(open('/home/sicilia/Astro/HD189733/list/HD1897333_HARPS_transmission_planetRF_average.p', "rb"))
#data_night1 = pickle.load(open('/home/sicilia/Astro/HD189733/list/HD1897333_HARPS_2006-09-07_transmission_planetRF_second_correction.p',"rb"))
#data_night2 = pickle.load(open('/home/sicilia/Astro/HD189733/list/HD1897333_HARPS_2007-07-19_transmission_planetRF_second_correction.p',"rb"))
data_night3 = pickle.load(open('/home/sicilia/Astro/HD189733/list/HD1897333_HARPS_2007-08-28_transmission_planetRF_second_correction.p',"rb"))
'''
w=our_average['wave']
t=(our_average['average']-1.)*100
t[t>1.1]=0.
sigma=our_average['average_err']
'''
w=data_night3['wave']
t=(data_night3['average']-1.)*100
t[t>1.1]=0.
sigma=data_night3['average_err']
#t = (t + 1)
#sigma = sigma/100
#central band
#C6_D2 = lambda wl: ((wl >= 5888.416) and (wl <= 5891.396)) or ((wl >= 5894.389) and (wl <= 5897.369))
w1=5889.906
w2=5895.879
wc=5892.89
#shifting the spectrum and not the passbands
#w = w - 0.04
#w = w + 0.16
#w = w + 0.075
#w = w + 0.05
#blue band & red band
B = (w >= 5874.89) & (w <= 5886.89)
R = (w >= 5898.89) & (w <= 5910.89)
#red band for Casasayas
#R = (w >= 5898.89) & (w <= 5907.89)
# for the C6 band, add and subtract 1.48 around the center
C12 = (w >= 5886.8925) & (w <= 5898.8925)
C6_D2 = (w >= 5888.426) & (w <= 5891.386)
C6_D1 = (w >= 5894.399) & (w <= 5897.359)
C3_D2 = (w >= 5889.156) & (w <= 5890.656)
C3_D1 = (w >= 5895.129) & (w <= 5896.629)
C1_5_D2 = (w >= 5889.531) & (w <= 5890.281)
C1_5_D1 = (w >= 5895.504) & (w <= 5896.254)
C0_75_D2 = (w >= 5889.718) & (w <= 5890.094)
C0_75_D1 = (w >= 5895.691) & (w <= 5896.067)
C0_375_D2 = (w >= 5889.812) & (w <= 5890.000)
C0_375_D1 = (w >= 5895.785) & (w <= 5895.973)
'''
#including -0.16 A
B = (w >= 5874.73) & (w <= 5886.73)
R = (w >= 5898.73) & (w <= 5910.73)
C12 = (w >= 5886.73) & (w <= 5898.73)
C6_D2 = (w >= 5888.246) & (w <= 5891.246)
C6_D1 = (w >= 5894.219) & (w <= 5897.219)
C3_D2 = (w >= 5888.996) & (w <= 5890.496)
C3_D1 = (w >= 5894.969) & (w <= 5896.469)
C1_5_D2 = (w >= 5889.371) & (w <= 5890.121)
C1_5_D1 = (w >= 5895.344) & (w <= 5896.094)
C0_75_D2 = (w >= 5889.558) & (w <= 5889.934)
C0_75_D1 = (w >= 5895.531) & (w <= 5895.907)
C0_375_D2 = (w >= 5889.652) & (w <= 5889.840)
C0_375_D1 = (w >= 5895.625) & (w <= 5895.813)
#subtracting the bands as W prescribes, and minus 0.16
C12 = (w >= 5886.73) & (w <= 5898.73)
C6_D2 = (w >= 5886.746) & (w <= 5892.746)
C6_D1 = (w >= 5892.719) & (w <= 5898.719)
C3_D2 = (w >= 5888.246) & (w <= 5891.246)
C3_D1 = (w >= 5894.219) & (w <= 5897.219)
C1_5_D2 = (w >= 5888.996) & (w <= 5890.496)
C1_5_D1 = (w >= 5894.969) & (w <= 5896.469)
C0_75_D2 = (w >= 5889.371) & (w <= 5890.121)
C0_75_D1 = (w >= 5895.344) & (w <= 5896.094)
C0_375_D2 = (w >= 5889.558) & (w <= 5889.934)
C0_375_D1 = (w >= 5895.531) & (w <= 5895.907)
'''
flux_C12, sum_weights_C12 = np.average(t[C12],axis=0,weights=1/sigma[C12]**2,returned=True)
flux_C6_D2, sum_weights_C6_D2 = np.average(t[C6_D2],axis=0,weights=1/sigma[C6_D2]**2,returned=True)
#flux_C6_D2, sum_weights_C6_D2 = np.average((np.array(filter(C6_D2, t))),weights=1/np.array(filter(C6_D2, sigma)**2),returned=True)
flux_C6_D1, sum_weights_C6_D1 = np.average(t[C6_D1],axis=0,weights=1/sigma[C6_D1]**2,returned=True)
flux_C3_D2, sum_weights_C3_D2 = np.average(t[C3_D2],axis=0,weights=1/sigma[C3_D2]**2,returned=True)
flux_C3_D1, sum_weights_C3_D1 = np.average(t[C3_D1],axis=0,weights=1/sigma[C3_D1]**2,returned=True)
flux_C1_5_D2, sum_weights_C1_5_D2 = np.average(t[C1_5_D2],axis=0,weights=1/sigma[C1_5_D2]**2,returned=True)
flux_C1_5_D1, sum_weights_C1_5_D1 = np.average(t[C1_5_D1],axis=0,weights=1/sigma[C1_5_D1]**2,returned=True)
flux_C0_75_D2, sum_weights_C0_75_D2 = np.average(t[C0_75_D2],axis=0,weights=1/sigma[C0_75_D2]**2,returned=True)
flux_C0_75_D1, sum_weights_C0_75_D1 = np.average(t[C0_75_D1],axis=0,weights=1/sigma[C0_75_D1]**2,returned=True)
flux_C0_375_D2, sum_weights_C0_375_D2 = np.average(t[C0_375_D2],axis=0,weights=1/sigma[C0_375_D2]**2,returned=True)
flux_C0_375_D1, sum_weights_C0_375_D1 = np.average(t[C0_375_D1],axis=0,weights=1/sigma[C0_375_D1]**2,returned=True)
flux_B, sum_weights_B = np.average(t[B],axis=0,weights=1/sigma[B]**2,returned=True)
flux_R, sum_weights_R = np.average(t[R],axis=0,weights=1/sigma[R]**2,returned=True)
deltaC12 = flux_C12 - (flux_B + flux_R)/2
deltaC6_D2 = flux_C6_D2 - (flux_B + flux_R)/2
deltaC6_D1 = flux_C6_D1 - (flux_B + flux_R)/2
deltaC3_D2 = flux_C3_D2 - (flux_B + flux_R)/2
deltaC3_D1 = flux_C3_D1 - (flux_B + flux_R)/2
deltaC1_5_D2 = flux_C1_5_D2 - (flux_B + flux_R)/2
deltaC1_5_D1 = flux_C1_5_D1 - (flux_B + flux_R)/2
deltaC0_75_D2 = flux_C0_75_D2 - (flux_B + flux_R)/2
deltaC0_75_D1 = flux_C0_75_D1 - (flux_B + flux_R)/2
deltaC0_375_D2 = flux_C0_375_D2 - (flux_B + flux_R)/2
deltaC0_375_D1 = flux_C0_375_D1 - (flux_B + flux_R)/2
delta_medio_6 = (deltaC6_D2 + deltaC6_D1)/2
delta_medio_3 = (deltaC3_D2 + deltaC3_D1)/2
delta_medio_1_5 = (deltaC1_5_D2 + deltaC1_5_D1)/2
delta_medio_0_75 = (deltaC0_75_D2 + deltaC0_75_D1)/2
delta_medio_0_375 = (deltaC0_375_D2 + deltaC0_375_D1)/2
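# Error budget: each band flux above is an inverse-variance weighted mean
# whose variance is 1/sum(weights); averaging the D2/D1 passbands and
# subtracting the (B+R)/2 reference yields the 1/2 factors below, and the
# trailing factor of 100 converts the uncertainties to percent.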
sigma_deltaC12 = np.sqrt(1/sum_weights_C12 + 1/(2*sum_weights_B) + 1/(2*sum_weights_R)) * 100
sigma_deltaC6 = np.sqrt(1/sum_weights_C6_D2 + 1/sum_weights_C6_D1 + 1/sum_weights_B + 1/sum_weights_R)/2 * 100
sigma_deltaC3 = np.sqrt(1/sum_weights_C3_D2 + 1/sum_weights_C3_D1 + 1/sum_weights_B + 1/sum_weights_R)/2 * 100
sigma_deltaC1_5 = np.sqrt(1/sum_weights_C1_5_D2 + 1/sum_weights_C1_5_D1 + 1/sum_weights_B + 1/sum_weights_R)/2 * 100
sigma_deltaC0_75 = np.sqrt(1/sum_weights_C0_75_D2 + 1/sum_weights_C0_75_D1 + 1/sum_weights_B + 1/sum_weights_R)/2 * 100
sigma_deltaC0_375 = np.sqrt(1/sum_weights_C0_375_D2 + 1/sum_weights_C0_375_D1 + 1/sum_weights_B + 1/sum_weights_R)/2 * 100
print('delta(12) =', deltaC12, ' +- ', sigma_deltaC12)
print('delta(6) = ', delta_medio_6, ' +- ', sigma_deltaC6)
print('delta(3) = ', delta_medio_3, ' +- ', sigma_deltaC3)
print('delta(1.5) = ', delta_medio_1_5, ' +- ', sigma_deltaC1_5)
print('delta(0.75) =', delta_medio_0_75, ' +- ', sigma_deltaC0_75)
print('delta(0.375) =', delta_medio_0_375, ' +- ', sigma_deltaC0_375)
fig = plt.figure(figsize=(12, 6))
plt.plot(w,t)
plt.axvline(5892.89,c='k')
plt.axvspan(5886.89,5898.89,facecolor='g',alpha=0.3)
plt.axvspan(5874.89,5886.89,facecolor='b',alpha=0.3)
plt.axvspan(5898.89,5910.89,facecolor='r',alpha=0.3)
plt.axvspan(5888.426,5891.386,facecolor='g',alpha=0.4)
plt.axvspan(5894.399,5897.359,facecolor='g',alpha=0.4)
plt.axvspan(5889.156,5890.656,facecolor='g',alpha=0.5)
plt.axvspan(5895.129,5896.629,facecolor='g',alpha=0.5)
plt.axvspan(5889.531,5890.281,facecolor='g',alpha=0.6)
plt.axvspan(5895.504,5896.254,facecolor='g',alpha=0.6)
plt.axvspan(5889.718,5890.094,facecolor='g',alpha=0.7)
plt.axvspan(5895.691,5896.067,facecolor='g',alpha=0.7)
plt.axvspan(5889.812,5890.000,facecolor='g',alpha=0.8)
plt.axvspan(5895.785,5895.973,facecolor='g',alpha=0.8)
'''
plt.axvspan(5874.89,5886.89,facecolor='b',alpha=0.3)
plt.axvspan(5898.89,5910.89,facecolor='r',alpha=0.3)
plt.axvspan(5888.246,5891.246,facecolor='g',alpha=0.4)
plt.axvspan(5894.219,5897.219,facecolor='g',alpha=0.4)
plt.axvspan(5888.996,5890.496,facecolor='g',alpha=0.5)
plt.axvspan(5894.969,5896.469,facecolor='g',alpha=0.5)
plt.axvspan(5889.371,5890.121,facecolor='g',alpha=0.6)
plt.axvspan(5895.344,5896.094,facecolor='g',alpha=0.6)
plt.axvspan(5886.73,5898.73,facecolor='g',alpha=0.3)
plt.axvspan(5889.558,5889.934,facecolor='g',alpha=0.7)
plt.axvspan(5895.531,5895.907,facecolor='g',alpha=0.7)
plt.axvspan(5889.652,5889.840,facecolor='g',alpha=0.8)
plt.axvspan(5895.625,5895.813,facecolor='g',alpha=0.8)
'''
plt.xlabel('$\lambda$ [$\AA$]')
plt.ylabel('R-1')
plt.show()
| 8,507 | 45.747253 | 155 | py |
SLOPpy | SLOPpy-main/scripts/planetary_velocity.py | """from classes.kepler_exo import *
# Mass of the star HD189733 (in Solar masses)
#Ms = 0.823
Ms = 1.148
# Mass of the planet (in Solar masses)
#Mp = 1.138 / 1.047348644e3
Mp = 0.69 / 1.047348644e3
K1 = kepler_K1(Mp,Ms,3.52474854657,86.59,0.0082)
print K1
## update
"""
import matplotlib.pyplot as plt
import numpy as np
from SLOPpy.subroutines.constants import *
import argparse
from scipy.optimize import fsolve
from SLOPpy.subroutines.kepler_exo import *
def get_mass(M_star2, M_star1, Period, K1, e0):
# M_star1, M_star2 in solar masses
# P in days -> Period is converted in seconds in the routine
# inclination assumed to be 90 degrees
# Gravitational constant is given in m^3 kg^-1 s^-2
# output in m/s
    output = K1 - (2. * np.pi * G_grav * M_sun / 86400.0) ** (1.0 / 3.0) \
        * (1.0 / np.sqrt(1.0 - e0 ** 2.0)) * Period ** (-1.0 / 3.0) \
        * (M_star2 * (M_star1 + M_star2) ** (-2.0 / 3.0))
return output
parser = argparse.ArgumentParser(prog='planetary_velocity.py', description='Compute the expected semi-amplitude of the planet')
parser.add_argument('star_mass', type=float, nargs=1, help='Stellar mass [solar]')
parser.add_argument('plan_mass', type=float, nargs=1, help='Planet mass [Jupiter/Earth/K units, default Jupiter]')
parser.add_argument('period', type=float, nargs=1, help='Planetary period [days')
parser.add_argument('inclination', type=float, nargs=1, help='Planetary inclination [degrees]')
parser.add_argument('eccentricity', type=float, nargs=1, help='Planetary eccentricity [pure]')
parser.add_argument('-e', type=float, nargs='?', default=False, const=True, help='Planetary mass in Earth units')
parser.add_argument('-k', type=float, nargs='?', default=False, const=True, help='Interpret plan_mass as the RV semi-amplitude K [m/s] and solve for the mass')
args = parser.parse_args()
star_mass = args.star_mass[0]
P = args.period[0]
i = args.inclination[0]
e = args.eccentricity[0]
if args.e and args.k:
print('Either -k or -e, not both!')
quit()
planet_mass = args.plan_mass[0]
if args.e:
planet_mass *= Mears
elif args.k:
x0 = Mjups
K_input = planet_mass
planet_mass = fsolve(get_mass, x0, args=(star_mass, P, K_input, e))
else:
planet_mass *= Mjups
print(kepler_K1(planet_mass, star_mass, P, i, e))
#sampler = args.sample[0]
#file_conf = args.config_file[0]
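# Example invocation (illustrative numbers for a hot Jupiter):
#   python planetary_velocity.py 1.0 1.0 3.5 86.6 0.0
# prints the planetary RV semi-amplitude K_p in m/s; add -e to give the
# mass in Earth units, or -k to pass K [m/s] and solve for the mass first.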
| 2,556 | 35.014085 | 131 | py |
SLOPpy | SLOPpy-main/docs/conf.py | # -*- coding: utf-8 -*-
#
# PyORBIT documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 16 17:20:15 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import matplotlib
matplotlib.use('agg')
sys.path.insert(0, os.path.abspath('../'))
import SLOPpy
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['myst_parser',
"sphinx.ext.autosummary",
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
"sphinx.ext.viewcode",
'sphinx.ext.napoleon']
myst_enable_extensions = ["dollarmath", "colon_fence"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
#source_suffix = ['.rst', '.md']
source_suffix = {
".rst": "restructuredtext",
".ipynb": "myst-nb",
}
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SLOPpy'
copyright = u'2018-2022, Daniela Sicilia, Luca Malavolta'
author = u'Daniela Sicilia, Luca Malavolta'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(SLOPpy.__version__.split('.')[:-1])
# The full version, including alpha/beta/rc tags.
release = SLOPpy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_book_theme'
#html_logo = "_static/PyORBIT_logo_transp.png"
html_title = "SLOPpy"
html_copy_source = True
html_show_sourcelink = True
html_sourcelink_suffix = ""
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
"path_to_docs": "docs",
"repository_url": "https://github.com/LucaMalavolta/SLOPpy",
"repository_branch": "main",
"use_repository_button": True,
"use_issues_button": True,
#"home_page_in_toc": True,
"show_navbar_depth": 3,
"logo_only": True,
#"show_related": True
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
#html_sidebars = {
# '**': ['sidebar-logo.html','search-field.html',
# 'globaltoc.html',
# 'relations.html', # needs 'show_related': True theme option to display
## #'searchbox.html',
# ]
#}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SLOPpy_doc'
| 4,554 | 29.57047 | 80 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_spectrum_shortcuts.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.transmission_spectrum import *
from SLOPpy.transmission_spectrum_average import *
#__all__ = ['compute_transmission_spectrum_planetRF_iterative',
# 'plot_transmission_spectrum_planetRF_iterative',
# 'compute_transmission_spectrum_stellarRF_iterative',
# 'plot_transmission_spectrum_stellarRF_iterative',
# 'compute_transmission_spectrum_observerRF_iterative',
# 'plot_transmission_spectrum_observerRF_iterative',
# 'compute_transmission_spectrum_iterative',
# 'plot_transmission_spectrum_iterative']
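# Thin wrappers around compute/plot_transmission_spectrum(_average): each shortcut
# fixes the reference frame (planetRF / stellarRF / observerRF), while the
# *_iterative variants repeat the computation for every PCA iteration defined in
# the configuration (default: 5 iterations).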
def compute_transmission_spectrum_planetRF(config_in, lines_label):
compute_transmission_spectrum(config_in, lines_label, reference='planetRF')
def plot_transmission_spectrum_planetRF(config_in, lines_label, night_input, results_input=''):
plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='planetRF')
def compute_transmission_spectrum_stellarRF(config_in, lines_label):
compute_transmission_spectrum(config_in, lines_label, reference='stellarRF')
def plot_transmission_spectrum_stellarRF(config_in, lines_label, night_input, results_input=''):
plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='stellarRF')
def compute_transmission_spectrum_observerRF(config_in, lines_label):
compute_transmission_spectrum(config_in, lines_label, reference='observerRF')
def plot_transmission_spectrum_observerRF(config_in, lines_label, night_input, results_input=''):
plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='observerRF')
def compute_transmission_spectrum_planetRF_iterative(config_in, lines_label):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
compute_transmission_spectrum(config_in, lines_label, reference='planetRF', pca_iteration=it)
def compute_transmission_spectrum_stellarRF_iterative(config_in, lines_label):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
compute_transmission_spectrum(config_in, lines_label, reference='stellarRF', pca_iteration=it)
def compute_transmission_spectrum_observerRF_iterative(config_in, lines_label):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
compute_transmission_spectrum(config_in, lines_label, reference='observerRF', pca_iteration=it)
def compute_transmission_spectrum_iterative(config_in, lines_label):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
compute_transmission_spectrum(config_in, lines_label, reference='planetRF', pca_iteration=it)
def plot_transmission_spectrum_planetRF_iterative(config_in, lines_label, night_input, results_input=''):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='planetRF', pca_iteration=it)
def plot_transmission_spectrum_stellarRF_iterative(config_in, lines_label, night_input, results_input=''):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='stellarRF', pca_iteration=it)
def plot_transmission_spectrum_observerRF_iterative(config_in, lines_label, night_input, results_input=''):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='observerRF', pca_iteration=it)
def plot_transmission_spectrum_iterative(config_in, lines_label, night_input, results_input=''):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='planetRF', pca_iteration=it)
def compute_transmission_spectrum_average_planetRF(config_in, lines_label):
compute_transmission_spectrum_average(config_in, lines_label, reference='planetRF')
def compute_transmission_spectrum_average_observerRF(config_in, lines_label):
compute_transmission_spectrum_average(config_in, lines_label, reference='observerRF')
def compute_transmission_spectrum_average_stellarRF(config_in, lines_label):
compute_transmission_spectrum_average(config_in, lines_label, reference='stellarRF')
def plot_transmission_spectrum_average_planetRF(config_in, lines_label, night_input='', results_input=''):
plot_transmission_spectrum_average(config_in, lines_label, night_input, results_input, reference='planetRF')
def plot_transmission_spectrum_average_observerRF(config_in, lines_label, night_input='', results_input=''):
plot_transmission_spectrum_average(config_in, lines_label, night_input, results_input, reference='observerRF')
def plot_transmission_spectrum_average_stellarRF(config_in, lines_label, night_input='', results_input=''):
plot_transmission_spectrum_average(config_in, lines_label, night_input, results_input, reference='stellarRF')
def compute_transmission_spectrum_average_planetRF_iterative(config_in, lines_label):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
compute_transmission_spectrum_average(config_in, lines_label, reference='planetRF', pca_iteration=it)
def compute_transmission_spectrum_average_observerRF_iterative(config_in, lines_label):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
compute_transmission_spectrum_average(config_in, lines_label, reference='observerRF', pca_iteration=it)
def compute_transmission_spectrum_average_stellarRF_iterative(config_in, lines_label):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
compute_transmission_spectrum_average(config_in, lines_label, reference='stellarRF', pca_iteration=it)
def compute_transmission_spectrum_average_iterative(config_in, lines_label):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
compute_transmission_spectrum_average(config_in, lines_label, reference='planetRF', pca_iteration=it)
def plot_transmission_spectrum_average_planetRF_iterative(config_in, lines_label, night_input='', results_input=''):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
plot_transmission_spectrum_average(config_in, lines_label, night_input, results_input, reference='planetRF', pca_iteration=it)
def plot_transmission_spectrum_average_observerRF_iterative(config_in, lines_label, night_input='', results_input=''):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
plot_transmission_spectrum_average(config_in, lines_label, night_input, results_input, reference='observerRF', pca_iteration=it)
def plot_transmission_spectrum_average_stellarRF_iterative(config_in, lines_label, night_input='', results_input=''):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
plot_transmission_spectrum_average(config_in, lines_label, night_input, results_input, reference='stellarRF', pca_iteration=it)
def plot_transmission_spectrum_average_iterative(config_in, lines_label, night_input='', results_input=''):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
plot_transmission_spectrum_average(config_in, lines_label, night_input, results_input, reference='planetRF', pca_iteration=it)
| 8,420 | 45.783333 | 136 | py |
SLOPpy | SLOPpy-main/SLOPpy/pca_preparation.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.plot_subroutines import *
__all__ = ["compute_pca_preparation"]
def compute_pca_preparation(config_in, append_name=None):
if append_name:
subroutine_name = 'pca_preparation_' + append_name
filename = 'pca_preparation_' + append_name
else:
subroutine_name = 'pca_preparation'
filename = 'pca_preparation'
night_dict = from_config_get_nights(config_in)
preparation_dict = {
'fit_iters': 5,
'fit_order': 3,
'fit_sigma': 3
}
for night in night_dict:
try:
preparation = load_from_cpickle(filename, config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
        except (FileNotFoundError, IOError):
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving input and calibration data """
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'],
use_refraction=True, use_telluric=False, use_interstellar=False,
use_telluric_spline= False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
        obs_ref = lists['observations'][0]
n_obs = len(lists['observations'])
n_orders = input_data[obs_ref]['n_orders']
n_pixels = input_data[obs_ref]['n_pixels']
stack_wave = np.zeros([n_obs, n_orders, n_pixels], dtype=np.double)
stack_e2ds = np.zeros([n_obs, n_orders, n_pixels], dtype=np.double)
stack_e2ds_err = np.zeros([n_obs, n_orders, n_pixels], dtype=np.double)
stack_bjd = np.zeros(n_obs, dtype=np.double)
stack_airmass = np.zeros(n_obs, dtype=np.double)
for i_obs, obs in enumerate(lists['observations']):
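            # Correction factor removing the blaze function and the relative pixel
            # size (step / median step), so all observations are stacked on a
            # comparable flux scale.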
blaze_wave_refactoring = 1. / calib_data['blaze'] / (input_data[obs]['step']/np.median(input_data[obs]['step']))
stack_wave[i_obs, :, :] = input_data[obs]['wave']
stack_e2ds[i_obs, :, :] = input_data[obs]['e2ds'] * blaze_wave_refactoring
stack_e2ds_err[i_obs, :, :] = input_data[obs]['e2ds_err'] * blaze_wave_refactoring
stack_bjd[i_obs] = input_data[obs]['BJD']
stack_airmass[i_obs] = input_data[obs]['AIRMASS']
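            # Normalize each order by its median flux, so that observations taken
            # at different exposure levels share the same continuum level.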
median = np.nanmedian(stack_e2ds[i_obs, :, :], axis=1)
for i_orders in range(0, n_orders):
stack_e2ds[i_obs, i_orders, :] /= median[i_orders]
stack_e2ds_err[i_obs, i_orders, :] /= median[i_orders]
poly_flag = (stack_e2ds > 0.001)
#poly_flag[:, :, :20]= False
#poly_flag[:, :, 20:]= False
stack_polyfit = np.zeros_like(stack_e2ds)
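        # Order-by-order continuum modelling: a Chebyshev polynomial is fitted to
        # all observations of the same order at once, iterating fit_iters times
        # and clipping points more than fit_sigma standard deviations *below* the
        # fit (one-sided clipping, so absorption features are excluded), then the
        # resulting continuum model is divided out.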
for i_orders in range(0,n_orders):
order_wave = stack_wave[:, i_orders, :]
order_e2ds = stack_e2ds[:, i_orders, :]
order_flag = poly_flag[:, i_orders, :]
for n_iter in range(0, preparation_dict['fit_iters']):
coeff_order = np.polynomial.chebyshev.chebfit(
order_wave[order_flag],
order_e2ds[order_flag],
preparation_dict['fit_order'])
fit_order = \
np.polynomial.chebyshev.chebval(order_wave, coeff_order)
fit_shaped = np.reshape(fit_order, np.shape(order_wave))
residuals = order_e2ds - fit_shaped
if n_iter < preparation_dict['fit_iters'] - 1:
std = np.std(residuals[order_flag])
order_flag = (order_flag) & (residuals > -preparation_dict['fit_sigma'] * std)
            stack_e2ds[:, i_orders, :] /= fit_shaped
            stack_e2ds_err[:, i_orders, :] /= fit_shaped
            stack_polyfit[:, i_orders, :] = fit_shaped
#plt.imshow(stack_e2ds[:, i_orders, :], interpolation='none', aspect='auto', vmin=0.25, vmax=1.5)
#plt.colorbar()
#plt.show()
preparation = {
'stack_wave': stack_wave,
'stack_e2ds': stack_e2ds,
'stack_e2ds_err': stack_e2ds_err,
            'stack_bjd': stack_bjd,
            'stack_airmass': stack_airmass,
'stack_polyfit': stack_polyfit,
'frame': {
'n_obs': n_obs,
'n_orders': n_orders,
'n_pixels': n_pixels,
},
'fit_pams': preparation_dict
}
save_to_cpickle(filename, preparation, config_in['output'], night)
| 5,114 | 37.458647 | 124 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_spectrum.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.math_functions import *
from SLOPpy.transmission_spectrum_preparation import compute_transmission_spectrum_preparation
from scipy.signal import savgol_filter
__all__ = ['compute_transmission_spectrum',
'plot_transmission_spectrum']
subroutine_name = 'transmission_spectrum'
sampler_name = 'emcee'
def compute_transmission_spectrum(config_in, lines_label, reference='planetRF', night_input='', preparation_only=False, pca_iteration=-1):
results_list_default = ['user',
'mcmc_night_MED',
'mcmc_night_MAP',
'mcmc_global_MED',
'mcmc_global_MAP']
# compute_transmission_spectrum_preparation(config_in)
night_dict = from_config_get_nights(config_in)
### transmission_dict = from_config_get_transmission(config_in)
pca_parameters = from_config_get_pca_parameters(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
norm_dict = lines_dict.get('normalization', {})
norm_pams = {}
norm_pams['normalize_transmission'] = norm_dict.get('normalize_transmission', True)
norm_pams['normalization_model'] = norm_dict.get('normalization_model', 'polynomial')
""" Normalization parameters for polynomial model"""
norm_pams['model_poly_degree'] = norm_dict.get('model_poly_degree', 2)
norm_pams['spectra_poly_degree'] = norm_dict.get('spectra_poly_degree', 2)
norm_pams['lower_threshold'] = norm_dict.get('lower_threshold', 0.950)
norm_pams['percentile_selection'] = norm_dict.get('percentile_selection', 10)
""" Normalization parameters using Savitzky-Golay filter"""
norm_pams['window_length'] = norm_dict.get('window_length', 101)
norm_pams['polyorder'] = norm_dict.get('polyorder', 3)
norm_pams['mode'] = norm_dict.get('mode', 'nearest')
norm_pams['cval'] = norm_dict.get('cval', 1.0)
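    # For reference, a sketch of the corresponding `normalization` block in the
    # YAML configuration (values are illustrative; only the keys parsed above
    # are actually read):
    #
    #   normalization:
    #     normalize_transmission: True
    #     normalization_model: polynomial    # or 'savgol' / 'savitzky-golay'
    #     model_poly_degree: 2
    #     spectra_poly_degree: 2
    #     lower_threshold: 0.950
    #     percentile_selection: 10
    #     window_length: 101                 # Savitzky-Golay parameters
    #     polyorder: 3
    #     mode: nearest
    #     cval: 1.0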
shared_data = load_from_cpickle('shared', config_in['output'])
clv_rm_correction = lines_dict.get('clv_rm_correction', True)
""" Using the line-specific range to define the transmission spectrum region """
shared_selection = (shared_data['coadd']['wave'] >= lines_dict['range'][0]) \
& (shared_data['coadd']['wave'] < lines_dict['range'][1])
binned_selection = (shared_data['binned']['wave'] >= lines_dict['range'][0]) \
& (shared_data['binned']['wave'] < lines_dict['range'][1])
transmission_template = {
'subroutine': subroutine_name,
'range': lines_dict['range'],
'wave': shared_data['coadd']['wave'][shared_selection],
'step': shared_data['coadd']['step'][shared_selection],
        'size': int(np.sum(shared_selection)),
        'binned_wave': shared_data['binned']['wave'][binned_selection],
        'binned_step': shared_data['binned']['step'][binned_selection],
        'binned_size': int(np.sum(binned_selection))
}
for night in night_dict:
print()
print("Running {0:45s} for {1:20s} Night:{2:15s} ".format(subroutine_name, lines_label, night))
preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
if preparation_input.get('pca_output', False):
if pca_iteration >= 0:
it_string = str(pca_iteration).zfill(2)
else:
it_string = str(pca_parameters.get('ref_iteration', 3)).zfill(2)
preparation = preparation_input[it_string]
else:
preparation = preparation_input
it_string = ''
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
results_list = results_list_default.copy()
binned_mcmc_night = check_existence_cpickle(
'transmission_binned_mcmc_'+sampler_name+'_results', config_in['output'], night, lines_label, it_string)
binned_mcmc_global = check_existence_cpickle(
'transmission_binned_mcmc_'+sampler_name+'_results', config_in['output'], lines=lines_label, it_string=it_string)
mcmc_night = check_existence_cpickle(
'transmission_mcmc_'+sampler_name+'_results', config_in['output'], night, lines_label, it_string=it_string)
mcmc_global = check_existence_cpickle(
'transmission_mcmc_'+sampler_name+'_results', config_in['output'], lines=lines_label, it_string=it_string)
if mcmc_night and mcmc_global:
mcmc_results_night = load_from_cpickle(
'transmission_mcmc_'+sampler_name+'_results', config_in['output'], night, lines_label, it_string=it_string)
mcmc_results_global = load_from_cpickle(
'transmission_mcmc_'+sampler_name+'_results', config_in['output'], lines=lines_label, it_string=it_string)
print(' Observational parameters from MCMC fit of unbinned data and configuration file')
elif binned_mcmc_night and binned_mcmc_global:
mcmc_results_night = load_from_cpickle(
'transmission_binned_mcmc_'+sampler_name+'_results', config_in['output'], night, lines_label, it_string=it_string)
mcmc_results_global = load_from_cpickle(
'transmission_binned_mcmc_'+sampler_name+'_results', config_in['output'], lines=lines_label, it_string=it_string)
print(' Observational parameters from MCMC fit of binned data and configuration file')
else:
print(' Observational parameters from configuration file')
results_list = ['user']
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
if clv_rm_correction:
try:
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night, lines_label)
except (FileNotFoundError, IOError):
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night)
for results_selection in results_list:
try:
transmission = load_from_cpickle(subroutine_name+'_'+reference + '_' +
results_selection, config_in['output'], night, lines_label, it_string=it_string)
print("{0:45s} Night:{1:15s} {2:s} {3:s} {4:s}".format(
subroutine_name, night, lines_label, results_selection, 'Retrieved'))
continue
except (FileNotFoundError, IOError):
print("{0:45s} Night:{1:15s} {2:s} {3:s} {4:s}".format(
subroutine_name, night, lines_label, results_selection, 'Computing'))
transmission = transmission_template.copy()
if len(it_string) > 0:
transmission['pca_output'] = True
else:
transmission['pca_output'] = False
print_warning = True
for obs in lists['observations']:
""" we start from the e2ds file, after correction for blaze and
division by the master-out
Observation data:
wave: input_data[obs]['wave']
step: input_data[obs]['step']
flux: preparation[obs]['deblazed']
ferr: preparation[obs]['deblazed_err']
"""
transmission[obs] = {}
transmission[obs] = {
'BJD': input_data[obs]['BJD'],
'AIRMASS': input_data[obs]['AIRMASS']
}
""" Shift into planetary reference system is the default
choice"""
if results_selection == 'user':
planet_R_factor = observational_pams.get('Rp_factor', 1.00000)
if reference in ['observer', 'observerRF', 'ORF']:
rv_shift = 0.000
rv_shift_clv = -observational_pams[obs]['rv_shift_ORF2SRF']
elif reference in ['stellar', 'stellarRF', 'SRF']:
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
rv_shift_clv = 0.0000
else:
rv_shift = observational_pams[obs]['rv_shift_ORF2PRF']
rv_shift_clv = observational_pams[obs]['rv_shift_SRF2PRF']
elif results_selection == 'mcmc_night_MED':
planet_R_factor = mcmc_results_night['results']['planet_R']
if reference in ['observer', 'observerRF', 'ORF']:
rv_shift = 0.000
#rv_shift_clv = mcmc_results_night['results']['observational_pams'][obs]['rv_shift_ORF2SRF']
rv_shift_clv = -observational_pams[obs]['rv_shift_ORF2SRF']
elif reference in ['stellar', 'stellarRF', 'SRF']:
#rv_shift = mcmc_results_night['results']['observational_pams'][obs]['rv_shift_ORF2SRF']
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
rv_shift_clv = 0.0000
else:
rv_shift = mcmc_results_night['results']['observational_pams'][obs]['rv_shift_ORF2PRF']
rv_shift_clv = mcmc_results_night['results']['observational_pams'][obs]['rv_shift_SRF2PRF']
elif results_selection == 'mcmc_night_MAP':
planet_R_factor = mcmc_results_night['results_MAP']['planet_R']
if reference in ['observer', 'observerRF', 'ORF']:
rv_shift = 0.000
#rv_shift_clv = mcmc_results_night['results_MAP']['observational_pams'][obs]['rv_shift_ORF2SRF']
                        rv_shift_clv = -observational_pams[obs]['rv_shift_ORF2SRF']
elif reference in ['stellar', 'stellarRF', 'SRF']:
#rv_shift = mcmc_results_night['results_MAP']['observational_pams'][obs]['rv_shift_ORF2SRF']
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
rv_shift_clv = 0.0000
else:
rv_shift = mcmc_results_night['results_MAP']['observational_pams'][obs]['rv_shift_ORF2PRF']
rv_shift_clv = mcmc_results_night['results_MAP']['observational_pams'][obs]['rv_shift_SRF2PRF']
elif results_selection == 'mcmc_global_MED':
planet_R_factor = mcmc_results_global['results']['planet_R']
if reference in ['observer', 'observerRF', 'ORF']:
rv_shift = 0.000
#rv_shift_clv = mcmc_results_global['results']['observational_pams'][obs]['rv_shift_ORF2SRF']
                        rv_shift_clv = -observational_pams[obs]['rv_shift_ORF2SRF']
elif reference in ['stellar', 'stellarRF', 'SRF']:
#rv_shift = mcmc_results_global['results']['observational_pams'][obs]['rv_shift_ORF2SRF']
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
rv_shift_clv = 0.0000
else:
rv_shift = mcmc_results_global['results']['observational_pams'][obs]['rv_shift_ORF2PRF']
rv_shift_clv = mcmc_results_global['results']['observational_pams'][obs]['rv_shift_SRF2PRF']
elif results_selection == 'mcmc_global_MAP':
planet_R_factor = mcmc_results_global['results_MAP']['planet_R']
if reference in ['observer', 'observerRF', 'ORF']:
rv_shift = 0.000
#rv_shift_clv = mcmc_results_global['results_MAP']['observational_pams'][obs]['rv_shift_ORF2SRF']
                        rv_shift_clv = -observational_pams[obs]['rv_shift_ORF2SRF']
elif reference in ['stellar', 'stellarRF', 'SRF']:
#rv_shift = mcmc_results_global['results_MAP']['observational_pams'][obs]['rv_shift_ORF2SRF']
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
rv_shift_clv = 0.0000
else:
rv_shift = mcmc_results_global['results_MAP']['observational_pams'][obs]['rv_shift_ORF2PRF']
rv_shift_clv = mcmc_results_global['results_MAP']['observational_pams'][obs]['rv_shift_SRF2PRF']
""" Step 2): rebin the 2D ratio spectra to 1D """
if transmission['pca_output']:
transmission[obs]['rebinned'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio'],
np.ones_like(calib_data['blaze']),
transmission['wave'],
transmission['step'],
preserve_flux=False,
rv_shift=rv_shift)
transmission[obs]['rebinned_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio_err'],
np.ones_like(calib_data['blaze']),
transmission['wave'],
transmission['step'],
rv_shift=rv_shift,
preserve_flux=False,
is_error=True)
else:
preserve_flux = input_data[obs].get('absolute_flux', True)
transmission[obs]['rebinned'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio'],
calib_data['blaze'],
transmission['wave'],
transmission['step'],
preserve_flux=preserve_flux,
rv_shift=rv_shift)
transmission[obs]['rebinned_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio_err'],
calib_data['blaze'],
transmission['wave'],
transmission['step'],
preserve_flux=preserve_flux,
rv_shift=rv_shift,
is_error=True)
                ### Small border bugfix: rebinning can leave a zero uncertainty on
                ### the first/last pixel, which would blow up the inverse-variance
                ### weights, so edge values are copied from the adjacent pixel.
                if transmission[obs]['rebinned_err'][0] == 0:
                    transmission[obs]['rebinned'][0] = transmission[obs]['rebinned'][1]
                    transmission[obs]['rebinned_err'][0] = transmission[obs]['rebinned_err'][1]
                if transmission[obs]['rebinned_err'][-1] == 0:
                    transmission[obs]['rebinned'][-1] = transmission[obs]['rebinned'][-2]
                    transmission[obs]['rebinned_err'][-1] = transmission[obs]['rebinned_err'][-2]
#import matplotlib.pyplot as plt
#plt.scatter(transmission['wave'], transmission[obs]['corrected'])
#plt.plot(transmission['wave'], transmission[obs]['continuum'])
#plt.scatter(transmission['wave'][selection], transmission[obs]['corrected'][selection], c='r')
#plt.plot(input_data[obs]['wave'][0,:], preparation[obs]['ratio_err'][0,:])
#plt.scatter(transmission['wave'], transmission[obs]['rebinned_err'], c='b')
#plt.axhline(0.0000, c='C2')
#plt.show()
#quit()
#import matplotlib.pyplot as plt
#plt.scatter(input_data[obs]['wave'], preparation[obs]['ratio'], s=2)
#plt.xlim(lines_dict['range'][0], lines_dict['range'][1])
# plt.show()
if clv_rm_correction:
"""" CLV + RM computation in the planetary reference frame """
transmission[obs]['clv_model_stellarRF'] = interpolate1d_grid_nocheck(planet_R_factor,
clv_rm_models['common']['radius_grid'],
clv_rm_models[obs]['clv_rm_model_convolved_normalized'])
transmission[obs]['clv_model_rebinned'] = \
rebin_1d_to_1d(clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
transmission[obs]['clv_model_stellarRF'],
transmission['wave'],
transmission['step'],
preserve_flux=False,
rv_shift=rv_shift_clv)
#import matplotlib.pyplot as plt
#print(obs, planet_R_factor)
#plt.plot(clv_rm_models['common']['wave'], transmission[obs]['clv_model_stellarRF'], zorder=100, c='C2')
#plt.scatter(transmission['wave'], transmission[obs]['clv_model_rebinned'], s=2)
# plt.show()
transmission[obs]['corrected'] = transmission[obs]['rebinned'] / \
transmission[obs]['clv_model_rebinned']
transmission[obs]['corrected_err'] = transmission[obs]['rebinned_err'] / \
transmission[obs]['clv_model_rebinned']
else:
transmission[obs]['clv_model_rebinned'] = np.ones(transmission['size'])
transmission[obs]['corrected'] = transmission[obs]['rebinned']
transmission[obs]['corrected_err'] = transmission[obs]['rebinned_err']
if print_warning:
print(' *** No CLV correction')
if norm_pams['normalize_transmission'] and norm_pams['normalization_model'] == 'polynomial':
""" Continuum normalization preparatory steps:
1) exclusion of regions with lines of interes
2) exclusion of regions with stellar lines
3) Polynomial fit of selected regions
Boolean array initialized to all True values
"""
transmission[obs]['line_exclusion'] = (transmission['wave'] > 0.)
""" Continuum normalization:
1) exclusion of regions with transmission lines under study, now
in the RF of the lines
"""
for line_key, line_val in lines_dict['lines'].items():
transmission[obs]['line_exclusion'] = transmission[obs]['line_exclusion'] & (
np.abs(transmission['wave']-line_val) > 3.)
""" Continuum normalization:
2) exclusion of regions with planetary lines, taking into account the planetary RV semi-amplitude
"""
if clv_rm_correction:
stellar_spectrum_rebinned = rebin_1d_to_1d(clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
clv_rm_models['common']['norm_convolved'],
transmission['wave'],
transmission['step'],
rv_shift=rv_shift_clv,
preserve_flux=False)
stellar_spectrum_derivative = first_derivative(transmission['wave'], stellar_spectrum_rebinned)
missing_model = (np.abs(stellar_spectrum_rebinned) < 0.0001)
cont_10perc = np.percentile(np.abs(stellar_spectrum_derivative), norm_pams['percentile_selection'])
#transmission[obs]['line_exclusion'] = transmission[obs]['line_exclusion'] \
# & (np.abs(stellar_spectrum_derivative) < cont_10perc) \
# & (stellar_spectrum_rebinned > norm_pams['lower_threshold'])
line_exclusion = transmission[obs]['line_exclusion'] \
& (np.abs(stellar_spectrum_derivative) < cont_10perc) \
& (stellar_spectrum_rebinned > norm_pams['lower_threshold'])
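                        # If the strict continuum cut keeps fewer than ~0.5% of the
                        # pixels, relax it by also accepting pixels where the CLV
                        # model is undefined (|flux| < 1e-4), so that a continuum
                        # fit remains possible.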
if np.sum(line_exclusion) < len(line_exclusion)/200:
transmission[obs]['line_exclusion'] = transmission[obs]['line_exclusion'] \
& ( missing_model | ((np.abs(stellar_spectrum_derivative) < cont_10perc) \
& (stellar_spectrum_rebinned > norm_pams['lower_threshold'])))
else:
transmission[obs]['line_exclusion'] = line_exclusion
elif print_warning:
print(" No stellar synthetic spectrum from CLV models")
print(" some stellar lines may be included in transmission normalization ")
print_warning = False
""" Continuum normalization:
3) Polynomial fit, everything is hard coded now but personalized
options can be implemented easily in the yaml file
"""
selection = transmission[obs]['line_exclusion'] & (
transmission[obs]['corrected'] > np.std(transmission[obs]['corrected']))
transmission[obs]['continuum_coeff'] = \
np.polynomial.chebyshev.chebfit(transmission['wave'][selection],
transmission[obs]['corrected'][selection],
norm_pams['spectra_poly_degree'])
transmission[obs]['continuum'] = np.polynomial.chebyshev.chebval(
transmission['wave'], transmission[obs]['continuum_coeff'])
transmission[obs]['normalized'] = transmission[obs]['corrected'] / transmission[obs]['continuum']
transmission[obs]['normalized_err'] = transmission[obs]['corrected_err'] / \
transmission[obs]['continuum']
#import matplotlib.pyplot as plt
#plt.scatter(transmission['wave'], transmission[obs]['corrected'])
#plt.plot(transmission['wave'], transmission[obs]['continuum'])
#plt.scatter(transmission['wave'][selection], transmission[obs]['corrected'][selection], c='r')
#plt.scatter(transmission['wave'], transmission[obs]['corrected_err']+0.05, c='b')
#plt.scatter(transmission['wave'], transmission[obs]['normalized_err'], c='r')
#plt.show()
#quit()
transmission[obs]['continuum_uncorrected_coeff'] = \
np.polynomial.chebyshev.chebfit(transmission['wave'][selection],
transmission[obs]['rebinned'][selection],
norm_pams['spectra_poly_degree'])
transmission[obs]['continuum_uncorrected'] = np.polynomial.chebyshev.chebval(
transmission['wave'], transmission[obs]['continuum_uncorrected_coeff'])
transmission[obs]['normalized_uncorrected'] = transmission[obs]['rebinned'] / \
transmission[obs]['continuum_uncorrected']
transmission[obs]['normalized_uncorrected_err'] = transmission[obs]['rebinned_err'] / \
transmission[obs]['continuum_uncorrected']
elif norm_pams['normalize_transmission'] and (
norm_pams['normalization_model'] == 'savgol'
or norm_pams['normalization_model'] == 'savitzky-golay'):
print(' ', obs, ' normalization using Savitzky-Golay filter')
transmission[obs]['continuum_coeff'] = None
transmission[obs]['continuum_uncorrected_coeff'] = None
transmission[obs]['continuum'] = savgol_filter(transmission[obs]['corrected'],
window_length=norm_pams['window_length'],
polyorder=norm_pams['polyorder'],
mode=norm_pams['mode'],
cval=norm_pams['cval'])
transmission[obs]['normalized'] = transmission[obs]['corrected'] / transmission[obs]['continuum']
transmission[obs]['normalized_err'] = transmission[obs]['corrected_err'] / \
transmission[obs]['continuum']
transmission[obs]['continuum_uncorrected'] = savgol_filter(transmission[obs]['rebinned'],
window_length=norm_pams['window_length'],
polyorder=norm_pams['polyorder'],
mode=norm_pams['mode'],
cval=norm_pams['cval'])
transmission[obs]['normalized_uncorrected'] = transmission[obs]['rebinned'] / transmission[obs]['continuum_uncorrected']
transmission[obs]['normalized_uncorrected_err'] = transmission[obs]['rebinned_err'] / \
transmission[obs]['continuum_uncorrected']
else:
transmission[obs]['continuum_coeff'] = None
transmission[obs]['continuum'] = np.ones_like(transmission['wave'])
transmission[obs]['normalized'] = transmission[obs]['corrected'].copy()
transmission[obs]['normalized_err'] = transmission[obs]['corrected_err'].copy()
#import matplotlib.pyplot as plt
#plt.scatter(transmission['wave'], transmission[obs]['corrected'])
#plt.plot(transmission['wave'], transmission[obs]['continuum'])
#plt.scatter(transmission['wave'][selection], transmission[obs]['corrected'][selection], c='r')
# plt.show()
transmission[obs]['continuum_uncorrected_coeff'] = None
transmission[obs]['continuum_uncorrected'] = np.ones_like(transmission['wave'])
transmission[obs]['normalized_uncorrected'] = transmission[obs]['rebinned'].copy()
transmission[obs]['normalized_uncorrected_err'] = transmission[obs]['rebinned_err'].copy()
print_warning = False
transm_average = np.zeros([len(lists['transit_full']), transmission['size']])
weights_average = np.zeros([len(lists['transit_full']), transmission['size']])
clvrm_average = np.zeros([len(lists['transit_full']), transmission['size']])
uncorr_average = np.zeros([len(lists['transit_full']), transmission['size']])
for i, obs in enumerate(lists['transit_full']):
transm_average[i, :] = transmission[obs]['normalized'][:]
weights_average[i, :] = 1./(transmission[obs]['normalized_err']**2.)
clvrm_average[i, :] = transmission[obs]['clv_model_rebinned'][:]
uncorr_average[i, :] = transmission[obs]['normalized_uncorrected'][:]
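            # Inverse-variance weighted average over the in-transit spectra:
            # w_i = 1 / sigma_i^2, <x> = sum(w_i * x_i) / sum(w_i), and the error
            # on the mean follows as 1 / sqrt(sum(w_i)).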
transmission['average'], transmission['sum_weights'] = np.average(
transm_average, axis=0, weights=weights_average, returned=True)
transmission['average_err'] = 1. / np.sqrt(transmission['sum_weights'])
transmission['average_clv_model'], _ = np.average(
clvrm_average, axis=0, weights=weights_average, returned=True)
transmission['average_uncorrected'], _ = np.average(
uncorr_average, axis=0, weights=weights_average, returned=True)
transmission['binned'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False)
transmission['binned_err'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_err'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False,
is_error=True)
transmission['binned_clv_model'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_clv_model'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False)
transmission['binned_uncorrected'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_uncorrected'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False)
transm_average = np.zeros([len(lists['transit_out']), transmission['size']])
weights_average = np.zeros([len(lists['transit_out']), transmission['size']])
for i, obs in enumerate(lists['transit_out']):
transm_average[i, :] = transmission[obs]['normalized'][:]
weights_average[i, :] = 1./(transmission[obs]['normalized_err']**2.)
transmission['average_out'], transmission['sum_weights_out'] = np.average(
transm_average, axis=0, weights=weights_average, returned=True)
transmission['average_out_err'] = 1./np.sqrt(transmission['sum_weights_out'])
transmission['binned_out'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_out'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False)
transmission['binned_out_err'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_out_err'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False,
is_error=True)
#save_to_cpickle('transmission_'+reference+'_processed', processed, config_in['output'], night)
save_to_cpickle(subroutine_name + '_' + reference + '_' + results_selection,
transmission, config_in['output'], night, lines_label, it_string)
# Forcing memory deallocation
transmission = None
# Forcing memory deallocation
clv_rm_models = None
def plot_transmission_spectrum(config_in, lines_label, night_input='', results_input='', reference='planetRF', pca_iteration=-1):
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
night_dict = from_config_get_nights(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
if results_input == '':
results_list = ['user',
'mcmc_night_MED',
'mcmc_night_MAP',
'mcmc_global_MED',
'mcmc_global_MAP']
else:
results_list = np.atleast_1d(results_input)
clv_rm_correction = lines_dict.get('clv_rm_correction', True)
os.system('mkdir -p plots')
interactive_plots = from_config_get_interactive_plots(config_in)
for night in night_list:
# Workaround to check if the transmission spectrum has been obtained through PCA iterations
preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
if preparation_input.get('pca_output', False):
if pca_iteration >= 0:
it_string = str(pca_iteration).zfill(2)
else:
it_string = str(preparation_input.get('ref_iteration', 0)).zfill(2)
else:
it_string = ''
preparation_input = None
if clv_rm_correction:
try:
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night, lines_label)
except (FileNotFoundError, IOError):
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night)
for results_selection in results_list:
filename_rad = subroutine_name + '_'+reference+'_'+results_selection
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the analysis"""
try:
#processed = load_from_cpickle('transmission_'+reference+'_processed', config_in['output'], night)
transmission = load_from_cpickle(filename_rad, config_in['output'], night, lines_label, it_string)
except (FileNotFoundError, IOError):
print()
print("No transmission spectrum in {0:s}, no plots".format(reference))
continue
""" Creation of the color array, based on the BJD of the observations
"""
bjd = []
am = []
for obs in lists['observations']:
bjd.append(transmission[obs]['BJD'] - 2450000.0)
am.append(transmission[obs]['AIRMASS'])
color_cmap = plt.cm.viridis
color_norm = plt.Normalize(vmin=bjd[0], vmax=bjd[-1])
colors = color_cmap(color_norm(np.asarray(bjd)))
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
            # plot each observation individually (may become cumbersome for long series)
for obs in lists['transit_full']:
color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
ax1.scatter(transmission['wave'],
transmission[obs]['normalized'],
c=color, s=1, zorder=3, alpha=0.25)
for obs in lists['transit_out']:
color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
ax2.scatter(transmission['wave'],
transmission[obs]['normalized'],
c=color, s=1, zorder=3, alpha=0.25)
ax1.set_ylim(0.925, 1.075)
            ax2.set_xlabel(r'$\lambda$ [$\AA$]')
ax2.legend(loc=3)
ax1.set_title('Lines: {0:s} Night: {1:s} \n In-transit transmission spectrum in {2:s} \n Solution {3:s}'.format(
lines_label, night, reference, results_selection))
ax2.set_title('Out-transit transmission spectrum in {0:s}'.format(reference))
try:
ax1.set_xlim(lines_dict['plot_range'][0], lines_dict['plot_range'][1])
except:
ax1.set_xlim(lines_dict['range'][0], lines_dict['range'][1])
sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
output_file = get_filename(filename_rad + '_observations',
config_in['output'], night, lines_label, it_string, extension='.pdf')
plt.savefig('plots/'+output_file, bbox_inches='tight', dpi=300)
if interactive_plots:
plt.show()
plt.close()
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
try:
master_out = load_from_cpickle('master_out', config_in['output'], night)
ax2.plot(master_out['wave'],
master_out['rescaled']-0.06,
color='k', zorder=10, label='master-out')
except (FileNotFoundError, IOError):
pass
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
ax2.plot(telluric['template']['input']['wave'],
telluric['template']['input']['flux'] - 0.06,
color='C1', zorder=10, label='telluric')
ax2.plot(telluric['template']['input']['wave'],
(telluric['template']['input']['flux']-1.)*10. + 1. - 0.06,
color='C2', alpha=0.5, zorder=9, label='telluric (x10)')
except (FileNotFoundError, IOError, KeyError):
pass
#master_out = load_from_cpickle('master_out', config_in['output'], night)
# ax1.errorbar(master_out['wave'],
# master_out['rescaled'],
# yerr=master_out['rescaled_err'],
# fmt='.', c='C0', label='master-out ' + night)
ax1.errorbar(transmission['wave'],
transmission['average'],
yerr=transmission['average_err'],
fmt='ko', ms=1, zorder=5, alpha=0.25)
ax1.errorbar(transmission['binned_wave'],
transmission['binned'],
yerr=transmission['binned_err'],
fmt='ro', ms=4, lw=2, zorder=10)
ax2.errorbar(transmission['wave'],
transmission['average_out'],
yerr=transmission['average_out_err'],
fmt='ko', ms=1, zorder=5, alpha=0.25, label='average')
ax2.errorbar(transmission['binned_wave'],
transmission['binned_out'],
yerr=transmission['binned_out_err'],
fmt='ro', ms=4, lw=2, zorder=10, label='binned average')
ax1.set_ylim(0.99, 1.01)
            ax2.set_xlabel(r'$\lambda$ [$\AA$]')
ax2.legend(loc=3)
ax1.set_title('Lines: {0:s} Night: {1:s} \n In-transit transmission spectrum in {2:s} \n Solution {3:s}'.format(
lines_label, night, reference, results_selection))
ax2.set_title('Out-transit transmission spectrum in {0:s}'.format(reference))
sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
try:
ax1.set_xlim(lines_dict['plot_range'][0], lines_dict['plot_range'][1])
except:
ax1.set_xlim(lines_dict['range'][0], lines_dict['range'][1])
#ax1.set_xlim(config_in['master-out']['wavelength_range'][0], config_in['master-out']['wavelength_range'][1])
output_file = get_filename(filename_rad + '_binned',
config_in['output'], night, lines_label, it_string, extension='.pdf')
plt.savefig('plots/'+output_file, bbox_inches='tight', dpi=300)
if interactive_plots:
plt.show()
plt.close()
if not clv_rm_correction:
continue
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
            # plot each observation individually (may become cumbersome for long series)
for obs in lists['transit_full']:
color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
ax1.plot(clv_rm_models['common']['wave'],
transmission[obs]['clv_model_stellarRF'],
zorder=3, alpha=0.25)
ax1.scatter(transmission['wave'],
transmission[obs]['clv_model_rebinned'],
c=color, s=1, zorder=10, alpha=0.5)
for obs in lists['transit_out']:
color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
ax2.plot(clv_rm_models['common']['wave'],
transmission[obs]['clv_model_stellarRF'],
zorder=3, alpha=0.25)
ax2.scatter(transmission['wave'],
transmission[obs]['clv_model_rebinned'],
c=color, s=1, zorder=10, alpha=0.5)
            ax2.set_xlabel(r'$\lambda$ [$\AA$]')
ax2.legend(loc=3)
ax1.set_title('Lines: {0:s} Night: {1:s} \n CLV-RM correction in {2:s} \n Solution {3:s}'.format(
lines_label, night, reference, results_selection))
ax2.set_title('Out-transit transmission spectrum in {0:s}'.format(reference))
try:
ax1.set_xlim(lines_dict['plot_range'][0], lines_dict['plot_range'][1])
except:
ax1.set_xlim(lines_dict['range'][0], lines_dict['range'][1])
sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
output_file = get_filename(filename_rad + '_clv_rm_models',
config_in['output'], night, lines_label, it_string, extension='.pdf')
plt.savefig('plots/'+output_file, bbox_inches='tight', dpi=300)
if interactive_plots:
plt.show()
plt.close()
| 44,684 | 51.447183 | 146 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_binned_mcmc.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.constants import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.math_functions import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.bayesian_emcee import *
# from SLOPpy.subroutines.rebin_subroutines import *
from scipy.signal import savgol_filter
__all__ = ['compute_transmission_binned_mcmc','compute_transmission_binned_mcmc_iterative',
'plot_transmission_binned_mcmc','plot_transmission_binned_mcmc_iterative']
subroutine_name = 'transmission_binned_mcmc'
def compute_transmission_binned_mcmc_iterative(config_in, lines_label):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
compute_transmission_binned_mcmc(config_in, lines_label, reference='planetRF', pca_iteration=it)
def plot_transmission_binned_mcmc_iterative(config_in, lines_label, night_input='', reference='planetRF'):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations',5)):
plot_transmission_binned_mcmc(config_in, lines_label, night_input=night_input, reference=reference, pca_iteration=it)
def compute_transmission_binned_mcmc(config_in, lines_label, reference='planetRF', pca_iteration=-1):
night_dict = from_config_get_nights(config_in)
planet_dict = from_config_get_planet(config_in)
star_dict = from_config_get_star(config_in)
clv_rm_dict = from_config_get_clv_rm(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
""" selection of those parameters that are specific of the spectral line(s)
under analysis
"""
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
sampler_pams = lines_dict['sampler_parameters']
sampler_name = sampler_pams.get('sampler_name', 'emcee')
# TODO reference as input parameter
reference = 'planetRF'
"""
- case 0: only one spectral line, default line parameters are contrast, FWHM, rv_shift
- case 1: only one spectral line, no winds
    - case 2: only one spectral line, no planetary radius dependence
    - case 3: only one spectral line, no winds and no planetary radius dependence
    - case 10: more than one spectral line, all line parameters are free and independent
    - case 11: more than one spectral line, all lines are affected by the same wind
    - case 12: more than one spectral line, all lines have the same FWHM
    - case 13: more than one spectral line, all lines are affected by the same wind and have the same FWHM
    - case 14: more than one spectral line, no winds
    - case 15: more than one spectral line, no winds, all lines have the same FWHM
    - case 20: more than one spectral line, no Rp dependence, all line parameters are free and independent
    - case 21: more than one spectral line, no Rp dependence, all lines are affected by the same wind
    - case 22: more than one spectral line, no Rp dependence, all lines have the same FWHM
    - case 23: more than one spectral line, no Rp dependence, all lines are affected by the same wind and have the same FWHM
    - case 24: more than one spectral line, no Rp dependence, no winds
    - case 25: more than one spectral line, no Rp dependence, no winds, all lines have the same FWHM
free_Rp free_winds shared_winds shared_FWHM
- case 0: True True False False DEFAULT for single line
- case 1: True False False False
- case 2: False True False False
- case 3: False False False False
- case 10: True True False False DEFAULT for multiple lines
- case 11: True True True False
- case 12: True True False True
- case 13: True True True True
- case 14: True False False False
- case 15: True False False True
- case 20: False True False False
- case 21: False True True False
- case 22: False True False True
- case 23: False True True True
- case 24: False False False False
- case 25: False False False True
"""
model_case = 10
norm_dict = lines_dict.get('normalization', clv_rm_dict.get('normalization', {}))
norm_pams={}
norm_pams['normalize_transmission'] = norm_dict.get('normalize_transmission', True)
norm_pams['normalization_model'] = norm_dict.get('normalization_model', 'polynomial')
""" Normalization parameters for polynomial model"""
norm_pams['model_poly_degree'] = norm_dict.get('model_poly_degree', 2)
norm_pams['spectra_poly_degree'] = norm_dict.get('spectra_poly_degree', 2)
norm_pams['lower_threshold'] = norm_dict.get('lower_threshold', 0.950)
norm_pams['percentile_selection'] = norm_dict.get('percentile_selection', 10)
""" Normalization parameters using Savitzky-Golay filter"""
norm_pams['window_length'] = norm_dict.get('window_length', 101)
norm_pams['polyorder'] = norm_dict.get('polyorder', 3)
norm_pams['mode'] = norm_dict.get('mode', 'nearest')
norm_pams['cval'] = norm_dict.get('cval', 1.0)
norm_pams['normalize_rebinned'] = norm_dict.get('normalize_rebinned', False)
# Added back-compatibility to old or "wrong" keys
clv_rm_correction = lines_dict.get('clv_rm_correction', True)
fit_pams = lines_dict['fit_parameters']
free_Rp = fit_pams.get('free_Rp', True) \
and fit_pams.get('free_planet_radius', True) \
and clv_rm_correction
free_winds = fit_pams.get('free_winds', True) \
and fit_pams.get('free_offset', True)
shared_winds = fit_pams.get('shared_winds', False) \
or fit_pams.get('shared_offset', False)
shared_FWHM = fit_pams.get('shared_FWHM', False) \
or fit_pams.get('shared_fwhm', False)
    prior_dict = fit_pams.get('priors', {})
allow_emission = fit_pams.get('allow_emission', False)
if len(lines_dict['lines']) < 2:
if free_Rp is True and free_winds is True:
model_case = 0
if free_Rp is True and free_winds is False:
model_case = 1
if free_Rp is False and free_winds is True:
model_case = 2
if free_Rp is False and free_winds is False:
model_case = 3
else:
if free_Rp is True:
if free_winds is True:
if shared_winds is False and shared_FWHM is False:
model_case = 10
if shared_winds is True and shared_FWHM is False:
model_case = 11
if shared_winds is False and shared_FWHM is True:
model_case = 12
if shared_winds is True and shared_FWHM is True:
model_case = 13
else:
if shared_winds is False and shared_FWHM is False:
model_case = 14
if shared_winds is False and shared_FWHM is True:
model_case = 15
else:
if free_winds is True:
if shared_winds is False and shared_FWHM is False:
model_case = 20
if shared_winds is True and shared_FWHM is False:
model_case = 21
if shared_winds is False and shared_FWHM is True:
model_case = 22
if shared_winds is True and shared_FWHM is True:
model_case = 23
else:
if shared_winds is False and shared_FWHM is False:
model_case = 24
if shared_winds is False and shared_FWHM is True:
model_case = 25
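    # Worked example: with two or more lines and free_Rp=True, free_winds=True,
    # shared_winds=True, shared_FWHM=False, the selection above yields
    # model_case = 11 (a single wind shared by all lines, independent FWHMs).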
jitter_flag = fit_pams.get('jitter', True)
pyde_flag = fit_pams.get('pyde', True)
print()
print(' free_Rp: (default: True) ', free_Rp)
print(' free_winds: (default: True) ', free_winds)
print(' shared_winds: (default: False) ', shared_winds)
print(' shared_FWHM: (default: False) ', shared_FWHM)
print(' jitter: (default: True) ', jitter_flag)
print(' # lines: ', len(lines_dict['lines']))
print(' model_case: ', model_case)
""" parameters list:
to be updated
pams_dict = {} # dictionary containing the index of a given parameter
pams_list = [] # list with the parameter names ordered according to their index
boundaries = np.empty([0, 2]) # boundaries for MCMC / nested sampling
theta_start = np.empty(0) # starting point for MCMC
lines_center = np.empty(0) # laboratory wavelength of spectral lines
pam_index = 0 # keep track of the number of variables
for line_key, line_val in lines_dict['lines'].items():
pam_name = line_key + '_contrast'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.00, 1.00]], axis=0)
theta_start = np.append(theta_start, 0.010)
pam_index += 1
lines_center = np.append(lines_center, line_val)
# skip the inclusion of FWHM as a free parameter for each line
if the shared FWHM is selected
#
if model_case in [0, 1, 2, 3, 10, 11, 14, 20, 21, 24]:
# if not lines_dict['fit_parameters']['shared_fwhm']:
pam_name = line_key + '_fwhm'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.00, 150.00]], axis=0)
theta_start = np.append(theta_start, 5.0)
pam_index += 1
# if lines_dict['fit_parameters']['fixed_separation']: continue
# if not lines_dict['fit_parameters']['lines_shift']: continue
if model_case in [0, 2, 10, 12, 20, 22]:
pam_name = line_key + '_winds'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[-5.00, 5.00]], axis=0)
theta_start = np.append(theta_start, 0.00)
pam_index += 1
if model_case in [12, 13, 15, 22, 23, 25]:
# if lines_dict['fit_parameters']['shared_fwhm']:
pam_name = 'shared_fwhm'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.000, 150.00]], axis=0)
theta_start = np.append(theta_start, 5.000)
pam_index += 1
if model_case in [11, 13, 21, 23]:
# if lines_dict['fit_parameters']['fixed_separation'] and lines_dict['fit_parameters']['lines_shift']:
pam_name = 'shared_winds'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[-5.0, 5.0]], axis=0)
theta_start = np.append(theta_start, 0.000)
pam_index += 1
if model_case in [0, 1, 10, 11, 12, 13, 14, 15]:
pams_dict['rp_factor'] = pam_index
pams_list.append('rp_factor')
boundaries = np.append(boundaries, [[0.5, 2.0]], axis=0)
theta_start = np.append(theta_start, 1.0)
pam_index += 1
pams_dict['K_planet'] = pam_index
pams_list.append('K_planet')
    boundaries = np.append(boundaries,
                           [[-300., planet_dict['RV_semiamplitude'][0] + 300.]],
                           axis=0)
theta_start = np.append(
theta_start, planet_dict['RV_semiamplitude'][0])
pam_index += 1
pam_name = 'jitter'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[10**(-12), 0.01]], axis=0)
theta_start = np.append(theta_start, 10**(-11))
pam_index += 1
for ii in range(0, pam_index):
print(pams_list[ii], ' ', boundaries[ii, :],
' ', theta_start[ii])
ndim = pam_index
"""
for night in night_dict:
print()
print("transmission_mcmc Night: {0:s}".format(night))
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle(
'observational_pams', config_in['output'], night)
        ### Moved here to retrieve information about whether PCA correction has been performed or not
preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
if preparation_input.get('pca_output', False):
if pca_iteration >= 0:
it_string = str(pca_iteration).zfill(2)
else:
it_string = str(preparation_input.get('ref_iteration', 0)).zfill(2)
preparation = preparation_input[it_string]
else:
preparation = preparation_input
it_string = ''
""" This need to be checked only once, so it's ok to take the output of the last night
and propagate it to the rest of subroutine
"""
if len(it_string) > 0:
pca_output = True
else:
pca_output = False
try:
mcmc_data = load_from_cpickle(subroutine_name + '_data', config_in['output'], night, lines_label, it_string)
clv_rm_radius = mcmc_data['clv_rm_radius']
clv_rm_grid = mcmc_data['clv_rm_grid']
transmission_spec = mcmc_data['transmission_spec']
transmission_spec_err = mcmc_data['transmission_spec_err']
wave_meshgrid = mcmc_data['wave_meshgrid']
time_meshgrid = mcmc_data['time_meshgrid']
planet_RVsinusoid = mcmc_data['planet_RVsinusoid']
jitter_index = mcmc_data['jitter_index']
n_jitter = mcmc_data['n_jitter']
print(" Loading MCMC data array for lines {0:s}, night: {1:s}".format(
lines_label, night))
except FileNotFoundError:
print(" Computing MCMC data array for lines {0:s}, night: {1:s}".format(
lines_label, night))
calib_data = load_from_cpickle(
'calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(
config_in['output'], night, lists['observations'])
if clv_rm_correction:
try:
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night, lines_label)
except (FileNotFoundError, IOError):
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night)
else:
# workaround if CLV correction is not available
clv_rm_models = {'common': {}}
clv_rm_models['common']['n_radius_grid'] = 3
clv_rm_models['common']['radius_grid'] = np.asarray(
[0.5, 1.0, 1.5])
processed = {
'subroutine': subroutine_name,
}
processed['common'] = {
'range': lines_dict['fit_parameters']['range']
}
processed['common']['wave'] = np.arange(processed['common']['range'][0],
processed['common']['range'][1],
lines_dict['fit_parameters']['bin_step'],
dtype=np.double)
processed['common']['size'] = len(processed['common']['wave'])
processed['common']['step'] = np.ones(
processed['common']['size'], dtype=np.double) * lines_dict['fit_parameters']['bin_step']
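            # 'common' spans only the fit range at the user-defined bin step,
            # while 'common_extended' below spans the full line range and is used
            # for the continuum normalization.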
processed['common_extended'] = {
'range': lines_dict['range']
}
processed['common_extended']['wave'] = np.arange(processed['common_extended']['range'][0],
processed['common_extended']['range'][1],
lines_dict['fit_parameters']['bin_step'],
dtype=np.double)
processed['common_extended']['size'] = len(
processed['common_extended']['wave'])
processed['common_extended']['step'] = np.ones(
processed['common_extended']['size'], dtype=np.double) * lines_dict['fit_parameters']['bin_step']
for obs in lists['observations']:
""" we start from the e2ds file, after correction for blaze and
division by the master-out
Observation data:
wave: input_data[obs]['wave']
step: input_data[obs]['step']
flux: preparation[obs]['ratio']
ferr: preparation[obs]['ratio_err']
"""
""" First step: we rebin the spectra in the Stellar Reference Frame,
with the step size decided by the user specifically for the fit
"""
if pca_output:
preserve_flux = False
blaze = np.ones_like(calib_data['blaze'])
else:
preserve_flux = input_data[obs].get('absolute_flux', True)
blaze = calib_data['blaze']
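# For PCA-prepared spectra the blaze is replaced by ones and the flux is not
# preserved, as the preparation output is presumably already normalized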
processed[obs] = {}
processed[obs]['rebinned'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio'],
blaze,
processed['common']['wave'],
processed['common']['step'],
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'],
preserve_flux=preserve_flux)
processed[obs]['rebinned_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio_err'],
blaze,
processed['common']['wave'],
processed['common']['step'],
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'],
preserve_flux=preserve_flux,
is_error=True)
processed[obs]['rebinned_extended'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio'],
blaze,
processed['common_extended']['wave'],
processed['common_extended']['step'],
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'],
preserve_flux=preserve_flux)
if norm_pams['normalize_transmission'] and norm_pams['normalization_model'] == 'polynomial':
""" Continuum normalization preparatory steps:
1) exclusion of regions with planetary lines
2) exclusion of regions with stellar lines
3) Polynomial fit of selected regions
Boolean array initialized to all True values, fit is
performed on the extended region and then applied to the fit subset
"""
processed['common_extended']['line_exclusion'] = (
processed['common_extended']['wave'] > 0.)
""" Continuum normalization:
1) exclusion of regions with planetary lines, taking into
account the planetary RV semi-amplitude
"""
for line_key, line_val in lines_dict['lines'].items():
line_extension = 1.2 * \
planet_dict['RV_semiamplitude'][0] * \
line_val / speed_of_light_km
processed['common_extended']['line_exclusion'] = processed['common_extended']['line_exclusion'] & (
np.abs(processed['common_extended']['wave']-line_val) > line_extension)
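# Illustrative example (values not from the configuration): for K_p = 150 km/s
# and a line at 5890 A, the excluded half-width is
# 1.2 * 150 / 299792.458 * 5890 ~ 3.5 A around the line center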
""" Continuum normalization:
2) exclusion of regions with planetary lines, taking into
account the planetary RV semi-amplitude
"""
try:
stellar_spectrum_rebinned = rebin_1d_to_1d(clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
clv_rm_models['common']['norm_convolved'],
processed['common_extended']['wave'],
processed['common_extended']['step'],
preserve_flux=False)
stellar_spectrum_derivative = first_derivative(
processed['common_extended']['wave'], stellar_spectrum_rebinned)
cont_10perc = np.percentile(np.abs(stellar_spectrum_derivative), norm_pams['percentile_selection'])
processed['common_extended']['line_exclusion'] = processed['common_extended']['line_exclusion'] \
& (np.abs(stellar_spectrum_derivative) < cont_10perc) \
& (stellar_spectrum_rebinned > norm_pams['lower_threshold'])
except KeyError:
print(
"No stellar synthetic spectrum from CLV models, some stellar lines may be included in the transmission normalization")
for obs in lists['observations']:
selection = processed['common_extended']['line_exclusion'] & (
processed[obs]['rebinned_extended'] > np.std(processed[obs]['rebinned_extended']))
processed[obs]['norm_coeff'] = \
np.polynomial.chebyshev.chebfit(processed['common_extended']['wave'][selection],
processed[obs]['rebinned_extended'][selection],
norm_pams['spectra_poly_degree'])
processed[obs]['continuum'] = np.polynomial.chebyshev.chebval(
processed['common']['wave'], processed[obs]['norm_coeff'])
processed[obs]['normalized'] = processed[obs]['rebinned'] / \
processed[obs]['continuum']
processed[obs]['normalized_err'] = processed[obs]['rebinned_err'] / \
processed[obs]['continuum']
elif norm_pams['normalize_transmission'] and (
norm_pams['normalization_model'] == 'savgol'
or norm_pams['normalization_model'] == 'savitzky-golay'):
print(' Normalization using Savitzky-Golay filter')
for obs in lists['observations']:
if norm_pams['normalize_rebinned']:
processed[obs]['continuum'] = savgol_filter(preparation[obs]['rebinned'],
window_length=norm_pams['window_length'],
polyorder=norm_pams['polyorder'],
mode=norm_pams['mode'],
cval=norm_pams['cval'])
else:
normalization_model = np.zeros_like(preparation[obs]['ratio'])
for order in range(0, observational_pams['n_orders']):
normalization_model[order,:] = savgol_filter(preparation[obs]['ratio'][order,:],
window_length=norm_pams['window_length'],
polyorder=norm_pams['polyorder'],
mode=norm_pams['mode'],
cval=norm_pams['cval'])
processed[obs]['continuum'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
normalization_model,
blaze,
processed['common']['wave'],
processed['common']['step'],
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'],
preserve_flux=preserve_flux)
processed[obs]['continuum_coeff'] = None
processed[obs]['normalized'] = processed[obs]['rebinned'] / \
processed[obs]['continuum']
processed[obs]['normalized_err'] = processed[obs]['rebinned_err'] / \
processed[obs]['continuum']
else:
for obs in lists['observations']:
processed[obs]['continuum_coeff'] = None
processed[obs]['continuum'] = np.ones_like(processed['common']['wave'])
processed[obs]['normalized'] = processed[obs]['rebinned'].copy()
processed[obs]['normalized_err'] = processed[obs]['rebinned_err'].copy()
processed['common']['n_obs'] = len(lists['transit_full'])
processed['common']['n_radius_grid'] = clv_rm_models['common']['n_radius_grid']
processed['common']['radius_grid'] = clv_rm_models['common']['radius_grid']
clv_rm_radius = clv_rm_models['common']['radius_grid']
""" We are moving the values of interest from dictionaries to arrays
in order to speed up the MCMC
1) clv_rm_grid: array with all the CLV models, as a function of the
radius of the planet
2) time_from_transit: BJD_TDB - T0
3) planet_RVsinusoid: Fractional RV of the planet (K=1) - from a meshgrid
"""
clv_rm_grid = np.ones([processed['common']['n_radius_grid'],
processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
time_from_transit = np.empty(
processed['common']['n_obs'], dtype=np.double)
transmission_spec = np.empty([processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
transmission_spec_err = np.empty([processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
for i_obs, obs in enumerate(lists['transit_full']):
time_from_transit[i_obs] = observational_pams[obs]['BJD'] - \
observational_pams['time_of_transit']
# planet_RVsinusoid[i_obs] = np.sin(2*np.pi / planet_dict['period'][0] * time_from_transit[i_obs])
transmission_spec[i_obs, :] = processed[obs]['normalized']
transmission_spec_err[i_obs,
:] = processed[obs]['normalized_err']
if clv_rm_correction is False:
continue
for i_r in range(0, processed['common']['n_radius_grid']):
""" CLV Synthetic models are in the Stellar Reference system,
so no shift is required """
clv_rm_grid[i_r, i_obs, :] = \
rebin_1d_to_1d(clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
clv_rm_models[obs]['clv_rm_model_convolved_normalized'][i_r, :],
processed['common']['wave'],
processed['common']['step'],
preserve_flux=False)
# preserve_flux must be False here: the CLV models are already normalized,
# so rebinning should not rescale them by the bin size
#colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(
# lists, observational_pams)
#fig = plt.figure(figsize=(12, 6))
#gs = GridSpec(2, 2, width_ratios=[50, 1])
#ax1 = plt.subplot(gs[0, 0])
#ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
#cbax1 = plt.subplot(gs[:, 1])
#i_r = 0
#for i_obs, obs in enumerate(lists['transit_full']):
# ax1.plot(processed['common']['wave'],
# clv_rm_grid[i_r, i_obs, :],
# color=colors_plot['mBJD'][obs], alpha=0.2)
#i_r = processed['common']['n_radius_grid']-1
#for i_obs, obs in enumerate(lists['transit_full']):
# ax2.plot(processed['common']['wave'],
# clv_rm_grid[i_r, i_obs, :],
# color=colors_plot['mBJD'][obs], alpha=0.2)
#ax1.set_title(
# 'Night: {0:s} \n CLV+RM correction, convolved and normalized '.format(night))
#ax2.set_title('Out of transit')
#ax2.set_xlabel('$\lambda$ [$\AA$]')
#sm = plt.cm.ScalarMappable(
# cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
#sm.set_array([]) # You have to set a dummy-array for this to work...
#cbar = plt.colorbar(sm, cax=cbax1)
#cbar.set_label('BJD - 2450000.0')
#fig.subplots_adjust(wspace=0.05, hspace=0.4)
#plt.show()
#quit()
remove_outliers = (np.abs(transmission_spec - 1.) > 0.5)
transmission_spec[remove_outliers] = 1.0
transmission_spec_err[remove_outliers] = 1.0
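# The flagged pixels are set to unity with unit error, so they are
# effectively down-weighted in the chi-square of the fit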
wave_meshgrid, time_meshgrid = np.meshgrid(
processed['common']['wave'], time_from_transit)
planet_RVsinusoid = np.sin(
2*np.pi / planet_dict['period'][0] * time_meshgrid)
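# For a circular orbit the planetary RV is K_p * sin(2*pi*(BJD - T0)/P);
# the sinusoid is stored with K=1 and scaled by the fitted K inside the MCMC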
if jitter_flag:
jitter_index = []
n_jitter = 1
else:
jitter_index = None
n_jitter = 0
mcmc_data = {
'observations': lists['transit_full'],
'common_wave': processed['common']['wave'],
'common_step': processed['common']['step'],
'clv_rm_grid': clv_rm_grid,
'transmission_spec': transmission_spec,
'transmission_spec_err': transmission_spec_err,
'wave_meshgrid': wave_meshgrid,
'time_meshgrid': time_meshgrid,
'planet_RVsinusoid': planet_RVsinusoid,
'clv_rm_radius': clv_rm_models['common']['radius_grid'],
'n_obs': len(lists['transit_full']),
'n_radius_grid': clv_rm_models['common']['n_radius_grid'],
'jitter_index': jitter_index,
'n_jitter': n_jitter
}
save_to_cpickle(subroutine_name + '_data', mcmc_data,
config_in['output'], night, lines_label, it_string)
# Forcing memory deallocation
clv_rm_models = None
mcmc_data = None
print()
print("transmission_binned_mcmc ")
try:
results_dict = load_from_cpickle(subroutine_name+'_'+sampler_name+'_results', config_in['output'], night, lines_label, it_string)
print(" Transmission MCMC analysis for lines {0:s}, night: {1:s} already performed".format(
lines_label, night))
pams_dict = results_dict['pams_dict']
chain_med = results_dict['chain_med']
boundaries = results_dict['boundaries']
start_average = np.average(results_dict['point_start'], axis=0)
ndim = results_dict['ndim']
med_lines_model = results_dict['results']['lines_model']
if 'derived' in results_dict:
recompute_derived = False
else:
recompute_derived = True
results_dict['derived'] = {}
# TODO improve output
print(' *** sampler output ')
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}]) (start: {7:9f})'.format(key, val,
chain_med[val,0],
chain_med[val,2],
chain_med[val,1],
boundaries[val, 0],
boundaries[val, 1],
start_average[val])
)
if recompute_derived and key[-8:]=='contrast':
key_name = key[:-8] + 'Rh'
sample_size = len(results_dict['flat_chain'][:,val])
planet_ratio_sample = np.random.normal(planet_dict['radius_ratio'][0],planet_dict['radius_ratio'][1],size=sample_size)
results_dict['derived'][key_name] = {}
results_dict['derived'][key_name]['flat_chain'] = np.sqrt(results_dict['flat_chain'][:,val]/planet_ratio_sample**2 + 1.)
results_dict['derived'][key_name]['chain_med'] = compute_value_sigma(results_dict['derived'][key_name]['flat_chain'])
# R(h) = np.sqrt(1+h/delta)
# print(key[-8:], key[:3])
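# Here h is the fitted line contrast and delta = (Rp/Rs)^2 the transit depth,
# so R(h) expresses the effective absorption radius in planetary radii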
print(' *** derived output ')
for key, val in results_dict['derived'].items():
chain_med = results_dict['derived'][key]['chain_med']
print('{0:24s} {1:12f} {2:12f} {3:12f} (15-84 p)'.format(key,
chain_med[0],
chain_med[2],
chain_med[1]))
continue
except FileNotFoundError:
print()
# getting fit parameters
lines_center, pams_dict, pams_list, boundaries, theta_start = define_theta_array(
model_case, lines_dict, planet_dict, n_jitter, allow_emission=allow_emission)
ndim = len(theta_start)
if pyde_flag:
ngen = sampler_pams.get('n_gen', 64000)
else:
ngen = 0
nwalkers_mult = sampler_pams.get('n_walkers_mult', 2)
nwalkers = sampler_pams.get('n_walkers', nwalkers_mult * ndim)
nthin = sampler_pams.get('n_thin', 50)
nsteps = sampler_pams.get('n_steps', 20000)
nburnin = sampler_pams.get('n_burnin', 10000)
ndata = np.size(wave_meshgrid)
if pams_dict.get('rp_factor', False):
pam_id = pams_dict['rp_factor']
boundaries[pam_id, :] = [clv_rm_radius[0], clv_rm_radius[-1]]
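# The planet radius factor is bounded by the CLV model grid, since the models
# are interpolated within these radii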
print()
print(' PyDE + emcee parameters')
print(' n_dim: {0:9.0f}'.format(ndim))
if pyde_flag:
print(' n_gen: (default: 64000) {0:9.0f}'.format(ngen))
else:
print(' no PyDE optimization, MCMC will start from default values')
print(
' n_walkers: (default: 2*ndim) {0:9.0f}'.format(nwalkers))
print(' n_steps: (default: 20000) {0:9.0f}'.format(nsteps))
print(
' n_burnin: (default: 10000) {0:9.0f}'.format(nburnin))
print(' n_thin: (default: 50) {0:9.0f}'.format(nthin))
population, sampler_chain, sampler_lnprobability, point_start = emcee_lines_fit_functions(
model_case,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
prior_dict,
theta_start, boundaries, ndim, nwalkers, ngen, nsteps, nthin)
flat_chain, flat_lnprob, chain_med, chain_MAP, lnprob_med, lnprob_MAP = \
emcee_flatten_median(population, sampler_chain,
sampler_lnprobability, nburnin, nthin, nwalkers)
emcee_compute_BIC_AIC(lnprob_med, lnprob_MAP, ndata, ndim)
med_lines_model, med_clv_model, med_lines_array, med_planet_K, med_planet_R, med_jitter = \
return_model(model_case,
chain_med[:, 0],
wave_meshgrid,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index)
map_lines_model, map_clv_model, map_lines_array, map_planet_K, map_planet_R, map_jitter = \
return_model(model_case,
chain_MAP,
wave_meshgrid,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index)
results_dict = {
'sampler_name': sampler_name,
'ndim': ndim,
'nwalkers': nwalkers,
'nthin': nthin,
'nsteps': nsteps,
'nburnin': nburnin,
'ndata': ndata,
'pams_dict': pams_dict,
'population': population,
'sampler_chain': sampler_chain,
'sampler_lnprobability': sampler_lnprobability,
'theta_start': theta_start,
'boundaries': boundaries,
'flat_chain': flat_chain,
'flat_lnprob': flat_lnprob,
'chain_med': chain_med,
'chain_MAP': chain_MAP,
'lnprob_med': lnprob_med,
'lnprob_MAP': lnprob_MAP,
'lines_center': lines_center,
'point_start': point_start,
}
results_dict['results'] = {
'lines_model': med_lines_model,
'clv_model': med_clv_model,
'lines_array': med_lines_array,
'planet_K': med_planet_K,
'planet_R': med_planet_R,
'jitter': med_jitter
}
results_dict['results_MAP'] = {
'lines_model': map_lines_model,
'clv_model': map_clv_model,
'lines_array': map_lines_array,
'planet_K': map_planet_K,
'planet_R': map_planet_R,
'jitter': map_jitter
}
results_dict['results']['observational_pams'] = {}
results_dict['results_MAP']['observational_pams'] = {}
for obs in lists['observations']:
results_dict['results']['observational_pams'][obs] = {}
results_dict['results_MAP']['observational_pams'][obs] = {}
""" RV shift from the observer RF to the planet RF
STRONG ASSUMPTIONS:
- there is only the transiting planet in the system
- the planet has null eccentricity
- linear approximation of the orbit near the transit event
Computation is performed by moving to the Solar Barycenter, then to the Stellar System Barycenter
and finally onto the planet
"""
results_dict['results']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
""" RV shift from Stellar Rest Frame to Planetary Rest Frame
We have to take into account the RV of the star relative to the Barycenter
"""
results_dict['results']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['derived'] = {}
# TODO improve output
print(' *** sampler output ')
start_average = np.average(results_dict['point_start'], axis=0)
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}]) (start: {7:9f})'.format(key, val,
chain_med[val,0],
chain_med[val,2],
chain_med[val,1],
boundaries[val, 0],
boundaries[val, 1],
start_average[val])
)
if key[-8:]=='contrast':
key_name = key[:-8] + 'Rh'
sample_size = len(results_dict['flat_chain'][:,val])
planet_ratio_sample = np.random.normal(planet_dict['radius_ratio'][0],planet_dict['radius_ratio'][1],size=sample_size)
results_dict['derived'][key_name] = {}
results_dict['derived'][key_name]['flat_chain'] = np.sqrt(results_dict['flat_chain'][:,val]/planet_ratio_sample**2 + 1.)
results_dict['derived'][key_name]['chain_med'] = compute_value_sigma(results_dict['derived'][key_name]['flat_chain'])
print(' *** derived output ')
for key, val in results_dict['derived'].items():
chain_med = results_dict['derived'][key]['chain_med']
print('{0:24s} {1:12f} {2:12f} {3:12f} (15-84 p)'.format(key,
chain_med[0],
chain_med[2],
chain_med[1]))
save_to_cpickle(subroutine_name+'_'+sampler_name+'_results',
results_dict, config_in['output'], night, lines_label, it_string)
# print(' *** physical output')
#
# results_dict['results'] = {
# 'lines_model': med_lines_model,
# 'clv_model': med_clv_model,
# 'lines_array': med_lines_array,
# 'planet_K': med_planet_K,
# 'planet_R': med_planet_R,
# 'jitter': med_jitter
# }
""" Analysis of the entire dataset """
print()
try:
all_mcmc_data = load_from_cpickle(subroutine_name+'_data', config_in['output'], night='', lines=lines_label, it_string=it_string)
all_clv_rm_radius = all_mcmc_data['clv_rm_radius']
all_clv_rm_grid = all_mcmc_data['clv_rm_grid']
all_transmission_spec = all_mcmc_data['transmission_spec']
all_transmission_spec_err = all_mcmc_data['transmission_spec_err']
all_wave_meshgrid = all_mcmc_data['wave_meshgrid']
all_time_meshgrid = all_mcmc_data['time_meshgrid']
all_planet_RVsinusoid = all_mcmc_data['planet_RVsinusoid']
all_observations = all_mcmc_data['observations']
all_n_obs = all_mcmc_data['n_obs']
all_n_radius_grid = all_mcmc_data['n_radius_grid']
all_jitter_index = all_mcmc_data['jitter_index']
n_jitter = all_mcmc_data['n_jitter']
except (FileNotFoundError, IOError):
n_jitter = 0
for night in night_dict:
mcmc_data = load_from_cpickle(subroutine_name+'_data', config_in['output'], night, lines_label, it_string=it_string)
try:
# Building the arrays for the full analysis
all_clv_rm_grid = np.concatenate(
(all_clv_rm_grid, mcmc_data['clv_rm_grid']), axis=1)
all_transmission_spec = np.concatenate(
(all_transmission_spec, mcmc_data['transmission_spec']))
all_transmission_spec_err = np.concatenate(
(all_transmission_spec_err, mcmc_data['transmission_spec_err']))
all_wave_meshgrid = np.concatenate(
(all_wave_meshgrid, mcmc_data['wave_meshgrid']))
all_time_meshgrid = np.concatenate(
(all_time_meshgrid, mcmc_data['time_meshgrid']))
all_planet_RVsinusoid = np.concatenate(
(all_planet_RVsinusoid, mcmc_data['planet_RVsinusoid']))
all_observations = np.concatenate(
(all_observations, mcmc_data['observations']))
all_n_obs += mcmc_data['n_obs']
if jitter_flag:
all_jitter_index = np.concatenate(
(all_jitter_index, n_jitter*np.ones(np.shape(mcmc_data['wave_meshgrid']), dtype=np.int16)))
n_jitter += 1
except NameError:
""" This error is expected when retrieving the data of the first night"""
all_clv_rm_radius = mcmc_data['clv_rm_radius']
all_clv_rm_grid = mcmc_data['clv_rm_grid']
all_transmission_spec = mcmc_data['transmission_spec']
all_transmission_spec_err = mcmc_data['transmission_spec_err']
all_wave_meshgrid = mcmc_data['wave_meshgrid']
all_time_meshgrid = mcmc_data['time_meshgrid']
all_planet_RVsinusoid = mcmc_data['planet_RVsinusoid']
all_observations = mcmc_data['observations']
all_n_obs = mcmc_data['n_obs']
all_n_radius_grid = mcmc_data['n_radius_grid']
if jitter_flag:
all_jitter_index = n_jitter * \
np.ones(
np.shape(mcmc_data['wave_meshgrid']), dtype=np.int16)
n_jitter += 1
else:
all_jitter_index = None
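# Each night gets its own jitter parameter: jitter_index maps every pixel of
# a night to the corresponding jitter term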
all_mcmc_data = {
'observations': all_observations,
'clv_rm_grid': all_clv_rm_grid,
'transmission_spec': all_transmission_spec,
'transmission_spec_err': all_transmission_spec_err,
'wave_meshgrid': all_wave_meshgrid,
'time_meshgrid': all_time_meshgrid,
'planet_RVsinusoid': all_planet_RVsinusoid,
'clv_rm_radius': all_clv_rm_radius,
'n_obs': all_n_obs,
'n_radius_grid': all_n_radius_grid,
'jitter_index': all_jitter_index,
'n_jitter': n_jitter
}
save_to_cpickle(subroutine_name+'_data', all_mcmc_data,
config_in['output'], night='', lines=lines_label, it_string=it_string)
try:
results_dict = load_from_cpickle(subroutine_name+ '_'+ sampler_name+'_results',
config_in['output'], night='', lines=lines_label, it_string=it_string)
print(" Transmission MCMC analysis for lines {0:s} already performed ".format(
lines_label))
pams_dict = results_dict['pams_dict']
chain_med = results_dict['chain_med']
boundaries = results_dict['boundaries']
ndim = results_dict['ndim']
start_average = np.average(results_dict['point_start'], axis=0)
if 'derived' in results_dict:
recompute_derived = False
else:
recompute_derived = True
results_dict['derived'] = {}
# TODO improve output
print(' *** sampler output ')
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}]) (start: {7:9f})'.format(key, val,
chain_med[val,0],
chain_med[val,2],
chain_med[val,1],
boundaries[val, 0],
boundaries[val, 1],
start_average[val])
)
if recompute_derived and key[-8:]=='contrast':
key_name = key[:-8] + 'Rh'
sample_size = len(results_dict['flat_chain'][:,val])
planet_ratio_sample = np.random.normal(planet_dict['radius_ratio'][0],planet_dict['radius_ratio'][1],size=sample_size)
results_dict['derived'][key_name] = {}
results_dict['derived'][key_name]['flat_chain'] = np.sqrt(results_dict['flat_chain'][:,val]/planet_ratio_sample**2 + 1.)
results_dict['derived'][key_name]['chain_med'] = compute_value_sigma(results_dict['derived'][key_name]['flat_chain'])
# R(h) = np.sqrt(1+h/delta)
# print(key[-8:], key[:3])
print(' *** derived output ')
for key, val in results_dict['derived'].items():
chain_med = results_dict['derived'][key]['chain_med']
print('{0:24s} {1:12f} {2:12f} {3:12f} (15-84 p)'.format(key,
chain_med[0],
chain_med[2],
chain_med[1]))
except FileNotFoundError:
lines_center, pams_dict, pams_list, boundaries, theta_start = define_theta_array(
model_case, lines_dict, planet_dict, n_jitter, allow_emission=allow_emission)
ndim = len(theta_start)
ngen = sampler_pams.get('n_gen', 64000)
nwalkers_mult = sampler_pams.get('n_walkers_mult', 2)
nwalkers = sampler_pams.get('n_walkers', nwalkers_mult * ndim)
nthin = sampler_pams.get('n_thin', 50)
nsteps = sampler_pams.get('n_steps', 20000)
nburnin = sampler_pams.get('n_burnin', 10000)
ndata = np.size(all_wave_meshgrid)
if pams_dict.get('rp_factor', False):
pam_id = pams_dict['rp_factor']
boundaries[pam_id, :] = [clv_rm_radius[0], clv_rm_radius[-1]]
print()
print(' PyDE + emcee parameters')
print(' n_dim: {0:9.0f}'.format(ndim))
print(
' n_walkers: (default: 2*ndim) {0:9.0f}'.format(nwalkers))
print(' n_gen: (default: 64000) {0:9.0f}'.format(ngen))
print(' n_steps: (default: 20000) {0:9.0f}'.format(nsteps))
print(
' n_burnin: (default: 10000) {0:9.0f}'.format(nburnin))
print(' n_thin: (default: 50) {0:9.0f}'.format(nthin))
population, sampler_chain, sampler_lnprobability, point_start = emcee_lines_fit_functions(
model_case,
all_wave_meshgrid,
all_transmission_spec,
all_transmission_spec_err,
all_clv_rm_radius,
all_clv_rm_grid,
all_planet_RVsinusoid,
lines_center,
all_jitter_index,
prior_dict,
theta_start, boundaries, ndim, nwalkers, ngen, nsteps, nthin)
flat_chain, flat_lnprob, chain_med, chain_MAP, lnprob_med, lnprob_MAP = \
emcee_flatten_median(population, sampler_chain,
sampler_lnprobability, nburnin, nthin, nwalkers)
emcee_compute_BIC_AIC(lnprob_med, lnprob_MAP, ndata, ndim)
med_lines_model, med_clv_model, med_lines_array, med_planet_K, med_planet_R, med_jitter = \
return_model(model_case,
chain_med[:, 0],
all_wave_meshgrid,
all_clv_rm_radius,
all_clv_rm_grid,
all_planet_RVsinusoid,
lines_center,
all_jitter_index)
map_lines_model, map_clv_model, map_lines_array, map_planet_K, map_planet_R, map_jitter = \
return_model(model_case,
chain_MAP,
all_wave_meshgrid,
all_clv_rm_radius,
all_clv_rm_grid,
all_planet_RVsinusoid,
lines_center,
all_jitter_index)
results_dict = {
'sampler_name': sampler_name,
'ndim': ndim,
'nwalkers': nwalkers,
'nthin': nthin,
'nsteps': nsteps,
'nburnin': nburnin,
'ndata': ndata,
'pams_dict': pams_dict,
'population': population,
'sampler_chain': sampler_chain,
'sampler_lnprobability': sampler_lnprobability,
'theta_start': theta_start,
'boundaries': boundaries,
'flat_chain': flat_chain,
'flat_lnprob': flat_lnprob,
'chain_med': chain_med,
'chain_MAP': chain_MAP,
'lnprob_med': lnprob_med,
'lnprob_MAP': lnprob_MAP,
'lines_center': lines_center,
'point_start': point_start,
#'BIC': BIC,
#'BIC_map': BIC_map
}
results_dict['results'] = {
'lines_model': med_lines_model,
'clv_model': med_clv_model,
'lines_array': med_lines_array,
'planet_K': med_planet_K,
'planet_R': med_planet_R,
'jitter': med_jitter
}
results_dict['results_MAP'] = {
'lines_model': map_lines_model,
'clv_model': map_clv_model,
'lines_array': map_lines_array,
'planet_K': map_planet_K,
'planet_R': map_planet_R,
'jitter': map_jitter
}
results_dict['results']['observational_pams'] = {}
results_dict['results_MAP']['observational_pams'] = {}
for night in night_dict:
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle(
'observational_pams', config_in['output'], night)
""" No differentiation by night """
for obs in lists['observations']:
results_dict['results']['observational_pams'][obs] = {}
results_dict['results_MAP']['observational_pams'][obs] = {}
""" RV shift from the observer RF to the planet RF
STRONG ASSUMPTIONS:
- there is only the transiting planet in the system
- the planet has null eccentricity
- linear approximation of the orbit near the transit event
Computation is performed by moving to the Solar Barycenter, then to the Stellar System Barycenter
and finally onto the planet
"""
results_dict['results']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
""" RV shift from Stellar Rest Frame to Planetary Rest Frame
We have to take into account the RV of the star relative to the Barycenter
"""
results_dict['results']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
start_average = np.average(results_dict['point_start'], axis=0)
results_dict['derived'] = {}
# TODO improve output
print(' *** sampler output ')
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}]) (start: {7:9f})'.format(key, val,
chain_med[val,0],
chain_med[val,2],
chain_med[val,1],
boundaries[val, 0],
boundaries[val, 1],
start_average[val])
)
if key[-8:]=='contrast':
key_name = key[:-8] + 'Rh'
sample_size = len(results_dict['flat_chain'][:,val])
planet_ratio_sample = np.random.normal(planet_dict['radius_ratio'][0],planet_dict['radius_ratio'][1],size=sample_size)
results_dict['derived'][key_name] = {}
results_dict['derived'][key_name]['flat_chain'] = np.sqrt(results_dict['flat_chain'][:,val]/planet_ratio_sample**2 + 1.)
results_dict['derived'][key_name]['chain_med'] = compute_value_sigma(results_dict['derived'][key_name]['flat_chain'])
print(' *** derived output ')
for key, val in results_dict['derived'].items():
chain_med = results_dict['derived'][key]['chain_med']
print('{0:24s} {1:12f} {2:12f} {3:12f} (15-84 p)'.format(key,
chain_med[0],
chain_med[2],
chain_med[1]))
save_to_cpickle(subroutine_name +'_'+sampler_name+'_results',
results_dict, config_in['output'], night='', lines=lines_label, it_string=it_string)
print('MCMC completed')
# Update planet parameters
# deprecated
# try:
# _ = load_from_cpickle(
# 'observational', config_in['output'], night, lines_label)
# print(" Transmission MCMC results for lines {0:s} already store in observational array".format(
# lines_label))
# except FileNotFoundError:
#
# results_full = load_from_cpickle('transmission_mcmc_'+sampler_name+'_results',
# config_in['output'], night='', lines=lines_label)
#
# for night in night_dict:
#
# results_night = load_from_cpickle('transmission_mcmc_'+sampler_name+'_results',
# config_in['output'], night=night, lines=lines_label)
# lists = load_from_cpickle('lists', config_in['output'], night)
# observational_pams = load_from_cpickle(
# 'observational_pams', config_in['output'], night)
# for obs in lists['observations']:
#
# """ RV shift from the observer RF to the planet RF
# STRONG ASSUMPTIONS:
# - there is only the transiting planet in the system
# - the planet has null eccentricity
# - linear approximation of the orbit near the transit event
#
# Computation is performed by moving to the Solar Barycenter, then to the Stellar System Barycenter
# and finally onto the planet
# """
# observational_pams[obs]['rv_shift_ORF2PRF'] = \
# observational_pams[obs]['BERV'] \
# - observational_pams['RV_star']['RV_systemic'] \
# - results_full['results']['planet_K'] \
# * (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
# / planet_dict['period'][0] * 2 * np.pi
# """ RV shift from Stellar Rest Frame to Planetary Rest Frame
# We have to take into account the RV of star relatively to the Barycenter
# """
# observational_pams[obs]['rv_shift_SRF2PRF'] = \
# + observational_pams[obs]['RV_bjdshift'] \
# - results_full['results']['planet_K'] \
# * (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
# / planet_dict['period'][0] * 2 * np.pi
# observational_pams['Rp_factor'] = results_full['results']['planet_R']
# observational_pams['lines_array'] = results_full['results']['lines_array']
# observational_pams['jitter'] = results_full['results']['jitter']
# save_to_cpickle('observational', observational_pams,
# config_in['output'], night, lines_label)
def plot_transmission_binned_mcmc(config_in, lines_label, night_input='', reference='planetRF', pca_iteration=-1):
night_dict = from_config_get_nights(config_in)
planet_dict = from_config_get_planet(config_in)
star_dict = from_config_get_star(config_in)
clv_rm_dict = from_config_get_clv_rm(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
sampler_pams = lines_dict['sampler_parameters']
sampler_name = sampler_pams.get('sampler_name', 'emcee')
if night_input == '':
night_list = ['']
else:
night_list = np.atleast_1d(night_input)
os.system('mkdir -p plots')
# Workaround to check if the transmission spectrum has been obtained through PCA iterations
for night in night_dict:
preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
if preparation_input.get('pca_output', False):
if pca_iteration >= 0:
it_string = str(pca_iteration).zfill(2)
else:
it_string = str(preparation_input.get('ref_iteration', 0)).zfill(2)
else:
it_string = ''
preparation_input = None
break
for night in night_list:
results_dict = load_from_cpickle(subroutine_name+'_'+sampler_name+'_results', config_in['output'], night, lines_label, it_string)
print(" Transmission MCMC analysis for lines {0:s}, night: {1:s} already performed".format(
lines_label, night))
if night == '':
chains_dir = 'plots/mcmc_binned_chains_full/'
else:
chains_dir = 'plots/mcmc_binned_chains_' + night + '/'
os.system('mkdir -p ' + chains_dir)
pams_dict = results_dict['pams_dict']
chain_med = results_dict['chain_med']
lnprob_med = results_dict['lnprob_med']
boundaries = results_dict['boundaries']
flat_chain = results_dict['flat_chain']
flat_lnprob = results_dict['flat_lnprob']
nthin = results_dict['nthin']
nsteps = results_dict['nsteps']
nburnin = results_dict['nburnin']
sampler_chain = results_dict['sampler_chain']
start_average = np.average(results_dict['point_start'], axis=0)
ndim = results_dict['ndim']
med_lines_model = results_dict['results']['lines_model']
if 'derived' in results_dict:
recompute_derived = False
else:
recompute_derived = True
results_dict['derived'] = {}
# TODO improve output
print(' *** sampler output (plotting the chains)')
sample_size = np.size(flat_chain, axis=0)
dimen_size = np.size(flat_chain, axis=1)
corner_plot = {
'samples': np.zeros([sample_size, dimen_size + 1]),
'labels': [],
'truths': [],
'start': [],
}
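# The extra column is reserved for the ln-probability, appended after the loop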
i_corner = 0
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}]) (start: {7:9f})'.format(key, val,
chain_med[val,0],
chain_med[val,2],
chain_med[val,1],
boundaries[val, 0],
boundaries[val, 1],
start_average[val])
)
if recompute_derived and key[-8:]=='contrast':
key_name = key[:-8] + 'Rh'
planet_ratio_sample = np.random.normal(planet_dict['radius_ratio'][0],planet_dict['radius_ratio'][1],size=sample_size)
results_dict['derived'][key_name] = {}
results_dict['derived'][key_name]['flat_chain'] = np.sqrt(results_dict['flat_chain'][:,val]/planet_ratio_sample**2 + 1.)
results_dict['derived'][key_name]['chain_med'] = compute_value_sigma(results_dict['derived'][key_name]['flat_chain'])
corner_plot['samples'][:, i_corner] = flat_chain[:, val]
corner_plot['labels'].append(re.sub('_', ' ', key))
corner_plot['truths'].append(chain_med[val, 0])
corner_plot['start'].append(start_average[val])
i_corner += 1
file_name = chains_dir + repr(val) + '.png'
fig = plt.figure(figsize=(12, 12))
plt.title(key)
plt.plot(sampler_chain[:, :, val].T, '-', alpha=0.5)
plt.axvline(nburnin / nthin, c='r')
plt.savefig(file_name, bbox_inches='tight', dpi=300)
plt.close(fig)
corner_plot['samples'][:, -1] = flat_lnprob[:]
corner_plot['labels'].append('ln-prob')
corner_plot['truths'].append(lnprob_med[0])
corner_plot['start'].append(None)
# R(h) = np.sqrt(1+h/delta)
# print(key[-8:], key[:3])
print(' *** derived output ')
for key, val in results_dict['derived'].items():
chain_med = results_dict['derived'][key]['chain_med']
print('{0:24s} {1:12f} {2:12f} {3:12f} (15-84 p)'.format(key,
chain_med[0],
chain_med[2],
chain_med[1]))
print(' *** corner plot using pyGTC output ')
filename_rad = subroutine_name + '_' + reference + '_cornerplot'
output_file = get_filename(filename_rad, config_in['output'], night=night, lines=lines_label, it_string=it_string, extension='.pdf')
print(' *** filename: ', output_file)
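# Attempt the corner plot with 3 confidence levels first; fall back to 2 if
# pygtc raises (the exact failure mode is assumed to be version-dependent)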
try:
GTC = pygtc.plotGTC(chains=corner_plot['samples'],
paramNames=corner_plot['labels'],
truths=[corner_plot['truths'],corner_plot['start']],
GaussianConfLevels=True,
nConfidenceLevels=3,
figureSize=12,
labelRotation= (True,True),
plotName='plots/'+output_file)
except:
GTC = pygtc.plotGTC(chains=corner_plot['samples'],
paramNames=corner_plot['labels'],
truths=[corner_plot['truths'],corner_plot['start']],
GaussianConfLevels=True,
nConfidenceLevels=2,
figureSize=12,
labelRotation= (True,True),
plotName='plots/'+output_file)
GTC = None
continue
def plot_transmission_binned_mcmc_deprecated(config_in, lines_label, night_input=''):
night_dict = from_config_get_nights(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the analysis"""
try:
clv_rm_corrected = load_from_cpickle(subroutine_name, config_in['output'], night)
mcmc_data = load_from_cpickle(subroutine_name + '_data', config_in['output'], night, lines_label)
clv_rm_radius = mcmc_data['clv_rm_radius']
clv_rm_grid = mcmc_data['clv_rm_grid']
transmission_spec = mcmc_data['transmission_spec']
transmission_spec_err = mcmc_data['transmission_spec_err']
wave_meshgrid = mcmc_data['wave_meshgrid']
time_meshgrid = mcmc_data['time_meshgrid']
planet_RVsinusoid = mcmc_data['planet_RVsinusoid']
except:
print("No transmission spectrum results, no plots")
print()
continue
""" Creation of the color array, based on the BJD of the observations
"""
bjd = []
am = []
for obs in lists['observations']:
bjd.append(clv_rm_corrected[obs]['BJD'] - 2450000.0)
am.append(clv_rm_corrected[obs]['AIRMASS'])
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
cmap = plt.get_cmap('coolwarm')
plot_data = transmission_spec.copy()
from SLOPpy.subroutines.math_functions import interpolate2d_grid_nocheck
plot_data = interpolate2d_grid_nocheck(1.000, clv_rm_radius, clv_rm_grid)
#clv_model = interpolate2d_grid_nocheck(1.000, clv_rm_radius, clv_rm_grid)
vmin = plot_data.min()
vmax = plot_data.max()
levels = MaxNLocator(nbins=15).tick_values(vmin, vmax)
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
plt.figure(figsize=(15, 10))
PCF = plt.contourf(wave_meshgrid, time_meshgrid, plot_data, levels=levels, cmap=cmap)
cbar = plt.colorbar(PCF)
cbar.ax.set_ylabel('Intensity')
plt.show()
levels = MaxNLocator(nbins=15).tick_values(vmin, vmax)
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
plt.figure(figsize=(15, 10))
PCM = plt.pcolormesh(wave_meshgrid, time_meshgrid, plot_data,
vmin=vmin, vmax=vmax, cmap=cmap)
cbar = plt.colorbar(PCM)
cbar.ax.set_ylabel('Intensity')
plt.show()
plot_data = transmission_spec.copy()
#clv_model = interpolate2d_grid_nocheck(1.000, clv_rm_radius, clv_rm_grid)
vmin = plot_data.min()
vmax = plot_data.max()
levels = MaxNLocator(nbins=15).tick_values(vmin, vmax)
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
plt.figure(figsize=(15, 10))
PCF = plt.contourf(wave_meshgrid, time_meshgrid, plot_data, levels=levels, cmap=cmap)
cbar = plt.colorbar(PCF)
cbar.ax.set_ylabel('Intensity')
plt.show()
levels = MaxNLocator(nbins=15).tick_values(vmin, vmax)
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
plt.figure(figsize=(15, 10))
PCM = plt.pcolormesh(wave_meshgrid, time_meshgrid, plot_data,
vmin=vmin, vmax=vmax, cmap=cmap)
cbar = plt.colorbar(PCM)
cbar.ax.set_ylabel('Intensity')
plt.show()
| 77,242 | 45.475933 | 141 | py |
SLOPpy | SLOPpy-main/SLOPpy/interstellar_lines.bkp.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_interstellar_lines", "plot_interstellar_lines"]
subroutine_name = 'interstellar_lines'
#def plot_identify_stellar_lines(config_in)
def compute_interstellar_lines(config_in):
night_dict = from_config_get_nights(config_in)
interstellar_lines = from_config_get_interstellar_lines(config_in)
if not interstellar_lines:
return
for night in night_dict:
print()
print("compute_interstellar_lines Night: ", night)
try:
interstellar = load_from_cpickle('interstellar_lines', config_in['output'], night)
continue
except:
print()
print(" No interstellar correction file found, computing now ")
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': subroutine_name
}
interstellar = {
'subroutine': subroutine_name,
}
import matplotlib.pyplot as plt
for obs in lists['observations']:
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
interstellar[obs] = {}
""" for plotting purpose only"""
processed[obs]['wave'] = input_data[obs]['wave']
processed[obs]['flux'] = input_data[obs]['e2ds']/calib_data['blaze']/input_data[obs]['step']
processed[obs]['flux_err'] = np.sqrt(input_data[obs]['e2ds'])/calib_data['blaze']/input_data[obs]['step']
if obs in lists['telluric']:
try:
interstellar['flux_total'] += processed[obs]['flux']
interstellar['flux_total_err'] += processed[obs]['flux_err']**2
except KeyError:
interstellar['wave'] = input_data[obs]['wave']
interstellar['flux_total'] = processed[obs]['flux'][:,:]
interstellar['flux_total_err'] = processed[obs]['flux_err']**2
interstellar['flux_total_err'] = np.sqrt(interstellar['flux_total_err'])
""" Zero or negative values are identified, flagged and substituted with another value """
interstellar['flux_total'], interstellar['flux_total_err'], interstellar['null'] = \
replace_values_errors(interstellar['flux_total'], interstellar['flux_total_err'], 0.0001)
"""rescaling"""
interstellar['flux_rescaling'], interstellar['flux_rescaled'],interstellar['flux_rescaled_err'] = \
perform_rescaling(interstellar['wave'],
interstellar['flux_total'],
interstellar['flux_total_err'],
observational_pams['wavelength_rescaling'])
interstellar['correction'] = np.ones(np.shape(interstellar['wave']))
for line_name, line in interstellar_lines.items():
interstellar[line_name] = {}
sel1 = (np.abs(interstellar['wave']-line[0])<line[1])
sel2 = (~sel1) & (np.abs(interstellar['wave']-line[0])<line[2])
sel3 = (sel1 | sel2)
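# sel1: line core, sel2: continuum windows on both sides, sel3: their union;
# the polynomial is fitted on sel2 and the spline on the normalized sel3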
poly_coeff = np.polyfit(interstellar['wave'][sel2], interstellar['flux_rescaled'][sel2], 2)
normalized = interstellar['flux_rescaled'][sel3]/np.polyval(poly_coeff, interstellar['wave'][sel3])
interstellar[line_name]['spline_eval'], \
interstellar[line_name]['spline_coeff'], \
interstellar[line_name]['spline_knots'] = \
compute_spline(interstellar['wave'][sel3], normalized, 0.04)
interstellar['correction'][sel1] = sci_int.splev(interstellar['wave'][sel1], interstellar[line_name]['spline_coeff'])
interstellar[line_name]['sel1'] = sel1
interstellar[line_name]['sel2'] = sel2
interstellar[line_name]['sel3'] = sel3
save_to_cpickle('interstellar_lines_processed', processed, config_in['output'], night)
save_to_cpickle('interstellar_lines', interstellar, config_in['output'], night)
def plot_interstellar_lines(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
interstellar_lines = from_config_get_interstellar_lines(config_in)
if not interstellar_lines:
return
if night_input=='':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
print("plot_interstellar_lines Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
""" Retrieving the analysis"""
try:
processed = load_from_cpickle('interstellar_lines_processed', config_in['output'], night)
interstellar = load_from_cpickle('interstellar_lines', config_in['output'], night)
except:
print()
print('No interstellar correction, no plots')
continue
colors, cmap, line_colors = make_color_array(lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1)
cbax1 = plt.subplot(gs[:, 1])
for i, obs in enumerate(lists['observations']):
"""rescaling"""
processed[obs]['flux_rescaling'], processed[obs]['flux_rescaled'], processed[obs]['flux_rescaled_err'] = \
perform_rescaling(interstellar['wave'],
processed[obs]['flux'],
processed[obs]['flux_err'],
observational_pams['wavelength_rescaling'])
ax1.scatter(interstellar['wave'], processed[obs]['flux_rescaled'],
s=1, c=line_colors[i])
#ax1.plot(interstellar['wave'], interstellar['correction'], c='black')
ax2.scatter(interstellar['wave'], processed[obs]['flux_rescaled']/interstellar['correction'],
s=1, c=line_colors[i])
for line_name, line in interstellar_lines.items():
#ax1.axvline(line[0], c='k')
ax1.axvline(line[0]-line[1], c='b')
ax1.axvline(line[0]+line[1], c='b')
ax1.axvline(line[0]-line[2], c='g')
ax1.axvline(line[0]+line[2], c='g')
#ax2.axvline(line[0], c='k')
ax2.axvline(line[0]-line[1], c='b')
ax2.axvline(line[0]+line[1], c='b')
ax2.axvline(line[0]-line[2], c='g')
ax2.axvline(line[0]+line[2], c='g')
try:
wave_min = min(wave_min, line[0])
wave_max = max(wave_max, line[0])
range_max = max(range_max, line[2])
except NameError:
wave_min = line[0]
wave_max = line[0]
range_max = line[2]
#ax1.legend(loc=3)
ax1.set_title('Night: ' + night)
ax1.set_xlim(wave_min-2*range_max, wave_max+2*range_max)
ax2.set_xlabel('$\lambda$ [$\AA$]')
ax1.set_xlabel('$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show() | 8,408 | 38.478873 | 129 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_spectrum_average.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = [
'compute_transmission_spectrum_average',
'plot_transmission_spectrum_average'
]
subroutine_name = 'transmission_spectrum_average'
pick_files = 'transmission_spectrum'
sampler = 'emcee'
def compute_transmission_spectrum_average(config_in, lines_label, reference='planetRF', pca_iteration=-1):
night_dict = from_config_get_nights(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
line_iter_dict = spectral_lines[lines_label]
shared_data = load_from_cpickle('shared', config_in['output'])
total_n_transit_full = 0
total_n_transit_out = 0
total_lists = {}
""" Using the user defined range to define the transmission spectrum region
This range can be larger than the one defined for the MCMC range, and it
MUST include the continuum windws for the transmission lightcurve
"""
shared_selection = (shared_data['coadd']['wave'] >= line_iter_dict['range'][0]) \
& (shared_data['coadd']['wave'] < line_iter_dict['range'][1])
binned_selection = (shared_data['binned']['wave'] >= line_iter_dict['range'][0]) \
& (shared_data['binned']['wave'] < line_iter_dict['range'][1])
transmission_average_template = {
'subroutine': subroutine_name + '_' + reference,
'range': line_iter_dict['range'],
'wave': shared_data['coadd']['wave'][shared_selection],
'step': shared_data['coadd']['step'][shared_selection],
'size': int(np.sum(shared_selection)),
'binned_wave': shared_data['binned']['wave'][binned_selection],
'binned_step': shared_data['binned']['step'][binned_selection],
'binned_size': int(np.sum(binned_selection))
}
results_list = ['user',
'mcmc_night_MED',
'mcmc_night_MAP',
'mcmc_global_MED',
'mcmc_global_MAP']
for results_selection in results_list:
""" First check to see if we need to compute the average transmission
iteratively when PCA has been employed """
for night in night_dict:
preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
if preparation_input.get('pca_output', False):
if pca_iteration >= 0:
it_string = str(pca_iteration).zfill(2)
else:
it_string = str(preparation_input.get('ref_iteration', 0)).zfill(2)
else:
it_string = ''
preparation_input = None
break
transmission_average = transmission_average_template.copy()
try:
transmission_average = load_from_cpickle(subroutine_name + '_' + reference + '_' + results_selection, config_in['output'], lines=lines_label, it_string=it_string)
print("{0:45s} {1:s} {2:s}".format(subroutine_name + '_' + reference, results_selection, 'Retrieved'))
continue
except (FileNotFoundError, IOError):
skip_iteration = False
#print("{0:45s} {1:s} {2:s}".format(subroutine_name + '_' + reference, results_selection, 'Computing'))
#skip_iteration = False
for night in night_dict:
# """ Retrieving the list of observations"""
total_lists[night] = load_from_cpickle('lists', config_in['output'], night)
#try:
# transmission_average[night] = load_from_cpickle('transmission_second_telluric_'+reference, config_in['output'], night)
# print(" Using transmission spectra with second telluric correction for Night: {0:s}".format(night))
#except:
# transmission_average[night] = load_from_cpickle('transmission_'+reference, config_in['output'], night)
try:
transmission_average[night] = load_from_cpickle(pick_files + '_' + reference + '_' + results_selection, config_in['output'], night, lines_label, it_string)
except:
skip_iteration = True
total_n_transit_full += len(total_lists[night]['transit_full'])
total_n_transit_out += len(total_lists[night]['transit_out'])
if skip_iteration: continue
print("{0:45s} {1:s} {2:s}".format(subroutine_name + '_' + reference, results_selection, 'Computing'))
array_average_in = np.zeros([total_n_transit_full, transmission_average['size']])
weights_average_in = np.zeros([total_n_transit_full, transmission_average['size']])
clvrm_average_in = np.zeros([total_n_transit_full, transmission_average['size']])
uncorr_average_in = np.zeros([total_n_transit_full, transmission_average['size']])
i_total_in = 0
array_average_out = np.zeros([total_n_transit_out, transmission_average['size']])
weights_average_out = np.zeros([total_n_transit_out, transmission_average['size']])
i_total_out = 0
for night in night_dict:
for obs in total_lists[night]['transit_full']:
array_average_in[i_total_in, :] = transmission_average[night][obs]['normalized'][:]
weights_average_in[i_total_in, :] = 1./(transmission_average[night][obs]['normalized_err']**2.)
clvrm_average_in[i_total_in, :] = transmission_average[night][obs]['clv_model_rebinned'][:]
uncorr_average_in[i_total_in, :] = transmission_average[night][obs]['normalized_uncorrected'][:]
i_total_in += 1
for obs in total_lists[night]['transit_out']:
array_average_out[i_total_out, :] = transmission_average[night][obs]['normalized'][:]
weights_average_out[i_total_out, :] = 1. / (transmission_average[night][obs]['normalized_err'] ** 2.)
i_total_out += 1
transmission_average['average'], transmission_average['sum_weights'] = np.average(
array_average_in, axis = 0, weights = weights_average_in, returned = True)
transmission_average['average_err'] = 1./np.sqrt(transmission_average['sum_weights'])
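# Standard error of the weighted mean: 1/sqrt(sum of the weights)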
transmission_average['average_clv_model'], _ = np.average(
clvrm_average_in, axis = 0, weights = weights_average_in, returned = True)
transmission_average['average_uncorrected'], _ = np.average(
uncorr_average_in, axis = 0, weights = weights_average_in, returned = True)
transmission_average['binned'] = \
rebin_1d_to_1d(transmission_average['wave'],
transmission_average['step'],
transmission_average['average'],
transmission_average['binned_wave'],
transmission_average['binned_step'],
preserve_flux=False)
transmission_average['binned_err'] = \
rebin_1d_to_1d(transmission_average['wave'],
transmission_average['step'],
transmission_average['average_err'],
transmission_average['binned_wave'],
transmission_average['binned_step'],
preserve_flux=False,
is_error=True)
transmission_average['binned_clv_model'] = \
rebin_1d_to_1d(transmission_average['wave'],
transmission_average['step'],
transmission_average['average_clv_model'],
transmission_average['binned_wave'],
transmission_average['binned_step'],
preserve_flux=False)
transmission_average['binned_uncorrected'] = \
rebin_1d_to_1d(transmission_average['wave'],
transmission_average['step'],
transmission_average['average_uncorrected'],
transmission_average['binned_wave'],
transmission_average['binned_step'],
preserve_flux=False)
transmission_average['average_out'], transmission_average['sum_weights_out'] = np.average(
array_average_out, axis=0, weights=weights_average_out, returned=True)
transmission_average['average_out_err'] = 1./np.sqrt(transmission_average['sum_weights_out'])
transmission_average['binned_out'] = \
rebin_1d_to_1d(transmission_average['wave'],
transmission_average['step'],
transmission_average['average_out'],
transmission_average['binned_wave'],
transmission_average['binned_step'],
preserve_flux=False)
transmission_average['binned_out_err'] = \
rebin_1d_to_1d(transmission_average['wave'],
transmission_average['step'],
transmission_average['average_out_err'],
transmission_average['binned_wave'],
transmission_average['binned_step'],
preserve_flux=False,
is_error=True)
save_to_cpickle(subroutine_name + '_' + reference + '_' + results_selection, transmission_average, config_in['output'], lines=lines_label, it_string=it_string)
def plot_transmission_spectrum_average(config_in, lines_label, night_input='', results_input='', reference='planetRF', pca_iteration=-1):
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
if results_input=='':
results_list = ['user',
'mcmc_night_MED',
'mcmc_night_MAP',
'mcmc_global_MED',
'mcmc_global_MAP']
else:
results_list = np.atleast_1d(results_input)
os.system('mkdir -p plots')
interactive_plots = from_config_get_interactive_plots(config_in)
# Workaround to check if the transmission spectrum has been obtained through PCA iterations
night_dict = from_config_get_nights(config_in)
for night in night_dict:
preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
if preparation_input.get('pca_output', False):
if pca_iteration >= 0:
it_string = str(pca_iteration).zfill(2)
else:
it_string = str(preparation_input.get('ref_iteration', 0)).zfill(2)
else:
it_string = ''
preparation_input = None
break
for results_selection in results_list:
try:
transmission_average = load_from_cpickle(subroutine_name + '_' + reference + '_' + results_selection, config_in['output'], lines=lines_label, it_string=it_string)
print("{0:45s} {1:s} {2:s}".format(subroutine_name + '_' + reference, results_selection, 'Plotting'))
except (FileNotFoundError, IOError):
print("{0:45s} {1:s}".format(subroutine_name + '_' + reference, 'Plot skipped'))
return
filename_rad = subroutine_name + '_' + reference + '_' + results_selection
fig = plt.figure(figsize=(12, 9))
gs = GridSpec(2, 1)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
spec_offset = 0.025
ax1.errorbar(transmission_average['wave'],
transmission_average['average'],
yerr=transmission_average['average_err'],
fmt='ko', ms=1, zorder=10, alpha=0.10, label='average')
#ax1.scatter(transmission_average['wave'],
# transmission_average['average'],
# c='black',
# s=2, zorder=15,
# label='average',
# )
ax1.errorbar(transmission_average['binned_wave'],
transmission_average['binned'],
yerr=transmission_average['binned_err'],
fmt='ko', ms=3, zorder=20, label='binned')
#ax1.scatter(transmission_average['wave'],
# transmission_average['average'],
# c='black',
# s=2, zorder=200,
# label='average',
# )
ax2.errorbar(transmission_average['binned_wave'],
transmission_average['binned_out'],
yerr=transmission_average['binned_out_err'],
fmt='ko', ms=3, zorder=20, label='binned out')
ax2.errorbar(transmission_average['wave'],
transmission_average['average_out'],
yerr=transmission_average['average_out_err'],
fmt='ko', ms=1, zorder=10, alpha=0.10, label='average out')
for n_night, night in enumerate(night_dict):
ax1.errorbar(transmission_average['wave'],
transmission_average[night]['average']-spec_offset*(1.+n_night),
yerr=transmission_average[night]['average_err'],
color='C'+repr(n_night),
fmt='o', ms=1, zorder=1, alpha=0.25)
ax1.scatter(transmission_average['wave'],
transmission_average[night]['average']-spec_offset*(1.+n_night),
c='C'+repr(n_night),
s=2, zorder=2,
label=night,
)
ax2.errorbar(transmission_average['wave'],
transmission_average[night]['average_out']-spec_offset*(1.+n_night),
yerr=transmission_average[night]['average_out_err'],
color='C'+repr(n_night),
fmt='o', ms=1, zorder=1, alpha=0.25)
ax2.scatter(transmission_average['wave'],
transmission_average[night]['average_out']-spec_offset*(1.+n_night),
c='C'+repr(n_night),
s=2, zorder=2)
#ax1.set_ylim(0.95-spec_offset*(1.+n_night), 1.05)
#ax1.set_xlim(config_in['master-out']['wavelength_range'][0], config_in['master-out']['wavelength_range'][1])
try:
ax1.set_xlim(lines_dict['plot_range'][0], lines_dict['plot_range'][1])
except:
ax1.set_xlim(lines_dict['range'][0], lines_dict['range'][1])
ax1.set_ylim(0.985, 1.01)
ax2.set_xlabel(r'$\lambda$ [$\AA$]')
ax1.legend(loc=3)
ax1.set_title('Average in-transit transmission spectrum in {0:s}'.format(reference))
ax2.set_title('Average out-transit transmission spectrum in {0:s}'.format(reference))
output_file = get_filename(filename_rad + '_binned', config_in['output'], night='', lines=lines_label, extension='.pdf', it_string=it_string)
plt.savefig('plots/'+output_file, bbox_inches='tight', dpi=300)
if interactive_plots:
plt.show()
plt.close()
| 15,311 | 44.981982 | 174 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_molecfit.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_molecfit",
"plot_telluric_molecfit"]
def compute_telluric_molecfit(config_in):
"""
Lazy workaround
:param config_in:
:param kwargs:
:return:
"""
print('UNTESTED PROCEDURE - I QUIT')
quit()
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
for night in night_dict:
instrument_name = night_dict[night]['instrument']
template_dict = instrument_dict[instrument_name]['telluric_template']
print()
print("compute_telluric_molecfit Night: ", night)
print()
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
continue
except:
print(" No telluric correction file found, computing now ")
print()
print(' instrument :', instrument_name)
print(' template :', template_dict['file'])
print(' fit_range :', template_dict['fit_range'])
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations"""
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': 'telluric_molecfit',
'n_orders': 0,
'n_pixels': 0,
}
telluric = {
'subroutine': 'telluric_molecfit',
'reference_frame': 'observer'
}
processed['airmass_ref'] = 0.000
processed['telluric'] = {}
processed['rebin'] = {}
"""
Molecfit works on pixel grid, so we must ensure that the spectra are rebinned always on the same wavelength
scale and same wavelength step. We use local arrays for this purpose
"""
rebin_step_unit = 0.01000000
processed['rebin']['wave'] = np.arange(input_data['coadd']['wavelength_range'][0],
input_data['coadd']['wavelength_range'][1],
rebin_step_unit,
dtype=np.double)
processed['rebin']['size'] = np.size(processed['rebin']['wave'])
processed['rebin']['step'] = np.ones(processed['rebin']['size'], dtype=np.double) * rebin_step_unit
processed['rebin'] = {
'wave': input_data['coadd']['wave'],
'size': input_data['coadd']['size'],
'step': input_data['coadd']['step'],
}
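# NOTE: the fixed-step grid built with np.arange above is immediately
# superseded by the shared coadd grid assigned here; either choice keeps
# every observation on one common wavelength scale, which is what molecfit
# requires.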
print(' Writing data and configuration files for molecfit+calctrans')
print()
"""
We store all the molecfit files in a subdirectory
We save the path of the main directory to a temporary file
"""
os.system('mkdir -p molecfit_'+night)
os.system('mkdir -p molecfit_'+night + '/output/')
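# A pathlib-based equivalent (sketch, not used here):
#   from pathlib import Path
#   Path('molecfit_' + night, 'output').mkdir(parents=True, exist_ok=True)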
# There must be a more elegant way to do this, but I'm not aware of it
for n_obs, obs in enumerate(lists['observations']):
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
""" e2ds spectra are rescaled and then rebinned while keeping them in the Observer Reference Frame"""
processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
perform_rescaling(input_data[obs]['wave'],
input_data[obs]['e2ds'],
input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['rebin_ORF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['e2ds_rescaled'],
calib_data['blaze'],
processed['rebin']['wave'],
processed['rebin']['step'],
preserve_flux=preserve_flux,
rv_shift=0.00)
""" Molecfit analysis is skipped if the telluric computation has been computed already"""
if os.path.isfile('./molecfit_'+night +'/output/'+obs+'_ORF_s1d_TAC.dat'):
print(' molecfit+calctrans results for ' + obs + ' already available')
continue
""" the spectra is save onto an ASCII file in a format suitable for molecfit """
fileout = open('./molecfit_'+night +'/'+obs+'_ORF_s1d.dat', 'w')
for w, f in zip(processed['rebin']['wave'], processed[obs]['rebin_ORF']):
fileout.write('{0:12.6f} {1:12.6f} \n'.format(w, f))
fileout.close()
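# Equivalent vectorized write (sketch): the loop above could be replaced by
#   np.savetxt(path, np.column_stack((processed['rebin']['wave'],
#              processed[obs]['rebin_ORF'])), fmt='%12.6f')
# where path stands for the same output file; both produce two
# fixed-width columns.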
"""
processed[obs]['rebin_SRF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['e2ds_rescaled'],
calib_data['blaze'],
processed['rebin']['wave'],
processed['rebin']['step'],
preserve_flux=preserve_flux,
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF'])
fileout = open('./molecfit_'+night +'/'+obs+'_SRF_s1d.dat','w')
for w, f in zip(processed['rebin']['wave'], processed[obs]['rebin_SRF']):
fileout.write('{0:12.6f} {1:12.6f} \n'.format(w, f))
fileout.close()
"""
# TODO: input from configuration file for molecfit installation path
bash_script = open('./molecfit_'+night +'/molecfit_exec_' + obs + '.source', 'w')
bash_script.write('#!/bin/bash \n')
bash_script.write('echo " " executing molecfit+calctrans on '+obs+' \n')
bash_script.write('/usr/local/eso/bin/molecfit '+obs+'.par > ' + obs +'_molecfit.log\n')
bash_script.write('/usr/local/eso/bin/calctrans '+obs+'.par > ' + obs +'_calctrans.log\n')
bash_script.close()
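# Sketch of how the hard-coded ESO path could be made configurable
# (hypothetical 'molecfit_path' key, not part of the current config):
#   molecfit_bin = config_in.get('molecfit_path', '/usr/local/eso/bin')
#   bash_script.write(molecfit_bin + '/molecfit ' + obs + '.par > ...')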
fileout = open('./molecfit_'+night +'/'+obs+'.par', 'w')
fileout.write("### Driver for MOLECFIT\n")
# user working directory only important for REFLEX workflow and GUI
# not used by molecfit itself.
fileout.write("user_workdir:./\n")
## INPUT DATA
# Data file name (path relative to the current directory or absolute path)
fileout.write("filename: " + obs +"_ORF_s1d.dat\n")
# ASCII list of files to be corrected for telluric absorption using the
# transmission curve derived from the input reference file (path of list and
# listed files relative to the current directory or absolute path; default: "none")
fileout.write("listname: none\n")
# Type of input spectrum -- 1 = transmission (default); 0 = emission
fileout.write("trans: 1\n")
# Names of the file columns (table) or extensions (image) containing:
# Wavelength Flux Flux_Err Mask
# - Flux_Err and/or Mask can be avoided by writing 'NULL'
# - 'NULL' is required for Wavelength if it is given by header keywords
# - parameter list: col_lam, col_flux, col_dflux, and col_mask
fileout.write("columns: Wavelength Flux NULL NULL\n")
# Default error relative to mean for the case that the error column is missing
fileout.write("default_error: 0.001\n")
# Multiplicative factor to convert wavelength to micron
# (e.g. nm -> wlgtomicron = 1e-3)
fileout.write("wlgtomicron: 0.0001\n")
# Wavelengths in vacuum (= vac) or air (= air)
fileout.write("vac_air: air\n")
# TODO: input from configuration file for molecfit installation path
# ASCII or FITS table for wavelength ranges in micron to be fitted
# (path relative to the current directory or absolute path; default: "none")
fileout.write("wrange_include: include_"+night+".dat\n")
# ASCII or FITS table for wavelength ranges in micron to be excluded from the
# fit (path relative to the current directory or absolute path; default: "none")
# wrange_exclude: /Users/malavolta/Astro/ExoAtmospheres/molecfit_test//HIP63901_exclude_w.dat
# ASCII or FITS table for pixel ranges to be excluded from the fit
# (path relative to the current directory or absolute path; default: "none")
# prange_exclude: /Users/malavolta/Astro/ExoAtmospheres/molecfit_test//HIP63901_exclude_p.dat
## RESULTS
# Directory for output files (path relative to the current directory or absolute path)
fileout.write("output_dir:./\n")
# Name for output files
# (supplemented by "_fit" or "_tac" as well as ".asc", ".atm", ".fits",
# ".par, ".ps", and ".res")
fileout.write("output_name: "+ obs + "\n")
# Plot creation: gnuplot is used to create control plots
# W - screen output only (incorporating wxt terminal in gnuplot)
# X - screen output only (incorporating x11 terminal in gnuplot)
# P - postscript file labelled '<output_name>.ps', stored in <output_dir>
# combinations possible, i.e. WP, WX, XP, WXP (however, keep the order!)
# all other input: no plot creation is performed
fileout.write("plot_creation: none\n")
# Create plots for individual fit ranges? -- 1 = yes; 0 = no
fileout.write("plot_range: 0\n")
## FIT PRECISION
# Relative chi2 convergence criterion
fileout.write("ftol: " + input_data[obs]['molecfit']['ftol'] + "\n")
# Relative parameter convergence criterion
fileout.write("xtol: " + input_data[obs]['molecfit']['xtol'] + "\n")
## MOLECULAR COLUMNS
# List of molecules to be included in the model
# (default: 'H2O', N_val: nmolec)
molecules_list = "list_molec:"
for mol in input_data[obs]['molecfit']['molecules']:
molecules_list += " " + mol
fileout.write(molecules_list + "\n")
# Fit flags for molecules -- 1 = yes; 0 = no (N_val: nmolec)
# (built dynamically so that the number of flags matches the molecule list)
fileout.write("fit_molec: " + ' '.join(['1'] * len(input_data[obs]['molecfit']['molecules'])) + "\n")
# Values of molecular columns, expressed relatively to the input ATM profile
# columns (N_val: nmolec) [1 = 100%]
fileout.write("relcol: " + ' '.join(['1.0'] * len(input_data[obs]['molecfit']['molecules'])) + "\n")
## BACKGROUND AND CONTINUUM
# Conversion of fluxes from phot/(s*m2*mum*as2) (emission spectrum only) to
# flux unit of observed spectrum:
# 0: phot/(s*m^2*mum*as^2) [no conversion]
# 1: W/(m^2*mum*as^2)
# 2: erg/(s*cm^2*A*as^2)
# 3: mJy/as^2
# For other units the conversion factor has to be considered as constant term
# of the continuum fit.
fileout.write("flux_unit: 0\n")
# Fit of telescope background -- 1 = yes; 0 = no (emission spectrum only)
fileout.write("fit_back: 0\n")
# Initial value for telescope background fit (range: [0,1])
fileout.write("telback: 0.1\n")
# Polynomial fit of continuum --> degree: cont_n
fileout.write("fit_cont: 1\n")
# Degree of coefficients for continuum fit
fileout.write("cont_n: {0:1.0f}".format(input_data[obs]['molecfit']['cont_n']) + "\n")
# Initial constant term for continuum fit (valid for all fit ranges)
# (emission spectrum: about 1 for correct flux_unit)
fileout.write("cont_const: {0:1.0f}".format(input_data[obs]['molecfit']['cont_const']) + "\n")
## WAVELENGTH SOLUTION
# Refinement of wavelength solution using a polynomial of degree wlc_n
fileout.write("fit_wlc: 1\n")
# Polynomial degree of the refined wavelength solution
fileout.write("wlc_n: {0:1.0f}".format(input_data[obs]['molecfit']['wlc_n']) + "\n")
# Initial constant term for wavelength correction (shift relative to half
# wavelength range)
fileout.write("wlc_const: {0:1.0f}".format(input_data[obs]['molecfit']['wlc_const']) + "\n")
## RESOLUTION
# Fit resolution by boxcar -- 1 = yes; 0 = no
fileout.write("fit_res_box: 0\n")
# Initial value for FWHM of boxcar relative to slit width (>= 0. and <= 2.)
fileout.write("relres_box: 0.0\n")
# Voigt profile approximation instead of independent Gaussian and Lorentzian
# kernels? -- 1 = yes; 0 = no
fileout.write("kernmode: 0\n")
# Fit resolution by Gaussian -- 1 = yes; 0 = no
fileout.write("fit_res_gauss: 1\n")
# Initial value for FWHM of Gaussian in pixels
fileout.write("res_gauss: {0:3.1f}".format(input_data[obs]['molecfit']['res_gauss']) + "\n")
# Fit resolution by Lorentzian -- 1 = yes; 0 = no
fileout.write("fit_res_lorentz: 0\n")
# Initial value for FWHM of Lorentzian in pixels
fileout.write("res_lorentz: 0.0\n")
# Size of Gaussian/Lorentzian/Voigtian kernel in FWHM
fileout.write("kernfac: {0:3.0f}".format(input_data[obs]['molecfit']['kernfac']) + "\n")
# Variable kernel (linear increase with wavelength)? -- 1 = yes; 0 = no
fileout.write("varkern: 0\n")
# ASCII file for kernel elements (one per line; normalisation not required)
# instead of synthetic kernel consisting of boxcar, Gaussian, and Lorentzian
# components (path relative to the current directory or absolute path; default: "none\n")
fileout.write("kernel_file: none\n")
## AMBIENT PARAMETERS
# If the input data file contains a suitable FITS header, the keyword names of
# the following parameters will be read, but the corresponding values will not
# be used. The reading of parameter values from this file can be forced by
# setting keywords to NONE.
# Observing date in years or MJD in days
fileout.write("obsdate: {0:13.5f}".format(input_data[obs]['MJD']) + "\n")
fileout.write("obsdate_key: NONE\n")
# UTC in s
fileout.write("utc: {0:8.1f}".format(input_data[obs]['UTC']) + "\n")
fileout.write("utc_key: NONE\n")
# Telescope altitude angle in deg
fileout.write("telalt: {0:13.5f}".format(input_data[obs]['ELEVATION']) + "\n")
fileout.write("telalt_key: NONE\n")
# Humidity in %
fileout.write("rhum: {0:13.5f}".format(input_data[obs]['HUMIDITY']) + "\n")
fileout.write("rhum_key: NONE\n")
# Pressure in hPa
fileout.write("pres: {0:5.1f}".format(input_data[obs]['PRESSURE']) + "\n")
fileout.write("pres_key: NONE\n")
# Ambient temperature in deg C
# temp: 15.0
fileout.write("temp: {0:4.1f}".format(input_data[obs]['TEMPERATURE_EN']) + "\n")
fileout.write("temp_key: NONE\n")
# Mirror temperature in deg C
# m1temp: 15.0
fileout.write("m1temp: {0:4.1f}".format(input_data[obs]['TEMPERATURE_M1']) + "\n")
fileout.write("m1temp_key: NONE\n")
# Elevation above sea level in m (default is Paranal: 2635m)
# geoelev: 2387.2
fileout.write("geoelev: {0:4.0f}".format(input_data[obs]['GEOELEV']) + "\n")
fileout.write("geoelev_key: NONE\n")
# Longitude (default is Paranal: -70.4051)
# longitude: -17.889
fileout.write("longitude: {0:9.4f}".format(input_data[obs]['GEOLONG']) + "\n")
fileout.write("longitude_key: NONE\n")
# Latitude (default is Paranal: -24.6276)
# latitude: 28.754
fileout.write("latitude: {0:9.4f}".format(input_data[obs]['GEOLAT']) + "\n")
fileout.write("latitude_key: NONE\n")
## INSTRUMENTAL PARAMETERS
# Slit width in arcsec (taken from FITS header if present)
fileout.write("slitw: {0:3.1f}".format(input_data[obs]['molecfit']['slitwidth']) + "\n")
fileout.write("slitw_key: NONE\n")
# Pixel scale in arcsec (taken from this file only)
fileout.write("pixsc: {0:4.2f}".format(input_data[obs]['molecfit']["pixelscale"]) + "\n")
fileout.write("pixsc_key: NONE\n")
## ATMOSPHERIC PROFILES
# Reference atmospheric profile
fileout.write("ref_atm: equ.atm\n")
# Specific GDAS-like input profile (P[hPa] HGT[m] T[K] RELHUM[%]) (path
# relative to the installation directory or absolute path). In the case of "none", no GDAS
# profiles will be considered. The default "auto" performs an automatic
# retrieval.
fileout.write("gdas_dir: data/profiles/grib\n")
fileout.write("gdas_prof: auto\n")
# Grid of layer heights for merging ref_atm and GDAS profile. Fixed grid = 1
# (default) and natural grid = 0.
fileout.write("layers: 0\n")
# Upper mixing height in km (default: 5) for considering data of a local meteo
# station. If emix is below geoelev, rhum, pres, and temp are not used for
# modifying the corresponding profiles.
fileout.write("emix: 5.0\n")
# PWV value in mm for the input water vapour profile. The merged profile
# composed of ref_atm, GDAS, and local meteo data will be scaled to this value
# if pwv > 0 (default: -1 -> no scaling).
fileout.write("pwv: -1.\n")
# internal GUI specific parameter
fileout.write("clean_mflux: 1\n")
fileout.write("end\n")
fileout.close()
os.system('cd molecfit_' + night + '/ && . ./molecfit_exec_' + obs + '.source')
print()
print(' molecfit+calctrans completed')
for n_obs, obs in enumerate(lists['observations']):
telluric[obs] = {}
""" Loading the telluric spectrum from the output directory of molecfit """
telluric_molecfit = np.genfromtxt('./molecfit_'+night +'/output/'+obs+'_ORF_s1d_TAC.dat', usecols=2)
""" rebinning onto the e2ds wave scale"""
telluric[obs]['spectrum'] = \
rebin_1d_to_2d(processed['rebin']['wave'],
processed['rebin']['step'],
telluric_molecfit,
input_data[obs]['wave'],
input_data[obs]['step'],
preserve_flux=False)
try:
telluric[obs]['spectrum'] = np.nan_to_num(telluric[obs]['spectrum'], nan=1.0, posinf=1.0, neginf=1.0)
except TypeError:
# older numpy versions do not accept the nan/posinf/neginf keywords
temp = ~np.isfinite(telluric[obs]['spectrum'])
telluric[obs]['spectrum'][temp] = 1.0
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
" for compatibilty to some plots, even if it doesn't make any sense"
telluric[obs]['airmass_ref'] = 0.000
telluric[obs]['spectrum_noairmass'] = np.power(telluric[obs]['spectrum'],
telluric[obs]['airmass_ref'] - input_data[obs]['AIRMASS'])
telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
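# Hedged note on the airmass convention: elsewhere in SLOPpy the telluric
# spectrum at airmass X is modelled as T(X) = T_ref**(X - airmass_ref),
# an exponential, Beer-Lambert-like dependence; e.g. a line with T = 0.90
# at X = 1 deepens to 0.90**2 = 0.81 at X = 2.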
# we just copy the spectrum, since it's a model itself
telluric[obs]['spline'] = telluric[obs]['spectrum'].copy()
processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / telluric[obs]['spectrum']
processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / telluric[obs]['spectrum']
save_to_cpickle('telluric', telluric, config_in['output'], night)
save_to_cpickle('telluric_processed', processed, config_in['output'], night)
print()
print("Night ", night, " completed")
#
#""" After being rescaled for the proper factor, the template telluric spectrum is rebinned onto the 2D
#scale of the observations """
#
#telluric['template']['rebinned']['flux'] = \
# rebin_1d_to_2d(telluric['template']['input']['wave'],
# telluric['template']['input']['step'],
# telluric['template']['input']['flux'],
# telluric['template']['rebinned']['wave'],
# telluric['template']['rebinned']['step'],
# preserve_flux=False)
#
#telluric['template']['rebinned']['ferr'] = \
# rebin_1d_to_2d(telluric['template']['input']['wave'],
# telluric['template']['input']['step'],
# telluric['template']['input']['ferr'],
# telluric['template']['rebinned']['wave'],
# telluric['template']['rebinned']['step'],
# preserve_flux=False,
# is_error=True)
#
#
#sel_out_of_range = ~((telluric['template']['rebinned']['wave'] > telluric['template']['input']['range'][0]+1.) \
# & (telluric['template']['rebinned']['wave'] < telluric['template']['input']['range'][1]-1.))
#telluric['template']['rebinned']['flux'][sel_out_of_range] = 1.
#telluric['template']['rebinned']['ferr'][sel_out_of_range] = 0.1
#
#processed['telluric']['spectrum_noairmass'] = \
# (telluric['template']['rebinned']['flux'] - 1.) * telluric_factor + 1.0
#
#telluric['airmass_ref'] = processed['airmass_ref']
#
#for obs in lists['observations']:
# """ Correction of telluric lines for the average airmass value, following Wyttenbach et al. 2015 """
# processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / \
# np.power(processed['telluric']['spectrum_noairmass'],
# input_data[obs]['AIRMASS'] - processed['airmass_ref'])
# processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / \
# np.power(processed['telluric']['spectrum_noairmass'],
# input_data[obs]['AIRMASS'] - processed['airmass_ref'])
#
#for obs in lists['observations']:
# # Correction of telluric lines
#
# telluric[obs] = {}
#
# telluric[obs]['spectrum_noairmass'] = processed['telluric']['spectrum_noairmass']
#
# telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
# telluric[obs]['airmass_ref'] = processed['airmass_ref']
#
# """ Set anomalosly low point to one (e.g. when the template is not computed)"""
# telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
# telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
#
# telluric[obs]['spectrum'] = np.power(processed['telluric']['spectrum_noairmass'],
# input_data[obs]['AIRMASS'] - processed['airmass_ref'])
#
# telluric[obs]['spline_noairmass'] = telluric[obs]['spectrum_noairmass'].copy()
#
# """ No need to compute the spline approximation since we are already dealing with a very high SNR template"""
# telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
# input_data[obs]['AIRMASS'] - processed['airmass_ref'])
#
# """ copy the keyword for future use"""
# telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
#
# telluric[obs]['telluric_corrected'] = processed[obs]['e2ds_corrected']
# telluric[obs]['telluric_corrected_err'] = processed[obs]['e2ds_corrected_err']
#
# save_to_cpickle('telluric', telluric, config_in['output'], night)
# save_to_cpickle('telluric_processed', processed, config_in['output'], night)
print()
print("Night ", night, " completed")
quit()
def plot_telluric_molecfit(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
#plt.scatter(rescaling_array, computed_std, c='C0', zorder=1)
#plt.scatter(sel_factor, sel_stdev, c='C1', zorder=2)
#plt.plot(rescaling_array, np.polyval(coeff, rescaling_array))
#plt.plot(rescaling_array, 2*rescaling_array*coeff[0] + coeff[1] )
#plt.plot()
print("plot_telluric_template Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
""" Retrieving the analysis"""
try:
processed = load_from_cpickle('telluric_processed', config_in['output'], night)
telluric = load_from_cpickle('telluric', config_in['output'], night)
except:
print()
print("No telluric correction, no plots")
continue
colors, cmap, line_colors = make_color_array(lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1)
cbax1 = plt.subplot(gs[:, 1])
lift_spectrum = 0.25
for i, obs in enumerate(lists['observations']):
color_array = cmap(i / len(lists['observations']))
_, e2ds_rescaled , _ = \
perform_rescaling(processed[obs]['wave'],
processed[obs]['e2ds'],
processed[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']
e2ds_rescaled_corrected_spline = e2ds_rescaled / telluric[obs]['spline']
for order in range(0, processed[obs]['n_orders']):
if order == 0 and i==0:
ax1.plot(processed[obs]['wave'][order, :],
e2ds_rescaled[order, :],
c=color_array, lw=1, alpha=0.5, label='uncorrected')
ax1.scatter(processed[obs]['wave'][order, :],
e2ds_rescaled_corrected_spectrum[order, :],
s=1, c=np.atleast_2d(color_array), label='corrected')
else:
ax1.plot(processed[obs]['wave'][order, :],
e2ds_rescaled[order, :],
c=color_array, lw=1, alpha=0.5)
ax1.scatter(processed[obs]['wave'][order, :],
e2ds_rescaled_corrected_spectrum[order, :],
s=1, c=np.atleast_2d(color_array))
#ax1.plot(processed[obs]['wave'][order, :],
# e2ds_rescaled[order, :]+lift_spectrum,
# c=color_array, lw=1, alpha=0.5)
#ax1.scatter(processed[obs]['wave'][order, :],
# e2ds_rescaled_corrected_spline[order, :]+lift_spectrum,
# s=1, c=np.atleast_2d(color_array))
ax2.plot(processed[obs]['wave'][order, :],
telluric[obs]['spectrum'][order, :],
c=color_array)
ax2.axhline(1.00, c='k')
#ax2.plot(processed[obs]['wave'][order, :],
# telluric[obs]['spline'][order, :]+lift_spectrum,
# c=color_array)
#ax2.axhline(1.00+lift_spectrum, c='k')
#ax2.plot(input_data['coadd']['wave'],telluric['stellarRF']['spline_eval']+0.1,c='k')
#ax2.scatter(input_data['coadd']['wave'],telluric['stellarRF']['spectrum']+0.1,c='r', s=2)
ax1.legend(loc=3)
ax1.set_title('Night: ' + night)
ax2.set_xlabel(r'$\lambda$ [$\AA$]')
try:
instrument = night_dict[night]['instrument']
comparison_file = config_in['instruments'][instrument]['telluric_comparison']
comparison_data = np.genfromtxt(comparison_file, skip_header=1)
if comparison_data[0,0]<1000.0:
nm2Ang = 10.
else:
nm2Ang = 1.
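# Heuristic unit check (assumption): comparison spectra tabulated in nm
# start below 1000, while Angstrom-based ones do not, hence the factor 10.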
ax1.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
ax2.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
except:
pass
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show() | 30,915 | 44.801481 | 122 | py |
SLOPpy | SLOPpy-main/SLOPpy/quick_transmission.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.plot_subroutines import *
from scipy.interpolate import UnivariateSpline
__all__ = ['compute_quick_transmission']
def compute_quick_transmission(config_in, lines_label):
subroutine_name = 'quick_transmission'
night_dict = from_config_get_nights(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
shared_data = load_from_cpickle('shared', config_in['output'])
shared_selection = (shared_data['coadd']['wave'] >= lines_dict['range'][0]) \
& (shared_data['coadd']['wave'] < lines_dict['range'][1])
binned_selection = (shared_data['binned']['wave'] >= lines_dict['range'][0]) \
& (shared_data['binned']['wave'] < lines_dict['range'][1])
transmission_shared= {
'subroutine': subroutine_name,
'binned_wave': shared_data['binned']['wave'][binned_selection],
'binned_step': shared_data['binned']['step'][binned_selection],
'binned_size': int(np.sum(binned_selection))
}
import matplotlib.pyplot as plt
for night in night_dict:
#try:
# preparation = load_from_cpickle('transmission_preparation',
# config_in['output'],
# night)
# print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
# continue
#except:
# print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
# print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
if config_in['master-out'].get('use_composite', False):
master_out = load_from_cpickle('master_out_composite', config_in['output'], night)
print(' Using composite master-out from all nights')
else:
master_out = load_from_cpickle('master_out', config_in['output'], night)
if config_in['master-out'].get('use_smoothed', False):
master_out['rescaled'] = master_out['smoothed']
master_out['rescaled_err'] = master_out['smoothed_err']
print(' Using smoothed master-out')
quick_transmission = {
'subroutine': subroutine_name,
}
first_obs = lists['observations'][0]
quick_transmission['n_pixels'] = input_data[first_obs]['n_pixels']
quick_transmission['n_orders'] = input_data[first_obs]['n_orders']
quick_transmission['wave'] = input_data[first_obs]['wave']
quick_transmission['step'] = input_data[first_obs]['step']
master_raw = np.zeros([quick_transmission['n_orders'], quick_transmission['n_pixels']])
for obs in lists['transit_out']:
master_raw += input_data[obs]['e2ds']
quick_transmission['master_raw'] = master_raw.copy()
quick_transmission['master'] = master_raw / np.nanmedian(master_raw)
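# Normalization sketch: dividing by the global nanmedian makes the master
# dimensionless (fluctuating around 1), so the per-observation ratio below
# is directly a relative transmission spectrum.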
blaze = np.ones([quick_transmission['n_orders'], quick_transmission['n_pixels']])
for obs in lists['observations']:
quick_transmission[obs] = {}
transmission_raw = input_data[obs]['e2ds'] / quick_transmission['master']
quick_transmission[obs]['transmission_raw'] = transmission_raw.copy()
quick_transmission[obs]['transmission'] = transmission_raw / np.nanmedian(transmission_raw)
quick_transmission[obs]['wave'] = input_data[obs]['wave']  # added for plotting purposes only
quick_transmission[obs]['binned'] = \
rebin_2d_to_1d(quick_transmission['wave'],
quick_transmission['step'],
quick_transmission[obs]['transmission'],
blaze,
transmission_shared['binned_wave'],
transmission_shared['binned_step'],
rv_shift=0,
preserve_flux=False)
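# A unit blaze (np.ones) is passed to the rebinning above because the
# e2ds/master ratio is already blaze-free: the blaze function cancels out
# in the division.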
#plt.scatter(quick_transmission['wave'],
# quick_transmission[obs]['transmission'],
# c='C1', s=1, zorder=3, alpha=0.25)
for obs in lists['transit_out']:
plt.scatter(transmission_shared['binned_wave'],
quick_transmission[obs]['binned'],
c='b', s=1, zorder=10, alpha=0.5)
for obs in lists['transit_full']:
plt.scatter(transmission_shared['binned_wave'],
quick_transmission[obs]['binned']-0.04,
c='r', s=1, zorder=10, alpha=0.5)
plt.xlim(transmission_shared['binned_wave'][0], transmission_shared['binned_wave'][-1])
plt.show()
print()
""" Keep going from here after preparation, unless the subroutines has been called just
to preform the data preparation step
"""
| 5,497 | 40.651515 | 103 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_molecfit_v1.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_molecfit_v1",
"plot_telluric_molecfit_v1"]
def compute_telluric_molecfit_v1(config_in):
"""
Lazy workaround
:param config_in:
:param kwargs:
:return:
"""
print('UNTESTED PROCEDURE - I QUIT')
quit()
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
for night in night_dict:
instrument_name = night_dict[night]['instrument']
template_dict = instrument_dict[instrument_name]['telluric_template']
print()
print("compute_telluric_molecfit Night: ", night)
print()
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
continue
except:
print(" No telluric correction file found, computing now ")
print()
print(' instrument :', instrument_name)
print(' template :', template_dict['file'])
print(' fit_range :', template_dict['fit_range'])
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations"""
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': 'telluric_molecfit',
'n_orders': 0,
'n_pixels': 0,
}
telluric = {
'subroutine': 'telluric_molecfit',
'reference_frame': 'observer'
}
processed['airmass_ref'] = 0.000
processed['telluric'] = {}
processed['rebin'] = {}
"""
Molecfit works on pixel grid, so we must ensure that the spectra are rebinned always on the same wavelength
scale and same wavelength step. We use local arrays for this purpose
"""
rebin_step_unit = 0.01000000
processed['rebin']['wave'] = np.arange(input_data['coadd']['wavelength_range'][0],
input_data['coadd']['wavelength_range'][1],
rebin_step_unit,
dtype=np.double)
processed['rebin']['size'] = np.size(processed['rebin']['wave'])
processed['rebin']['step'] = np.ones(processed['rebin']['size'], dtype=np.double) * rebin_step_unit
processed['rebin'] = {
'wave': input_data['coadd']['wave'],
'size': input_data['coadd']['size'],
'step': input_data['coadd']['step'],
}
print(' Writing data and configuration files for molecfit+calctrans')
print()
"""
We store all the molecfit files in a subdirectory
We save the path of the main directory to a temporary file
"""
os.system('mkdir -p molecfit_'+night)
os.system('mkdir -p molecfit_'+night + '/output/')
# There must be a more elegant way to do this, but I'm not aware of it
for n_obs, obs in enumerate(lists['observations']):
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
""" e2ds spectra are rescaled and then rebinned while keeping them in the Observer Reference Frame"""
processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
perform_rescaling(input_data[obs]['wave'],
input_data[obs]['e2ds'],
input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['rebin_ORF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['e2ds_rescaled'],
calib_data['blaze'],
processed['rebin']['wave'],
processed['rebin']['step'],
preserve_flux=preserve_flux,
rv_shift=0.00)
""" Molecfit analysis is skipped if the telluric computation has been computed already"""
if os.path.isfile('./molecfit_'+night +'/output/'+obs+'_ORF_s1d_TAC.dat'):
print(' molecfit+calctrans results for ' + obs + ' already available')
continue
""" the spectra is save onto an ASCII file in a format suitable for molecfit """
fileout = open('./molecfit_'+night +'/'+obs+'_ORF_s1d.dat', 'w')
for w, f in zip(processed['rebin']['wave'], processed[obs]['rebin_ORF']):
fileout.write('{0:12.6f} {1:12.6f} \n'.format(w, f))
fileout.close()
"""
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['rebin_SRF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['e2ds_rescaled'],
calib_data['blaze'],
processed['rebin']['wave'],
processed['rebin']['step'],
preserve_flux=preserve_flux,
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF'])
fileout = open('./molecfit_'+night +'/'+obs+'_SRF_s1d.dat','w')
for w, f in zip(processed['rebin']['wave'], processed[obs]['rebin_SRF']):
fileout.write('{0:12.6f} {1:12.6f} \n'.format(w, f))
fileout.close()
"""
# TODO: input from configuration file for molecfit installation path
bash_script = open('./molecfit_'+night +'/molecfit_exec_' + obs + '.source', 'w')
bash_script.write('#!/bin/bash \n')
bash_script.write('echo " " executing molecfit+calctrans on '+obs+' \n')
bash_script.write('/usr/local/eso/bin/molecfit '+obs+'.par > ' + obs +'_molecfit.log\n')
bash_script.write('/usr/local/eso/bin/calctrans '+obs+'.par > ' + obs +'_calctrans.log\n')
bash_script.close()
fileout = open('./molecfit_'+night +'/'+obs+'.par', 'w')
fileout.write("### Driver for MOLECFIT\n")
# user working directory only important for REFLEX workflow and GUI
# not used by molecfit itself.
fileout.write("user_workdir:./\n")
## INPUT DATA
# Data file name (path relative to the current directory or absolute path)
fileout.write("filename: " + obs +"_ORF_s1d.dat\n")
# ASCII list of files to be corrected for telluric absorption using the
# transmission curve derived from the input reference file (path of list and
# listed files relative to the current directory or absolute path; default: "none")
fileout.write("listname: none\n")
# Type of input spectrum -- 1 = transmission (default); 0 = emission
fileout.write("trans: 1\n")
# Names of the file columns (table) or extensions (image) containing:
# Wavelength Flux Flux_Err Mask
# - Flux_Err and/or Mask can be avoided by writing 'NULL'
# - 'NULL' is required for Wavelength if it is given by header keywords
# - parameter list: col_lam, col_flux, col_dflux, and col_mask
fileout.write("columns: Wavelength Flux NULL NULL\n")
# Default error relative to mean for the case that the error column is missing
fileout.write("default_error: 0.001\n")
# Multiplicative factor to convert wavelength to micron
# (e.g. nm -> wlgtomicron = 1e-3)
fileout.write("wlgtomicron: 0.0001\n")
# Wavelengths in vacuum (= vac) or air (= air)
fileout.write("vac_air: air\n")
# TODO: input from configuration file for molecfit installation path
# ASCII or FITS table for wavelength ranges in micron to be fitted
# (path relative to the current directory or absolute path; default: "none")
fileout.write("wrange_include: include_"+night+".dat\n")
# ASCII or FITS table for wavelength ranges in micron to be excluded from the
# fit (path relative to the current directory or absolute path; default: "none")
# wrange_exclude: /Users/malavolta/Astro/ExoAtmospheres/molecfit_test//HIP63901_exclude_w.dat
# ASCII or FITS table for pixel ranges to be excluded from the fit
# (path relative to the current directory or absolute path; default: "none")
# prange_exclude: /Users/malavolta/Astro/ExoAtmospheres/molecfit_test//HIP63901_exclude_p.dat
## RESULTS
# Directory for output files (path relative to the current directory or absolute path)
fileout.write("output_dir:./\n")
# Name for output files
# (supplemented by "_fit" or "_tac" as well as ".asc", ".atm", ".fits",
# ".par, ".ps", and ".res")
fileout.write("output_name: "+ obs + "\n")
# Plot creation: gnuplot is used to create control plots
# W - screen output only (incorporating wxt terminal in gnuplot)
# X - screen output only (incorporating x11 terminal in gnuplot)
# P - postscript file labelled '<output_name>.ps', stored in <output_dir>
# combinations possible, i.e. WP, WX, XP, WXP (however, keep the order!)
# all other input: no plot creation is performed
fileout.write("plot_creation: none\n")
# Create plots for individual fit ranges? -- 1 = yes; 0 = no
fileout.write("plot_range: 0\n")
## FIT PRECISION
# Relative chi2 convergence criterion
fileout.write("ftol: " + input_data[obs]['molecfit']['ftol'] + "\n")
# Relative parameter convergence criterion
fileout.write("xtol: " + input_data[obs]['molecfit']['xtol'] + "\n")
## MOLECULAR COLUMNS
# List of molecules to be included in the model
# (default: 'H2O', N_val: nmolec)
molecules_list = "list_molec:"
for mol in input_data[obs]['molecfit']['molecules']:
molecules_list += " " + mol
fileout.write(molecules_list + "\n")
# Fit flags for molecules -- 1 = yes; 0 = no (N_val: nmolec)
# (built dynamically so that the number of flags matches the molecule list)
fileout.write("fit_molec: " + ' '.join(['1'] * len(input_data[obs]['molecfit']['molecules'])) + "\n")
# Values of molecular columns, expressed relatively to the input ATM profile
# columns (N_val: nmolec) [1 = 100%]
fileout.write("relcol: " + ' '.join(['1.0'] * len(input_data[obs]['molecfit']['molecules'])) + "\n")
## BACKGROUND AND CONTINUUM
# Conversion of fluxes from phot/(s*m2*mum*as2) (emission spectrum only) to
# flux unit of observed spectrum:
# 0: phot/(s*m^2*mum*as^2) [no conversion]
# 1: W/(m^2*mum*as^2)
# 2: erg/(s*cm^2*A*as^2)
# 3: mJy/as^2
# For other units the conversion factor has to be considered as constant term
# of the continuum fit.
fileout.write("flux_unit: 0\n")
# Fit of telescope background -- 1 = yes; 0 = no (emission spectrum only)
fileout.write("fit_back: 0\n")
# Initial value for telescope background fit (range: [0,1])
fileout.write("telback: 0.1\n")
# Polynomial fit of continuum --> degree: cont_n
fileout.write("fit_cont: 1\n")
# Degree of coefficients for continuum fit
fileout.write("cont_n: {0:1.0f}".format(input_data[obs]['molecfit']['cont_n']) + "\n")
# Initial constant term for continuum fit (valid for all fit ranges)
# (emission spectrum: about 1 for correct flux_unit)
fileout.write("cont_const: {0:1.0f}".format(input_data[obs]['molecfit']['cont_const']) + "\n")
## WAVELENGTH SOLUTION
# Refinement of wavelength solution using a polynomial of degree wlc_n
fileout.write("fit_wlc: 1\n")
# Polynomial degree of the refined wavelength solution
fileout.write("wlc_n: {0:1.0f}".format(input_data[obs]['molecfit']['wlc_n']) + "\n")
# Initial constant term for wavelength correction (shift relative to half
# wavelength range)
fileout.write("wlc_const: {0:1.0f}".format(input_data[obs]['molecfit']['wlc_const']) + "\n")
## RESOLUTION
# Fit resolution by boxcar -- 1 = yes; 0 = no
fileout.write("fit_res_box: 0\n")
# Initial value for FWHM of boxcar relative to slit width (>= 0. and <= 2.)
fileout.write("relres_box: 0.0\n")
# Voigt profile approximation instead of independent Gaussian and Lorentzian
# kernels? -- 1 = yes; 0 = no
fileout.write("kernmode: 0\n")
# Fit resolution by Gaussian -- 1 = yes; 0 = no
fileout.write("fit_res_gauss: 1\n")
# Initial value for FWHM of Gaussian in pixels
fileout.write("res_gauss: {0:3.1f}".format(input_data[obs]['molecfit']['res_gauss']) + "\n")
# Fit resolution by Lorentzian -- 1 = yes; 0 = no
fileout.write("fit_res_lorentz: 0\n")
# Initial value for FWHM of Lorentzian in pixels
fileout.write("res_lorentz: 0.0\n")
# Size of Gaussian/Lorentzian/Voigtian kernel in FWHM
fileout.write("kernfac: {0:3.0f}".format(input_data[obs]['molecfit']['kernfac']) + "\n")
# Variable kernel (linear increase with wavelength)? -- 1 = yes; 0 = no
fileout.write("varkern: 0\n")
# ASCII file for kernel elements (one per line; normalisation not required)
# instead of synthetic kernel consisting of boxcar, Gaussian, and Lorentzian
# components (path relative to the current directory or absolute path; default: "none\n")
fileout.write("kernel_file: none\n")
## AMBIENT PARAMETERS
# If the input data file contains a suitable FITS header, the keyword names of
# the following parameters will be read, but the corresponding values will not
# be used. The reading of parameter values from this file can be forced by
# setting keywords to NONE.
# Observing date in years or MJD in days
fileout.write("obsdate: {0:13.5f}".format(input_data[obs]['MJD']) + "\n")
fileout.write("obsdate_key: NONE\n")
# UTC in s
fileout.write("utc: {0:8.1f}".format(input_data[obs]['UTC']) + "\n")
fileout.write("utc_key: NONE\n")
# Telescope altitude angle in deg
fileout.write("telalt: {0:13.5f}".format(input_data[obs]['ELEVATION']) + "\n")
fileout.write("telalt_key: NONE\n")
# Humidity in %
fileout.write("rhum: {0:13.5f}".format(input_data[obs]['HUMIDITY']) + "\n")
fileout.write("rhum_key: NONE\n")
# Pressure in hPa
fileout.write("pres: {0:5.1f}".format(input_data[obs]['PRESSURE']) + "\n")
fileout.write("pres_key: NONE\n")
# Ambient temperature in deg C
# temp: 15.0
fileout.write("temp: {0:4.1f}".format(input_data[obs]['TEMPERATURE_EN']) + "\n")
fileout.write("temp_key: NONE\n")
# Mirror temperature in deg C
# m1temp: 15.0
fileout.write("m1temp: {0:4.1f}".format(input_data[obs]['TEMPERATURE_M1']) + "\n")
fileout.write("m1temp_key: NONE\n")
# Elevation above sea level in m (default is Paranal: 2635m)
# geoelev: 2387.2
fileout.write("geoelev: {0:4.0f}".format(input_data[obs]['GEOELEV']) + "\n")
fileout.write("geoelev_key: NONE\n")
# Longitude (default is Paranal: -70.4051)
# longitude: -17.889
fileout.write("longitude: {0:9.4f}".format(input_data[obs]['GEOLONG']) + "\n")
fileout.write("longitude_key: NONE\n")
# Latitude (default is Paranal: -24.6276)
# latitude: 28.754
fileout.write("latitude: {0:9.4f}".format(input_data[obs]['GEOLAT']) + "\n")
fileout.write("latitude_key: NONE\n")
## INSTRUMENTAL PARAMETERS
# Slit width in arcsec (taken from FITS header if present)
fileout.write("slitw: {0:3.1f}".format(input_data[obs]['molecfit']['slitwidth']) + "\n")
fileout.write("slitw_key: NONE\n")
# Pixel scale in arcsec (taken from this file only)
fileout.write("pixsc: {0:4.2f}".format(input_data[obs]['molecfit']["pixelscale"]) + "\n")
fileout.write("pixsc_key: NONE\n")
## ATMOSPHERIC PROFILES
# Reference atmospheric profile
fileout.write("ref_atm: equ.atm\n")
# Specific GDAS-like input profile (P[hPa] HGT[m] T[K] RELHUM[%]) (path
# relative to the installation directory or absolute path). In the case of "none", no GDAS
# profiles will be considered. The default "auto" performs an automatic
# retrieval.
fileout.write("gdas_dir: data/profiles/grib\n")
fileout.write("gdas_prof: auto\n")
# Grid of layer heights for merging ref_atm and GDAS profile. Fixed grid = 1
# (default) and natural grid = 0.
fileout.write("layers: 0\n")
# Upper mixing height in km (default: 5) for considering data of a local meteo
# station. If emix is below geoelev, rhum, pres, and temp are not used for
# modifying the corresponding profiles.
fileout.write("emix: 5.0\n")
# PWV value in mm for the input water vapour profile. The merged profile
# composed of ref_atm, GDAS, and local meteo data will be scaled to this value
# if pwv > 0 (default: -1 -> no scaling).
fileout.write("pwv: -1.\n")
# internal GUI specific parameter
fileout.write("clean_mflux: 1\n")
fileout.write("end\n")
fileout.close()
os.system('cd molecfit_' + night + '/ && . ./molecfit_exec_' + obs + '.source')
print()
print(' molecfit+calctrans completed')
for n_obs, obs in enumerate(lists['observations']):
telluric[obs] = {}
""" Loading the telluric spectrum from the output directory of molecfit """
telluric_molecfit = np.genfromtxt('./molecfit_'+night +'/output/'+obs+'_ORF_s1d_TAC.dat', usecols=2)
""" rebinning onto the e2ds wave scale"""
preserve_flux = input_data[obs].get('absolute_flux', True)
telluric[obs]['spectrum'] = \
rebin_1d_to_2d(processed['rebin']['wave'],
processed['rebin']['step'],
telluric_molecfit,
input_data[obs]['wave'],
input_data[obs]['step'],
preserve_flux=False)
try:
telluric[obs]['spectrum'] = np.nan_to_num(telluric[obs]['spectrum'], nan=1.0, posinf=1.0, neginf=1.0)
except TypeError:
# older numpy versions do not accept the nan/posinf/neginf keywords
temp = ~np.isfinite(telluric[obs]['spectrum'])
telluric[obs]['spectrum'][temp] = 1.0
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
" for compatibilty to some plots, even if it doesn't make any sense"
telluric[obs]['airmass_ref'] = 0.000
telluric[obs]['spectrum_noairmass'] = np.power(telluric[obs]['spectrum'],
telluric[obs]['airmass_ref'] - input_data[obs]['AIRMASS'])
telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
# we just copy the spectrum, since it's a model itself
telluric[obs]['spline'] = telluric[obs]['spectrum'].copy()
processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / telluric[obs]['spectrum']
processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / telluric[obs]['spectrum']
save_to_cpickle('telluric', telluric, config_in['output'], night)
save_to_cpickle('telluric_processed', processed, config_in['output'], night)
print()
print("Night ", night, " completed")
#
#""" After being rescaled for the proper factor, the template telluric spectrum is rebinned onto the 2D
#scale of the observations """
#
#telluric['template']['rebinned']['flux'] = \
# rebin_1d_to_2d(telluric['template']['input']['wave'],
# telluric['template']['input']['step'],
# telluric['template']['input']['flux'],
# telluric['template']['rebinned']['wave'],
# telluric['template']['rebinned']['step'],
# preserve_flux=False)
#
#telluric['template']['rebinned']['ferr'] = \
# rebin_1d_to_2d(telluric['template']['input']['wave'],
# telluric['template']['input']['step'],
# telluric['template']['input']['ferr'],
# telluric['template']['rebinned']['wave'],
# telluric['template']['rebinned']['step'],
# preserve_flux=False,
# is_error=True)
#
#
#sel_out_of_range = ~((telluric['template']['rebinned']['wave'] > telluric['template']['input']['range'][0]+1.) \
# & (telluric['template']['rebinned']['wave'] < telluric['template']['input']['range'][1]-1.))
#telluric['template']['rebinned']['flux'][sel_out_of_range] = 1.
#telluric['template']['rebinned']['ferr'][sel_out_of_range] = 0.1
#
#processed['telluric']['spectrum_noairmass'] = \
# (telluric['template']['rebinned']['flux'] - 1.) * telluric_factor + 1.0
#
#telluric['airmass_ref'] = processed['airmass_ref']
#
#for obs in lists['observations']:
# """ Correction of telluric lines for the average airmass value, following Wyttenbach et al. 2015 """
# processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / \
# np.power(processed['telluric']['spectrum_noairmass'],
# input_data[obs]['AIRMASS'] - processed['airmass_ref'])
# processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / \
# np.power(processed['telluric']['spectrum_noairmass'],
# input_data[obs]['AIRMASS'] - processed['airmass_ref'])
#
#for obs in lists['observations']:
# # Correction of telluric lines
#
# telluric[obs] = {}
#
# telluric[obs]['spectrum_noairmass'] = processed['telluric']['spectrum_noairmass']
#
# telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
# telluric[obs]['airmass_ref'] = processed['airmass_ref']
#
# """ Set anomalosly low point to one (e.g. when the template is not computed)"""
# telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
# telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
#
# telluric[obs]['spectrum'] = np.power(processed['telluric']['spectrum_noairmass'],
# input_data[obs]['AIRMASS'] - processed['airmass_ref'])
#
# telluric[obs]['spline_noairmass'] = telluric[obs]['spectrum_noairmass'].copy()
#
# """ No need to compute the spline approximation since we are already dealing with a very high SNR template"""
# telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
# input_data[obs]['AIRMASS'] - processed['airmass_ref'])
#
# """ copy the keyword for future use"""
# telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
#
# telluric[obs]['telluric_corrected'] = processed[obs]['e2ds_corrected']
# telluric[obs]['telluric_corrected_err'] = processed[obs]['e2ds_corrected_err']
#
# save_to_cpickle('telluric', telluric, config_in['output'], night)
# save_to_cpickle('telluric_processed', processed, config_in['output'], night)
print()
print("Night ", night, " completed")
quit()
def plot_telluric_molecfit_v1(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
#plt.scatter(rescaling_array, computed_std, c='C0', zorder=1)
#plt.scatter(sel_factor, sel_stdev, c='C1', zorder=2)
#plt.plot(rescaling_array, np.polyval(coeff, rescaling_array))
#plt.plot(rescaling_array, 2*rescaling_array*coeff[0] + coeff[1] )
#plt.plot()
print("plot_telluric_template Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
""" Retrieving the analysis"""
try:
processed = load_from_cpickle('telluric_processed', config_in['output'], night)
telluric = load_from_cpickle('telluric', config_in['output'], night)
except:
print()
print("No telluric correction, no plots")
continue
colors, cmap, line_colors = make_color_array(lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1)
cbax1 = plt.subplot(gs[:, 1])
lift_spectrum = 0.25
for i, obs in enumerate(lists['observations']):
color_array = cmap(i / len(lists['observations']))
_, e2ds_rescaled , _ = \
perform_rescaling(processed[obs]['wave'],
processed[obs]['e2ds'],
processed[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']
e2ds_rescaled_corrected_spline = e2ds_rescaled / telluric[obs]['spline']
for order in range(0, processed[obs]['n_orders']):
if order == 0 and i==0:
ax1.plot(processed[obs]['wave'][order, :],
e2ds_rescaled[order, :],
c=color_array, lw=1, alpha=0.5, label='uncorrected')
ax1.scatter(processed[obs]['wave'][order, :],
e2ds_rescaled_corrected_spectrum[order, :],
s=1, c=np.atleast_2d(color_array), label='corrected')
else:
ax1.plot(processed[obs]['wave'][order, :],
e2ds_rescaled[order, :],
c=color_array, lw=1, alpha=0.5)
ax1.scatter(processed[obs]['wave'][order, :],
e2ds_rescaled_corrected_spectrum[order, :],
s=1, c=np.atleast_2d(color_array))
#ax1.plot(processed[obs]['wave'][order, :],
# e2ds_rescaled[order, :]+lift_spectrum,
# c=color_array, lw=1, alpha=0.5)
#ax1.scatter(processed[obs]['wave'][order, :],
# e2ds_rescaled_corrected_spline[order, :]+lift_spectrum,
# s=1, c=np.atleast_2d(color_array))
ax2.plot(processed[obs]['wave'][order, :],
telluric[obs]['spectrum'][order, :],
c=color_array)
ax2.axhline(1.00, c='k')
#ax2.plot(processed[obs]['wave'][order, :],
# telluric[obs]['spline'][order, :]+lift_spectrum,
# c=color_array)
#ax2.axhline(1.00+lift_spectrum, c='k')
#ax2.plot(input_data['coadd']['wave'],telluric['stellarRF']['spline_eval']+0.1,c='k')
#ax2.scatter(input_data['coadd']['wave'],telluric['stellarRF']['spectrum']+0.1,c='r', s=2)
ax1.legend(loc=3)
ax1.set_title('Night: ' + night)
ax2.set_xlabel(r'$\lambda$ [$\AA$]')
try:
instrument = night_dict[night]['instrument']
comparison_file = config_in['instruments'][instrument]['telluric_comparison']
comparison_data = np.genfromtxt(comparison_file, skip_header=1)
if comparison_data[0,0]<1000.0:
nm2Ang = 10.
else:
nm2Ang = 1.
ax1.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
ax2.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
except:
pass
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show() | 31,063 | 44.749632 | 122 | py |
SLOPpy | SLOPpy-main/SLOPpy/second_telluric_correction_on_transmission.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from scipy.stats import linregress
__all__ = ["compute_second_telluric_correction_on_transmission", "plot_second_telluric_correction_on_transmission"]
subroutine_name = 'second_telluric_correction_on_transmission'
def compute_second_telluric_correction_on_transmission(config_in):
night_dict = from_config_get_nights(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
for night in night_dict:
try:
transmission = load_from_cpickle('transmission_planetRF_second_correction', config_in['output'], night)
continue
except:
print("No transmission spectra with second correction found, computing now ")
print()
print()
print("compute_second_telluric_correction_on_transmission Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
transmission = load_from_cpickle('transmission_planetRF', config_in['output'], night)
telluric = load_from_cpickle('telluric', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': subroutine_name
}
for obs in lists['observations']:
processed[obs] = {}
processed[obs]['telluric_shifted'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
telluric[obs]['spectrum'],
telluric[obs]['spectrum'],
transmission['wave'],
transmission['step'],
rv_shift=observational_pams[obs]['rv_shift_ORF2PRF'],
preserve_flux=False,
skip_blaze_correction=True)
processed[obs]['selection'] = (np.abs(1.0000-transmission[obs]['rescaled']) < 2*np.std(transmission[obs]['rescaled'])) \
& (np.abs(1.0000-processed[obs]['telluric_shifted']) > 0.02)
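# The mask keeps pixels that are (i) within 2 sigma of the unit continuum
# and (ii) affected by at least 2% telluric absorption, so the regression
# below is driven by telluric features rather than by stellar residuals.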
transmission[obs]['slope'], \
transmission[obs]['intercept'], \
transmission[obs]['rvalue'], \
transmission[obs]['pvalue'], \
transmission[obs]['stderr'], = linregress(processed[obs]['telluric_shifted'][processed[obs]['selection']],
transmission[obs]['rescaled'][processed[obs]['selection']])
transmission[obs]['rescaled'] += 1.000 - (transmission[obs]['intercept'] +
transmission[obs]['slope']*processed[obs]['telluric_shifted'])
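# Sketch of the decorrelation just applied: if a residual telluric imprint
# makes the transmission depend linearly on the telluric model t, then
#   corrected = rescaled + 1 - (intercept + slope * t)
# flattens that trend; at t = 1 (no absorption) the continuum is preserved
# whenever intercept + slope is close to 1, which follows from the fit as
# long as the clean selected pixels sit near unity.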
array_average = np.zeros([len(lists['transit_in']), transmission['size']])
weights_average = np.zeros([len(lists['transit_in']), transmission['size']])
for i, obs in enumerate(lists['transit_in']):
array_average[i, :] = transmission[obs]['rescaled'][:]
weights_average[i, :] = 1./(transmission[obs]['rescaled_err']**2.)
transmission['average'], transmission['sum_weights'] = np.average(
array_average, axis=0, weights=weights_average, returned=True)
transmission['average_err'] = 1./np.sqrt(transmission['sum_weights'])
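        # the same inverse-variance weighting is used for the out-of-transit master
        # spectrum below; the uncertainty of a weighted mean is 1/sqrt(sum of weights)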
array_average = np.zeros([len(lists['transit_out']), transmission['size']])
weights_average = np.zeros([len(lists['transit_out']), transmission['size']])
for i, obs in enumerate(lists['transit_out']):
array_average[i, :] = transmission[obs]['rescaled'][:]
weights_average[i, :] = 1./(transmission[obs]['rescaled_err']**2.)
transmission['average_out'], transmission['sum_weights_out'] = np.average(
array_average, axis=0, weights=weights_average, returned=True)
transmission['average_out_err'] = 1./np.sqrt(transmission['sum_weights_out'])
save_to_cpickle('transmission_planetRF_second_correction_processed', processed, config_in['output'], night)
save_to_cpickle('transmission_planetRF_second_correction', transmission, config_in['output'], night)
def plot_second_telluric_correction_on_transmission(config_in, night_input=''):
night_dict = from_config_get_nights(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
previous_transmission = load_from_cpickle('transmission_planetRF', config_in['output'], night)
transmission = load_from_cpickle('transmission_planetRF_second_correction', config_in['output'], night)
processed = load_from_cpickle('transmission_planetRF_second_correction_processed', config_in['output'], night)
f, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=True)
cmap = plt.cm.Spectral
line_colors = cmap(np.linspace(0, 1, len(lists['observations'])))
for i, obs in enumerate(lists['observations']):
ax1.axhline(1.+i/10., c='k', zorder=0)
ax2.axhline(1.+i/10., c='k', zorder=0)
            ax1.scatter(processed[obs]['telluric_shifted'], previous_transmission[obs]['rescaled'] + i / 10., s=2,
                        c=np.atleast_2d(line_colors[i]), zorder=1)
            ax2.scatter(processed[obs]['telluric_shifted'], transmission[obs]['rescaled'] + i / 10., s=2,
                        # c=np.atleast_2d(line_colors[i]), zorder=2)
                        c='r', zorder=2)
ax1.set_xlim(0.80, 1.02)
plt.show()
""" Creation of the color array, based on the BJD of the observations
"""
bjd = []
am = []
for obs in lists['observations']:
bjd.append(transmission[obs]['BJD'] - 2450000.0)
am.append(transmission[obs]['AIRMASS'])
colors = np.asarray(bjd)
cmap = plt.cm.Spectral
line_colors = cmap(np.linspace(0, 1, len(lists['observations'])))
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(1, 2, width_ratios=[50, 1])
ax = plt.subplot(gs[0, 0])
cbax1 = plt.subplot(gs[:, 1])
i_shift = 0.00
for i, obs in enumerate(lists['observations']):
ax.errorbar(transmission['wave'],
transmission[obs]['rescaled']-i_shift,
yerr=transmission[obs]['rescaled_err'],
                        marker='o', c=line_colors[i], ms=1, alpha=0.5)
i_shift += 0.05
ax.set_ylim(0.0-i_shift, 1.2)
        ax.set_xlabel(r'$\lambda$ [$\AA$]')
ax.set_title('Night: ' + night)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
| 7,683 | 40.76087 | 132 | py |
SLOPpy | SLOPpy-main/SLOPpy/clv_rm_modelling.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.constants import *
from SLOPpy.subroutines.kepler_exo import *
from SLOPpy.subroutines.plot_subroutines import *
from astropy.convolution import Gaussian1DKernel, convolve
__all__ = ['compute_clv_rm_modelling', 'plot_clv_rm_modelling']
subroutine_name = 'clv_rm_modelling'
### OLD modelling without the dependency on the planetary radius at a given wavelength
def compute_clv_rm_modelling(config_in):
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
planet_dict = from_config_get_planet(config_in)
star_dict = from_config_get_star(config_in)
clv_rm_dict = from_config_get_clv_rm(config_in)
try:
synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])
if not config_in['settings'].get('full_output', False):
for night in night_dict:
clv_rm_modelling = load_from_cpickle('clv_rm_modelling', config_in['output'], night)
print("{0:45s} {1:s}".format(subroutine_name, 'Retrieved'))
return
except:
print("{0:45s} {1:s}".format(subroutine_name, 'Computing'))
print()
"""
Loading the spectral synthesis results, at the moment only SME output is supported.
Properties of the synthesis data files
    - limb_angles: this is an input to SME, so it is specific to how the synthesis has been performed
- spectra: stellar spectrum as a function of the limb angle, sampled near the spectral lines
- model: integrated spectrum of the star
"""
synthesis_data_limb_angles = np.genfromtxt(clv_rm_dict['synthesis_files'] + '_muvals.txt', dtype=np.double)
synthesis_data_spectra = np.genfromtxt(clv_rm_dict['synthesis_files'] + '_spectra.txt', dtype=np.double)
synthesis_data_model = np.genfromtxt(clv_rm_dict['synthesis_files'] + '_model.txt', dtype=np.double)
synthesis = {
'surface': {
'wave': synthesis_data_spectra[:, 0],
'flux': synthesis_data_spectra[:, 1:],
'n_mu': np.size(synthesis_data_limb_angles),
'mu': synthesis_data_limb_angles
},
'total': {
'wave': synthesis_data_model[:, 0],
'norm': synthesis_data_model[:, 1],
}
}
""" Setting up the array for model computation """
synthesis['total']['step'] = synthesis['total']['wave'] * 0.0
synthesis['total']['step'][1:] = synthesis['total']['wave'][1:] - synthesis['total']['wave'][:-1]
synthesis['total']['step'][0] = synthesis['total']['step'][1]
synthesis['surface']['step'] = synthesis['surface']['wave'] * 0.0
synthesis['surface']['step'][1:] = synthesis['surface']['wave'][1:] - synthesis['surface']['wave'][:-1]
synthesis['surface']['step'][0] = synthesis['surface']['step'][1]
synthesis['surface']['wave_out'] = np.arange(synthesis['surface']['wave'][0],
synthesis['surface']['wave'][-1],
clv_rm_dict['rebinning_step'])
synthesis['surface']['size_out'] = np.size(synthesis['surface']['wave_out'], axis=0)
synthesis['surface']['step_out'] = np.ones(synthesis['surface']['size_out']) * clv_rm_dict['rebinning_step']
synthesis['total']['norm_out'] = rebin_1d_to_1d(synthesis['total']['wave'],
synthesis['total']['step'],
synthesis['total']['norm'],
synthesis['surface']['wave_out'],
synthesis['surface']['step_out'],
method='exact_flux',
preserve_flux=False)
""" Check if the number of spectra corresponds to the number of limb angle values """
if np.size(synthesis['surface']['flux'], axis=1) != synthesis['surface']['n_mu']:
print('ERROR in loading the stellar spectra')
"""
Setting up the grid of stellar spectra for the CLV and RM computation
odd number of points to include the zero value
"""
star_grid = {
'n_grid': clv_rm_dict['n_gridpoints'],
'half_grid': int((clv_rm_dict['n_gridpoints'] - 1) / 2)
}
""" Coordinates of the centers of each grid cell (add offset) """
star_grid['xx'] = np.linspace(-1.0, 1.0, star_grid['n_grid'], dtype=np.double)
star_grid['xc'], star_grid['yc'] = np.meshgrid(star_grid['xx'], star_grid['xx'], indexing='xy')
# check the Note section of the wiki page of meshgrid
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.meshgrid.html
""" Distance of each grid cell from the center of the stellar disk """
star_grid['rc'] = np.sqrt(star_grid['xc'] ** 2 + star_grid['yc'] ** 2)
    star_grid['inside'] = star_grid['rc'] <= 1.0  # Must avoid negative numbers inside the square root
    star_grid['outside'] = star_grid['rc'] > 1.0  # complementary mask: cells outside the stellar disk
""" Determine the mu angle for each grid cell, as a function of radius. """
star_grid['mu'] = np.zeros([star_grid['n_grid'], star_grid['n_grid']],
dtype=np.double) # initialization of the matrix with the mu values
star_grid['mu'][star_grid['inside']] = np.sqrt(1. - star_grid['rc'][star_grid['inside']] ** 2)
""" 2.2 Determine the Doppler shift to apply to the spectrum of each grid cell, from Cegla+2016 """
star_grid['x_ortho'] = star_grid['xc'] * np.cos(star_dict['lambda'][0] * deg2rad) \
- star_grid['yc'] * np.sin(
star_dict['lambda'][0] * deg2rad) # orthogonal distances from the spin-axis
star_grid['y_ortho'] = star_grid['xc'] * np.sin(star_dict['lambda'][0] * deg2rad) \
+ star_grid['yc'] * np.cos(star_dict['lambda'][0] * deg2rad)
star_grid['r_ortho'] = np.sqrt(star_grid['x_ortho'] ** 2 + star_grid['y_ortho'] ** 2)
star_grid['z_ortho'] = np.zeros([star_grid['n_grid'], star_grid['n_grid']],
dtype=np.double) # initialization of the matrix
star_grid['z_ortho'][star_grid['inside']] = np.sqrt(1 - star_grid['r_ortho'][star_grid['inside']] ** 2)
""" rotate the coordinate system around the x_ortho axis by an angle: """
star_grid['beta'] = (np.pi / 2.) - star_dict['inclination'][0] * deg2rad
""" orthogonal distance from the stellar equator """
star_grid['yp_ortho'] = star_grid['z_ortho'] * np.sin(star_grid['beta']) + star_grid['y_ortho'] * np.cos(
star_grid['beta'])
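    # differential-rotation law (e.g. Cegla+2016): v = x_ortho * vsini * (1 - alpha * yp_ortho**2);
    # alpha = 0 recovers solid-body rotation, and only x_ortho enters because it is
    # the projected distance from the sky-projected spin axis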
""" stellar rotational velocity for a given position """
star_grid['v_star'] = star_grid['x_ortho'] * star_dict['vsini'][0] * (
1 - star_dict['alpha'][0] * star_grid['yp_ortho'] ** 2)
star_grid['v_star'][star_grid['outside']] = 0.0 # Null velocity for points outside the stellar surface
""" Associate a synthetic spectrum to each cell """
star_grid['spectra_mu'] = [[0] * star_grid['n_grid'] for i in range(star_grid['n_grid'])]
for x in range(0, star_grid['n_grid']):
for y in range(0, star_grid['n_grid']):
if star_grid['outside'][y, x]: continue
index_closer = np.abs(
synthesis['surface']['mu'] - star_grid['mu'][y, x]).argmin() # take the index of the closer value
if star_grid['mu'][y, x] in synthesis['surface']['mu']:
star_grid['spectra_mu'][x][y] = synthesis['surface']['flux'][:, index_closer]
continue
elif index_closer == synthesis['surface']['n_mu'] - 1 or \
synthesis['surface']['mu'][index_closer] > star_grid['mu'][y, x]:
mu_ind0 = index_closer - 1
mu_ind1 = index_closer
else:
mu_ind0 = index_closer
mu_ind1 = index_closer + 1
diff_mu = synthesis['surface']['mu'][mu_ind1] - synthesis['surface']['mu'][mu_ind0]
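                # linear interpolation in mu between the two bracketing synthetic spectra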
star_grid['spectra_mu'][x][y] = synthesis['surface']['flux'][:, mu_ind0] \
+ (star_grid['mu'][y, x] - synthesis['surface']['mu'][mu_ind0]) / diff_mu \
* (synthesis['surface']['flux'][:, mu_ind1]
- synthesis['surface']['flux'][:, mu_ind0])
""" Computation of the continuum level (total flux is already normalized)"""
star_grid['continuum'] = [[0] * star_grid['n_grid'] for i in range(star_grid['n_grid'])]
spectra_window = ((synthesis['surface']['wave'] > clv_rm_dict['continuum_range'][0]) &
(synthesis['surface']['wave'] < clv_rm_dict['continuum_range'][1]))
for x in range(0, star_grid['n_grid']):
for y in range(0, star_grid['n_grid']):
if star_grid['outside'][y, x]: continue
star_grid['continuum'][x][y] = np.median(star_grid['spectra_mu'][x][y][spectra_window])
star_grid['continuum_level'] = np.sum(star_grid['continuum'])
for night in night_dict:
""" Retrieving the list of observations"""
print()
print('compute_CLV_RM_modelling Night: ', night)
try:
clv_rm_modelling = load_from_cpickle('clv_rm_modelling', config_in['output'], night)
continue
except:
print()
print(' No CLV & RM correction files found, computing now ')
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
instrument = night_dict[night]['instrument']
clv_rm_modelling = {
'common': {
'wave': synthesis['surface']['wave_out'],
'step': synthesis['surface']['step_out'],
'norm': synthesis['total']['norm_out'],
'continuum_level': star_grid['continuum_level']
}
}
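        # kernel width: sigma in wavelength is lambda_med / R (instrumental
        # resolution), converted to pixels by dividing by the median wavelength
        # step; a Gaussian instrumental profile is assumed here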
clv_rm_modelling['common']['convolution_dlambda'] = \
np.median(clv_rm_modelling['common']['wave']) / instrument_dict[instrument]['resolution']
clv_rm_modelling['common']['convolution_sigma'] = \
clv_rm_modelling['common']['convolution_dlambda'] / np.median(clv_rm_modelling['common']['step'])
gaussian = Gaussian1DKernel(stddev=clv_rm_modelling['common']['convolution_sigma'])
clv_rm_modelling['common']['norm_convolved'] = convolve(clv_rm_modelling['common']['norm'], gaussian)
processed = {}
print()
for obs in lists['observations']:
print(' Computing CLV+RM correction for ', obs)
processed[obs] = {}
clv_rm_modelling[obs] = {}
n_oversampling = int(observational_pams[obs]['EXPTIME'] / clv_rm_dict['time_step'])
if n_oversampling % 2 == 0: n_oversampling += 1
half_time = observational_pams[obs]['EXPTIME'] / 2 / 86400.
processed[obs]['bjd_oversampling'] = np.linspace(observational_pams[obs]['BJD'] - half_time,
observational_pams[obs]['BJD'] + half_time,
n_oversampling, dtype=np.double)
if planet_dict['orbit'] == 'circular':
                # Time of pericenter coincides with transit time, if we assume e=0 and omega=np.pi/2.
eccentricity = 0.00
omega_rad = np.pi / 2.
# Tcent is assumed as reference time
Tref = planet_dict['reference_time_of_transit'][0]
Tcent_Tref = 0.000
else:
omega_rad = planet_dict['omega'][0] * deg2rad
Tref = planet_dict['reference_time']
Tcent_Tref = planet_dict['reference_time_of_transit'][0] - Tref
eccentricity = planet_dict['eccentricity'][0]
inclination_rad = planet_dict['inclination'][0] * deg2rad
true_anomaly, orbital_distance_ratio = kepler_true_anomaly_orbital_distance(
processed[obs]['bjd_oversampling'] - Tref,
Tcent_Tref,
planet_dict['period'][0],
eccentricity,
omega_rad,
planet_dict['semimajor_axis_ratio'][0])
""" planet position during its orbital motion, in unit of stellar radius"""
            # Following Murray & Correia 2011, with the argument of the ascending node set to zero.
            # 1) the ascending node coincides with the X axis
            # 2) the reference plane coincides with the plane of the sky
processed[obs]['planet_position'] = {
'xp': -orbital_distance_ratio * (np.cos(omega_rad + true_anomaly)),
'yp': orbital_distance_ratio * (np.sin(omega_rad + true_anomaly) * np.cos(inclination_rad)),
'zp': orbital_distance_ratio * (np.sin(inclination_rad) * np.sin(omega_rad + true_anomaly))
}
# projected distance of the planet's center to the stellar center
processed[obs]['planet_position']['rp'] = np.sqrt(processed[obs]['planet_position']['xp'] ** 2 \
+ processed[obs]['planet_position']['yp'] ** 2)
# obscured flux integrated over the full epoch
clv_rm_modelling[obs]['missing_flux'] = np.zeros(synthesis['surface']['size_out'], dtype=np.double)
# iterating on the sub-exposures
for j, zeta in enumerate(processed[obs]['planet_position']['zp']):
if zeta > 0 and processed[obs]['planet_position']['rp'][j] < 1 + planet_dict['radius_ratio'][0]:
# the planet is in the foreground or inside the stellar disk, continue
# adjustment: computation is performed even if only part of the planet is shadowing the star
rd = np.sqrt((processed[obs]['planet_position']['xp'][j] - star_grid['xc']) ** 2 + \
(processed[obs]['planet_position']['yp'][j] - star_grid['yc']) ** 2)
# iterating on the cell grid
for x in range(0, star_grid['n_grid']):
for y in range(0, star_grid['n_grid']):
# skip the step if the cell is outside the stellar disk
# or if the cell is not shadowed by the planet
if star_grid['outside'][y, x] or rd[y, x] > planet_dict['radius_ratio'][0]: continue
flux_tmp = rebin_1d_to_1d(synthesis['surface']['wave'],
synthesis['surface']['step'],
star_grid['spectra_mu'][x][y],
clv_rm_modelling['common']['wave'],
clv_rm_modelling['common']['step'],
rv_shift=star_grid['v_star'][y, x],
method='exact_flux',
preserve_flux=False)
# fixing zero values that may have been introduced by
# the rebinning process from an extremely irregular sampling
ind_sel = np.where(flux_tmp < 0.)[0]
for ii in ind_sel:
if ii == 0:
flux_tmp[ii] = flux_tmp[ii + 1]
elif ii == np.size(flux_tmp) - 1:
flux_tmp[ii] = flux_tmp[ii - 1]
else:
flux_tmp[ii] = (flux_tmp[ii - 1] + flux_tmp[ii + 1]) / 2.
clv_rm_modelling[obs]['missing_flux'] += flux_tmp
clv_rm_modelling[obs]['missing_flux'] /= n_oversampling
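            # in-transit disk-integrated spectrum: full-disk normalized spectrum minus
            # the continuum-normalized flux hidden by the planet during the exposure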
clv_rm_modelling[obs]['stellar_spectra'] = clv_rm_modelling['common']['norm'] \
- (clv_rm_modelling[obs]['missing_flux']
/ clv_rm_modelling['common']['continuum_level'])
clv_rm_modelling[obs]['stellar_spectra_convolved'] = \
convolve(clv_rm_modelling[obs]['stellar_spectra'], gaussian)
save_to_cpickle('clv_rm_modelling', clv_rm_modelling, config_in['output'], night)
if not config_in['settings'].get('full_output', False):
del star_grid['spectra_mu']
save_to_cpickle('clv_rm_star_grid', star_grid, config_in['output'])
save_to_cpickle('clv_rm_synthesis', synthesis, config_in['output'])
def plot_clv_rm_modelling(config_in, night_input=''):
night_dict = from_config_get_nights(config_in)
synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])
if night_input == '':
# Visualize the mu of star
fig = plt.figure(figsize=(8, 6.5))
plt.title('Limb angle')
plt.contourf(star_grid['xx'], star_grid['xx'], star_grid['mu'], 60, cmap=plt.cm.viridis)
        plt.colorbar(label=r'$\mu$')  # draw colorbar
# plot data points.
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.xlabel('x [R_s]')
plt.ylabel('y [R_s]')
plt.show()
# Visualize the RV of star
fig = plt.figure(figsize=(8, 6.5))
# CS = plt.contour(xx,xx,v_star,50,linewidths=0.5,colors='k')
plt.title('Radial velocity field')
plt.contourf(star_grid['xx'], star_grid['xx'], star_grid['v_star'], 100, cmap=plt.cm.seismic)
plt.colorbar(label='v_star') # draw colorbar
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.xlabel('x [R_s]')
plt.ylabel('y [R_s]')
plt.show()
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
lists = load_from_cpickle('lists', config_in['output'], night)
clv_rm_modelling = load_from_cpickle('clv_rm_modelling', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
for obs in lists['transit_in']:
ax1.plot(clv_rm_modelling['common']['wave'],
clv_rm_modelling[obs]['stellar_spectra'],
color=colors_plot['mBJD'][obs], alpha=0.2)
ax1.plot(clv_rm_modelling['common']['wave'],
clv_rm_modelling[obs]['missing_flux'] / clv_rm_modelling['common']['continuum_level'],
color=colors_plot['mBJD'][obs], alpha=0.2)
for obs in lists['transit_out']:
ax2.plot(clv_rm_modelling['common']['wave'],
clv_rm_modelling[obs]['stellar_spectra'],
color=colors_plot['mBJD'][obs], alpha=0.2)
ax1.set_title('Night: {0:s} \n Input spectra'.format(night))
ax2.set_title('Out of transit spectra')
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show() | 20,737 | 48.971084 | 119 | py |
SLOPpy | SLOPpy-main/SLOPpy/clv_rm_models.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.constants import *
from SLOPpy.subroutines.kepler_exo import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.math_functions import *
from astropy.convolution import Gaussian1DKernel, convolve
__all__ = ['compute_clv_rm_models', 'plot_clv_rm_models']
subroutine_name = 'clv_rm_models'
def compute_clv_rm_models(config_in):
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
planet_dict = from_config_get_planet(config_in)
star_dict = from_config_get_star(config_in)
clv_rm_dict = from_config_get_clv_rm(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
    # width (in Angstrom) of the buffer left un-convolved at each edge of the
    # spectral range, used to mask convolution border effects
    wave_fix_convo = 1.0
# Added back-compatibility to old or "wrong" keys
norm_dict = clv_rm_dict.get('normalization', {})
    norm_pams = {}
norm_pams['model_poly_degree'] = norm_dict.get('model_poly_degree', 2)
norm_pams['spectra_poly_degree'] = norm_dict.get('spectra_poly_degree', 2)
norm_pams['lower_threshold'] = norm_dict.get('lower_threshold', 0.950)
norm_pams['percentile_selection'] = norm_dict.get('percentile_selection', 10)
try:
synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])
if not config_in['settings'].get('full_output', False):
for night in night_dict:
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night)
        print("{0:45s} {1:s}".format(
            subroutine_name, 'Retrieved'))
        return
except:
print("{0:45s} {1:s}".format(
subroutine_name, 'Computing'))
print()
"""
Loading the spectral synthesis results, at the moment only SME output is supported.
Properties of the synthesis data files
    - limb_angles: this is an input to SME, so it is specific to how the synthesis has been performed
- spectra: stellar spectrum as a function of the limb angle, sampled near the spectral lines
- model: integrated spectrum of the star
"""
synthesis_data_limb_angles = np.genfromtxt(
clv_rm_dict['synthesis_files'] + '_muvals.txt', dtype=np.double)
synthesis_data_spectra = np.genfromtxt(
clv_rm_dict['synthesis_files'] + '_spectra.txt', dtype=np.double)
synthesis_data_model = np.genfromtxt(
clv_rm_dict['synthesis_files'] + '_model.txt', dtype=np.double)
synthesis = {
'surface': {
'wave': synthesis_data_spectra[:, 0],
'flux': synthesis_data_spectra[:, 1:],
'n_mu': np.size(synthesis_data_limb_angles),
'mu': synthesis_data_limb_angles
},
'total': {
'wave': synthesis_data_model[:, 0],
'norm': synthesis_data_model[:, 1],
}
}
""" Setting up the array for model computation """
synthesis['total']['step'] = synthesis['total']['wave'] * 0.0
synthesis['total']['step'][1:] = synthesis['total']['wave'][1:] - \
synthesis['total']['wave'][:-1]
synthesis['total']['step'][0] = synthesis['total']['step'][1]
synthesis['surface']['step'] = synthesis['surface']['wave'] * 0.0
synthesis['surface']['step'][1:] = synthesis['surface']['wave'][1:] - \
synthesis['surface']['wave'][:-1]
synthesis['surface']['step'][0] = synthesis['surface']['step'][1]
synthesis['surface']['wave_out'] = np.arange(synthesis['surface']['wave'][0],
synthesis['surface']['wave'][-1],
clv_rm_dict['rebinning_step'])
synthesis['surface']['size_out'] = np.size(
synthesis['surface']['wave_out'], axis=0)
synthesis['surface']['step_out'] = np.ones(
synthesis['surface']['size_out']) * clv_rm_dict['rebinning_step']
synthesis['total']['norm_out'] = rebin_1d_to_1d(synthesis['total']['wave'],
synthesis['total']['step'],
synthesis['total']['norm'],
synthesis['surface']['wave_out'],
synthesis['surface']['step_out'],
method='exact_flux',
preserve_flux=False)
""" Check if the number of spectra corresponds to the number of limb angle values """
if np.size(synthesis['surface']['flux'], axis=1) != synthesis['surface']['n_mu']:
print('ERROR in loading the stellar spectra')
"""
Setting up the grid of stellar spectra for the CLV and RM computation
odd number of points to include the zero value
"""
star_grid = {
'n_grid': clv_rm_dict['n_gridpoints'],
'half_grid': int((clv_rm_dict['n_gridpoints'] - 1) / 2)
}
""" Coordinates of the centers of each grid cell (add offset) """
    star_grid['xx'] = np.linspace(-1.0, 1.0,
                                  star_grid['n_grid'], dtype=np.double)
star_grid['xc'], star_grid['yc'] = np.meshgrid(
star_grid['xx'], star_grid['xx'], indexing='xy')
# check the Note section of the wiki page of meshgrid
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.meshgrid.html
""" Distance of each grid cell from the center of the stellar disk """
star_grid['rc'] = np.sqrt(star_grid['xc'] ** 2 + star_grid['yc'] ** 2)
    # Must avoid negative numbers inside the square root
    star_grid['inside'] = star_grid['rc'] < 1.0
    star_grid['outside'] = star_grid['rc'] >= 1.0
""" Determine the mu angle for each grid cell, as a function of radius. """
star_grid['mu'] = np.zeros([star_grid['n_grid'], star_grid['n_grid']],
dtype=np.double) # initialization of the matrix with the mu values
star_grid['mu'][star_grid['inside']] = np.sqrt(
1. - star_grid['rc'][star_grid['inside']] ** 2)
""" 2.2 Determine the Doppler shift to apply to the spectrum of each grid cell, from Cegla+2016 """
star_grid['x_ortho'] = star_grid['xc'] * np.cos(star_dict['lambda'][0] * deg2rad) \
- star_grid['yc'] * np.sin(
star_dict['lambda'][0] * deg2rad) # orthogonal distances from the spin-axis
star_grid['y_ortho'] = star_grid['xc'] * np.sin(star_dict['lambda'][0] * deg2rad) \
+ star_grid['yc'] * np.cos(star_dict['lambda'][0] * deg2rad)
star_grid['r_ortho'] = np.sqrt(
star_grid['x_ortho'] ** 2 + star_grid['y_ortho'] ** 2)
star_grid['z_ortho'] = np.zeros([star_grid['n_grid'], star_grid['n_grid']],
dtype=np.double) # initialization of the matrix
star_grid['z_ortho'][star_grid['inside']] = np.sqrt(
        1. - star_grid['r_ortho'][star_grid['inside']] ** 2)
""" rotate the coordinate system around the x_ortho axis by an angle: """
star_grid['beta'] = (np.pi / 2.) - \
star_dict['inclination'][0] * deg2rad
""" orthogonal distance from the stellar equator """
star_grid['yp_ortho'] = star_grid['z_ortho'] * np.sin(star_grid['beta']) + star_grid['y_ortho'] * np.cos(
star_grid['beta'])
""" stellar rotational velocity for a given position """
star_grid['v_star'] = star_grid['x_ortho'] * star_dict['vsini'][0] * (
        1. - star_dict['alpha'][0] * star_grid['yp_ortho'] ** 2)
# Null velocity for points outside the stellar surface
star_grid['v_star'][star_grid['outside']] = 0.0
""" Associate a synthetic spectrum to each cell """
""" recomputation of spectra_mu - most likely it has been deleted from the
output file
"""
star_grid['spectra_mu'] = [[0] * star_grid['n_grid']
for i in range(star_grid['n_grid'])]
for x in range(0, star_grid['n_grid']):
for y in range(0, star_grid['n_grid']):
if star_grid['outside'][y, x]:
continue
index_closer = np.abs(
synthesis['surface']['mu'] - star_grid['mu'][y, x]).argmin() # take the index of the closer value
if star_grid['mu'][y, x] in synthesis['surface']['mu']:
star_grid['spectra_mu'][x][y] = synthesis['surface']['flux'][:, index_closer]
continue
elif index_closer == synthesis['surface']['n_mu'] - 1 or \
synthesis['surface']['mu'][index_closer] > star_grid['mu'][y, x]:
mu_ind0 = index_closer - 1
mu_ind1 = index_closer
else:
mu_ind0 = index_closer
mu_ind1 = index_closer + 1
diff_mu = synthesis['surface']['mu'][mu_ind1] - \
synthesis['surface']['mu'][mu_ind0]
star_grid['spectra_mu'][x][y] = synthesis['surface']['flux'][:, mu_ind0] \
+ (star_grid['mu'][y, x] - synthesis['surface']['mu'][mu_ind0]) / diff_mu \
* (synthesis['surface']['flux'][:, mu_ind1]
- synthesis['surface']['flux'][:, mu_ind0])
""" Computation of the continuum level (total flux is already normalized)"""
star_grid['continuum'] = [[0] * star_grid['n_grid']
for i in range(star_grid['n_grid'])]
spectral_window = ((synthesis['surface']['wave'] > clv_rm_dict['continuum_range'][0]) &
(synthesis['surface']['wave'] < clv_rm_dict['continuum_range'][1]))
for x in range(0, star_grid['n_grid']):
for y in range(0, star_grid['n_grid']):
if star_grid['outside'][y, x]:
continue
star_grid['continuum'][x][y] = np.median(
star_grid['spectra_mu'][x][y][spectral_window])
star_grid['continuum_level'] = np.sum(star_grid['continuum'])
"""
Setting up the grid for the rescaling factor of the planetary radius
"""
try:
radius_grid = np.arange(clv_rm_dict['radius_factor'][0],
clv_rm_dict['radius_factor'][1] +
clv_rm_dict['radius_factor'][2],
clv_rm_dict['radius_factor'][2])
except KeyError:
radius_grid = np.arange(0.5, 2.6, 0.1)
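        # default grid: 21 rescaling factors from 0.5 to 2.5 (inclusive) in steps of 0.1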
for night in night_dict:
""" Retrieving the list of observations"""
print()
print('compute_CLV_RM_models Night: ', night)
try:
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night)
continue
except:
print()
print(' No CLV & RM correction files found, computing now ')
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle(
'observational_pams', config_in['output'], night)
instrument = night_dict[night]['instrument']
clv_rm_models = {
'common': {
'wave': synthesis['surface']['wave_out'],
'step': synthesis['surface']['step_out'],
'norm': synthesis['total']['norm_out'],
'continuum_level': star_grid['continuum_level'],
'radius_grid': radius_grid,
'n_radius_grid': len(radius_grid)
}
}
clv_rm_models['common']['convolution_dlambda'] = \
np.median(clv_rm_models['common']['wave']) / \
instrument_dict[instrument]['resolution']
clv_rm_models['common']['convolution_sigma'] = \
clv_rm_models['common']['convolution_dlambda'] / \
np.median(clv_rm_models['common']['step'])
gaussian = Gaussian1DKernel(
stddev=clv_rm_models['common']['convolution_sigma'])
clv_rm_models['common']['norm_convolved'] = convolve(
clv_rm_models['common']['norm'], gaussian)
""" Fixing border effect (we took already wave_extension angstrom outside of the
actual range, so doing it this way is fine)"""
        wave_fix_convolution = (clv_rm_models['common']['wave'] < clv_rm_models['common']['wave'][0] + wave_fix_convo) \
            | (clv_rm_models['common']['wave'] > clv_rm_models['common']['wave'][-1] - wave_fix_convo)
        clv_rm_models['common']['norm_convolved'][wave_fix_convolution] = clv_rm_models['common']['norm'][wave_fix_convolution]
"""
        Computation of the first derivative, useful to identify the
        continuum level. This method is prone to errors for
        observational data, but it's quite robust for synthetic spectra
        if jumps in wavelength are small
"""
clv_rm_models['common']['norm_convolved_derivative'] = \
first_derivative(clv_rm_models['common']['wave'],
clv_rm_models['common']['norm_convolved'])
        # Keep only the lowest `percentile_selection` percent (default: 10) of the
        # absolute derivative values, i.e. the pixels closest to zero slope
cont_10perc = np.percentile(np.abs(clv_rm_models['common']['norm_convolved_derivative']), norm_pams['percentile_selection'])
clv_rm_models['common']['norm_convolved_bool'] = (np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc) \
& (clv_rm_models['common']['norm_convolved']> norm_pams['lower_threshold'])
        print('    Number of points within selected percentile: {0:10.0f}'.format(np.sum((np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc))))
print(' Number of points above threshold: {0:10.0f}'.format(np.sum( (clv_rm_models['common']['norm_convolved']> norm_pams['lower_threshold']))))
norm_convolved_bool = (np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc) \
& (clv_rm_models['common']['norm_convolved']> norm_pams['lower_threshold'])
if np.sum(norm_convolved_bool) < 100:
            print('    Lower threshold decreased to 80% of its value to allow point selection ', norm_pams['lower_threshold']*0.80)
clv_rm_models['common']['norm_convolved_bool'] = (np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc) \
& (clv_rm_models['common']['norm_convolved']> norm_pams['lower_threshold']*0.80)
else:
clv_rm_models['common']['norm_convolved_bool'] = norm_convolved_bool
processed = {}
print()
for obs in lists['observations']:
print(' Computing CLV+RM correction for ', obs)
processed[obs] = {}
clv_rm_models[obs] = {}
n_oversampling = int(
observational_pams[obs]['EXPTIME'] / clv_rm_dict['time_step'])
if n_oversampling % 2 == 0:
n_oversampling += 1
half_time = observational_pams[obs]['EXPTIME'] / 2 / 86400.
processed[obs]['bjd_oversampling'] = np.linspace(observational_pams[obs]['BJD'] - half_time,
observational_pams[obs]['BJD'] + half_time,
n_oversampling, dtype=np.double)
if planet_dict['orbit'] == 'circular':
                # Time of pericenter coincides with transit time, if we assume e=0 and omega=np.pi/2.
eccentricity = 0.00
omega_rad = np.pi / 2.
# Tcent is assumed as reference time
Tref = planet_dict['reference_time_of_transit'][0]
Tcent_Tref = 0.000
else:
omega_rad = planet_dict['omega'][0] * deg2rad
Tref = planet_dict['reference_time']
Tcent_Tref = planet_dict['reference_time_of_transit'][0] - Tref
eccentricity = planet_dict['eccentricity'][0]
inclination_rad = planet_dict['inclination'][0] * deg2rad
true_anomaly, orbital_distance_ratio = kepler_true_anomaly_orbital_distance(
processed[obs]['bjd_oversampling'] - Tref,
Tcent_Tref,
planet_dict['period'][0],
eccentricity,
omega_rad,
planet_dict['semimajor_axis_ratio'][0])
""" planet position during its orbital motion, in unit of stellar radius"""
            # Following Murray & Correia 2011, with the argument of the ascending node set to zero.
            # 1) the ascending node coincides with the X axis
            # 2) the reference plane coincides with the plane of the sky
processed[obs]['planet_position'] = {
'xp': -orbital_distance_ratio * (np.cos(omega_rad + true_anomaly)),
'yp': orbital_distance_ratio * (np.sin(omega_rad + true_anomaly) * np.cos(inclination_rad)),
'zp': orbital_distance_ratio * (np.sin(inclination_rad) * np.sin(omega_rad + true_anomaly))
}
# projected distance of the planet's center to the stellar center
processed[obs]['planet_position']['rp'] = np.sqrt(processed[obs]['planet_position']['xp'] ** 2
+ processed[obs]['planet_position']['yp'] ** 2)
# obscured flux integrated over the full epoch
# grid n_radius_grid X size_out (of spectral model)
clv_rm_models[obs]['missing_flux'] = np.zeros(
[len(radius_grid), synthesis['surface']['size_out']], dtype=np.double)
# iterating on the sub-exposures
for j, zeta in enumerate(processed[obs]['planet_position']['zp']):
if zeta > 0 and processed[obs]['planet_position']['rp'][j] < 1. + planet_dict['radius_ratio'][0]:
# the planet is in the foreground or inside the stellar disk, continue
# adjustment: computation is performed even if only part of the planet is shadowing the star
rd = np.sqrt((processed[obs]['planet_position']['xp'][j] - star_grid['xc']) ** 2 +
(processed[obs]['planet_position']['yp'][j] - star_grid['yc']) ** 2)
# iterating on the cell grid
for x in range(0, star_grid['n_grid']):
for y in range(0, star_grid['n_grid']):
# skip the step if the cell is outside the stellar disk
# or if the cell is not shadowed by the planet when the largest possible size is considered
if star_grid['outside'][y, x] or rd[y, x] > planet_dict['radius_ratio'][0]*radius_grid[-1]:
continue
# rescaled planetary radius selection
grid_sel = (
rd[y, x] <= planet_dict['radius_ratio'][0]*radius_grid)
# stellar flux in the masked region
flux_tmp = rebin_1d_to_1d(synthesis['surface']['wave'],
synthesis['surface']['step'],
star_grid['spectra_mu'][x][y],
clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
rv_shift=star_grid['v_star'][y, x],
method='exact_flux',
preserve_flux=False)
# fixing zero values that may have been introduced by
# the rebinning process from an extremely irregular sampling
ind_sel = np.where(flux_tmp < 0.)[0]
for ii in ind_sel:
if ii == 0:
flux_tmp[ii] = flux_tmp[ii + 1]
elif ii == np.size(flux_tmp) - 1:
flux_tmp[ii] = flux_tmp[ii - 1]
else:
flux_tmp[ii] = (
flux_tmp[ii - 1] + flux_tmp[ii + 1]) / 2.
"""
Outer product of the radius selection array (size=M)
and the flux array (N) so that it can be summed
properly to the MxN missing_flux matrix.
"""
clv_rm_models[obs]['missing_flux'] += \
np.outer(grid_sel, flux_tmp)
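                            # e.g. grid_sel = [True, True, False] adds flux_tmp to the
                            # first two rows only, so row i accumulates the flux hidden
                            # when the planetary radius is rescaled by radius_grid[i]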
clv_rm_models[obs]['missing_flux'] /= n_oversampling
clv_rm_models[obs]['stellar_spectra'] = \
np.outer(np.ones(len(radius_grid)), clv_rm_models['common']['norm']) \
- (clv_rm_models[obs]['missing_flux'] /
clv_rm_models['common']['continuum_level'])
clv_rm_models[obs]['stellar_spectra_convolved'] = \
np.zeros([len(radius_grid), synthesis['surface']['size_out']],
dtype=np.double)
clv_rm_models[obs]['clv_rm_model_convolved'] = \
np.zeros([len(radius_grid), synthesis['surface']['size_out']],
dtype=np.double)
clv_rm_models[obs]['clv_rm_model_convolved_derivative'] = \
np.zeros([len(radius_grid), synthesis['surface']['size_out']],
dtype=np.double)
clv_rm_models[obs]['clv_rm_model_convolved_continuum_bool'] = \
np.zeros([len(radius_grid), synthesis['surface']['size_out']],
dtype=bool)
clv_rm_models[obs]['clv_rm_model_convolved_normalized'] = \
np.zeros([len(radius_grid), synthesis['surface']['size_out']],
dtype=np.double)
for ii in range(0, len(radius_grid)):
clv_rm_models[obs]['stellar_spectra_convolved'][ii, :] = \
convolve(clv_rm_models[obs]['stellar_spectra'][ii, :],
gaussian)
clv_rm_models[obs]['stellar_spectra_convolved'][ii, wave_fix_convolution] = \
clv_rm_models[obs]['stellar_spectra'][ii, wave_fix_convolution]
"""
This is the theoretical transmission spectrum in the stellar reference frame
when only CLV and RM effects are present (no atmospheric
transmission)
"""
clv_rm_models[obs]['clv_rm_model_convolved'][ii, :] = \
clv_rm_models[obs]['stellar_spectra_convolved'][ii, :] \
/ clv_rm_models['common']['norm_convolved']
"""
High-resolution transmission spectra are always rescaled for
their continuum because in fiber-fed spectrographs the
information on the absolute flux of the star is lost.
            If not using the normalized spectrum, the normalization factor must
            be included somehow when correcting for the CLV+RM, before
            fitting the atomic absorption lines
"""
normalization_function = np.polynomial.chebyshev.Chebyshev.fit(
clv_rm_models['common']['wave'][clv_rm_models['common']['norm_convolved_bool']],
clv_rm_models[obs]['clv_rm_model_convolved'][ii, :][clv_rm_models['common']['norm_convolved_bool']],
deg=norm_pams['model_poly_degree']
)
                clv_rm_models[obs]['clv_rm_model_convolved_normalized'][ii, :] = \
                    clv_rm_models[obs]['clv_rm_model_convolved'][ii, :] \
                    / normalization_function(clv_rm_models['common']['wave'])
#plt.plot(clv_rm_models['common']['wave'], clv_rm_models[obs]['clv_rm_model_convolved_normalized'][ii, :])
#plt.plot(clv_rm_models['common']['wave'], clv_rm_models[obs]['clv_rm_model_convolved'][ii, :])
#plt.show()
""" In the planetary reference frame, the corrected transmission
spectrum T_corr is given by
T_corr = T_input * (synthetic_convolved /
stellar_spectra_convolved),
where: T_input: transmission spectrum before the correction
synthetic_convolved: integrated synthetic stellar spectrum,
convolved for the instrumental resolution.
stellar_spectra_convolved: stellar spectrum after removing the
contribute of the stellar surface covered by the planet, convolved
for the instrumental resolution (synthetic_convolved and
stellar_spectra_convolved are in the stellar rest frame must be
rebinned in the planetary rest frame)
Since clv_rm_model_convolved = stellar_spectra_convolved /
synthetic_convolved the observed transmission spectrum must be
DIVIDED by clv_rm_model_convolved
"""
save_to_cpickle('clv_rm_models', clv_rm_models,
config_in['output'], night)
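        # a minimal sketch of the intended downstream use of the stored model
        # (illustrative names, not defined in this module): after rebinning model
        # and data to the planetary rest frame,
        #     transmission_corrected = transmission_observed / clv_rm_model_convolved[i_radius, :]
        # with i_radius selecting the trial planetary radius in radius_grid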
clv_rm_models = None # Forcing memory de-allocation
if not config_in['settings'].get('full_output', False):
del star_grid['spectra_mu']
save_to_cpickle('clv_rm_star_grid', star_grid, config_in['output'])
save_to_cpickle('clv_rm_synthesis', synthesis, config_in['output'])
def plot_clv_rm_models(config_in, night_input=''):
night_dict = from_config_get_nights(config_in)
synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])
if night_input == '':
# Visualize the mu of star
fig = plt.figure(figsize=(8, 6.5))
plt.title('Limb angle')
plt.contourf(star_grid['xx'], star_grid['xx'],
star_grid['mu'], 60, cmap=plt.cm.viridis)
        plt.colorbar(label=r'$\mu$')  # draw colorbar
# plot data points.
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.xlabel('x [R_s]')
plt.ylabel('y [R_s]')
plt.show()
# Visualize the RV of star
fig = plt.figure(figsize=(8, 6.5))
# CS = plt.contour(xx,xx,v_star,50,linewidths=0.5,colors='k')
plt.title('Radial velocity field')
plt.contourf(star_grid['xx'], star_grid['xx'],
star_grid['v_star'], 100, cmap=plt.cm.seismic)
plt.colorbar(label='v_star') # draw colorbar
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.xlabel('x [R_s]')
plt.ylabel('y [R_s]')
plt.show()
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
lists = load_from_cpickle('lists', config_in['output'], night)
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night)
observational_pams = load_from_cpickle(
'observational_pams', config_in['output'], night)
colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(
lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
i0_radius = np.argmin(
np.abs(clv_rm_models['common']['radius_grid']-1.00))
for obs in lists['transit_in']:
ax1.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['stellar_spectra'][i0_radius, :],
color=colors_plot['mBJD'][obs], alpha=0.2)
ax1.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['missing_flux'][i0_radius, :] /
clv_rm_models['common']['continuum_level'],
color=colors_plot['mBJD'][obs], alpha=0.2)
ax2.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['stellar_spectra'][-1, :],
color=colors_plot['mBJD'][obs], alpha=0.2)
ax2.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['missing_flux'][-1, :] /
clv_rm_models['common']['continuum_level'],
color=colors_plot['mBJD'][obs], alpha=0.2)
# for obs in lists['transit_out']:
# ax2.plot(clv_rm_models['common']['wave'],
# clv_rm_models[obs]['stellar_spectra'],
# color=colors_plot['mBJD'][obs], alpha=0.2)
ax1.set_title(
'Night: {0:s} \n Input spectra, stellar radius'.format(night))
ax2.set_title('Stellar radius x {0:2.2f}'.format(
clv_rm_models['common']['radius_grid'][-1]))
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(
cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
| 30,251 | 46.566038 | 190 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_lightcurve_average.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from astropy.convolution import convolve, Box1DKernel
__all__ = ['compute_transmission_lightcurve_average_planetRF',
'plot_transmission_lightcurve_average_planetRF',
'compute_transmission_lightcurve_average_stellarRF',
'plot_transmission_lightcurve_average_stellarRF',
'compute_transmission_lightcurve_average_observerRF',
'plot_transmission_lightcurve_average_observerRF',
'compute_transmission_lightcurve_average',
'plot_transmission_lightcurve_average'
]
def compute_transmission_lightcurve_average_planetRF(config_in, lines_label):
compute_transmission_lightcurve_average(config_in, lines_label, reference='planetRF')
def plot_transmission_lightcurve_average_planetRF(config_in, night_input):
plot_transmission_lightcurve_average(config_in, night_input, reference='planetRF')
def compute_transmission_lightcurve_average_stellarRF(config_in, lines_label):
compute_transmission_lightcurve_average(config_in, lines_label, reference='stellarRF')
def plot_transmission_lightcurve_average_stellarRF(config_in, night_input):
plot_transmission_lightcurve_average(config_in, night_input, reference='stellarRF')
def compute_transmission_lightcurve_average_observerRF(config_in, lines_label):
compute_transmission_lightcurve_average(config_in, lines_label, reference='observerRF')
def plot_transmission_lightcurve_average_observerRF(config_in, night_input):
plot_transmission_lightcurve_average(config_in, night_input, reference='observerRF')
subroutine_name = 'transmission_lightcurve_average'
pick_files = 'transmission_lightcurve'
def compute_transmission_lightcurve_average(config_in, lines_label, reference='planetRF'):
night_dict = from_config_get_nights(config_in)
#instrument_dict = from_config_get_instrument(config_in)
#system_dict = from_config_get_system(config_in)
planet_dict = from_config_get_planet(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label] # from_config_get_transmission_lightcurve(config_in)
    output_list = ['user',
                   'mcmc_night_MED',
                   'mcmc_night_MAP',
                   'mcmc_global_MED',
                   'mcmc_global_MAP',
                   'user_uncorrected']
append_list = ['', '_uncorrected', '_clv_model']
shared_data = load_from_cpickle('shared', config_in['output'])
""" Using the line-specific range to define the transmission spectrum region """
shared_selection = (shared_data['coadd']['wave'] >= lines_dict['range'][0]) \
& (shared_data['coadd']['wave'] < lines_dict['range'][1])
binned_selection = (shared_data['binned']['wave'] >= lines_dict['range'][0]) \
& (shared_data['binned']['wave'] < lines_dict['range'][1])
lightcurve_average_template = {
'subroutine': subroutine_name,
'range': lines_dict['range'],
'wave': shared_data['coadd']['wave'][shared_selection],
'step': shared_data['coadd']['step'][shared_selection],
        'size': int(np.sum(shared_selection)),
'binned_wave': shared_data['binned']['wave'][binned_selection],
'binned_step': shared_data['binned']['step'][binned_selection],
        'binned_size': int(np.sum(binned_selection))
}
for output_selection in output_list:
skip_iteration = False
try:
lightcurve_average = load_from_cpickle(subroutine_name+'_'+reference+ '_' + output_selection, config_in['output'], lines=lines_label)
continue
except (FileNotFoundError, IOError):
print(" No average transmission lightcurve found for case:{0:s}, computing now ".format(output_selection))
print()
lightcurve_average = lightcurve_average_template.copy()
# doublet sodium in the lab reference frame
"""
C stabds for central
"""
C_bands = {}
for passband_key, passband_val in lines_dict['passbands'].items():
C_bands[passband_key] = {}
for line_key, line_val in lines_dict['lines'].items():
                C_bands[passband_key][line_key] = (np.abs(lightcurve_average['wave'] - line_val) * 2. < passband_val)
"""
S stand for side
"""
S_bands = {}
for band_key, band_val in lines_dict['continuum'].items():
S_bands[band_key] = (lightcurve_average['wave'] >= band_val[0]) & (lightcurve_average['wave'] <= band_val[1])
if 'full_transit_duration' in planet_dict:
            full_transit_duration = planet_dict['full_transit_duration'][0]
else:
full_transit_duration = planet_dict['transit_duration'][0]
if 'total_transit_duration' in planet_dict:
total_transit_duration = planet_dict['total_transit_duration'][0]
else:
total_transit_duration = planet_dict['transit_duration'][0]
transit_in_bins = np.linspace(
-total_transit_duration/2./planet_dict['period'][0],
total_transit_duration/2./planet_dict['period'][0],
6
)
transit_full_bins = np.linspace(
-full_transit_duration/2./planet_dict['period'][0],
full_transit_duration/2./planet_dict['period'][0],
6
)
transit_in_step = np.average(transit_in_bins[1:]-transit_in_bins[:-1])
transit_full_step = np.average(transit_full_bins[1:]-transit_full_bins[:-1])
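        # each transit window (total and full) is split into 5 equal phase bins via
        # 6 edges; the step is the mean bin width in units of orbital phase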
lightcurve_average['transit_in_flag'] = []
lightcurve_average['transit_full_flag'] = []
lightcurve_average['transit_out_flag'] = []
lightcurve_average['transit_in'] = {}
lightcurve_average['transit_full'] = {}
lightcurve_average['transit_out'] = {}
lightcurve_average['observations'] = {'phase': []}
lightcurve_average['bands_list'] = []
lightcurve_average['C_bands'] = C_bands
lightcurve_average['S_bands'] = S_bands
lightcurve_average['bins'] = {
'transit_in_bins': transit_in_bins,
'transit_in_step': transit_in_step,
'transit_full_bins': transit_full_bins,
'transit_full_step': transit_full_step
}
for band_key in C_bands:
for name_append in append_list:
lightcurve_average['observations']['delta_' + band_key + name_append] = []
lightcurve_average['bands_list'].extend([band_key])
for night in night_dict:
try:
lightcurve = load_from_cpickle(pick_files+'_'+reference+ '_' + output_selection, config_in['output'], night, lines_label)
except:
skip_iteration = True
continue
print("compute_transmission_lightcurve Night: ", night)
lightcurve_average['observations']['phase'].extend(lightcurve['arrays']['observations']['phase'].tolist())
lightcurve_average['transit_in_flag'].extend(
lightcurve['arrays']['observations']['transit_in_flag'].tolist())
lightcurve_average['transit_full_flag'].extend(
lightcurve['arrays']['observations']['transit_full_flag'].tolist())
lightcurve_average['transit_out_flag'].extend(
lightcurve['arrays']['observations']['transit_out_flag'].tolist())
for band_key in lightcurve_average['bands_list']:
for name_append in append_list:
lightcurve_average['observations']['delta_' + band_key + name_append].extend(
lightcurve['arrays']['observations']['delta_' + band_key + name_append].tolist())
if skip_iteration: continue
sorting_index = np.argsort(lightcurve_average['observations']['phase'])
lightcurve_average['observations']['phase'] = np.asarray(lightcurve_average['observations']['phase'])[sorting_index]
lightcurve_average['transit_in_flag'] = np.asarray(lightcurve_average['transit_in_flag'])[sorting_index]
lightcurve_average['transit_full_flag'] = np.asarray(lightcurve_average['transit_full_flag'])[sorting_index]
lightcurve_average['transit_out_flag'] = np.asarray(lightcurve_average['transit_out_flag'])[sorting_index]
lightcurve_average['transit_in']['phase'] = \
lightcurve_average['observations']['phase'][lightcurve_average['transit_in_flag']]
lightcurve_average['transit_full']['phase'] = \
lightcurve_average['observations']['phase'][lightcurve_average['transit_full_flag']]
lightcurve_average['transit_out']['phase'] = \
lightcurve_average['observations']['phase'][lightcurve_average['transit_out_flag']]
for band_key in lightcurve_average['bands_list']:
for name_append in append_list:
lightcurve_average['observations']['delta_' + band_key + name_append] = \
np.asarray(lightcurve_average['observations']['delta_' + band_key + name_append])[sorting_index]
lightcurve_average['transit_in']['delta_' + band_key + name_append] = \
lightcurve_average['observations']['delta_' + band_key + name_append][lightcurve_average['transit_in_flag']]
lightcurve_average['transit_full']['delta_' + band_key + name_append] = \
lightcurve_average['observations']['delta_' + band_key + name_append][lightcurve_average['transit_full_flag']]
lightcurve_average['transit_out']['delta_' + band_key + name_append] = \
lightcurve_average['observations']['delta_' + band_key + name_append][lightcurve_average['transit_out_flag']]
pre_duration = transit_full_bins[0] - lightcurve_average['transit_out']['phase'][0]
if pre_duration > 0:
nsteps_pre = int(pre_duration / transit_full_step)
if pre_duration % transit_full_step > 0.0:
nsteps_pre += 1
else:
nsteps_pre = 0
post_duration = lightcurve_average['transit_out']['phase'][-1] - transit_full_bins[-1]
if post_duration > 0:
nsteps_post = int(post_duration / transit_full_step)
if post_duration % transit_full_step > 0.0:
nsteps_post += 1
else:
nsteps_post = 0
transit_bins = np.arange(transit_full_bins[0] - nsteps_pre * transit_full_step,
transit_full_bins[-1] + (nsteps_post + 1.1) * transit_full_step,
transit_full_step)
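        # the bin grid is extended with whole bins before ingress and after egress,
        # so that every out-of-transit observation falls inside a bin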
lightcurve_average['binned'] = {
'observations': {
'phase': np.zeros(len(transit_bins)),
},
'transit_in': {},
'transit_full': {},
'transit_step': {},
'transit_out': {},
}
for band_key in C_bands:
for name_append in append_list:
lightcurve_average['binned']['observations']['delta_' + band_key + name_append] = np.zeros([len(transit_bins), 2])
transit_out_flag = np.zeros(len(transit_bins), dtype=bool)
transit_in_flag = np.zeros(len(transit_bins), dtype=bool)
transit_full_flag = np.zeros(len(transit_bins), dtype=bool)
n_a = 0
for nb in range(0, len(transit_bins) - 1):
sel = (lightcurve_average['observations']['phase'] >= transit_bins[nb]) \
& (lightcurve_average['observations']['phase'] < transit_bins[nb + 1])
if np.sum(sel) <= 0: continue
lightcurve_average['binned']['observations']['phase'][n_a] = np.average(
lightcurve_average['observations']['phase'][sel])
for band_key in C_bands:
for name_append in append_list:
lightcurve_average['binned']['observations']['delta_' + band_key + name_append][n_a, 0], sum_weights = np.average(
lightcurve_average['observations']['delta_' + band_key + name_append][sel, 0],
weights=1. / lightcurve_average['observations']['delta_' + band_key + name_append][sel, 1] ** 2,
returned=True)
lightcurve_average['binned']['observations']['delta_' + band_key + name_append][n_a, 1] = np.sqrt(1. / sum_weights)
if np.abs(lightcurve_average['binned']['observations']['phase'][n_a]) >= \
total_transit_duration/2./planet_dict['period'][0]:
transit_out_flag[n_a] = True
elif np.abs(lightcurve_average['binned']['observations']['phase'][n_a]) >= \
full_transit_duration/2./planet_dict['period'][0]:
transit_in_flag[n_a] = True
else:
transit_full_flag[n_a] = True
n_a += 1 # bins actually computed
lightcurve_average['binned']['transit_in']['phase'] = lightcurve_average['binned']['observations']['phase'][transit_in_flag]
lightcurve_average['binned']['transit_full']['phase'] = lightcurve_average['binned']['observations']['phase'][transit_full_flag]
lightcurve_average['binned']['transit_out']['phase'] = lightcurve_average['binned']['observations']['phase'][transit_out_flag]
lightcurve_average['binned']['observations']['phase'] = lightcurve_average['binned']['observations']['phase'][:n_a]
for band_key in C_bands:
for name_append in append_list:
lightcurve_average['binned']['transit_in']['delta_' + band_key + name_append] = \
lightcurve_average['binned']['observations']['delta_' + band_key + name_append][transit_in_flag, :]
lightcurve_average['binned']['transit_full']['delta_' + band_key + name_append] = \
lightcurve_average['binned']['observations']['delta_' + band_key + name_append][transit_full_flag, :]
lightcurve_average['binned']['transit_out']['delta_' + band_key + name_append] = \
lightcurve_average['binned']['observations']['delta_' + band_key + name_append][transit_out_flag, :]
lightcurve_average['binned']['observations']['delta_' + band_key + name_append] = \
lightcurve_average['binned']['observations']['delta_' + band_key + name_append][:n_a, :]
save_to_cpickle(subroutine_name + '_'+reference+ '_' + output_selection, lightcurve_average, config_in['output'], lines=lines_label)
def plot_transmission_lightcurve_average(config_in, night_input='', reference='planetRF'):
import matplotlib.pyplot as plt
lightcurve_average = load_from_cpickle('transmission_lightcurve_average_'+reference, config_in['output'])
for band_key in lightcurve_average['C_bands']:
plt.figure(figsize=(12, 6))
plt.title('Average transmission lightcurve\n {0:s}'.format(band_key))
        plt.errorbar(lightcurve_average['observations']['phase'],
                     lightcurve_average['observations']['delta_' + band_key][:, 0],
                     yerr=lightcurve_average['observations']['delta_' + band_key][:, 1],
                     fmt='.', c='k', alpha=0.25, label='observations')
        plt.errorbar(lightcurve_average['binned']['observations']['phase'],
                     lightcurve_average['binned']['observations']['delta_' + band_key][:, 0],
                     yerr=lightcurve_average['binned']['observations']['delta_' + band_key][:, 1],
                     fmt='.', c='k', alpha=1.0, label='binned')
plt.axvspan(-1, lightcurve_average['bins']['transit_in_bins'][0], alpha=0.25, color='green')
plt.axvspan(lightcurve_average['bins']['transit_in_bins'][-1], 1., alpha=0.25, color='green')
plt.axhline(0, c='C1')
plt.xlim(lightcurve_average['observations']['phase'][0]-0.01,
lightcurve_average['observations']['phase'][-1]+0.01)
        plt.xlabel('Orbital phase')
        plt.ylabel(r'$\mathcal{R}$ - 1.')
plt.legend()
plt.show()
| 16,394 | 47.794643 | 145 | py |
SLOPpy | SLOPpy-main/SLOPpy/differential_refraction.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.differential_refraction_preparation import compute_differential_refraction_preparation
__all__ = ["compute_differential_refraction",
"plot_differential_refraction",
"compute_differential_refraction_update",
"plot_differential_refraction_update"]
def compute_differential_refraction_update(config_in):
compute_differential_refraction(config_in, append_name='update')
def plot_differential_refraction_update(config_in, night_input=''):
plot_differential_refraction(config_in, night_input, append_name='update')
def compute_differential_refraction(config_in, append_name=None):
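    """ Correct the observations for differential atmospheric refraction.

    Each spectrum is divided by a reference spectrum (the coadded spectrum of the
    night or a user-defined reference) and the resulting flux ratio is modelled,
    either over the full spectrum or order by order, with an iteratively
    sigma-clipped spline or Chebyshev polynomial; the model is then used to
    rectify the observed e2ds spectra.
    """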
if append_name:
subroutine_name = 'differential_refraction_' + append_name
filename = 'refraction_' + append_name
else:
subroutine_name = 'differential_refraction'
filename = 'refraction'
compute_differential_refraction_preparation(config_in, append_name)
night_dict = from_config_get_nights(config_in)
for night in night_dict:
try:
refraction = load_from_cpickle(filename, config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
refraction_dict = from_config_refraction(config_in, night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving input and calibration data """
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'],
use_refraction=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
preparation = load_from_cpickle(filename + '_preparation', config_in['output'], night)
defined_reference = night_dict[night]['refraction'].get('reference', False)
if defined_reference:
reference = load_from_cpickle(filename + '_reference', config_in['output'])
preparation['coadd']['wave'] = reference['wave']
preparation['coadd']['step'] = reference['step']
preparation['coadd']['rescaled'] = reference['rescaled']
if not preparation.get('absolute_SRF', False):
print(" Observations and reference spectra are in different reference system ")
quit()
processed = {
'subroutine': subroutine_name,
'coadd': {
'wave': preparation['coadd']['wave'],
'step': preparation['coadd']['step'],
'size': preparation['coadd']['size']
}
}
refraction = {
'subroutine': subroutine_name,
'wave': preparation['coadd']['wave'],
'binned': {}
}
refraction['binned']['wave'] = np.arange(preparation['coadd']['wave'][0],
preparation['coadd']['wave'][-11],
20.*preparation['coadd']['step'][0], dtype=np.double)
refraction['binned']['size'] = np.size(refraction['binned']['wave'])
refraction['binned']['step'] = np.ones(refraction['binned']['size'], dtype=np.double) \
* 20. * preparation['coadd']['step'][0]
if refraction_dict['approach'] == 'full_spectrum':
print(" Differential refraction performed over the full spectrum")
elif refraction_dict['approach'] == 'individual_order':
print(" Differential refraction performed order-by-order ")
else:
raise ValueError("ERROR: fitting approach for differential refraction not implemented")
if refraction_dict['method'] == 'spline':
print(" Modelling performed with spline")
print(" Spline order for differential refraction fit: ",
refraction_dict['fit_order'])
print(" Knots spacing (in Angstrom) for refraction fit: ",
refraction_dict['knots_spacing'])
elif refraction_dict['method'] == 'polynomial':
print(" Modelling performed with polynomials")
print(" Chebyshev polynomial order for differential refraction fit: ",
refraction_dict['fit_order'])
else:
raise ValueError("ERROR: fitting method for differential refraction not implemented")
print(" Number of iterations: ",
refraction_dict['fit_iters'])
print()
""" Now each observation is divided by the reference spectrum, after being doppler-shifted to the observer RF
The result is then used to model the flux variation
"""
approach = refraction_dict.get('approach', 'full_spectrum')
if approach == 'full_spectrum':
for obs in lists['observations']:
print(" Division by reference spectrum and fit of the flux variation: ", obs)
refraction[obs] = {}
processed[obs] = {}
if preparation['absolute_SRF']:
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
else:
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF_mod']
processed[obs]['ratio'] = preparation[obs]['rescaled'] / preparation['coadd']['rescaled']
processed[obs]['ratio_err'] = processed[obs]['ratio'] * \
np.sqrt((preparation[obs]['rescaled_err']/preparation[obs]['rescaled'])**2
+ (preparation['coadd']['rescaled_err']/preparation['coadd']['rescaled'])**2)
refraction[obs]['fit_flag'] = (processed[obs]['ratio'] > 0.01)
if refraction_dict['method'] == 'spline':
for n_iter in range(0, refraction_dict['fit_iters']):
wave = processed['coadd']['wave'][refraction[obs]['fit_flag']]
""" picking the number of knots """
nknots = ((np.amax(wave) - np.amin(wave)) / refraction_dict['knots_spacing'])
""" picking the indices of the knots"""
idx_knots = (np.arange(1, len(wave) - 1, (len(wave) - 2.) / nknots)).astype('int')
""" passing from indices to knots values """
processed[obs]['knots'] = wave[idx_knots]
refraction[obs]['coeff'] = \
sci_int.splrep(
processed['coadd']['wave'][refraction[obs]['fit_flag']],
processed[obs]['ratio'][refraction[obs]['fit_flag']],
task=-1,
k=refraction_dict['fit_order'],
t=processed[obs]['knots'])
refraction[obs]['fit_s1d'] = sci_int.splev(processed['coadd']['wave'], refraction[obs]['coeff'])
processed[obs]['residuals'] = processed[obs]['ratio'] - refraction[obs]['fit_s1d']
if n_iter < refraction_dict['fit_iters'] - 1:
std = np.std(processed[obs]['residuals'])
refraction[obs]['fit_flag'] = (refraction[obs]['fit_flag']) \
& (np.abs(processed[obs]['residuals']) <
refraction_dict['fit_sigma'] * std)
elif refraction_dict['method'] == 'polynomial':
refraction[obs]['fit_flag'][:50] = False
refraction[obs]['fit_flag'][-50:] = False
for n_iter in range(0, refraction_dict['fit_iters']):
refraction[obs]['coeff'] = np.polynomial.chebyshev.chebfit(
processed['coadd']['wave'][refraction[obs]['fit_flag']],
processed[obs]['ratio'][refraction[obs]['fit_flag']],
refraction_dict['fit_order'])
refraction[obs]['fit_s1d'] = \
np.polynomial.chebyshev.chebval(processed['coadd']['wave'], refraction[obs]['coeff'])
processed[obs]['residuals'] = processed[obs]['ratio'] - refraction[obs]['fit_s1d']
if n_iter < refraction_dict['fit_iters'] - 1:
std = np.std(processed[obs]['residuals'])
refraction[obs]['fit_flag'] = (refraction[obs]['fit_flag']) \
& (np.abs(processed[obs]['residuals']) <
refraction_dict['fit_sigma'] * std)
""" Going back to the observer RF and rebinning the polynomial fit into the observed orders """
refraction[obs]['fit_e2ds'] = \
rebin_1d_to_2d(processed['coadd']['wave'],
input_data['coadd']['step'],
refraction[obs]['fit_s1d'],
input_data[obs]['wave'],
input_data[obs]['step'],
rv_shift=-rv_shift,
preserve_flux=False,
)
""" Zero or negative values are identified, flagged and substituted with another value """
refraction[obs]['null'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']],
dtype=bool)
for order in range(0, observational_pams['n_orders']):
refraction[obs]['fit_e2ds'][order, :], _, refraction[obs]['null'][order, :] = \
replace_values_errors_with_interpolation_1d(refraction[obs]['fit_e2ds'][order, :],
refraction[obs]['fit_e2ds'][order, :],
less_than=0.001)
processed[obs]['flux_rebinned_stellarRF_corrected'] = preparation[obs]['flux_rebinned_stellarRF'] \
/ refraction[obs]['fit_s1d']
refraction[obs]['binned_residuals'] = \
rebin_1d_to_1d(processed['coadd']['wave'],
processed['coadd']['step'],
processed[obs]['residuals'],
refraction['binned']['wave'],
refraction['binned']['step'],
preserve_flux=False)
elif approach == 'individual_order':
for obs in lists['observations']:
print(" Division by reference spectrum and fit of the flux variation: ", obs)
refraction[obs] = {}
processed[obs] = {}
if preparation['absolute_SRF']:
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
else:
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF_mod']
""" Going back to the observer RF and rebinning the spectrum into the observed orders """
preserve_flux = input_data[obs].get('absolute_flux', True)
# processed[obs]['master_flux'] = \
processed[obs]['master_flux'] = \
rebin_1d_to_2d(preparation['coadd']['wave'],
preparation['coadd']['step'],
preparation['coadd']['rescaled'],
input_data[obs]['wave'],
input_data[obs]['step'],
preserve_flux=preserve_flux,
rv_shift=-rv_shift)
# processed[obs]['master_ferr'] = \
processed[obs]['master_ferr'] = \
rebin_1d_to_2d(preparation['coadd']['wave'],
preparation['coadd']['step'],
preparation['coadd']['rescaled_err'],
input_data[obs]['wave'],
input_data[obs]['step'],
preserve_flux=preserve_flux,
rv_shift=-rv_shift,
is_error=True)
""" Zero or negative values are identified, flagged and substituted with another value """
processed[obs]['master_flux'], processed[obs]['master_ferr'], processed[obs]['master_null'] = \
replace_values_errors_with_interpolation_2d(processed[obs]['master_flux'],
processed[obs]['master_ferr'],
less_than=0.001)
# processed[obs]['ratio'] = preparation[obs]['rescaled_blazed'] / master_flux
processed[obs]['ratio'] = input_data[obs]['e2ds'] \
/ preparation[obs]['rescaling'] \
/ (processed[obs]['master_flux'] * calib_data['blaze'])
processed[obs]['ratio_err'] = processed[obs]['ratio'] * \
np.sqrt(preparation[obs]['rescaling']/input_data[obs]['e2ds']
+ (processed[obs]['master_ferr']/processed[obs]['master_flux'])**2)
refraction[obs]['fit_e2ds'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']])
processed[obs]['residuals'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']])
refraction[obs]['fit_flag'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']],
dtype=bool)
for order in range(0, input_data[obs]['n_orders']):
order_coeff_name = 'order_' + repr(order)
refraction[obs]['fit_flag'][order, :] = (processed[obs]['ratio'][order, :] > 0.1)
if refraction_dict['method'] == 'spline':
for n_iter in range(0, refraction_dict['fit_iters']):
wave = input_data[obs]['wave'][order, refraction[obs]['fit_flag'][order, :]]
""" picking the number of knots """
nknots = ((np.amax(wave) - np.amin(wave)) / refraction_dict['knots_spacing'])
""" picking the indices of the knots"""
idx_knots = (np.arange(1, len(wave) - 1, (len(wave) - 2.) / nknots)).astype('int')
""" passing from indices to knots values """
refraction[obs]['knots'] = wave[idx_knots]
refraction[obs][order_coeff_name] = \
sci_int.splrep(
input_data[obs]['wave'][order, refraction[obs]['fit_flag'][order, :]],
processed[obs]['ratio'][order, refraction[obs]['fit_flag'][order, :]],
task=-1,
k=refraction_dict['fit_order'],
t=refraction[obs]['knots'])
refraction[obs]['fit_e2ds'][order, :] = \
sci_int.splev(input_data[obs]['wave'][order, :],
refraction[obs][order_coeff_name])
processed[obs]['residuals'][order, :] = processed[obs]['ratio'][order, :] \
- refraction[obs]['fit_e2ds'][order, :]
if n_iter < refraction_dict['fit_iters'] - 1:
std = np.std(processed[obs]['residuals'][order, :])
refraction[obs]['fit_flag'][order, :] = (refraction[obs]['fit_flag'][order, :]) \
& (np.abs(processed[obs]['residuals'][order, :]) <
refraction_dict['fit_sigma'] * std)
elif refraction_dict['method'] == 'polynomial':
refraction[obs]['fit_flag'][order, :50] = False
refraction[obs]['fit_flag'][order, -50:] = False
for n_iter in range(0, refraction_dict['fit_iters']):
refraction[obs][order_coeff_name] = np.polynomial.chebyshev.chebfit(
input_data[obs]['wave'][order, refraction[obs]['fit_flag'][order, :]],
processed[obs]['ratio'][order, refraction[obs]['fit_flag'][order, :]],
refraction_dict['fit_order'])
refraction[obs]['fit_e2ds'][order, :] = \
np.polynomial.chebyshev.chebval(input_data[obs]['wave'][order, :],
refraction[obs][order_coeff_name])
processed[obs]['residuals'][order, :] = processed[obs]['ratio'][order, :] \
- refraction[obs]['fit_e2ds'][order, :]
if n_iter < refraction_dict['fit_iters'] - 1:
std = np.std(processed[obs]['residuals'][order, :])
refraction[obs]['fit_flag'][order, :] = (refraction[obs]['fit_flag'][order, :]) \
& (np.abs(processed[obs]['residuals'][order, :]) <
refraction_dict['fit_sigma'] * std)
e2ds_corrected = input_data[obs]['e2ds'] / refraction[obs]['fit_e2ds']
e2ds_corrected_err = input_data[obs]['e2ds_err'] / refraction[obs]['fit_e2ds']
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['flux_rebinned_stellarRF_corrected'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
e2ds_corrected,
calib_data['blaze'],
processed['coadd']['wave'],
input_data['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=rv_shift)
processed[obs]['err_flux_rebinned_stellarRF_corrected'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
e2ds_corrected_err,
calib_data['blaze'],
processed['coadd']['wave'],
input_data['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=rv_shift,
is_error=True)
processed[obs]['flux_rebinned_stellarRF_corrected'], \
processed[obs]['err_flux_rebinned_stellarRF_corrected'], _ = \
replace_values_errors_with_interpolation_1d(processed[obs]['flux_rebinned_stellarRF_corrected'],
processed[obs]['err_flux_rebinned_stellarRF_corrected'],
less_than=0.001)
refraction[obs]['binned_residuals'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['residuals'],
np.ones_like(processed[obs]['residuals']),
refraction['binned']['wave'],
refraction['binned']['step'],
rv_shift=0.0000,
preserve_flux=False)
else:
print(" Please choose either full_spectrum or individual_order as preferred approach")
quit()
if not config_in['settings'].get('full_output', False):
for obs in lists['observations']:
del processed[obs]['ratio_err']
try:
del processed[obs]['err_flux_rebinned_stellarRF_corrected']
del processed[obs]['master_flux']
del processed[obs]['master_ferr']
del processed[obs]['master_null']
except:
pass
if append_name:
save_to_cpickle('refraction_' + append_name + '_processed', processed, config_in['output'], night)
save_to_cpickle('refraction_' + append_name, refraction, config_in['output'], night)
else:
save_to_cpickle('refraction_processed', processed, config_in['output'], night)
save_to_cpickle('refraction', refraction, config_in['output'], night)
def plot_differential_refraction(config_in, night_input='', append_name=None):
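    """ Diagnostic plots for the differential refraction correction: input versus
    corrected spectra, the fitted obs/master flux ratios, and the residuals of
    the fit.
    """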
night_dict = from_config_get_nights(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
refraction_dict = from_config_refraction(config_in, night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations"""
input_data = retrieve_observations(config_in['output'], night, lists['observations'],
use_refraction=False, use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
try:
""" Retrieving the analysis"""
if append_name:
processed = load_from_cpickle('refraction_' + append_name + '_processed', config_in['output'], night)
preparation = load_from_cpickle('refraction_' + append_name + '_preparation', config_in['output'],
night)
refraction = load_from_cpickle('refraction_' + append_name, config_in['output'], night)
else:
processed = load_from_cpickle('refraction_processed', config_in['output'], night)
preparation = load_from_cpickle('refraction_preparation', config_in['output'], night)
refraction = load_from_cpickle('refraction', config_in['output'], night)
except:
print(" Failed in retrieving processed data")
return
approach = refraction_dict.get('approach', 'full_spectrum')
colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(lists, observational_pams)
offset = 0.10
y_limits = [0.8, 1.2]
flag_e2ds = {}
flag_coadd = {}
for i, obs in enumerate(lists['observations']):
shrink_factor = 4
if input_data[obs]['n_orders'] > shrink_factor:
factor = (input_data[obs]['n_orders'] * input_data[obs]['n_pixels']) \
// (input_data[obs]['n_pixels'] * shrink_factor)
flag_e2ds[obs] = (np.random.choice(a=([False] * (factor-1)) + [True],
size=(input_data[obs]['n_orders'], input_data[obs]['n_pixels'])))
flag_coadd[obs] = \
np.random.choice(a=([False] * factor) + [True], size=input_data['coadd']['size'])
else:
flag_e2ds[obs] = np.ones([input_data[obs]['n_orders'], input_data[obs]['n_pixels']], dtype=bool)
flag_coadd[obs] = np.ones(input_data['coadd']['size'], dtype=bool)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
for i, obs in enumerate(lists['observations']):
""" We slim down the plot """
if i == 0:
ax1.scatter(preparation['coadd']['wave'][flag_coadd[obs]],
preparation[obs]['flux_rebinned_stellarRF'][flag_coadd[obs]] /
preparation[obs]['rescaling'],
c=colors_scatter['mBJD'][obs], s=2, alpha=0.2, label='observation (SRF)')
else:
ax1.scatter(preparation['coadd']['wave'][flag_coadd[obs]],
preparation[obs]['flux_rebinned_stellarRF'][flag_coadd[obs]] /
preparation[obs]['rescaling'],
c=colors_scatter['mBJD'][obs], s=2, alpha=0.2)
ax2.scatter(processed['coadd']['wave'][flag_coadd[obs]],
processed[obs]['flux_rebinned_stellarRF_corrected'][flag_coadd[obs]] /
preparation[obs]['rescaling'],
c=colors_scatter['mBJD'][obs], s=3, alpha=0.2)
ax1.plot(preparation['coadd']['wave'], preparation['coadd']['rescaled'], c='k', lw=1, alpha=0.5,
label='reference spectrum')
ax2.plot(preparation['coadd']['wave'], preparation['coadd']['rescaled'], c='k', lw=1, alpha=0.5)
ax1.set_xlim(processed['coadd']['wave'][0], processed['coadd']['wave'][-1])
ax1.set_ylim(y_limits)
ax2.set_ylim(y_limits)
ax1.legend(loc=1)
ax1.set_title('Night: {0:s} \n Input spectra'.format(night))
ax2.set_title('After differential refraction correction')
ax2.set_xlabel('$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
"""
PLOT
"""
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(1, 2, width_ratios=[50, 1])
ax = plt.subplot(gs[0, 0])
cbax1 = plt.subplot(gs[:, 1])
for i, obs in enumerate(lists['observations']):
if i == 0:
offset = np.std(processed[obs]['ratio'][refraction[obs]['fit_flag']].flatten()) * 6
average = np.average(processed[obs]['ratio'][refraction[obs]['fit_flag']].flatten())
y_limits = [average - offset, average + offset]
if approach == 'full_spectrum':
flag = flag_coadd[obs] & refraction[obs]['fit_flag']
wave = refraction['wave']
elif approach == 'individual_order':
flag = flag_e2ds[obs] & refraction[obs]['fit_flag']
wave = input_data[obs]['wave']
ax.scatter(wave[flag],
processed[obs]['ratio'][flag] + offset * i,
c=colors_scatter['mBJD'][obs], s=1, alpha=0.50, zorder=2)
ax.scatter(wave[~refraction[obs]['fit_flag']],
processed[obs]['ratio'][~refraction[obs]['fit_flag']] + offset * i,
c='k', s=2, alpha=0.1, zorder=1)
for order in range(0, input_data[obs]['n_orders']):
ax.plot(input_data[obs]['wave'][order, :],
refraction[obs]['fit_e2ds'][order, :] + offset * i,
c='k', lw=1, alpha=0.5, zorder=5)
y_limits_offset = [min(y_limits[0] + offset * i, y_limits[0]),
max(y_limits[1] + offset * i, y_limits[1])]
ax.set_ylim(y_limits_offset)
ax.set_xlabel('$\lambda$ [$\AA$]')
# ax.legend(loc=3)
ax.set_title('Night: {0:s} \n Differential refraction correction - Fit of the ratio obs/master'.format(night))
sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
"""
PLOT: residuals of the fit
"""
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(1, 2, width_ratios=[50, 1])
ax = plt.subplot(gs[0, 0])
cbax1 = plt.subplot(gs[:, 1])
approach = refraction_dict.get('approach', 'full_spectrum')
for i, obs in enumerate(lists['observations']):
if i == 0:
median = np.median(processed[obs]['residuals'][refraction[obs]['fit_flag']].flatten())
offset = np.std(processed[obs]['residuals'][refraction[obs]['fit_flag']].flatten()) * 6
y_limits = [median - offset, median + offset]
if approach == 'full_spectrum':
flag = flag_coadd[obs] & refraction[obs]['fit_flag']
wave = refraction['wave']
elif approach == 'individual_order':
flag = flag_e2ds[obs] & refraction[obs]['fit_flag']
wave = input_data[obs]['wave']
ax.scatter(wave[flag],
processed[obs]['residuals'][flag] + offset * i,
c=colors_scatter['mBJD'][obs], s=1, alpha=0.50, zorder=2)
ax.scatter(wave[~refraction[obs]['fit_flag']],
processed[obs]['residuals'][~refraction[obs]['fit_flag']] + offset * i,
c='k', s=2, alpha=0.1, zorder=1)
ax.axhline(offset * i, c='k', zorder=3)
y_limits_offset = [min(y_limits[0] + offset * i, y_limits[0]),
max(y_limits[1] + offset * i, y_limits[1])]
ax.set_ylim(y_limits_offset)
ax.set_xlabel('$\lambda$ [$\AA$]')
# ax.legend(loc=3)
ax.set_title(
'Night: {0:s} \n Differential refraction correction - Residuals of the fit on ratio obs/master'.format(
night))
sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
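        # NOTE: the 'continue' below intentionally skips the corrected-e2ds plot
        # that follows; remove it to re-enable that figure.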
continue
"""
PLOT: corrected e2ds
"""
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(1, 2, width_ratios=[50, 1])
ax = plt.subplot(gs[0, 0])
cbax1 = plt.subplot(gs[:, 1])
for i, obs in enumerate(lists['observations']):
e2ds_corrected = input_data[obs]['e2ds'] / refraction[obs]['fit_e2ds']
ax.scatter(input_data[obs]['wave'],
e2ds_corrected / preparation[obs]['rescaling'],
c=colors_scatter['mBJD'][obs], s=2, alpha=0.20)
# for order in range(0, np.size(input_data[obs]['wave'][:, 0])):
#
# ax.plot(input_data[obs]['wave'][order, :],
# refraction[obs]['fit_e2ds'][order, :],
# c=color_array, lw=1)
ax.set_xlabel('$\lambda$ [$\AA$]')
ax.set_title('Night: {0:s} \n Rescaled e2ds spectra after differential refraction correction'.format(night))
sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
| 32,600 | 48.395455 | 120 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_lightcurve.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from astropy.convolution import convolve, Box1DKernel
__all__ = ['compute_transmission_lightcurve_planetRF',
'plot_transmission_lightcurve_planetRF',
'compute_transmission_lightcurve_stellarRF',
'plot_transmission_lightcurve_stellarRF',
'compute_transmission_lightcurve_observerRF',
'plot_transmission_lightcurve_observerRF',
'compute_transmission_lightcurve',
'plot_transmission_lightcurve'
]
def compute_transmission_lightcurve_planetRF(config_in, lines_label):
compute_transmission_lightcurve(config_in, lines_label, reference='planetRF')
def plot_transmission_lightcurve_planetRF(config_in, night_input):
plot_transmission_lightcurve(config_in, night_input, reference='planetRF')
def compute_transmission_lightcurve_stellarRF(config_in, lines_label):
compute_transmission_lightcurve(config_in, lines_label, reference='stellarRF')
def plot_transmission_lightcurve_stellarRF(config_in, night_input):
plot_transmission_lightcurve(config_in, night_input, reference='stellarRF')
def compute_transmission_lightcurve_observerRF(config_in, lines_label):
compute_transmission_lightcurve(config_in, lines_label, reference='observerRF')
def plot_transmission_lightcurve_observerRF(config_in, night_input):
plot_transmission_lightcurve(config_in, night_input, reference='observerRF')
subroutine_name = 'transmission_lightcurve'
pick_files = 'transmission_spectrum'
def compute_transmission_lightcurve(config_in, lines_label, reference='planetRF'):
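    """ Compute the transmission lightcurve of the lines_label spectral lines in
    the requested reference frame (planetRF, stellarRF or observerRF).

    For every output selection (user parameters or MCMC solutions) and for every
    night, each transmission spectrum is integrated inside the central passbands
    (C bands) and the continuum side bands (S bands); the C-S difference as a
    function of the orbital phase is the transmission lightcurve, which is also
    computed in phase bins.
    """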
do_average_instead_of_sum = True
night_dict = from_config_get_nights(config_in)
#instrument_dict = from_config_get_instrument(config_in)
#system_dict = from_config_get_system(config_in)
planet_dict = from_config_get_planet(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
#shared_data = load_from_cpickle('shared', config_in['output'])
lines_dict = spectral_lines[lines_label] # from_config_get_transmission_lightcurve(config_in)
    if 'full_transit_duration' in planet_dict:
        full_transit_duration = planet_dict['full_transit_duration'][0]
    else:
        full_transit_duration = planet_dict['transit_duration'][0]
if 'total_transit_duration' in planet_dict:
total_transit_duration = planet_dict['total_transit_duration'][0]
else:
total_transit_duration = planet_dict['transit_duration'][0]
transit_in_bins = np.linspace(
-total_transit_duration/2./planet_dict['period'][0],
total_transit_duration/2./planet_dict['period'][0],
6
)
transit_full_bins = np.linspace(
-full_transit_duration/2./planet_dict['period'][0],
full_transit_duration/2./planet_dict['period'][0],
6
)
transit_in_step = np.average(transit_in_bins[1:]-transit_in_bins[:-1])
transit_full_step = np.average(transit_full_bins[1:]-transit_full_bins[:-1])
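    # transit_in_bins spans the total transit duration (first to fourth contact),
    # transit_full_bins the full transit duration (second to third contact, i.e.
    # the planet entirely inside the stellar disk); each range is divided into
    # five equal bins in orbital phase.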
output_list = ['user',
'mcmc_night_MED',
'mcmc_night_MAP',
'mcmc_global_MED',
'mcmc_global_MAP']
append_list = ['', '_uncorrected', '_clv_model']
for output_selection in output_list:
skip_iteration = False
for night in night_dict:
print("compute_transmission_lightcurve Night: ", night)
try:
transmission = load_from_cpickle(pick_files+'_'+reference + '_' + output_selection, config_in['output'], night, lines_label)
except (FileNotFoundError, IOError):
                print('No transmission spectra found for case {0:s}, be sure to run transmission_spectrum before this step'.format(output_selection))
skip_iteration = True
if skip_iteration: continue
try:
lightcurve = load_from_cpickle(subroutine_name +'_'+reference+ '_' + output_selection, config_in['output'], night, lines_label)
print()
continue
except (FileNotFoundError, IOError):
print(" No transmission_lightcurve file found, computing now ")
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
#calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
#input_data = retrieve_observations( config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
# doublet sodium in the lab reference frame
"""
C stands for central
"""
C_bands = {}
for passband_key, passband_val in lines_dict['passbands'].items():
C_bands[passband_key] = {}
for line_key, line_val in lines_dict['lines'].items():
C_bands[passband_key][line_key] = (np.abs(transmission['wave'] - line_val) * 2. < passband_val)
"""
S stands for side
"""
S_bands = {}
for band_key, band_val in lines_dict['continuum'].items():
S_bands[band_key] = (transmission['wave'] >= band_val[0]) & (transmission['wave'] <= band_val[1])
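            # The lightcurve point of each observation will be the average
            # transmission inside the C bands minus the average inside the S
            # bands, so that planetary absorption shows up as a negative delta.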
processed = {
'subroutine': subroutine_name
}
lightcurve = {
'subroutine': subroutine_name,
'arrays': {
'observations': {
                    'obs_name': np.zeros(len(lists['observations']), dtype='U64'),  # dtype=str would truncate names to one character
'phase': np.zeros(len(lists['observations'])),
},
'transit_in': {},
'transit_full': {},
'transit_out': {},
},
'C_bands': C_bands,
'S_bands': S_bands,
'bins': {
'transit_in_bins': transit_in_bins,
'transit_in_step': transit_in_step,
'transit_full_bins': transit_full_bins,
'transit_full_step': transit_full_step
}
}
""" Adding the C-bands arrays to the dictionary"""
for band_key in C_bands:
for name_append in append_list:
lightcurve['arrays']['observations']['delta_' + band_key + name_append] = np.zeros([len(lists['observations']), 2])
transit_out_flag = np.zeros(len(lists['observations']), dtype=bool)
transit_in_flag = np.zeros(len(lists['observations']), dtype=bool)
transit_full_flag = np.zeros(len(lists['observations']), dtype=bool)
for n_obs, obs in enumerate( lists['observations']):
processed[obs] = {}
lightcurve[obs] = {}
try:
phase_internal = (observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'][0])/planet_dict['period'][0]
except:
phase_internal = (observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'])/planet_dict['period'][0]
processed[obs]['bands'] = {
'phase': phase_internal
}
processed[obs]['bands_uncorrected'] = {
'phase': phase_internal
}
processed[obs]['bands_clv_model'] = {
'phase': phase_internal
}
processed[obs]['s_integrated'] = 0.000
processed[obs]['s_integrated_uncorrected'] = 0.000
processed[obs]['s_integrated_clv_model'] = 0.000
processed[obs]['s_sigmaq_sum'] = 0.000
n_bands = 0.0
for band_key, band_val in S_bands.items():
if do_average_instead_of_sum:
processed[obs]['bands'][band_key] = \
[np.average(transmission[obs]['normalized'][band_val]),
np.sum((transmission[obs]['normalized_err'][band_val])**2)
/len(transmission[obs]['normalized_err'][band_val])**2]
processed[obs]['bands_uncorrected'][band_key] = \
[np.average(transmission[obs]['normalized_uncorrected'][band_val]),
np.sum((transmission[obs]['normalized_uncorrected_err'][band_val])**2)
/len(transmission[obs]['normalized_uncorrected_err'][band_val])**2]
processed[obs]['bands_clv_model'][band_key] = \
[np.average(transmission[obs]['clv_model_rebinned'][band_val]),
np.sum((transmission[obs]['normalized_err'][band_val])**2)
/len(transmission[obs]['normalized_err'][band_val])**2]
else:
processed[obs]['bands'][band_key] = \
[np.sum(transmission[obs]['normalized'][band_val]),
np.sum((transmission[obs]['normalized_err'][band_val])**2)]
processed[obs]['bands_uncorrected'][band_key] = \
[np.sum(transmission[obs]['normalized_uncorrected'][band_val]),
np.sum((transmission[obs]['normalized_uncorrected_err'][band_val])**2)]
processed[obs]['bands_clv_model'][band_key] = \
[np.sum(transmission[obs]['clv_model_rebinned'][band_val]),
np.sum((transmission[obs]['normalized_err'][band_val])**2)]
processed[obs]['s_integrated'] += processed[obs]['bands'][band_key][0]
processed[obs]['s_integrated_uncorrected'] += processed[obs]['bands_uncorrected'][band_key][0]
processed[obs]['s_integrated_clv_model'] += processed[obs]['bands_clv_model'][band_key][0]
processed[obs]['s_sigmaq_sum'] += processed[obs]['bands'][band_key][1]
n_bands += 1.
processed[obs]['s_integrated'] /= n_bands
processed[obs]['s_integrated_uncorrected'] /= n_bands
processed[obs]['s_integrated_clv_model'] /= n_bands
processed[obs]['s_sigmaq_sum'] /= n_bands**2
#processed[obs]['s_factor'] =np.power(s_integrated, -2.0)
#processed[obs]['s_factor_clv_model'] =np.power(s_integrated, -2.0)
#processed[obs]['s_factor_uncorrected'] = np.power(s_integrated, -2.0)
for band_key, band_dict in C_bands.items():
processed[obs]['bands'][band_key] = {}
processed[obs]['bands_uncorrected'][band_key] = {}
processed[obs]['bands_clv_model'][band_key] = {}
processed[obs]['c_integrated'] = 0.000
processed[obs]['c_integrated_uncorrected'] = 0.000
processed[obs]['c_integrated_clv_model'] = 0.000
processed[obs]['c_sigmaq_sum'] = 0.000
n_bands = 0.0
for line_key, line_val in band_dict.items():
if do_average_instead_of_sum:
processed[obs]['bands'][band_key][line_key] = \
[np.average(transmission[obs]['normalized'][line_val]),
np.sum((transmission[obs]['normalized_err'][line_val])**2)
/ len(transmission[obs]['normalized_err'][line_val])**2]
processed[obs]['bands_uncorrected'][band_key][line_key] = \
[np.average(transmission[obs]['normalized_uncorrected'][line_val]),
np.sum((transmission[obs]['normalized_uncorrected_err'][line_val])**2)
/ len(transmission[obs]['normalized_uncorrected_err'][line_val])**2]
processed[obs]['bands_clv_model'][band_key][line_key] = \
[np.average(transmission[obs]['clv_model_rebinned'][line_val]),
np.sum((transmission[obs]['normalized_err'][line_val])**2)
/ len(transmission[obs]['normalized_err'][line_val])**2]
else:
processed[obs]['bands'][band_key][line_key] = \
[np.sum(transmission[obs]['normalized'][line_val]),
np.sum((transmission[obs]['normalized_err'][line_val]) ** 2)]
processed[obs]['bands_uncorrected'][band_key][line_key] = \
[np.sum(transmission[obs]['normalized_uncorrected'][line_val]),
np.sum((transmission[obs]['normalized_uncorrected_err'][line_val]) ** 2)]
processed[obs]['bands_clv_model'][band_key][line_key] = \
[np.sum(transmission[obs]['clv_model_rebinned'][line_val]),
np.sum((transmission[obs]['normalized_err'][line_val]) ** 2)]
processed[obs]['c_integrated'] += processed[obs]['bands'][band_key][line_key][0]
processed[obs]['c_integrated_uncorrected'] += processed[obs]['bands_uncorrected'][band_key][line_key][0]
processed[obs]['c_integrated_clv_model'] += processed[obs]['bands_clv_model'][band_key][line_key][0]
processed[obs]['c_sigmaq_sum'] += processed[obs]['bands'][band_key][line_key][1]
n_bands += 1.
processed[obs]['c_integrated'] /= n_bands
processed[obs]['c_integrated_uncorrected'] /= n_bands
processed[obs]['c_integrated_clv_model'] /= n_bands
processed[obs]['c_sigmaq_sum'] /= n_bands ** 2
for name_append in append_list:
lightcurve[obs]['delta_' + band_key + name_append] = [processed[obs]['c_integrated' + name_append]
- processed[obs]['s_integrated' + name_append],
np.sqrt(processed[obs]['s_sigmaq_sum'] + processed[obs]['c_sigmaq_sum'])]
lightcurve['arrays']['observations']['delta_' + band_key + name_append][n_obs, :] = \
lightcurve[obs]['delta_' + band_key + name_append][:]
lightcurve[obs]['phase'] = processed[obs]['bands']['phase']
lightcurve['arrays']['observations']['obs_name'][n_obs] = obs
lightcurve['arrays']['observations']['phase'][n_obs] = lightcurve[obs]['phase']
if obs in lists['transit_out']:
transit_out_flag[n_obs] = True
if obs in lists['transit_in']:
transit_in_flag[n_obs] = True
if obs in lists['transit_full']:
transit_full_flag[n_obs] = True
for band_key in C_bands:
for name_append in append_list:
lightcurve['arrays']['rescaling_' + band_key + name_append] = \
np.average(lightcurve['arrays']['observations']['delta_' + band_key + name_append][transit_out_flag, 0], axis=0)
sorting_index = np.argsort(lightcurve['arrays']['observations']['phase'])
transit_out_flag = transit_out_flag[sorting_index]
transit_in_flag = transit_in_flag[sorting_index]
transit_full_flag = transit_full_flag[sorting_index]
lightcurve['arrays']['observations']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][sorting_index]
lightcurve['arrays']['observations']['phase'] = lightcurve['arrays']['observations']['phase'][sorting_index]
lightcurve['arrays']['transit_in']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_in_flag]
lightcurve['arrays']['transit_in']['phase'] = lightcurve['arrays']['observations']['phase'][transit_in_flag]
lightcurve['arrays']['transit_full']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_full_flag]
lightcurve['arrays']['transit_full']['phase'] = lightcurve['arrays']['observations']['phase'][transit_full_flag]
lightcurve['arrays']['transit_out']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_out_flag]
lightcurve['arrays']['transit_out']['phase'] = lightcurve['arrays']['observations']['phase'][transit_out_flag]
for band_key in C_bands:
for name_append in append_list:
lightcurve['arrays']['observations']['delta_' + band_key + name_append] = \
lightcurve['arrays']['observations']['delta_' + band_key + name_append][sorting_index]
# / lightcurve['arrays']['rescaling_' + band_key]
lightcurve['arrays']['transit_in']['delta_' + band_key + name_append] = \
lightcurve['arrays']['observations']['delta_' + band_key + name_append][transit_in_flag]
lightcurve['arrays']['transit_full']['delta_' + band_key + name_append] = \
lightcurve['arrays']['observations']['delta_' + band_key + name_append][transit_full_flag]
lightcurve['arrays']['transit_out']['delta_' + band_key + name_append] = \
lightcurve['arrays']['observations']['delta_' + band_key + name_append][transit_out_flag]
lightcurve['arrays']['observations']['transit_out_flag'] = transit_out_flag
lightcurve['arrays']['observations']['transit_in_flag'] = transit_in_flag
lightcurve['arrays']['observations']['transit_full_flag'] = transit_full_flag
pre_duration = transit_full_bins[0] - lightcurve['arrays']['transit_out']['phase'][0]
if pre_duration > 0:
nsteps_pre = int(pre_duration/transit_full_step)
if pre_duration % transit_full_step > 0.0:
nsteps_pre += 1
else:
nsteps_pre = 0
post_duration = lightcurve['arrays']['transit_out']['phase'][-1] - transit_full_bins[-1]
if post_duration > 0:
nsteps_post = int(post_duration / transit_full_step)
if post_duration % transit_full_step > 0.0:
nsteps_post += 1
else:
nsteps_post = 0
transit_bins = np.arange(transit_full_bins[0]-nsteps_pre*transit_full_step,
transit_full_bins[-1] + (nsteps_post+1.1) * transit_full_step,
transit_full_step)
lightcurve['binned'] = {
'observations': {
'phase': np.zeros(len(transit_bins)),
},
'transit_in': {},
'transit_full': {},
'transit_out': {},
}
for band_key in C_bands:
for name_append in append_list:
lightcurve['binned']['observations']['delta_' + band_key + name_append] = np.zeros([len(transit_bins), 2])
transit_out_flag = np.zeros(len(transit_bins), dtype=bool)
transit_in_flag = np.zeros(len(transit_bins), dtype=bool)
transit_full_flag = np.zeros(len(transit_bins), dtype=bool)
n_a = 0
for nb in range(0, len(transit_bins)-1):
sel = (lightcurve['arrays']['observations']['phase'] >= transit_bins[nb]) \
& (lightcurve['arrays']['observations']['phase'] < transit_bins[nb+1])
if np.sum(sel) <= 0: continue
lightcurve['binned']['observations']['phase'][n_a] = np.average(lightcurve['arrays']['observations']['phase'][sel])
for band_key in C_bands:
for name_append in append_list:
lightcurve['binned']['observations']['delta_' + band_key + name_append][n_a, 0], sum_weights = np.average(
lightcurve['arrays']['observations']['delta_' + band_key + name_append][sel, 0],
weights=1. / lightcurve['arrays']['observations']['delta_' + band_key + name_append][sel, 1]**2,
returned=True)
lightcurve['binned']['observations']['delta_' + band_key + name_append][n_a, 1] = np.sqrt(1. / sum_weights)
if np.abs(lightcurve['binned']['observations']['phase'][n_a]) >= \
total_transit_duration/2./planet_dict['period'][0]:
transit_out_flag[n_a] = True
elif np.abs(lightcurve['binned']['observations']['phase'][n_a]) >= \
full_transit_duration/2./planet_dict['period'][0]:
transit_in_flag[n_a] = True
else:
transit_full_flag[n_a] = True
n_a += 1 # bins actually computed
lightcurve['binned']['transit_in']['phase'] = lightcurve['binned']['observations']['phase'][transit_in_flag]
lightcurve['binned']['transit_full']['phase'] = lightcurve['binned']['observations']['phase'][transit_full_flag]
lightcurve['binned']['transit_out']['phase'] = lightcurve['binned']['observations']['phase'][transit_out_flag]
lightcurve['binned']['observations']['phase'] = lightcurve['binned']['observations']['phase'][:n_a]
for band_key in C_bands:
for name_append in append_list:
lightcurve['binned']['transit_in']['delta_' + band_key + name_append] = \
lightcurve['binned']['observations']['delta_' + band_key + name_append][transit_in_flag, :]
lightcurve['binned']['transit_full']['delta_' + band_key + name_append] = \
lightcurve['binned']['observations']['delta_' + band_key + name_append][transit_full_flag, :]
lightcurve['binned']['transit_out']['delta_' + band_key + name_append] = \
lightcurve['binned']['observations']['delta_' + band_key + name_append][transit_out_flag, :]
lightcurve['binned']['observations']['delta_' + band_key + name_append] = \
lightcurve['binned']['observations']['delta_' + band_key + name_append][:n_a, :]
save_to_cpickle(subroutine_name + '_'+reference + '_' + output_selection +'_processed', processed, config_in['output'], night, lines_label)
save_to_cpickle(subroutine_name + '_'+reference + '_' + output_selection, lightcurve, config_in['output'], night, lines_label)
def plot_transmission_lightcurve(config_in, night_input='', reference='planetRF'):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
    if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
print("plot_transmission_lightcurve Night: ", night)
""" Retrieving the analysis"""
try:
lightcurve = load_from_cpickle(subroutine_name+'_'+reference, config_in['output'], night)
except:
print()
print("No transmission lightcurve dataset, no plots")
continue
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
C_bands = lightcurve['C_bands']
for band_key in C_bands:
plt.figure(figsize=(12, 6))
plt.title('Transmission lightcurve - night {0:s} \n {1:s}'.format(night, band_key))
plt.errorbar(lightcurve['arrays']['observations']['phase'],
lightcurve['arrays']['observations']['delta_' + band_key][:,0],
yerr= lightcurve['arrays']['observations']['delta_' + band_key][:,1],
fmt='.', c='k', alpha=0.25, label='observations')
            plt.errorbar(lightcurve['binned']['observations']['phase'],
                         lightcurve['binned']['observations']['delta_' + band_key][:, 0],
                         yerr=lightcurve['binned']['observations']['delta_' + band_key][:, 1],
                         fmt='.', c='k', alpha=1.0, label='binned')
plt.axvspan(-1, lightcurve['bins']['transit_in_bins'][0], alpha=0.25, color='green')
plt.axvspan(lightcurve['bins']['transit_in_bins'][-1], 1., alpha=0.25, color='green')
plt.axhline(0, c='C1')
plt.xlim(lightcurve['arrays']['observations']['phase'][0]-0.01,
lightcurve['arrays']['observations']['phase'][-1]+0.01)
            plt.xlabel('Orbital phase')
plt.ylabel('$\mathcal{R}$ - 1.')
plt.legend()
plt.show()
| 25,556 | 51.58642 | 151 | py |
SLOPpy | SLOPpy-main/SLOPpy/config_default.py | config_default = {
'molecfit': {
'installation_path': '/usr/local/eso/bin/',
'include_telluric': './additional_files/include_regions_ORF.dat',
'include_stellar': './additional_files/include_regions_SRF.dat',
'exptime_coadd': 2600,
'rebinning_step': 0.010
},
'instruments': {
'wavelength_step': 0.01000000,
'use_rv_from_ccf': False,
'use_analytical_rvs': False,
'linear_fit_method': 'linear_curve_fit',
'orders': None,
'telluric': None,
'telluric_template': None
},
'refraction': {
'approach': 'individual_order',
'method': 'polynomial',
'fit_order': 5,
'fit_iters': 3,
'fit_sigma': 5,
'knots_spacing': 25.00,
'reference_night': False,
'reference_instrument': False
},
'master-out': {
'wavelength_range': [5880.000, 5906.000],
'wavelength_step': 0.010000000,
'binning_factor': 20,
'use_smoothed': False,
'use_composite': False,
'boxcar_smoothing': 3,
},
'pca_parameters': {
'iterations': 5,
'ref_iteration': 0,
},
'settings': {
'full_output': False
}
}
""" List ok keywords that are copied from the instrument dictionary
when they are not explicitily specified
"""
copy_from_instrument = [
'telluric_template',
'telluric',
'mask',
'use_rv_from_ccf',
'use_analytical_rvs'
]
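

# Illustrative sketch (an assumption, not part of the original module): one way
# the copy_from_instrument keywords above could be propagated from an instrument
# dictionary to a night dictionary. The function name and the flat dictionary
# layout are hypothetical.
def apply_instrument_defaults(night_conf, instrument_conf):
    for key in copy_from_instrument:
        if key not in night_conf and key in instrument_conf:
            night_conf[key] = instrument_conf[key]
    return night_conf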
| 1,477 | 25.872727 | 73 | py |
SLOPpy | SLOPpy-main/SLOPpy/differential_refraction_preparation.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.plot_subroutines import *
__all__ = ["compute_differential_refraction_preparation"]
def compute_differential_refraction_preparation(config_in, append_name=None):
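    """ Build the reference spectrum used by the differential refraction module.

    All the observations of a night (or only those in the telluric list) are
    rebinned onto a common wavelength grid in the stellar reference frame and
    coadded with inverse-variance weights; optionally, a cross-night or
    cross-instrument reference coadd is assembled as well.
    """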
if append_name:
subroutine_name = 'differential_refraction_preparation_' + append_name
filename = 'refraction_'+append_name
else:
subroutine_name = 'differential_refraction_preparation'
filename = 'refraction'
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
reference_flux = np.empty([len(night_dict), shared_data['coadd']['size']], dtype=np.double)
reference_wght = np.zeros([len(night_dict), shared_data['coadd']['size']], dtype=np.double)
reference_mask = np.ones([len(night_dict), shared_data['coadd']['size']], dtype=bool)
compute_reference = False
""" make sure that all the spectra are computed in the same reference system if cross-calibrations is used"""
absolute_SRF = False
for n_night, night in enumerate(night_dict):
if night_dict[night]['refraction'].get('reference_night', False) \
or night_dict[night]['refraction'].get('reference_instrument', False):
absolute_SRF = True
for n_night, night in enumerate(night_dict):
try:
preparation = load_from_cpickle(filename +'_preparation', config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
instrument = night_dict[night]['instrument']
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'],
use_refraction=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
defined_reference = night_dict[night]['refraction'].get('reference', False)
if defined_reference:
preparation = {
'subroutine': 'differential_refraction_preparation',
'coadd': {
'wave': shared_data['coadd']['wave'],
'step': shared_data['coadd']['step'],
'size': shared_data['coadd']['size']
},
'absolute_SRF': True,
'reference_coadd': True
}
else:
preparation = {
'subroutine': 'differential_refraction_preparation',
'coadd': {
'wave': input_data['coadd']['wave'],
'step': input_data['coadd']['step'],
'size': input_data['coadd']['size'],
},
'absolute_SRF': absolute_SRF,
'reference_coadd': False
}
total_flux = np.empty([len(lists['observations']), preparation['coadd']['size']], dtype=np.double)
total_wght = np.zeros([len(lists['observations']), preparation['coadd']['size']], dtype=np.double)
total_mask = np.ones([len(lists['observations']), preparation['coadd']['size']], dtype=bool)
""" Rebinning of all the spectra """
for n_obs, obs in enumerate(lists['observations']):
print(" Spectral rebinning - Processing: ", obs)
preparation[obs] = {}
""" Rebinning of the spectra in the SRF, except for a fixed constant in order to minimize
the difference between
"""
if preparation['absolute_SRF']:
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
else:
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF_mod']
preserve_flux = input_data[obs].get('absolute_flux', True)
preparation[obs]['flux_rebinned_stellarRF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds'],
calib_data['blaze'],
preparation['coadd']['wave'],
preparation['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=rv_shift)
err_flux_rebinned_SRF = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds_err'],
calib_data['blaze'],
preparation['coadd']['wave'],
preparation['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=rv_shift,
is_error=True)
""" Zero or negative values are identified, flagged and substituted with another value """
#processed[obs]['flux_rebinned_stellarRF'], \
#processed[obs]['err_flux_rebinned_SRF'], \
#processed[obs]['flux_rebinned_SRF_null'] = \
# replace_values_errors_with_interpolation_1d(processed[obs]['flux_rebinned_stellarRF'],
# processed[obs]['err_flux_rebinned_SRF'],
# force_positive=True)
#processed[obs]['rescaling'], processed[obs]['rescaled'], processed[obs]['rescaled_err'] = \
# perform_rescaling(processed['coadd']['wave'],
# processed[obs]['flux_rebinned_stellarRF'],
# processed[obs]['err_flux_rebinned_SRF'],
# observational_pams['wavelength_rescaling'])
#
""" Zero or negative values are identified, flagged and substituted
with another value """
preparation[obs]['flux_rebinned_stellarRF'], \
err_flux_rebinned_SRF, \
flux_rebinned_SRF_null = \
replace_values_errors_with_interpolation_1d(preparation[obs]['flux_rebinned_stellarRF'],
err_flux_rebinned_SRF,
force_positive=True)
preparation[obs]['rescaling'], preparation[obs]['rescaled'], preparation[obs]['rescaled_err'] = \
perform_rescaling(preparation['coadd']['wave'],
preparation[obs]['flux_rebinned_stellarRF'],
err_flux_rebinned_SRF,
observational_pams['wavelength_rescaling'])
#if instrument_dict[instrument]['refraction'].get('reference_night', False):
# if night != instrument_dict[instrument]['refraction']['reference_night']:
# continue
#
#if instrument_dict[instrument]['refraction'].get('reference_instrument', False):
# if instrument != instrument_dict[instrument]['refraction']['reference_instrument']:
# continue
if night_dict[night]['refraction'].get('use_all_observations', False) or obs in lists['telluric']:
total_flux[n_obs, :] = preparation[obs]['rescaled']
total_mask[n_obs, :] = flux_rebinned_SRF_null
total_wght[n_obs, :] = 1. / (preparation[obs]['rescaled_err'] ** 2)
print(" Observation added to reference spectrum")
masked_array = np.ma.array(total_flux, mask=total_mask)
rescaled_mask, sum_weights = np.ma.average(masked_array,
weights=total_wght,
axis=0,
returned=True)
preparation['coadd']['rescaled'] = rescaled_mask.filled(0.00)
sum_weights[sum_weights <= 0.0] = 1.0
preparation['coadd']['rescaled_err'] = 1. / np.sqrt(sum_weights)
preparation['coadd']['rescaled'], preparation['coadd']['rescaled_err'], preparation['coadd']['null'] = \
replace_values_errors_with_interpolation_1d(preparation['coadd']['rescaled'],
preparation['coadd']['rescaled_err'],
force_positive=True)
save_to_cpickle(filename + '_preparation', preparation, config_in['output'], night)
        if defined_reference in (night, instrument, 'all'):
compute_reference = True
reference_flux[n_night, :] = preparation['coadd']['rescaled']
reference_mask[n_night, :] = preparation['coadd']['null']
reference_wght[n_night, :] = 1. / (preparation['coadd']['rescaled_err'] ** 2)
if compute_reference:
reference = {
'wave': shared_data['coadd']['wave'],
'step': shared_data['coadd']['step'],
'size': shared_data['coadd']['size'],
}
masked_array = np.ma.array(reference_flux, mask=reference_mask)
rescaled_mask, sum_weights = np.ma.average(masked_array,
weights=reference_wght,
axis=0,
returned=True)
reference['rescaled'] = rescaled_mask.filled(0.00)
sum_weights[sum_weights <= 0.0] = 1.0
reference['rescaled_err'] = 1. / np.sqrt(sum_weights)
reference['rescaled'], reference['rescaled_err'], reference['null'] = \
replace_values_errors_with_interpolation_1d(reference['rescaled'],
reference['rescaled_err'],
force_positive=True)
save_to_cpickle(filename + '_reference', reference, config_in['output'])
print()
| 10,775 | 47.981818 | 113 | py |
SLOPpy | SLOPpy-main/SLOPpy/sysrem_correction.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.pca_preparation import compute_pca_preparation
from PyAstronomy import pyasl
from scipy.interpolate import UnivariateSpline
__all__ = ['compute_sysrem_correction',
'plot_sysrem_correction']
def compute_sysrem_correction(config_in):
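    """ Remove systematic trends from the stacked e2ds spectra with SysRem
    (Tamuz, Mazeh & Zucker 2005), as implemented in PyAstronomy.

    For each order, n_iter SysRem components are subtracted one at a time; the
    cumulative model up to each iteration is stored together with the data/model
    ratio of every observation, so that downstream modules can choose the
    preferred number of iterations.
    """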
subroutine_name = 'sysrem_correction'
compute_pca_preparation(config_in)
print()
night_dict = from_config_get_nights(config_in)
pca_parameters = from_config_get_pca_parameters(config_in)
for night in night_dict:
try:
sysrem_output = load_from_cpickle('transmission_preparation',
config_in['output'],
night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
        n_iter = pca_parameters.get('iterations', 5)
        ref_iter = pca_parameters.get('ref_iteration', 3)
sysrem_output = {
'subroutine': subroutine_name,
'iterations': n_iter,
'pca_output': True,
'ref_iteration': ref_iter
}
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
preparation = load_from_cpickle('pca_preparation', config_in['output'], night)
n_obs, n_orders, n_pixels = np.shape(preparation['stack_e2ds'])
model_iter = np.ones([n_iter, n_obs, n_orders, n_pixels], dtype=np.double)
model_out = np.ones([n_iter, n_obs, n_orders, n_pixels], dtype=np.double)
for order in range(0, observational_pams['n_orders']):
obs = preparation['stack_e2ds'][:,order,:]
sigs = preparation['stack_e2ds_err'][:,order,:]
sr = pyasl.SysRem(obs, sigs)
previous_residuals = obs.copy()
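            # Each call to iterate() strips one more systematic component from
            # the residuals; the component removed at this iteration is recovered
            # as the difference between consecutive residual maps.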
for it in range(0, n_iter):
r, a, c = sr.iterate()
model_iter[it,:,order,:] = previous_residuals - r
previous_residuals = r.copy()
for it in range(0, n_iter):
# Full model is the sum of all the models until the given iteration
model_out[it,:, :, :] = np.sum(model_iter[:it+1,:, :, :], axis=0)
#import matplotlib.pyplot as plt
#plt.figure()
#plt.title("Model " + repr(it))
#plt.imshow( model_out[it,:, 10, :], origin='lower', aspect="auto")
#plt.show()
it_string = str(it).zfill(2)
sysrem_output[it_string] = {
'model': model_out[it,:, :, :]
}
for i_obs, obs in enumerate(lists['observations']):
sysrem_output[it_string][obs] = {}
sysrem_output[it_string][obs]['ratio'] = preparation['stack_e2ds'][i_obs, :, :]/model_out[it, i_obs, :, :]
sysrem_output[it_string][obs]['ratio_err'] = preparation['stack_e2ds_err'][i_obs, :, :]/model_out[it, i_obs, :, :]
save_to_cpickle('transmission_preparation', sysrem_output, config_in['output'], night)
print()
""" Keep going from here after preparation, unless the subroutines has been called just
to preform the data preparation step
"""
def plot_sysrem_correction(config_in, night_input=''):
subroutine_name = 'transmission_spectrum_preparation'
night_dict = from_config_get_nights(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
# ! To be removed when testing is done
        # ! These plots do not make any sense anymore
input_data = retrieve_observations(config_in['output'], night, lists['observations'])
""" Retrieving the analysis"""
try:
preparation = load_from_cpickle('transmission_preparation', config_in['output'], night)
except:
print("No transmission spectrum results, no plots")
print()
continue
#from SLOPpy.subroutines.lines_fit_functions import logprob_case12
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
len_y = len(lists['observations'])
len_x = 4096
        order = 11
time_from_transit = np.empty(len_y, dtype=np.double)
plot_data = np.empty([len_y, len_x], dtype=np.double)
for i_obs, obs in enumerate(lists['observations']):
print(np.median(preparation[obs]['deblazed'][order ,:]))
time_from_transit[i_obs] = input_data[obs]['BJD'] - observational_pams['time_of_transit']
plot_data[i_obs, :] = preparation[obs]['deblazed'][order ,:]/ np.median(preparation[obs]['ratio'][order ,:])
wave = preparation[obs]['wave'][order, :]
wave_meshgrid, time_meshgrid = np.meshgrid(wave, time_from_transit)
print('COOLWARM')
cmap = plt.get_cmap('coolwarm')
levels = MaxNLocator(nbins=15).tick_values(
plot_data.min(), plot_data.max())
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
plt.figure(figsize=(15, 10))
PCF = plt.contourf(wave_meshgrid, time_meshgrid,
plot_data, levels=levels, cmap=cmap)
cbar = plt.colorbar(PCF)
cbar.ax.set_ylabel('Intensity')
plt.show()
res = plot_data * 1.
from scipy.interpolate import UnivariateSpline
        for ii in range(0, len_x):
spl = UnivariateSpline(time_from_transit, plot_data[:, ii])
val = spl(time_from_transit)
res[:,ii] -= val
res[:,ii] /= val
print('COOLWARM')
cmap = plt.get_cmap('coolwarm')
levels = MaxNLocator(nbins=10).tick_values(
-0.05, 0.05)
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
plt.figure(figsize=(15, 10))
PCF = plt.contourf(wave_meshgrid, time_meshgrid,
res, levels=levels, cmap=cmap)
cbar = plt.colorbar(PCF)
cbar.ax.set_ylabel('Intensity')
plt.show()
""" Creation of the color array, based on the BJD of the observations
"""
colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(1, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
#ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
for obs in lists['transit_in']:
preparation[obs]['rescaling'], \
preparation[obs]['rescaled'], \
preparation[obs]['rescaled_err'] = perform_rescaling(
preparation[obs]['wave'],
preparation[obs]['deblazed'] / (input_data[obs]['step'] / np.median(input_data[obs]['step'])),
preparation[obs]['deblazed_err'] / (input_data[obs]['step'] / np.median(input_data[obs]['step'])),
observational_pams['wavelength_rescaling'])
ax1.scatter(preparation[obs]['wave'],
preparation[obs]['rescaled'],
s=1, alpha=0.25,
color=colors_plot['mBJD'][obs])
sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
| 8372 | 34.935622 | 131 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_template_alternative.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_template_alternative", "plot_telluric_template_alternative"]
def compute_telluric_template_alternative(config_in):
compute_telluric_template_alternative_routine(config_in,
n_iterations=1,
use_berv=False,
use_reference_airmass=False,
use_template=True,
subroutine_name='telluric_template')
def compute_telluric_template_alternative_routine(config_in, **kwargs):
"""
Lazy workaround: a thin wrapper that forwards fixed keyword arguments to the main routine
:param config_in:
:param kwargs:
:return:
"""
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
for night in night_dict:
instrument_name = night_dict[night]['instrument']
print()
print("compute_telluric_template Night: ", night)
print()
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
continue
except:
print(" No telluric correction file found, computing now ")
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations"""
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': kwargs['subroutine_name'],
'n_orders': 0,
'n_pixels': 0,
'telluric': {}
}
telluric = {
'subroutine': kwargs['subroutine_name'],
'reference_frame': 'observer',
'template': {},
'linear': {}
}
# There must be a more elegant way to do this, but I'm not aware of it
""" computation of the rescaled spectra, for later use in the analysis and in the plotting subroutines"""
for obs in lists['observations']:
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
""" for plotting purpose only"""
processed[obs]['wave'] = input_data[obs]['wave']
processed[obs]['e2ds'] = input_data[obs]['e2ds']
processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']
processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
perform_rescaling(input_data[obs]['wave'],
input_data[obs]['e2ds'],
input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
""" Reference airmass for iterative correction of airmass - disabled here"""
processed['airmass_ref'] = 0.000
"""
Definition of the wavelength scale for the output telluric spectrum
We assume that the wavelength solution did not change during the night
"""
obs_reference = lists['observations'][0]
telluric['rebinned'] = {
'wave': input_data[obs_reference]['wave'],
'step': input_data[obs_reference]['step'],
'flux': np.ones(input_data[obs_reference]['step'].shape),
'ferr': np.ones(input_data[obs_reference]['step'].shape)*0.001
}
processed['telluric']['spectrum_noairmass'] = telluric['rebinned']['flux'].copy()
processed['telluric']['spectrum_noairmass_err'] = telluric['rebinned']['ferr'].copy()
processed['n_orders'] = input_data[obs_reference]['orders']
processed['n_pixels'] = input_data[obs_reference]['wave_size']
"""
Computation of the rescaling factor for the telluric template. This factor has no physical meaning: it is just
the ratio of the observed telluric features (in a normalized spectrum) to the input template.
"""
try:
if 'telluric_template' not in instrument_dict[instrument_name]:
raise MissingKeywordException()
template_dict = instrument_dict[instrument_name]['telluric_template']
if template_dict['fit_range'][0] > shared_data['coadd']['wavelength_range'][1] or \
template_dict['fit_range'][1] < shared_data['coadd']['wavelength_range'][0]:
raise OutOfRangeException()
print(' rescaled telluric template')
print(' instrument :', instrument_name)
print(' template :', template_dict['file'])
print(' fit_range :', template_dict['fit_range'])
print()
""" Retrieving the template data"""
telluric_template_data = np.genfromtxt(template_dict['file'])
telluric['template']['input'] = {
'range': [np.amin(telluric_template_data[:, 0]), np.amax(telluric_template_data[:, 0])],
'wave': telluric_template_data[:, 0],
'flux': telluric_template_data[:, 1],
'ferr': telluric_template_data[:, 2],
'step': telluric_template_data[:, 3]
}
processed['template'] = {
'array_wave': [],
'array_data': {},
'array_move': {}
}
""" Again we assume that the wavelength solution did not change during the night """
tel_selection = (input_data[obs_reference]['wave'] > template_dict['fit_range'][0]) & \
(input_data[obs_reference]['wave'] < template_dict['fit_range'][1])
processed['template']['wave_sel'] = input_data[obs_reference]['wave'][tel_selection]
processed['template']['step_sel'] = input_data[obs_reference]['step'][tel_selection]
for obs in lists['telluric']:
processed['template'][obs] = processed[obs]['e2ds_rescaled'][tel_selection]
""" saving the wavelength array for plotting purpose"""
processed['template']['array_wave'].extend(processed['template']['wave_sel'])
rescaling_array = np.arange(0.05, 2.0, 0.05)
computed_std = np.zeros(len(rescaling_array))
for n_rescaling_factor, rescaling_factor in enumerate(rescaling_array):
"""
Template spectrum is rebinned onto the observations wavelength scale, using only the wavelength
range selected for the computation of the rescaling factor
"""
template_rebinned_flux = \
rebin_1d_to_1d(telluric['template']['input']['wave'],
telluric['template']['input']['step'],
telluric['template']['input']['flux'],
processed['template']['wave_sel'],
processed['template']['step_sel'],
preserve_flux=False)
""" Saving the outcome to dictionary """
template_rebinned_flux -= 1.00
template_rebinned_flux *= rescaling_factor
template_rebinned_flux += 1.00
e2ds_corrected = []
for obs in lists['telluric']:
e2ds_corrected.extend(processed['template'][obs] /
np.power(template_rebinned_flux, input_data[obs]['AIRMASS']))
computed_std[n_rescaling_factor] = np.nanstd(e2ds_corrected)
#print(n_rescaling_factor, rescaling_factor, computed_std[n_rescaling_factor])
#plt.scatter(processed['template']['array_wave'], e2ds_corrected)
#plt.plot(processed['template']['wave_sel'], template_rebinned_flux)
#plt.show()
label_dict = '{0}'.format(rescaling_factor)
processed['template']['array_data'][label_dict] = e2ds_corrected
processed['template']['array_move'][label_dict] = rescaling_factor
#plt.scatter(rescaling_array, computed_std)
#plt.show()
""" Selection of the rescaling factor with the lowest scatter """
ind_factor = np.argmin(computed_std)
ind_range = 3
if ind_factor < ind_range:
sel_factor = rescaling_array[0:ind_factor+ind_range]
sel_stdev = computed_std[0:ind_factor+ind_range]
elif ind_factor > len(rescaling_array) - ind_range:
sel_factor = rescaling_array[ind_factor-ind_range:]
sel_stdev = computed_std[ind_factor-ind_range:]
else:
sel_factor = rescaling_array[ind_factor-ind_range:ind_factor+ind_range]
sel_stdev = computed_std[ind_factor-ind_range:ind_factor+ind_range]
coeff = np.polyfit(sel_factor, sel_stdev, 2)
telluric_factor = - coeff[1] / (2*coeff[0])
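# Illustrative check of the vertex formula: for coeff = [a, b, c] returned by
# np.polyfit, the parabola a*x**2 + b*x + c reaches its minimum at x = -b / (2*a),
# e.g. a = 2.0 and b = -1.6 give a minimum at x = 0.4.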
print(' telluric factor: {0:7f}'.format(telluric_factor))
print()
processed['template']['telluric_factor'] = telluric_factor
processed['template']['rescaling_factor'] = sel_factor
processed['template']['computed_std'] = computed_std
processed['template']['polyfit'] = {
'package': 'numpy', # in case we forget what we used...
'order': 2,
'coeff': coeff,
'sel_factor': sel_factor,
'sel_stdev': sel_stdev
}
"""
The template telluric spectrum is rebinned onto the 2D scale of the observations.
Then it is rescaled according to the computed factor
We assume that the wavelength solution did not change during the night
"""
processed['template']['rebinned'] = {}
processed['template']['rebinned']['flux'] = \
rebin_1d_to_2d(telluric['template']['input']['wave'],
telluric['template']['input']['step'],
telluric['template']['input']['flux'],
telluric['rebinned']['wave'],
telluric['rebinned']['step'],
preserve_flux=False)
processed['template']['rebinned']['ferr'] = \
rebin_1d_to_2d(telluric['template']['input']['wave'],
telluric['template']['input']['step'],
telluric['template']['input']['ferr'],
telluric['rebinned']['wave'],
telluric['rebinned']['step'],
preserve_flux=False,
is_error=True)
sel_out_of_range = ~((telluric['rebinned']['wave'] > telluric['template']['input']['range'][0]+1.) \
& (telluric['rebinned']['wave'] < telluric['template']['input']['range'][1]-1.))
processed['template']['rebinned']['flux'][sel_out_of_range] = 1.
processed['template']['rebinned']['ferr'][sel_out_of_range] = 0.1
processed['telluric']['spectrum_noairmass'] = \
(processed['template']['rebinned']['flux'] - 1.) * telluric_factor + 1.0
processed['telluric']['spectrum_noairmass_err'] = processed['template']['rebinned']['ferr'] * telluric_factor
except MissingKeywordException:
print(' *** Missing telluric_template keyword in configuration file ***')
print()
except OutOfRangeException:
print(' *** Wavelength range for the calculation of the rescaling factor is outside the boundaries ***')
print(' Rescaling factor wavelength range: {0:7.2f} to {1:7.2f}'.format(
template_dict['fit_range'][0], template_dict['fit_range'][1]))
print(' Shared data wavelength range : {0:7.2f} to {1:7.2f}'.format(
shared_data['coadd']['wavelength_range'][0],
shared_data['coadd']['wavelength_range'][1]))
print()
"""
Introduction of a second telluric correction, where the telluric spectrum depends linearly on the precipitable
water vapour (PWV). As such, the values stored in the files are the coefficients of the PWV term, while the
baseline is given by the outcome of the previous step, i.e., the spectrum_noairmass array
"""
try:
"""
The algorithm is essentially the same as in the previous step, with the exception of the calculation
of the telluric spectrum at each iteration of the chi-square minimization
"""
if 'telluric_linear_term' not in instrument_dict[instrument_name]:
raise MissingKeywordException()
linear_dict = instrument_dict[instrument_name]['telluric_linear_term']
if linear_dict['fit_range'][0] > shared_data['coadd']['wavelength_range'][1] or \
linear_dict['fit_range'][1] < shared_data['coadd']['wavelength_range'][0]:
raise OutOfRangeException()
print(' PWV-dependent telluric spectrum')
print(' instrument :', instrument_name)
print(' linear :', linear_dict['file'])
print(' fit_range :', linear_dict['fit_range'])
print()
""" Retrieving the linear data"""
telluric_linear_data = np.genfromtxt(linear_dict['file'])
telluric['linear']['input'] = {
'range': [np.amin(telluric_linear_data[:, 0]), np.amax(telluric_linear_data[:, 0])],
'wave': telluric_linear_data[:, 0],
'coef': telluric_linear_data[:, 1],
'cerr': telluric_linear_data[:, 2],
'step': telluric_linear_data[:, 3]
}
processed['linear'] = {
'array_wave': [],
'array_data': {},
'array_move': {}
}
""" Again we assume that the wavelength solution did not change during the night """
tel_selection = (input_data[obs_reference]['wave'] > linear_dict['fit_range'][0]) & \
(input_data[obs_reference]['wave'] < linear_dict['fit_range'][1])
processed['linear']['wave_sel'] = input_data[obs_reference]['wave'][tel_selection]
processed['linear']['step_sel'] = input_data[obs_reference]['step'][tel_selection]
for obs in lists['telluric']:
processed['linear'][obs] = processed[obs]['e2ds_rescaled'][tel_selection]
""" saving the wavelength array for plotting purpose"""
processed['linear']['array_wave'].extend(processed['linear']['wave_sel'])
rescaling_array = 10**np.arange(-1, np.log10(50), 0.1)
computed_std = np.zeros(len(rescaling_array))
for n_rescaling_factor, rescaling_factor in enumerate(rescaling_array):
"""
Template spectrum is rebinned onto the observations wavelength scale, using only the wavelength
range selected for the computation of the rescaling factor
"""
linear_rebinned_flux = \
rebin_1d_to_1d(telluric['linear']['input']['wave'],
telluric['linear']['input']['step'],
telluric['linear']['input']['coef'],
processed['linear']['wave_sel'],
processed['linear']['step_sel'],
preserve_flux=False)
""" Saving the outcome to dictionary """
linear_rebinned_flux *= rescaling_factor
linear_rebinned_flux += processed['telluric']['spectrum_noairmass'][tel_selection]
e2ds_corrected = []
for obs in lists['telluric']:
e2ds_corrected.extend(processed['linear'][obs] /
np.power(linear_rebinned_flux, input_data[obs]['AIRMASS']))
computed_std[n_rescaling_factor] = np.nanstd(e2ds_corrected)
label_dict = '{0}'.format(rescaling_factor)
processed['linear']['array_data'][label_dict] = e2ds_corrected
processed['linear']['array_move'][label_dict] = rescaling_factor
#print(n_rescaling_factor, rescaling_factor, computed_std[n_rescaling_factor])
#plt.scatter(processed['linear']['array_wave'], e2ds_corrected)
#plt.plot(processed['linear']['wave_sel'], linear_rebinned_flux)
#plt.show()
#plt.scatter(rescaling_array, computed_std)
#plt.show()
""" Selection of the PWV value with the lowest scatter """
ind_factor = np.argmin(computed_std)
ind_range = 3
if ind_factor < ind_range:
sel_factor = rescaling_array[0:ind_factor+ind_range]
sel_stdev = computed_std[0:ind_factor+ind_range]
elif ind_factor > len(rescaling_array) - ind_range:
sel_factor = rescaling_array[ind_factor-ind_range:]
sel_stdev = computed_std[ind_factor-ind_range:]
else:
sel_factor = rescaling_array[ind_factor-ind_range:ind_factor+ind_range]
sel_stdev = computed_std[ind_factor-ind_range:ind_factor+ind_range]
coeff = np.polyfit(sel_factor, sel_stdev, 2)
PWV_value = - coeff[1] / (2*coeff[0])
print(' PWV value : {0:7f}'.format(PWV_value))
print()
processed['linear']['PWV_value'] = PWV_value
processed['linear']['PWV_closest'] = sel_factor
processed['linear']['computed_std'] = computed_std
processed['linear']['polyfit'] = {
'package': 'numpy', # in case we forget what we used...
'order': 2,
'coeff': coeff,
'sel_factor': sel_factor,
'sel_stdev': sel_stdev
}
"""
The linear coefficients for the PWV-dependent part are rebinned onto the 2D scale of the observations.
Then the telluric spectrum is computed, using also the baseline from the previous step
"""
processed['linear']['rebinned'] = {}
processed['linear']['rebinned']['coef'] = \
rebin_1d_to_2d(telluric['linear']['input']['wave'],
telluric['linear']['input']['step'],
telluric['linear']['input']['coef'],
telluric['rebinned']['wave'],
telluric['rebinned']['step'],
preserve_flux=False)
processed['linear']['rebinned']['cerr'] = \
rebin_1d_to_2d(telluric['linear']['input']['wave'],
telluric['linear']['input']['step'],
telluric['linear']['input']['cerr'],
telluric['rebinned']['wave'],
telluric['rebinned']['step'],
preserve_flux=False,
is_error=True)
sel_out_of_range = ~((telluric['rebinned']['wave'] > telluric['linear']['input']['range'][0]+1.) \
& (telluric['rebinned']['wave'] < telluric['linear']['input']['range'][1]-1.))
processed['linear']['rebinned']['coef'][sel_out_of_range] = 0.
processed['linear']['rebinned']['cerr'][sel_out_of_range] = 0.1
processed['telluric']['spectrum_noairmass'] += processed['linear']['rebinned']['coef'] * PWV_value
processed['telluric']['spectrum_noairmass_err'] = np.sqrt(
processed['telluric']['spectrum_noairmass_err']**2 +
(processed['linear']['rebinned']['cerr'] * PWV_value)**2)
except MissingKeywordException:
print(' *** Missing telluric_linear_term keyword in configuration file ***')
print()
except OutOfRangeException:
print(' *** Wavelength range for the calculation of the PWV-dependent telluric spectrum is outside the boundaries ***')
print()
for obs in lists['observations']:
""" Correction of telluric lines for the average airmass value, following Wyttenbach et al. 2015 """
processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / \
np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'])
processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / \
np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'])
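# Worked example (illustrative): with a zenith telluric transmission of 0.90 at a given
# pixel, an observation at airmass 2.0 is absorbed by 0.90**2.0 = 0.81, so dividing the
# rescaled flux by np.power(0.90, 2.0) restores the stellar contribution at that pixel.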
for obs in lists['observations']:
# Correction of telluric lines
telluric[obs] = {}
telluric[obs]['spectrum_noairmass'] = processed['telluric']['spectrum_noairmass']
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
telluric[obs]['airmass_ref'] = processed['airmass_ref']
""" Set anomalosly low point to one (e.g. when the template is not computed)"""
telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
telluric[obs]['spectrum'] = np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
telluric[obs]['spline_noairmass'] = telluric[obs]['spectrum_noairmass'].copy()
""" No need to compute the spline approximation since we are already dealing with a very high SNR template"""
telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
""" copy the keyword for future use"""
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
telluric[obs]['telluric_corrected'] = processed[obs]['e2ds_corrected']
telluric[obs]['telluric_corrected_err'] = processed[obs]['e2ds_corrected_err']
save_to_cpickle('telluric', telluric, config_in['output'], night)
save_to_cpickle('telluric_processed', processed, config_in['output'], night)
print()
print("Night ", night, " completed")
def plot_telluric_template_alternative(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
#plt.scatter(rescaling_array, computed_std, c='C0', zorder=1)
#plt.scatter(sel_factor, sel_stdev, c='C1', zorder=2)
#plt.plot(rescaling_array, np.polyval(coeff, rescaling_array))
#plt.plot(rescaling_array, 2*rescaling_array*coeff[0] + coeff[1] )
#plt.plot()
print("plot_telluric_template Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
""" Retrieving the analysis"""
try:
processed = load_from_cpickle('telluric_processed', config_in['output'], night)
telluric = load_from_cpickle('telluric', config_in['output'], night)
except:
print()
print("No telluric correction, no plots")
continue
colors, cmap, line_colors = make_color_array(lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1)
cbax1 = plt.subplot(gs[:, 1])
lift_spectrum = 0.25
for i, obs in enumerate(lists['observations']):
color_array = cmap(i / len(lists['observations']))
_, e2ds_rescaled , _ = \
perform_rescaling(processed[obs]['wave'],
processed[obs]['e2ds'],
processed[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']
e2ds_rescaled_corrected_spline = e2ds_rescaled / telluric[obs]['spline']
for order in range(0, processed[obs]['n_orders']):
if order == 0 and i==0:
ax1.plot(processed[obs]['wave'][order, :],
e2ds_rescaled[order, :],
c=color_array, lw=1, alpha=0.5, label='uncorrected')
ax1.scatter(processed[obs]['wave'][order, :],
e2ds_rescaled_corrected_spectrum[order, :],
s=1, c=np.atleast_2d(color_array), label='corrected')
else:
ax1.plot(processed[obs]['wave'][order, :],
e2ds_rescaled[order, :],
c=color_array, lw=1, alpha=0.5)
ax1.scatter(processed[obs]['wave'][order, :],
e2ds_rescaled_corrected_spectrum[order, :],
s=1, c=np.atleast_2d(color_array))
#ax1.plot(processed[obs]['wave'][order, :],
# e2ds_rescaled[order, :]+lift_spectrum,
# c=color_array, lw=1, alpha=0.5)
#ax1.scatter(processed[obs]['wave'][order, :],
# e2ds_rescaled_corrected_spline[order, :]+lift_spectrum,
# s=1, c=np.atleast_2d(color_array))
ax2.plot(processed[obs]['wave'][order, :],
telluric[obs]['spectrum'][order, :],
c=color_array)
ax2.axhline(1.00, c='k')
#ax2.plot(processed[obs]['wave'][order, :],
# telluric[obs]['spline'][order, :]+lift_spectrum,
# c=color_array)
#ax2.axhline(1.00+lift_spectrum, c='k')
#ax2.plot(input_data['coadd']['wave'],telluric['stellarRF']['spline_eval']+0.1,c='k')
#ax2.scatter(input_data['coadd']['wave'],telluric['stellarRF']['spectrum']+0.1,c='r', s=2)
ax1.legend(loc=3)
ax1.set_title('Night: ' + night)
ax2.set_xlabel('$\lambda$ [$\AA$]')
try:
instrument = night_dict[night]['instrument']
comparison_file = config_in['instruments'][instrument]['telluric_comparison']
comparison_data = np.genfromtxt(comparison_file, skip_header=1)
if comparison_data[0, 0] < 1000.0:
nm2Ang = 10.
else:
nm2Ang = 1.
ax1.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
ax2.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
except:
pass
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show() | 28925 | 45.654839 | 131 | py |
SLOPpy | SLOPpy-main/SLOPpy/prepare_datasets.py | from __future__ import print_function, division
from SLOPpy.instruments.get_data import *
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["prepare_datasets", "plot_dataset"]
def _check_wavelength_rescaling(wave_rescaling, wave_observations):
if wave_rescaling[0] < wave_observations[0] or \
wave_rescaling[1] > wave_observations[1]:
warnings.warn("Valid wavelength rescaling window must be between {0:8.2f} and {1:8.2f}".format(
wave_observations[0], wave_observations[1]))
return False
else:
return True
def _check_coadd_in_shared_data(shared_data, wavelength_range):
if 'coadd' not in shared_data:
shared_data['coadd'] = {
'wavelength_range': wavelength_range[:]
}
shared_data['binned'] = {
'wavelength_range': wavelength_range[:]
}
else:
shared_data['coadd']['wavelength_range'][0] = min(shared_data['coadd']['wavelength_range'][0],
wavelength_range[0])
shared_data['coadd']['wavelength_range'][1] = max(shared_data['coadd']['wavelength_range'][1],
wavelength_range[1])
shared_data['binned']['wavelength_range'][0] = min(shared_data['binned']['wavelength_range'][0],
wavelength_range[0])
shared_data['binned']['wavelength_range'][1] = max(shared_data['binned']['wavelength_range'][1],
wavelength_range[1])
return shared_data
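# Hedged sketch of the wavelength-grid construction used repeatedly below: a regular
# grid at a fixed step, plus the matching step array and size. Names are illustrative.
def _sketch_build_wavelength_grid(wave_min, wave_max, wavelength_step):
    import numpy as np
    wave = np.arange(wave_min, wave_max, wavelength_step, dtype=np.double)
    return {
        'wave': wave,
        'size': np.size(wave),
        'step': np.ones(np.size(wave), dtype=np.double) * wavelength_step
    }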
def prepare_datasets(config_in):
"""
FITS files, telluric list etc. are retrieved at the beginning and converted to a pickle object
to be processed to the next steps in the pipeline
In this way all the changes performed on the fits files are preserved (sky correction, differential correction)
"""
""" config_dictionary: dictionary with all the configuration parameters from config_in
lists_dictionary: for each night, the list of files
"""
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
#try:
# shared_data = load_from_cpickle('shared', config_in['output'])
# loaded_shared_data = True
#except:
# shared_data = {}
# loaded_shared_data = False
if check_existence_cpickle('shared', config_in['output']):
loaded_shared_data = True
else:
shared_data = {}
loaded_shared_data = False
pass_wavelength_rescaling = True
pass_wavelength_master_out = True
for night in night_dict:
print('Processing data for night: ', night)
print()
""" List files are supposed to be in the same directory of the yaml file,
NOT on the archive directory: in this way it is possible to try different
combinations of nights and files without making a mess in the archive """
files_list, files_transit_out, files_transit_in, files_transit_full, files_telluric, files_star_telluric = get_filelists(
night_dict[night])
lists_dictionary = {
'observations': files_list,
'star_telluric': files_star_telluric,
'n_observations': len(files_list),
}
try:
lists_dictionary['transit_out'] = files_transit_out
lists_dictionary['transit_in'] = files_transit_in
lists_dictionary['transit_full'] = files_transit_full
lists_dictionary['n_transit_out'] = len(files_transit_out)
lists_dictionary['n_transit_in'] = len(files_transit_in)
lists_dictionary['n_transit_full'] = len(files_transit_full)
lists_dictionary['telluric'] = files_telluric
lists_dictionary['n_tellurics'] = len(files_telluric)
write_transit_list = False
except:
print(' Input lists for transit in/out not found, proceeding to automatic selection and writing ')
print()
write_transit_list = True
""" Retrieval on instrument characteristics """
instrument = night_dict[night]['instrument']
mask = night_dict[night]['mask']
archive_dir = instrument_dict[instrument]['data_archive']
order_selection = instrument_dict[instrument]['orders']
wavelength_rescaling = instrument_dict[instrument]['wavelength_rescaling']
time_of_transit = night_dict[night]['time_of_transit']
planet_dict = from_config_get_planet(config_in)
star_dict = from_config_get_star(config_in)
print(" # observations: ", lists_dictionary['n_observations'])
try:
print(" # out-transit obs: ", lists_dictionary['n_transit_out'])
print(" # in-transit obs: ", lists_dictionary['n_transit_in'])
except:
pass
print(" Instrument: ", instrument)
print(" Archive DIR: ", archive_dir)
print(" Night: ", night)
print(" Mask: ", mask)
print(" WL rescaling: ", wavelength_rescaling)
print()
try:
if not check_existence_cpickle('input_dataset_fibA', config_in['output'], night):
raise ValueError()
#pass_wavelength_rescaling = _check_wavelength_rescaling(wavelength_rescaling,
# observations_A['coadd']['wavelength_range']
# )
#shared_data = _check_coadd_in_shared_data(shared_data,
# observations_A['coadd']['wavelength_range'])
print(" Input data for night {0:s} successfully retrieved".format(night))
if write_transit_list:
observations_A = load_from_cpickle('input_dataset_fibA', config_in['output'], night)
lists_dictionary = _write_transit_list(observations_A,
lists_dictionary,
night_dict[night],
planet_dict)
save_to_cpickle('lists', lists_dictionary, config_in['output'], night)
print(" List rewritten, be careful however that you may incur in extra problems if they have changed")
try:
observational_parameters = load_from_cpickle('observational_pams', config_in['output'], night)
except:
observations_A = load_from_cpickle('input_dataset_fibA', config_in['output'], night)
observational_parameters = _get_observational_parameters(observations_A,
lists_dictionary,
night_dict[night],
instrument_dict[instrument],
star_dict,
planet_dict)
save_to_cpickle('observational_pams', observational_parameters, config_in['output'], night)
print(" New observational parameters loaded successfully")
for key_name, key_val in observational_parameters['RV_star'].items():
print(" RV star {0:s}: {1}".format(key_name, key_val))
print()
continue
except ValueError:
pass
observations_A = {}
observations_s1d_A = {}
observations_B = {}
calib_data_A = {}
calib_data_B = {}
for obs in lists_dictionary['observations']:
print(" Reading ", obs, " associated files")
observations_A[obs], observations_s1d_A[obs] = \
get_input_data(instrument, archive_dir + night, obs, mask, skip_s1d=False,
order_selection=order_selection)
#""" Zero or negative values are identified, flagged and substituted with another value """
#replacement = 0.01
#observations_A[obs]['null'] = (observations_A[obs]['e2ds'] <= replacement)
#observations_A[obs]['e2ds'][observations_A[obs]['null']] = replacement
""" Negative values are just statistical noise around the null flux points,
removing them would bias the flux level of the sky towards
higher values
We proceed in this way:
- identify the negative values and make a statistics of their
average value
- if present in significant numbers, we assume that the median of their absolute
values corresponds to the noise floor
- we add the noise floor to the error estimate
"""
observations_A[obs]['null'] = (observations_A[obs]['e2ds'] <= 0.0)
if (np.sum(observations_A[obs]['null']) > 30):
observations_A[obs]['noise_floor'] = np.median(np.abs(
observations_A[obs]['e2ds'][observations_A[obs]['null']]))
else:
if observations_A[obs].get('absolute_flux', True):
observations_A[obs]['noise_floor'] = 1.0000
else:
observations_A[obs]['noise_floor'] = 0.00001
observations_A[obs]['e2ds_err'] = np.sqrt(observations_A[obs]['e2ds_err']**2 + observations_A[obs]['noise_floor']**2)
#observations_A[obs]['e2ds_err'] = np.sqrt(observations_A[obs]['e2ds'])
if 'n_orders' not in observations_A or 'n_pixels' not in observations_A:
observations_A['n_orders'] = observations_A[obs]['n_orders']
observations_A['n_pixels'] = observations_A[obs]['n_pixels']
calib_data_A = get_calib_data(instrument, archive_dir + night, obs,
order_selection=order_selection)
""" Updating info on shared data """
if 'coadd' not in observations_A:
observations_A['coadd'] = {
'wavelength_range': [np.min(observations_A[obs]['wave'][0, :]),
np.max(observations_A[obs]['wave'][-1, :])]
}
else:
observations_A['coadd']['wavelength_range'][0] = min(observations_A['coadd']['wavelength_range'][0],
np.min(observations_A[obs]['wave'][0, :]))
observations_A['coadd']['wavelength_range'][1] = max(observations_A['coadd']['wavelength_range'][1],
np.max(observations_A[obs]['wave'][-1, :]))
""" Reading the fiber B counter part
If the target has been observed in ThAr or FP mode, fiber B data will not be accessible
"""
has_fiber_B = False
try:
observations_B[obs], _ = get_input_data(instrument, archive_dir + night, obs, mask,
fiber='B', order_selection=order_selection)
""" Negative values are just statistical noise around the null flux points,
removing them would bias the flux level of the sky towards
higher values
We proceed in this way:
- identify the negative values and make a statistics of their
average value
- if present in significant numbers, we assume that their median value
corresponds to the noise floor
- we add the noise floor to the error estimate
"""
#""" Zero or negative values are identified, flagged and substituted with another value """
#replacement = 0.01
observations_B[obs]['null'] = (observations_B[obs]['e2ds'] <= 0.0)
if (np.sum(observations_B[obs]['null']) > 30):
observations_B[obs]['noise_floor'] = np.median(np.abs(
observations_B[obs]['e2ds'][observations_B[obs]['null']]))
else:
observations_B[obs]['noise_floor'] = 1.
#observations_B[obs]['e2ds'][observations_B[obs]['null']] = replacement
observations_B[obs]['e2ds_err'] = np.sqrt(np.abs(observations_B[obs]['e2ds'])) + observations_B[obs]['noise_floor']
if 'n_orders' not in observations_B or 'n_pixels' not in observations_B:
observations_B['n_orders'] = observations_B[obs]['n_orders']
observations_B['n_pixels'] = observations_B[obs]['n_pixels']
calib_data_B = get_calib_data(instrument, archive_dir + night, obs,
fiber='B', order_selection=order_selection)
has_fiber_B = True
except:
pass
""" Building the base (array of wavelengths) for coadded spectra within the same night """
observations_A['coadd']['wave'] = np.arange(observations_A['coadd']['wavelength_range'][0],
observations_A['coadd']['wavelength_range'][1],
instrument_dict[instrument]['wavelength_step'], dtype=np.double)
observations_A['coadd']['size'] = np.size(observations_A['coadd']['wave'])
observations_A['coadd']['step'] = np.ones(observations_A['coadd']['size'], dtype=np.double) * \
instrument_dict[instrument]['wavelength_step']
print()
print(" Fixing the observation lists if they are missing")
if write_transit_list:
lists_dictionary = _write_transit_list(observations_A,
lists_dictionary,
night_dict[night],
planet_dict)
print()
print(" Computing the RV shift outside the transit, and store it to an additional file for quick access ")
observational_parameters = _get_observational_parameters(observations_A,
lists_dictionary,
night_dict[night],
instrument_dict[instrument],
star_dict,
planet_dict)
print()
for key_name, key_val in observational_parameters['RV_star'].items():
print(" RV star {0:s}: {1:f}".format(key_name, key_val))
print()
print(" Writing dataset files for night ", night, " fiber A")
save_to_cpickle('lists', lists_dictionary, config_in['output'], night)
save_to_cpickle('input_dataset_fibA', observations_A, config_in['output'], night)
save_to_cpickle('input_dataset_s1d_fibA', observations_s1d_A, config_in['output'], night)
save_to_cpickle('calibration_fibA', calib_data_A, config_in['output'], night)
save_to_cpickle('observational_pams', observational_parameters, config_in['output'], night)
if has_fiber_B:
observations_B['coadd'] = {}
observations_B['coadd']['wave'] = observations_A['coadd']['wave'].copy()
observations_B['coadd']['size'] = np.size(observations_B['coadd']['wave'])
observations_B['coadd']['step'] = np.ones(observations_B['coadd']['size'], dtype=np.double) * \
instrument_dict[instrument]['wavelength_step']
print(" Writing dataset files for night ", night, " fiber B")
save_to_cpickle('input_dataset_fibB', observations_B, config_in['output'], night)
save_to_cpickle('calibration_fibB', calib_data_B, config_in['output'], night)
""" Running some checks to see if input parameters have been configured properly """
wavelength_rescaling = instrument_dict[instrument]['wavelength_rescaling']
pass_wavelength_rescaling = _check_wavelength_rescaling(
wavelength_rescaling,
observations_A['coadd']['wavelength_range']
)
print()
if loaded_shared_data: continue
""" Setting up the base arrays for all the nights"""
shared_data = _check_coadd_in_shared_data(shared_data, observations_A['coadd']['wavelength_range'])
if loaded_shared_data: return
""" Building the base (array of wavelengths) for master-out and coadd spectra
We do it now to be sure that it will be the same for the whole pipeline
"""
print(" Creating the shared arrays")
print()
shared_data['coadd']['wavelength_range'][0] += 2.0
shared_data['coadd']['wavelength_range'][1] -= 2.0
shared_data['coadd']['wave'] = np.arange(shared_data['coadd']['wavelength_range'][0],
shared_data['coadd']['wavelength_range'][1],
config_in['instruments']['shared']['wavelength_step'], dtype=np.double)
shared_data['coadd']['size'] = np.size(shared_data['coadd']['wave'])
shared_data['coadd']['step'] = np.ones(shared_data['coadd']['size'], dtype=np.double) * \
config_in['instruments']['shared']['wavelength_step']
shared_data['binned']['wave'] = np.arange(shared_data['coadd']['wavelength_range'][0],
shared_data['coadd']['wavelength_range'][1],
config_in['instruments']['shared']['wavelength_step']
* config_in['master-out']['binning_factor'], dtype=np.double)
shared_data['binned']['size'] = np.size(shared_data['binned']['wave'])
shared_data['binned']['step'] = np.ones(shared_data['binned']['size'], dtype=np.double) * \
config_in['instruments']['shared']['wavelength_step'] * \
config_in['master-out']['binning_factor']
if config_in['master-out']['wavelength_range'][0] < shared_data['coadd']['wavelength_range'][0] or \
config_in['master-out']['wavelength_range'][1] > shared_data['coadd']['wavelength_range'][1]:
warnings.warn("ERROR: Valid master_out wavelength window must be between {0:8.2f} and {1:8.2f}".format(
shared_data['coadd']['wavelength_range'][0],
shared_data['coadd']['wavelength_range'][1]))
pass_wavelength_master_out = False
shared_data['master-out'] = {}
shared_data['master-out']['wave'] = np.arange(config_in['master-out']['wavelength_range'][0],
config_in['master-out']['wavelength_range'][1],
config_in['master-out']['wavelength_step'], dtype=np.double)
shared_data['master-out']['size'] = np.size(shared_data['master-out']['wave'])
shared_data['master-out']['step'] = np.ones(shared_data['master-out']['size'], dtype=np.double) * \
config_in['master-out']['wavelength_step']
if not (pass_wavelength_master_out and pass_wavelength_rescaling):
raise ValueError("ERROR: check the previous warnings to see where you are doing it worng")
print(" COADD wavelength range between {0:8.2f} and {1:8.2f}".format(
shared_data['coadd']['wavelength_range'][0], shared_data['coadd']['wavelength_range'][1]))
print(" COADD wavelength step: {0:5.3f}".format(config_in['instruments']['shared']['wavelength_step']))
print()
print("Saving shared data")
save_to_cpickle('shared', shared_data, config_in['output'])
print()
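# Hedged sketch of the noise-floor estimate applied to each fiber-A observation above:
# when enough pixels have negative flux, the median of their absolute values is taken
# as the noise floor and added in quadrature to the formal errors. The threshold and
# the default floor mirror the values used above but are illustrative.
def _sketch_noise_floor(e2ds, e2ds_err, min_pixels=30, default_floor=1.0):
    import numpy as np
    negative = (e2ds <= 0.0)
    if np.sum(negative) > min_pixels:
        floor = np.median(np.abs(e2ds[negative]))
    else:
        floor = default_floor
    return np.sqrt(e2ds_err ** 2 + floor ** 2)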
def _write_transit_list(observations_A, lists_dict, night_dict_key, planet_dict):
fileout_transit_in_list = open(night_dict_key['in_transit'], 'w')
fileout_transit_out_list = open(night_dict_key['out_transit'], 'w')
fileout_transit_full_list = open(night_dict_key['full_transit'], 'w')
try:
total_transit_start = np.atleast_1d(night_dict_key['time_of_transit'])[0] - np.atleast_1d(planet_dict['total_transit_duration'])[0] / 2.
total_transit_end = np.atleast_1d(night_dict_key['time_of_transit'])[0] + np.atleast_1d(planet_dict['total_transit_duration'])[0] / 2.
full_transit_start = np.atleast_1d(night_dict_key['time_of_transit'])[0] - np.atleast_1d(planet_dict['full_transit_duration'])[0] / 2.
full_transit_end = np.atleast_1d(night_dict_key['time_of_transit'])[0] + np.atleast_1d(planet_dict['full_transit_duration'])[0] / 2.
except KeyError:
total_transit_start = np.atleast_1d(night_dict_key['time_of_transit'])[0] - np.atleast_1d(planet_dict['transit_duration'])[0] / 2.
total_transit_end = np.atleast_1d(night_dict_key['time_of_transit'])[0] + np.atleast_1d(planet_dict['transit_duration'])[0] / 2.
full_transit_start = np.atleast_1d(night_dict_key['time_of_transit'])[0] - np.atleast_1d(planet_dict['transit_duration'])[0] / 2.
full_transit_end = np.atleast_1d(night_dict_key['time_of_transit'])[0] + np.atleast_1d(planet_dict['transit_duration'])[0] / 2.
print('*** unclear transit duration, ingress/egress observations will be considered full-transit')
for obs in lists_dict['observations']:
""" Check if the file should be in transit_in or transit_out list, in case they are not present"""
#phase_internal = (observations_A[obs]['BJD'] - np.atleast_1d(night_dict_key['time_of_transit'])[0]) / \
# np.atleast_1d(planet_dict['period'])[0]
#if np.abs(phase_internal) <= planet_dict['transit_duration'][0] / 2. / planet_dict['period'][0]:
# fileout_transit_in_list.write('{0:s}\n'.format(obs))
#else:
# fileout_transit_out_list.write('{0:s}\n'.format(obs))
exptime_days = observations_A[obs]['EXPTIME'] / 86400.
"""BJD times have been already corrected to match mid-exposure epochs """
if observations_A[obs]['BJD'] + exptime_days/2. < total_transit_start \
or observations_A[obs]['BJD'] - exptime_days/2. > total_transit_end:
fileout_transit_out_list.write('{0:s}\n'.format(obs))
else:
fileout_transit_in_list.write('{0:s}\n'.format(obs))
if observations_A[obs]['BJD'] - exptime_days/2. > full_transit_start \
and observations_A[obs]['BJD'] + exptime_days/2. < full_transit_end:
fileout_transit_full_list.write('{0:s}\n'.format(obs))
fileout_transit_in_list.close()
fileout_transit_out_list.close()
fileout_transit_full_list.close()
files_list, files_transit_out, files_transit_in, files_transit_full, files_telluric, files_star_telluric = get_filelists(night_dict_key)
lists_dict['transit_out'] = files_transit_out
lists_dict['transit_in'] = files_transit_in
lists_dict['transit_full'] = files_transit_full
lists_dict['n_transit_out'] = np.size(files_transit_out)
lists_dict['n_transit_in'] = np.size(files_transit_in)
lists_dict['n_transit_full'] = np.size(files_transit_full)
try:
lists_dict['telluric'] = files_telluric
lists_dict['n_tellurics'] = np.size(files_telluric)
except:
lists_dict['telluric'] = files_transit_out.copy()
lists_dict['n_tellurics'] = np.size(files_transit_out)
print(" # observations: ", lists_dict['n_observations'])
print(" # out-transit obs: ", lists_dict['n_transit_out'])
print(" # in-transit obs: ", lists_dict['n_transit_in'])
print(" # full-transit obs: ", lists_dict['n_transit_full'])
return lists_dict
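# Hedged sketch of the mid-exposure overlap test used above: an exposure is
# out-of-transit only if it ends before ingress or starts after egress, and
# full-transit only if it lies entirely inside the full-transit window. Collapsed to a
# single label here for clarity; the routine above also writes full-transit exposures
# to the in-transit list. All epochs and durations are in days; names are illustrative.
def _sketch_classify_exposure(bjd_mid, exptime_days, t_start, t_end, f_start, f_end):
    if bjd_mid + exptime_days / 2. < t_start or bjd_mid - exptime_days / 2. > t_end:
        return 'transit_out'
    if bjd_mid - exptime_days / 2. > f_start and bjd_mid + exptime_days / 2. < f_end:
        return 'transit_full'
    return 'transit_in'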
def _get_observational_parameters(observations_A, lists_dict, night_dict_key, instrument_dict_key, star_dict, planet_dict):
observational_parameters = {
'instrument': night_dict_key['instrument'],
'mask': night_dict_key['mask'],
'archive_dir': instrument_dict_key['data_archive'],
'wavelength_rescaling': instrument_dict_key['wavelength_rescaling'],
'time_of_transit': np.atleast_1d(night_dict_key['time_of_transit'])[0],
'refraction_method': instrument_dict_key['refraction']['method'],
'refraction_fit_order': instrument_dict_key['refraction']['fit_order'],
'refraction_fit_iters': instrument_dict_key['refraction']['fit_iters'],
'refraction_fit_sigma': instrument_dict_key['refraction']['fit_sigma'],
'refraction_knots_spacing': instrument_dict_key['refraction']['knots_spacing'],
'linear_fit_method': instrument_dict_key['linear_fit_method'],
'n_orders': observations_A['n_orders'],
'n_pixels': observations_A['n_pixels'],
'RV_star': {}
}
rv_out = []
bjd0_out = []
for obs in lists_dict['transit_out']:
bjd0_out.extend([observations_A[obs]['BJD'] - observational_parameters['time_of_transit']])
rv_out.extend([observations_A[obs]['RVC']])
observational_parameters['RV_star']['slope'], \
observational_parameters['RV_star']['intercept'], \
observational_parameters['RV_star']['r_value'], \
observational_parameters['RV_star']['p_value'], \
observational_parameters['RV_star']['std_err'] = sci_stats.linregress(bjd0_out, rv_out)
berv_list = []
rvc_stack = np.zeros(len(lists_dict['observations']))
for i, obs in enumerate(lists_dict['observations']):
berv_list.extend([observations_A[obs]['BERV']])
observational_parameters[obs] = {
'BJD': observations_A[obs]['BJD'],
'mBJD': observations_A[obs]['BJD']-2450000.0000,
'RVC': observations_A[obs]['RVC'],
'AIRMASS': observations_A[obs]['AIRMASS'],
'BERV': observations_A[obs]['BERV'],
'EXPTIME': observations_A[obs]['EXPTIME']
}
rvc_stack[i] = observations_A[obs]['RVC']
observational_parameters['BERV_avg'] = np.average(berv_list)
observational_parameters['RV_star']['RV_from_CCF'] = False
observational_parameters['RV_star']['RV_from_analytical_solution'] = False
if night_dict_key['use_rv_from_ccf']:
observational_parameters['RV_star']['RV_from_CCF'] = True
rvc_systemic = np.average(rvc_stack)
elif night_dict_key['use_analytical_rvs']:
observational_parameters['RV_star']['RV_from_analytical_solution'] = True
rvc_systemic = star_dict['RV_gamma'][0]
observational_parameters['RV_star']['RV_semiamplitude'] = star_dict['RV_semiamplitude'][0]
else:
rvc_systemic = observational_parameters['RV_star']['intercept']
observational_parameters['RV_star']['RV_systemic'] = rvc_systemic
for obs in lists_dict['observations']:
if night_dict_key['use_rv_from_ccf']:
rvc_bjdshift = observations_A[obs]['RVC']
elif night_dict_key['use_analytical_rvs']:
rvc_bjdshift = - observational_parameters['RV_star']['RV_semiamplitude'] * np.sin(2 * np.pi * \
(observational_parameters[obs]['BJD'] - observational_parameters['time_of_transit'])/planet_dict['period'][0])
else:
rvc_bjdshift = observational_parameters['RV_star']['slope'] * \
(observational_parameters[obs]['BJD'] - observational_parameters['time_of_transit'])
observational_parameters[obs]['RV_bjdshift'] = rvc_bjdshift
observational_parameters[obs]['rv_shift_ORF2SRF'] = observational_parameters[obs]['BERV'] - \
(rvc_systemic + rvc_bjdshift)
""" Old definition
observational_parameters[obs]['rv_shift_ORF2SRF'] = observational_parameters[obs]['BERV'] - \
(observational_parameters['RV_star']['intercept'] +
observational_parameters['RV_star']['slope'] *
(observational_parameters[obs][
'BJD'] - time_of_transit))
"""
""" Slight modification of the RV shift to minimize the rebinning error at the wings of the spectra
BRF = Solar System Barycentric Reference Frame
rv_shift_ORF2BRF = rv_shift_ORF2BRF_mod + BERV_avg
"""
observational_parameters[obs]['rv_shift_ORF2BRF'] = \
observational_parameters[obs]['BERV']
observational_parameters[obs]['rv_shift_ORF2BRF_mod'] = \
observational_parameters[obs]['BERV'] - observational_parameters['BERV_avg']
""" Slight modification of the RV shift to minimize the rebinning error at the wings of the spectra
rv_shift_ORF2SRF = rv_shift_ORF2SRF_mod + rv_shift_ORF2SRF_res
"""
observational_parameters[obs]['rv_shift_ORF2SRF_mod'] = \
observational_parameters[obs]['BERV'] - observational_parameters['BERV_avg'] - rvc_bjdshift
observational_parameters[obs]['rv_shift_ORF2SRF_res'] = \
observational_parameters['BERV_avg'] - rvc_systemic
""" RV shift from the observer RF to the planet RF
STRONG ASSUMPTIONS:
- there is only the transiting planet in the system
- the planet has null eccentricity
- linear approximation of the orbit near the transit event
Computation is performed by moving to the Solar System Barycenter, then to the Stellar System Barycenter
and finally onto the planet
"""
observational_parameters[obs]['rv_shift_ORF2PRF'] = \
observational_parameters[obs]['BERV'] \
- rvc_systemic \
- planet_dict['RV_semiamplitude'][0] \
* (observational_parameters[obs]['BJD'] - observational_parameters['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
""" RV shift from Stellar Rest Frame to Planetary Rest Frame
We have to take into account the RV of star relatively to the Barycenter
"""
observational_parameters[obs]['rv_shift_SRF2PRF'] = \
+ rvc_bjdshift \
- planet_dict['RV_semiamplitude'][0] \
* (observational_parameters[obs]['BJD'] - observational_parameters['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
observational_parameters['rv_shift_ORF2SRF_res'] = \
observational_parameters['BERV_avg'] - rvc_systemic
return observational_parameters
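# Hedged sketch of the rest-frame shift composition computed above, under the same
# assumptions (single planet, circular orbit, orbit linearized around mid-transit).
# Velocities in km/s, epochs and period in days; names are illustrative.
def _sketch_rv_shift_orf2prf(berv, rv_systemic, k_planet, bjd, t0, period):
    import numpy as np
    # observer -> Solar System barycenter (BERV), barycenter -> star (systemic RV),
    # star -> planet (linearized planetary orbital velocity around mid-transit)
    return berv - rv_systemic - k_planet * (bjd - t0) / period * 2 * np.pi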
def plot_dataset(config_in, night_input=''):
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_dict:
print()
print("Plotting dataset Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
instrument = night_dict[night]['instrument']
wavelength_rescaling = instrument_dict[instrument]['wavelength_rescaling']
""" Retrieving the observations"""
input_data = load_from_cpickle('input_dataset_fibA', config_in['output'], night)
""" Retrieving the calibration data """
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
colors, cmap, line_colors = make_color_array(lists, input_data)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
for i, obs in enumerate(lists['observations']):
rescaling = compute_rescaling(input_data[obs]['wave'], input_data[obs]['e2ds'], wavelength_rescaling)
for order in range(0,input_data[obs]['n_orders']):
ax1.plot(input_data[obs]['wave'][order,:], input_data[obs]['e2ds'][order,:]/ rescaling, zorder=i, lw=1,
c=line_colors[i], alpha=0.5)
ax2.plot(input_data[obs]['wave'][order,:], input_data[obs]['e2ds'][order,:] / rescaling, zorder=-i, lw=1,
c=line_colors[i], alpha=0.5)
ax1.legend(loc=3)
ax1.set_title('Night: ' + night)
ax2.set_xlabel('$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
| 33511 | 49.092676 | 144 | py |
SLOPpy | SLOPpy-main/SLOPpy/master_out.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from astropy.convolution import convolve, Box1DKernel
__all__ = ['compute_master_out', 'plot_master_out', 'plot_compare_master_out']
subroutine_name = 'master_out'
def compute_master_out(config_in):
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
master_out_composite = {
'subroutine': 'master_out',
'wave': shared_data['coadd']['wave'],
'step': shared_data['coadd']['step'],
'size': shared_data['coadd']['size'],
}
wmean_wflux = np.zeros(master_out_composite['size'])
wmean_weight = np.zeros(master_out_composite['size'])
box_kernel = Box1DKernel(config_in['master-out'].get('boxcar_smoothing', 1))
for night in night_dict:
try:
master_out = load_from_cpickle('master_out', config_in['output'], night)
wmean_wflux += master_out['rescaled'] / master_out['rescaled_err'] ** 2
wmean_weight += 1. / master_out['rescaled_err'] ** 2
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': 'master_out'
}
master_out = {
'subroutine': subroutine_name,
'wave': shared_data['coadd']['wave'],
'step': shared_data['coadd']['step'],
'size': shared_data['coadd']['size'],
'total_flux': np.zeros(shared_data['coadd']['size'], dtype=np.double),
'total_flux_err': np.zeros(shared_data['coadd']['size'], dtype=np.double)
}
for obs in lists['transit_out']:
processed[obs] = {}
processed[obs]['rescaling'], \
processed[obs]['rescaled'], \
processed[obs]['rebinned_err'] = perform_rescaling(
input_data[obs]['wave'], input_data[obs]['e2ds'], input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['rebinned'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds'],
calib_data['blaze'],
master_out['wave'],
master_out['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'])
processed[obs]['rebinned_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds_err'],
calib_data['blaze'],
master_out['wave'],
master_out['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'])
master_out['total_flux'] += processed[obs]['rebinned']
master_out['total_flux_err'] += processed[obs]['rebinned_err']**2.0
master_out['total_flux_err'] = np.sqrt(master_out['total_flux_err'])
master_out['rescaling'], \
master_out['rescaled'], \
master_out['rescaled_err'] = perform_rescaling(
master_out['wave'], master_out['total_flux'],
master_out['total_flux_err'],
observational_pams['wavelength_rescaling'])
master_out['rescaled'], master_out['rescaled_err'], master_out['null'] = \
replace_values_errors(master_out['rescaled'], master_out['rescaled_err'],
threshold=0.0001, replacement=1.0000)
master_out['smoothed'] = convolve(master_out['rescaled'].copy(), box_kernel)
master_out['smoothed_err'] = np.sqrt(convolve((master_out['rescaled_err'])**2, box_kernel))
sel = (master_out['smoothed']<0.01) | (master_out['smoothed']>1.5)
master_out['smoothed'][sel] = 1.0
master_out['smoothed_err'][sel] = 1.0
selection = (master_out['wave']>0)
spline_iter = 5
for n_iter in range(0, spline_iter):
residuals = master_out['rescaled'] / master_out['smoothed']
wave = master_out_composite['wave']
""" picking the number of knots """
nknots = ((np.amax(wave) - np.amin(wave)) / config_in['master-out'].get('spline_step', 0.10))
""" picking the indices of the knots"""
idx_knots = (np.arange(1, len(wave[selection]) - 1, (len(wave[selection]) - 2.) / nknots)).astype('int')
""" passing from indices to knots values """
knots = wave[selection][idx_knots]
coeff = sci_int.splrep(wave[selection], residuals[selection], task=-1, k=2, t=knots)
spline = sci_int.splev(wave, coeff)
dif = residuals - spline
std = np.std(dif)
selection = np.where(np.abs(dif) < 4 * std) # & (refraction[obs]['flag'])
master_out['spline'] = spline
master_out['smoothed'] *= spline
master_out['smoothed_err'] *= spline
master_out['SRF'] = {}
master_out['SRF']['rescaled']= \
rebin_1d_to_1d(master_out['wave'],
master_out['step'],
master_out['rescaled'],
master_out['wave'], master_out['step'],
rv_shift=observational_pams['rv_shift_ORF2SRF_res'],
preserve_flux=False)
master_out['SRF']['rescaled_err']= \
rebin_1d_to_1d(master_out['wave'],
master_out['step'],
master_out['rescaled_err'],
master_out['wave'], master_out['step'],
rv_shift=observational_pams['rv_shift_ORF2SRF_res'],
preserve_flux=False,
is_error=True)
wmean_wflux += master_out['SRF']['rescaled']/master_out['SRF']['rescaled_err']**2
wmean_weight += 1. / master_out['SRF']['rescaled_err']**2
"""
rv_shift = observational_pams['BERV_avg'] - observational_pams['RV_star']['intercept']
# bringing the master-out to the absolute reference system
wave_shifted, _ = shift_wavelength(master_out['wave'], master_out['step'], rv_shift)
# master-out is printed to .dat for compatibility with other programs
master_data_out = get_filename('master_out', config_in['output'], night, extension=".dat")
file_out = open(master_data_out, 'w')
for w, f, e in zip(wave_shifted, master_out['rescaled'], master_out['rescaled_err']):
file_out.write('{0:10.4f} {1:f} {2:f}\n'.format(w,f,e))
file_out.close()
print()
print("NON OPTIMAL MASTER-OUT DAT FILE!!!!")
print()
"""
save_to_cpickle('master_out_processed', processed, config_in['output'], night)
save_to_cpickle('master_out', master_out, config_in['output'], night)
master_out_composite['SRF'] = {}
master_out_composite['SRF']['rescaled'] = wmean_wflux/wmean_weight
master_out_composite['SRF']['rescaled_err'] = np.sqrt(1./wmean_weight)
master_out_composite['SRF']['smoothed'] = convolve(master_out_composite['SRF']['rescaled'].copy(), box_kernel)
master_out_composite['SRF']['smoothed_err'] = \
np.sqrt(convolve((master_out_composite['SRF']['rescaled_err']) ** 2, box_kernel))
print()
for night in night_dict:
try:
master_out_composite = load_from_cpickle('master_out_composite', config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format('master_out_composite', night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format('master_out_composite', night, 'Computing'))
print()
master_out = load_from_cpickle('master_out', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
master_out_composite['rescaled']= \
rebin_1d_to_1d(master_out_composite['wave'],
master_out_composite['step'],
master_out_composite['SRF']['rescaled'],
master_out_composite['wave'],
master_out_composite['step'],
rv_shift=-observational_pams['rv_shift_ORF2SRF_res'],
preserve_flux=False)
master_out_composite['rescaled_err']= \
rebin_1d_to_1d(master_out_composite['wave'],
master_out_composite['step'],
master_out_composite['SRF']['rescaled_err'],
master_out_composite['wave'],
master_out_composite['step'],
rv_shift=-observational_pams['rv_shift_ORF2SRF_res'],
preserve_flux=False,
is_error=True)
master_out_composite['smoothed'] = \
rebin_1d_to_1d(master_out_composite['wave'],
master_out_composite['step'],
master_out_composite['SRF']['smoothed'],
master_out_composite['wave'],
master_out_composite['step'],
rv_shift=-observational_pams['rv_shift_ORF2SRF_res'],
preserve_flux=False)
master_out_composite['smoothed_err']= \
rebin_1d_to_1d(master_out_composite['wave'],
master_out_composite['step'],
master_out_composite['SRF']['smoothed_err'],
master_out_composite['wave'],
master_out_composite['step'],
rv_shift=-observational_pams['rv_shift_ORF2SRF_res'],
preserve_flux=False,
is_error=True)
#master_out_composite['smoothed'] = convolve(master_out_composite['rescaled'].copy(), box_kernel)
#master_out_composite['smoothed_err'] = \
# np.sqrt(convolve((master_out_composite['rescaled_err']) ** 2, box_kernel))
sel = (master_out_composite['smoothed']<0.01) | (master_out_composite['smoothed']>1.5)
master_out_composite['smoothed'][sel] = 1.0
master_out_composite['smoothed_err'][sel] = 1.0
selection = (master_out_composite['wave']>0)
spline_iter = 5
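        # iterative spline fit with sigma clipping: at every pass a low-order
        # spline is fitted on the currently selected points, and points whose
        # residuals exceed 4 standard deviations are excluded from the next fit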
for n_iter in range(0, spline_iter):
residuals = master_out['rescaled'] / master_out_composite['smoothed']
wave = master_out_composite['wave']
""" picking the number of knots """
nknots = ((np.amax(wave) - np.amin(wave)) / config_in['master-out'].get('spline_step', 0.10))
""" picking the indices of the knots"""
idx_knots = (np.arange(1, len(wave[selection]) - 1, (len(wave[selection]) - 2.) / nknots)).astype('int')
""" passing from indices to knots values """
knots = wave[selection][idx_knots]
coeff = sci_int.splrep(wave[selection], residuals[selection], task=-1, k=2, t=knots)
spline = sci_int.splev(wave, coeff)
dif = residuals - spline
std = np.std(dif)
selection = np.where(np.abs(dif) < 4 * std) # & (refraction[obs]['flag'])
master_out_composite['spline'] = spline
master_out_composite['smoothed'] *= spline
master_out_composite['smoothed_err'] *= spline
save_to_cpickle('master_out_composite', master_out_composite, config_in['output'], night)
def plot_master_out(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
if night_input=='':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
print("plot_master_out Night: ", night)
""" Retrieving the analysis"""
try:
master_out = load_from_cpickle('master_out', config_in['output'], night)
master_out_composite = load_from_cpickle('master_out_composite', config_in['output'], night)
except:
print()
print("No master_out , no plots")
continue
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
plt.figure(figsize=(12, 6))
plt.title('Master out - night ' + night)
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'])
obs = lists['transit_out'][0]
plt.scatter(input_data[obs]['wave'],
calib_data['blaze'],
color='C3', zorder=3., label='blaze', alpha=0.25)
plt.errorbar(master_out['wave'],
master_out['rescaled'],
yerr=master_out['rescaled_err'],
fmt='.', c='C0', label='master-out ' + night)
plt.plot(master_out['wave'],
master_out['smoothed'],
color='C1', zorder=3., label='smoothed master-out ' + night)
plt.scatter(master_out_composite['wave'],
master_out_composite['rescaled'],
s=2, c='C3')
plt.plot(master_out_composite['wave'],
master_out_composite['smoothed'],
c='C3', label='composite master-out')
plt.scatter(master_out['wave'],
master_out['rescaled']/master_out['smoothed']*master_out['spline']+0.05,
s=2, c='C4', label='rescaled/smoothed')
plt.scatter(master_out['wave'],
master_out['rescaled']/master_out_composite['smoothed']*master_out_composite['spline']+0.1,
s=2, c='C5', label='rescaled/ comp smoothed')
plt.plot(master_out['wave'],
master_out['spline']+0.05,
c='C7', label='spline fit of the residuals')
plt.plot(master_out['wave'],
master_out_composite['spline']+0.1,
c='C7')
plt.ylim(0, 1.25)
        plt.xlabel(r'$\lambda$ [$\AA$]')
plt.ylabel('Rescaled flux')
plt.legend()
plt.show()
def plot_compare_master_out(config_in):
plt.figure(figsize=(12, 6))
plt.title('Master out - comparison between nights ')
night_dict = from_config_get_nights(config_in)
for i, night in enumerate(night_dict):
""" Retrieving the analysis"""
try:
master_out = load_from_cpickle('master_out', config_in['output'], night)
master_out_composite = load_from_cpickle('master_out_composite', config_in['output'], night)
except:
continue
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
plt.errorbar(master_out['wave'],
master_out['SRF']['rescaled'],
yerr=master_out['SRF']['rescaled_err'],
fmt='.', c='C'+repr(i), label='master-out ' + night, alpha=0.5)
if i == 0:
plt.plot(master_out_composite['wave'],
master_out_composite['SRF']['rescaled'],
color='k', zorder=10, label='composite master-out')
plt.ylim(0, 1.25)
    plt.xlabel(r'$\lambda$ [$\AA$]')
plt.ylabel('Rescaled flux')
plt.legend()
plt.show() | 16,971 | 43.197917 | 116 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_template.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_template",
"plot_telluric_template",
"compute_telluric_template_reference",
"plot_telluric_template_reference"]
def compute_telluric_template(config_in):
compute_telluric_template_routine(config_in,
n_iterations=1,
use_berv=True,
use_reference_airmass=False,
use_template=True,
subroutine_name='telluric_template')
def compute_telluric_template_reference(config_in):
compute_telluric_template_routine(config_in,
n_iterations=1,
use_berv=True,
use_reference_airmass=True,
use_template=True,
subroutine_name='telluric_template')
def plot_telluric_template_reference(config_in, night_input=''):
plot_telluric_template(config_in, night_input=night_input)
def compute_telluric_template_routine(config_in, **kwargs):
"""
Lazy workaround
:param config_in:
:param kwargs:
:return:
"""
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
for night in night_dict:
instrument_name = night_dict[night]['instrument']
template_dict = instrument_dict[instrument_name]['telluric_template']
print()
print("compute_telluric_template Night: ", night)
print()
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
continue
except:
print(" No telluric correction file found, computing now ")
print()
print(' instrument :', instrument_name)
print(' template :', template_dict['file'])
print(' fit_range :', template_dict['fit_range'])
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations"""
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': kwargs['subroutine_name'],
'n_orders': 0,
'n_pixels': 0
}
telluric = {
'subroutine': kwargs['subroutine_name'],
'reference_frame': 'observer'
}
""" Retrieving the template data"""
telluric_template_data = np.genfromtxt(template_dict['file'])
obs_reference = lists['observations'][0]
telluric['template'] = {
'input': {
                'range': [np.amin(telluric_template_data[:, 0]), np.amax(telluric_template_data[:, 0])],
'wave': telluric_template_data[:, 0],
'flux': telluric_template_data[:, 1],
'ferr': telluric_template_data[:, 2],
'step': telluric_template_data[:, 3]
},
'rebinned':{
'wave': input_data[obs_reference]['wave'],
'step': input_data[obs_reference]['step']
}
}
""" Reference airmass for iterative correction of airmass"""
if kwargs['use_reference_airmass']:
airmass_temp = np.zeros(lists['n_transit_in'])
for n_obs, obs in enumerate(lists['transit_in']):
# This is to ensure that airmass, berv and rvc are associated to the correct spectra
airmass_temp[n_obs] = input_data[obs]['AIRMASS']
processed['airmass_ref'] = np.average(airmass_temp)
else:
processed['airmass_ref'] = 0.000
processed['telluric'] = {}
airmass = np.zeros(lists['n_observations'], dtype=np.double)
berv = np.zeros(lists['n_observations'], dtype=np.double)
rvc = np.zeros(lists['n_observations'], dtype=np.double)
        # There must be a more elegant way to do this, but I'm not aware of it
for n_obs, obs in enumerate(lists['observations']):
tel_selection = (input_data[obs]['wave'] > template_dict['fit_range'][0]) & \
(input_data[obs]['wave'] < template_dict['fit_range'][1])
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
""" for plotting purpose only"""
processed[obs]['wave'] = input_data[obs]['wave']
processed[obs]['e2ds'] = input_data[obs]['e2ds']
processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']
processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
perform_rescaling(input_data[obs]['wave'],
input_data[obs]['e2ds'],
input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
processed[obs]['e2ds_sel'] = processed[obs]['e2ds_rescaled'][tel_selection]
if processed['n_orders'] == 0:
processed['wave_sel'] = input_data[obs]['wave'][tel_selection]
processed['step_sel'] = input_data[obs]['step'][tel_selection]
processed['n_orders'] = input_data[obs]['orders']
processed['n_pixels'] = input_data[obs]['wave_size']
# This is to ensure that airmass, berv and rvc are associated to the correct spectra
processed['telluric'][obs] = {'n_obs': n_obs}
airmass[n_obs] = input_data[obs]['AIRMASS']
berv[n_obs] = input_data[obs]['BERV']
rvc[n_obs] = input_data[obs]['RVC']
processed['template_fit'] = {}
rescaling_array = np.arange(0.05, 2.0, 0.05)
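        # brute-force grid search: each trial factor rescales the template
        # optical depth, the correction is applied to the telluric-reference
        # observations, and the scatter of the corrected spectra is recorded;
        # the factor minimizing the scatter is refined analytically below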
computed_std = np.zeros(len(rescaling_array))
processed['template_fit']['array_data'] = {}
processed['template_fit']['array_move'] = {}
processed['template_fit']['array_wave'] = []
""" saving the wavelength array for plotting purpose"""
for obs in lists['telluric']:
processed['template_fit']['array_wave'].extend(processed['wave_sel'])
for n_rescaling_factor, rescaling_factor in enumerate(rescaling_array):
""" Template spectrum is rebinned onto the observations wavelength scale, using only the wavelength range
selected for the computation of the rescaling factor
"""
template_rebinned_flux = \
rebin_1d_to_1d(telluric['template']['input']['wave'],
telluric['template']['input']['step'],
telluric['template']['input']['flux'],
processed['wave_sel'],
processed['step_sel'],
preserve_flux=False)
""" Saving the outcome to dictionary """
template_rebinned_flux -= 1.00
template_rebinned_flux *= rescaling_factor
template_rebinned_flux += 1.00
e2ds_corrected = []
## DEBUG
# wave_corrected = []
# e2ds_original = []
for obs in lists['telluric']:
e2ds_corrected.extend(processed[obs]['e2ds_sel'] /
np.power(template_rebinned_flux, input_data[obs]['AIRMASS']))
# wave_corrected.extend(processed['wave_sel'])
# e2ds_original.extend(processed[obs]['e2ds_sel'])
computed_std[n_rescaling_factor] = np.std(e2ds_corrected)
label_dict = '{0}'.format(rescaling_factor)
# print(label_dict, computed_std[n_rescaling_factor])
# processed['template_fit']['array_data'][label_dict] = e2ds_corrected
# processed['template_fit']['array_move'][label_dict] = rescaling_factor
# plt.scatter(wave_corrected, e2ds_original, s=2, alpha=0.5)
# plt.scatter(wave_corrected, e2ds_corrected, s=2, alpha=0.5)
# plt.plot(processed['wave_sel'],template_rebinned_flux)
# plt.show()
""" selection of the rescaling factor with the lowest scatter """
ind_factor = np.argmin(computed_std)
ind_range = 3
if ind_factor < ind_range:
sel_factor = rescaling_array[0:ind_factor+ind_range]
sel_stdev = computed_std[0:ind_factor+ind_range]
elif ind_factor > len(rescaling_array) - ind_range:
sel_factor = rescaling_array[ind_factor-ind_range:]
sel_stdev = computed_std[ind_factor-ind_range:]
else:
sel_factor = rescaling_array[ind_factor-ind_range:ind_factor+ind_range]
sel_stdev = computed_std[ind_factor-ind_range:ind_factor+ind_range]
coeff = np.polyfit(sel_factor, sel_stdev, 2)
telluric_factor = - coeff[1] / (2*coeff[0])
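        # the vertex of the parabola a*x**2 + b*x + c lies at x = -b / (2*a),
        # giving a sub-grid estimate of the factor that minimizes the scatter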
print(' telluric factor: {0:7f}'.format(telluric_factor))
print()
processed['template_fit']['telluric_factor'] = telluric_factor
processed['template_fit']['rescaling_factor'] = rescaling_factor
processed['template_fit']['computed_std'] = computed_std
processed['template_fit']['polyfit'] = {
'package': 'numpy', # in case we forget what we used...
'order': 2,
'coeff': coeff,
'sel_factor': sel_factor,
'sel_stdev': sel_stdev
}
""" After being rescaled for the proper factor, the template telluric spectrum is rebinned onto the 2D
scale of the observations """
telluric['template']['rebinned']['flux'] = \
rebin_1d_to_2d(telluric['template']['input']['wave'],
telluric['template']['input']['step'],
telluric['template']['input']['flux'],
telluric['template']['rebinned']['wave'],
telluric['template']['rebinned']['step'],
preserve_flux=False)
telluric['template']['rebinned']['ferr'] = \
rebin_1d_to_2d(telluric['template']['input']['wave'],
telluric['template']['input']['step'],
telluric['template']['input']['ferr'],
telluric['template']['rebinned']['wave'],
telluric['template']['rebinned']['step'],
preserve_flux=False,
is_error=True)
sel_out_of_range = ~((telluric['template']['rebinned']['wave'] > telluric['template']['input']['range'][0]+1.) \
& (telluric['template']['rebinned']['wave'] < telluric['template']['input']['range'][1]-1.))
telluric['template']['rebinned']['flux'][sel_out_of_range] = 1.
telluric['template']['rebinned']['ferr'][sel_out_of_range] = 0.1
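        # outside the wavelength coverage of the template (with a 1 A safety
        # margin) no correction can be computed: the transmission is set to
        # unity there, with a nominal uncertainty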
processed['telluric']['spectrum_noairmass'] = \
(telluric['template']['rebinned']['flux'] - 1.) * telluric_factor + 1.0
telluric['airmass_ref'] = processed['airmass_ref']
for obs in lists['observations']:
""" Correction of telluric lines for the average airmass value, following Wyttenbach et al. 2015 """
processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / \
np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / \
np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
for obs in lists['observations']:
# Correction of telluric lines
telluric[obs] = {}
telluric[obs]['spectrum_noairmass'] = processed['telluric']['spectrum_noairmass']
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
telluric[obs]['airmass_ref'] = processed['airmass_ref']
""" Set anomalosly low point to one (e.g. when the template is not computed)"""
telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
telluric[obs]['spectrum'] = np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
telluric[obs]['spline_noairmass'] = telluric[obs]['spectrum_noairmass'].copy()
""" No need to compute the spline approximation since we are already dealing with a very high SNR template"""
telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
""" copy the keyword for future use"""
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
telluric[obs]['telluric_corrected'] = processed[obs]['e2ds_corrected']
telluric[obs]['telluric_corrected_err'] = processed[obs]['e2ds_corrected_err']
save_to_cpickle('telluric', telluric, config_in['output'], night)
save_to_cpickle('telluric_processed', processed, config_in['output'], night)
print()
print("Night ", night, " completed")
def plot_telluric_template(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
#plt.scatter(rescaling_array, computed_std, c='C0', zorder=1)
#plt.scatter(sel_factor, sel_stdev, c='C1', zorder=2)
#plt.plot(rescaling_array, np.polyval(coeff, rescaling_array))
#plt.plot(rescaling_array, 2*rescaling_array*coeff[0] + coeff[1] )
#plt.plot()
print("plot_telluric_template Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
""" Retrieving the analysis"""
try:
processed = load_from_cpickle('telluric_processed', config_in['output'], night)
telluric = load_from_cpickle('telluric', config_in['output'], night)
except:
print()
print("No telluric correction, no plots")
continue
colors, cmap, line_colors = make_color_array(lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1)
cbax1 = plt.subplot(gs[:, 1])
lift_spectrum = 0.25
for i, obs in enumerate(lists['observations']):
color_array = cmap(i / len(lists['observations']))
_, e2ds_rescaled , _ = \
perform_rescaling(processed[obs]['wave'],
processed[obs]['e2ds'],
processed[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']
e2ds_rescaled_corrected_spline = e2ds_rescaled / telluric[obs]['spline']
for order in range(0, processed[obs]['n_orders']):
if order == 0 and i==0:
ax1.plot(processed[obs]['wave'][order, :],
e2ds_rescaled[order, :],
c=color_array, lw=1, alpha=0.5, label='uncorrected')
ax1.scatter(processed[obs]['wave'][order, :],
e2ds_rescaled_corrected_spectrum[order, :],
s=1, c=np.atleast_2d(color_array), label='corrected')
else:
ax1.plot(processed[obs]['wave'][order, :],
e2ds_rescaled[order, :],
c=color_array, lw=1, alpha=0.5)
ax1.scatter(processed[obs]['wave'][order, :],
e2ds_rescaled_corrected_spectrum[order, :],
s=1, c=np.atleast_2d(color_array))
#ax1.plot(processed[obs]['wave'][order, :],
# e2ds_rescaled[order, :]+lift_spectrum,
# c=color_array, lw=1, alpha=0.5)
#ax1.scatter(processed[obs]['wave'][order, :],
# e2ds_rescaled_corrected_spline[order, :]+lift_spectrum,
# s=1, c=np.atleast_2d(color_array))
ax2.plot(processed[obs]['wave'][order, :],
telluric[obs]['spectrum'][order, :],
c=color_array)
ax2.axhline(1.00, c='k')
#ax2.plot(processed[obs]['wave'][order, :],
# telluric[obs]['spline'][order, :]+lift_spectrum,
# c=color_array)
#ax2.axhline(1.00+lift_spectrum, c='k')
#ax2.plot(input_data['coadd']['wave'],telluric['stellarRF']['spline_eval']+0.1,c='k')
#ax2.scatter(input_data['coadd']['wave'],telluric['stellarRF']['spectrum']+0.1,c='r', s=2)
ax1.legend(loc=3)
ax1.set_title('Night: ' + night)
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
try:
instrument = night_dict[night]['instrument']
comparison_file = config_in['instruments'][instrument]['telluric_comparison']
comparison_data = np.genfromtxt(comparison_file, skip_header=1)
if comparison_data[0,0]<1000.0:
nm2Ang = 10.
else:
nm2Ang = 1.
ax1.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
ax2.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
except:
pass
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
| 19,545 | 43.221719 | 121 | py |
SLOPpy | SLOPpy-main/SLOPpy/spectra_lightcurve_bkp.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines.clv_rm_subroutines import *
from astropy.convolution import convolve, Box1DKernel
__all__ = ['compute_spectra_lightcurve',
'compute_spectra_lightcurve_clv_rm_correction',
'plot_spectra_lightcurve',
'plot_spectra_lightcurve_clv_rm_correction']
def compute_spectra_lightcurve_clv_rm_correction(config_in, lines_label):
compute_spectra_lightcurve(config_in, lines_label)
def plot_spectra_lightcurve_clv_rm_correction(config_in, night_input=''):
plot_spectra_lightcurve(config_in, night_input)
def compute_spectra_lightcurve(config_in, lines_label):
subroutine_name = 'spectra_lightcurve'
sampler_name = 'emcee'
do_average_instead_of_sum = True
night_dict = from_config_get_nights(config_in)
#instrument_dict = from_config_get_instrument(config_in)
#system_dict = from_config_get_system(config_in)
planet_dict = from_config_get_planet(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
clv_rm_correction = lines_dict.get('clv_rm_correction', True)
# from_config_get_transmission_lightcurve(config_in)
#lightcurve_dict = from_config_get_transmission_lightcurve(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
""" Using the MCMC fit range to define the transmission spectrum region """
shared_selection = (shared_data['coadd']['wave'] >= lines_dict['range'][0]) \
& (shared_data['coadd']['wave'] < lines_dict['range'][1])
processed_template = {
'subroutine': subroutine_name,
'range': lines_dict['range'],
'wave': shared_data['coadd']['wave'][shared_selection],
'step': shared_data['coadd']['step'][shared_selection],
        'size': int(np.sum(shared_selection)),
}
# doublet sodium in the lab reference frame
"""
C stands for central
"""
C_bands = {}
for passband_key, passband_val in spectral_lines['passbands'].items():
C_bands[passband_key] = {}
for line_key, line_val in spectral_lines['lines'].items():
C_bands[passband_key][line_key] = (np.abs(shared_data['coadd']['wave'] - line_val) < passband_val / 2.)
"""
S stands for side
"""
S_bands = {}
for band_key, band_val in spectral_lines['continuum'].items():
S_bands[band_key] = (shared_data['coadd']['wave'] >= band_val[0]) & (shared_data['coadd']['wave'] <= band_val[1])
"""
    The transit phase [0-1] is divided into N (=5) bins. Two arrays are computed:
- transit_in_bins: array with the boundaries of the bins, size=N+1
- transit_in_step: average size of the bin, size=1
"""
transit_in_bins = np.linspace(
-planet_dict['transit_duration'][0]/2./planet_dict['period'][0],
planet_dict['transit_duration'][0]/2./planet_dict['period'][0],
6
)
transit_in_step = np.average(transit_in_bins[1:]-transit_in_bins[:-1])
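    # illustrative numbers (not taken from any configuration): a transit
    # lasting 0.125 in phase units, split into 5 bins, gives
    # transit_in_step = 0.025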
for night in night_dict:
try:
lightcurve = load_from_cpickle(subroutine_name, config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations( config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = processed_template.copy()
lightcurve = {
'subroutine': subroutine_name,
'arrays': {
'observations': {
                    'obs_name': np.zeros(len(lists['observations']), dtype=object),  # dtype=str would truncate names to one character
'phase': np.zeros(len(lists['observations'])),
},
'transit_in': {},
'transit_out': {},
},
'C_bands': C_bands,
'S_bands': S_bands,
'average': {},
'bins': {
'transit_in_bins': transit_in_bins,
'transit_in_step': transit_in_step
}
}
""" Adding the C-bands arrays to the dictionary"""
for band_key in C_bands:
lightcurve['arrays']['observations']['ratio_' + band_key] = np.zeros([len(lists['observations']), 2])
transit_out_flag = np.zeros(len(lists['observations']), dtype=bool)
transit_in_flag = np.zeros(len(lists['observations']), dtype=bool)
if clv_rm_correction:
try:
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night, lines_label)
except (FileNotFoundError, IOError):
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night)
        for n_obs, obs in enumerate(lists['observations']):
processed[obs] = {}
lightcurve[obs] = {}
processed[obs]['rescaling'], \
processed[obs]['rescaled'], \
processed[obs]['rescaled_err'] = perform_rescaling(
input_data[obs]['wave'], input_data[obs]['e2ds'], input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['uncorrected'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds'],
calib_data['blaze'],
processed['wave'],
processed['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])
processed[obs]['uncorrected_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds_err'],
calib_data['blaze'],
processed['wave'],
processed['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])
            if clv_rm_correction:
                rv_shift = 0.0 # we always stay in SRF
                correction, _ = clv_rm_correction_factor_computation(
                    clv_rm_models, processed['wave'], processed['step'], rv_shift, obs)

                processed[obs]['clv_rm_correction'] = correction
                processed[obs]['rebinned'] = processed[obs]['uncorrected'] / correction
                processed[obs]['rebinned_err'] = processed[obs]['uncorrected_err'] / correction
            else:
                processed[obs]['rebinned'] = processed[obs]['uncorrected']
                processed[obs]['rebinned_err'] = processed[obs]['uncorrected_err']
try:
phase_internal = (observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'][0])/planet_dict['period'][0]
except:
phase_internal = (observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'])/planet_dict['period'][0]
processed[obs]['bands'] = {
'phase': phase_internal
}
s_integrated = 0.000
s_sigmaq_sum = 0.00
n_bands = 0.00
for band_key, band_val in S_bands.items():
if do_average_instead_of_sum:
processed[obs]['bands'][band_key] = \
[np.average(processed[obs]['rebinned'][band_val]),
np.sum((processed[obs]['rebinned_err'][band_val])**2)
/ len(processed[obs]['rebinned_err'][band_val])**2]
else:
processed[obs]['bands'][band_key] = \
[np.sum(processed[obs]['rebinned'][band_val]),
np.sum((processed[obs]['rebinned_err'][band_val])**2)]
s_integrated += processed[obs]['bands'][band_key][0]
s_sigmaq_sum += processed[obs]['bands'][band_key][1]
n_bands += 1
s_integrated *= (2. / n_bands)
s_sigmaq_sum *= (2. / n_bands)**2
s_factor_term = np.power(s_integrated, -2.0)
for band_key, band_dict in C_bands.items():
processed[obs]['bands'][band_key] = {}
c_integrated = 0.000
c_sigmaq_sum = 0.000
n_bands = 0.00
for line_key, line_val in band_dict.items():
if do_average_instead_of_sum:
processed[obs]['bands'][band_key][line_key] = \
[np.average(processed[obs]['rebinned'][line_val]),
np.sum((processed[obs]['rebinned_err'][line_val]) ** 2)
/ len(processed[obs]['rebinned_err'][line_val]) ** 2]
else:
processed[obs]['bands'][band_key][line_key] = \
[np.sum(processed[obs]['rebinned'][line_val]),
np.sum((processed[obs]['rebinned_err'][line_val]) ** 2)]
c_integrated += processed[obs]['bands'][band_key][line_key][0]
c_sigmaq_sum += processed[obs]['bands'][band_key][line_key][1]
n_bands += 1
c_integrated *= (2. / n_bands)
c_sigmaq_sum *= (2. / n_bands) ** 2
ratio = c_integrated / s_integrated
lightcurve[obs]['ratio_' + band_key] = [ratio, ratio * np.sqrt( c_sigmaq_sum / c_integrated ** 2 +
s_sigmaq_sum / s_integrated ** 2)]
#np.sqrt(s_factor_term
# * (c_sigmaq_sum + ratio**2 * s_sigmaq_sum))]
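                # first-order error propagation for the ratio r = C/S:
                # sigma_r = r * sqrt((sigma_C / C)**2 + (sigma_S / S)**2)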
lightcurve['arrays']['observations']['ratio_' + band_key][n_obs, :] = \
lightcurve[obs]['ratio_' + band_key][:]
lightcurve[obs]['phase'] = processed[obs]['bands']['phase']
lightcurve['arrays']['observations']['obs_name'][n_obs] = obs
lightcurve['arrays']['observations']['phase'][n_obs] = lightcurve[obs]['phase']
if obs in lists['transit_out']:
transit_out_flag[n_obs] = True
else:
transit_in_flag[n_obs] = True
for band_key in C_bands:
lightcurve['arrays']['rescaling_' + band_key] = \
np.average(lightcurve['arrays']['observations']['ratio_' + band_key][transit_out_flag, 0], axis=0)
sorting_index = np.argsort(lightcurve['arrays']['observations']['phase'])
transit_out_flag = transit_out_flag[sorting_index]
transit_in_flag = transit_in_flag[sorting_index]
lightcurve['arrays']['observations']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][sorting_index]
lightcurve['arrays']['observations']['phase'] = lightcurve['arrays']['observations']['phase'][sorting_index]
lightcurve['arrays']['transit_in']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_in_flag]
lightcurve['arrays']['transit_in']['phase'] = lightcurve['arrays']['observations']['phase'][transit_in_flag]
lightcurve['arrays']['transit_out']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_out_flag]
lightcurve['arrays']['transit_out']['phase'] = lightcurve['arrays']['observations']['phase'][transit_out_flag]
for band_key in C_bands:
lightcurve['arrays']['observations']['ratio_' + band_key] = \
lightcurve['arrays']['observations']['ratio_' + band_key][sorting_index] \
/ lightcurve['arrays']['rescaling_' + band_key]
lightcurve['arrays']['transit_in']['ratio_' + band_key] = \
lightcurve['arrays']['observations']['ratio_' + band_key][transit_in_flag]
lightcurve['arrays']['transit_out']['ratio_' + band_key] = \
lightcurve['arrays']['observations']['ratio_' + band_key][transit_out_flag]
avg_out, avg_out_sq = \
np.average(lightcurve['arrays']['transit_out']['ratio_' + band_key][:, 0],
weights=1./(lightcurve['arrays']['transit_out']['ratio_' + band_key][:, 1])**2,
returned=True)
avg_in, avg_in_sq = \
np.average(lightcurve['arrays']['transit_in']['ratio_' + band_key][:, 0],
weights=1. / (lightcurve['arrays']['transit_in']['ratio_' + band_key][:, 1]) ** 2,
returned=True)
lightcurve['average'][band_key] = {
'average_out': np.asarray([avg_out, 1./np.power(avg_out_sq, 0.5)]),
'average_in': np.asarray([avg_in, 1. / np.power(avg_in_sq, 0.5)]),
}
delta_fac = \
lightcurve['average'][band_key]['average_in'][0]/lightcurve['average'][band_key]['average_out'][0]
delta_err = delta_fac * np.sqrt(
(lightcurve['average'][band_key]['average_out'][1]
/ lightcurve['average'][band_key]['average_out'][0]) ** 2
+ (lightcurve['average'][band_key]['average_in'][1]
/ lightcurve['average'][band_key]['average_in'][0]) ** 2)
lightcurve['average'][band_key]['delta'] = np.asarray([(1.-delta_fac)*100., delta_err*100.])
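            # transmission depth: delta = (1 - F_in / F_out), in percent, with
            # the uncertainty propagated from the weighted in-transit and
            # out-of-transit averages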
lightcurve['arrays']['observations']['transit_out_flag'] = transit_out_flag
lightcurve['arrays']['observations']['transit_in_flag'] = transit_in_flag
""" Compute the duration of the pre-transit observations, using as scale
the number of bins, with the same size as those used inside the transit.
The value is given by the difference of the phase of the beginning of the transit minus
the phase of the first observation, keeping in mind that the centre of the transit has phase = 0
An additional bin is added if there are observations left out from the actual number of bins
"""
pre_duration = transit_in_bins[0] - lightcurve['arrays']['transit_out']['phase'][0]
if pre_duration > 0:
nsteps_pre = int(pre_duration/transit_in_step)
if pre_duration % transit_in_step > 0.0:
nsteps_pre += 1
else:
nsteps_pre = 0
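        # illustrative arithmetic: pre_duration = 2.3 bin widths gives
        # int(2.3) = 2 full bins plus one partial bin, hence nsteps_pre = 3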
""" same as pre-transit, but suing the post-transit instead"""
post_duration = lightcurve['arrays']['transit_out']['phase'][-1] - transit_in_bins[-1]
if post_duration > 0:
nsteps_post = int(post_duration / transit_in_step)
if post_duration % transit_in_step > 0.0:
nsteps_post += 1
else:
nsteps_post = 0
""" THe full array with both in-transit and out-transit phase, built in such a way that the
- the lower boundary of the first in-transit bin corresponds to the beginning of the transit
- the upper boundary of the last in-transit bin corresponds to the end of the transit
"""
transit_bins = np.arange(transit_in_bins[0]-nsteps_pre*transit_in_step,
transit_in_bins[-1] + (nsteps_post+1.1) * transit_in_step,
transit_in_step)
lightcurve['binned'] = {
'observations': {
'phase': np.zeros(len(transit_bins)),
},
'transit_in': {},
'transit_out': {},
}
for band_key in C_bands:
lightcurve['binned']['observations']['ratio_' + band_key] = np.zeros([len(transit_bins), 2])
transit_out_flag = np.zeros(len(transit_bins), dtype=bool)
transit_in_flag = np.zeros(len(transit_bins), dtype=bool)
n_a = 0
for nb in range(0, len(transit_bins)-1):
sel = (lightcurve['arrays']['observations']['phase'] >= transit_bins[nb]) \
& (lightcurve['arrays']['observations']['phase'] < transit_bins[nb+1])
if np.sum(sel) <= 0: continue
lightcurve['binned']['observations']['phase'][n_a] = np.average(lightcurve['arrays']['observations']['phase'][sel])
for band_key in C_bands:
lightcurve['binned']['observations']['ratio_' + band_key][n_a, 0], sum_weights = np.average(
lightcurve['arrays']['observations']['ratio_' + band_key][sel, 0],
weights=1. / lightcurve['arrays']['observations']['ratio_' + band_key][sel, 1]**2,
returned=True)
lightcurve['binned']['observations']['ratio_' + band_key][n_a, 1] = np.sqrt(1. / sum_weights)
if np.abs(lightcurve['binned']['observations']['phase'][n_a]) >= \
planet_dict['transit_duration'][0]/2./planet_dict['period'][0]:
transit_out_flag[n_a] = True
else:
transit_in_flag[n_a] = True
n_a += 1 # bins actually computed
lightcurve['binned']['transit_in']['phase'] = lightcurve['binned']['observations']['phase'][transit_in_flag]
lightcurve['binned']['transit_out']['phase'] = lightcurve['binned']['observations']['phase'][transit_out_flag]
lightcurve['binned']['observations']['phase'] = lightcurve['binned']['observations']['phase'][:n_a]
for band_key in C_bands:
lightcurve['binned']['transit_in']['ratio_' + band_key] = \
lightcurve['binned']['observations']['ratio_' + band_key][transit_in_flag, :]
lightcurve['binned']['transit_out']['ratio_' + band_key] = \
lightcurve['binned']['observations']['ratio_' + band_key][transit_out_flag, :]
lightcurve['binned']['observations']['ratio_' + band_key] = \
lightcurve['binned']['observations']['ratio_' + band_key][:n_a, :]
save_to_cpickle(subroutine_name+'_processed', processed, config_in['output'], night)
save_to_cpickle(subroutine_name, lightcurve, config_in['output'], night)
def plot_spectra_lightcurve(config_in, night_input='', clv_rm_correction=False):
import matplotlib.pyplot as plt
if clv_rm_correction:
subroutine_name = 'spectra_lightcurve_clv_rm_correction'
else:
subroutine_name = 'spectra_lightcurve'
night_dict = from_config_get_nights(config_in)
if night_input=='':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
""" Retrieving the analysis"""
try:
lightcurve = load_from_cpickle(subroutine_name, config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Plotting'))
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Skipped'))
continue
#observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
C_bands = lightcurve['C_bands']
print()
for band_key in C_bands:
print("Night: {0:s} Band: {1:s} Delta:{2:8.4f} +- {3:8.4f} [%]".format(night, band_key,
lightcurve['average'][band_key]['delta'][0], lightcurve['average'][band_key]['delta'][1]))
for band_key in C_bands:
plt.figure(figsize=(12, 6))
plt.title('Spectra lightcurve - night {0:s} \n {1:s}'.format(night, band_key))
plt.errorbar(lightcurve['arrays']['observations']['phase'],
lightcurve['arrays']['observations']['ratio_' + band_key][:,0]*100 -100.,
yerr= lightcurve['arrays']['observations']['ratio_' + band_key][:,1]*100 ,
fmt='.', c='k', alpha=0.25, label='observations')
plt.errorbar(lightcurve['binned']['observations']['phase'],
lightcurve['binned']['observations']['ratio_' + band_key][:, 0]*100 -100.,
yerr= lightcurve['binned']['observations']['ratio_' + band_key][:,1]*100 ,
fmt='.', c='k', alpha=1.0, label='observations')
plt.axvspan(-1, lightcurve['bins']['transit_in_bins'][0], alpha=0.25, color='green')
plt.axvspan(lightcurve['bins']['transit_in_bins'][-1], 1., alpha=0.25, color='green')
plt.axhline(0, c='C1')
plt.xlim(lightcurve['arrays']['observations']['phase'][0]-0.01,
lightcurve['arrays']['observations']['phase'][-1]+0.01)
plt.xlabel('orbital phase')
            plt.ylabel(r'$\mathcal{R}$ - 1. [%]')
plt.legend()
plt.show()
print()
| 21,465 | 44.478814 | 132 | py |
SLOPpy | SLOPpy-main/SLOPpy/interstellar_lines.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_interstellar_lines", "plot_interstellar_lines"]
subroutine_name = 'interstellar_lines'
# def plot_identify_stellar_lines(config_in)
def compute_interstellar_lines(config_in):
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
interstellar_lines = from_config_get_interstellar_lines(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
if not interstellar_lines:
return
for night in night_dict:
print()
print("compute_interstellar_lines Night: ", night)
try:
interstellar = load_from_cpickle('interstellar_lines_processed', config_in['output'], night)
skip_lineselection = True
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
instrument = night_dict[night]['instrument']
processed = {
'subroutine': subroutine_name,
'line_rebin': {},
'line_shift': {}
}
interstellar = {
'subroutine': subroutine_name,
}
for obs in lists['observations']:
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
interstellar[obs] = {
'wave_BRF': shift_wavelength_array(input_data[obs]['wave'], observational_pams[obs]['rv_shift_ORF2BRF']),
'correction': np.ones(np.shape(input_data[obs]['wave']))
}
""" for plotting purpose only"""
processed[obs]['flux'] = input_data[obs]['e2ds'] / calib_data['blaze'] / input_data[obs]['step']
processed[obs]['flux_err'] = np.sqrt(input_data[obs]['e2ds']) / calib_data['blaze'] / input_data[obs][
'step']
for line_name, line in interstellar_lines.items():
processed[line_name] = {
'line_rebin': {},
'poly_coeff': {},
'normalized': {},
'line_shift': {
'selected_points': []
}
}
processed[line_name]['min_wave'] = max(shared_data['coadd']['wavelength_range'][0], line[0] - line[2]*2)
processed[line_name]['max_wave'] = min(shared_data['coadd']['wavelength_range'][1], line[0] + line[2]*2)
processed[line_name]['wave'] = np.arange(processed[line_name]['min_wave'],
processed[line_name]['max_wave'],
instrument_dict[instrument]['wavelength_step'])
processed[line_name]['size'] = np.size(processed[line_name]['wave'], axis=0)
processed[line_name]['step'] = np.ones(processed[line_name]['size'])\
* instrument_dict[instrument]['wavelength_step']
processed[line_name]['correction'] = np.ones(processed[line_name]['size'])
for obs in lists['observations']:
preserve_flux = input_data[obs].get('absolute_flux', True)
""" shifting a chunk of the spectra to the Solar System Barycenter reference """
processed[line_name]['line_rebin'][obs] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds'],
calib_data['blaze'],
processed[line_name]['wave'],
processed[line_name]['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2BRF'])
argmin_sel = np.argmin(processed[line_name]['line_rebin'][obs][5:-5]) + 5
wave_sel = processed[line_name]['wave'][argmin_sel]
processed[line_name]['line_shift']['selected_points'].append(wave_sel)
processed[line_name]['line_shift']['position'] = np.median(
processed[line_name]['line_shift']['selected_points'])
processed[line_name]['line_shift']['delta_lambda'] = processed[line_name]['line_shift']['position'] - line[0]
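            # automatic line identification: each observation contributes the
            # wavelength of its deepest pixel (edges excluded), and the median
            # over the observations is robust against single noise spikes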
            if interstellar_lines.get('automatic', False):
                interstellar[line_name] = processed[line_name]['line_shift']['position']
            else:
                interstellar[line_name] = line[0]
""" selection of spectral range for continuum normalization and interstellar line modelling"""
processed[line_name]['wavelength_selection'] = \
(np.abs(processed[line_name]['wave'] - interstellar[line_name]) < line[1])
processed[line_name]['continuum_selection'] = \
(~processed[line_name]['wavelength_selection']) \
& (np.abs(processed[line_name]['wave'] - interstellar[line_name]) < line[2])
processed[line_name]['interstellar_selection'] = \
(processed[line_name]['wavelength_selection'] | processed[line_name]['continuum_selection'])
spline_wave_points = []
spline_norm_points = []
            # TODO
            #! 1) Rescale each observation by its median and collect all the values
            #! 2) Perform a continuum normalization on the collected values, with
            #!    iterative sigma-clipping
            #! 3) Perform a spline / gaussian fit of the spectral line
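            #! A possible sketch for step 2 (an assumption, not implemented
            #! here): keep a boolean mask of continuum points, fit the
            #! polynomial on the masked points only, drop the points whose
            #! residuals exceed k * std, and refit until the mask stops changing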
for obs in lists['telluric']:
# sel1 = (np.abs(processed[line_name]['wave'] - interstellar[line_name]) < line[1])
# sel2 = (~sel1) & (np.abs(processed[line_name]['wave'] - interstellar[line_name]) < line[2])
# sel3 = (sel1 | sel2)
""" normalization around the interstellar line """
processed[line_name]['poly_coeff'][obs] = \
np.polyfit(processed[line_name]['wave'][processed[line_name]['continuum_selection']],
processed[line_name]['line_rebin'][obs][processed[line_name]['continuum_selection']],
2)
processed[line_name]['normalized'][obs] = \
processed[line_name]['line_rebin'][obs][processed[line_name]['interstellar_selection']] \
/ np.polyval(processed[line_name]['poly_coeff'][obs],
processed[line_name]['wave'][processed[line_name]['interstellar_selection']])
spline_wave_points.extend(processed[line_name]['wave'][processed[line_name]['interstellar_selection']])
spline_norm_points.extend(processed[line_name]['normalized'][obs])
""" sorting the array to avoid problems with the spline function"""
spline_sorting_index = np.argsort(spline_wave_points)
spline_wave_points = np.asarray(spline_wave_points)[spline_sorting_index]
spline_norm_points = np.asarray(spline_norm_points)[spline_sorting_index]
processed[line_name]['spline_eval'], \
processed[line_name]['spline_coeff'], \
processed[line_name]['spline_knots'] = \
compute_spline(spline_wave_points, spline_norm_points, 0.08, knot_order=3)
processed[line_name]['correction'][processed[line_name]['wavelength_selection']] = \
sci_int.splev(processed[line_name]['wave'][processed[line_name]['wavelength_selection']],
processed[line_name]['spline_coeff'])
for obs in lists['observations']:
interstellar[obs]['wavelength_selection'] = \
(np.abs(interstellar[obs]['wave_BRF']-interstellar[line_name]) < line[1])
interstellar[obs]['continuum_selection'] = \
(~interstellar[obs]['wavelength_selection']) \
& (np.abs(interstellar[obs]['wave_BRF']-interstellar[line_name]) < line[2])
interstellar[obs]['interstellar_selection'] = \
(interstellar[obs]['wavelength_selection'] | interstellar[obs]['continuum_selection'])
interstellar[obs]['correction'][interstellar[obs]['wavelength_selection']] = \
sci_int.splev(interstellar[obs]['wave_BRF'][interstellar[obs]['wavelength_selection']],
processed[line_name]['spline_coeff'])
save_to_cpickle('interstellar_lines_processed', processed, config_in['output'], night)
save_to_cpickle('interstellar_lines', interstellar, config_in['output'], night)
def plot_interstellar_lines(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
interstellar_lines = from_config_get_interstellar_lines(config_in)
if not interstellar_lines:
return
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
print("plot_interstellar_lines Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
""" Retrieving the analysis"""
try:
processed = load_from_cpickle('interstellar_lines_processed', config_in['output'], night)
interstellar = load_from_cpickle('interstellar_lines', config_in['output'], night)
except:
print()
print('No interstellar correction, no plots')
continue
colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(lists, observational_pams)
# fig = plt.figure(figsize=(12, 6))
# gs = GridSpec(2, 2, width_ratios=[50, 1])
#
# ax1 = plt.subplot(gs[0, 0])
# ax2 = plt.subplot(gs[1, 0], sharex=ax1)
# cbax1 = plt.subplot(gs[:, 1])
fig, gs, cbax1, ax1, ax2 = grid_2plot()
for i, obs in enumerate(lists['observations']):
"""rescaling"""
processed[obs]['flux_rescaling'], processed[obs]['flux_rescaled'], processed[obs]['flux_rescaled_err'] = \
perform_rescaling(interstellar[obs]['wave_BRF'],
processed[obs]['flux'],
processed[obs]['flux_err'],
observational_pams['wavelength_rescaling'])
if i == 0:
ax1.scatter(interstellar[obs]['wave_BRF'], processed[obs]['flux_rescaled'],
c=colors_scatter['mBJD'][obs], s=1, alpha=0.5, label='observations (BRF)')
else:
ax1.scatter(interstellar[obs]['wave_BRF'], processed[obs]['flux_rescaled'],
c=colors_scatter['mBJD'][obs], s=1, alpha=0.5)
# ax1.plot(interstellar['wave'], interstellar['correction'], c='black')
ax2.scatter(interstellar[obs]['wave_BRF'], processed[obs]['flux_rescaled']/interstellar[obs]['correction'],
c=colors_scatter['mBJD'][obs], s=1, alpha=0.5)
for line_name, line in interstellar_lines.items():
ax1.axvline(interstellar[line_name]-line[1], c='b')
ax1.axvline(interstellar[line_name]+line[1], c='b')
ax1.axvline(interstellar[line_name]-line[2], c='g')
ax1.axvline(interstellar[line_name]+line[2], c='g')
ax2.axvline(interstellar[line_name]-line[1], c='b')
ax2.axvline(interstellar[line_name]+line[1], c='b')
ax2.axvline(interstellar[line_name]-line[2], c='g')
ax2.axvline(interstellar[line_name]+line[2], c='g')
# ax1.plot(processed[line_name]['wave'], processed[line_name]['flux_rescaled'], c='b')
try:
wave_min = min(wave_min, interstellar[line_name])
wave_max = max(wave_max, interstellar[line_name])
range_max = max(range_max, line[2])
except:
wave_min = interstellar[line_name]
wave_max = interstellar[line_name]
range_max = line[2]
ax1.set_title('Night: {0:s} \n Input spectra'.format(night))
ax1.set_xlim(wave_min-2*range_max, wave_max+2*range_max)
ax1.legend(loc=1)
ax2.set_title('After interstellar line correction')
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
# fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
| 14,029 | 44.700326 | 121 | py |
SLOPpy | SLOPpy-main/SLOPpy/compare_clv_rm_effects.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ['plot_compare_clv_rm_effects_planetRF',
'plot_compare_clv_rm_effects_observerRF',
'plot_compare_clv_rm_effects_stellarRF',
'plot_compare_clv_rm_effects']
def plot_compare_clv_rm_effects_planetRF(config_in, night_input=''):
plot_compare_clv_rm_effects(config_in, night_input, reference='planetRF')
def plot_compare_clv_rm_effects_observerRF(config_in, night_input=''):
plot_compare_clv_rm_effects(config_in, night_input, reference='observerRF')
def plot_compare_clv_rm_effects_stellarRF(config_in, night_input=''):
plot_compare_clv_rm_effects(config_in, night_input, reference='stellarRF')
def plot_compare_clv_rm_effects(config_in, night_input='', reference='planetRF'):
transmission_average = load_from_cpickle('transmission_average_'+reference, config_in['output'])
transmission_clv_rm_average = load_from_cpickle('transmission_clv_rm_average_'+reference, config_in['output'])
night_dict = from_config_get_nights(config_in)
fig = plt.figure(figsize=(12, 9))
gs = GridSpec(2, 1)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
ax1.errorbar(transmission_clv_rm_average['wave'],
transmission_clv_rm_average['average'],
yerr=transmission_clv_rm_average['average_err'],
fmt='ko', ms=1, zorder=10, alpha=0.10, label='average, with CLV RM')
ax1.errorbar(transmission_clv_rm_average['binned_wave'],
transmission_clv_rm_average['binned'],
yerr=transmission_clv_rm_average['binned_err'],
fmt='ko', ms=3, zorder=20, label='binned, with CLV RM')
#ax1.errorbar(transmission_average['binned_wave'],
# transmission_average['binned'],
# yerr=transmission_average['binned_err'],
# fmt='mo', ms=3, zorder=15, label='binned, no CLV RM')
ax2.errorbar(transmission_average['wave'],
transmission_average['average'],
yerr=transmission_average['average_err'],
fmt='ko', ms=1, zorder=10, alpha=0.10, label='average, no CLV RM')
ax2.errorbar(transmission_average['binned_wave'],
transmission_average['binned'],
yerr=transmission_average['binned_err'],
fmt='ko', ms=3, zorder=20, label='binned, no CLV RM')
#ax2.errorbar(transmission_clv_rm_average['binned_wave'],
# transmission_clv_rm_average['binned'],
# yerr=transmission_clv_rm_average['binned_err'],
# fmt='mo', ms=3, zorder=15, alpha=0.5, label='binned, with CLV RM')
total_night = len(night_dict)
side_step = config_in['master-out']['wavelength_step'] * config_in['master-out']['binning_factor'] / 10
for n_night, night in enumerate(night_dict):
ax1.errorbar(transmission_clv_rm_average['binned_wave'] + (n_night-total_night/2) * side_step,
transmission_clv_rm_average[night]['binned'],
yerr=transmission_clv_rm_average[night]['binned_err'],
color='C'+repr(n_night), label='{0:s} with CLV RM'.format(night),
fmt='o', ms=1, zorder=17, alpha=0.75)
ax2.errorbar(transmission_average['binned_wave'] + (n_night-total_night/2) * side_step,
transmission_average[night]['binned'],
yerr=transmission_average[night]['binned_err'],
color='C' + repr(n_night), label='{0:s} no CLV RM'.format(night),
fmt='o', ms=1, zorder=17, alpha=0.75)
#ax1.set_ylim(0.95-spec_offset*(1.+n_night), 1.05)
ax1.set_xlim(config_in['master-out']['wavelength_range'][0], config_in['master-out']['wavelength_range'][1])
ax1.set_ylim(0.985, 1.01)
ax2.set_xlabel('$\lambda$ [$\AA$]')
ax1.legend(loc=3)
ax1.set_title('Average transmission spectrum with CLV and RM correction, in {0:s}'.format(reference))
ax2.set_title('Average transmission spectrum without CLV and RM correction, in {0:s}'.format(reference))
plt.show()
| 4,395 | 45.273684 | 114 | py |
SLOPpy | SLOPpy-main/SLOPpy/spectra_lightcurve_average.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from astropy.convolution import convolve, Box1DKernel
__all__ = ['compute_spectra_lightcurve_average', 'plot_spectra_lightcurve_average',
'compute_spectra_lightcurve_average_clv_rm_correction',
'plot_spectra_lightcurve_average_clv_rm_correction']
def compute_spectra_lightcurve_average_clv_rm_correction(config_in, lines_label):
compute_spectra_lightcurve_average(config_in, lines_label)
def plot_spectra_lightcurve_average_clv_rm_correction(config_in, night_input=''):
plot_spectra_lightcurve_average(config_in, night_input)
subroutine_name = 'spectra_lightcurve_average'
pick_files = 'spectra_lightcurve'
sampler_name = 'emcee'
def compute_spectra_lightcurve_average(config_in, lines_label):
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
planet_dict = from_config_get_planet(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label] # from_config_get_transmission_lightcurve(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
results_list = ['user',
'mcmc_night_MED',
'mcmc_night_MAP',
'mcmc_global_MED',
                    'mcmc_global_MAP',
                    'user_uncorrected']
append_list = ['', '_uncorrected', '_clv_model']
for results_selection in results_list:
skip_iteration = False
try:
            lightcurve_average = load_from_cpickle(subroutine_name + '_' + results_selection, config_in['output'], lines=lines_label)
            print("{0:45s} {1:s}".format(subroutine_name, 'Retrieved'))
            continue
except (FileNotFoundError, IOError):
print(" No average transmission lightcurve found for case:{0:s}, computing now ".format(results_selection))
print("{0:45s} {1:s}".format(subroutine_name, 'Computing'))
print()
"""
        C stands for central
"""
C_bands = {}
for passband_key, passband_val in lines_dict['passbands'].items():
C_bands[passband_key] = {}
for line_key, line_val in lines_dict['lines'].items():
                C_bands[passband_key][line_key] = (np.abs(shared_data['coadd']['wave'] - line_val) * 2. < passband_val)
"""
        S stands for side
"""
S_bands = {}
for band_key, band_val in lines_dict['continuum'].items():
S_bands[band_key] = (shared_data['coadd']['wave'] >= band_val[0]) & (shared_data['coadd']['wave'] <= band_val[1])
if 'full_transit_duration' in planet_dict:
            full_transit_duration = planet_dict['full_transit_duration'][0]
else:
full_transit_duration = planet_dict['transit_duration'][0]
if 'total_transit_duration' in planet_dict:
total_transit_duration = planet_dict['total_transit_duration'][0]
else:
total_transit_duration = planet_dict['transit_duration'][0]
transit_in_bins = np.linspace(
-total_transit_duration/2./planet_dict['period'][0],
total_transit_duration/2./planet_dict['period'][0],
6
)
transit_full_bins = np.linspace(
-full_transit_duration/2./planet_dict['period'][0],
full_transit_duration/2./planet_dict['period'][0],
6
)
transit_in_step = np.average(transit_in_bins[1:]-transit_in_bins[:-1])
transit_full_step = np.average(transit_full_bins[1:]-transit_full_bins[:-1])
lightcurve_average = {
'subroutine': subroutine_name,
'transit_in_flag': [],
'transit_full_flag': [],
'transit_out_flag': [],
'transit_in': {},
'transit_full': {},
'transit_out': {},
'observations': {
'phase': []
},
'bands_list': [],
'C_bands': C_bands,
'S_bands': S_bands,
'average': {},
'bins': {
'transit_in_bins': transit_in_bins,
'transit_in_step': transit_in_step,
'transit_full_bins': transit_full_bins,
'transit_full_step': transit_full_step
}
}
for band_key in C_bands:
for name_append in append_list:
lightcurve_average['observations']['ratio_' + band_key + name_append] = []
lightcurve_average['bands_list'].extend([band_key])
for night in night_dict:
try:
lightcurve = load_from_cpickle(pick_files + '_' + results_selection, config_in['output'], night, lines_label)
except:
print(" No night spectra lightcurve found for case:{0:s}, skipping ".format(results_selection))
skip_iteration = True
continue
#lightcurve = load_from_cpickle(pick_files, config_in['output'], night)
lightcurve_average['observations']['phase'].extend(lightcurve['arrays']['observations']['phase'].tolist())
lightcurve_average['transit_in_flag'].extend(
lightcurve['arrays']['observations']['transit_in_flag'].tolist())
lightcurve_average['transit_full_flag'].extend(
lightcurve['arrays']['observations']['transit_full_flag'].tolist())
lightcurve_average['transit_out_flag'].extend(
lightcurve['arrays']['observations']['transit_out_flag'].tolist())
for band_key in lightcurve_average['bands_list']:
for name_append in append_list:
lightcurve_average['observations']['ratio_' + band_key+ name_append].extend(
lightcurve['arrays']['observations']['ratio_' + band_key+ name_append].tolist())
if skip_iteration: continue
sorting_index = np.argsort(lightcurve_average['observations']['phase'])
lightcurve_average['observations']['phase'] = np.asarray(lightcurve_average['observations']['phase'])[sorting_index]
        # the flags must stay boolean: an integer array would perform fancy
        # indexing instead of boolean masking in the selections below
        lightcurve_average['transit_in_flag'] = np.asarray(lightcurve_average['transit_in_flag'], dtype=bool)[sorting_index]
        lightcurve_average['transit_full_flag'] = np.asarray(lightcurve_average['transit_full_flag'], dtype=bool)[sorting_index]
        lightcurve_average['transit_out_flag'] = np.asarray(lightcurve_average['transit_out_flag'], dtype=bool)[sorting_index]
lightcurve_average['transit_in']['phase'] = \
lightcurve_average['observations']['phase'][lightcurve_average['transit_in_flag']]
lightcurve_average['transit_full']['phase'] = \
lightcurve_average['observations']['phase'][lightcurve_average['transit_full_flag']]
lightcurve_average['transit_out']['phase'] = \
lightcurve_average['observations']['phase'][lightcurve_average['transit_out_flag']]
#for band_key in lightcurve_average['bands_list']:
for band_key in C_bands:
for name_append in append_list:
lightcurve_average['observations']['ratio_' + band_key + name_append] = \
np.asarray(lightcurve_average['observations']['ratio_' + band_key + name_append])[sorting_index]
lightcurve_average['transit_in']['ratio_' + band_key + name_append] = \
lightcurve_average['observations']['ratio_' + band_key + name_append][lightcurve_average['transit_in_flag']]
lightcurve_average['transit_full']['ratio_' + band_key + name_append] = \
lightcurve_average['observations']['ratio_' + band_key + name_append][lightcurve_average['transit_full_flag']]
lightcurve_average['transit_out']['ratio_' + band_key + name_append] = \
lightcurve_average['observations']['ratio_' + band_key + name_append][lightcurve_average['transit_out_flag']]
avg_out, avg_out_sq = \
np.average(lightcurve_average['transit_out']['ratio_' + band_key + name_append][:, 0],
weights=1. / (lightcurve_average['transit_out']['ratio_' + band_key + name_append][:, 1]) ** 2,
returned=True)
avg_in, avg_in_sq = \
np.average(lightcurve_average['transit_in']['ratio_' + band_key + name_append][:, 0],
weights=1. / (lightcurve_average['transit_in']['ratio_' + band_key + name_append][:, 1]) ** 2,
returned=True)
avg_full, avg_full_sq = \
np.average(lightcurve_average['transit_full']['ratio_' + band_key + name_append][:, 0],
weights=1. / (lightcurve_average['transit_full']['ratio_' + band_key + name_append][:, 1]) ** 2,
returned=True)
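            # note: the weighted means computed above are overwritten just below by
            # unweighted means; only the returned sums of weights are kept, giving
            # the error on each average as 1/sqrt(sum of weights)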
avg_out = \
np.average(lightcurve_average['transit_out']['ratio_' + band_key + name_append][:, 0])
avg_in = \
np.average(lightcurve_average['transit_in']['ratio_' + band_key + name_append][:, 0])
avg_full = \
np.average(lightcurve_average['transit_full']['ratio_' + band_key + name_append][:, 0])
lightcurve_average['average'][band_key + name_append] = {
'average_out': np.asarray([avg_out, 1. / np.power(avg_out_sq, 0.5)]),
'average_in': np.asarray([avg_in, 1. / np.power(avg_in_sq, 0.5)]),
'average_full': np.asarray([avg_full, 1. / np.power(avg_full_sq, 0.5)]),
}
delta_fac = \
lightcurve_average['average'][band_key + name_append]['average_full'][0] / lightcurve_average['average'][band_key + name_append]['average_out'][0]
delta_err = delta_fac * np.sqrt(
(lightcurve_average['average'][band_key + name_append]['average_out'][1]
/ lightcurve_average['average'][band_key + name_append]['average_out'][0]) ** 2
+ (lightcurve_average['average'][band_key + name_append]['average_full'][1]
/ lightcurve_average['average'][band_key + name_append]['average_full'][0]) ** 2)
lightcurve_average['average'][band_key + name_append]['delta'] = np.asarray([(1. - delta_fac) * 100., delta_err * 100.])
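            # transit absorption depth in percent: delta = (1 - F_full/F_out)*100,
            # with the error propagated in quadrature from the relative errors of
            # the two weighted averages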
pre_duration = transit_full_bins[0] - lightcurve_average['transit_out']['phase'][0]
if pre_duration > 0:
nsteps_pre = int(pre_duration / transit_full_step)
if pre_duration % transit_full_step > 0.0:
nsteps_pre += 1
else:
nsteps_pre = 0
post_duration = lightcurve_average['transit_out']['phase'][-1] - transit_full_bins[-1]
if post_duration > 0:
nsteps_post = int(post_duration / transit_full_step)
if post_duration % transit_full_step > 0.0:
nsteps_post += 1
else:
nsteps_post = 0
transit_bins = np.arange(transit_full_bins[0] - nsteps_pre * transit_full_step,
transit_full_bins[-1] + (nsteps_post + 1.1) * transit_full_step,
transit_full_step)
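    # the binning grid extends the full-transit bins with extra bins of the same
    # width before and after the transit, so that all observed phases are covered
    # (the 1.1 factor guarantees that the last edge is included by np.arange)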
lightcurve_average['binned'] = {
'observations': {
'phase': np.zeros(len(transit_bins)),
},
'transit_in': {},
'transit_full': {},
'transit_out': {},
}
for band_key in C_bands:
for name_append in append_list:
lightcurve_average['binned']['observations']['ratio_' + band_key + name_append] = np.zeros([len(transit_bins), 2])
transit_out_flag = np.zeros(len(transit_bins), dtype=bool)
transit_full_flag = np.zeros(len(transit_bins), dtype=bool)
transit_in_flag = np.zeros(len(transit_bins), dtype=bool)
n_a = 0
for nb in range(0, len(transit_bins) - 1):
sel = (lightcurve_average['observations']['phase'] >= transit_bins[nb]) \
& (lightcurve_average['observations']['phase'] < transit_bins[nb + 1])
if np.sum(sel) <= 0: continue
lightcurve_average['binned']['observations']['phase'][n_a] = np.average(
lightcurve_average['observations']['phase'][sel])
for band_key in C_bands:
for name_append in append_list:
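                # inverse-variance weighted mean within each phase bin: weights are
                # 1/sigma**2 and the error on the mean is sqrt(1/sum(weights))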
lightcurve_average['binned']['observations']['ratio_' + band_key + name_append][n_a, 0], sum_weights = np.average(
lightcurve_average['observations']['ratio_' + band_key + name_append][sel, 0],
weights=1. / lightcurve_average['observations']['ratio_' + band_key + name_append][sel, 1] ** 2,
returned=True)
lightcurve_average['binned']['observations']['ratio_' + band_key + name_append][n_a, 1] = np.sqrt(1. / sum_weights)
if np.abs(lightcurve_average['binned']['observations']['phase'][n_a]) >= \
total_transit_duration/2./planet_dict['period'][0]:
transit_out_flag[n_a] = True
elif np.abs(lightcurve_average['binned']['observations']['phase'][n_a]) >= \
full_transit_duration/2./planet_dict['period'][0]:
transit_in_flag[n_a] = True
else:
transit_full_flag[n_a] = True
n_a += 1 # bins actually computed
lightcurve_average['binned']['transit_in']['phase'] = lightcurve_average['binned']['observations']['phase'][transit_in_flag]
lightcurve_average['binned']['transit_out']['phase'] = lightcurve_average['binned']['observations']['phase'][transit_out_flag]
lightcurve_average['binned']['observations']['phase'] = lightcurve_average['binned']['observations']['phase'][:n_a]
for band_key in C_bands:
for name_append in append_list:
lightcurve_average['binned']['transit_in']['ratio_' + band_key + name_append] = \
lightcurve_average['binned']['observations']['ratio_' + band_key + name_append][transit_in_flag, :]
lightcurve_average['binned']['transit_out']['ratio_' + band_key + name_append] = \
lightcurve_average['binned']['observations']['ratio_' + band_key + name_append][transit_out_flag, :]
lightcurve_average['binned']['observations']['ratio_' + band_key + name_append] = \
lightcurve_average['binned']['observations']['ratio_' + band_key + name_append][:n_a, :]
save_to_cpickle(subroutine_name+ '_' + results_selection, lightcurve_average, config_in['output'], lines=lines_label)
def plot_spectra_lightcurve_average(config_in, night_input='', clv_rm_correction=False):
import matplotlib.pyplot as plt
if clv_rm_correction:
subroutine_name = 'spectra_lightcurve_average_clv_rm_correction'
else:
subroutine_name = 'spectra_lightcurve_average'
try:
lightcurve_average = load_from_cpickle(subroutine_name, config_in['output'])
print("{0:45s} {1:s}".format(subroutine_name, 'Plotting'))
except:
print("{0:45s} {1:s}".format(subroutine_name, 'Plot skipped'))
return
C_bands = lightcurve_average['C_bands']
print()
for band_key in C_bands:
print("Average Band: {1:s} Delta:{2:8.4f} +- {3:8.4f} [%]".format(' ', band_key,
lightcurve_average['average'][band_key][
'delta'][0],
lightcurve_average['average'][band_key][
'delta'][1]))
for band_key in C_bands:
plt.figure(figsize=(12, 6))
plt.title('Average spectra lightcurve\n {0:s}'.format(band_key))
        plt.errorbar(lightcurve_average['observations']['phase'],
                     lightcurve_average['observations']['ratio_' + band_key][:, 0]*100. - 100.,
                     yerr=lightcurve_average['observations']['ratio_' + band_key][:, 1]*100.,
                     fmt='.', c='k', alpha=0.25, label='observations')
        plt.errorbar(lightcurve_average['binned']['observations']['phase'],
                     lightcurve_average['binned']['observations']['ratio_' + band_key][:, 0]*100. - 100.,
                     yerr=lightcurve_average['binned']['observations']['ratio_' + band_key][:, 1]*100.,
                     fmt='.', c='k', alpha=1.0, label='binned observations')
plt.axvspan(-1, lightcurve_average['bins']['transit_in_bins'][0], alpha=0.25, color='green')
plt.axvspan(lightcurve_average['bins']['transit_in_bins'][-1], 1., alpha=0.25, color='green')
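        # the shaded regions mark the phases outside the total transit duration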
plt.axhline(0, c='C1')
plt.xlim(lightcurve_average['observations']['phase'][0]-0.01,
lightcurve_average['observations']['phase'][-1]+0.01)
plt.xlabel('orbital phase')
        plt.ylabel(r'$\mathcal{R}$ - 1.')
plt.legend()
plt.show()
| 17,778 | 49.652422 | 166 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_airmass_stellarRF.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_airmass_stellarRF",
"plot_telluric_airmass_stellarRF",
"compute_telluric_airmass_reference_stellarRF",
"plot_telluric_airmass_reference_stellarRF"]
subroutine_name = 'telluric_airmass_stellarRF'
def compute_telluric_airmass_stellarRF(config_in):
compute_telluric_stellarRF(config_in,
use_reference_airmass=False,
subroutine_name='telluric_airmass_stellarRF')
def compute_telluric_airmass_reference_stellarRF(config_in):
compute_telluric_stellarRF(config_in,
use_reference_airmass=True,
subroutine_name='telluric_airmass_reference_stellarRF')
def plot_telluric_airmass_reference_stellarRF(config_in, night_input):
""" Alias to simplify the configuration file"""
plot_telluric_airmass_stellarRF(config_in, night_input)
def compute_telluric_stellarRF(config_in, **kwargs):
night_dict = from_config_get_nights(config_in)
for night in night_dict:
print()
print("compute_telluric_airmass_stellarRF Night: ", night)
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
continue
except:
print()
print(" No telluric correction file found, computing now ")
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': subroutine_name
}
telluric = {
'subroutine': kwargs['subroutine_name'],
'reference_frame': 'stellar'
}
""" Reference airmass for iterative correction of airmass"""
if kwargs['use_reference_airmass']:
airmass_temp = np.zeros(lists['n_transit_in'])
for n_obs, obs in enumerate(lists['transit_in']):
# This is to ensure that airmass, berv and rvc are associated to the correct spectra
airmass_temp[n_obs] = input_data[obs]['AIRMASS']
processed['airmass_ref'] = np.average(airmass_temp)
else:
processed['airmass_ref'] = 0.000
        # There must be a more elegant way to do this, but I'm not aware of it
for obs in lists['observations']:
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
telluric[obs] = {}
""" for plotting purpose only"""
processed[obs]['wave'] = input_data[obs]['wave']
processed[obs]['e2ds'] = input_data[obs]['e2ds']
processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']
processed[obs]['flux'] = input_data[obs]['e2ds']/calib_data['blaze']/input_data[obs]['step']
processed[obs]['flux_err'] = np.sqrt(input_data[obs]['e2ds'])/calib_data['blaze']/input_data[obs]['step']
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['flux_SRF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds'],
calib_data['blaze'],
input_data['coadd']['wave'],
input_data['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'])
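            # each e2ds order is rebinned onto the common coadded 1d grid while
            # being shifted from the observer (ORF) to the stellar rest frame (SRF)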
processed[obs]['flux_SRF_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds_err'],
calib_data['blaze'],
input_data['coadd']['wave'],
input_data['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'],
is_error=True)
""" Zero or negative values are identified, flagged and substituted with another value """
processed[obs]['flux_SRF'], processed[obs]['flux_SRF_err'], processed[obs]['null'] = \
replace_values_errors(processed[obs]['flux_SRF'], processed[obs]['flux_SRF_err'], 0.0001)
"""rescaling"""
processed[obs]['flux_SRF_rescaling'], processed[obs]['flux_SRF_rescaled'], processed[obs]['flux_SRF_rescaled_err'] = \
perform_rescaling(input_data['coadd']['wave'],
processed[obs]['flux_SRF'],
processed[obs]['flux_SRF_err'],
observational_pams['wavelength_rescaling'])
processed[obs]['logI'] = np.log(processed[obs]['flux_SRF_rescaled'])
processed[obs]['logI_err'] = processed[obs]['flux_SRF_rescaled_err'] / processed[obs]['flux_SRF_rescaled']
processed['telluric'] = {}
n_coadd = np.size(input_data['coadd']['wave'])
abs_slope = np.ones(n_coadd, dtype=np.double)
line_shift = np.ones(n_coadd, dtype=np.double)
zero_point = np.ones(n_coadd, dtype=np.double)
pearson_r = np.zeros(n_coadd, dtype=np.double)
pearson_p = np.zeros(n_coadd, dtype=np.double)
airmass = np.zeros(lists['n_tellurics'], dtype=np.double)
berv = np.zeros(lists['n_tellurics'], dtype=np.double)
rvc = np.zeros(lists['n_tellurics'], dtype=np.double)
for n_obs, obs in enumerate(lists['telluric']):
# This is to ensure that airmass, berv and rvc are associated to the correct spectra
processed['telluric'][obs] = {'n_obs': n_obs}
airmass[n_obs] = observational_pams[obs]['AIRMASS']
berv[n_obs] = observational_pams[obs]['BERV']
rvc[n_obs] = observational_pams[obs]['RVC']
logi_array = np.empty([lists['n_tellurics'], n_coadd], dtype=np.double)
sigi_array = np.empty([lists['n_tellurics'], n_coadd], dtype=np.double)
for obs in lists['telluric']:
n_obs = processed['telluric'][obs]['n_obs']
logi_array[n_obs, :] = processed[obs]['logI'][:]
sigi_array[n_obs, :] = processed[obs]['logI_err'][:]
""" The user has the option to select between different approaches to
extract the telluric absorption spectrum
To-Do: move this section to a subroutine for cythonization"""
if observational_pams['linear_fit_method'] == 'linear_curve_fit':
abs_slope, zero_point = \
airmass_linear_curve_fit(airmass, logi_array, sigi_array, n_coadd)
else:
abs_slope, zero_point = \
airmass_linear_lstsq(airmass, logi_array)
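        # Beer-Lambert law: log(flux) is linear in airmass, i.e. for two frames
        # log(I_2) - log(I_1) = A * (X_2 - X_1); exp(A) is then the telluric
        # transmission spectrum for a unit airmass (A is negative at absorbed
        # wavelengths)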
telluric['stellarRF'] = {
'wave': input_data['coadd']['wave'],
'step': input_data['coadd']['step']
}
telluric['stellarRF']['spectrum'] = np.exp(abs_slope)
telluric['stellarRF']['emission'] = (telluric['stellarRF']['spectrum'] > 1.00000)
        # an explicit copy is required: slicing with [:] returns a view, and the
        # emission fix would otherwise propagate back to the original spectrum
        telluric['stellarRF']['spectrum_fixed'] = telluric['stellarRF']['spectrum'].copy()
        telluric['stellarRF']['spectrum_fixed'][telluric['stellarRF']['emission']] = 1.000
telluric['stellarRF']['spline_eval'], \
telluric['stellarRF']['spline_coeff'], \
telluric['stellarRF']['spline_knots'] = \
compute_spline(input_data['coadd']['wave'],
telluric['stellarRF']['spectrum_fixed'],
0.05)
telluric['airmass_ref'] = processed['airmass_ref']
""" Moving the spline to the observerRF in the e2ds"""
for obs in lists['observations']:
""" 1) shifting the telluric correction spline to the observer RV"""
wave_ORF = shift_wavelength_array(np.asarray(telluric['stellarRF']['spline_coeff'][0]),
-observational_pams[obs]['rv_shift_ORF2SRF_mod'])
""" 2) new spline coefficients """
tck1 = [shift_wavelength_array(np.asarray(telluric['stellarRF']['spline_coeff'][0]),
- observational_pams[obs]['rv_shift_ORF2SRF_mod']),
telluric['stellarRF']['spline_coeff'][1],
telluric['stellarRF']['spline_coeff'][2]]
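            # only the knot positions (first element of the spline tck tuple) are
            # shifted back to the observer reference frame; the B-spline
            # coefficients and degree are reused unchanged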
""" 3) computation of the spline at the location of the spectra, taking care of the regions
out of the coadded spectrum """
inside_spline = (input_data[obs]['wave'] > wave_ORF[0]) & (input_data[obs]['wave'] < wave_ORF[-1])
telluric[obs]['spline_noairmass'] = np.ones([input_data[obs]['n_orders'],
input_data[obs]['n_pixels']],
dtype=np.double)
for order in range(0, input_data[obs]['n_orders']):
                if np.sum(inside_spline[order, :]) > 0:
telluric[obs]['spline_noairmass'][order, inside_spline[order, :]] = \
sci_int.splev(input_data[obs]['wave'][order, inside_spline[order, :]], tck1)
telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
observational_pams[obs]['AIRMASS'] - processed['airmass_ref'])
""" Now a similar approach is followed for the telluric spectrum before spline fit
"""
telluric[obs]['spectrum_noairmass'] = \
rebin_1d_to_2d(input_data['coadd']['wave'],
input_data['coadd']['step'],
telluric['stellarRF']['spectrum'],
input_data[obs]['wave'],
input_data[obs]['step'],
rv_shift=-observational_pams[obs]['rv_shift_ORF2SRF_mod'],
preserve_flux=False)
telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
telluric[obs]['spectrum'] = np.power(telluric[obs]['spectrum_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
telluric[obs]['airmass_ref'] = processed['airmass_ref']
telluric[obs]['rv_shift_ORF2SRF_mod'] = observational_pams[obs]['rv_shift_ORF2SRF_mod']
save_to_cpickle('telluric_processed', processed, config_in['output'], night)
save_to_cpickle('telluric', telluric, config_in['output'], night)
print()
print("Night ", night, " completed")
def plot_telluric_airmass_stellarRF(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
print("plot_telluric_airmass_stellarRF Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
""" Retrieving the analysis"""
try:
processed = load_from_cpickle('telluric_processed', config_in['output'], night)
telluric = load_from_cpickle('telluric', config_in['output'], night)
except:
print()
print("No telluric correction, no plots")
continue
colors, cmap, line_colors = make_color_array(lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1)
cbax1 = plt.subplot(gs[:, 1])
lift_spectrum = 0.25
for i, obs in enumerate(lists['observations']):
color_array = cmap(i / len(lists['observations']))
            _, e2ds_rescaled, _ = \
perform_rescaling(processed[obs]['wave'],
processed[obs]['e2ds'],
processed[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']
e2ds_rescaled_corrected_spline = e2ds_rescaled / telluric[obs]['spline']
for order in range(0, processed[obs]['n_orders']):
if order == 0 and i==0:
ax1.plot(processed[obs]['wave'][order, :],
e2ds_rescaled[order, :],
c=color_array, lw=1, alpha=0.5, label='uncorrected')
ax1.scatter(processed[obs]['wave'][order, :],
e2ds_rescaled_corrected_spectrum[order, :],
s=1, c=np.atleast_2d(color_array), label='corrected')
else:
ax1.plot(processed[obs]['wave'][order, :],
e2ds_rescaled[order, :],
c=color_array, lw=1, alpha=0.5)
ax1.scatter(processed[obs]['wave'][order, :],
e2ds_rescaled_corrected_spectrum[order, :],
s=1, c=np.atleast_2d(color_array))
#ax1.plot(processed[obs]['wave'][order, :],
# e2ds_rescaled[order, :]+lift_spectrum,
# c=color_array, lw=1, alpha=0.5)
#ax1.scatter(processed[obs]['wave'][order, :],
# e2ds_rescaled_corrected_spline[order, :]+lift_spectrum,
# s=1, c=np.atleast_2d(color_array))
ax2.plot(processed[obs]['wave'][order, :],
telluric[obs]['spectrum'][order, :],
c=color_array)
ax2.axhline(1.00, c='k')
#ax2.plot(processed[obs]['wave'][order, :],
# telluric[obs]['spline'][order, :]+lift_spectrum,
# c=color_array)
#ax2.axhline(1.00+lift_spectrum, c='k')
#ax2.plot(input_data['coadd']['wave'],telluric['stellarRF']['spline_eval']+0.1,c='k')
#ax2.scatter(input_data['coadd']['wave'],telluric['stellarRF']['spectrum']+0.1,c='r', s=2)
ax1.legend(loc=3)
ax1.set_title('Night: ' + night)
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
| 16,023 | 43.885154 | 131 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_airmass_observerRF_chunks.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_airmass_observerRF_chunks"]
def compute_telluric_airmass_observerRF_chunks(config_in):
compute_telluric_observerRF_chunks(config_in,
n_iterations=1,
use_berv=False,
use_reference_airmass=False,
subroutine_name='compute_telluric_airmass_observerRF_chunks')
def compute_telluric_observerRF_chunks(config_in, **kwargs):
night_dict = from_config_get_nights(config_in)
for night in night_dict:
print()
print("compute_telluric_airmass_observerRF_chunks Night: ", night)
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
continue
except:
print("No telluric correction file found, computing now ")
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations"""
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': kwargs['subroutine_name'],
'n_orders': 0,
'n_pixels': 0
}
telluric = {
'subroutine': kwargs['subroutine_name'],
'reference_frame': 'observer'
}
        # There must be a more elegant way to do this, but I'm not aware of it
for obs in lists['observations']:
processed[obs] = {}
processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
perform_rescaling(input_data[obs]['wave'],
input_data[obs]['e2ds'],
input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
if processed['n_orders'] == 0:
processed['n_orders'] = input_data[obs]['orders']
processed['n_pixels'] = input_data[obs]['wave_size']
""" Reference airmass for iterative correction of airmass"""
if kwargs['use_reference_airmass']:
airmass_temp = np.zeros(lists['n_transit_in'])
for n_obs, obs in enumerate(lists['transit_in']):
# This is to ensure that airmass, berv and rvc are associated to the correct spectra
airmass_temp[n_obs] = input_data[obs]['AIRMASS']
processed['airmass_ref'] = np.average(airmass_temp)
else:
processed['airmass_ref'] = 0.000
for obs in lists['observations']:
processed[obs]['e2ds_precorrected'] = processed[obs]['e2ds_rescaled'][:]
processed[obs]['e2ds_precorrected_err'] = input_data[obs]['e2ds_err'] / processed[obs]['e2ds_rescaling']
""" for plotting purpose only"""
processed[obs]['wave'] = input_data[obs]['wave']
processed[obs]['e2ds'] = input_data[obs]['e2ds']
processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']
        for niter in range(0, kwargs['n_iterations']):
print("NITER: ", niter)
for obs in lists['telluric']:
processed[obs]['logI'] = np.log(processed[obs]['e2ds_precorrected'])
processed[obs]['logI_err'] = processed[obs]['e2ds_precorrected_err']/processed[obs]['e2ds_precorrected']
processed['telluric'] = {}
abs_slope = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
line_shift = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
zero_point = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
pearson_r = np.zeros([processed['n_orders'], processed['n_pixels']], dtype=np.double)
pearson_p = np.zeros([processed['n_orders'], processed['n_pixels']], dtype=np.double)
airmass = np.zeros(lists['n_tellurics'], dtype=np.double)
berv = np.zeros(lists['n_tellurics'], dtype=np.double)
rvc = np.zeros(lists['n_tellurics'], dtype=np.double)
for n_obs, obs in enumerate(lists['telluric']):
# This is to ensure that airmass, berv and rvc are associated to the correct spectra
processed['telluric'][obs] = {'n_obs': n_obs}
airmass[n_obs] = input_data[obs]['AIRMASS']
berv[n_obs] = input_data[obs]['BERV']
rvc[n_obs] = input_data[obs]['RVC']
            for order in range(0, processed['n_orders']):
print(" - order ", repr(order))
logi_array = np.empty([lists['n_tellurics'], processed['n_pixels']], dtype=np.double)
sigi_array = np.empty([lists['n_tellurics'], processed['n_pixels']], dtype=np.double)
for obs in lists['telluric']:
n_obs = processed['telluric'][obs]['n_obs']
logi_array[n_obs, :] = processed[obs]['logI'][order, :]
sigi_array[n_obs, :] = processed[obs]['logI_err'][order, :]
""" The user has the option to select between different approaches to
extract the telluric absorption spectrum
To-Do: move this section to a subroutine for cythonization"""
if kwargs['use_berv']:
if observational_pams['linear_fit_method'] == 'linear_curve_fit':
abs_slope[order, :], line_shift[order, :], zero_point[order, :] = \
berv_linear_curve_fit_modified(airmass, berv, logi_array, sigi_array, processed['n_pixels'])
else:
abs_slope[order, :], line_shift[order, :], zero_point[order, :] = \
berv_linear_lstsq(airmass, berv, logi_array)
else:
                    if observational_pams['linear_fit_method'] == 'linear_curve_fit':
abs_slope[order, :], zero_point[order, :] = \
airmass_linear_curve_fit(airmass, logi_array, sigi_array, processed['n_pixels'])
else:
abs_slope[order, :], zero_point[order, :] = \
airmass_linear_lstsq(airmass, logi_array)
""" Saving the outcome to dictionary """
processed['telluric']['order_'+repr(order)] = {'logi_array': logi_array, 'sigi_array': sigi_array}
processed['telluric']['spectrum_noairmass'] = np.exp(abs_slope)
for obs in lists['observations']:
""" Correction of telluric lines for the average airmass value, following Wyttenbach et al. 2015 """
processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_precorrected'] / \
np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] -
processed['airmass_ref'])
processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_precorrected_err'] / \
np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] -
processed['airmass_ref'])
for obs in lists['observations']:
# Correction of telluric lines
telluric[obs] = {}
telluric[obs]['spectrum_noairmass'] = processed['telluric']['spectrum_noairmass']
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
telluric[obs]['airmass_ref'] = processed['airmass_ref']
telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
telluric[obs]['spectrum'] = np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
telluric[obs]['spline_noairmass'] = np.ones([input_data[obs]['n_orders'],
input_data[obs]['n_pixels']],
dtype=np.double)
            for order in range(0, processed['n_orders']):
telluric[obs]['spline_noairmass'][order, :], _, _ = \
compute_spline(input_data[obs]['wave'][order, :],
telluric[obs]['spectrum_noairmass'][order, :],
0.05)
telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
telluric[obs]['telluric_corrected'] = processed[obs]['e2ds_corrected']
telluric[obs]['telluric_corrected_err'] = processed[obs]['e2ds_corrected_err']
save_to_cpickle('telluric', telluric, config_in['output'], night)
save_to_cpickle('telluric_processed', processed, config_in['output'], night)
print()
print("Night ", night, " completed")
| 10,263 | 48.346154 | 120 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_molecfit_coadd.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.telluric_molecfit_preparation import compute_telluric_molecfit_preparation
__all__ = ["compute_telluric_molecfit_coadd",
"plot_telluric_molecfit_coadd"]
subroutine_name = 'telluric_molecfit_coadd'
def compute_telluric_molecfit_coadd(config_in):
"""
Lazy workaround
:param config_in:
:param kwargs:
:return:
"""
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
molecfit_dict = from_config_get_molecfit(config_in)
compute_telluric_molecfit_preparation(config_in)
aer_version = molecfit_dict.get('aer_version', '3.8')
for night in night_dict:
instrument_name = night_dict[night]['instrument']
template_dict = instrument_dict[instrument_name]['telluric_template']
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
print(' instrument :', instrument_name)
print()
tellprep = load_from_cpickle('telluric_molecfit_preparation', config_in['output'], night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations"""
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': 'telluric_molecfit',
'n_orders': 0,
'n_pixels': 0,
}
telluric = {
'subroutine': 'telluric_molecfit',
'reference_frame': 'observer'
}
processed['airmass_ref'] = 0.000
processed['telluric'] = {}
processed['rebin'] = {}
processed['work_dir'] = tellprep['work_dir']
"""
Molecfit works on pixel grid, so we must ensure that the spectra are rebinned always on the same wavelength
scale and same wavelength step. We use local arrays for this purpose
"""
processed['rebin']['wave'] = np.arange(input_data['coadd']['wavelength_range'][0],
input_data['coadd']['wavelength_range'][1],
molecfit_dict['rebinning_step'],
dtype=np.double)
processed['rebin']['size'] = np.size(processed['rebin']['wave'])
processed['rebin']['step'] = np.ones(processed['rebin']['size'],
dtype=np.double) * molecfit_dict['rebinning_step']
processed['rebin'] = {
'wave': input_data['coadd']['wave'],
'size': input_data['coadd']['size'],
'step': input_data['coadd']['step'],
}
# TODO: fix the wave:include files
wave_include = '"'
for wli_s, wli_e in zip(tellprep['include']['vacuum'][:, 0], tellprep['include']['vacuum'][:, 1]):
wave_include = wave_include+str(wli_s)+','+str(wli_e)+','
wave_include = wave_include[:-1]+'"'
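        # quoted, comma-separated list of inclusion ranges
        # (start1,end1,start2,end2,...) in vacuum wavelengths, presumably consumed
        # as the wavelength-inclusion parameter of the molecfit_model recipe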
n_coadd = 0
n_reference = 0
texp_cumulated = 0.00
texp_total = 0.000
coadd_list = []
# Computing the total integration time
for n_obs, obs in enumerate(lists['observations']):
texp_total += input_data[obs]['EXPTIME']
print(' Writing data and configuration files for molecfit+calctrans')
print()
# There must be a more elegant way to do this, but I'm, not aware of it
for n_obs, obs in enumerate(lists['observations']):
input_data[obs]['molecfit']['aer_version'] = aer_version
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
""" e2ds spectra are rescaled and then rebinned while keeping them in the Observer Reference Frame"""
processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
perform_rescaling(input_data[obs]['wave'],
input_data[obs]['e2ds'],
input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['rebin_ORF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['e2ds_rescaled'],
calib_data['blaze'],
processed['rebin']['wave'],
processed['rebin']['step'],
preserve_flux=preserve_flux,
rv_shift=0.00)
""" This part is relative to the coadded spectrum, must be placed here because
some variables such as direcotry names must be defined before the next step
spectra are coadded to increase the SNR of the spectrum analyzed by molecfit
"""
if n_coadd == 0:
reference_name = 'coadded_{0:03d}'.format(n_reference)
reference_dirname = './' + processed['work_dir'] + '/' + reference_name + '/'
os.system('mkdir -p ' + reference_dirname)
rebin_coadd = processed[obs]['rebin_ORF'].copy()
molecfit_pams = {
'MJD': input_data[obs]['MJD'],
'UTC': input_data[obs]['UTC'],
'ELEVATION': input_data[obs]['ELEVATION'],
'HUMIDITY': input_data[obs]['HUMIDITY'],
'PRESSURE': input_data[obs]['PRESSURE'],
'TEMPERATURE_EN': input_data[obs]['TEMPERATURE_EN'],
'TEMPERATURE_M1': input_data[obs]['TEMPERATURE_M1']}
coadded_files = open(reference_dirname + reference_name + '_files.list', 'w')
coadd_list.append(reference_name)
observations_dirlist = []
observations_exelist = []
else:
rebin_coadd += processed[obs]['rebin_ORF']
molecfit_pams['MJD'] += input_data[obs]['MJD']
molecfit_pams['UTC'] += input_data[obs]['UTC']
molecfit_pams['ELEVATION'] += input_data[obs]['ELEVATION']
molecfit_pams['HUMIDITY'] += input_data[obs]['HUMIDITY']
molecfit_pams['PRESSURE'] += input_data[obs]['PRESSURE']
molecfit_pams['TEMPERATURE_EN'] += input_data[obs]['TEMPERATURE_EN']
molecfit_pams['TEMPERATURE_M1'] += input_data[obs]['TEMPERATURE_M1']
n_coadd += 1
coadded_files.write(obs + '\n')
texp_cumulated += input_data[obs]['EXPTIME']
# """ Molecfit analysis is skipped if the telluric correction has been computed already"""
# if os.path.isfile('./molecfit_'+night +'/output/'+obs+'_ORF_s1d_TAC.dat'):
# print(' molecfit+calctrans results for ' + obs + ' already available')
# continue
"""
This is the directory for MOLECFIT_CALCTRANS and MOLECFIT_CORRECT,
which is different from the one where the coadded spectrum is saved
"""
observation_dirname = './' + processed['work_dir'] + '/' + 'obs_{0:03d}'.format(n_obs) + '/'
os.system('mkdir -p ' + observation_dirname)
""" the spectrum is saved as a BinTable Fits file in a format suitable for molecfit
this is the spectrum for MOLECFIT_CALCTRANS and MOLECFIT_CORRECT, so it is saved inside
the folder with the observation name
"""
observation_name = obs
observation_tabname = obs + '_ORF_s1d.fits'
write_molecfit_input_spectrum(processed['rebin']['wave'],
processed[obs]['rebin_ORF'],
observation_dirname + observation_tabname)
observation_calctrans_parname = observation_name + '_calctrans.rc'
write_calctrans_par(observation_dirname + observation_calctrans_parname)
""" Writing the SOF files for MOLECFIT_CALCTRANS and MOLECFIT_CORRECT
For the observed spectrum
"""
observation_calctrans_sofname = obs + '_calctrans.sof'
observation_calctrans_soffile = open(observation_dirname + observation_calctrans_sofname, 'w')
observation_calctrans_soffile.write(observation_tabname+' SCIENCE\n')
observation_calctrans_soffile.write('../' + reference_name + '/MODEL_MOLECULES.fits MODEL_MOLECULES\n')
observation_calctrans_soffile.write('../' + reference_name + '/ATM_PARAMETERS.fits ATM_PARAMETERS\n')
observation_calctrans_soffile.write(
'../' + reference_name + '/BEST_FIT_PARAMETERS.fits BEST_FIT_PARAMETERS\n')
observation_calctrans_soffile.close()
""" Writing the bash script to execute MOLECFIT_CALCTRANS in the directory containing the science fits
"""
bash_file = './' + processed['work_dir'] + '/calctrans_exec_' + obs + '.source'
bash_script = open(bash_file, 'w')
bash_script.write('#!/bin/bash \n')
bash_script.write('export TMPDIR=$PWD\n')
bash_script.write('echo " " executing calctrans on ' + obs + ' \n')
bash_script.write('cd ' + observation_dirname + ' \n')
bash_script.write(molecfit_dict['esorex_exec'] + ' --recipe-config=' + observation_calctrans_parname
+ ' molecfit_calctrans ' + observation_calctrans_sofname + '> ' + obs + '_calctrans.log\n')
bash_script.write('cd $TMPDIR \n')
bash_script.close()
observations_dirlist.append(observation_dirname)
observations_exelist.append(bash_file)
processed[obs]['dir_name'] = observation_dirname
processed[obs]['tab_name'] = observation_tabname
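            # close the current coadd group once enough exposure time has been
            # accumulated, provided that enough time remains for another group,
            # or when the last observation has been reached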
if (texp_cumulated >= molecfit_dict['exptime_coadd'] and
texp_total-texp_cumulated >= molecfit_dict['exptime_coadd']) \
or n_obs == len(lists['observations'])-1:
coadded_files.close()
print(' Coadded spectrum: ', n_reference)
if os.path.exists(reference_dirname + 'TELLURIC_CORR.fits'):
print(' molecfit for ' + reference_name + ' previously completed')
print()
else:
rebin_coadd /= n_coadd
""" the spectra is saved as an ASCII file in a format suitable for molecfit """
reference_tabname = reference_name + '_ORF_s1d.fits'
write_molecfit_input_spectrum(processed['rebin']['wave'],
rebin_coadd,
reference_dirname + reference_tabname)
""" Average of the observational parameters """
for key in molecfit_pams:
molecfit_pams[key] /= n_coadd
molecfit_pams['GEOELEV'] = input_data[obs]['GEOELEV']
molecfit_pams['GEOLONG'] = input_data[obs]['GEOLONG']
molecfit_pams['GEOLAT'] = input_data[obs]['GEOLAT']
reference_molecfit_parname = reference_name + '_molecfit.rc'
write_molecfit_par(reference_dirname + reference_molecfit_parname,
wave_include,
input_data[obs]['molecfit'],
molecfit_pams)
reference_calctrans_parname = reference_name + '_calctrans.rc'
write_calctrans_par(reference_dirname + reference_calctrans_parname)
reference_molecfit_sofname = reference_name + '_molecfit.sof'
reference_molecfit_soffile = open(reference_dirname + reference_molecfit_sofname, 'w')
reference_molecfit_soffile.write(reference_tabname + ' SCIENCE\n')
reference_molecfit_soffile.close()
""" Writing the SOF files for MOLECFIT_CALCTRANS and MOLECFIT_CORRECT
For the observed spectrum
"""
reference_calctrans_sofname = obs + '_calctrans.sof'
reference_calctrans_soffile = open(reference_dirname + reference_calctrans_sofname, 'w')
reference_calctrans_soffile.write(reference_tabname+' SCIENCE\n')
reference_calctrans_soffile.write('MODEL_MOLECULES.fits MODEL_MOLECULES\n')
reference_calctrans_soffile.write('ATM_PARAMETERS.fits ATM_PARAMETERS\n')
reference_calctrans_soffile.write('BEST_FIT_PARAMETERS.fits BEST_FIT_PARAMETERS\n')
reference_calctrans_soffile.close()
""" Writing the bash script to execute MOLECFIT_MODEL and MOLECFIT_CALCTRANS in the directory containing the coadded fits
"""
bash_file = './' + processed['work_dir'] + '/molecfit_exec_' + reference_name + '.source'
bash_script = open(bash_file, 'w')
bash_script.write('#!/bin/bash \n')
bash_script.write('export TMPDIR=$PWD\n')
bash_script.write('echo " " executing molecfit on ' + reference_name + ' \n')
bash_script.write('cd ' + reference_dirname + ' \n')
bash_script.write(molecfit_dict['esorex_exec'] + ' --recipe-config=' + reference_molecfit_parname
+ ' molecfit_model ' + reference_molecfit_sofname + '> ' + obs + '_molecfit.log\n')
bash_script.write(molecfit_dict['esorex_exec'] + ' --recipe-config=' + reference_calctrans_parname
+ ' molecfit_calctrans ' + reference_calctrans_sofname + '> ' + obs + '_calctrans.log\n')
bash_script.write('cd $TMPDIR \n')
bash_script.close()
os.system('. ' + bash_file)
for dirname, exename in zip(observations_dirlist, observations_exelist):
if os.path.exists(dirname + 'TELLURIC_CORR.fits'):
print(' molecfit for ' + dirname + ' previously completed')
print()
else:
os.system('. ' + exename)
n_coadd = 0
n_reference += 1
texp_total -= texp_cumulated
texp_cumulated = 0.0
print()
for n_obs, obs in enumerate(lists['observations']):
telluric[obs] = {}
observation_dirname = processed[obs]['dir_name']
print(' Telluric correction for ', obs, 'retrieved from ', observation_dirname + 'TELLURIC_CORR.fits')
""" Loading the telluric spectrum from the output directory of molecfit """
corr_fits = fits.open(observation_dirname + 'TELLURIC_CORR.fits')
# orig_fits = fits.open(observation_dirname + observation_tabname)
telluric_molecfit = corr_fits[1].data
""" rebinning onto the e2ds wave scale"""
if molecfit_dict.get('fix_telluric', True):
print(' fix_telluric applied - temporary workaround for line at 5885.97 A [ORF]')
line_boundaries = [5885.74, 5886.21]
sel = (processed['rebin']['wave'] > line_boundaries[0]) \
& (processed['rebin']['wave'] < line_boundaries[1])
tell_cont = np.amax(telluric_molecfit[sel])
telluric_molecfit[sel] = (telluric_molecfit[sel] - tell_cont) / 2.0 + tell_cont
telluric[obs]['spectrum'] = \
rebin_1d_to_2d(processed['rebin']['wave'],
processed['rebin']['step'],
telluric_molecfit,
input_data[obs]['wave'],
input_data[obs]['step'],
preserve_flux=False)
            # replace NaN/Inf values with 1.0 (no correction); the except branch
            # covers older numpy versions without the nan/posinf/neginf keywords
            try:
                telluric[obs]['spectrum'] = np.nan_to_num(telluric[obs]['spectrum'],
                                                          nan=1.0, posinf=1.0, neginf=1.0)
except:
temp = ~(np.isfinite(telluric[obs]['spectrum']))
telluric[obs]['spectrum'][temp] = 1.0
sel = telluric[obs]['spectrum'] < 0.0001
telluric[obs]['spectrum'][sel] = 1.0
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
" for compatibilty to some plots, even if it doesn't make any sense"
telluric[obs]['airmass_ref'] = 0.000
telluric[obs]['spectrum_noairmass'] = np.power(telluric[obs]['spectrum'],
telluric[obs]['airmass_ref'] - input_data[obs]['AIRMASS'])
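            # note: with airmass_ref = 0 the exponent reduces to -AIRMASS; this
            # quantity is computed only for compatibility with the diagnostics of
            # the airmass-based telluric subroutines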
telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
            # we just copy the spectrum, since it is a model itself
telluric[obs]['spline'] = telluric[obs]['spectrum'].copy()
processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / telluric[obs]['spectrum']
processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / telluric[obs]['spectrum']
save_to_cpickle('telluric', telluric, config_in['output'], night)
save_to_cpickle('telluric_processed', processed, config_in['output'], night)
print()
print("Night ", night, " completed")
def plot_telluric_molecfit_coadd(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
# plt.scatter(rescaling_array, computed_std, c='C0', zorder=1)
# plt.scatter(sel_factor, sel_stdev, c='C1', zorder=2)
# plt.plot(rescaling_array, np.polyval(coeff, rescaling_array))
# plt.plot(rescaling_array, 2*rescaling_array*coeff[0] + coeff[1] )
# plt.plot()
print("plot_telluric_molecfit_coadd Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
""" Retrieving the analysis"""
try:
processed = load_from_cpickle('telluric_processed', config_in['output'], night)
telluric = load_from_cpickle('telluric', config_in['output'], night)
except:
print()
print("No telluric correction, no plots")
continue
input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
colors, cmap, line_colors = make_color_array(lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1)
cbax1 = plt.subplot(gs[:, 1])
lift_spectrum = 0.25
for i, obs in enumerate(lists['observations']):
color_array = cmap(i / len(lists['observations']))
for order in range(0, processed[obs]['n_orders']):
if order == 0 and i == 0:
ax1.plot(input_data[obs]['wave'][order, :],
processed[obs]['e2ds_rescaled'][order, :],
c=color_array, lw=1, alpha=0.5, label='uncorrected')
ax1.scatter(input_data[obs]['wave'][order, :],
processed[obs]['e2ds_corrected'][order, :],
s=1, c=np.atleast_2d(color_array), label='corrected')
else:
ax1.plot(input_data[obs]['wave'][order, :],
processed[obs]['e2ds_rescaled'][order, :],
c=color_array, lw=1, alpha=0.5)
ax1.scatter(input_data[obs]['wave'][order, :],
processed[obs]['e2ds_corrected'][order, :],
s=1, c=np.atleast_2d(color_array))
# ax1.plot(processed[obs]['wave'][order, :],
# e2ds_rescaled[order, :]+lift_spectrum,
# c=color_array, lw=1, alpha=0.5)
# ax1.scatter(processed[obs]['wave'][order, :],
# e2ds_rescaled_corrected_spline[order, :]+lift_spectrum,
# s=1, c=np.atleast_2d(color_array))
ax2.plot(input_data[obs]['wave'][order, :],
telluric[obs]['spectrum'][order, :],
c=color_array)
ax2.axhline(1.00, c='k')
# ax2.plot(processed[obs]['wave'][order, :],
# telluric[obs]['spline'][order, :]+lift_spectrum,
# c=color_array)
# ax2.axhline(1.00+lift_spectrum, c='k')
# ax2.plot(input_data['coadd']['wave'],telluric['stellarRF']['spline_eval']+0.1,c='k')
# ax2.scatter(input_data['coadd']['wave'],telluric['stellarRF']['spectrum']+0.1,c='r', s=2)
ax1.legend(loc=3)
ax1.set_title('Night: ' + night)
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
try:
instrument = night_dict[night]['instrument']
comparison_file = config_in['instruments'][instrument]['telluric_comparison']
comparison_data = np.genfromtxt(comparison_file, skip_header=1)
if comparison_data[0, 0] < 1000.0:
nm2Ang = 10.
else:
nm2Ang = 1.
ax1.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
ax2.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
except:
pass
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
| 23,722 | 45.976238 | 141 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_observerRF_skycalc.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.eso_skycalc_cli import get_eso_sckycalc_harps
__all__ = ["compute_telluric_observerRF_skycalc", "plot_telluric_observerRF_skycalc"]
def compute_telluric_observerRF_skycalc(config_in):
night_dict = from_config_get_nights(config_in)
for night in night_dict:
print()
print("compute_telluric_airmass_observerRF Night: ", night)
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
continue
except:
print("No telluric correction file found, computing now ")
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations"""
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': 'telluric_observerRF_skycalc',
'n_orders': 0,
'n_pixels': 0
}
telluric = {
'subroutine': 'telluric_observerRF_skycalc',
'reference_frame': 'observer'
}
        # There must be a more elegant way to do this, but I'm not aware of it
for obs in lists['observations']:
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
""" for plotting purpose only"""
processed[obs]['wave'] = input_data[obs]['wave']
processed[obs]['e2ds'] = input_data[obs]['e2ds']
processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']
processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
perform_rescaling(input_data[obs]['wave'],
input_data[obs]['e2ds'],
input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
if processed['n_orders'] == 0:
processed['n_orders'] = input_data[obs]['orders']
processed['n_pixels'] = input_data[obs]['wave_size']
""" Reference airmass for iterative correction of airmass"""
        # reference airmass set to zero, as in the other telluric subroutines;
        # it is required by the correction and by the telluric dictionary below
        processed['airmass_ref'] = 0.000
        telluric['sky_template'] = {}
instrument = night_dict[night]['instrument']
for n_obs, obs in enumerate(lists['transit_in']):
if n_obs >= lists['n_transit_in']/2.:
obs_ref = obs
break
telluric['sky_template']['ref_observation'] = obs_ref
if instrument == 'HARPS':
telluric['sky_template']['use_eso_skycalc'] = True
telluric['sky_template']['ref_airmass'] = input_data[obs_ref]['AIRMASS']
telluric['sky_template']['data'] = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
else:
telluric_model_file = config_in['instruments'][instrument]['telluric_model']
telluric['sky_template']['use_eso_skycalc'] = False
telluric['sky_template']['ref_airmass'] = 1.00000
telluric['sky_template']['data'] = fits.open(telluric_model_file)
for obs in lists['observations']:
processed[obs]['e2ds_precorrected'] = processed[obs]['e2ds_rescaled'][:]
processed[obs]['e2ds_precorrected_err'] = input_data[obs]['e2ds_err'] / processed[obs]['e2ds_rescaling']
        for niter in range(0, 1):
print("NITER: ", niter)
for obs in lists['telluric']:
processed[obs]['logI'] = np.log(processed[obs]['e2ds_precorrected'])
processed[obs]['logI_err'] = processed[obs]['e2ds_precorrected_err']/processed[obs]['e2ds_precorrected']
processed['telluric'] = {}
abs_slope = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
line_shift = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
zero_point = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
pearson_r = np.zeros([processed['n_orders'], processed['n_pixels']], dtype=np.double)
pearson_p = np.zeros([processed['n_orders'], processed['n_pixels']], dtype=np.double)
airmass = np.zeros(lists['n_tellurics'], dtype=np.double)
berv = np.zeros(lists['n_tellurics'], dtype=np.double)
rvc = np.zeros(lists['n_tellurics'], dtype=np.double)
for n_obs, obs in enumerate(lists['telluric']):
# This is to ensure that airmass, berv and rvc are associated to the correct spectra
processed['telluric'][obs] = {'n_obs': n_obs}
airmass[n_obs] = input_data[obs]['AIRMASS']
berv[n_obs] = input_data[obs]['BERV']
rvc[n_obs] = input_data[obs]['RVC']
for order in range(0, processed['n_orders']):
logi_array = np.empty([lists['n_tellurics'], processed['n_pixels']], dtype=np.double)
sigi_array = np.empty([lists['n_tellurics'], processed['n_pixels']], dtype=np.double)
for obs in lists['telluric']:
n_obs = processed['telluric'][obs]['n_obs']
logi_array[n_obs, :] = processed[obs]['logI'][order, :]
sigi_array[n_obs, :] = processed[obs]['logI_err'][order, :]
""" The user has the option to select between different approaches to
extract the telluric absorption spectrum
To-Do: move this section to a subroutine for cythonization"""
                if observational_pams['linear_fit_method'] == 'linear_curve_fit':
abs_slope[order, :], line_shift[order, :], zero_point[order, :] = \
berv_linear_curve_fit_modified(airmass, berv, logi_array, sigi_array, processed['n_pixels'])
else:
abs_slope[order, :], line_shift[order, :], zero_point[order, :] = \
berv_linear_lstsq(airmass, berv, logi_array)
""" Saving the outcome to dictionary """
processed['telluric']['order_'+repr(order)] = {'logi_array': logi_array, 'sigi_array': sigi_array}
if telluric['sky_template']['use_eso_skycalc']:
wave_model, step_model, tran_model, terr_model = \
get_eso_sckycalc_harps(obs_ref,
[input_data[obs_ref]['wave'][0], input_data[obs_ref]['wave'][-1]],
input_data[obs_ref]['RA'],
input_data[obs_ref]['DEC'],
night, config_in['output'])
tran_model_rebinned = \
rebin_1d_to_1d(wave_model,
step_model,
tran_model,
input_data[obs_ref]['wave'],
input_data[obs_ref]['step'],
preserve_flux=False)
telluric['sky_template']['data'][order, :] = \
np.power(tran_model_rebinned,
1./telluric['sky_template']['ref_airmass'])
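                    # the downloaded transmission is rescaled to unit airmass by
                    # taking the (1/ref_airmass)-th power (Beer-Lambert law)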
processed['telluric']['spectrum_noairmass'] = np.exp(abs_slope)
for obs in lists['observations']:
""" Correction of telluric lines for the average airmass value, following Wyttenbach et al. 2015 """
processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_precorrected'] / \
np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] -
processed['airmass_ref'])
processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_precorrected_err'] / \
np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] -
processed['airmass_ref'])
for obs in lists['observations']:
# Correction of telluric lines
telluric[obs] = {}
telluric[obs]['spectrum_noairmass'] = processed['telluric']['spectrum_noairmass']
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
telluric[obs]['airmass_ref'] = processed['airmass_ref']
telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
telluric[obs]['spectrum'] = np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
telluric[obs]['spline_noairmass'] = np.ones([input_data[obs]['n_orders'],
input_data[obs]['n_pixels']],
dtype=np.double)
for order in range(0, processed['n_orders']):
telluric[obs]['spline_noairmass'][order, :], _, _ = \
compute_spline(input_data[obs]['wave'][order, :],
telluric[obs]['spectrum_noairmass'][order, :],
0.05)
telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
telluric[obs]['telluric_corrected'] = processed[obs]['e2ds_corrected']
telluric[obs]['telluric_corrected_err'] = processed[obs]['e2ds_corrected_err']
save_to_cpickle('telluric', telluric, config_in['output'], night)
save_to_cpickle('telluric_processed', processed, config_in['output'], night)
print()
print("Night ", night, " completed")
def plot_telluric_observerRF_skycalc(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
    if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
print("plot_telluric_airmass_stellarRF Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
""" Retrieving the analysis"""
try:
processed = load_from_cpickle('telluric_processed', config_in['output'], night)
telluric = load_from_cpickle('telluric', config_in['output'], night)
except:
print()
print("No telluric correction, no plots")
continue
colors, cmap, line_colors = make_color_array(lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1)
cbax1 = plt.subplot(gs[:, 1])
lift_spectrum = 0.25
for i, obs in enumerate(lists['observations']):
            _, e2ds_rescaled, _ = \
perform_rescaling(processed[obs]['wave'],
processed[obs]['e2ds'],
processed[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']
for order in range(0, processed[obs]['n_orders']):
ax1.plot(processed[obs]['wave'][order, :],
e2ds_rescaled[order, :],
c=line_colors[i], lw=1, alpha=0.5)
ax1.scatter(processed[obs]['wave'][order, :],
e2ds_rescaled_corrected_spectrum[order, :],
s=1, c=line_colors[i])
ax2.plot(processed[obs]['wave'][order, :],
telluric[obs]['spectrum'][order, :],
c=line_colors[i])
ax2.axhline(1.00, c='k')
ax1.legend(loc=3)
ax1.set_title('Night: ' + night)
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
obs_ref = telluric['sky_template']['ref_observation']
for order in range(0, processed[obs]['n_orders']):
ax2.scatter(processed[obs_ref]['wave'][order, :], telluric[obs_ref]['spectrum_noairmass'][order, :], s=2, c='C0', zorder=1000)
ax2.plot(processed[obs_ref]['wave'][order, :], telluric['sky_template']['data'][order, :], c='C0', zorder=1000)
ax1.plot(processed[obs_ref]['wave'][order, :], telluric['sky_template']['data'][order, :], c='C0', zorder=1000)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show() | 14,532 | 46.338762 | 138 | py |
SLOPpy | SLOPpy-main/SLOPpy/clv_rm_models_lines.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.constants import *
from SLOPpy.subroutines.kepler_exo import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.math_functions import *
from astropy.convolution import Gaussian1DKernel, convolve
__all__ = ['compute_clv_rm_models_lines', 'plot_clv_rm_models_lines']
subroutine_name = 'clv_rm_models_lines'
def compute_clv_rm_models_lines(config_in, lines_label):
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
planet_dict = from_config_get_planet(config_in)
star_dict = from_config_get_star(config_in)
clv_rm_dict = from_config_get_clv_rm(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
line_iter_dict = spectral_lines[lines_label]
clv_rm_correction = line_iter_dict.get('clv_rm_correction', True)
if not clv_rm_correction:
return
# wave_extension: additional range in wavelength added to avoid convolution
# problems at the side of the spectrum
wave_extension = 5.0
# un-convolved portion of the spectrum given by range_boundaries +-
# (wave_extension - wave_fix_convo)
wave_fix_convo = 1.0
# Added back-compatibility to old or "wrong" keys
norm_dict = line_iter_dict.get('normalization', clv_rm_dict.get('normalization', {}))
norm_pams = {}
norm_pams['model_poly_degree'] = norm_dict.get('model_poly_degree', 2)
norm_pams['spectra_poly_degree'] = norm_dict.get('spectra_poly_degree', 2)
norm_pams['lower_threshold'] = norm_dict.get('lower_threshold', 0.950)
norm_pams['percentile_selection'] = norm_dict.get('percentile_selection', 10)
try:
synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])
#if not config_in['settings'].get('full_output', False):
# for night in night_dict:
# clv_rm_models = load_from_cpickle(
# 'clv_rm_models', config_in['output'], night)
print("{0:45s} {1:s}".format(
subroutine_name, 'Retrieved'))
except (FileNotFoundError, IOError):
print("{0:45s} {1:s}".format(
subroutine_name, 'Computing'))
print()
"""
Loading the spectral synthesis results, at the moment only SME output is supported.
Properties of the synthesis data files
- limb_angles: this is an input to SME, so it is specific on how the synthesis has been performed
- spectra: stellar spectrum as a function of the limb angle, sampled near the spectral lines
- model: integrated spectrum of the star
"""
synthesis_data_limb_angles = np.genfromtxt(
clv_rm_dict['synthesis_files'] + '_muvals.txt', dtype=np.double)
synthesis_data_spectra = np.genfromtxt(
clv_rm_dict['synthesis_files'] + '_spectra.txt', dtype=np.double)
synthesis_data_model = np.genfromtxt(
clv_rm_dict['synthesis_files'] + '_model.txt', dtype=np.double)
synthesis = {
'surface': {
'wave': synthesis_data_spectra[:, 0],
'flux': synthesis_data_spectra[:, 1:],
'n_mu': np.size(synthesis_data_limb_angles),
'mu': synthesis_data_limb_angles
},
'total': {
'wave': synthesis_data_model[:, 0],
'norm': synthesis_data_model[:, 1],
}
}
""" Setting up the array for model computation """
synthesis['total']['step'] = synthesis['total']['wave'] * 0.0
synthesis['total']['step'][1:] = synthesis['total']['wave'][1:] - \
synthesis['total']['wave'][:-1]
synthesis['total']['step'][0] = synthesis['total']['step'][1]
synthesis['surface']['step'] = synthesis['surface']['wave'] * 0.0
synthesis['surface']['step'][1:] = synthesis['surface']['wave'][1:] - \
synthesis['surface']['wave'][:-1]
synthesis['surface']['step'][0] = synthesis['surface']['step'][1]
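# The step arrays above store the local wavelength bin width, with the first
# element copied from its neighbour. A hedged, equivalent sketch using
# np.diff on toy values (REPL snippet, not used by the pipeline):
#   import numpy as np
#   wave = np.array([5000.00, 5000.01, 5000.03, 5000.06])
#   step = np.empty_like(wave)
#   step[1:] = np.diff(wave)
#   step[0] = step[1]
#   print(step)   # -> [0.01 0.01 0.02 0.03]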
synthesis['surface']['wave_out'] = np.arange(synthesis['surface']['wave'][0],
synthesis['surface']['wave'][-1],
clv_rm_dict['rebinning_step'])
synthesis['surface']['size_out'] = np.size(
synthesis['surface']['wave_out'], axis=0)
synthesis['surface']['step_out'] = np.ones(
synthesis['surface']['size_out']) * clv_rm_dict['rebinning_step']
synthesis['total']['norm_out'] = rebin_1d_to_1d(synthesis['total']['wave'],
synthesis['total']['step'],
synthesis['total']['norm'],
synthesis['surface']['wave_out'],
synthesis['surface']['step_out'],
method='exact_flux',
preserve_flux=False)
""" Check if the number of spectra corresponds to the number of limb angle values """
if np.size(synthesis['surface']['flux'], axis=1) != synthesis['surface']['n_mu']:
print('ERROR in loading the stellar spectra')
"""
Setting up the grid of stellar spectra for the CLV and RM computation
odd number of points to include the zero value
"""
star_grid = {
'n_grid': clv_rm_dict['n_gridpoints'],
'half_grid': int((clv_rm_dict['n_gridpoints'] - 1) / 2)
}
""" Coordinates of the centers of each grid cell (add offset) """
star_grid['xx'] = np.linspace(-1.0000000000000, 1.0000000000000,
star_grid['n_grid'], dtype=np.double)
star_grid['xc'], star_grid['yc'] = np.meshgrid(
star_grid['xx'], star_grid['xx'], indexing='xy')
# check the Note section of the wiki page of meshgrid
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.meshgrid.html
""" Distance of each grid cell from the center of the stellar disk """
star_grid['rc'] = np.sqrt(star_grid['xc'] ** 2 + star_grid['yc'] ** 2)
# Must avoid negative numbers inside the square root
star_grid['inside'] = star_grid['rc'] < 1.0000000000000
# Must avoid negative numbers inside the square root
star_grid['outside'] = star_grid['rc'] >= 1.00000000000000
""" Determine the mu angle for each grid cell, as a function of radius. """
star_grid['mu'] = np.zeros([star_grid['n_grid'], star_grid['n_grid']],
dtype=np.double) # initialization of the matrix with the mu values
star_grid['mu'][star_grid['inside']] = np.sqrt(
1. - star_grid['rc'][star_grid['inside']] ** 2)
""" 2.2 Determine the Doppler shift to apply to the spectrum of each grid cell, from Cegla+2015 """
star_grid['x_ortho'] = star_grid['xc'] * np.cos(star_dict['lambda'][0] * deg2rad) \
- star_grid['yc'] * np.sin(
star_dict['lambda'][0] * deg2rad) # orthogonal distances from the spin-axis
star_grid['y_ortho'] = star_grid['xc'] * np.sin(star_dict['lambda'][0] * deg2rad) \
+ star_grid['yc'] * np.cos(star_dict['lambda'][0] * deg2rad)
star_grid['r_ortho'] = np.sqrt(
star_grid['x_ortho'] ** 2 + star_grid['y_ortho'] ** 2)
star_grid['z_ortho'] = np.zeros([star_grid['n_grid'], star_grid['n_grid']],
dtype=np.double) # initialization of the matrix
star_grid['z_ortho'][star_grid['inside']] = np.sqrt(
1. - star_grid['r_ortho'][star_grid['inside']] ** 2)
""" rotate the coordinate system around the x_ortho axis by an agle: """
star_grid['beta'] = (np.pi / 2.) - \
star_dict['inclination'][0] * deg2rad
""" orthogonal distance from the stellar equator """
star_grid['yp_ortho'] = star_grid['z_ortho'] * np.sin(star_grid['beta']) + star_grid['y_ortho'] * np.cos(
star_grid['beta'])
""" stellar rotational velocity for a given position """
star_grid['v_star'] = star_grid['x_ortho'] * star_dict['vsini'][0] * (
1. - star_dict['alpha'][0] * star_grid['yp_ortho'] ** 2)
# Null velocity for points outside the stellar surface
star_grid['v_star'][star_grid['outside']] = 0.0
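# Sketch of the velocity field built above (Cegla+2015): rotate the sky-plane
# coordinates by the projected obliquity, rotate by the inclination, then
# scale by vsini with a quadratic differential-rotation term. All values
# below are hypothetical (REPL snippet, not used by the pipeline):
#   import numpy as np
#   lam, inc = np.radians(30.0), np.radians(85.0)   # assumed geometry
#   vsini, alpha = 5.0, 0.1                         # assumed rotation
#   xc, yc = 0.5, 0.2                               # one cell on the disk
#   x_o = xc * np.cos(lam) - yc * np.sin(lam)
#   y_o = xc * np.sin(lam) + yc * np.cos(lam)
#   z_o = np.sqrt(1.0 - x_o**2 - y_o**2)
#   yp = z_o * np.sin(np.pi/2 - inc) + y_o * np.cos(np.pi/2 - inc)
#   print(x_o * vsini * (1.0 - alpha * yp**2))      # local radial velocity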
""" Associate a synthetic spectrum to each cell """
""" recomputation of spectra_mu - most likely it has been deleted from the
output file
"""
star_grid['spectra_mu'] = [[0] * star_grid['n_grid']
for i in range(star_grid['n_grid'])]
for x in range(0, star_grid['n_grid']):
for y in range(0, star_grid['n_grid']):
if star_grid['outside'][y, x]:
continue
index_closer = np.abs(
synthesis['surface']['mu'] - star_grid['mu'][y, x]).argmin() # take the index of the closer value
if star_grid['mu'][y, x] in synthesis['surface']['mu']:
star_grid['spectra_mu'][x][y] = synthesis['surface']['flux'][:, index_closer]
continue
elif index_closer == synthesis['surface']['n_mu'] - 1 or \
synthesis['surface']['mu'][index_closer] > star_grid['mu'][y, x]:
mu_ind0 = index_closer - 1
mu_ind1 = index_closer
else:
mu_ind0 = index_closer
mu_ind1 = index_closer + 1
diff_mu = synthesis['surface']['mu'][mu_ind1] - \
synthesis['surface']['mu'][mu_ind0]
star_grid['spectra_mu'][x][y] = synthesis['surface']['flux'][:, mu_ind0] \
+ (star_grid['mu'][y, x] - synthesis['surface']['mu'][mu_ind0]) / diff_mu \
* (synthesis['surface']['flux'][:, mu_ind1]
- synthesis['surface']['flux'][:, mu_ind0])
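# The branchy block above is a manual linear interpolation between the two
# limb angles bracketing mu. For a single wavelength sample it matches
# np.interp; a toy check (REPL snippet, not used by the pipeline):
#   import numpy as np
#   mu_grid = np.array([0.1, 0.5, 1.0])
#   flux_at_mu = np.array([0.6, 0.8, 1.0])        # hypothetical intensities
#   print(np.interp(0.75, mu_grid, flux_at_mu))   # -> 0.9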
""" Computation of the continuum level (total flux is already normalized)"""
star_grid['continuum'] = [[0] * star_grid['n_grid']
for i in range(star_grid['n_grid'])]
spectral_window = ((synthesis['surface']['wave'] > clv_rm_dict['continuum_range'][0]) &
(synthesis['surface']['wave'] < clv_rm_dict['continuum_range'][1]))
for x in range(0, star_grid['n_grid']):
for y in range(0, star_grid['n_grid']):
if star_grid['outside'][y, x]:
continue
star_grid['continuum'][x][y] = np.median(
star_grid['spectra_mu'][x][y][spectral_window])
star_grid['continuum_level'] = np.sum(star_grid['continuum'])
"""
Setting up the grid for the rescaling factor of the planetary radius
"""
try:
radius_grid = np.arange(clv_rm_dict['radius_factor'][0],
clv_rm_dict['radius_factor'][1] +
clv_rm_dict['radius_factor'][2],
clv_rm_dict['radius_factor'][2])
except KeyError:
radius_grid = np.arange(0.5, 2.6, 0.1)
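# Note the endpoint handling above: np.arange excludes the stop value, so the
# upper bound is padded by one step to keep the last radius factor. A toy
# check (REPL snippet, not used by the pipeline):
#   import numpy as np
#   lo, hi, dr = 0.5, 2.5, 0.5
#   print(np.arange(lo, hi, dr)[-1])        # -> 2.0, endpoint lost
#   print(np.arange(lo, hi + dr, dr)[-1])   # -> 2.5, endpoint kept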
""" CLV + RM model computation is performed only on the wavelength range of
interest, with the addition of a few Angstroms """
wave_selection = (synthesis['surface']['wave_out'] > line_iter_dict['range'][0]-wave_extension) \
& (synthesis['surface']['wave_out'] < line_iter_dict['range'][1]+wave_extension)
for night in night_dict:
""" Retrieving the list of observations"""
print()
print("compute_CLV_RM_models for lines {0:s}, Night: {1:s}".format(lines_label, night))
try:
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night, lines_label)
continue
except (FileNotFoundError, IOError):
print()
print(" No CLV & RM correction files found for lines {0:s}, Night: {1:s} , computing now".format(lines_label, night))
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle(
'observational_pams', config_in['output'], night)
instrument = night_dict[night]['instrument']
clv_rm_models = {
'common': {
'wave': synthesis['surface']['wave_out'][wave_selection],
'step': synthesis['surface']['step_out'][wave_selection],
'norm': synthesis['total']['norm_out'][wave_selection],
'size': int(np.sum(wave_selection)),
'continuum_level': star_grid['continuum_level'],
'radius_grid': radius_grid,
'n_radius_grid': len(radius_grid)
}
}
clv_rm_models['common']['convolution_dlambda'] = \
np.median(clv_rm_models['common']['wave']) / \
instrument_dict[instrument]['resolution']
clv_rm_models['common']['convolution_sigma'] = \
clv_rm_models['common']['convolution_dlambda'] / \
np.median(clv_rm_models['common']['step'])
gaussian = Gaussian1DKernel(
stddev=clv_rm_models['common']['convolution_sigma'])
clv_rm_models['common']['norm_convolved'] = convolve(
clv_rm_models['common']['norm'], gaussian)
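# The two quantities above convert the resolving power R into a Gaussian
# width in pixels of the model grid: dlambda = lambda / R, then
# sigma_pix = dlambda / step. A hedged numerical check with assumed values
# (REPL snippet, not used by the pipeline):
#   wave_c, R, step = 5890.0, 115000.0, 0.01   # Angstrom, R, grid step
#   dlambda = wave_c / R
#   print(dlambda, dlambda / step)             # ~0.0512 A, ~5.1 pixels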
""" Fixing border effect (we took already wave_extension angstrom outside of the
actual range, so doing it this way is fine)"""
wave_fix = wave_extension - wave_fix_convo
wave_fix_convolution = (clv_rm_models['common']['wave'] < line_iter_dict['range'][0]-wave_fix) | (clv_rm_models['common']['wave'] > line_iter_dict['range'][1]+wave_fix)
clv_rm_models['common']['norm_convolved'][wave_fix_convolution] = clv_rm_models['common']['norm'][wave_fix_convolution]
#import matplotlib.pyplot as plt
#plt.plot(clv_rm_models['common']['wave'], clv_rm_models['common']['norm'], c='C1')
#plt.plot(clv_rm_models['common']['wave'], clv_rm_models['common']['norm_convolved'], c='C2')
#plt.show()
#quit()
"""
Computation of the first derivative, useful to identify the
continuum level. This method is prone to errors for
observational data, but it's quite robust for synthetic spectra
if jumps in wavelength are small
"""
clv_rm_models['common']['norm_convolved_derivative'] = \
first_derivative(clv_rm_models['common']['wave'],
clv_rm_models['common']['norm_convolved'])
wave_fix = wave_extension - wave_fix_convo*2
exclude_borders = (clv_rm_models['common']['wave'] > line_iter_dict['range'][0]+wave_fix) & (clv_rm_models['common']['wave'] < line_iter_dict['range'][1]-wave_fix)
print(' Range for continuum normalization: ',line_iter_dict['range'][0]-wave_fix, line_iter_dict['range'][1]+wave_fix)
# Using only the values of the derivative within the selected percentile around zero
cont_10perc = np.percentile(np.abs(clv_rm_models['common']['norm_convolved_derivative']), norm_pams['percentile_selection'])
clv_rm_models['common']['norm_convolved_bool'] = (np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc) \
& (exclude_borders) \
& (clv_rm_models['common']['norm_convolved']> norm_pams['lower_threshold'])
print(' Number of points within the selected percentile: {0:10.0f}'.format(np.sum((np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc))))
print(' Number of points included in restricted borders: {0:10.0f}'.format(np.sum(exclude_borders)))
print(' Number of points above threshold: {0:10.0f}'.format(np.sum( (clv_rm_models['common']['norm_convolved']> norm_pams['lower_threshold']))))
norm_convolved_bool = (np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc) \
& (exclude_borders) \
& (clv_rm_models['common']['norm_convolved']> norm_pams['lower_threshold'])
if np.sum(norm_convolved_bool) < np.sum(exclude_borders)/50:
print(' Lower threshold decreased to 80% of its value to allow point selection ', norm_pams['lower_threshold']*0.80)
clv_rm_models['common']['norm_convolved_bool'] = (np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc) \
& (exclude_borders) & (clv_rm_models['common']['norm_convolved']> norm_pams['lower_threshold']*0.80)
else:
clv_rm_models['common']['norm_convolved_bool'] = norm_convolved_bool
print(' Number of points for continuum normalization: {0:10.0f}'.format(np.sum(clv_rm_models['common']['norm_convolved_bool'])))
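# Continuum identification above relies on flat regions of a noiseless
# synthetic spectrum having a near-zero first derivative: keep the smallest
# |derivative| percentile, then require the flux to sit above a threshold.
# A minimal sketch on a toy Gaussian line (REPL snippet, not the pipeline):
#   import numpy as np
#   wave = np.linspace(5889.0, 5891.0, 201)
#   flux = 1.0 - 0.8 * np.exp(-0.5 * ((wave - 5890.0) / 0.05)**2)
#   deriv = np.gradient(flux, wave)
#   thr = np.percentile(np.abs(deriv), 10)
#   cont = (np.abs(deriv) < thr) & (flux > 0.95)
#   print(np.sum(cont))   # number of continuum points selected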
processed = {}
print()
for obs in lists['observations']:
print(' Computing CLV+RM correction for ', obs)
processed[obs] = {}
clv_rm_models[obs] = {}
n_oversampling = int(
observational_pams[obs]['EXPTIME'] / clv_rm_dict['time_step'])
if n_oversampling % 2 == 0:
n_oversampling += 1
half_time = observational_pams[obs]['EXPTIME'] / 2 / 86400.
processed[obs]['bjd_oversampling'] = np.linspace(observational_pams[obs]['BJD'] - half_time,
observational_pams[obs]['BJD'] + half_time,
n_oversampling, dtype=np.double)
if planet_dict['orbit'] == 'circular':
# Time of pericenter coincides with transit time, if we assume e=0 and omega=np.pi/2.
eccentricity = 0.00
omega_rad = np.pi / 2.
# Tcent is assumed as reference time
Tref = planet_dict['reference_time_of_transit'][0]
Tcent_Tref = 0.000
else:
omega_rad = planet_dict['omega'][0] * deg2rad
Tref = planet_dict['reference_time']
Tcent_Tref = planet_dict['reference_time_of_transit'][0] - Tref
eccentricity = planet_dict['eccentricity'][0]
inclination_rad = planet_dict['inclination'][0] * deg2rad
true_anomaly, orbital_distance_ratio = kepler_true_anomaly_orbital_distance(
processed[obs]['bjd_oversampling'] - Tref,
Tcent_Tref,
planet_dict['period'][0],
eccentricity,
omega_rad,
planet_dict['semimajor_axis_ratio'][0])
""" planet position during its orbital motion, in unit of stellar radius"""
# Following Murray+Correia 2011 , with the argument of the ascending node set to zero.
# 1) the ascending node coincide with the X axis
# 2) the reference plance coincide with the plane of the sky
processed[obs]['planet_position'] = {
'xp': -orbital_distance_ratio * (np.cos(omega_rad + true_anomaly)),
'yp': orbital_distance_ratio * (np.sin(omega_rad + true_anomaly) * np.cos(inclination_rad)),
'zp': orbital_distance_ratio * (np.sin(inclination_rad) * np.sin(omega_rad + true_anomaly))
}
# projected distance of the planet's center to the stellar center
processed[obs]['planet_position']['rp'] = np.sqrt(processed[obs]['planet_position']['xp'] ** 2
+ processed[obs]['planet_position']['yp'] ** 2)
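# Sketch of the projected geometry above (Murray & Correia 2011, longitude
# of the ascending node set to zero). For a circular orbit at mid-transit
# omega + f = pi/2, so xp ~ 0 and rp reduces to the impact parameter
# b = a/Rs * cos(i). Hypothetical values (REPL snippet, not the pipeline):
#   import numpy as np
#   a_rs, inc = 8.8, np.radians(86.7)          # assumed a/Rs, inclination
#   omega_f = np.pi / 2.0                      # omega + true anomaly at Tc
#   xp = -a_rs * np.cos(omega_f)
#   yp = a_rs * np.sin(omega_f) * np.cos(inc)
#   zp = a_rs * np.sin(inc) * np.sin(omega_f)  # > 0: planet in foreground
#   print(xp, yp, np.sqrt(xp**2 + yp**2))      # rp -> b ~ 0.51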
# obscured flux integrated over the full epoch
# grid n_radius_grid X size_out (of spectral model)
clv_rm_models[obs]['missing_flux'] = np.zeros(
[len(radius_grid), clv_rm_models['common']['size']], dtype=np.double)
# iterating on the sub-exposures
for j, zeta in enumerate(processed[obs]['planet_position']['zp']):
if zeta > 0 and processed[obs]['planet_position']['rp'][j] < 1. + planet_dict['radius_ratio'][0]:
# the planet is in the foreground or inside the stellar disk, continue
# adjustment: computation is performed even if only part of the planet is shadowing the star
rd = np.sqrt((processed[obs]['planet_position']['xp'][j] - star_grid['xc']) ** 2 +
(processed[obs]['planet_position']['yp'][j] - star_grid['yc']) ** 2)
# iterating on the cell grid
for x in range(0, star_grid['n_grid']):
for y in range(0, star_grid['n_grid']):
# skip the step if the cell is outside the stellar disk
# or if the cell is not shadowed by the planet when the largest possible size is considered
if star_grid['outside'][y, x] or rd[y, x] > planet_dict['radius_ratio'][0]*radius_grid[-1]:
continue
# rescaled planetary radius selection
grid_sel = (
rd[y, x] <= planet_dict['radius_ratio'][0]*radius_grid)
# stellar flux in the masked region
flux_tmp = rebin_1d_to_1d(synthesis['surface']['wave'],
synthesis['surface']['step'],
star_grid['spectra_mu'][x][y],
clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
rv_shift=star_grid['v_star'][y, x],
method='exact_flux',
preserve_flux=False)
# fixing negative values that may have been introduced by
# the rebinning process from an extremely irregular sampling
ind_sel = np.where(flux_tmp < 0.)[0]
for ii in ind_sel:
if ii == 0:
flux_tmp[ii] = flux_tmp[ii + 1]
elif ii == np.size(flux_tmp) - 1:
flux_tmp[ii] = flux_tmp[ii - 1]
else:
flux_tmp[ii] = (
flux_tmp[ii - 1] + flux_tmp[ii + 1]) / 2.
"""
Outer product of the radius selection array (size=M)
and the flux array (N) so that it can be summed
properly to the MxN missing_flux matrix.
"""
clv_rm_models[obs]['missing_flux'] += \
np.outer(grid_sel, flux_tmp)
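# Sketch of the accumulation above: the boolean radius selection (length M)
# is broadcast against the cell spectrum (length N), so every admissible row
# of the M x N missing-flux matrix receives the same flux. Toy check
# (REPL snippet, not used by the pipeline):
#   import numpy as np
#   grid_sel = np.array([False, True, True])     # M = 3 radius factors
#   flux_tmp = np.array([0.9, 0.8, 0.95, 1.0])   # N = 4 wavelengths
#   print(np.outer(grid_sel, flux_tmp))          # row 0 zero, rows 1-2 filled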
clv_rm_models[obs]['missing_flux'] /= n_oversampling
clv_rm_models[obs]['stellar_spectra'] = \
np.outer(np.ones(len(radius_grid)), clv_rm_models['common']['norm']) \
- (clv_rm_models[obs]['missing_flux'] /
clv_rm_models['common']['continuum_level'])
clv_rm_models[obs]['stellar_spectra_convolved'] = \
np.zeros([len(radius_grid), clv_rm_models['common']['size']],
dtype=np.double)
clv_rm_models[obs]['clv_rm_model_convolved'] = \
np.zeros([len(radius_grid), clv_rm_models['common']['size']],
dtype=np.double)
clv_rm_models[obs]['clv_rm_model_convolved_derivative'] = \
np.zeros([len(radius_grid), clv_rm_models['common']['size']],
dtype=np.double)
clv_rm_models[obs]['clv_rm_model_convolved_continuum_bool'] = \
np.zeros([len(radius_grid), clv_rm_models['common']['size']],
dtype=bool)
clv_rm_models[obs]['clv_rm_model_convolved_normalized'] = \
np.zeros([len(radius_grid), clv_rm_models['common']['size']],
dtype=np.double)
for ii in range(0, len(radius_grid)):
clv_rm_models[obs]['stellar_spectra_convolved'][ii, :] = \
convolve(clv_rm_models[obs]['stellar_spectra'][ii, :],
gaussian)
clv_rm_models[obs]['stellar_spectra_convolved'][ii, wave_fix_convolution] = \
clv_rm_models[obs]['stellar_spectra'][ii, wave_fix_convolution]
"""
This is the theoretical transmission spectrum in the stellar reference frame
when only CLV and RM effects are present (no atmospheric
transmission)
"""
clv_rm_models[obs]['clv_rm_model_convolved'][ii, :] = \
clv_rm_models[obs]['stellar_spectra_convolved'][ii, :] \
/ clv_rm_models['common']['norm_convolved']
"""
High-resolution transmission spectra are always rescaled for
their continuum because in fiber-fed spectrographs the
information on the absolute flux of the star is lost.
If not using the normalized spectrum, normalization factor must
be included somehow when correcting for the CLV+RM, before
fitting the atomic absorption lines
"""
normalization_function = np.polynomial.chebyshev.Chebyshev.fit(
clv_rm_models['common']['wave'][clv_rm_models['common']['norm_convolved_bool']],
clv_rm_models[obs]['clv_rm_model_convolved'][ii, :][clv_rm_models['common']['norm_convolved_bool']],
deg=norm_pams['model_poly_degree']
)
clv_rm_models[obs]['clv_rm_model_convolved_normalized'][ii, :] = clv_rm_models[obs]['clv_rm_model_convolved'][ii, :] / normalization_function(clv_rm_models['common']['wave'])
##if ii!= 5: continue
##import matplotlib.pyplot as plt
##plt.plot(clv_rm_models['common']['wave'], clv_rm_models[obs]['stellar_spectra_convolved'][ii, :], c='C0')
##plt.plot(clv_rm_models['common']['wave'], clv_rm_models['common']['norm_convolved'], c='C1')
##plt.show()
##
##
##print(np.sum(clv_rm_models['common']['norm_convolved_bool']))
##plt.plot(clv_rm_models['common']['wave'], clv_rm_models[obs]['clv_rm_model_convolved_normalized'][ii, :], label='convolved, normalized')
##plt.plot(clv_rm_models['common']['wave'], clv_rm_models[obs]['clv_rm_model_convolved'][ii, :], label='convolved')
##plt.plot(clv_rm_models['common']['wave'], normalization_function(clv_rm_models['common']['wave']), c='C5', zorder=10)
##plt.scatter(
## clv_rm_models['common']['wave'][clv_rm_models['common']['norm_convolved_bool']],
## clv_rm_models[obs]['clv_rm_model_convolved'][ii, :][clv_rm_models['common']['norm_convolved_bool']], s=5, c='C3')
##plt.legend()
##plt.show()
##if obs=='HARPN.2016-06-04T02-27-41.158': quit()
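# Sketch of the normalization step above: a low-order Chebyshev polynomial
# is fitted through the pre-selected continuum points only, then evaluated
# on the full grid and divided out. Toy check on a slowly varying continuum
# (REPL snippet, not used by the pipeline):
#   import numpy as np
#   wave = np.linspace(5880.0, 5900.0, 101)
#   model = 1.0 + 0.002 * (wave - 5890.0)
#   fit = np.polynomial.chebyshev.Chebyshev.fit(wave, model, deg=2)
#   print(np.max(np.abs(model / fit(wave) - 1.0)))   # ~0 after division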
""" In the planetary reference frame, the corrected transmission
spectrum T_corr is given by
T_corr = T_input * (synthetic_convolved /
stellar_spectra_convolved),
where: T_input: transmission spectrum before the correction
synthetic_convolved: integrated synthetic stellar spectrum,
convolved for the instrumental resolution.
stellar_spectra_convolved: stellar spectrum after removing the
contribute of the stellar surface covered by the planet, convolved
for the instrumental resolution (synthetic_convolved and
stellar_spectra_convolved are in the stellar rest frame and must be
rebinned in the planetary rest frame)
Since clv_rm_model_convolved = stellar_spectra_convolved /
synthetic_convolved the observed transmission spectrum must be
DIVIDED by clv_rm_model_convolved
"""
save_to_cpickle('clv_rm_models', clv_rm_models,
config_in['output'], night, lines_label)
clv_rm_models = None # Forcing memory de-allocation
if not config_in['settings'].get('full_output', False):
del star_grid['spectra_mu']
try:
synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])
except (FileNotFoundError, IOError):
save_to_cpickle('clv_rm_star_grid', star_grid, config_in['output'])
save_to_cpickle('clv_rm_synthesis', synthesis, config_in['output'])
def plot_clv_rm_models_lines(config_in, lines_label, night_input=''):
spectral_lines = from_config_get_spectral_lines(config_in)
line_iter_dict = spectral_lines[lines_label]
clv_rm_correction = line_iter_dict.get('clv_rm_correction', True)
if not clv_rm_correction:
return
night_dict = from_config_get_nights(config_in)
synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])
if night_input == '':
# Visualize the mu of star
fig = plt.figure(figsize=(8, 6.5))
plt.title('Limb angle')
plt.contourf(star_grid['xx'], star_grid['xx'],
star_grid['mu'], 60, cmap=plt.cm.viridis)
plt.colorbar(label=r'$\mu$')  # draw colorbar
# plot data points.
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.xlabel('x [R_s]')
plt.ylabel('y [R_s]')
plt.show()
# Visualize the RV of star
fig = plt.figure(figsize=(8, 6.5))
# CS = plt.contour(xx,xx,v_star,50,linewidths=0.5,colors='k')
plt.title('Radial velocity field')
plt.contourf(star_grid['xx'], star_grid['xx'],
star_grid['v_star'], 100, cmap=plt.cm.seismic)
plt.colorbar(label='v_star') # draw colorbar
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.xlabel('x [R_s]')
plt.ylabel('y [R_s]')
plt.show()
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
lists = load_from_cpickle('lists', config_in['output'], night)
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night, lines_label)
observational_pams = load_from_cpickle(
'observational_pams', config_in['output'], night)
colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(
lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
i0_radius = np.argmin(
np.abs(clv_rm_models['common']['radius_grid']-1.00))
for obs in lists['transit_in']:
ax1.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['stellar_spectra'][i0_radius, :],
color=colors_plot['mBJD'][obs], alpha=0.2)
ax1.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['missing_flux'][i0_radius, :] /
clv_rm_models['common']['continuum_level'],
color=colors_plot['mBJD'][obs], alpha=0.2)
ax2.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['stellar_spectra'][-1, :],
color=colors_plot['mBJD'][obs], alpha=0.2)
ax2.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['missing_flux'][-1, :] /
clv_rm_models['common']['continuum_level'],
color=colors_plot['mBJD'][obs], alpha=0.2)
# for obs in lists['transit_out']:
# ax2.plot(clv_rm_models['common']['wave'],
# clv_rm_models[obs]['stellar_spectra'],
# color=colors_plot['mBJD'][obs], alpha=0.2)
ax1.set_title(
'Night: {0:s} \n Input spectra, stellar radius'.format(night))
ax2.set_title('Stellar radius x {0:2.2f}'.format(
clv_rm_models['common']['radius_grid'][-1]))
ax2.set_xlabel(r'$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(
cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
i0_radius = np.argmin(
np.abs(clv_rm_models['common']['radius_grid']-1.00))
for obs in lists['transit_out']:
ax1.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['stellar_spectra'][i0_radius, :],
color=colors_plot['mBJD'][obs], alpha=0.2)
ax2.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['stellar_spectra_convolved'][i0_radius, :],
color=colors_plot['mBJD'][obs], alpha=0.2)
ax1.plot(clv_rm_models['common']['wave'],
clv_rm_models['common']['norm'][:],
color='C3')
ax2.plot(clv_rm_models['common']['wave'],
clv_rm_models['common']['norm_convolved'][:],
color='C3')
# for obs in lists['transit_out']:
# ax2.plot(clv_rm_models['common']['wave'],
# clv_rm_models[obs]['stellar_spectra'],
# color=colors_plot['mBJD'][obs], alpha=0.2)
ax1.set_title(
'Night: {0:s} \n CLV+RM correction, convolved '.format(night))
ax2.set_title('Out of transit')
ax2.set_xlabel(r'$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(
cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
i0_radius = np.argmin(
np.abs(clv_rm_models['common']['radius_grid']-1.00))
for obs in lists['transit_in']:
ax1.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['clv_rm_model_convolved'][i0_radius, :],
color=colors_plot['mBJD'][obs], alpha=0.2)
for obs in lists['transit_out']:
ax2.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['clv_rm_model_convolved'][i0_radius, :],
color=colors_plot['mBJD'][obs], alpha=0.2)
# for obs in lists['transit_out']:
# ax2.plot(clv_rm_models['common']['wave'],
# clv_rm_models[obs]['stellar_spectra'],
# color=colors_plot['mBJD'][obs], alpha=0.2)
ax1.set_title(
'Night: {0:s} \n CLV+RM correction, convolved '.format(night))
ax2.set_title('Out of transit')
ax2.set_xlabel(r'$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(
cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
i0_radius = np.argmin(
np.abs(clv_rm_models['common']['radius_grid']-1.00))
for obs in lists['transit_in']:
#ax1.plot(clv_rm_models['common']['wave'],
# clv_rm_models[obs]['clv_rm_model_convolved'][i0_radius, :],
# color=colors_plot['mBJD'][obs], alpha=0.2)
ax1.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['clv_rm_model_convolved_normalized'][i0_radius, :],
color=colors_plot['mBJD'][obs], alpha=0.2)
for obs in lists['transit_out']:
#ax2.plot(clv_rm_models['common']['wave'],
# clv_rm_models[obs]['clv_rm_model_convolved'][-1, :],
# color=colors_plot['mBJD'][obs], alpha=0.2)
ax2.plot(clv_rm_models['common']['wave'],
clv_rm_models[obs]['clv_rm_model_convolved_normalized'][-1, :],
color=colors_plot['mBJD'][obs], alpha=0.2)
# for obs in lists['transit_out']:
# ax2.plot(clv_rm_models['common']['wave'],
# clv_rm_models[obs]['stellar_spectra'],
# color=colors_plot['mBJD'][obs], alpha=0.2)
ax1.set_title(
'Night: {0:s} \n CLV+RM correction, convolved and normalized '.format(night))
ax2.set_title('Out of transit')
ax2.set_xlabel(r'$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(
cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
| 39,165 | 44.808187 | 190 | py |
SLOPpy | SLOPpy-main/SLOPpy/write_output_transmission.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.math_functions import *
from SLOPpy.transmission_spectrum_preparation import compute_transmission_spectrum_preparation
from scipy.signal import savgol_filter
__all__ = ['write_output_transmission', 'plot_output_transmission',
'write_output_transmission_planetRF', 'plot_output_transmission_planetRF',
'write_output_transmission_stellarRF', 'plot_output_transmission_stellarRF',
'write_output_transmission_observerRF', 'plot_output_transmission_observerRF']
subroutine_name = 'write_output_transmission'
sampler_name = 'emcee'
def write_output_transmission_planetRF(config_in):
write_output_transmission(config_in, reference='planetRF')
def plot_output_transmission_planetRF(config_in, night_input, results_input=''):
plot_output_transmission(config_in, night_input, results_input, reference='planetRF')
def write_output_transmission_stellarRF(config_in):
write_output_transmission(config_in, reference='stellarRF')
def plot_output_transmission_stellarRF(config_in, night_input, results_input=''):
plot_output_transmission(config_in, night_input, results_input, reference='stellarRF')
def write_output_transmission_observerRF(config_in):
write_output_transmission(config_in, reference='observerRF')
def plot_output_transmission_observerRF(config_in, night_input, results_input=''):
plot_output_transmission(config_in, night_input, results_input, reference='observerRF')
def write_output_transmission(config_in, reference='planetRF', night_input='', preparation_only=False, pca_iteration=-1):
results_list_default = ['user']
# compute_transmission_spectrum_preparation(config_in)
pca_parameters = from_config_get_pca_parameters(config_in)
night_dict = from_config_get_nights(config_in)
### transmission_dict = from_config_get_transmission(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
fullspectrum_dict = from_config_get_fullspectrum_parameters(config_in)
clv_rm_correction = fullspectrum_dict.get('clv_rm_correction', True)
norm_dict = fullspectrum_dict.get('normalization', {})
norm_pams = {}
norm_pams['normalize_transmission'] = norm_dict.get('normalize_transmission', True)
norm_pams['normalization_model'] = norm_dict.get('normalization_model', 'polynomial')
""" Normalization parameters for polynomial model"""
norm_pams['model_poly_degree'] = norm_dict.get('model_poly_degree', 2)
norm_pams['spectra_poly_degree'] = norm_dict.get('spectra_poly_degree', 2)
norm_pams['lower_threshold'] = norm_dict.get('lower_threshold', 0.950)
norm_pams['percentile_selection'] = norm_dict.get('percentile_selection', 10)
""" Normalization parameters using Savitzky-Golay filter"""
norm_pams['window_length'] = norm_dict.get('window_length', 101)
norm_pams['polyorder'] = norm_dict.get('polyorder', 3)
norm_pams['mode'] = norm_dict.get('mode', 'nearest')
norm_pams['cval'] = norm_dict.get('cval', 1.0)
range_temp = fullspectrum_dict.get('range', None)
if range_temp:
""" Using the line-specific range to define the transmission spectrum region """
shared_selection = (shared_data['coadd']['wave'] >= fullspectrum_dict['range'][0]) \
& (shared_data['coadd']['wave'] < fullspectrum_dict['range'][1])
binned_selection = (shared_data['binned']['wave'] >= fullspectrum_dict['range'][0]) \
& (shared_data['binned']['wave'] < fullspectrum_dict['range'][1])
transmission_template = {
'subroutine': subroutine_name,
'range': range_temp,
'wave': shared_data['coadd']['wave'][shared_selection],
'step': shared_data['coadd']['step'][shared_selection],
'size': int(np.sum(shared_selection)),
'binned_wave': shared_data['binned']['wave'][binned_selection],
'binned_step': shared_data['binned']['step'][binned_selection],
'binned_size': int(np.sum(binned_selection))
}
else:
transmission_template = {
'subroutine': subroutine_name,
'range': shared_data['coadd']['wavelength_range'],
'wave': shared_data['coadd']['wave'],
'step': shared_data['coadd']['step'],
'size': shared_data['coadd']['size'],
'binned_range': shared_data['binned']['wavelength_range'],
'binned_wave': shared_data['binned']['wave'],
'binned_step': shared_data['binned']['step'],
'binned_size': shared_data['binned']['size']
}
for night in night_dict:
print()
print("Running {0:45s} Night:{1:15s} ".format(subroutine_name, night))
preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
if preparation_input.get('pca_output', False):
if pca_iteration >= 0:
it_string = str(pca_iteration).zfill(2)
else:
it_string = str(pca_parameters.get('ref_iteration', 3)).zfill(2)
preparation = preparation_input[it_string]
else:
preparation = preparation_input
it_string = ''
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
results_list = results_list_default.copy()
print(' Observational parameters from configuration file')
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
try:
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night)
except (FileNotFoundError, IOError):
clv_rm_correction = False
for results_selection in results_list:
try:
transmission = load_from_cpickle(subroutine_name+'_'+reference + '_' +
results_selection, config_in['output'], night, it_string=it_string)
print("{0:45s} Night:{1:15s} {2:s} {3:s}".format(
subroutine_name, night, results_selection, 'Retrieved'))
continue
except (FileNotFoundError, IOError):
print("{0:45s} Night:{1:15s} {2:s} {3:s}".format(
subroutine_name, night, results_selection, 'Computing'))
transmission = transmission_template.copy()
if len(it_string) > 0:
transmission['pca_output'] = True
else:
transmission['pca_output'] = False
print_warning = True
for obs in lists['observations']:
""" we start from the e2ds file, after correction for blaze and
division by the master-out
Observation data:
wave: input_data[obs]['wave']
step: input_data[obs]['step']
flux: preparation[obs]['deblazed']
ferr: preparation[obs]['deblazed_err']
"""
transmission[obs] = {}
transmission[obs] = {
'BJD': input_data[obs]['BJD'],
'AIRMASS': input_data[obs]['AIRMASS']
}
""" Shift into planetary reference system is the default
choice"""
if results_selection == 'user':
planet_R_factor = observational_pams.get('Rp_factor', 1.00000)
if reference in ['observer', 'observerRF', 'ORF']:
rv_shift = 0.000
rv_shift_clv = -observational_pams[obs]['rv_shift_ORF2SRF']
elif reference in ['stellar', 'stellarRF', 'SRF']:
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
rv_shift_clv = 0.0000
else:
rv_shift = observational_pams[obs]['rv_shift_ORF2PRF']
rv_shift_clv = observational_pams[obs]['rv_shift_SRF2PRF']
""" Step 2): rebin the 2D ratio spectra to 1D """
if transmission['pca_output']:
transmission[obs]['rebinned'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio'],
np.ones_like(calib_data['blaze']),
transmission['wave'],
transmission['step'],
preserve_flux=False,
rv_shift=rv_shift)
transmission[obs]['rebinned_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio_err'],
np.ones_like(calib_data['blaze']),
transmission['wave'],
transmission['step'],
rv_shift=rv_shift,
preserve_flux=False,
is_error=True)
else:
preserve_flux = input_data[obs].get('absolute_flux', True)
transmission[obs]['rebinned'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio'],
calib_data['blaze'],
transmission['wave'],
transmission['step'],
preserve_flux=preserve_flux,
rv_shift=rv_shift)
transmission[obs]['rebinned_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio_err'],
calib_data['blaze'],
transmission['wave'],
transmission['step'],
preserve_flux=preserve_flux,
rv_shift=rv_shift,
is_error=True)
# Small border bug fix: copy the nearest valid neighbour into the border pixel
if transmission[obs]['rebinned_err'][0] == 0:
transmission[obs]['rebinned'][0] = transmission[obs]['rebinned'][1]
transmission[obs]['rebinned_err'][0] = transmission[obs]['rebinned_err'][1]
if transmission[obs]['rebinned_err'][-1] == 0:
transmission[obs]['rebinned'][-1] = transmission[obs]['rebinned'][-2]
transmission[obs]['rebinned_err'][-1] = transmission[obs]['rebinned_err'][-2]
#import matplotlib.pyplot as plt
#plt.scatter(transmission['wave'], transmission[obs]['corrected'])
#plt.plot(transmission['wave'], transmission[obs]['continuum'])
#plt.scatter(transmission['wave'][selection], transmission[obs]['corrected'][selection], c='r')
#plt.plot(input_data[obs]['wave'][0,:], preparation[obs]['ratio_err'][0,:])
#plt.scatter(transmission['wave'], transmission[obs]['rebinned_err'], c='b')
#plt.axhline(0.0000, c='C2')
#plt.show()
#quit()
#import matplotlib.pyplot as plt
#plt.scatter(input_data[obs]['wave'], preparation[obs]['ratio'], s=2)
#plt.xlim(lines_dict['range'][0], lines_dict['range'][1])
# plt.show()
if clv_rm_correction:
"""" CLV + RM computation in the planetary reference frame """
transmission[obs]['clv_model_stellarRF'] = interpolate1d_grid_nocheck(planet_R_factor,
clv_rm_models['common']['radius_grid'],
clv_rm_models[obs]['clv_rm_model_convolved_normalized'])
transmission[obs]['clv_model_rebinned'] = \
rebin_1d_to_1d(clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
transmission[obs]['clv_model_stellarRF'],
transmission['wave'],
transmission['step'],
preserve_flux=False,
rv_shift=rv_shift_clv,
reference_value=1.)
"Fix to avoid division by zero and border effects"
transmission[obs]['corrected'] = transmission[obs]['rebinned'] / \
transmission[obs]['clv_model_rebinned']
transmission[obs]['corrected_err'] = transmission[obs]['rebinned_err'] / \
transmission[obs]['clv_model_rebinned']
else:
transmission[obs]['clv_model_rebinned'] = np.ones(transmission['size'])
transmission[obs]['corrected'] = transmission[obs]['rebinned']
transmission[obs]['corrected_err'] = transmission[obs]['rebinned_err']
if print_warning:
print(' *** No CLV correction')
if norm_pams['normalize_transmission'] and norm_pams['normalization_model'] == 'polynomial':
""" Continuum normalization preparatory steps:
1) exclusion of regions with lines of interest
2) exclusion of regions with stellar lines
3) Polynomial fit of selected regions
Boolean array initialized to all True values
"""
transmission[obs]['line_exclusion'] = (transmission['wave'] > 0.)
""" Continuum normalization:
1) exclusion of regions with transmission lines under study, now
in the RF of the lines
SKIPPED as we are operating on the full spectrum
"""
""" Continuum normalization:
2) exclusion of regions with planetary lines, taking into account the planetary RV semi-amplitude
"""
#import matplotlib.pyplot as plt
#plt.scatter(transmission['wave'],transmission[obs]['line_exclusion'], c='C0', s=1)
if clv_rm_correction:
stellar_spectrum_rebinned = rebin_1d_to_1d(clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
clv_rm_models['common']['norm_convolved'],
transmission['wave'],
transmission['step'],
rv_shift=rv_shift_clv,
preserve_flux=False)
stellar_spectrum_derivative = first_derivative(transmission['wave'], stellar_spectrum_rebinned)
missing_model = (np.abs(stellar_spectrum_rebinned) < 0.0001)
cont_10perc = np.percentile(np.abs(stellar_spectrum_derivative[~missing_model]), norm_pams['percentile_selection'])
line_exclusion = transmission[obs]['line_exclusion'] \
& (np.abs(stellar_spectrum_derivative) < cont_10perc) \
& (stellar_spectrum_rebinned > norm_pams['lower_threshold'])
if np.sum(line_exclusion) < len(line_exclusion)/200:
transmission[obs]['line_exclusion'] = transmission[obs]['line_exclusion'] \
& ( missing_model | ((np.abs(stellar_spectrum_derivative) < cont_10perc) \
& (stellar_spectrum_rebinned > norm_pams['lower_threshold'])))
else:
transmission[obs]['line_exclusion'] = line_exclusion
#plt.plot(transmission['wave'],stellar_spectrum_rebinned)
#plt.plot(transmission['wave'],stellar_spectrum_derivative, c='C1')
#sel1 = (np.abs(stellar_spectrum_derivative) < cont_10perc)
#sel2 = (stellar_spectrum_rebinned > norm_pams['lower_threshold'])
#plt.scatter(transmission['wave'],transmission[obs]['line_exclusion'], c='C1', s=1)
#plt.scatter(transmission['wave'],sel1 + 0.1, c='C2', s=1)
#plt.scatter(transmission['wave'],sel2 + 0.2, c='C3', s=1)
#plt.ylim(0,1.3)
#plt.show()
elif print_warning:
print(" No stellar synthetic spectrum from CLV models")
print(" some stellar lines may be included in transmission normalization ")
print_warning = False
""" Continuum normalization:
3) Polynomial fit, everything is hard coded now but personalized
options can be implemented easily in the yaml file
"""
selection = transmission[obs]['line_exclusion'] & (
transmission[obs]['corrected'] > np.std(transmission[obs]['corrected']))
transmission[obs]['continuum_coeff'] = \
np.polynomial.chebyshev.chebfit(transmission['wave'][selection],
transmission[obs]['corrected'][selection],
norm_pams['spectra_poly_degree'])
transmission[obs]['continuum'] = np.polynomial.chebyshev.chebval(
transmission['wave'], transmission[obs]['continuum_coeff'])
transmission[obs]['normalized'] = transmission[obs]['corrected'] / transmission[obs]['continuum']
transmission[obs]['normalized_err'] = transmission[obs]['corrected_err'] / \
transmission[obs]['continuum']
#import matplotlib.pyplot as plt
#plt.scatter(transmission['wave'], transmission[obs]['corrected'])
#plt.plot(transmission['wave'], transmission[obs]['continuum'])
#plt.scatter(transmission['wave'][selection], transmission[obs]['corrected'][selection], c='r')
#plt.scatter(transmission['wave'], transmission[obs]['corrected_err']+0.05, c='b')
#plt.scatter(transmission['wave'], transmission[obs]['normalized_err'], c='r')
#plt.show()
#quit()
transmission[obs]['continuum_uncorrected_coeff'] = \
np.polynomial.chebyshev.chebfit(transmission['wave'][selection],
transmission[obs]['rebinned'][selection],
norm_pams['spectra_poly_degree'])
transmission[obs]['continuum_uncorrected'] = np.polynomial.chebyshev.chebval(
transmission['wave'], transmission[obs]['continuum_uncorrected_coeff'])
transmission[obs]['normalized_uncorrected'] = transmission[obs]['rebinned'] / \
transmission[obs]['continuum_uncorrected']
transmission[obs]['normalized_uncorrected_err'] = transmission[obs]['rebinned_err'] / \
transmission[obs]['continuum_uncorrected']
elif norm_pams['normalize_transmission'] and (
norm_pams['normalization_model'] == 'savgol'
or norm_pams['normalization_model'] == 'savitzky-golay'):
print(' ', obs, ' normalization using Savitzky-Golay filter')
transmission[obs]['continuum_coeff'] = None
transmission[obs]['continuum_uncorrected_coeff'] = None
transmission[obs]['continuum'] = savgol_filter(transmission[obs]['corrected'],
window_length=norm_pams['window_length'],
polyorder=norm_pams['polyorder'],
mode=norm_pams['mode'],
cval=norm_pams['cval'])
transmission[obs]['normalized'] = transmission[obs]['corrected'] / transmission[obs]['continuum']
transmission[obs]['normalized_err'] = transmission[obs]['corrected_err'] / \
transmission[obs]['continuum']
transmission[obs]['continuum_uncorrected'] = savgol_filter(transmission[obs]['rebinned'],
window_length=norm_pams['window_length'],
polyorder=norm_pams['polyorder'],
mode=norm_pams['mode'],
cval=norm_pams['cval'])
transmission[obs]['normalized_uncorrected'] = transmission[obs]['rebinned'] / transmission[obs]['continuum_uncorrected']
transmission[obs]['normalized_uncorrected_err'] = transmission[obs]['rebinned_err'] / \
transmission[obs]['continuum_uncorrected']
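# Sketch of the Savitzky-Golay continuum above: the filter fits a local
# polynomial inside a sliding window, tracking a smooth continuum while
# leaving narrow lines mostly untouched. Assumed toy parameters
# (REPL snippet, not used by the pipeline):
#   import numpy as np
#   from scipy.signal import savgol_filter
#   flux = 1.0 + 0.001 * np.arange(300) - 0.5 * (np.arange(300) == 150)
#   cont = savgol_filter(flux, window_length=101, polyorder=3, mode='nearest')
#   print(flux[150] / cont[150])   # the narrow dip survives normalization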
else:
transmission[obs]['continuum_coeff'] = None
transmission[obs]['continuum'] = np.ones_like(transmission['wave'])
transmission[obs]['normalized'] = transmission[obs]['corrected'].copy()
transmission[obs]['normalized_err'] = transmission[obs]['corrected_err'].copy()
#import matplotlib.pyplot as plt
#plt.scatter(transmission['wave'], transmission[obs]['corrected'])
#plt.plot(transmission['wave'], transmission[obs]['continuum'])
#plt.scatter(transmission['wave'][selection], transmission[obs]['corrected'][selection], c='r')
# plt.show()
transmission[obs]['continuum_uncorrected_coeff'] = None
transmission[obs]['continuum_uncorrected'] = np.ones_like(transmission['wave'])
transmission[obs]['normalized_uncorrected'] = transmission[obs]['rebinned'].copy()
transmission[obs]['normalized_uncorrected_err'] = transmission[obs]['rebinned_err'].copy()
print_warning = False
transm_average = np.zeros([len(lists['transit_full']), transmission['size']])
weights_average = np.zeros([len(lists['transit_full']), transmission['size']])
clvrm_average = np.zeros([len(lists['transit_full']), transmission['size']])
uncorr_average = np.zeros([len(lists['transit_full']), transmission['size']])
for i, obs in enumerate(lists['transit_full']):
transm_average[i, :] = transmission[obs]['normalized'][:]
weights_average[i, :] = 1./(transmission[obs]['normalized_err']**2.)
clvrm_average[i, :] = transmission[obs]['clv_model_rebinned'][:]
uncorr_average[i, :] = transmission[obs]['normalized_uncorrected'][:]
transmission['average'], transmission['sum_weights'] = np.average(
transm_average, axis=0, weights=weights_average, returned=True)
transmission['average_err'] = 1. / np.sqrt(transmission['sum_weights'])
transmission['average_clv_model'], _ = np.average(
clvrm_average, axis=0, weights=weights_average, returned=True)
transmission['average_uncorrected'], _ = np.average(
uncorr_average, axis=0, weights=weights_average, returned=True)
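# The stacks above are inverse-variance weighted means: with w_i = 1/err_i**2,
# np.average(..., returned=True) also returns sum(w_i), and the uncertainty of
# the mean is 1/sqrt(sum(w_i)). Minimal check (REPL snippet, not the pipeline):
#   import numpy as np
#   vals, errs = np.array([1.00, 1.02]), np.array([0.01, 0.02])
#   w = 1.0 / errs**2
#   mean, wsum = np.average(vals, weights=w, returned=True)
#   print(mean, 1.0 / np.sqrt(wsum))   # -> 1.004, ~0.0089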
transmission['binned'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False)
transmission['binned_err'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_err'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False,
is_error=True)
transmission['binned_clv_model'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_clv_model'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False)
transmission['binned_uncorrected'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_uncorrected'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False)
transm_average = np.zeros([len(lists['transit_out']), transmission['size']])
weights_average = np.zeros([len(lists['transit_out']), transmission['size']])
for i, obs in enumerate(lists['transit_out']):
transm_average[i, :] = transmission[obs]['normalized'][:]
weights_average[i, :] = 1./(transmission[obs]['normalized_err']**2.)
transmission['average_out'], transmission['sum_weights_out'] = np.average(
transm_average, axis=0, weights=weights_average, returned=True)
transmission['average_out_err'] = 1./np.sqrt(transmission['sum_weights_out'])
transmission['binned_out'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_out'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False)
transmission['binned_out_err'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_out_err'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False,
is_error=True)
#save_to_cpickle('transmission_'+reference+'_processed', processed, config_in['output'], night)
save_to_cpickle(subroutine_name + '_' + reference + '_' + results_selection,
transmission, config_in['output'], night, it_string=it_string)
# Forcing memory deallocation
transmission = None
# Forcing memory deallocation
clv_rm_models = None
def plot_output_transmission(config_in, night_input='', results_input='', reference='planetRF', pca_iteration=-1):
night_dict = from_config_get_nights(config_in)
fullspectrum_dict = from_config_get_fullspectrum_parameters(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
if results_input == '':
results_list = ['user']
else:
results_list = np.atleast_1d(results_input)
clv_rm_correction = fullspectrum_dict.get('clv_rm_correction', True)
os.system('mkdir -p plots')
interactive_plots = from_config_get_interactive_plots(config_in)
for night in night_list:
# Workaround to check if the transmission spectrum has been obtained through PCA iterations
preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
if preparation_input.get('pca_output', False):
if pca_iteration >= 0:
it_string = str(pca_iteration).zfill(2)
else:
it_string = str(preparation_input.get('ref_iteration', 0)).zfill(2)
else:
it_string = ''
preparation_input = None
if clv_rm_correction:
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night)
for results_selection in results_list:
filename_rad = subroutine_name + '_'+reference+'_'+results_selection
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the analysis"""
try:
#processed = load_from_cpickle('transmission_'+reference+'_processed', config_in['output'], night)
transmission = load_from_cpickle(filename_rad, config_in['output'], night, it_string)
except (FileNotFoundError, IOError):
print()
print("No transmission spectrum in {0:s}, no plots".format(reference))
continue
""" Creation of the color array, based on the BJD of the observations
"""
bjd = []
am = []
for obs in lists['observations']:
bjd.append(transmission[obs]['BJD'] - 2450000.0)
am.append(transmission[obs]['AIRMASS'])
color_cmap = plt.cm.viridis
color_norm = plt.Normalize(vmin=bjd[0], vmax=bjd[-1])
colors = color_cmap(color_norm(np.asarray(bjd)))
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
# per-observation spectra, plotted small and transparent to keep the figure readable
for obs in lists['transit_full']:
color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
ax1.scatter(transmission['wave'],
transmission[obs]['normalized'],
c=color, s=1, zorder=3, alpha=0.25)
for obs in lists['transit_out']:
color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
ax2.scatter(transmission['wave'],
transmission[obs]['normalized'],
c=color, s=1, zorder=3, alpha=0.25)
ax1.set_ylim(0.925, 1.075)
ax2.set_xlabel(r'$\lambda$ [$\AA$]')
ax2.legend(loc=3)
ax1.set_title('Night: {0:s} \n In-transit transmission spectrum in {1:s} \n Solution {2:s}'.format(
night, reference, results_selection))
ax2.set_title('Out-transit transmission spectrum in {0:s}'.format(reference))
try:
ax1.set_xlim(fullspectrum_dict['plot_range'][0], fullspectrum_dict['plot_range'][1])
except (KeyError, IndexError):
ax1.set_xlim(transmission['range'][0], transmission['range'][1])
sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
output_file = get_filename(filename_rad + '_observations',
config_in['output'], night, it_string=it_string, extension='.pdf')
plt.savefig('plots/'+output_file, bbox_inches='tight', dpi=300)
if interactive_plots:
plt.show()
plt.close()
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
try:
master_out = load_from_cpickle('master_out', config_in['output'], night)
ax2.plot(master_out['wave'],
master_out['rescaled']-0.06,
color='k', zorder=10, label='master-out')
except (FileNotFoundError, IOError):
pass
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
ax2.plot(telluric['template']['input']['wave'],
telluric['template']['input']['flux'] - 0.06,
color='C1', zorder=10, label='telluric')
ax2.plot(telluric['template']['input']['wave'],
(telluric['template']['input']['flux']-1.)*10. + 1. - 0.06,
color='C2', alpha=0.5, zorder=9, label='telluric (x10)')
except (FileNotFoundError, IOError, KeyError):
pass
#master_out = load_from_cpickle('master_out', config_in['output'], night)
# ax1.errorbar(master_out['wave'],
# master_out['rescaled'],
# yerr=master_out['rescaled_err'],
# fmt='.', c='C0', label='master-out ' + night)
ax1.errorbar(transmission['wave'],
transmission['average'],
yerr=transmission['average_err'],
fmt='ko', ms=1, zorder=5, alpha=0.25)
ax1.errorbar(transmission['binned_wave'],
transmission['binned'],
yerr=transmission['binned_err'],
fmt='ro', ms=4, lw=2, zorder=10)
ax2.errorbar(transmission['wave'],
transmission['average_out'],
yerr=transmission['average_out_err'],
fmt='ko', ms=1, zorder=5, alpha=0.25, label='average')
ax2.errorbar(transmission['binned_wave'],
transmission['binned_out'],
yerr=transmission['binned_out_err'],
fmt='ro', ms=4, lw=2, zorder=10, label='binned average')
ax1.set_ylim(0.99, 1.01)
ax2.set_xlabel(r'$\lambda$ [$\AA$]')
ax2.legend(loc=3)
ax1.set_title('Night: {0:s} \n In-transit transmission spectrum in {1:s} \n Solution {2:s}'.format(
night, reference, results_selection))
ax2.set_title('Out-transit transmission spectrum in {0:s}'.format(reference))
sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
try:
ax1.set_xlim(fullspectrum_dict['plot_range'][0], fullspectrum_dict['plot_range'][1])
except (KeyError, IndexError):
ax1.set_xlim(transmission['range'][0], transmission['range'][1])
#ax1.set_xlim(config_in['master-out']['wavelength_range'][0], config_in['master-out']['wavelength_range'][1])
output_file = get_filename(filename_rad + '_binned',
config_in['output'], night, it_string=it_string, extension='.pdf')
plt.savefig('plots/'+output_file, bbox_inches='tight', dpi=300)
if interactive_plots:
plt.show()
plt.close()
if not clv_rm_correction:
continue
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
# per-observation models, plotted small and transparent to keep the figure readable
for obs in lists['transit_full']:
color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
ax1.plot(clv_rm_models['common']['wave'],
transmission[obs]['clv_model_stellarRF'],
zorder=3, alpha=0.25)
ax1.scatter(transmission['wave'],
transmission[obs]['clv_model_rebinned'],
c=color, s=1, zorder=10, alpha=0.5)
for obs in lists['transit_out']:
color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
ax2.plot(clv_rm_models['common']['wave'],
transmission[obs]['clv_model_stellarRF'],
zorder=3, alpha=0.25)
ax2.scatter(transmission['wave'],
transmission[obs]['clv_model_rebinned'],
c=color, s=1, zorder=10, alpha=0.5)
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
ax2.legend(loc=3)
ax1.set_title('Night: {0:s} \n CLV-RM correction in {1:s} \n Solution {2:s}'.format(
night, reference, results_selection))
ax2.set_title('Out-transit transmission spectrum in {0:s}'.format(reference))
try:
ax1.set_xlim(fullspectrum_dict['plot_range'][0], fullspectrum_dict['plot_range'][1])
except:
ax1.set_xlim(transmission['range'][0], transmission['range'][1])
sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
output_file = get_filename(filename_rad + '_clv_rm_models',
config_in['output'], night, it_string=it_string, extension='.pdf')
plt.savefig('plots/'+output_file, bbox_inches='tight', dpi=300)
if interactive_plots:
plt.show()
plt.close()
| 39,983 | 48.917603 | 146 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_spectrum_preparation.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.plot_subroutines import *
from scipy.interpolate import UnivariateSpline
from scipy.signal import savgol_filter
__all__ = ['compute_transmission_spectrum_preparation',
'plot_transmission_spectrum_preparation']
def compute_transmission_spectrum_preparation(config_in):
subroutine_name = 'transmission_spectrum_preparation'
night_dict = from_config_get_nights(config_in)
for night in night_dict:
try:
preparation = load_from_cpickle('transmission_preparation',
config_in['output'],
night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
if config_in['master-out'].get('use_composite', False):
master_out = load_from_cpickle('master_out_composite', config_in['output'], night)
print(' Using composite master-out from all nights')
else:
master_out = load_from_cpickle('master_out', config_in['output'], night)
if config_in['master-out'].get('use_smoothed', False):
master_out['rescaled'] = master_out['smoothed']
master_out['rescaled_err'] = master_out['smoothed_err']
print(' Using smoothed master-out')
preparation = {
'subroutine': subroutine_name,
}
for obs in lists['observations']:
preparation[obs] = {}
preparation[obs]['master_out'] = {}
preparation[obs]['wave'] = input_data[obs]['wave'] #Added for plotting purpose only
""" Step 1+2): bring back the master-out to the ORF and rebin the 1D master-out to the 2D observation scale"""
preparation[obs]['master_out']['rebinned'] = \
rebin_1d_to_2d(master_out['wave'],
master_out['step'],
master_out['rescaled'],
input_data[obs]['wave'],
input_data[obs]['step'],
rv_shift=-observational_pams[obs]['rv_shift_ORF2SRF_mod'],
preserve_flux=False)
preparation[obs]['master_out']['rebinned_err'] = \
rebin_1d_to_2d(master_out['wave'],
master_out['step'],
master_out['rescaled_err'],
input_data[obs]['wave'],
input_data[obs]['step'],
rv_shift=-observational_pams[obs]['rv_shift_ORF2SRF_mod'],
preserve_flux=False,
is_error=True)
for order in range(0, observational_pams['n_orders']):
preparation[obs]['master_out']['rebinned'][order, :], \
preparation[obs]['master_out']['rebinned_err'][order, :], \
_ = \
replace_values_errors_with_interpolation_1d(preparation[obs]['master_out']['rebinned'][order, :],
preparation[obs]['master_out']['rebinned_err'][order, :],
less_than=0.001, greater_than=5.0000)
#replace_values_errors(preparation[obs]['master_out']['rebinned'],
# preparation[obs]['master_out']['rebinned_err'],
# threshold=0.0001, replacement=1.0000)
""" Step 3): obtain the unscaled transmission spectrum for this observation """
preparation[obs]['ratio'] = input_data[obs]['e2ds']/\
preparation[obs]['master_out']['rebinned']
preparation[obs]['ratio_err'] = preparation[obs]['ratio'] * \
np.sqrt((input_data[obs]['e2ds_err']/
input_data[obs]['e2ds'])**2 +
(preparation[obs]['master_out']['rebinned_err']/
preparation[obs]['master_out']['rebinned'])**2)
preparation[obs]['ratio_precleaning'] = preparation[obs]['ratio'].copy()
preparation[obs]['ratio_precleaning_err'] = preparation[obs]['ratio_err'].copy()
if night_dict[night].get('spline_residuals', True):
print()
print(' Cleaning for telluric residuals with Univariate Spline - threshold about 5%')
# cleaning using spline_univariate
for order in range(0, observational_pams['n_orders']):
obs_reference = lists['observations'][0]
len_y = len(lists['observations'])
len_x = len(preparation[obs_reference]['wave'][order, :])
time_from_transit = np.empty(len_y, dtype=np.double)
data_array = np.empty([len_y, len_x], dtype=np.double)
median_array = np.empty(len_y, dtype=np.double)
for i_obs, obs in enumerate(lists['observations']):
time_from_transit[i_obs] = input_data[obs]['BJD'] - observational_pams['time_of_transit']
median_array[i_obs] = np.median(preparation[obs]['ratio_precleaning'][order ,:])
data_array[i_obs, :] = preparation[obs]['ratio_precleaning'][order ,:]/median_array[i_obs]
#wave = preparation[obs]['wave'][order, :]
res = data_array * 1.
val = np.empty([len_y, len_x], dtype=np.double)
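                # fit each pixel column with a smoothing spline along the time
                # axis, then compute the residuals as fractional deviations
                # from the spline model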
for ii in range(0, len_x):
spl = UnivariateSpline(time_from_transit, data_array[:, ii])
val[:,ii] = spl(time_from_transit)
res[:,ii] -= val[:,ii]
res[:,ii] /= val[:,ii]
sel = np.abs(res) > 0.05
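                # pixels deviating by more than 5% from the smooth time trend
                # are replaced with the spline prediction (rescaled by the
                # median) and their uncertainties are inflated by a factor 10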
for i_obs, obs in enumerate(lists['observations']):
if np.sum(sel[i_obs]) > 0:
preparation[obs]['ratio'][order, sel[i_obs]] = val[i_obs, sel[i_obs]] * median_array[i_obs]
preparation[obs]['ratio_err'][order, sel[i_obs]] *= 10.
else:
print()
print(' Cleaning for telluric residuals NOT performed')
for obs in lists['observations']:
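            # divide out the blaze function and the relative pixel size, so that
            # the prepared ratio spectra are free of instrument-induced trends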
preparation[obs]['deblazed'] = preparation[obs]['ratio'] / calib_data['blaze'] / (input_data[obs]['step'] / np.median(input_data[obs]['step']))
preparation[obs]['deblazed_err'] = preparation[obs]['ratio_err'] / calib_data['blaze'] / (input_data[obs]['step'] / np.median(input_data[obs]['step']))
if not config_in['settings'].get('full_output', False):
del preparation[obs]['master_out']
else:
# added for plotting purposes only
preparation[obs]['rescaling'], \
preparation[obs]['rescaled'], \
preparation[obs]['rescaled_err'] = perform_rescaling(
preparation[obs]['wave'],
preparation[obs]['deblazed'],
preparation[obs]['deblazed_err'],
observational_pams['wavelength_rescaling'])
save_to_cpickle('transmission_preparation', preparation, config_in['output'], night)
print()
""" Keep going from here after preparation, unless the subroutines has been called just
to preform the data preparation step
"""
def plot_transmission_spectrum_preparation(config_in, night_input=''):
subroutine_name = 'transmission_spectrum_preparation'
night_dict = from_config_get_nights(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
        # ! To be removed when testing is done
        # ! These plots do not make sense anymore
input_data = retrieve_observations(config_in['output'], night, lists['observations'])
""" Retrieving the analysis"""
try:
preparation = load_from_cpickle('transmission_preparation', config_in['output'], night)
except:
print("No transmission spectrum results, no plots")
print()
continue
#from SLOPpy.subroutines.lines_fit_functions import logprob_case12
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
len_y = len(lists['observations'])
len_x = 4096
        order = 11
time_from_transit = np.empty(len_y, dtype=np.double)
plot_data = np.empty([len_y, len_x], dtype=np.double)
for i_obs, obs in enumerate(lists['observations']):
time_from_transit[i_obs] = input_data[obs]['BJD'] - observational_pams['time_of_transit']
plot_data[i_obs, :] = preparation[obs]['deblazed'][order ,:]/ np.median(preparation[obs]['deblazed'][order ,:])
wave = preparation[obs]['wave'][order, :]
wave_meshgrid, time_meshgrid = np.meshgrid(wave, time_from_transit)
cmap = plt.get_cmap('coolwarm')
#levels = MaxNLocator(nbins=15).tick_values(
# plot_data.min(), plot_data.max())
levels = MaxNLocator(nbins=21).tick_values(0.90, 1.10)
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
plt.figure(figsize=(15, 10))
plt.title('Transmission map in observer reference frame\n {0:s}'.format(night))
PCF = plt.contourf(wave_meshgrid, time_meshgrid,
plot_data, levels=levels, cmap=cmap)
cbar = plt.colorbar(PCF)
cbar.ax.set_ylabel('Intensity')
plt.show()
if night_dict[night].get('spline_residuals', True):
res = plot_data * 1.
from scipy.interpolate import UnivariateSpline
            for ii in range(0, len_x):
spl = UnivariateSpline(time_from_transit, plot_data[:, ii])
val = spl(time_from_transit)
res[:,ii] -= val
res[:,ii] /= val
cmap = plt.get_cmap('coolwarm')
levels = MaxNLocator(nbins=10).tick_values(-0.05, 0.05)
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
plt.figure(figsize=(15, 10))
plt.title('Residuals after dividing by UnivariateSpline Spline\n {0:s}'.format(night))
PCF = plt.contourf(wave_meshgrid, time_meshgrid,
res, levels=levels, cmap=cmap)
cbar = plt.colorbar(PCF)
cbar.ax.set_ylabel('Intensity')
plt.show()
""" Creation of the color array, based on the BJD of the observations
"""
colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(1, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax1.set_ylim(0.90, 1.10)
#ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
for obs in lists['transit_in']:
#preparation[obs]['rescaling'], \
#preparation[obs]['rescaled'], \
#preparation[obs]['rescaled_err'] = perform_rescaling(
# preparation[obs]['wave'],
# preparation[obs]['deblazed'] / (input_data[obs]['step'] / np.median(input_data[obs]['step'])),
# preparation[obs]['deblazed_err'] / (input_data[obs]['step'] / np.median(input_data[obs]['step'])),
# observational_pams['wavelength_rescaling'])
preparation[obs]['rescaling'], \
preparation[obs]['rescaled'], \
preparation[obs]['rescaled_err'] = perform_rescaling(
preparation[obs]['wave'],
preparation[obs]['deblazed'],
preparation[obs]['deblazed_err'],
observational_pams['wavelength_rescaling'])
ax1.scatter(preparation[obs]['wave'],
preparation[obs]['rescaled'],
s=1, alpha=0.25,
color=colors_plot['mBJD'][obs])
sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
| 13,646 | 42.32381 | 163 | py |
SLOPpy | SLOPpy-main/SLOPpy/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from SLOPpy.sloppy_run import sloppy_run
from SLOPpy.subroutines.io_subroutines import yaml_parser, pars_input
#from SLOPpy.subroutines.interpol import interpolate1d_grid_nocheck
from SLOPpy.prepare_datasets import *
from SLOPpy.sky_correction import *
from SLOPpy.differential_refraction_preparation import *
from SLOPpy.differential_refraction import *
from SLOPpy.check_differential_refraction import *
from SLOPpy.telluric_template import *
from SLOPpy.telluric_molecfit_v1_preparation import *
from SLOPpy.telluric_molecfit_v1 import *
from SLOPpy.telluric_molecfit_v1_coadd import *
from SLOPpy.telluric_molecfit_preparation import *
from SLOPpy.telluric_molecfit import *
from SLOPpy.telluric_molecfit_coadd import *
from SLOPpy.telluric_template_alternative import *
from SLOPpy.telluric_airmass_stellarRF import *
from SLOPpy.telluric_airmass_observerRF import *
from SLOPpy.telluric_airmass_observerRF_chunks import *
from SLOPpy.telluric_observerRF_skycalc import *
from SLOPpy.interstellar_lines import *
from SLOPpy.master_out import *
from SLOPpy.transmission_spectrum_preparation import *
from SLOPpy.transmission_spectrum import *
from SLOPpy.transmission_spectrum_average import *
from SLOPpy.transmission_spectrum_shortcuts import *
from SLOPpy.second_telluric_correction_on_transmission import *
#from SLOPpy.clv_rm_modelling import *
from SLOPpy.compare_clv_rm_effects import *
from SLOPpy.spectra_lightcurve import *
from SLOPpy.spectra_lightcurve_average import *
from SLOPpy.transmission_lightcurve import *
from SLOPpy.transmission_lightcurve_average import *
from SLOPpy.write_output_spectra import *
from SLOPpy.write_output_transmission import *
from SLOPpy.quick_transmission import *
# NEW
from SLOPpy.clv_rm_models import *
from SLOPpy.clv_rm_models_lines import *
from SLOPpy.transmission_mcmc import *
from SLOPpy.transmission_binned_mcmc import *
from SLOPpy.pca_preparation import *
from SLOPpy.sysrem_correction import *
__version__ = "1.2"
| 2,026 | 38.745098 | 69 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_molecfit_v1_preparation.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_molecfit_v1_preparation",
"plot_telluric_molecfit_v1_preparation"]
def compute_telluric_molecfit_v1_preparation(config_in):
"""
    Prepare the working directory and the wavelength inclusion list for
    molecfit (v1): telluric and stellar ranges are combined, shifted to the
    observer reference frame, and converted from air to vacuum wavelengths.

    :param config_in: dictionary with the configuration parameters
    :return:
"""
night_dict = from_config_get_nights(config_in)
molecfit_dict = from_config_get_molecfit(config_in)
for night in night_dict:
try:
tellprep = load_from_cpickle('telluric_molecfit_preparation', config_in['output'], night)
continue
except:
print()
print("compute_telluric_molecfit_preparation Night: ", night)
print()
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
tellprep = {
'work_dir': config_in['output'] + '_molecfit_' + night,
'include': {}
}
"""
We store all the molecfit files in a subdirectory
We save the path of the main directory to a temporary file
"""
os.system('mkdir -p ' + tellprep['work_dir'])
os.system('mkdir -p ' + tellprep['work_dir'] + '/output/')
"""
Creation of the include files
"""
"""
includes_spans_ORF: wavelength ranges _with_ telluric lines, in the ORF
includes_spans_SRF: wavelength ranges _without_ stellar lines and broadly
overlapping with telluric ranges, in the SRF
the two lists must have the same number of columns, with precise correspondence
"""
tellprep['include']['spans_telluric'] = np.genfromtxt(molecfit_dict['include_telluric'])
tellprep['include']['spans_stellar_SRF'] = np.genfromtxt(molecfit_dict['include_stellar'])
#print()
#print(tellprep['include']['spans_telluric'])
#print()
#print(tellprep['include']['spans_stellar_SRF'])
""" shift the stellar wavelength ranges into ORF """
tellprep['include']['rv_shift_SRF2ORF'] = -observational_pams['BERV_avg'] + observational_pams['RV_star'][
'RV_systemic']
#print()
#print(tellprep['include']['rv_shift_SRF2ORF'])
#print(observational_pams['BERV_avg'])
#print(observational_pams['RV_star']['RV_systemic'])
#print()
#print()
tellprep['include']['spans_stellar'] = tellprep['include']['spans_stellar_SRF']\
* (tellprep['include']['rv_shift_SRF2ORF']
/ (299792458. / 1000.000) + 1.00000)
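        # first-order (non-relativistic) Doppler shift:
        # lambda_ORF = lambda_SRF * (1 + v/c), with v in km/s and
        # c = 299792458 m/s / 1000 = 299792.458 km/s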
#print()
#print(tellprep['include']['spans_stellar'])
""" Selecting the overlapping regions between the two lists: we want telluric regions that are not contaminated
by stellar lines,
"""
sel_lower = (tellprep['include']['spans_stellar'][:, 0] > tellprep['include']['spans_telluric'][:, 0])
sel_upper = (tellprep['include']['spans_stellar'][:, 1] < tellprep['include']['spans_telluric'][:, 1])
""" Final list in the ORF is built"""
tellprep['include']['selected'] = tellprep['include']['spans_telluric'].copy()
tellprep['include']['selected'][sel_lower, 0] = tellprep['include']['spans_stellar'][sel_lower, 0]
tellprep['include']['selected'][sel_upper, 1] = tellprep['include']['spans_stellar'][sel_upper, 1]
#print()
#print(tellprep['include']['selected'])
""" Molecfit line list must be given in vacuum wavelength, even if the stellar spectra is in air wavelength
conversion from air to vacuum for include file preparation
where s = 10000 / lambda air and the conversion is: lambda_vac = lambda_air * n.
http://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
"""
s2 = (10000. / tellprep['include']['selected']) ** 2
n = 1 + 0.00008336624212083 + 0.02408926869968 / (130.1065924522 - s2) + 0.0001599740894897 / (
38.92568793293 - s2)
tellprep['include']['vacuum'] = tellprep['include']['selected'] * n / 10000.
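        # sanity check (reference values quoted from the literature, not from
        # this code): the Na D2 line at 5889.951 A in air corresponds to
        # ~5891.583 A in vacuum; the division by 10000 converts Angstrom to
        # micron, the wavelength unit adopted here for the molecfit include file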
fileout = open('./' + tellprep['work_dir'] + '/include_' + night + '.dat', 'w')
for i_s, i_e in zip(tellprep['include']['vacuum'][:, 0], tellprep['include']['vacuum'][:, 1]):
fileout.write('{0:12.8f} {1:12.8f}\n'.format(i_s, i_e))
fileout.close()
#quit()
save_to_cpickle('telluric_molecfit_preparation', tellprep, config_in['output'], night)
def plot_telluric_molecfit_v1_preparation(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
print("plot_telluric_template Night: ", night)
| 5,343 | 38.007299 | 119 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_molecfit_v1_coadd.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.telluric_molecfit_v1_preparation import compute_telluric_molecfit_v1_preparation
__all__ = ["compute_telluric_molecfit_v1_coadd",
"plot_telluric_molecfit_v1_coadd"]
subroutine_name = 'telluric_molecfit_v1_coadd'
def compute_telluric_molecfit_v1_coadd(config_in):
"""
    Run molecfit on coadded spectra and calctrans on the individual
    observations to derive the telluric correction of each epoch.

    :param config_in: dictionary with the configuration parameters
    :return:
"""
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
molecfit_dict = from_config_get_molecfit(config_in)
compute_telluric_molecfit_v1_preparation(config_in)
for night in night_dict:
instrument_name = night_dict[night]['instrument']
template_dict = instrument_dict[instrument_name]['telluric_template']
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
print(' instrument :', instrument_name)
print()
tellprep = load_from_cpickle('telluric_molecfit_preparation', config_in['output'], night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations"""
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': 'telluric_molecfit',
'n_orders': 0,
'n_pixels': 0,
}
telluric = {
'subroutine': 'telluric_molecfit',
'reference_frame': 'observer'
}
processed['airmass_ref'] = 0.000
processed['telluric'] = {}
processed['rebin'] = {}
processed['work_dir'] = tellprep['work_dir']
"""
Molecfit works on pixel grid, so we must ensure that the spectra are rebinned always on the same wavelength
scale and same wavelength step. We use local arrays for this purpose
"""
processed['rebin']['wave'] = np.arange(input_data['coadd']['wavelength_range'][0],
input_data['coadd']['wavelength_range'][1],
molecfit_dict['rebinning_step'],
dtype=np.double)
processed['rebin']['size'] = np.size(processed['rebin']['wave'])
processed['rebin']['step'] = np.ones(processed['rebin']['size'],
dtype=np.double) * molecfit_dict['rebinning_step']
        # NOTE: the uniform grid defined above is immediately superseded by the
        # coadded wavelength grid of the night, shared by all the observations
        processed['rebin'] = {
            'wave': input_data['coadd']['wave'],
            'size': input_data['coadd']['size'],
            'step': input_data['coadd']['step'],
        }
n_coadd = 0
n_reference = 0
texp_cumulated = 0.00
texp_total = 0.000
coadd_list = []
# Computing the total integration time
for n_obs, obs in enumerate(lists['observations']):
texp_total += input_data[obs]['EXPTIME']
print(' Writing data and configuration files for molecfit+calctrans')
print()
# There must be a more elegant way to do this, but I'm, not aware of it
for n_obs, obs in enumerate(lists['observations']):
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
""" e2ds spectra are rescaled and then rebinned while keeping them in the Observer Reference Frame"""
processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
perform_rescaling(input_data[obs]['wave'],
input_data[obs]['e2ds'],
input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['rebin_ORF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['e2ds_rescaled'],
calib_data['blaze'],
processed['rebin']['wave'],
processed['rebin']['step'],
preserve_flux=preserve_flux,
rv_shift=0.00)
""" Molecfit analysis is skipped if the telluric correction has been computed already"""
# if os.path.isfile('./molecfit_'+night +'/output/'+obs+'_ORF_s1d_TAC.dat'):
# print(' molecfit+calctrans results for ' + obs + ' already available')
# continue
""" the spectra is saved as an ASCII file in a format suitable for molecfit """
fileout = open('./' + processed['work_dir'] + '/' + obs + '_ORF_s1d.dat', 'w')
for w, f in zip(processed['rebin']['wave'], processed[obs]['rebin_ORF']):
fileout.write('{0:12.6f} {1:12.6f} \n'.format(w, f))
fileout.close()
"""
processed[obs]['rebin_SRF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['e2ds_rescaled'],
calib_data['blaze'],
processed['rebin']['wave'],
processed['rebin']['step'],
preserve_flux=preserve_flux,
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF'])
fileout = open('./' + processed['work_dir'] + '/' + obs + '_SRF_s1d.dat','w')
for w, f in zip(processed['rebin']['wave'], processed[obs]['rebin_SRF']):
fileout.write('{0:12.6f} {1:12.6f} \n'.format(w, f))
fileout.close()
"""
""" spectra is coadded to increase the SNR of the spectrum analyzed by molecfit """
if n_coadd == 0:
reference_name = 'coadded_{0:03d}'.format(n_reference)
rebin_coadd = processed[obs]['rebin_ORF'].copy()
molecfit_pams = {
'MJD': input_data[obs]['MJD'],
'UTC': input_data[obs]['UTC'],
'ELEVATION': input_data[obs]['ELEVATION'],
'HUMIDITY': input_data[obs]['HUMIDITY'],
'PRESSURE': input_data[obs]['PRESSURE'],
'TEMPERATURE_EN': input_data[obs]['TEMPERATURE_EN'],
'TEMPERATURE_M1': input_data[obs]['TEMPERATURE_M1']}
coadded_files = open('./' + processed['work_dir'] + '/' + reference_name + '_files.list', 'w')
coadd_list.append(reference_name)
else:
rebin_coadd += processed[obs]['rebin_ORF']
molecfit_pams['MJD'] += input_data[obs]['MJD']
molecfit_pams['UTC'] += input_data[obs]['UTC']
molecfit_pams['ELEVATION'] += input_data[obs]['ELEVATION']
molecfit_pams['HUMIDITY'] += input_data[obs]['HUMIDITY']
molecfit_pams['PRESSURE'] += input_data[obs]['PRESSURE']
molecfit_pams['TEMPERATURE_EN'] += input_data[obs]['TEMPERATURE_EN']
molecfit_pams['TEMPERATURE_M1'] += input_data[obs]['TEMPERATURE_M1']
n_coadd += 1
coadded_files.write(obs + '\n')
texp_cumulated += input_data[obs]['EXPTIME']
# TODO: input from configuration file for molecfit installation path
bash_script = open('./' + processed['work_dir'] + '/molecfit_exec_' + obs + '.source', 'w')
bash_script.write('#!/bin/bash \n')
bash_script.write('export TMPDIR=$PWD\n')
bash_script.write('echo " " executing calctrans on ' + obs + ' \n')
bash_script.write(molecfit_dict['installation_path'] + 'calctrans ' +
obs + '.par > ' + obs + '_calctrans.log\n')
bash_script.close()
write_molecfit_v1_par('./' + processed['work_dir'] + '/' + obs + '.par',
obs + '_ORF_s1d.dat',
reference_name,
'include_' + night + '.dat',
input_data[obs]['molecfit'],
input_data[obs])
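            # close the current coadd group once the accumulated exposure time
            # reaches the configured threshold, provided that enough exposure
            # time is left to build one more group, or at the last observation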
if (texp_cumulated >= molecfit_dict['exptime_coadd'] and
texp_total-texp_cumulated >= molecfit_dict['exptime_coadd']) \
or n_obs == len(lists['observations'])-1:
coadded_files.close()
print(' Coadded spectrum: ', n_reference)
rebin_coadd /= n_coadd
""" the spectra is saved as an ASCII file in a format suitable for molecfit """
fileout = open('./' + processed['work_dir'] + '/' + reference_name + '_ORF_s1d.dat', 'w')
for w, f in zip(processed['rebin']['wave'], rebin_coadd):
fileout.write('{0:12.6f} {1:12.6f} \n'.format(w, f))
fileout.close()
""" Average of the observational parameters """
for key in molecfit_pams:
molecfit_pams[key] /= n_coadd
molecfit_pams['GEOELEV'] = input_data[obs]['GEOELEV']
molecfit_pams['GEOLONG'] = input_data[obs]['GEOLONG']
molecfit_pams['GEOLAT'] = input_data[obs]['GEOLAT']
# TODO: input from configuration file for molecfit installation path
bash_script = open('./' + processed['work_dir'] + '/molecfit_exec_' + reference_name + '.source', 'w')
bash_script.write('#!/bin/bash \n')
bash_script.write('export TMPDIR=$PWD\n')
bash_script.write('echo " " executing molecfit+calctrans on ' + reference_name + ' \n')
bash_script.write(molecfit_dict['installation_path'] + 'molecfit ' +
reference_name + '.par > ' + reference_name + '_molecfit.log\n')
bash_script.write(molecfit_dict['installation_path'] + 'calctrans ' +
reference_name + '.par > ' + reference_name + '_calctrans.log\n')
bash_script.close()
# TODO: cycle with variation in UTC until molecfit exits succesfully
#
# while True:
#
# # write parameter file
# # execute molecfit
# # check if file _tac.fits has been written (= successful run)
# if cond:
# break
#
utc_reference = molecfit_pams['UTC'] * 1.
utc_incremental = True
utc_increase = 500.
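                # molecfit occasionally crashes for specific UTC values: nudge
                # the UTC keyword in steps of 500 s (forward first, backward
                # when approaching the end of the day) until the run succeeds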
while True:
if os.path.exists('./' + processed['work_dir'] + '/output/' + reference_name + '_tac.asc'):
print(' molecfit for ' + reference_name + ' previously completed')
print()
break
write_molecfit_v1_par('./' + processed['work_dir'] + '/' + reference_name + '.par',
reference_name + '_ORF_s1d.dat',
reference_name,
'include_' + night + '.dat',
input_data[obs]['molecfit'],
molecfit_pams)
os.system('cd ' + processed['work_dir'] + '/ && . ./molecfit_exec_' + reference_name + '.source')
if os.path.exists('./' + processed['work_dir'] + '/output/' + reference_name + '_tac.asc'):
print(' molecfit for ' + reference_name + ' successfully completed')
print()
break
if molecfit_pams['UTC'] > 86400 - utc_increase:
utc_incremental = False
molecfit_pams['UTC'] = utc_reference
if utc_incremental:
molecfit_pams['UTC'] += utc_increase
print(' molecfit for {0:s} crashed, UTC increased from {1:6.0f} to {2:6.0f} '.format(
reference_name, utc_reference, molecfit_pams['UTC']))
else:
molecfit_pams['UTC'] -= utc_increase
print(' molecfit for {0:s} crashed, UTC decreased from {1:6.0f} to {2:6.0f} '.format(
reference_name, utc_reference, molecfit_pams['UTC']))
n_coadd = 0
n_reference += 1
texp_total -= texp_cumulated
texp_cumulated = 0.0
"""
Execute molecfit runs on all the coadded spectra
"""
# for reference_name in coadd_list:
# os.system('cd molecfit_' + night + '/ && . ./molecfit_exec_' + reference_name + '.source')
#
print()
print(' molecfit completed')
for obs in lists['observations']:
if os.path.exists('./' + processed['work_dir'] + '/output/' + obs + '_ORF_s1d_TAC.dat'):
print(' skipping calctrans execution for observation ' + obs)
else:
print(' calctrans execution for observation ' + obs)
os.system('cd ' + processed['work_dir'] + '/ && . ./molecfit_exec_' + obs + '.source')
print()
print(' calctrans completed')
for n_obs, obs in enumerate(lists['observations']):
telluric[obs] = {}
""" Loading the telluric spectrum from the output directory of molecfit """
telluric_molecfit = np.genfromtxt(
'./' + processed['work_dir'] + '/output/'+obs+'_ORF_s1d_TAC.dat', usecols=2)
""" rebinning onto the e2ds wave scale"""
if molecfit_dict.get('fix_telluric', True):
print(' fix_telluric applied - temporary workaround for line at 5885.97 A [ORF]')
line_boundaries = [5885.74, 5886.21]
sel = (processed['rebin']['wave'] > line_boundaries[0]) \
& (processed['rebin']['wave'] < line_boundaries[1])
tell_cont = np.amax(telluric_molecfit[sel])
telluric_molecfit[sel] = (telluric_molecfit[sel] - tell_cont) / 2.0 + tell_cont
telluric[obs]['spectrum'] = \
rebin_1d_to_2d(processed['rebin']['wave'],
processed['rebin']['step'],
telluric_molecfit,
input_data[obs]['wave'],
input_data[obs]['step'],
preserve_flux=False)
try:
                # np.nan_to_num with keyword replacements requires numpy >= 1.17
                telluric[obs]['spectrum'] = np.nan_to_num(telluric[obs]['spectrum'],
                                                          nan=1.0, posinf=1.0, neginf=1.0)
            except TypeError:
                # fallback for older numpy versions
                temp = ~(np.isfinite(telluric[obs]['spectrum']))
                telluric[obs]['spectrum'][temp] = 1.0
sel = telluric[obs]['spectrum'] < 0.0001
telluric[obs]['spectrum'][sel] = 1.0
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
" for compatibilty to some plots, even if it doesn't make any sense"
telluric[obs]['airmass_ref'] = 0.000
telluric[obs]['spectrum_noairmass'] = np.power(telluric[obs]['spectrum'],
telluric[obs]['airmass_ref'] - input_data[obs]['AIRMASS'])
telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
# we just copy the spectrum file, it's it's a model itself
telluric[obs]['spline'] = telluric[obs]['spectrum'].copy()
processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / telluric[obs]['spectrum']
processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / telluric[obs]['spectrum']
save_to_cpickle('telluric', telluric, config_in['output'], night)
save_to_cpickle('telluric_processed', processed, config_in['output'], night)
print()
print("Night ", night, " completed")
def plot_telluric_molecfit_v1_coadd(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
# plt.scatter(rescaling_array, computed_std, c='C0', zorder=1)
# plt.scatter(sel_factor, sel_stdev, c='C1', zorder=2)
# plt.plot(rescaling_array, np.polyval(coeff, rescaling_array))
# plt.plot(rescaling_array, 2*rescaling_array*coeff[0] + coeff[1] )
# plt.plot()
print("plot_telluric_template Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
""" Retrieving the analysis"""
try:
processed = load_from_cpickle('telluric_processed', config_in['output'], night)
telluric = load_from_cpickle('telluric', config_in['output'], night)
except:
print()
print("No telluric correction, no plots")
continue
input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
colors, cmap, line_colors = make_color_array(lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1)
cbax1 = plt.subplot(gs[:, 1])
lift_spectrum = 0.25
for i, obs in enumerate(lists['observations']):
color_array = cmap(i / len(lists['observations']))
for order in range(0, processed[obs]['n_orders']):
if order == 0 and i == 0:
ax1.plot(input_data[obs]['wave'][order, :],
processed[obs]['e2ds_rescaled'][order, :],
c=color_array, lw=1, alpha=0.5, label='uncorrected')
ax1.scatter(input_data[obs]['wave'][order, :],
processed[obs]['e2ds_corrected'][order, :],
s=1, c=np.atleast_2d(color_array), label='corrected')
else:
ax1.plot(input_data[obs]['wave'][order, :],
processed[obs]['e2ds_rescaled'][order, :],
c=color_array, lw=1, alpha=0.5)
ax1.scatter(input_data[obs]['wave'][order, :],
processed[obs]['e2ds_corrected'][order, :],
s=1, c=np.atleast_2d(color_array))
# ax1.plot(processed[obs]['wave'][order, :],
# e2ds_rescaled[order, :]+lift_spectrum,
# c=color_array, lw=1, alpha=0.5)
# ax1.scatter(processed[obs]['wave'][order, :],
# e2ds_rescaled_corrected_spline[order, :]+lift_spectrum,
# s=1, c=np.atleast_2d(color_array))
ax2.plot(input_data[obs]['wave'][order, :],
telluric[obs]['spectrum'][order, :],
c=color_array)
ax2.axhline(1.00, c='k')
# ax2.plot(processed[obs]['wave'][order, :],
# telluric[obs]['spline'][order, :]+lift_spectrum,
# c=color_array)
# ax2.axhline(1.00+lift_spectrum, c='k')
# ax2.plot(input_data['coadd']['wave'],telluric['stellarRF']['spline_eval']+0.1,c='k')
# ax2.scatter(input_data['coadd']['wave'],telluric['stellarRF']['spectrum']+0.1,c='r', s=2)
ax1.legend(loc=3)
ax1.set_title('Night: ' + night)
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
try:
instrument = night_dict[night]['instrument']
comparison_file = config_in['instruments'][instrument]['telluric_comparison']
comparison_data = np.genfromtxt(comparison_file, skip_header=1)
if comparison_data[0, 0] < 1000.0:
nm2Ang = 10.
else:
nm2Ang = 1.
ax1.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
ax2.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
except:
pass
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
| 22,214 | 44.244399 | 118 | py |
SLOPpy | SLOPpy-main/SLOPpy/spectra_lightcurve.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines.clv_rm_subroutines import *
from SLOPpy.subroutines.math_functions import *
from astropy.convolution import convolve, Box1DKernel
__all__ = ['compute_spectra_lightcurve',
'compute_spectra_lightcurve_clv_rm_correction',
'plot_spectra_lightcurve',
'plot_spectra_lightcurve_clv_rm_correction']
def compute_spectra_lightcurve_clv_rm_correction(config_in, lines_label):
compute_spectra_lightcurve(config_in, lines_label)
def plot_spectra_lightcurve_clv_rm_correction(config_in, night_input=''):
plot_spectra_lightcurve(config_in, night_input)
subroutine_name = 'spectra_lightcurve'
sampler_name = 'emcee'
def compute_spectra_lightcurve(config_in, lines_label):
results_list_default = ['user',
'mcmc_night_MED',
'mcmc_night_MAP',
'mcmc_global_MED',
'mcmc_global_MAP']
append_list = ['', '_uncorrected', '_clv_model']
do_average_instead_of_sum = True
night_dict = from_config_get_nights(config_in)
#instrument_dict = from_config_get_instrument(config_in)
#system_dict = from_config_get_system(config_in)
planet_dict = from_config_get_planet(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
clv_rm_correction = lines_dict.get('clv_rm_correction', True)
# from_config_get_transmission_lightcurve(config_in)
#lightcurve_dict = from_config_get_transmission_lightcurve(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
""" Using the MCMC fit range to define the transmission spectrum region """
shared_selection = (shared_data['coadd']['wave'] >= lines_dict['range'][0]) \
& (shared_data['coadd']['wave'] < lines_dict['range'][1])
preparation_template = {
'subroutine': subroutine_name,
'range': lines_dict['range'],
'wave': shared_data['coadd']['wave'][shared_selection],
'step': shared_data['coadd']['step'][shared_selection],
        'size': int(np.sum(shared_selection)),
}
    if 'full_transit_duration' in planet_dict:
        full_transit_duration = planet_dict['full_transit_duration'][0]
else:
full_transit_duration = planet_dict['transit_duration'][0]
if 'total_transit_duration' in planet_dict:
total_transit_duration = planet_dict['total_transit_duration'][0]
else:
total_transit_duration = planet_dict['transit_duration'][0]
"""
The transit phase [0-1] is divided in N (=5) bins. Two arrays are computed:
- transit_in_bins: array with the boundaries of the bins, size=N+1
- transit_in_step: average size of the bin, size=1
"""
transit_in_bins = np.linspace(
-total_transit_duration/2./planet_dict['period'][0],
total_transit_duration/2./planet_dict['period'][0],
6
)
transit_full_bins = np.linspace(
-full_transit_duration/2./planet_dict['period'][0],
full_transit_duration/2./planet_dict['period'][0],
6
)
transit_in_step = np.average(transit_in_bins[1:]-transit_in_bins[:-1])
transit_full_step = np.average(transit_full_bins[1:]-transit_full_bins[:-1])
""" Preparation stage - rebinning of spectra """
for night in night_dict:
preparation = None # Free up memory
try:
preparation = load_from_cpickle(subroutine_name + '_preparation', config_in['output'], night, lines_label)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name+ '_preparation', night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name+ '_preparation', night, 'Computing'))
print()
preparation = preparation_template.copy()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations( config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
for n_obs, obs in enumerate( lists['observations']):
preparation[obs] = {}
preparation[obs]['rescaling'], \
preparation[obs]['rescaled'], \
preparation[obs]['rescaled_err'] = perform_rescaling(
input_data[obs]['wave'], input_data[obs]['e2ds'], input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
preserve_flux = input_data[obs].get('absolute_flux', True)
preparation[obs]['rebinned'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds'],
calib_data['blaze'],
preparation['wave'],
preparation['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])
preparation[obs]['rebinned_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds_err'],
calib_data['blaze'],
preparation['wave'],
preparation['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])
save_to_cpickle(subroutine_name+'_preparation', preparation, config_in['output'], night, lines_label)
# Free up memory
calib_data = None
input_data = None
observational_pams = None
""" Actual computation of spectral lightcurve """
# doublet sodium in the lab reference frame
"""
C stands for central
"""
C_bands = {}
for passband_key, passband_val in lines_dict['passbands'].items():
C_bands[passband_key] = {}
for line_key, line_val in lines_dict['lines'].items():
C_bands[passband_key][line_key] = (np.abs(preparation['wave'] - line_val) < passband_val / 2.)
"""
S stands for side
"""
S_bands = {}
for band_key, band_val in lines_dict['continuum'].items():
S_bands[band_key] = (preparation['wave'] >= band_val[0]) & (preparation['wave'] <= band_val[1])
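    # the lightcurve diagnostic computed below is the ratio between the flux
    # integrated in the central (C) passbands around each line and the flux
    # integrated in the side (S) continuum bands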
results_list = results_list_default.copy()
for results_selection in results_list_default:
skip_iteration = False
for night in night_dict:
print_warning = True
if skip_iteration: continue
binned_mcmc_night = check_existence_cpickle(
'transmission_binned_mcmc_'+sampler_name+'_results', config_in['output'], night, lines_label)
binned_mcmc_global = check_existence_cpickle(
'transmission_binned_mcmc_'+sampler_name+'_results', config_in['output'], lines_label)
mcmc_night = check_existence_cpickle(
'transmission_mcmc_'+sampler_name+'_results', config_in['output'], night, lines_label)
mcmc_global = check_existence_cpickle(
'transmission_mcmc_'+sampler_name+'_results', config_in['output'], lines_label)
            results_list = ['user']
            if (mcmc_night or binned_mcmc_night):
                results_list.extend(['mcmc_night_MED', 'mcmc_night_MAP'])
            if (mcmc_global or binned_mcmc_global):
                results_list.extend(['mcmc_global_MED', 'mcmc_global_MAP'])
if results_selection not in results_list:
print(' {0:s} results not found, skipping iteration'.format(results_selection))
skip_iteration = True
continue
if mcmc_night and results_selection in ['mcmc_night_MED', 'mcmc_night_MAP']:
mcmc_results_night = load_from_cpickle(
'transmission_mcmc_'+sampler_name+'_results', config_in['output'], night, lines_label)
print(' Observational parameters from MCMC fit of unbinned data, individual night')
elif mcmc_global and results_selection in ['mcmc_global_MED', 'mcmc_global_MAP']:
mcmc_results_global = load_from_cpickle(
'transmission_mcmc_'+sampler_name+'_results', config_in['output'], lines=lines_label)
print(' Observational parameters from MCMC fit of unbinned data, global fit')
elif binned_mcmc_night and results_selection in ['mcmc_night_MED', 'mcmc_night_MAP']:
mcmc_results_night = load_from_cpickle(
'transmission_binned_mcmc_'+sampler_name+'_results', config_in['output'], night, lines_label)
print(' Observational parameters from MCMC fit of binned data, individual night')
elif binned_mcmc_global and results_selection in ['mcmc_global_MED', 'mcmc_global_MAP']:
mcmc_results_global = load_from_cpickle(
'transmission_binned_mcmc_'+sampler_name+'_results', config_in['output'], lines=lines_label)
print(' Observational parameters from MCMC fit of binned data, global fit')
else:
print(' Observational parameters from configuration file')
try:
lightcurve = load_from_cpickle(subroutine_name + '_' + results_selection , config_in['output'], night, lines_label)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
preparation = load_from_cpickle(subroutine_name + '_preparation', config_in['output'], night, lines_label)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': 'compute_spectra_lightcurve',
'range': preparation['range'],
'wave': preparation['wave'],
'step': preparation['step'],
'size': preparation['size']
}
lightcurve = {
'subroutine': subroutine_name,
'arrays': {
'observations': {
                        'obs_name': np.zeros(len(lists['observations']), dtype='U64'),  # dtype=str would silently truncate the names to 1 character
'phase': np.zeros(len(lists['observations'])),
},
'transit_in': {},
'transit_full': {},
'transit_out': {},
},
'C_bands': C_bands,
'S_bands': S_bands,
'average': {},
'bins': {
'transit_in_bins': transit_in_bins,
'transit_in_step': transit_in_step,
'transit_full_bins': transit_full_bins,
'transit_full_step': transit_full_step
}
}
""" Adding the C-bands arrays to the dictionary"""
for band_key in C_bands:
for name_append in append_list:
lightcurve['arrays']['observations']['ratio_' + band_key + name_append] = np.zeros([len(lists['observations']), 2])
transit_out_flag = np.zeros(len(lists['observations']), dtype=bool)
transit_in_flag = np.zeros(len(lists['observations']), dtype=bool)
transit_full_flag = np.zeros(len(lists['observations']), dtype=bool)
if clv_rm_correction:
try:
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night, lines_label)
except (FileNotFoundError, IOError):
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night)
""" Shift into planetary reference system is the default
choice"""
if results_selection == 'user':
planet_R_factor = observational_pams.get('Rp_factor', 1.00000)
elif results_selection == 'mcmc_night_MED':
planet_R_factor = mcmc_results_night['results']['planet_R']
elif results_selection == 'mcmc_night_MAP':
planet_R_factor = mcmc_results_night['results_MAP']['planet_R']
elif results_selection == 'mcmc_global_MED':
planet_R_factor = mcmc_results_global['results']['planet_R']
elif results_selection == 'mcmc_global_MAP':
planet_R_factor = mcmc_results_global['results_MAP']['planet_R']
for n_obs, obs in enumerate( lists['observations']):
processed[obs] = {}
lightcurve[obs] = {}
processed[obs]['uncorrected'] = preparation[obs]['rebinned']
processed[obs]['uncorrected_err'] = preparation[obs]['rebinned_err']
if clv_rm_correction:
"""" CLV + RM computation in the planetary reference frame """
processed[obs]['clv_model_stellarRF'] = interpolate1d_grid_nocheck(planet_R_factor,
clv_rm_models['common']['radius_grid'],
clv_rm_models[obs]['clv_rm_model_convolved_normalized'])
processed[obs]['clv_model_rebinned'] = \
rebin_1d_to_1d(clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
processed[obs]['clv_model_stellarRF'],
processed['wave'],
processed['step'],
preserve_flux=False)
processed[obs]['rebinned'] = processed[obs]['uncorrected'] / processed[obs]['clv_model_rebinned']
processed[obs]['rebinned_err'] = processed[obs]['uncorrected_err'] / processed[obs]['clv_model_rebinned']
else:
processed[obs]['clv_model_rebinned'] = np.ones(processed['size'])
processed[obs]['rebinned'] = processed[obs]['uncorrected']
processed[obs]['rebinned_err'] = processed[obs]['uncorrected_err']
if print_warning:
print(' *** No CLV correction')
print_warning = False
try:
phase_internal = (observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'][0])/planet_dict['period'][0]
except:
phase_internal = (observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'])/planet_dict['period'][0]
processed[obs]['bands'] = {
'phase': phase_internal
}
processed[obs]['bands_uncorrected'] = {
'phase': phase_internal
}
processed[obs]['bands_clv_model'] = {
'phase': phase_internal
}
processed[obs]['s_integrated'] = 0.000
processed[obs]['s_integrated_uncorrected'] = 0.000
processed[obs]['s_integrated_clv_model'] = 0.000
processed[obs]['s_sigmaq_sum'] = 0.000
n_bands = 0.00
for band_key, band_val in S_bands.items():
if do_average_instead_of_sum:
processed[obs]['bands'][band_key] = \
[np.average(processed[obs]['rebinned'][band_val]),
np.sum((processed[obs]['rebinned_err'][band_val])**2)
/len(processed[obs]['rebinned_err'][band_val])**2]
processed[obs]['bands_uncorrected'][band_key] = \
[np.average(processed[obs]['uncorrected'][band_val]),
np.sum((processed[obs]['uncorrected_err'][band_val])**2)
/len(processed[obs]['uncorrected_err'][band_val])**2]
processed[obs]['bands_clv_model'][band_key] = \
[np.average(processed[obs]['clv_model_rebinned'][band_val]),
np.sum((processed[obs]['rebinned_err'][band_val])**2)
/len(processed[obs]['rebinned_err'][band_val])**2]
else:
processed[obs]['bands'][band_key] = \
[np.sum(processed[obs]['rebinned'][band_val]),
np.sum((processed[obs]['rebinned_err'][band_val])**2)]
processed[obs]['bands_uncorrected'][band_key] = \
[np.sum(processed[obs]['uncorrected'][band_val]),
np.sum((processed[obs]['uncorrected_err'][band_val])**2)]
processed[obs]['bands_clv_model'][band_key] = \
[np.sum(processed[obs]['clv_model_rebinned'][band_val]),
np.sum((processed[obs]['rebinned_err'][band_val])**2)]
processed[obs]['s_integrated'] += processed[obs]['bands'][band_key][0]
processed[obs]['s_integrated_uncorrected'] += processed[obs]['bands_uncorrected'][band_key][0]
processed[obs]['s_integrated_clv_model'] += processed[obs]['bands_clv_model'][band_key][0]
processed[obs]['s_sigmaq_sum'] += processed[obs]['bands'][band_key][1]
n_bands += 1.
                # TODO: why the factor-of-2 in the denominator?
processed[obs]['s_integrated'] /= (n_bands / 2.)
processed[obs]['s_integrated_uncorrected'] /= (n_bands / 2.)
processed[obs]['s_integrated_clv_model'] /= (n_bands / 2.)
processed[obs]['s_sigmaq_sum'] /= (n_bands / 2.)**2
for band_key, band_dict in C_bands.items():
processed[obs]['bands'][band_key] = {}
processed[obs]['bands_uncorrected'][band_key] = {}
processed[obs]['bands_clv_model'][band_key] = {}
processed[obs]['c_integrated'] = 0.000
processed[obs]['c_integrated_uncorrected'] = 0.000
processed[obs]['c_integrated_clv_model'] = 0.000
processed[obs]['c_sigmaq_sum'] = 0.000
n_bands = 0.00
for line_key, line_val in band_dict.items():
if do_average_instead_of_sum:
processed[obs]['bands'][band_key][line_key] = \
[np.average(processed[obs]['rebinned'][line_val]),
np.sum((processed[obs]['rebinned_err'][line_val]) ** 2)
/ len(processed[obs]['rebinned_err'][line_val]) ** 2]
processed[obs]['bands_uncorrected'][band_key][line_key] = \
[np.average(processed[obs]['uncorrected'][line_val]),
np.sum((processed[obs]['uncorrected_err'][line_val]) ** 2)
/ len(processed[obs]['rebinned_err'][line_val]) ** 2]
processed[obs]['bands_clv_model'][band_key][line_key] = \
[np.average(processed[obs]['clv_model_rebinned'][line_val]),
np.sum((processed[obs]['rebinned_err'][line_val]) ** 2)
/ len(processed[obs]['rebinned_err'][line_val]) ** 2]
else:
processed[obs]['bands'][band_key][line_key] = \
[np.sum(processed[obs]['rebinned'][line_val]),
np.sum((processed[obs]['rebinned_err'][line_val]) ** 2)]
processed[obs]['c_integrated'] += processed[obs]['bands'][band_key][line_key][0]
processed[obs]['c_integrated_uncorrected'] += processed[obs]['bands_uncorrected'][band_key][line_key][0]
processed[obs]['c_integrated_clv_model'] += processed[obs]['bands_clv_model'][band_key][line_key][0]
processed[obs]['c_sigmaq_sum'] += processed[obs]['bands'][band_key][line_key][1]
n_bands += 1.
processed[obs]['c_integrated'] /= (n_bands / 2.)
processed[obs]['c_integrated_uncorrected'] /= (n_bands / 2.)
processed[obs]['c_integrated_clv_model'] /= (n_bands / 2.)
processed[obs]['c_sigmaq_sum'] /= (n_bands / 2.) ** 2
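                    # line-to-continuum ratio with standard uncorrelated error
                    # propagation: sigma_r/r = sqrt((sigma_C/C)**2 + (sigma_S/S)**2)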
for name_append in append_list:
ratio = processed[obs]['c_integrated' + name_append] / processed[obs]['s_integrated' + name_append]
ratio_err = ratio * np.sqrt(
processed[obs]['c_sigmaq_sum'] / processed[obs]['c_integrated' + name_append] ** 2
+ processed[obs]['s_sigmaq_sum'] / processed[obs]['s_integrated' + name_append] ** 2)
lightcurve[obs]['ratio_' + band_key + name_append] = [ratio, ratio_err]
lightcurve['arrays']['observations']['ratio_' + band_key + name_append][n_obs, :] = \
lightcurve[obs]['ratio_' + band_key + name_append][:]
lightcurve[obs]['phase'] = processed[obs]['bands']['phase']
lightcurve['arrays']['observations']['obs_name'][n_obs] = obs
lightcurve['arrays']['observations']['phase'][n_obs] = lightcurve[obs]['phase']
if obs in lists['transit_out']:
transit_out_flag[n_obs] = True
if obs in lists['transit_in']:
transit_in_flag[n_obs] = True
if obs in lists['transit_full']:
transit_full_flag[n_obs] = True
for band_key in C_bands:
for name_append in append_list:
lightcurve['arrays']['rescaling_' + band_key + name_append] = \
np.average(lightcurve['arrays']['observations']['ratio_' + band_key + name_append][transit_out_flag, 0], axis=0)
sorting_index = np.argsort(lightcurve['arrays']['observations']['phase'])
transit_out_flag = transit_out_flag[sorting_index]
transit_in_flag = transit_in_flag[sorting_index]
transit_full_flag = transit_full_flag[sorting_index]
lightcurve['arrays']['observations']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][sorting_index]
lightcurve['arrays']['observations']['phase'] = lightcurve['arrays']['observations']['phase'][sorting_index]
lightcurve['arrays']['transit_in']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_in_flag]
lightcurve['arrays']['transit_in']['phase'] = lightcurve['arrays']['observations']['phase'][transit_in_flag]
lightcurve['arrays']['transit_full']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_full_flag]
lightcurve['arrays']['transit_full']['phase'] = lightcurve['arrays']['observations']['phase'][transit_full_flag]
lightcurve['arrays']['transit_out']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_out_flag]
lightcurve['arrays']['transit_out']['phase'] = lightcurve['arrays']['observations']['phase'][transit_out_flag]
for band_key in C_bands:
for name_append in append_list:
lightcurve['arrays']['observations']['ratio_' + band_key + name_append] = \
lightcurve['arrays']['observations']['ratio_' + band_key + name_append][sorting_index] \
/ lightcurve['arrays']['rescaling_' + band_key + name_append]
lightcurve['arrays']['transit_in']['ratio_' + band_key + name_append] = \
lightcurve['arrays']['observations']['ratio_' + band_key + name_append][transit_in_flag]
lightcurve['arrays']['transit_full']['ratio_' + band_key + name_append] = \
lightcurve['arrays']['observations']['ratio_' + band_key + name_append][transit_full_flag]
lightcurve['arrays']['transit_out']['ratio_' + band_key + name_append] = \
lightcurve['arrays']['observations']['ratio_' + band_key + name_append][transit_out_flag]
avg_out, avg_out_sq = \
np.average(lightcurve['arrays']['transit_out']['ratio_' + band_key + name_append][:, 0],
weights=1./(lightcurve['arrays']['transit_out']['ratio_' + band_key + name_append][:, 1])**2,
returned=True)
avg_in, avg_in_sq = \
np.average(lightcurve['arrays']['transit_in']['ratio_' + band_key + name_append][:, 0],
weights=1. / (lightcurve['arrays']['transit_in']['ratio_' + band_key + name_append][:, 1]) ** 2,
returned=True)
avg_full, avg_full_sq = \
np.average(lightcurve['arrays']['transit_full']['ratio_' + band_key + name_append][:, 0],
weights=1. / (lightcurve['arrays']['transit_full']['ratio_' + band_key + name_append][:, 1]) ** 2,
returned=True)
lightcurve['average'][band_key + name_append] = {
'average_out': np.asarray([avg_out, 1./np.power(avg_out_sq, 0.5)]),
'average_in': np.asarray([avg_in, 1. / np.power(avg_in_sq, 0.5)]),
'average_full': np.asarray([avg_full, 1. / np.power(avg_full_sq, 0.5)]),
}
delta_fac = (lightcurve['average'][band_key + name_append]['average_full'][0]
/ lightcurve['average'][band_key + name_append]['average_out'][0])
delta_err = delta_fac * np.sqrt(
(lightcurve['average'][band_key + name_append]['average_out'][1]
/ lightcurve['average'][band_key + name_append]['average_out'][0]) ** 2
+ (lightcurve['average'][band_key + name_append]['average_full'][1]
/ lightcurve['average'][band_key + name_append]['average_full'][0]) ** 2)
lightcurve['average'][band_key + name_append]['delta'] = np.asarray([(1.-delta_fac)*100., delta_err*100.])
lightcurve['arrays']['observations']['transit_out_flag'] = transit_out_flag
lightcurve['arrays']['observations']['transit_in_flag'] = transit_in_flag
lightcurve['arrays']['observations']['transit_full_flag'] = transit_full_flag
""" Compute the duration of the pre-transit observations, using as scale
the number of bins, with the same size as those used inside the
transit.
The value is given by the difference of the phase of the beginning of the transit minus
the phase of the first observation, keeping in mind that the centre of the transit has phase = 0
An additional bin is added if there are observations left out from the actual number of bins
"""
pre_duration = transit_full_bins[0] - lightcurve['arrays']['transit_out']['phase'][0]
if pre_duration > 0:
nsteps_pre = int(pre_duration/transit_full_step)
if pre_duration % transit_full_step > 0.0:
nsteps_pre += 1
else:
nsteps_pre = 0
""" same as pre-transit, but suing the post-transit instead"""
post_duration = lightcurve['arrays']['transit_out']['phase'][-1] - transit_full_bins[-1]
if post_duration > 0:
nsteps_post = int(post_duration / transit_full_step)
if post_duration % transit_full_step > 0.0:
nsteps_post += 1
else:
nsteps_post = 0
""" THe full array with both in-transit and out-transit phase, built in such a way that the
- the lower boundary of the first in-transit bin corresponds to the beginning of the transit
- the upper boundary of the last in-transit bin corresponds to the end of the transit
"""
transit_bins = np.arange(transit_full_bins[0]-nsteps_pre*transit_full_step,
transit_full_bins[-1] + (nsteps_post+1.1) * transit_full_step,
transit_full_step)
lightcurve['binned'] = {
'observations': {
'phase': np.zeros(len(transit_bins)),
},
'transit_in': {},
'transit_full': {},
'transit_out': {},
}
for band_key in C_bands:
for name_append in append_list:
lightcurve['binned']['observations']['ratio_' + band_key + name_append] = np.zeros([len(transit_bins), 2])
transit_out_flag = np.zeros(len(transit_bins), dtype=bool)
transit_in_flag = np.zeros(len(transit_bins), dtype=bool)
transit_full_flag = np.zeros(len(transit_bins), dtype=bool)
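        # Phase binning of the observations; n_a counts only the bins that actually contain
        # at least one observation, so the binned arrays are truncated to n_a at the end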
n_a = 0
for nb in range(0, len(transit_bins)-1):
sel = (lightcurve['arrays']['observations']['phase'] >= transit_bins[nb]) \
& (lightcurve['arrays']['observations']['phase'] < transit_bins[nb+1])
if np.sum(sel) <= 0: continue
lightcurve['binned']['observations']['phase'][n_a] = np.average(lightcurve['arrays']['observations']['phase'][sel])
for band_key in C_bands:
for name_append in append_list:
lightcurve['binned']['observations']['ratio_' + band_key + name_append][n_a, 0], sum_weights = np.average(
lightcurve['arrays']['observations']['ratio_' + band_key + name_append][sel, 0],
weights=1. / lightcurve['arrays']['observations']['ratio_' + band_key + name_append][sel, 1]**2,
returned=True)
lightcurve['binned']['observations']['ratio_' + band_key + name_append][n_a, 1] = np.sqrt(1. / sum_weights)
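            # Classify the bin from its phase: out of transit beyond the total duration,
            # ingress/egress ("transit_in") between full and total duration, fully in
            # transit otherwise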
if np.abs(lightcurve['binned']['observations']['phase'][n_a]) >= \
total_transit_duration/2./planet_dict['period'][0]:
transit_out_flag[n_a] = True
elif np.abs(lightcurve['binned']['observations']['phase'][n_a]) >= \
full_transit_duration/2./planet_dict['period'][0]:
transit_in_flag[n_a] = True
else:
transit_full_flag[n_a] = True
n_a += 1 # bins actually computed
lightcurve['binned']['transit_in']['phase'] = lightcurve['binned']['observations']['phase'][transit_in_flag]
lightcurve['binned']['transit_full']['phase'] = lightcurve['binned']['observations']['phase'][transit_full_flag]
lightcurve['binned']['transit_out']['phase'] = lightcurve['binned']['observations']['phase'][transit_out_flag]
lightcurve['binned']['observations']['phase'] = lightcurve['binned']['observations']['phase'][:n_a]
for band_key in C_bands:
for name_append in append_list:
lightcurve['binned']['transit_in']['ratio_' + band_key + name_append] = \
lightcurve['binned']['observations']['ratio_' + band_key + name_append][transit_in_flag, :]
lightcurve['binned']['transit_full']['ratio_' + band_key + name_append] = \
lightcurve['binned']['observations']['ratio_' + band_key + name_append][transit_full_flag, :]
lightcurve['binned']['transit_out']['ratio_' + band_key + name_append] = \
lightcurve['binned']['observations']['ratio_' + band_key + name_append][transit_out_flag, :]
lightcurve['binned']['observations']['ratio_' + band_key + name_append] = \
lightcurve['binned']['observations']['ratio_' + band_key + name_append][:n_a, :]
        save_to_cpickle(subroutine_name + '_' + results_selection + '_processed', processed, config_in['output'], night, lines_label)
        save_to_cpickle(subroutine_name + '_' + results_selection, lightcurve, config_in['output'], night, lines_label)
# Forcing memory deallocation
lightcurve = None
processed = None
preparation = None
clv_rm_models = None
def plot_spectra_lightcurve(config_in, night_input='', clv_rm_correction=False):
import matplotlib.pyplot as plt
if clv_rm_correction:
subroutine_name = 'spectra_lightcurve_clv_rm_correction'
else:
subroutine_name = 'spectra_lightcurve'
night_dict = from_config_get_nights(config_in)
    if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
""" Retrieving the analysis"""
try:
lightcurve = load_from_cpickle(subroutine_name, config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Plotting'))
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Skipped'))
continue
#observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
C_bands = lightcurve['C_bands']
print()
for band_key in C_bands:
print("Night: {0:s} Band: {1:s} Delta:{2:8.4f} +- {3:8.4f} [%]".format(night, band_key,
lightcurve['average'][band_key]['delta'][0], lightcurve['average'][band_key]['delta'][1]))
for band_key in C_bands:
plt.figure(figsize=(12, 6))
plt.title('Spectra lightcurve - night {0:s} \n {1:s}'.format(night, band_key))
            plt.errorbar(lightcurve['arrays']['observations']['phase'],
                         lightcurve['arrays']['observations']['ratio_' + band_key][:, 0]*100. - 100.,
                         yerr=lightcurve['arrays']['observations']['ratio_' + band_key][:, 1]*100.,
                         fmt='.', c='k', alpha=0.25, label='observations')
            plt.errorbar(lightcurve['binned']['observations']['phase'],
                         lightcurve['binned']['observations']['ratio_' + band_key][:, 0]*100. - 100.,
                         yerr=lightcurve['binned']['observations']['ratio_' + band_key][:, 1]*100.,
                         fmt='.', c='k', alpha=1.0, label='binned observations')
plt.axvspan(-1, lightcurve['bins']['transit_in_bins'][0], alpha=0.25, color='green')
plt.axvspan(lightcurve['bins']['transit_in_bins'][-1], 1., alpha=0.25, color='green')
plt.axhline(0, c='C1')
plt.xlim(lightcurve['arrays']['observations']['phase'][0]-0.01,
lightcurve['arrays']['observations']['phase'][-1]+0.01)
plt.xlabel('orbital phase')
            plt.ylabel(r'$\mathcal{R}$ - 1. [%]')
plt.legend()
plt.show()
print()
| 36,599 | 50.260504 | 144 | py |
SLOPpy | SLOPpy-main/SLOPpy/differential_refraction.bkp.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_differential_refraction", "plot_differential_refraction"]
subroutine_name = 'differential_refraction'
def compute_differential_refraction(config_in):
night_dict = from_config_get_nights(config_in)
print()
for night in night_dict:
try:
refraction = load_from_cpickle('refraction', config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'],
use_refraction=False, use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
try:
processed = load_from_cpickle('refraction_processed_halfway', config_in['output'], night)
refraction = load_from_cpickle('refraction_halfway', config_in['output'], night)
print(" Starting from intermediate step ")
except:
processed = {
'subroutine': subroutine_name,
'coadd': {
'wave': input_data['coadd']['wave'],
'size': input_data['coadd']['size'],
'step': input_data['coadd']['step'],
#'flux': np.zeros(input_data['coadd']['size'], dtype=np.double),
#'flux_err': np.zeros(input_data['coadd']['size'], dtype=np.double)
}
}
refraction = {
'subroutine': 'differential_refraction',
'wave': processed['coadd']['wave']
}
total_flux = np.empty([len(lists['observations']), input_data['coadd']['size']], dtype=np.double)
total_wght = np.zeros([len(lists['observations']), input_data['coadd']['size']], dtype=np.double)
total_mask = np.ones([len(lists['observations']), input_data['coadd']['size']], dtype=bool)
print(" Chebyshev polynomial order for differential refraction fit: ",
observational_pams['refraction_poly_order'])
print(" Number of iterations: ",
observational_pams['refraction_poly_iters'])
print()
""" Rebinning of all the spectra """
for n_obs, obs in enumerate(lists['observations']):
print(" Spectral rebinning - Processing: ", obs)
processed[obs] = {}
""" Rebinning of the spectra in the SRF, except for a fixed constant in order to minimize
the difference between """
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['flux_rebinned_stellarRF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds'],
calib_data['blaze'],
processed['coadd']['wave'],
processed['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'])
processed[obs]['err_flux_rebinned_SRF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds_err'],
calib_data['blaze'],
processed['coadd']['wave'],
processed['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'],
is_error=True)
""" Zero or negative values are identified, flagged and substituted with another value """
processed[obs]['flux_rebinned_stellarRF'], \
processed[obs]['err_flux_rebinned_SRF'], \
processed[obs]['flux_rebinned_SRF_null'] = \
replace_values_errors_with_interpolation_1d(processed[obs]['flux_rebinned_stellarRF'],
processed[obs]['err_flux_rebinned_SRF'],
force_positive=True)
processed[obs]['rescaling'], processed[obs]['rescaled'], processed[obs]['rescaled_err'] = \
perform_rescaling(processed['coadd']['wave'],
processed[obs]['flux_rebinned_stellarRF'],
processed[obs]['err_flux_rebinned_SRF'],
observational_pams['wavelength_rescaling'])
processed[obs]['rescaled_blazed'] = input_data[obs]['e2ds'] \
/ processed[obs]['rescaling'] \
/ calib_data['blaze']
if obs in lists['telluric']:
total_flux[n_obs, :] = processed[obs]['rescaled']
total_mask[n_obs, :] = processed[obs]['flux_rebinned_SRF_null']
total_wght[n_obs, :] = 1. / (processed[obs]['rescaled_err'] ** 2)
# processed['coadd']['flux'] += processed[obs]['flux_rebinned_stellarRF']
# """ SNR (assumed to be the square root of the flux) is added in quadrature """
# processed['coadd']['flux_err'] += processed[obs]['err_flux_rebinned_SRF'] ** 2
print(" Observation added to reference spectrum")
#masked_array = np.ma.array(total_flux, mask=total_mask)
#processed['coadd']['rescaled'], sum_weights = np.ma.average(masked_array,
# weights=total_wght,
# axis=0,
# returned=True)
# processed['coadd']['rescaled'][sum_weights <= 0.0001] = 1.000
# sum_weights[sum_weights <= 0.0001] = 0.0001
# processed['coadd']['rescaled_err'] = 1. / np.sqrt(sum_weights)
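        # Weighted average of the rescaled spectra from the telluric list; pixels flagged
        # as null are masked out, and empty pixels are filled afterwards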
masked_array = np.ma.array(total_flux, mask=total_mask)
rescaled_mask, sum_weights = np.ma.average(masked_array,
weights=total_wght,
axis=0,
returned=True)
processed['coadd']['rescaled'] = rescaled_mask.filled(0.00)
sum_weights[sum_weights <= 0.0] = 1.0
processed['coadd']['rescaled_err'] = 1. / np.sqrt(sum_weights)
processed['coadd']['rescaled'], processed['coadd']['rescaled_err'], processed['coadd']['null'] = \
replace_values_errors_with_interpolation_1d(processed['coadd']['rescaled'],
processed['coadd']['rescaled_err'],
force_positive=True)
save_to_cpickle('refraction_processed_halfway', processed, config_in['output'], night)
save_to_cpickle('refraction_halfway', refraction, config_in['output'], night)
""" Now each observation is divided by the reference spectrum, after being redshifted in the observer RF
The result is then used to model the flux variation
"""
for obs in lists['observations']:
print(" Division by reference spectrum and fit of the flux variation: ", obs)
preserve_flux = input_data[obs].get('absolute_flux', True)
""" Going back to the observer RF and rebinning the spectrum into the observed orders """
processed[obs]['master_flux'] = \
rebin_1d_to_2d(processed['coadd']['wave'],
processed['coadd']['step'],
processed['coadd']['rescaled'],
input_data[obs]['wave'],
input_data[obs]['step'],
preserve_flux=preserve_flux,
rv_shift=-observational_pams[obs]['rv_shift_ORF2SRF_mod'])
processed[obs]['master_ferr'] = \
rebin_1d_to_2d(processed['coadd']['wave'],
processed['coadd']['step'],
processed['coadd']['rescaled_err'],
input_data[obs]['wave'],
input_data[obs]['step'],
preserve_flux=preserve_flux,
rv_shift=-observational_pams[obs]['rv_shift_ORF2SRF_mod'],
is_error=True)
""" Zero or negative values are identified, flagged and substituted with another value """
processed[obs]['master_flux'], processed[obs]['master_ferr'], processed[obs]['master_null'] = \
replace_values_errors_with_interpolation_2d(processed[obs]['master_flux'],
processed[obs]['master_ferr'],
less_than=0.001)
processed[obs]['ratio'] = processed[obs]['rescaled_blazed'] / processed[obs]['master_flux']
"""
processed[obs]['ratio'] = input_data[obs]['e2ds']\
/processed[obs]['rescaling']\
/ (processed[obs]['master_flux'] * calib_data['blaze'])
"""
refraction[obs] = {}
refraction[obs]['polyfit_e2ds'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']])
processed[obs]['residuals'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']])
refraction[obs]['poly_flag'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']],
dtype=bool)
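        # Iterative, sigma-clipped Chebyshev fit of the observed/master flux ratio,
        # order by order; the first and last 50 pixels of each order are always excluded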
for order in range(0, input_data[obs]['n_orders']):
order_coeff_name = 'order_' + repr(order)
refraction[obs]['poly_flag'][order, :] = (processed[obs]['ratio'][order, :] > 0.1)
refraction[obs]['poly_flag'][order, :50] = False
refraction[obs]['poly_flag'][order, -50:] = False
for n_iter in range(0, observational_pams['refraction_poly_iters']):
refraction[obs][order_coeff_name] = np.polynomial.chebyshev.chebfit(
input_data[obs]['wave'][order, refraction[obs]['poly_flag'][order, :]],
processed[obs]['ratio'][order, refraction[obs]['poly_flag'][order, :]],
observational_pams['refraction_poly_order'])
refraction[obs]['polyfit_e2ds'][order, :] = \
np.polynomial.chebyshev.chebval(input_data[obs]['wave'][order, :],
refraction[obs][order_coeff_name])
processed[obs]['residuals'][order, :] = refraction[obs]['polyfit_e2ds'][order, :]\
- processed[obs]['ratio'][order, :]
if n_iter < observational_pams['refraction_poly_iters'] - 1:
std = np.std(processed[obs]['residuals'][order, :])
refraction[obs]['poly_flag'][order, :] = (refraction[obs]['poly_flag'][order, :]) \
& (np.abs(processed[obs]['residuals'][order, :]) <
observational_pams['refraction_poly_sigma'] * std)
processed[obs]['e2ds_corrected'] = input_data[obs]['e2ds'] / refraction[obs]['polyfit_e2ds']
processed[obs]['e2ds_corrected_err'] = input_data[obs]['e2ds_err'] / refraction[obs]['polyfit_e2ds']
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['flux_rebinned_stellarRF_corrected'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['e2ds_corrected'],
calib_data['blaze'],
processed['coadd']['wave'],
processed['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'])
processed[obs]['err_flux_rebinned_SRF_corrected'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['e2ds_corrected_err'],
calib_data['blaze'],
processed['coadd']['wave'],
processed['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'],
is_error=True)
processed[obs]['flux_rebinned_stellarRF_corrected'], \
processed[obs]['err_flux_rebinned_SRF_corrected'], _ = \
replace_values_errors_with_interpolation_1d(processed[obs]['flux_rebinned_stellarRF_corrected'],
processed[obs]['err_flux_rebinned_SRF_corrected'],
less_than=0.001)
save_to_cpickle('refraction_processed', processed, config_in['output'], night)
save_to_cpickle('refraction', refraction, config_in['output'], night)
def plot_differential_refraction(config_in, night_input=''):
night_dict = from_config_get_nights(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations"""
input_data = retrieve_observations(config_in['output'], night, lists['observations'],
use_refraction=False, use_telluric=False)
#observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
try:
""" Retrieving the analysis"""
processed = load_from_cpickle('refraction_processed', config_in['output'], night)
refraction = load_from_cpickle('refraction', config_in['output'], night)
except:
print(" Failed in retrieving processed data")
return
""" Creation of the color array, based on the BJD of the observations
"""
bjd = []
am = []
for obs in lists['observations']:
bjd.append(input_data[obs]['BJD'] - 2450000.0)
am.append(input_data[obs]['AIRMASS'])
color_cmap = plt.cm.viridis
color_norm = plt.Normalize(vmin=bjd[0], vmax=bjd[-1])
colors = color_cmap(color_norm(np.asarray(bjd)))
offset = 0.10
y_limits = [0.8, 1.2]
"""
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
for i, obs in enumerate(lists['observations']):
shift = i/10.0
for order in range(0, input_data[obs]['n_orders']):
ax1.scatter(input_data[obs]['wave'][order, :],
processed[obs]['rescaled_blazed'][order, :] - shift,
c=line_colors[i], s=1, alpha=0.5)
ax1.plot(input_data[obs]['wave'][order, :],
processed[obs]['master_flux'][order, :], - shift,
c='k', lw=1)
ax2.scatter(input_data[obs]['wave'][order, :],
processed[obs]['rescaled_blazed'][order, :]/refraction[obs]['polyfit_e2ds'][order, :] - shift,
c=line_colors[i], s=1, alpha=0.5)
ax2.plot(input_data[obs]['wave'][order, :],
processed[obs]['master_flux'][order, :], - shift,
c='k', lw=1)
ax1.set_xlim(processed['coadd']['wave'][0], processed['coadd']['wave'][-1])
ax1.legend(loc=3)
ax1.set_title('Night: ' + night)
ax2.set_xlabel('$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
"""
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
for i, obs in enumerate(lists['observations']):
color = [colors[i][:-1]]
            if i == 0:
ax1.scatter(processed['coadd']['wave'],
processed[obs]['flux_rebinned_stellarRF'] / processed[obs]['rescaling'],
c=color, s=2, alpha=0.2, label='observation')
else:
ax1.scatter(processed['coadd']['wave'],
processed[obs]['flux_rebinned_stellarRF'] / processed[obs]['rescaling'],
c=color, s=2, alpha=0.2)
ax2.scatter(processed['coadd']['wave'],
processed[obs]['flux_rebinned_stellarRF_corrected'] / processed[obs]['rescaling'],
c=color, s=3, alpha=0.2)
ax1.plot(processed['coadd']['wave'], processed['coadd']['rescaled'], c='k', lw=1, label='reference spectrum')
ax2.plot(processed['coadd']['wave'], processed['coadd']['rescaled'], c='k', lw=1)
ax1.set_xlim(processed['coadd']['wave'][0], processed['coadd']['wave'][-1])
ax1.set_ylim(y_limits)
ax2.set_ylim(y_limits)
ax1.legend(loc=1)
ax1.set_title('Night: {0:s} \n Input spectra'.format(night))
ax2.set_title('Corrected spectra')
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
"""
PLOT
"""
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(1, 2, width_ratios=[50, 1])
ax = plt.subplot(gs[0, 0])
cbax1 = plt.subplot(gs[:, 1])
for i, obs in enumerate(lists['observations']):
if i == 0:
offset = np.std(processed[obs]['ratio'][refraction[obs]['poly_flag']].flatten()) * 6
average = np.average(processed[obs]['ratio'][refraction[obs]['poly_flag']].flatten())
y_limits = [average-offset, average+offset]
color = [colors[i][:-1]]
for order in range(0, input_data[obs]['n_orders']):
ax.scatter(input_data[obs]['wave'][refraction[obs]['poly_flag']],
processed[obs]['ratio'][refraction[obs]['poly_flag']] + offset*i,
s=1, c=color, alpha=0.50, zorder=2)
ax.scatter(input_data[obs]['wave'][~refraction[obs]['poly_flag']],
processed[obs]['ratio'][~refraction[obs]['poly_flag']] + offset*i,
s=2, c='k', alpha=0.05, zorder=1)
ax.plot(input_data[obs]['wave'][order, :],
refraction[obs]['polyfit_e2ds'][order, :] + offset*i,
c='k', lw=1, zorder=5)
y_limits_offset = [min(y_limits[0] + offset * i, y_limits[0]),
max(y_limits[1] + offset * i, y_limits[1])]
ax.set_ylim(y_limits_offset)
        ax.set_xlabel(r'$\lambda$ [$\AA$]')
ax.legend(loc=3)
ax.set_title('Night: {0:s} \n Fit of the ratio obs/master'.format(night))
sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
"""
PLOT: residuals of the fit
"""
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(1, 2, width_ratios=[50, 1])
ax = plt.subplot(gs[0, 0])
cbax1 = plt.subplot(gs[:, 1])
for i, obs in enumerate(lists['observations']):
if i == 0:
median = np.median(processed[obs]['residuals'][refraction[obs]['poly_flag']].flatten())
offset = np.std(processed[obs]['residuals'][refraction[obs]['poly_flag']].flatten()) * 6
y_limits = [median-offset, median+offset]
color = colors[i][:-1]
for order in range(0, input_data[obs]['n_orders']):
# Workaround to damn stupid matplotlib error I didn't manage to solve
ax.scatter(input_data[obs]['wave'][refraction[obs]['poly_flag']],
processed[obs]['residuals'][refraction[obs]['poly_flag']] + offset*i,
s=1, c=[color], alpha=0.50, zorder=2)
ax.scatter(input_data[obs]['wave'][~refraction[obs]['poly_flag']],
processed[obs]['residuals'][~refraction[obs]['poly_flag']] + offset*i,
s=2, c='k', alpha=0.05, zorder=1)
ax.axhline(offset*i, c='k', zorder=3)
y_limits_offset = [min(y_limits[0] + offset * i, y_limits[0]),
max(y_limits[1] + offset * i, y_limits[1])]
ax.set_ylim(y_limits_offset)
        ax.set_xlabel(r'$\lambda$ [$\AA$]')
ax.legend(loc=3)
ax.set_title('Night: {0:s} \n Residuals of the fit on ratio obs/master'.format(night))
sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
"""
PLOT: corrected e2ds
"""
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(1, 2, width_ratios=[50, 1])
ax = plt.subplot(gs[0, 0])
cbax1 = plt.subplot(gs[:, 1])
for i, obs in enumerate(lists['observations']):
color = [colors[i][:-1]]
ax.scatter(input_data[obs]['wave'][refraction[obs]['poly_flag']],
processed[obs]['e2ds_corrected'][refraction[obs]['poly_flag']]/processed[obs]['rescaling'],
s=2, c=color, alpha=0.10)
ax.scatter(input_data[obs]['wave'][~refraction[obs]['poly_flag']],
processed[obs]['e2ds_corrected'][~refraction[obs]['poly_flag']]/processed[obs]['rescaling'],
s=2, c='k', alpha=0.05)
#for order in range(0, np.size(input_data[obs]['wave'][:, 0])):
#
# ax.plot(input_data[obs]['wave'][order, :],
# refraction[obs]['polyfit_e2ds'][order, :],
# c=color_array, lw=1)
        ax.set_xlabel(r'$\lambda$ [$\AA$]')
ax.set_title('Night: {0:s} \n Corrected and rescaled e2ds spectra'.format(night))
sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show() | 25,301 | 47.378585 | 119 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_molecfit_preparation.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_molecfit_preparation",
"plot_telluric_molecfit_preparation"]
def compute_telluric_molecfit_preparation(config_in):
"""
Lazy workaround
:param config_in:
:param kwargs:
:return:
"""
night_dict = from_config_get_nights(config_in)
molecfit_dict = from_config_get_molecfit(config_in)
for night in night_dict:
try:
tellprep = load_from_cpickle('telluric_molecfit_preparation', config_in['output'], night)
continue
except:
print()
print("compute_telluric_molecfit_preparation Night: ", night)
print()
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
tellprep = {
'work_dir': config_in['output'] + '_molecfit_' + night,
'include': {}
}
"""
We store all the molecfit files in a subdirectory
We save the path of the main directory to a temporary file
"""
os.system('mkdir -p ' + tellprep['work_dir'])
os.system('mkdir -p ' + tellprep['work_dir'] + '/output/')
"""
Creation of the include files
"""
"""
includes_spans_ORF: wavelength ranges _with_ telluric lines, in the ORF
includes_spans_SRF: wavelength ranges _without_ stellar lines and broadly
overlapping with telluric ranges, in the SRF
the two lists must have the same number of columns, with precise correspondence
"""
tellprep['include']['spans_telluric'] = np.genfromtxt(molecfit_dict['include_telluric'])
tellprep['include']['spans_stellar_SRF'] = np.genfromtxt(molecfit_dict['include_stellar'])
#print()
#print(tellprep['include']['spans_telluric'])
#print()
#print(tellprep['include']['spans_stellar_SRF'])
""" shift the stellar wavelength ranges into ORF """
tellprep['include']['rv_shift_SRF2ORF'] = -observational_pams['BERV_avg'] + observational_pams['RV_star'][
'RV_systemic']
#print()
#print(tellprep['include']['rv_shift_SRF2ORF'])
#print(observational_pams['BERV_avg'])
#print(observational_pams['RV_star']['RV_systemic'])
#print()
#print()
tellprep['include']['spans_stellar'] = tellprep['include']['spans_stellar_SRF']\
* (tellprep['include']['rv_shift_SRF2ORF']
/ (299792458. / 1000.000) + 1.00000)
#print()
#print(tellprep['include']['spans_stellar'])
""" Selecting the overlapping regions between the two lists: we want telluric regions that are not contaminated
by stellar lines,
"""
sel_lower = (tellprep['include']['spans_stellar'][:, 0] > tellprep['include']['spans_telluric'][:, 0])
sel_upper = (tellprep['include']['spans_stellar'][:, 1] < tellprep['include']['spans_telluric'][:, 1])
""" Final list in the ORF is built"""
tellprep['include']['selected'] = tellprep['include']['spans_telluric'].copy()
tellprep['include']['selected'][sel_lower, 0] = tellprep['include']['spans_stellar'][sel_lower, 0]
tellprep['include']['selected'][sel_upper, 1] = tellprep['include']['spans_stellar'][sel_upper, 1]
#print()
#print(tellprep['include']['selected'])
""" Molecfit line list must be given in vacuum wavelength, even if the stellar spectra is in air wavelength
conversion from air to vacuum for include file preparation
where s = 10000 / lambda air and the conversion is: lambda_vac = lambda_air * n.
http://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
"""
s2 = (10000. / tellprep['include']['selected']) ** 2
n = 1 + 0.00008336624212083 + 0.02408926869968 / (130.1065924522 - s2) + 0.0001599740894897 / (
38.92568793293 - s2)
tellprep['include']['vacuum'] = tellprep['include']['selected'] * n / 10000.
#fileout = open('./' + tellprep['work_dir'] + '/include_' + night + '.dat', 'w')
#for i_s, i_e in zip(tellprep['include']['vacuum'][:, 0], tellprep['include']['vacuum'][:, 1]):
# fileout.write('{0:12.8f} {1:12.8f}\n'.format(i_s, i_e))
#fileout.close()
#quit()
save_to_cpickle('telluric_molecfit_preparation', tellprep, config_in['output'], night)
def plot_telluric_molecfit_preparation(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
print("plot_telluric_template Night: ", night)
| 5,335 | 37.948905 | 119 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_airmass_observerRF.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_airmass_berv_observerRF",
"plot_telluric_airmass_berv_observerRF",
"compute_telluric_airmass_observerRF",
"plot_telluric_airmass_observerRF",
"compute_telluric_airmass_reference_observerRF",
"plot_telluric_airmass_reference_observerRF",
"compute_telluric_airmass_berv_reference_observerRF",
"plot_telluric_airmass_berv_reference_observerRF"
]
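# Thin wrappers around compute_telluric_observerRF: the keyword flags select whether the
# BERV term enters the linear fit and whether the correction is referred to the average
# in-transit airmass rather than to airmass zero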
def compute_telluric_airmass_berv_observerRF(config_in):
compute_telluric_observerRF(config_in,
n_iterations=1,
use_berv=True,
use_reference_airmass=False,
subroutine_name='telluric_airmass_berv_observerRF')
def compute_telluric_airmass_observerRF(config_in):
compute_telluric_observerRF(config_in,
n_iterations=1,
use_berv=False,
use_reference_airmass=False,
subroutine_name='telluric_airmass_observerRF')
def compute_telluric_airmass_reference_observerRF(config_in):
compute_telluric_observerRF(config_in,
n_iterations=1,
use_berv=False,
use_reference_airmass=True,
subroutine_name='telluric_airmass_reference_observerRF')
def compute_telluric_airmass_berv_reference_observerRF(config_in):
compute_telluric_observerRF(config_in,
n_iterations=1,
use_berv=True,
use_reference_airmass=True,
subroutine_name='telluric_airmass_berv_reference_observerRF')
def plot_telluric_airmass_berv_observerRF(config_in, night_input):
""" Alias to simplify the configuration file"""
plot_telluric_airmass_observerRF(config_in, night_input)
def plot_telluric_airmass_reference_observerRF(config_in, night_input):
""" Alias to simplify the configuration file"""
plot_telluric_airmass_observerRF(config_in, night_input)
def plot_telluric_airmass_berv_reference_observerRF(config_in, night_input):
""" Alias to simplify the configuration file"""
plot_telluric_airmass_observerRF(config_in, night_input)
def compute_telluric_observerRF(config_in, **kwargs):
night_dict = from_config_get_nights(config_in)
for night in night_dict:
print()
print("compute_telluric_airmass_observerRF Night: ", night)
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
continue
except:
print("No telluric correction file found, computing now ")
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations"""
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': kwargs['subroutine_name'],
'n_orders': 0,
'n_pixels': 0
}
telluric = {
'subroutine': kwargs['subroutine_name'],
'reference_frame': 'observer'
}
        # There must be a more elegant way to do this, but I'm not aware of it
for obs in lists['observations']:
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
""" for plotting purpose only"""
processed[obs]['wave'] = input_data[obs]['wave']
processed[obs]['e2ds'] = input_data[obs]['e2ds']
processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']
processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
perform_rescaling(input_data[obs]['wave'],
input_data[obs]['e2ds'],
input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
if processed['n_orders'] == 0:
processed['n_orders'] = input_data[obs]['orders']
processed['n_pixels'] = input_data[obs]['wave_size']
""" Reference airmass for iterative correction of airmass"""
if kwargs['use_reference_airmass']:
airmass_temp = np.zeros(lists['n_transit_in'])
for n_obs, obs in enumerate(lists['transit_in']):
# This is to ensure that airmass, berv and rvc are associated to the correct spectra
airmass_temp[n_obs] = input_data[obs]['AIRMASS']
processed['airmass_ref'] = np.average(airmass_temp)
else:
processed['airmass_ref'] = 0.000
for obs in lists['observations']:
processed[obs]['e2ds_precorrected'] = processed[obs]['e2ds_rescaled'][:]
processed[obs]['e2ds_precorrected_err'] = input_data[obs]['e2ds_err'] / processed[obs]['e2ds_rescaling']
for niter in range(0, kwargs['n_iterations']):
if kwargs['n_iterations'] > 1:
print("NITER: ", niter)
for obs in lists['telluric']:
processed[obs]['logI'] = np.log(processed[obs]['e2ds_precorrected'])
processed[obs]['logI_err'] = processed[obs]['e2ds_precorrected_err']/processed[obs]['e2ds_precorrected']
processed['telluric'] = {}
abs_slope = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
line_shift = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
zero_point = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
pearson_r = np.zeros([processed['n_orders'], processed['n_pixels']], dtype=np.double)
pearson_p = np.zeros([processed['n_orders'], processed['n_pixels']], dtype=np.double)
airmass = np.zeros(lists['n_tellurics'], dtype=np.double)
berv = np.zeros(lists['n_tellurics'], dtype=np.double)
rvc = np.zeros(lists['n_tellurics'], dtype=np.double)
for n_obs, obs in enumerate(lists['telluric']):
# This is to ensure that airmass, berv and rvc are associated to the correct spectra
processed['telluric'][obs] = {'n_obs': n_obs}
airmass[n_obs] = input_data[obs]['AIRMASS']
berv[n_obs] = input_data[obs]['BERV']
rvc[n_obs] = input_data[obs]['RVC']
for order in range(0, processed['n_orders']):
logi_array = np.empty([lists['n_tellurics'], processed['n_pixels']], dtype=np.double)
sigi_array = np.empty([lists['n_tellurics'], processed['n_pixels']], dtype=np.double)
for obs in lists['telluric']:
n_obs = processed['telluric'][obs]['n_obs']
logi_array[n_obs, :] = processed[obs]['logI'][order, :]
sigi_array[n_obs, :] = processed[obs]['logI_err'][order, :]
""" The user has the option to select between different approaches to
extract the telluric absorption spectrum
To-Do: move this section to a subroutine for cythonization"""
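                # Linear model per pixel: log(flux) ~ zero_point + airmass * abs_slope
                # (plus a BERV-dependent line-shift term when use_berv is True), so that
                # exp(abs_slope) is the telluric spectrum at airmass = 1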
if kwargs['use_berv']:
if observational_pams['linear_fit_method'] == 'linear_curve_fit':
abs_slope[order, :], line_shift[order, :], zero_point[order, :] = \
berv_linear_curve_fit_modified(airmass, berv, logi_array, sigi_array, processed['n_pixels'])
else:
abs_slope[order, :], line_shift[order, :], zero_point[order, :] = \
berv_linear_lstsq(airmass, berv, logi_array)
else:
if observational_pams['linear_fit_method'] == 'linear_curve_fit':
abs_slope[order, :], zero_point[order, :] = \
airmass_linear_curve_fit(airmass, logi_array, sigi_array, processed['n_pixels'])
#abs_slope[order, :], zero_point[order, :] = \
# airmass_linear_curve_fit_ransac(airmass, logi_array, sigi_array, processed['n_pixels'])
#obs_ref = lists['observations'][0]
#plt.plot(processed[obs_ref]['wave'][order,:], processed[obs_ref]['e2ds_rescaled'][order,:])
#for iii in range(0, processed['n_pixels']):
# if iii < 3700 or iii > 3720: continue
# plt.axvline(processed[obs_ref]['wave'][order,iii])
#plt.show()
#
#ik=0
#air_arr = np.arange(1.2, 2.5, 0.1)
#for iii in range(0, processed['n_pixels']):
#
# if iii < 3700 or iii > 3720: continue
# plt.errorbar(airmass, logi_array[:, iii]+ik, yerr=sigi_array[:, iii], fmt='o')
# print(np.exp(abs_slope[order, iii]))
# plt.plot(air_arr, air_arr*abs_slope[order, iii] + zero_point[order, iii]+ik)
# ik -= 0.20
#plt.show()
else:
abs_slope[order, :], zero_point[order, :] = \
airmass_linear_lstsq(airmass, logi_array)
            # plt.show()  # leftover debug call, disabled like the diagnostic plots above
""" Saving the outcome to dictionary """
processed['telluric']['order_'+repr(order)] = {'logi_array': logi_array, 'sigi_array': sigi_array}
if kwargs.get('use_template', False):
telluric_template_data = np.genfromtxt(night_dict[night]['telluric_template'])
spectrum_noairmass = np.exp(abs_slope)
obs_reference = lists['observations'][0]
telluric['template'] = {
'input':{
'wave': telluric_template_data[:, 0],
'flux': telluric_template_data[:, 1],
'ferr': telluric_template_data[:, 2],
'step': telluric_template_data[:, 3]
},
'rebinned':{
'wave': input_data[obs_reference]['wave'],
'step': input_data[obs_reference]['step']
}
}
telluric['template']['rebinned']['flux'] = \
rebin_1d_to_2d(telluric['template']['input']['wave'],
telluric['template']['input']['step'],
telluric['template']['input']['flux'],
telluric['template']['rebinned']['wave'],
telluric['template']['rebinned']['step'],
preserve_flux=False)
telluric['template']['rebinned']['ferr'] = \
rebin_1d_to_2d(telluric['template']['input']['wave'],
telluric['template']['input']['step'],
telluric['template']['input']['ferr'],
telluric['template']['rebinned']['wave'],
telluric['template']['rebinned']['step'],
preserve_flux=False,
is_error=True)
plt.plot(telluric['template']['input']['wave'], telluric['template']['input']['flux'], zorder=1, c='C0')
plt.scatter(telluric['template']['rebinned']['wave'], telluric['template']['rebinned']['flux'],zorder=2, s=2)
plt.scatter(telluric['template']['rebinned']['wave'],spectrum_noairmass, alpha=0.5, s=1, zorder=3)
factor_list = []
slope_list = []
for order in range(0, processed['n_orders']):
fit_selection = (telluric['template']['rebinned']['flux'][order, :] < 1.0)
# Check if there are telluric lines in this wavelength range
if np.sum(fit_selection) > 30:
#telluric_factor, telluric_slope, success_flag = find_telluric_rescaling_factor(
# spectrum_noairmass[order, :],
# telluric['template']['rebinned']['flux'][order, :]
#)
telluric_factor, telluric_slope, success_flag = find_telluric_rescaling_factor_2steps(
spectrum_noairmass[order, :],
telluric['template']['rebinned']['flux'][order, :]
)
if success_flag:
factor_list.extend([telluric_factor])
slope_list.extend([telluric_slope])
            if len(factor_list) > 1:
telluric_slope = np.median(slope_list)
telluric_factor = np.median(factor_list)
elif len(factor_list) == 1:
telluric_slope = slope_list[0]
telluric_factor = factor_list[0]
else:
telluric_slope = 0.00
telluric_factor = 0.00
            print('   telluric factor: {0:7f} (correction slope: {1:7f})'.format(telluric_factor, telluric_slope))
print()
#print(telluric_factor, success_flag)
#quit()
processed['telluric']['spectrum_noairmass'] = \
(telluric['template']['rebinned']['flux']-1.)*telluric_factor + 1.0
plt.plot(telluric['template']['input']['wave'],
(telluric['template']['input']['flux']-1.)*telluric_factor + 1.0, zorder=1, c='C1')
plt.show()
else:
processed['telluric']['spectrum_noairmass'] = np.exp(abs_slope)
for obs in lists['observations']:
""" Correction of telluric lines for the average airmass value, following Wyttenbach et al. 2015 """
processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_precorrected'] / \
np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] -
processed['airmass_ref'])
processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_precorrected_err'] / \
np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] -
processed['airmass_ref'])
for obs in lists['observations']:
# Correction of telluric lines
telluric[obs] = {}
telluric[obs]['spectrum_noairmass'] = processed['telluric']['spectrum_noairmass']
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
telluric[obs]['airmass_ref'] = processed['airmass_ref']
telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
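            # Telluric transmission scales exponentially with airmass (Beer-Lambert):
            # T(X) = T(X=1)**(X - X_ref)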
telluric[obs]['spectrum'] = np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
telluric[obs]['spline_noairmass'] = np.ones([input_data[obs]['n_orders'],
input_data[obs]['n_pixels']],
dtype=np.double)
for order in range(0, processed['n_orders']):
telluric[obs]['spline_noairmass'][order, :], _, _ = \
compute_spline(input_data[obs]['wave'][order, :],
telluric[obs]['spectrum_noairmass'][order, :],
0.05)
telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
telluric[obs]['telluric_corrected'] = processed[obs]['e2ds_corrected']
telluric[obs]['telluric_corrected_err'] = processed[obs]['e2ds_corrected_err']
save_to_cpickle('telluric', telluric, config_in['output'], night)
save_to_cpickle('telluric_processed', processed, config_in['output'], night)
print()
print("Night ", night, " completed")
def plot_telluric_airmass_observerRF(config_in, night_input=''):
import matplotlib.pyplot as plt
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
print("plot_telluric_airmass_observerRF Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
""" Retrieving the analysis"""
try:
processed = load_from_cpickle('telluric_processed', config_in['output'], night)
telluric = load_from_cpickle('telluric', config_in['output'], night)
except:
print()
print("No telluric correction, no plots")
continue
colors, cmap, line_colors = make_color_array(lists, observational_pams)
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1)
cbax1 = plt.subplot(gs[:, 1])
lift_spectrum = 0.25
for i, obs in enumerate(lists['observations']):
color_array = cmap(i / len(lists['observations']))
            _, e2ds_rescaled, _ = \
perform_rescaling(processed[obs]['wave'],
processed[obs]['e2ds'],
processed[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']
e2ds_rescaled_corrected_spline = e2ds_rescaled / telluric[obs]['spline']
for order in range(0, processed[obs]['n_orders']):
                if order == 0 and i == 0:
ax1.plot(processed[obs]['wave'][order, :],
e2ds_rescaled[order, :],
c=color_array, lw=1, alpha=0.5, label='uncorrected')
ax1.scatter(processed[obs]['wave'][order, :],
e2ds_rescaled_corrected_spectrum[order, :],
s=1, c=np.atleast_2d(color_array), label='corrected')
else:
ax1.plot(processed[obs]['wave'][order, :],
e2ds_rescaled[order, :],
c=color_array, lw=1, alpha=0.5)
ax1.scatter(processed[obs]['wave'][order, :],
e2ds_rescaled_corrected_spectrum[order, :],
s=1, c=np.atleast_2d(color_array))
#ax1.plot(processed[obs]['wave'][order, :],
# e2ds_rescaled[order, :]+lift_spectrum,
# c=color_array, lw=1, alpha=0.5)
#ax1.scatter(processed[obs]['wave'][order, :],
# e2ds_rescaled_corrected_spline[order, :]+lift_spectrum,
# s=1, c=np.atleast_2d(color_array))
ax2.plot(processed[obs]['wave'][order, :],
telluric[obs]['spectrum'][order, :],
c=color_array)
ax2.axhline(1.00, c='k')
#ax2.plot(processed[obs]['wave'][order, :],
# telluric[obs]['spline'][order, :]+lift_spectrum,
# c=color_array)
#ax2.axhline(1.00+lift_spectrum, c='k')
#ax2.plot(input_data['coadd']['wave'],telluric['stellarRF']['spline_eval']+0.1,c='k')
#ax2.scatter(input_data['coadd']['wave'],telluric['stellarRF']['spectrum']+0.1,c='r', s=2)
ax1.legend(loc=3)
ax1.set_title('Night: ' + night)
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
try:
instrument = night_dict[night]['instrument']
comparison_file = config_in['instruments'][instrument]['telluric_comparison']
comparison_data = np.genfromtxt(comparison_file, skip_header=1)
            if comparison_data[0, 0] < 1000.0:
nm2Ang = 10.
else:
nm2Ang = 1.
ax1.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
ax2.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
except:
pass
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show() | 22,627 | 46.537815 | 125 | py |
SLOPpy | SLOPpy-main/SLOPpy/sloppy_run.py | import SLOPpy
import argparse
import os
import sys
import collections
def sloppy_run():
print()
print('SLOPpy v{0}'.format(SLOPpy.__version__))
print()
print('Python version in use:')
print(sys.version)
#if sys.version_info[0] == 3 and sys.version_info[1] > 7:
# print('WARNING MESSAGES SUPPRESSED!')
#print()
parser = argparse.ArgumentParser(prog='SLOPpy_Run', description='SLOPpy runner')
parser.add_argument('config_file', type=str, nargs=1, help='config file')
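    # typical invocation, assuming the console entry point is named after this function:
    #   sloppy_run my_config.yaml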
args = parser.parse_args()
file_conf = args.config_file[0]
config_in = SLOPpy.yaml_parser(file_conf)
SLOPpy.pars_input(config_in)
print()
""" creation of the pickle files """
SLOPpy.prepare_datasets(config_in)
#""" Retrieving the dictionary with the pipeline recipes """
#pipeline = config_in['pipeline']
""" Recipes must be performed in a given order... that's why we must use and ordered dictionary"""
""" Each of the following recipes has to be performed on the whole spectrum """
pipeline_common_routines = collections.OrderedDict()
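    # each key is a recipe name, presumably matched against the entries of the 'pipeline'
    # section of the configuration file; insertion order defines the execution order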
pipeline_common_routines['sky_correction'] = SLOPpy.compute_sky_correction
#pipeline_common_routines['PCA_test01'] = SLOPpy.PCA_test01
pipeline_common_routines['pca_preparation'] = SLOPpy.compute_pca_preparation
pipeline_common_routines['sysrem_correction'] = SLOPpy.compute_sysrem_correction
# molecfit version 1.5
pipeline_common_routines['telluric_molecfit_v1_preparation'] = SLOPpy.compute_telluric_molecfit_v1_preparation
pipeline_common_routines['telluric_molecfit_v1'] = SLOPpy.compute_telluric_molecfit_v1
pipeline_common_routines['telluric_molecfit_v1_coadd'] = SLOPpy.compute_telluric_molecfit_v1_coadd
# molecfit new version
pipeline_common_routines['telluric_molecfit_preparation'] = SLOPpy.compute_telluric_molecfit_preparation
pipeline_common_routines['telluric_molecfit'] = SLOPpy.compute_telluric_molecfit
pipeline_common_routines['telluric_molecfit_coadd'] = SLOPpy.compute_telluric_molecfit_coadd
pipeline_common_routines['telluric_template'] = SLOPpy.compute_telluric_template
pipeline_common_routines['telluric_template_reference'] = SLOPpy.compute_telluric_template_reference
pipeline_common_routines['telluric_template_alternative'] = SLOPpy.compute_telluric_template_alternative
pipeline_common_routines['telluric_airmass_stellarRF'] = SLOPpy.compute_telluric_airmass_stellarRF
pipeline_common_routines['telluric_airmass_reference_stellarRF'] = SLOPpy.compute_telluric_airmass_reference_stellarRF
pipeline_common_routines['telluric_airmass_observerRF'] = SLOPpy.compute_telluric_airmass_observerRF
pipeline_common_routines['telluric_airmass_berv_observerRF'] = SLOPpy.compute_telluric_airmass_berv_observerRF
pipeline_common_routines['telluric_airmass_reference_observerRF'] = SLOPpy.compute_telluric_airmass_reference_observerRF
pipeline_common_routines['telluric_airmass_berv_reference_observerRF'] = SLOPpy.compute_telluric_airmass_berv_reference_observerRF
#pipeline_routines['telluric_obsolete_wyttenbach'] = SLOPpy.compute_telluric_obsolete_wyttenbach
#pipeline_routines['telluric_airmass_observerRF_chunks'] = SLOPpy.compute_telluric_airmass_observerRF_chunks
pipeline_common_routines['telluric_observerRF_skycalc'] = SLOPpy.compute_telluric_observerRF_skycalc
pipeline_common_routines['differential_refraction'] = SLOPpy.compute_differential_refraction
pipeline_common_routines['differential_refraction_update'] = SLOPpy.compute_differential_refraction_update
pipeline_common_routines['check_differential_refraction'] = SLOPpy.check_differential_refraction
pipeline_common_routines['write_differential_refraction'] = SLOPpy.write_differential_refraction
pipeline_common_routines['interstellar_lines'] = SLOPpy.compute_interstellar_lines
pipeline_common_routines['master_out'] = SLOPpy.compute_master_out
pipeline_common_routines['clv_rm_models'] = SLOPpy.compute_clv_rm_models
pipeline_common_routines['transmission_spectrum_preparation'] = SLOPpy.compute_transmission_spectrum_preparation
pipeline_common_routines['write_output_transmission'] = SLOPpy.write_output_transmission
pipeline_common_routines['write_output_transmission_stellarRF'] = SLOPpy.write_output_transmission_stellarRF
pipeline_common_routines['write_output_transmission_planetRF'] = SLOPpy.write_output_transmission_planetRF
pipeline_common_routines['write_output_transmission_observerRF'] = SLOPpy.write_output_transmission_observerRF
""" Legacy routines for testing purposes """
pipeline_routines = collections.OrderedDict()
#pipeline_routines['transmission_spectrum_planetRF'] = SLOPpy.compute_transmission_spectrum_planetRF
#pipeline_routines['transmission_spectrum_observerRF'] = SLOPpy.compute_transmission_spectrum_observerRF
#pipeline_routines['transmission_spectrum_stellarRF'] = SLOPpy.compute_transmission_spectrum_stellarRF
#pipeline_routines['transmission_spectrum'] = SLOPpy.compute_transmission_spectrum
#pipeline_routines['second_telluric_correction_on_transmission'] = SLOPpy.compute_second_telluric_correction_on_transmission
#pipeline_routines['transmission_map'] = SLOPpy.compute_transmission_map
#pipeline_routines['transmission_clv_rm_map'] = SLOPpy.compute_transmission_clv_rm_map
""" Each of the following recipes has to be performed independently on each
set of spectral lines """
pipeline_lines_routines = collections.OrderedDict()
"""
pipeline_lines_routines['transmission_spectrum_planetRF'] = SLOPpy.compute_transmission_spectrum_planetRF
pipeline_lines_routines['transmission_spectrum_observerRF'] = SLOPpy.compute_transmission_spectrum_observerRF
pipeline_lines_routines['transmission_spectrum_stellarRF'] = SLOPpy.compute_transmission_spectrum_stellarRF
pipeline_lines_routines['transmission_spectrum'] = SLOPpy.compute_transmission_spectrum
pipeline_lines_routines['second_telluric_correction_on_transmission'] = SLOPpy.compute_second_telluric_correction_on_transmission
pipeline_lines_routines['transmission_map'] = SLOPpy.compute_transmission_map
pipeline_lines_routines['transmission_clv_rm_map'] = SLOPpy.compute_transmission_clv_rm_map
#pipeline_lines_routines['clv_rm_modelling'] = SLOPpy.compute_clv_rm_modelling
"""
# ! NEW
pipeline_lines_routines['quick_transmission'] = SLOPpy.compute_quick_transmission
pipeline_lines_routines['clv_rm_models_lines'] = SLOPpy.compute_clv_rm_models_lines
pipeline_lines_routines['transmission_mcmc'] = SLOPpy.compute_transmission_mcmc
pipeline_lines_routines['transmission_mcmc_iterative'] = SLOPpy.compute_transmission_mcmc_iterative
pipeline_lines_routines['transmission_binned_mcmc'] = SLOPpy.compute_transmission_binned_mcmc
pipeline_lines_routines['transmission_binned_mcmc_iterative'] = SLOPpy.compute_transmission_binned_mcmc_iterative
pipeline_lines_routines['transmission_spectrum_planetRF'] = SLOPpy.compute_transmission_spectrum_planetRF
pipeline_lines_routines['transmission_spectrum_observerRF'] = SLOPpy.compute_transmission_spectrum_observerRF
pipeline_lines_routines['transmission_spectrum_stellarRF'] = SLOPpy.compute_transmission_spectrum_stellarRF
pipeline_lines_routines['transmission_spectrum'] = SLOPpy.compute_transmission_spectrum
pipeline_lines_routines['transmission_spectrum_planetRF_iterative'] = SLOPpy.compute_transmission_spectrum_planetRF_iterative
pipeline_lines_routines['transmission_spectrum_observerRF_iterative'] = SLOPpy.compute_transmission_spectrum_observerRF_iterative
pipeline_lines_routines['transmission_spectrum_stellarRF_iterative'] = SLOPpy.compute_transmission_spectrum_stellarRF_iterative
pipeline_lines_routines['transmission_spectrum_iterative'] = SLOPpy.compute_transmission_spectrum_iterative
pipeline_lines_routines['transmission_spectrum_average_planetRF'] = SLOPpy.compute_transmission_spectrum_average_planetRF
pipeline_lines_routines['transmission_spectrum_average_observerRF'] = SLOPpy.compute_transmission_spectrum_average_observerRF
pipeline_lines_routines['transmission_spectrum_average_stellarRF'] = SLOPpy.compute_transmission_spectrum_average_stellarRF
pipeline_lines_routines['transmission_spectrum_average'] = SLOPpy.compute_transmission_spectrum_average
pipeline_lines_routines['transmission_spectrum_average_planetRF_iterative'] = SLOPpy.compute_transmission_spectrum_average_planetRF
pipeline_lines_routines['transmission_spectrum_average_observerRF_iterative'] = SLOPpy.compute_transmission_spectrum_average_observerRF
pipeline_lines_routines['transmission_spectrum_average_stellarRF_iterative'] = SLOPpy.compute_transmission_spectrum_average_stellarRF
pipeline_lines_routines['transmission_spectrum_average_iterative'] = SLOPpy.compute_transmission_spectrum_average
pipeline_lines_routines['transmission_lightcurve'] = SLOPpy.compute_transmission_lightcurve
pipeline_lines_routines['transmission_lightcurve_average'] = SLOPpy.compute_transmission_lightcurve_average
pipeline_lines_routines['spectra_lightcurve'] = SLOPpy.compute_spectra_lightcurve
pipeline_lines_routines['spectra_lightcurve_average'] = SLOPpy.compute_spectra_lightcurve_average
# TODO: to be updated to support single line(s) set
"""
pipeline_lines_routines['transmission_clv_rm_correction_planetRF'] = SLOPpy.compute_transmission_clv_rm_correction_planetRF
pipeline_lines_routines['transmission_clv_rm_correction_observerRF'] = SLOPpy.compute_transmission_clv_rm_correction_observerRF
pipeline_lines_routines['transmission_clv_rm_correction_stellarRF'] = SLOPpy.compute_transmission_clv_rm_correction_stellarRF
pipeline_lines_routines['transmission_clv_rm_correction'] = SLOPpy.compute_transmission_clv_rm_correction
pipeline_lines_routines['transmission_clv_rm_average_planetRF'] = SLOPpy.compute_transmission_clv_rm_average_planetRF
pipeline_lines_routines['transmission_clv_rm_average_observerRF'] = SLOPpy.compute_transmission_clv_rm_average_observerRF
pipeline_lines_routines['transmission_clv_rm_average_stellarRF'] = SLOPpy.compute_transmission_clv_rm_average_stellarRF
pipeline_lines_routines['transmission_clv_rm_average'] = SLOPpy.compute_transmission_clv_rm_average
pipeline_lines_routines['spectra_lightcurve'] = SLOPpy.compute_spectra_lightcurve
pipeline_lines_routines['spectra_lightcurve_average'] = SLOPpy.compute_spectra_lightcurve_average
pipeline_lines_routines['excess_lightcurve'] = SLOPpy.compute_spectra_lightcurve
pipeline_lines_routines['excess_lightcurve_average'] = SLOPpy.compute_spectra_lightcurve_average
pipeline_lines_routines['spectra_lightcurve_clv_rm_correction'] = SLOPpy.compute_spectra_lightcurve_clv_rm_correction
pipeline_lines_routines['spectra_lightcurve_average_clv_rm_correction'] = SLOPpy.compute_spectra_lightcurve_average_clv_rm_correction
pipeline_lines_routines['excess_lightcurve_clv_rm_correction'] = SLOPpy.compute_spectra_lightcurve_clv_rm_correction
pipeline_lines_routines['excess_lightcurve_average_clv_rm_correction'] = SLOPpy.compute_spectra_lightcurve_average_clv_rm_correction
pipeline_lines_routines['transmission_lightcurve_planetRF'] = SLOPpy.compute_transmission_lightcurve_planetRF
pipeline_lines_routines['transmission_lightcurve_observerRF'] = SLOPpy.compute_transmission_lightcurve_observerRF
pipeline_lines_routines['transmission_lightcurve_stellarRF'] = SLOPpy.compute_transmission_lightcurve_stellarRF
pipeline_lines_routines['transmission_lightcurve'] = SLOPpy.compute_transmission_lightcurve
pipeline_lines_routines['write_output_spectra'] = SLOPpy.write_output_spectra
pipeline_lines_routines['transmission_lightcurve_average_planetRF'] = SLOPpy.compute_transmission_lightcurve_average_planetRF
pipeline_lines_routines['transmission_lightcurve_average_observerRF'] = SLOPpy.compute_transmission_lightcurve_average_observerRF
pipeline_lines_routines['transmission_lightcurve_average_stellarRF'] = SLOPpy.compute_transmission_lightcurve_average_stellarRF
pipeline_lines_routines['transmission_lightcurve_average'] = SLOPpy.compute_transmission_lightcurve_average
"""
plot_preparation_routines = collections.OrderedDict()
#plot_preparation_routines['clv_rm_modelling'] = SLOPpy.plot_clv_rm_modelling
# ! New
plot_preparation_routines['clv_rm_models'] = SLOPpy.plot_clv_rm_models
plot_routines = collections.OrderedDict()
plot_routines['plot_dataset'] = SLOPpy.plot_dataset
plot_routines['prepare_dataset'] = SLOPpy.plot_dataset
plot_routines['dataset'] = SLOPpy.plot_dataset
plot_routines['sky_correction'] = SLOPpy.plot_sky_correction
plot_routines['differential_refraction'] = SLOPpy.plot_differential_refraction
plot_routines['differential_refraction_update'] = SLOPpy.plot_differential_refraction_update
plot_routines['check_differential_refraction'] = SLOPpy.plot_check_differential_refraction
#plot_routines['write_differential_refraction'] = SLOPpy.write_differential_refraction
#plot_routines['PCA_test01'] = SLOPpy.PCA_test01
# molecfit version 1.5
plot_routines['telluric_molecfit_v1'] = SLOPpy.plot_telluric_molecfit_v1
plot_routines['telluric_molecfit_v1_coadd'] = SLOPpy.plot_telluric_molecfit_v1_coadd
# molecfit new version
plot_routines['telluric_molecfit'] = SLOPpy.plot_telluric_molecfit
plot_routines['telluric_molecfit_coadd'] = SLOPpy.plot_telluric_molecfit_coadd
plot_routines['telluric_template'] = SLOPpy.plot_telluric_template
plot_routines['telluric_template_reference'] = SLOPpy.plot_telluric_template_reference
plot_routines['telluric_template_alternative'] = SLOPpy.plot_telluric_template_alternative
plot_routines['telluric_airmass_stellarRF'] = SLOPpy.plot_telluric_airmass_stellarRF
plot_routines['telluric_airmass_reference_stellarRF'] = SLOPpy.plot_telluric_airmass_reference_stellarRF
plot_routines['telluric_airmass_observerRF'] = SLOPpy.plot_telluric_airmass_observerRF
plot_routines['telluric_airmass_berv_observerRF'] = SLOPpy.plot_telluric_airmass_berv_observerRF
plot_routines['telluric_airmass_reference_observerRF'] = SLOPpy.plot_telluric_airmass_reference_observerRF
plot_routines['telluric_airmass_berv_reference_observerRF'] = SLOPpy.plot_telluric_airmass_berv_reference_observerRF
#plot_routines['telluric_obsolete_wyttenbach'] = SLOPpy.plot_telluric_obsolete_wyttenbach
#plot_routines['telluric_airmass_observerRF_chunks'] = SLOPpy.plot_telluric_airmass_observerRF_chunks
plot_routines['telluric_observerRF_skycalc'] = SLOPpy.plot_telluric_observerRF_skycalc
plot_routines['interstellar_lines'] = SLOPpy.plot_interstellar_lines
plot_routines['master_out'] = SLOPpy.plot_master_out
plot_routines['telluric_molecfit_preparation'] = SLOPpy.plot_telluric_molecfit_preparation
# ! NEW
plot_routines['transmission_spectrum_preparation'] = SLOPpy.plot_transmission_spectrum_preparation
"""
plot_routines['transmission_spectrum_planetRF'] = SLOPpy.plot_transmission_spectrum_planetRF
plot_routines['transmission_spectrum_observerRF'] = SLOPpy.plot_transmission_spectrum_observerRF
plot_routines['transmission_spectrum_stellarRF'] = SLOPpy.plot_transmission_spectrum_stellarRF
plot_routines['transmission_spectrum'] = SLOPpy.plot_transmission_spectrum
plot_routines['second_telluric_correction_on_transmission'] = SLOPpy.plot_second_telluric_correction_on_transmission
plot_routines['transmission_clv_rm_correction_planetRF'] = SLOPpy.plot_transmission_clv_rm_correction_planetRF
plot_routines['transmission_clv_rm_correction_observerRF'] = SLOPpy.plot_transmission_clv_rm_correction_observerRF
plot_routines['transmission_clv_rm_correction_stellarRF'] = SLOPpy.plot_transmission_clv_rm_correction_stellarRF
plot_routines['transmission_clv_rm_correction'] = SLOPpy.plot_transmission_clv_rm_correction
plot_routines['spectra_lightcurve'] = SLOPpy.plot_spectra_lightcurve
plot_routines['excess_lightcurve'] = SLOPpy.plot_spectra_lightcurve
plot_routines['spectra_lightcurve_clv_rm_correction'] = SLOPpy.plot_spectra_lightcurve_clv_rm_correction
plot_routines['excess_lightcurve_clv_rm_correction'] = SLOPpy.plot_spectra_lightcurve_clv_rm_correction
plot_routines['transmission_lightcurve_planetRF'] = SLOPpy.plot_transmission_lightcurve_planetRF
plot_routines['transmission_lightcurve_observerRF'] = SLOPpy.plot_transmission_lightcurve_observerRF
plot_routines['transmission_lightcurve_stellarRF'] = SLOPpy.plot_transmission_lightcurve_stellarRF
plot_routines['transmission_lightcurve'] = SLOPpy.plot_transmission_lightcurve
plot_routines['transmission_map'] = SLOPpy.plot_transmission_map
plot_routines['transmission_clv_rm_map'] = SLOPpy.plot_transmission_clv_rm_map
"""
plot_lines_routines = collections.OrderedDict()
plot_lines_routines['clv_rm_models_lines'] = SLOPpy.plot_clv_rm_models_lines
plot_lines_routines['transmission_binned_mcmc'] = SLOPpy.plot_transmission_binned_mcmc
plot_lines_routines['transmission_spectrum_planetRF'] = SLOPpy.plot_transmission_spectrum_planetRF
plot_lines_routines['transmission_spectrum_observerRF'] = SLOPpy.plot_transmission_spectrum_observerRF
plot_lines_routines['transmission_spectrum_stellarRF'] = SLOPpy.plot_transmission_spectrum_stellarRF
plot_lines_routines['transmission_spectrum'] = SLOPpy.plot_transmission_spectrum
plot_lines_routines['transmission_spectrum_planetRF_iterative'] = SLOPpy.plot_transmission_spectrum_planetRF_iterative
plot_lines_routines['transmission_spectrum_observerRF_iterative'] = SLOPpy.plot_transmission_spectrum_observerRF_iterative
plot_lines_routines['transmission_spectrum_stellarRF_iterative'] = SLOPpy.plot_transmission_spectrum_stellarRF_iterative
plot_lines_routines['transmission_spectrum_iterative'] = SLOPpy.plot_transmission_spectrum_iterative
plot_average_routines = collections.OrderedDict()
plot_average_routines['compare_master_out'] = SLOPpy.plot_compare_master_out
plot_lines_average_routines = collections.OrderedDict()
# ! These should be removed and performed line by line !
plot_lines_average_routines['transmission_binned_mcmc'] = SLOPpy.plot_transmission_binned_mcmc
plot_lines_average_routines['transmission_spectrum_average_planetRF'] = SLOPpy.plot_transmission_spectrum_average_planetRF
plot_lines_average_routines['transmission_spectrum_average_observerRF'] = SLOPpy.plot_transmission_spectrum_average_observerRF
plot_lines_average_routines['transmission_spectrum_average_stellarRF'] = SLOPpy.plot_transmission_spectrum_average_stellarRF
plot_lines_average_routines['transmission_spectrum_average'] = SLOPpy.plot_transmission_spectrum_average
"""
plot_average_routines['excess_lightcurve_average'] = SLOPpy.plot_spectra_lightcurve_average
plot_average_routines['spectra_lightcurve_average'] = SLOPpy.plot_spectra_lightcurve_average
plot_average_routines['spectra_lightcurve_average_clv_rm_correction'] = \
SLOPpy.plot_spectra_lightcurve_average_clv_rm_correction
plot_average_routines['excess_lightcurve_average_clv_rm_correction'] = \
SLOPpy.plot_spectra_lightcurve_average_clv_rm_correction
plot_average_routines['transmission_average_planetRF'] = SLOPpy.plot_transmission_average_planetRF
plot_average_routines['transmission_average_observerRF'] = SLOPpy.plot_transmission_average_observerRF
plot_average_routines['transmission_average_stellarRF'] = SLOPpy.plot_transmission_average_stellarRF
plot_average_routines['transmission_average'] = SLOPpy.plot_transmission_average
plot_average_routines['transmission_lightcurve_average_planetRF'] = SLOPpy.plot_transmission_lightcurve_average_planetRF
plot_average_routines['transmission_lightcurve_average_observerRF'] = SLOPpy.plot_transmission_lightcurve_average_observerRF
plot_average_routines['transmission_lightcurve_average_stellarRF'] = SLOPpy.plot_transmission_lightcurve_average_stellarRF
plot_average_routines['transmission_lightcurve_average'] = SLOPpy.plot_transmission_lightcurve_average
plot_average_routines['transmission_clv_rm_average_planetRF'] = SLOPpy.plot_transmission_clv_rm_average_planetRF
plot_average_routines['transmission_clv_rm_average_observerRF'] = SLOPpy.plot_transmission_clv_rm_average_observerRF
plot_average_routines['transmission_clv_rm_average_stellarRF'] = SLOPpy.plot_transmission_clv_rm_average_stellarRF
plot_average_routines['transmission_clv_rm_average'] = SLOPpy.plot_transmission_clv_rm_average
plot_average_routines['compare_clv_rm_effects_planetRF'] = SLOPpy.plot_compare_clv_rm_effects_planetRF
plot_average_routines['compare_clv_rm_effects_observerRF'] = SLOPpy.plot_compare_clv_rm_effects_observerRF
plot_average_routines['compare_clv_rm_effects_stellarRF'] = SLOPpy.plot_compare_clv_rm_effects_stellarRF
plot_average_routines['compare_clv_rm_effects'] = SLOPpy.plot_compare_clv_rm_effects
plot_average_routines['transmission_map_average'] = SLOPpy.plot_transmission_map_average
plot_average_routines['transmission_clv_rm_map_average'] = SLOPpy.plot_transmission_clv_rm_map_average
"""
"""
Execution of subroutines
"""
# ! NEW !
print()
print("*** Data preparation analysis ***")
try:
pipeline = config_in['pipeline']
has_plots = len(pipeline)
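        # len() is used only to raise a TypeError when the section is present
        # but empty (None), so that the except branch below can handle it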
except (KeyError, TypeError):
pipeline = {}
for key in pipeline:
if key in pipeline_common_routines:
print()
pipeline_common_routines[key](config_in)
# ! Kept here for legacy purposes !
for key in pipeline:
if key in pipeline_routines:
print()
pipeline_routines[key](config_in)
# ! NEW !
print()
print("*** Spectral lines analysis ***")
try:
spectral_lines = config_in['spectral_lines']
has_plots = len(spectral_lines)
except (KeyError, TypeError):
        spectral_lines = {}
for lines_label in spectral_lines:
for key in pipeline:
if key in pipeline_lines_routines:
print()
pipeline_lines_routines[key](config_in, lines_label)
#for key, func in pipeline_routines.items():
# if key in pipeline: func(config_in)
#TODO: must be updated to be performed on a single set of spectral lines
try:
plots = config_in['plots']
has_plots = len(plots)
except (KeyError, TypeError):
return
print()
print("*** Plot Subroutines ***")
print()
plots = config_in['plots']
nights = config_in['nights']
for key in plots:
if key in plot_preparation_routines:
plot_preparation_routines[key](config_in)
print()
for key in plots:
if key in plot_routines:
plot_routines[key](config_in)
print()
for lines_label in config_in['spectral_lines']:
for key in plots:
for night in nights:
if key in plot_lines_routines:
plot_lines_routines[key](config_in, lines_label, night)
print()
if key in plot_lines_average_routines:
plot_lines_average_routines[key](config_in, lines_label)
print()
#for key, func in plot_preparation_routines.items():
# if key in plots: func(config_in)
#
#for night in nights:
# for key, func in plot_routines.items():
# if key in plots: func(config_in, night)
#
#for key, func in plot_average_routines.items():
# if key in plots: func(config_in)
| 24,000 | 51.749451 | 139 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_mcmc.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.constants import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.math_functions import *
from SLOPpy.subroutines.bayesian_emcee import *
# from SLOPpy.subroutines.rebin_subroutines import *
from scipy.signal import savgol_filter
__all__ = ['compute_transmission_mcmc', 'compute_transmission_mcmc_iterative']
subroutine_name = 'transmission_mcmc'
def compute_transmission_mcmc_iterative(config_in, lines_label):
pca_parameters = from_config_get_pca_parameters(config_in)
for it in range(0, pca_parameters.get('iterations', 5)):
compute_transmission_mcmc(config_in, lines_label, reference='planetRF', pca_iteration=it)
def compute_transmission_mcmc(config_in, lines_label, reference='planetRF', pca_iteration=-1):
night_dict = from_config_get_nights(config_in)
planet_dict = from_config_get_planet(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
""" selection of those parameters that are specific of the spectral line(s)
under analysis
"""
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
norm_dict = lines_dict.get('normalization', {})
norm_pams = {}
norm_pams['normalize_transmission'] = norm_dict.get('normalize_transmission', True)
norm_pams['normalization_model'] = norm_dict.get('normalization_model', 'polynomial')
""" Normalization parameters for polynomial model"""
norm_pams['model_poly_degree'] = norm_dict.get('model_poly_degree', 2)
norm_pams['spectra_poly_degree'] = norm_dict.get('spectra_poly_degree', 2)
norm_pams['lower_threshold'] = norm_dict.get('lower_threshold', 0.950)
norm_pams['percentile_selection'] = norm_dict.get('percentile_selection', 10)
""" Normalization parameters using Savitzky-Golay filter"""
norm_pams['window_length'] = norm_dict.get('window_length', 101)
norm_pams['polyorder'] = norm_dict.get('polyorder', 3)
norm_pams['mode'] = norm_dict.get('mode', 'nearest')
norm_pams['cval'] = norm_dict.get('cval', 1.0)
sampler_pams = lines_dict['sampler_parameters']
sampler_name = sampler_pams.get('sampler_name', 'emcee')
# TODO reference as input parameter
reference = 'planetRF'
"""
- case 0: only one spectral line, default line parameters are contrast, FWHM, rv_shift
- case 1: only one spectral line, no winds
- case 2: only one spectral line, no planetary radius dependance
- case 3: only one spectral line, no winds and no planetary radius dependance
- case 10: more than one spectral lines, all line parameters are free and independent
- case 11: more than one spectral lines, all lines are affected by the same wind
- case 12: more than one spectral lines, all lines have same FWHM
- case 13: more than one spectral lines, all lines are affected by the same wind and have same FWHM
- case 14: more than one spectral lines, no winds
- case 15: more than one spectral lines, no winds, all lines have same FWHM
- case 20: more than one spectral lines, no Rp dependance, all line parameters are free and independent
- case 21: more than one spectral lines, no Rp dependance, all lines are affected by the same wind
- case 22: more than one spectral lines, no Rp dependance, all lines have same FWHM
- case 23: more than one spectral lines, no Rp dependance, all lines are affected by the same wind and have same FWHM
- case 24: more than one spectral lines, no Rp dependance, no winds
- case 25: more than one spectral lines, no Rp dependance, no winds, all lines have same FWHM
free_Rp free_winds shared_winds shared_FWHM
- case 0: True True False False DEFAULT for single line
- case 1: True False False False
- case 2: False True False False
- case 3: False False False False
- case 10: True True False False DEFAULT for multiple lines
- case 11: True True True False
- case 12: True True False True
- case 13: True True True True
- case 14: True False False False
- case 15: True False False True
- case 20: False True False False
- case 21: False True True False
- case 22: False True False True
- case 23: False True True True
- case 24: False False False False
- case 25: False False False True
"""
model_case = 10
fit_pams = lines_dict['fit_parameters']
    # Added compatibility with alternative ("wrong") keys
clv_rm_correction = lines_dict.get('clv_rm_correction', True)
free_Rp = fit_pams.get('free_Rp', True) \
and fit_pams.get('free_planet_radius', True) \
and clv_rm_correction
free_winds = fit_pams.get('free_winds', True) \
and fit_pams.get('free_offset', True)
shared_winds = fit_pams.get('shared_winds', False) \
or fit_pams.get('shared_offset', False)
shared_FWHM = fit_pams.get('shared_FWHM', False) \
or fit_pams.get('shared_fwhm', False)
    prior_dict = fit_pams.get('priors', {})
if len(lines_dict['lines']) < 2:
if free_Rp is True and free_winds is True:
model_case = 0
if free_Rp is True and free_winds is False:
model_case = 1
if free_Rp is False and free_winds is True:
model_case = 2
if free_Rp is False and free_winds is False:
model_case = 3
else:
if free_Rp is True:
if free_winds is True:
if shared_winds is False and shared_FWHM is False:
model_case = 10
if shared_winds is True and shared_FWHM is False:
model_case = 11
if shared_winds is False and shared_FWHM is True:
model_case = 12
if shared_winds is True and shared_FWHM is True:
model_case = 13
else:
if shared_winds is False and shared_FWHM is False:
model_case = 14
if shared_winds is False and shared_FWHM is True:
model_case = 15
else:
if free_winds is True:
if shared_winds is False and shared_FWHM is False:
model_case = 20
if shared_winds is True and shared_FWHM is False:
model_case = 21
if shared_winds is False and shared_FWHM is True:
model_case = 22
if shared_winds is True and shared_FWHM is True:
model_case = 23
else:
if shared_winds is False and shared_FWHM is False:
model_case = 24
if shared_winds is False and shared_FWHM is True:
model_case = 25
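    # The cascade above can be condensed into a lookup table; a minimal
    # illustrative sketch for the multiple-lines branch (dead code, the
    # names below are hypothetical and not used elsewhere in the pipeline):
    """
    _case_lookup = {
        # (free_winds, shared_winds, shared_FWHM) -> offset from the base case
        (True, False, False): 0,
        (True, True, False): 1,
        (True, False, True): 2,
        (True, True, True): 3,
        (False, False, False): 4,
        (False, False, True): 5,
    }
    base = 10 if free_Rp else 20
    model_case = base + _case_lookup[(free_winds, shared_winds, shared_FWHM)]
    """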
jitter_flag = fit_pams.get('jitter', True)
print()
print(' free_Rp: (default: True) ', free_Rp)
print(' free_winds: (default: True) ', free_winds)
print(' shared_winds: (default: False) ', shared_winds)
print(' shared_FWHM: (default: False) ', shared_FWHM)
print(' jitter: (default: True) ', jitter_flag)
print(' # lines: ', len(lines_dict['lines']))
print(' model_case: ', model_case)
""" parameters list:
to be updated
pams_dict = {} # dictionary containing the index of a given parameter
pams_list = [] # list with the parameter names ordered according to their index
boundaries = np.empty([0, 2]) # boundaries for MCMC / nested sampling
theta_start = np.empty(0) # starting point for MCMC
lines_center = np.empty(0) # laboratory wavelength of spectral lines
pam_index = 0 # keep track of the number of variables
for line_key, line_val in lines_dict['lines'].items():
pam_name = line_key + '_contrast'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.00, 1.00]], axis=0)
theta_start = np.append(theta_start, 0.010)
pam_index += 1
lines_center = np.append(lines_center, line_val)
        # skip the inclusion of FWHM as a free parameter for each line
        # if the shared FWHM is selected
if model_case in [0, 1, 2, 3, 10, 11, 14, 20, 21, 24]:
# if not lines_dict['fit_parameters']['shared_fwhm']:
pam_name = line_key + '_fwhm'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.00, 150.00]], axis=0)
theta_start = np.append(theta_start, 5.0)
pam_index += 1
# if lines_dict['fit_parameters']['fixed_separation']: continue
# if not lines_dict['fit_parameters']['lines_shift']: continue
if model_case in [0, 2, 10, 12, 20, 22]:
pam_name = line_key + '_winds'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[-5.00, 5.00]], axis=0)
theta_start = np.append(theta_start, 0.00)
pam_index += 1
if model_case in [12, 13, 15, 22, 23, 25]:
# if lines_dict['fit_parameters']['shared_fwhm']:
pam_name = 'shared_fwhm'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.000, 150.00]], axis=0)
theta_start = np.append(theta_start, 5.000)
pam_index += 1
if model_case in [11, 13, 21, 23]:
# if lines_dict['fit_parameters']['fixed_separation'] and lines_dict['fit_parameters']['lines_shift']:
pam_name = 'shared_winds'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[-5.0, 5.0]], axis=0)
theta_start = np.append(theta_start, 0.000)
pam_index += 1
if model_case in [0, 1, 10, 11, 12, 13, 14, 15]:
pams_dict['rp_factor'] = pam_index
pams_list.append('rp_factor')
boundaries = np.append(boundaries, [[0.5, 2.0]], axis=0)
theta_start = np.append(theta_start, 1.0)
pam_index += 1
pams_dict['K_planet'] = pam_index
pams_list.append('K_planet')
    boundaries = np.append(boundaries,
                           [[-300., planet_dict['RV_semiamplitude'][0] + 300.]],
                           axis=0)
theta_start = np.append(
theta_start, planet_dict['RV_semiamplitude'][0])
pam_index += 1
pam_name = 'jitter'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[10**(-12), 0.01]], axis=0)
theta_start = np.append(theta_start, 10**(-11))
pam_index += 1
for ii in range(0, pam_index):
print(pams_list[ii], ' ', boundaries[ii, :],
' ', theta_start[ii])
ndim = pam_index
"""
for night in night_dict:
print()
print("transmission_mcmc Night: {0:s}".format(night))
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle(
'observational_pams', config_in['output'], night)
        # Moved here to check whether PCA or master-out has been employed
preparation_input = load_from_cpickle(
'transmission_preparation', config_in['output'], night)
if preparation_input.get('pca_output', False):
if pca_iteration >= 0:
it_string = str(pca_iteration).zfill(2)
else:
it_string = str(preparation_input.get('ref_iteration', 0)).zfill(2)
preparation = preparation_input[it_string]
else:
preparation = preparation_input
it_string = ''
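        # it_string tags the output files with the zero-padded PCA iteration
        # (e.g., pca_iteration=3 -> '03'); it stays empty when PCA is not used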
try:
mcmc_data = load_from_cpickle(subroutine_name + '_data', config_in['output'], night, lines_label, it_string)
clv_rm_radius = mcmc_data['clv_rm_radius']
clv_rm_grid = mcmc_data['clv_rm_grid']
transmission_spec = mcmc_data['transmission_spec']
transmission_spec_err = mcmc_data['transmission_spec_err']
wave_array = mcmc_data['wave_array']
time_array = mcmc_data['time_array']
planet_RVsinusoid = mcmc_data['planet_RVsinusoid']
jitter_index = mcmc_data['jitter_index']
n_jitter = mcmc_data['n_jitter']
print(" Loading MCMC data array for lines {0:s}, night: {1:s}".format(
lines_label, night))
except FileNotFoundError:
print(" Computing MCMC data array for lines {0:s}, night: {1:s}".format(
lines_label, night))
calib_data = load_from_cpickle(
'calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(
config_in['output'], night, lists['observations'])
if clv_rm_correction:
try:
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night, lines_label)
except (FileNotFoundError, IOError):
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night)
else:
# workaround if CLV correction is not available
clv_rm_models = {'common': {}}
clv_rm_models['common']['n_radius_grid'] = 3
clv_rm_models['common']['radius_grid'] = np.asarray(
[0.5, 1.0, 1.5])
processed = {
'subroutine': subroutine_name,
}
"""
we use the first transit_full observation to define the boolean
selection array to be used for all the observations.
In this way we make sure that all the wavelength/spectrum arrays have the
same dimension
"""
obs_reference = lists['transit_full'][0]
wave_SRF, step_SRF = shift_wavelength(input_data[obs_reference]['wave'],
input_data[obs_reference]['step'],
observational_pams[obs_reference]['rv_shift_ORF2SRF'])
            #print('WAVE SHAPE:', np.shape(wave_SRF))
            #print('STEP SHAPE:', np.shape(step_SRF))
processed['common'] = {
'range': lines_dict['fit_parameters']['range'],
'reference_wave': wave_SRF,
'reference_step': step_SRF
}
"""
Identification of the orders including the data points of interest
"""
identify_order = (wave_SRF > processed['common']['range'][0]) \
& (wave_SRF < processed['common']['range'][1])
order_selection = (np.sum(identify_order, axis=1) > 0)
order_list = np.arange(0, observational_pams['n_orders'], dtype=np.int16)[order_selection]
n_orders = len(order_list)
processed['common']['selection'] = identify_order
processed['common']['order_selection'] = order_selection
processed['common']['order_list'] = order_list
processed['common']['n_orders'] = n_orders
processed['common']['size'] = np.sum(identify_order)
            #print('COMMON')
            #print(np.shape(processed['common']['selection']))
            #print(processed['common']['order_selection'])
            #print(processed['common']['order_list'])
            #print(processed['common']['size'])
processed['common_extended'] = {
'range': lines_dict['range'],
'reference_wave': wave_SRF,
'reference_step': step_SRF
}
identify_order = (wave_SRF > processed['common_extended']['range'][0]) \
& (wave_SRF < processed['common_extended']['range'][1])
order_selection = (np.sum(identify_order, axis=1) > 0)
order_list = np.arange(0, observational_pams['n_orders'], dtype=np.int16)[order_selection]
n_orders = len(order_list)
processed['common_extended']['selection'] = identify_order
processed['common_extended']['order_selection'] = order_selection
processed['common_extended']['order_list'] = order_list
processed['common_extended']['n_orders'] = n_orders
processed['common_extended']['size'] = np.sum(identify_order)
#print('COMMON_EXTENDED')
#print(np.shape(processed['common_extended']['selection']))
#print(processed['common_extended']['order_selection'])
#print(processed['common_extended']['order_list'])
#print(processed['common_extended']['size'])
#print()
for obs in lists['observations']:
""" we start from the e2ds file, after correction for blaze and
division by the master-out
Observation data:
wave: input_data[obs]['wave']
step: input_data[obs]['step']
flux: preparation[obs]['deblazed']
ferr: preparation[obs]['deblazed_err']
"""
""" First step: we rebin the spectra in the Stellar Reference Frame,
with the step size decided by the user specifically for the fit
"""
processed[obs] = {}
processed[obs]['wave_SRF'], processed[obs]['step_SRF'] = shift_wavelength(
input_data[obs]['wave'],
input_data[obs]['step'],
observational_pams[obs]['rv_shift_ORF2SRF'])
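                """ shift_wavelength is assumed to apply the classical
                    (non-relativistic) Doppler formula; a minimal sketch of
                    the expected behaviour, not the actual implementation:

                    def shift_wavelength(wave, step, rv_shift):
                        factor = 1.0 + rv_shift / speed_of_light_km
                        return wave * factor, step * factor
                """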
""" Continuum normalization:
3) Polynomial fit, everything is hard coded now but personalized
options can be implemented easily in the yaml file
"""
processed[obs]['continuum'] = processed['common']['reference_wave'] * 0.
processed[obs]['normalized'] = processed['common']['reference_wave'] * 0.
processed[obs]['normalized_err'] = processed['common']['reference_wave'] * 0.
""" We perform the selection only on the order
where we actually have data for the MCMC
"""
if norm_pams['normalize_transmission'] and norm_pams['normalization_model'] == 'polynomial':
""" Continuum normalization preparatory steps:
1) exclusion of regions with planetary lines
2) exclusion of regions with stellar lines
3) Polynomial fit of selected regions
Boolean array initialized to all True values, fit is
performed on the extended region and then applied to the fit subset
"""
processed['common_extended']['line_exclusion'] = processed['common_extended']['selection'].copy()
""" Continuum normalization:
1) exclusion of regions with planetary lines, taking into
account the planetary RV semi-amplitude
"""
for line_key, line_val in lines_dict['lines'].items():
line_extension = 1.2 * \
planet_dict['RV_semiamplitude'][0] * \
line_val / speed_of_light_km
processed['common_extended']['line_exclusion'] = processed['common_extended']['line_exclusion'] \
& (np.abs(processed['common_extended']['reference_wave']-line_val) > line_extension)
""" Continuum normalization:
2) exclusion of regions with planetary lines, taking into
account the planetary RV semi-amplitude
"""
try:
for order in processed['common_extended']['order_list']:
                            #print('ORDER', order)
order_sel = processed['common_extended']['selection'][order, :]
                            #print('selection', np.shape(processed['common_extended']['selection']))
                            #print('ORDER_SEL', np.shape(order_sel))
stellar_spectrum_rebinned = rebin_1d_to_1d(clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
clv_rm_models['common']['norm_convolved'],
processed['common_extended']['reference_wave'][order, order_sel],
processed['common_extended']['reference_step'][order, order_sel])
stellar_spectrum_derivative = first_derivative(
processed['common_extended']['reference_wave'][order, order_sel], stellar_spectrum_rebinned)
processed['common_extended']['line_exclusion'][order, order_sel] = processed['common_extended']['line_exclusion'][order, order_sel] & (
np.abs(stellar_spectrum_derivative) < 0.0005)
except KeyError:
print(
"No stellar synthetic spectrum from CLV models, some stellar lines may be included transmission normalization ")
for obs in lists['observations']:
for order in processed['common']['order_list']:
selection = processed['common_extended']['line_exclusion'][order, :] & (
preparation[obs]['deblazed'][order, :]
> np.std(preparation[obs]['deblazed'][order, :]))
if np.sum(selection) < 100:
selection = (preparation[obs]['deblazed'][order, :] >
np.std(preparation[obs]['deblazed'][order, :]))
processed[obs]['norm_coeff_' + repr(order)] = \
np.polynomial.chebyshev.chebfit(processed[obs]['wave_SRF'][order, selection],
preparation[obs]['deblazed'][order, selection],
2)
processed[obs]['continuum'][order, selection] = np.polynomial.chebyshev.chebval(
                            processed[obs]['wave_SRF'][order, selection], processed[obs]['norm_coeff_' + repr(order)])
processed[obs]['normalized'][order, selection] = preparation[obs]['deblazed'][order, selection] / \
processed[obs]['continuum'][order, selection]
processed[obs]['normalized_err'][order, selection] = preparation[obs]['deblazed_err'][order, selection] / \
processed[obs]['continuum'][order, selection]
elif norm_pams['normalize_transmission'] and (
norm_pams['normalization_model'] == 'savgol'
or norm_pams['normalization_model'] == 'savitzky-golay'):
print(' ', obs, ' normalization using Savitzky-Golay filter')
for obs in lists['observations']:
                    processed[obs]['continuum'] = np.ones_like(preparation[obs]['deblazed'])
for order in processed['common']['order_list']:
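                            # savgol_filter fits a local polynomial of order
                            # `polyorder` inside a sliding window of
                            # `window_length` pixels; scipy requires
                            # polyorder < window_length, satisfied by the
                            # defaults (3, 101)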
processed[obs]['continuum'][order,:] = savgol_filter(preparation[obs]['deblazed'][order,:],
window_length=norm_pams['window_length'],
polyorder=norm_pams['polyorder'],
mode=norm_pams['mode'],
cval=norm_pams['cval'])
processed[obs]['normalized'] = preparation[obs]['deblazed'] / processed[obs]['continuum']
processed[obs]['normalized_err'] = preparation[obs]['deblazed_err'] / processed[obs]['continuum']
processed['common']['n_obs'] = len(lists['transit_full'])
processed['common']['n_radius_grid'] = clv_rm_models['common']['n_radius_grid']
processed['common']['radius_grid'] = clv_rm_models['common']['radius_grid']
clv_rm_radius = clv_rm_models['common']['radius_grid']
""" We are moving the values of interest from dictionaries to arrays
in order to speed up the MCMC
1) clv_rm_grid: array with all the CLV models, as a function of the
radius of the planet
2) time_from_transit: BJD_TDB - T0
            3) planet_RVsinusoid: fractional RV of the planet (K=1), stored as an array
"""
clv_rm_grid = np.ones([processed['common']['n_radius_grid'],
processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
time_from_transit = np.empty(
processed['common']['n_obs'], dtype=np.double)
wave_array = np.empty([processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
time_array = np.empty([processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
transmission_spec = np.empty([processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
transmission_spec_err = np.empty([processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
for i_obs, obs in enumerate(lists['transit_full']):
time_from_transit[i_obs] = observational_pams[obs]['BJD'] - \
observational_pams['time_of_transit']
time_array[i_obs, :] = time_from_transit[i_obs]
# planet_RVsinusoid[i_obs] = np.sin(2*np.pi / planet_dict['period'][0] * time_from_transit[i_obs])
wave_array[i_obs, :] = processed[obs]['wave_SRF'][processed['common']['selection']].flatten()
transmission_spec[i_obs, :] = processed[obs]['normalized'][processed['common']['selection']].flatten()
                transmission_spec_err[i_obs, :] = processed[obs]['normalized_err'][processed['common']['selection']].flatten()
if clv_rm_correction is False:
continue
for i_r in range(0, processed['common']['n_radius_grid']):
clv_rm_temp = processed['common_extended']['reference_wave'] * 0.
for order in processed['common']['order_list']:
""" CLV Synthetic models are in the Stellar Reference system,
so no shift is required """
clv_rm_temp[order, :] = rebin_1d_to_1d(
clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
clv_rm_models[obs]['clv_rm_model_convolved_normalized'][i_r, :],
processed[obs]['wave_SRF'][order, :],
processed[obs]['step_SRF'][order, :],
preserve_flux=False)
clv_rm_grid[i_r, i_obs, :] = clv_rm_temp[processed['common']['selection']].flatten()
# preserve_flux should be True or False?
# False if the spectra are already normalized
remove_outliers = (np.abs(transmission_spec - 1.) > 0.5)
transmission_spec[remove_outliers] = 1.0
transmission_spec_err[remove_outliers] = 1.0
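            # outliers are neutralized rather than masked: the flux is reset
            # to the continuum level (1.0) and the uncertainty inflated to 1.0,
            # so these points carry negligible weight in the likelihood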
planet_RVsinusoid = np.sin(
2*np.pi / planet_dict['period'][0] * time_array)
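            # for a circular orbit the planetary RV is K_p*sin(2*pi*(t-T0)/P);
            # the sinusoid is computed with K=1 so that K_planet enters the
            # MCMC model as a simple multiplicative factor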
if jitter_flag:
jitter_index = []
n_jitter = 1
else:
jitter_index = None
n_jitter = 0
mcmc_data = {
'observations': lists['transit_full'],
'common_wave': processed['common']['reference_wave'],
'common_step': processed['common']['reference_step'],
'clv_rm_grid': clv_rm_grid,
'transmission_spec': transmission_spec,
'transmission_spec_err': transmission_spec_err,
'wave_array': wave_array,
'time_array': time_array,
'planet_RVsinusoid': planet_RVsinusoid,
'clv_rm_radius': clv_rm_models['common']['radius_grid'],
'n_obs': len(lists['transit_full']),
'n_radius_grid': clv_rm_models['common']['n_radius_grid'],
'jitter_index': jitter_index,
'n_jitter': n_jitter
}
save_to_cpickle(subroutine_name + '_data', mcmc_data,
config_in['output'], night, lines_label, it_string)
# Forcing memory deallocation
clv_rm_models = None
mcmc_data = None
print()
print("transmission_mcmc ")
try:
results_dict = load_from_cpickle(subroutine_name+'_'+sampler_name+'_results',
config_in['output'], night, lines_label, it_string)
print(" Transmission MCMC analysis for lines {0:s}, night: {1:s} already performed".format(
lines_label, night))
pams_dict = results_dict['pams_dict']
chain_med = results_dict['chain_med']
boundaries = results_dict['boundaries']
start_average = np.average(results_dict['point_start'], axis=0)
ndim = results_dict['ndim']
# TODO improve output
print(' *** sampler output ')
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}]) (start: {7:9f})'.format(key, val,
chain_med[val, 0],
chain_med[val, 2],
chain_med[val, 1],
boundaries[val, 0],
boundaries[val, 1],
start_average[val])
)
continue
# R(h) = np.sqrt(1+h/delta)
except FileNotFoundError:
print()
# getting fit parameters
lines_center, pams_dict, pams_list, boundaries, theta_start = define_theta_array(
model_case, lines_dict, planet_dict, n_jitter)
ndim = len(theta_start)
ngen = sampler_pams.get('n_gen', 64000)
nwalkers_mult = sampler_pams.get('n_walkers_mult', 2)
nwalkers = sampler_pams.get('n_walkers', nwalkers_mult * ndim)
nthin = sampler_pams.get('n_thin', 50)
nsteps = sampler_pams.get('n_steps', 20000)
nburnin = sampler_pams.get('n_burnin', 5000)
ndata = np.size(wave_array)
            if 'rp_factor' in pams_dict:
pam_id = pams_dict['rp_factor']
boundaries[pam_id, :] = [clv_rm_radius[0], clv_rm_radius[-1]]
print()
print(' PyDE + emcee parameters')
print(' n_dim: {0:9.0f}'.format(ndim))
print(
' n_walkers: (default: 2*ndim) {0:9.0f}'.format(nwalkers))
print(' n_gen: (default: 64000) {0:9.0f}'.format(ngen))
print(' n_steps: (default: 20000) {0:9.0f}'.format(nsteps))
print(
                ' n_burnin:  (default: 5000) {0:9.0f}'.format(nburnin))
print(' n_thin: (default: 50) {0:9.0f}'.format(nthin))
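            # global optimization with PyDE provides the starting population,
            # which emcee then evolves: nwalkers chains for nsteps iterations,
            # with the stored chain thinned by a factor nthin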
population, sampler_chain, sampler_lnprobability, point_start = emcee_lines_fit_functions(
model_case,
wave_array,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
prior_dict,
theta_start, boundaries, ndim, nwalkers, ngen, nsteps, nthin)
flat_chain, flat_lnprob, chain_med, chain_MAP, lnprob_med, lnprob_MAP = \
emcee_flatten_median(population, sampler_chain,
sampler_lnprobability, nburnin, nthin, nwalkers)
emcee_compute_BIC_AIC(lnprob_med, lnprob_MAP, ndata, ndim)
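            # presumably the standard definitions, BIC = ndim*ln(ndata) - 2*lnL
            # and AIC = 2*ndim - 2*lnL, evaluated at the median and MAP samples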
med_lines_model, med_clv_model, med_lines_array, med_planet_K, med_planet_R, med_jitter = \
return_model(model_case,
chain_med[:, 0],
wave_array,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index)
map_lines_model, map_clv_model, map_lines_array, map_planet_K, map_planet_R, map_jitter = \
return_model(model_case,
chain_MAP,
wave_array,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index)
results_dict = {
'sampler_name': sampler_name,
'ndim': ndim,
'nwalkers': nwalkers,
'nthin': nthin,
'nsteps': nsteps,
'nburnin': nburnin,
'ndata': ndata,
'pams_dict': pams_dict,
'population': population,
'sampler_chain': sampler_chain,
'sampler_lnprobability': sampler_lnprobability,
'theta_start': theta_start,
'boundaries': boundaries,
'flat_chain': flat_chain,
'flat_lnprob': flat_lnprob,
'chain_med': chain_med,
'chain_MAP': chain_MAP,
'lnprob_med': lnprob_med,
'lnprob_MAP': lnprob_MAP,
'lines_center': lines_center,
'point_start': point_start,
}
results_dict['results'] = {
'lines_model': med_lines_model,
'clv_model': med_clv_model,
'lines_array': med_lines_array,
'planet_K': med_planet_K,
'planet_R': med_planet_R,
'jitter': med_jitter
}
results_dict['results_MAP'] = {
'lines_model': map_lines_model,
'clv_model': map_clv_model,
'lines_array': map_lines_array,
'planet_K': map_planet_K,
'planet_R': map_planet_R,
'jitter': map_jitter
}
results_dict['results']['observational_pams'] = {}
results_dict['results_MAP']['observational_pams'] = {}
for obs in lists['observations']:
results_dict['results']['observational_pams'][obs] = {}
results_dict['results_MAP']['observational_pams'][obs] = {}
""" RV shift from the observer RF to the planet RF
STRONG ASSUMPTIONS:
- there is only the transiting planet in the system
- the planet has null eccentricity
                - linear approximation of the orbit near the transit event
                Computation is performed by moving to the Solar Barycenter, then to the Stellar System Barycenter
and finally onto the planet
"""
results_dict['results']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
""" RV shift from Stellar Rest Frame to Planetary Rest Frame
We have to take into account the RV of star relatively to the Barycenter
"""
results_dict['results']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
save_to_cpickle(subroutine_name+'_'+sampler_name+'_results',
results_dict, config_in['output'], night, lines_label, it_string)
# TODO improve output
print(' *** sampler output ')
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}])'.format(key, val,
chain_med[val, 0],
chain_med[val, 2],
chain_med[val, 1],
boundaries[val, 0], boundaries[val, 1])
)
# print(' *** physical output')
#
# results_dict['results'] = {
# 'lines_model': med_lines_model,
# 'clv_model': med_clv_model,
# 'lines_array': med_lines_array,
# 'planet_K': med_planet_K,
# 'planet_R': med_planet_R,
# 'jitter': med_jitter
# }
""" Analysis of the entire dataset """
print()
try:
all_mcmc_data = load_from_cpickle(
subroutine_name+'_data', config_in['output'], night='', lines=lines_label, it_string=it_string)
all_clv_rm_radius = all_mcmc_data['clv_rm_radius']
all_clv_rm_grid = all_mcmc_data['clv_rm_grid']
all_transmission_spec = all_mcmc_data['transmission_spec']
all_transmission_spec_err = all_mcmc_data['transmission_spec_err']
all_wave_array = all_mcmc_data['wave_array']
all_time_array = all_mcmc_data['time_array']
all_planet_RVsinusoid = all_mcmc_data['planet_RVsinusoid']
all_observations = all_mcmc_data['observations']
all_n_obs = all_mcmc_data['n_obs']
all_n_radius_grid = all_mcmc_data['n_radius_grid']
all_jitter_index = all_mcmc_data['jitter_index']
n_jitter = all_mcmc_data['n_jitter']
    except FileNotFoundError:
n_jitter = 0
for night in night_dict:
mcmc_data = load_from_cpickle(subroutine_name+'_data',
config_in['output'], night, lines_label, it_string=it_string)
try:
# Building the arrays for the full analysis
all_clv_rm_grid = np.concatenate(
(all_clv_rm_grid, mcmc_data['clv_rm_grid']), axis=1)
all_transmission_spec = np.concatenate(
(all_transmission_spec, mcmc_data['transmission_spec']))
all_transmission_spec_err = np.concatenate(
(all_transmission_spec_err, mcmc_data['transmission_spec_err']))
all_wave_array = np.concatenate(
(all_wave_array, mcmc_data['wave_array']))
all_time_array = np.concatenate(
(all_time_array, mcmc_data['time_array']))
all_planet_RVsinusoid = np.concatenate(
(all_planet_RVsinusoid, mcmc_data['planet_RVsinusoid']))
all_observations = np.concatenate(
(all_observations, mcmc_data['observations']))
all_n_obs += mcmc_data['n_obs']
if jitter_flag:
all_jitter_index = np.concatenate(
(all_jitter_index, n_jitter*np.ones(np.shape(mcmc_data['wave_array']), dtype=np.int16)))
n_jitter += 1
except NameError:
""" This error is expected when retrieving the data of the first night"""
all_clv_rm_radius = mcmc_data['clv_rm_radius']
all_clv_rm_grid = mcmc_data['clv_rm_grid']
all_transmission_spec = mcmc_data['transmission_spec']
all_transmission_spec_err = mcmc_data['transmission_spec_err']
all_wave_array = mcmc_data['wave_array']
all_time_array = mcmc_data['time_array']
all_planet_RVsinusoid = mcmc_data['planet_RVsinusoid']
all_observations = mcmc_data['observations']
all_n_obs = mcmc_data['n_obs']
all_n_radius_grid = mcmc_data['n_radius_grid']
if jitter_flag:
all_jitter_index = n_jitter * \
np.ones(
np.shape(mcmc_data['wave_array']), dtype=np.int16)
n_jitter += 1
else:
all_jitter_index = None
all_mcmc_data = {
'observations': all_observations,
'clv_rm_grid': all_clv_rm_grid,
'transmission_spec': all_transmission_spec,
'transmission_spec_err': all_transmission_spec_err,
'wave_array': all_wave_array,
'time_array': all_time_array,
'planet_RVsinusoid': all_planet_RVsinusoid,
'clv_rm_radius': all_clv_rm_radius,
'n_obs': all_n_obs,
'n_radius_grid': all_n_radius_grid,
'jitter_index': all_jitter_index,
'n_jitter': n_jitter
}
save_to_cpickle(subroutine_name+'_data', all_mcmc_data,
config_in['output'], night='', lines=lines_label, it_string=it_string)
try:
results_dict = load_from_cpickle(subroutine_name + '_' + sampler_name+'_results',
config_in['output'], night='', lines=lines_label, it_string=it_string)
print(" Transmission MCMC analysis for lines {0:s} already performed ".format(
lines_label))
pams_dict = results_dict['pams_dict']
chain_med = results_dict['chain_med']
boundaries = results_dict['boundaries']
ndim = results_dict['ndim']
# TODO improve output
print(' *** sampler output ')
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}])'.format(key, val,
chain_med[val, 0],
chain_med[val, 2],
chain_med[val, 1],
boundaries[val, 0], boundaries[val, 1])
)
except FileNotFoundError:
lines_center, pams_dict, pams_list, boundaries, theta_start = define_theta_array(
model_case, lines_dict, planet_dict, n_jitter)
        ndim = len(theta_start)

        # re-derive the sampler settings: the per-night values are undefined
        # when all the per-night results were loaded from disk
        ngen = sampler_pams.get('n_gen', 64000)
        nwalkers_mult = sampler_pams.get('n_walkers_mult', 2)
        nwalkers = sampler_pams.get('n_walkers', nwalkers_mult * ndim)
        nthin = sampler_pams.get('n_thin', 50)
        nsteps = sampler_pams.get('n_steps', 20000)
        nburnin = sampler_pams.get('n_burnin', 5000)
        if 'rp_factor' in pams_dict:
pam_id = pams_dict['rp_factor']
boundaries[pam_id, :] = [clv_rm_radius[0], clv_rm_radius[-1]]
ndata = np.size(all_wave_array)
print()
print(' PyDE + emcee parameters')
print(' n_dim: {0:9.0f}'.format(ndim))
print(
' n_walkers: (default: 2*ndim) {0:9.0f}'.format(nwalkers))
print(' n_gen: (default: 64000) {0:9.0f}'.format(ngen))
print(' n_steps: (default: 20000) {0:9.0f}'.format(nsteps))
print(
            ' n_burnin:  (default: 5000) {0:9.0f}'.format(nburnin))
print(' n_thin: (default: 50) {0:9.0f}'.format(nthin))
population, sampler_chain, sampler_lnprobability, point_start = emcee_lines_fit_functions(
model_case,
all_wave_array,
all_transmission_spec,
all_transmission_spec_err,
all_clv_rm_radius,
all_clv_rm_grid,
all_planet_RVsinusoid,
lines_center,
all_jitter_index,
prior_dict,
theta_start, boundaries, ndim, nwalkers, ngen, nsteps, nthin)
flat_chain, flat_lnprob, chain_med, chain_MAP, lnprob_med, lnprob_MAP = \
emcee_flatten_median(population, sampler_chain,
sampler_lnprobability, nburnin, nthin, nwalkers)
emcee_compute_BIC_AIC(lnprob_med, lnprob_MAP, ndata, ndim)
med_lines_model, med_clv_model, med_lines_array, med_planet_K, med_planet_R, med_jitter = \
return_model(model_case,
chain_med[:, 0],
all_wave_array,
all_clv_rm_radius,
all_clv_rm_grid,
all_planet_RVsinusoid,
lines_center,
all_jitter_index)
map_lines_model, map_clv_model, map_lines_array, map_planet_K, map_planet_R, map_jitter = \
return_model(model_case,
chain_MAP,
all_wave_array,
all_clv_rm_radius,
all_clv_rm_grid,
all_planet_RVsinusoid,
lines_center,
all_jitter_index)
results_dict = {
'sampler_name': sampler_name,
'ndim': ndim,
'nwalkers': nwalkers,
'nthin': nthin,
'nsteps': nsteps,
'nburnin': nburnin,
'ndata': ndata,
'pams_dict': pams_dict,
'population': population,
'sampler_chain': sampler_chain,
'sampler_lnprobability': sampler_lnprobability,
'theta_start': theta_start,
'boundaries': boundaries,
'flat_chain': flat_chain,
'flat_lnprob': flat_lnprob,
'chain_med': chain_med,
'chain_MAP': chain_MAP,
'lnprob_med': lnprob_med,
'lnprob_MAP': lnprob_MAP,
'lines_center': lines_center,
'point_start': point_start,
}
results_dict['results'] = {
'lines_model': med_lines_model,
'clv_model': med_clv_model,
'lines_array': med_lines_array,
'planet_K': med_planet_K,
'planet_R': med_planet_R,
'jitter': med_jitter
}
results_dict['results_MAP'] = {
'lines_model': map_lines_model,
'clv_model': map_clv_model,
'lines_array': map_lines_array,
'planet_K': map_planet_K,
'planet_R': map_planet_R,
'jitter': map_jitter
}
results_dict['results']['observational_pams'] = {}
results_dict['results_MAP']['observational_pams'] = {}
for night in night_dict:
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle(
'observational_pams', config_in['output'], night)
""" No differentiation by night """
for obs in lists['observations']:
results_dict['results']['observational_pams'][obs] = {}
results_dict['results_MAP']['observational_pams'][obs] = {}
""" RV shift from the observer RF to the planet RF
STRONG ASSUMPTIONS:
- there is only the transiting planet in the system
- the planet has null eccentricity
                - linear approximation of the orbit near the transit event
                Computation is performed by moving to the Solar Barycenter, then to the Stellar System Barycenter
and finally onto the planet
"""
results_dict['results']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
""" RV shift from Stellar Rest Frame to Planetary Rest Frame
We have to take into account the RV of star relatively to the Barycenter
"""
results_dict['results']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
save_to_cpickle(subroutine_name + '_'+sampler_name+'_results',
results_dict, config_in['output'], night='', lines=lines_label, it_string=it_string)
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}])'.format(key, val,
chain_med[val, 0],
chain_med[val, 2],
chain_med[val, 1],
boundaries[val, 0], boundaries[val, 1])
)
print('MCMC completed')
# Update planet parameters
# deprecated
# try:
# _ = load_from_cpickle(
# 'observational', config_in['output'], night, lines_label)
# print(" Transmission MCMC results for lines {0:s} already store in observational array".format(
# lines_label))
# except FileNotFoundError:
#
# results_full = load_from_cpickle('transmission_mcmc_'+sampler_name+'_results',
# config_in['output'], night='', lines=lines_label)
#
# for night in night_dict:
#
# results_night = load_from_cpickle('transmission_mcmc_'+sampler_name+'_results',
# config_in['output'], night=night, lines=lines_label)
# lists = load_from_cpickle('lists', config_in['output'], night)
# observational_pams = load_from_cpickle(
# 'observational_pams', config_in['output'], night)
# for obs in lists['observations']:
#
# """ RV shift from the observer RF to the planet RF
# STRONG ASSUMPTIONS:
# - there is only the transiting planet in the system
# - the planet has null eccentricity
    #         - linear approximation of the orbit near the transit event
#
    #         Computation is performed by moving to the Solar Barycenter, then to the Stellar System Barycenter
# and finally onto the planet
# """
# observational_pams[obs]['rv_shift_ORF2PRF'] = \
# observational_pams[obs]['BERV'] \
# - observational_pams['RV_star']['RV_systemic'] \
# - results_full['results']['planet_K'] \
# * (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
# / planet_dict['period'][0] * 2 * np.pi
# """ RV shift from Stellar Rest Frame to Planetary Rest Frame
# We have to take into account the RV of star relatively to the Barycenter
# """
# observational_pams[obs]['rv_shift_SRF2PRF'] = \
# + observational_pams[obs]['RV_bjdshift'] \
# - results_full['results']['planet_K'] \
# * (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
# / planet_dict['period'][0] * 2 * np.pi
# observational_pams['Rp_factor'] = results_full['results']['planet_R']
# observational_pams['lines_array'] = results_full['results']['lines_array']
# observational_pams['jitter'] = results_full['results']['jitter']
# save_to_cpickle('observational', observational_pams,
# config_in['output'], night, lines_label)
| 56,597 | 46.24374 | 159 | py |
SLOPpy | SLOPpy-main/SLOPpy/check_differential_refraction.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["check_differential_refraction", "plot_check_differential_refraction", "write_differential_refraction"]
subroutine_name = 'check_differential_refraction'
def check_differential_refraction(config_in):
night_dict = from_config_get_nights(config_in)
for night in night_dict:
try:
processed = load_from_cpickle('check_differential_refraction_processed', config_in['output'], night)
check_drc = load_from_cpickle('check_differential_refraction', config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'],
use_refraction=False, use_telluric=False)
input_data_corrected = retrieve_observations(config_in['output'], night, lists['observations'],
use_refraction=True, use_telluric=False)
input_data_s1d = load_from_cpickle('input_dataset_s1d_fibA', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
check_drc = {
'subroutine': subroutine_name,
'wave': input_data['coadd']['wave']
}
processed = {
'subroutine': subroutine_name
}
for obs in lists['observations']:
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
""" for plotting purpose only"""
#processed[obs]['e2ds'] = input_data[obs]['e2ds']
#processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']
#processed[obs]['flux'] = input_data[obs]['e2ds']/calib_data['blaze']/input_data[obs]['step']
#processed[obs]['flux_err'] = np.sqrt(input_data[obs]['e2ds'])/calib_data['blaze']/input_data[obs]['step']
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['flux_s1d'] = \
rebin_2d_to_1d(input_data[obs]['wave'], input_data[obs]['step'], input_data[obs]['e2ds'],
calib_data['blaze'], input_data['coadd']['wave'], input_data['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2BRF'])
"""
processed[obs]['flux_s1d_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'], input_data[obs]['step'], input_data[obs]['e2ds_err'],
calib_data['blaze'], input_data['coadd']['wave'], input_data['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=0.00, is_error=True)
"""
processed[obs]['flux_s1d_err'] = processed[obs]['flux_s1d']
processed[obs]['s1d_rescaling'], processed[obs]['s1d_rescaled'], processed[obs]['s1d_rescaled_err'] = \
perform_rescaling(input_data['coadd']['wave'],
processed[obs]['flux_s1d'],
processed[obs]['flux_s1d_err'],
[5450.0, 5550.0])
#observational_pams['wavelength_rescaling'])
""" for plotting purpose only"""
#processed[obs]['e2ds_corr'] = input_data_corrected[obs]['e2ds']
#processed[obs]['e2ds_err_corr'] = input_data_corrected[obs]['e2ds_err']
#processed[obs]['flux_corr'] = input_data_corrected[obs]['e2ds']/calib_data['blaze']/input_data_corrected[obs]['step']
#processed[obs]['flux_err_corr'] = np.sqrt(input_data_corrected[obs]['e2ds'])/calib_data['blaze']/input_data_corrected[obs]['step']
processed[obs]['flux_s1d_corr'] = \
rebin_2d_to_1d(input_data_corrected[obs]['wave'],
input_data_corrected[obs]['step'],
input_data_corrected[obs]['e2ds'],
calib_data['blaze'],
input_data_corrected['coadd']['wave'],
input_data_corrected['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2BRF'])
"""
processed[obs]['flux_s1d_corr_err'] = \
rebin_2d_to_1d(input_data_corrected[obs]['wave'],
input_data_corrected[obs]['step'],
input_data_corrected[obs]['e2ds_err'],
calib_data['blaze'],
input_data_corrected['coadd']['wave'],
input_data_corrected['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=0.00,
is_error=True)
"""
processed[obs]['flux_s1d_corr_err'] = processed[obs]['flux_s1d_corr']
processed[obs]['s1d_corr_rescaling'], processed[obs]['s1d_corr_rescaled'], processed[obs]['s1d_corr_rescaled_err'] = \
perform_rescaling(input_data['coadd']['wave'],
processed[obs]['flux_s1d_corr'],
processed[obs]['flux_s1d_corr_err'],
[5450.0, 5550.0])
processed[obs]['dr_correction'] = processed[obs]['s1d_corr_rescaled']/processed[obs]['s1d_rescaled']
processed[obs]['s1d_DRS_rescaling'], processed[obs]['s1d_DRS_rescaled'], processed[obs]['s1d_DRS_rescaled_err'] = \
perform_rescaling(input_data_s1d[obs]['wave'],
input_data_s1d[obs]['flux'],
np.sqrt(np.abs(input_data_s1d[obs]['flux'])),
[5450.0, 5550.0])
#observational_pams['wavelength_rescaling'])
processed[obs]['DRS_coeff_flux'] = []
processed[obs]['DRS_corr'] = np.zeros(input_data_s1d[obs]['size'], dtype=np.double)
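            # the DRS flux correction is a polynomial in wavelength,
            # DRS_corr(lambda) = sum_i c_i * lambda**i, with the coefficients
            # c_i read from the 'HIERARCH TNG DRS FLUX CORR COEFF*' keywords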
for coeff_index in np.arange(0, 10, 1, dtype=np.int16):
try:
keyword = 'HIERARCH TNG DRS FLUX CORR COEFF' + repr(coeff_index)
processed[obs]['DRS_coeff_flux'].extend([input_data[obs]['header']['ccf'][keyword]])
processed[obs]['DRS_corr'] += \
input_data[obs]['header']['ccf'][keyword] * \
np.power(input_data_s1d[obs]['wave'], coeff_index)
except:
continue
processed[obs]['DRS_corr_rescaling'], processed[obs]['DRS_corr_rescaled'], _ = \
perform_rescaling(input_data_s1d[obs]['wave'],
processed[obs]['DRS_corr'],
processed[obs]['DRS_corr'],
observational_pams['wavelength_rescaling'])
check_drc[obs] = {
's1d': {
'wave': input_data['coadd']['wave'],
'flux': processed[obs]['flux_s1d'],
#'flux_err': processed[obs]['flux_s1d_err'],
'rescaled': processed[obs]['s1d_rescaled'],
#'rescaled_err': processed[obs]['s1d_rescaled_err']
},
's1d_corr': {
'correction': processed[obs]['dr_correction'],
'correction_rescaled': processed[obs]['dr_correction'],
'flux': processed[obs]['flux_s1d_corr'],
#'flux_err': processed[obs]['flux_s1d_corr_err'],
'rescaled': processed[obs]['s1d_corr_rescaled'],
#'rescaled_err': processed[obs]['s1d_corr_rescaled_err']
},
'DRS_s1d':{
'wave': input_data_s1d[obs]['wave'],
'flux': input_data_s1d[obs]['flux'],
#'flux_err': np.sqrt(input_data_s1d[obs]['flux']+0.1),
'rescaled': processed[obs]['s1d_DRS_rescaled'],
#'rescaled_err': processed[obs]['s1d_DRS_rescaled_err']
},
'DRS_s1d_corr': {
'correction': processed[obs]['DRS_corr'],
'correction_rescaled': processed[obs]['DRS_corr_rescaled'],
'flux': input_data_s1d[obs]['flux']/processed[obs]['DRS_corr'],
#'flux_err': np.sqrt(np.abs(input_data_s1d[obs]['flux']))/processed[obs]['DRS_corr'],
'rescaled': processed[obs]['s1d_DRS_rescaled']/processed[obs]['DRS_corr_rescaled'],
#'rescaled_err': processed[obs]['s1d_DRS_rescaled_err']/processed[obs]['DRS_corr_rescaled'],
},
}
save_to_cpickle('check_differential_refraction_processed', processed, config_in['output'], night)
save_to_cpickle('check_differential_refraction', check_drc, config_in['output'], night)
print('Night ', night, ' completed')
print()
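
# A minimal sketch (not part of the pipeline) of how the DRS flux-correction
# polynomial reconstructed above works: the header stores ascending
# coefficients c0, c1, ..., and the correction is sum_i c_i * wave**i.
# The coefficient values below are made up for illustration only; np comes
# from the module-level import.
def _sketch_drs_flux_correction():
    wave = np.linspace(3800.0, 6900.0, 5)          # wavelength grid [Angstrom]
    coeff = [1.2, -3.0e-5, 2.0e-9]                 # hypothetical c0, c1, c2
    # explicit evaluation, mirroring the np.power loop above
    corr_loop = np.zeros_like(wave)
    for i, c in enumerate(coeff):
        corr_loop += c * np.power(wave, i)
    # equivalent vectorized evaluation (np.polyval expects descending order)
    corr_polyval = np.polyval(coeff[::-1], wave)
    assert np.allclose(corr_loop, corr_polyval)
    return corr_loop
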
def plot_check_differential_refraction(config_in, night_input=''):
night_dict = from_config_get_nights(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
try:
""" Retrieving the analysis"""
check_drc = load_from_cpickle('check_differential_refraction', config_in['output'], night)
except:
print(" Failed in retrieving the data")
return
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
""" Creation of the color array, based on the BJD of the observations
"""
bjd = []
am = []
for obs in lists['observations']:
bjd.append(observational_pams[obs]['BJD'] - 2450000.0)
am.append(observational_pams[obs]['AIRMASS'])
n_obs = len(lists['observations']) * 1.0 + 1.
colors = np.asarray(bjd)
cmap = plt.cm.viridis
line_colors = cmap(np.linspace(0, 1, len(lists['observations'])))
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
gs.update(hspace=0.1)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
for i, obs in enumerate(lists['observations']):
sel = (check_drc[obs]['s1d']['rescaled'] > -1000000.05)
ax1.plot(check_drc['wave'][sel],
check_drc[obs]['s1d']['rescaled'][sel]+i/5.,
c = line_colors[i], lw = 1, alpha = 1)
ax2.plot(check_drc[obs]['DRS_s1d']['wave'],
check_drc[obs]['DRS_s1d']['rescaled']+i/5.,
c = line_colors[i], lw = 1, alpha = 1)
i_max = 1.5 + i/5.
ax1.set_xlim(check_drc['wave'][0], check_drc['wave'][-1])
ax1.set_ylim(0.00, i_max)
ax1.legend(loc=3)
ax1.set_title('Night: {0:s} \n SLOPpy input s1d'.format(night))
ax2.set_title('DRS input s1d')
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
#gs.update(wspace=0.025, hspace=0.05)
gs.update(hspace=0.1)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
for i, obs in enumerate(lists['observations']):
sel = (check_drc[obs]['s1d']['flux']> 0.05)
#ax1.plot(check_drc['wave'][sel],
# check_drc[obs]['s1d'][sel]+i/5.,
# c=line_colors[i], lw=1, alpha=1.0, zorder=0)
ax1.plot(check_drc['wave'],
check_drc[obs]['s1d_corr']['correction_rescaled']+i/5.,
c=line_colors[i], lw=1, alpha=1)
#ax2.plot(check_drc[obs]['wave_DRS'],
# check_drc[obs]['s1d_DRS']+i/5.,
# c=line_colors[i], lw=1, alpha=1)
ax2.plot(check_drc[obs]['DRS_s1d']['wave'],
1./check_drc[obs]['DRS_s1d_corr']['correction_rescaled']+i/5.,
c=line_colors[i], lw=1, alpha=1)
i_max = 1.5 + i/5.
#ax1.plot(processed['coadd']['wave'], processed['coadd']['rescaled'], c='k', lw=1)
#ax2.plot(processed['coadd']['wave'], processed['coadd']['rescaled'], c='k', lw=1)
ax1.set_xlim(check_drc['wave'][0], check_drc['wave'][-1])
ax1.set_ylim(0.00, i_max)
ax1.legend(loc=3)
ax1.set_title('Night: {0:s} \n SLOPpy correction function'.format(night))
ax2.set_title('DRS correction function')
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
#gs.update(wspace=0.025, hspace=0.05)
gs.update(hspace=0.1)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
for i, obs in enumerate(lists['observations']):
sel = (check_drc[obs]['s1d_corr']['rescaled']> 0.05)
ax1.plot(check_drc['wave'][sel],
check_drc[obs]['s1d_corr']['rescaled'][sel]+i/5.,
c=line_colors[i], lw=1, alpha=0.5)
ax2.plot(check_drc[obs]['DRS_s1d']['wave'],
check_drc[obs]['DRS_s1d_corr']['rescaled']+i/5.,
c=line_colors[i], lw=1, alpha=0.5)
i_max = 1.5 + i/5.
#ax1.plot(processed['coadd']['wave'], processed['coadd']['rescaled'], c='k', lw=1)
#ax2.plot(processed['coadd']['wave'], processed['coadd']['rescaled'], c='k', lw=1)
ax1.set_xlim(check_drc['wave'][0], check_drc['wave'][-1])
ax1.set_ylim(0.00, i_max)
ax1.legend(loc=3)
ax1.set_title('Night: {0:s} \n SLOPpy corrected spectra'.format(night))
ax2.set_title('DRS corrected spectra')
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show()
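
# Minimal sketch of the plotting pattern used three times above: each
# spectrum is stacked with a vertical offset of i/5 and color-coded by BJD
# through a ScalarMappable, so the colorbar maps color to observing time.
# All data here are synthetic placeholders; np and plt come from the
# module-level imports.
def _sketch_stacked_spectra():
    wave = np.linspace(5000., 6000., 500)
    bjd = np.array([8750.1, 8750.2, 8750.3])        # hypothetical BJD - 2450000
    cmap = plt.cm.viridis
    norm = plt.Normalize(vmin=bjd[0], vmax=bjd[-1])
    fig, ax = plt.subplots()
    for i, t in enumerate(bjd):
        flux = 1.0 + 0.01 * np.sin(wave / 50. + i)  # fake rescaled spectrum
        ax.plot(wave, flux + i / 5., c=cmap(norm(t)), lw=1)
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    sm.set_array([])
    fig.colorbar(sm, ax=ax, label='BJD - 2450000.0')
    plt.show()
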
def write_differential_refraction(config_in):
night_dict = from_config_get_nights(config_in)
for night in night_dict:
print()
print('write_differential_refraction Night: ', night)
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'],
use_refraction=True)
input_data_s1d = load_from_cpickle('input_dataset_s1d_fibA', config_in['output'], night)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
# Let's keep it simple to save memory
dir_SLOPpy_drc = night + '_SLOPpy_drc/'
dir_DRS_drc = night + '_DRS_drc/'
        os.makedirs(dir_SLOPpy_drc, exist_ok=True)
        os.makedirs(dir_DRS_drc, exist_ok=True)
for obs in lists['observations']:
processed = dict(n_orders=input_data[obs]['n_orders'], n_pixels=input_data[obs]['n_pixels'])
preserve_flux = input_data[obs].get('absolute_flux', True)
processed['flux_s1d_BRF_corr'] = \
rebin_2d_to_1d(input_data[obs]['wave'], input_data[obs]['step'],input_data[obs]['e2ds'],
calib_data['blaze'], input_data['coadd']['wave'], input_data['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2BRF'])
processed['flux_s1d_BRF_corr_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'], input_data[obs]['step'],input_data[obs]['e2ds_err'],
calib_data['blaze'], input_data['coadd']['wave'], input_data['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2BRF'], is_error=True)
processed['flux_s1d_SRF_corr'] = \
rebin_2d_to_1d(input_data[obs]['wave'], input_data[obs]['step'],input_data[obs]['e2ds'],
calib_data['blaze'], input_data['coadd']['wave'], input_data['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])
processed['flux_s1d_SRF_corr_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'], input_data[obs]['step'],input_data[obs]['e2ds_err'],
calib_data['blaze'], input_data['coadd']['wave'], input_data['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'], is_error=True)
processed['DRS_coeff_flux'] = []
processed['DRS_s1d_corr'] = np.zeros(input_data_s1d[obs]['size'], dtype=np.double)
processed['DRS_e2ds_corr'] = np.zeros(np.shape(input_data[obs]['wave']), dtype=np.double)
for coeff_index in np.arange(0, 10, 1, dtype=np.int16):
try:
keyword = 'HIERARCH TNG DRS FLUX CORR COEFF' + repr(coeff_index)
processed['DRS_coeff_flux'].extend([input_data[obs]['header']['ccf'][keyword]])
processed['DRS_s1d_corr'] += \
input_data[obs]['header']['ccf'][keyword] * \
np.power(input_data_s1d[obs]['wave'], coeff_index)
processed['DRS_e2ds_corr'] += \
input_data[obs]['header']['ccf'][keyword] * \
np.power(input_data[obs]['wave'], coeff_index)
                except KeyError:
                    # coefficient not present in the header: the polynomial ends here
                    continue
processed['DRS_s1d_corr'] = input_data_s1d[obs]['flux']/processed['DRS_s1d_corr']
processed['DRS_e2ds_corr'] = input_data[obs]['e2ds']/processed['DRS_e2ds_corr']
"""Saving the e2ds files"""
hdu_e2ds_SLOPpy = fits.PrimaryHDU()
hdu_e2ds_DRS = fits.PrimaryHDU()
hdu_e2ds_SLOPpy.data = np.asarray(input_data[obs]['e2ds'], dtype=np.float32)
hdu_e2ds_DRS.data = np.asarray(processed['DRS_e2ds_corr'], dtype=np.float32)
for key_name, key_val in input_data[obs]['header']['e2ds'].items():
                if key_name in ('SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2'):
                    continue
if len(key_name) > 8:
hdu_e2ds_SLOPpy.header['HIERARCH '+ key_name] = key_val
hdu_e2ds_DRS.header['HIERARCH '+ key_name] = key_val
else:
hdu_e2ds_SLOPpy.header[key_name] = key_val
hdu_e2ds_DRS.header[key_name] = key_val
hdu_e2ds_SLOPpy.writeto(dir_SLOPpy_drc + obs + '_e2ds_A.fits', overwrite=True)
hdu_e2ds_DRS.writeto(dir_DRS_drc + obs + '_e2ds_A.fits', overwrite=True)
"""Saving the s1d files"""
hdu_s1d_SLOPpy_BRF = fits.PrimaryHDU()
hdu_s1d_SLOPpy_SRF = fits.PrimaryHDU()
hdu_s1d_DRS = fits.PrimaryHDU()
hdu_s1d_SLOPpy_BRF.data = np.asarray(processed['flux_s1d_BRF_corr'], dtype=np.float32)
hdu_s1d_SLOPpy_SRF.data = np.asarray(processed['flux_s1d_SRF_corr'], dtype=np.float32)
hdu_s1d_DRS.data = np.asarray(processed['DRS_s1d_corr'], dtype=np.float32)
for key_name, key_val in input_data[obs]['header']['s1d'].items():
                if key_name in ('SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2'):
                    continue
if len(key_name) > 8:
hdu_s1d_SLOPpy_BRF.header['HIERARCH '+ key_name] = key_val
hdu_s1d_SLOPpy_SRF.header['HIERARCH '+ key_name] = key_val
hdu_s1d_DRS.header['HIERARCH '+ key_name] = key_val
else:
hdu_s1d_SLOPpy_BRF.header[key_name] = key_val
hdu_s1d_SLOPpy_SRF.header[key_name] = key_val
hdu_s1d_DRS.header[key_name] = key_val
""" Fixing SLOPpy s1d keywords """
hdu_s1d_SLOPpy_BRF.header['CRVAL1'] = input_data['coadd']['wave'][0]
hdu_s1d_SLOPpy_BRF.header['CDELT1'] = input_data['coadd']['step'][0]
hdu_s1d_SLOPpy_SRF.header['CRVAL1'] = input_data['coadd']['wave'][0]
hdu_s1d_SLOPpy_SRF.header['CDELT1'] = input_data['coadd']['step'][0]
""" Fixing DRS s1d keywords """
hdu_s1d_DRS.header['CRVAL1'] = input_data_s1d[obs]['wave'][0]
hdu_s1d_DRS.header['CDELT1'] = input_data_s1d[obs]['step'][0]
hdu_s1d_SLOPpy_BRF.writeto(dir_SLOPpy_drc + obs + '_s1d_A.fits', overwrite=True)
hdu_s1d_SLOPpy_SRF.writeto(dir_SLOPpy_drc + obs + '_s1d_A_SRF.fits', overwrite=True)
hdu_s1d_DRS.writeto(dir_DRS_drc + obs + '_s1d_A.fits', overwrite=True)
print()
print('Night ', night, ' completed') | 23,686 | 45.083658 | 143 | py |
SLOPpy | SLOPpy-main/SLOPpy/sky_correction.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
__all__ = ["compute_sky_correction", "plot_sky_correction"]
subroutine_name = 'sky_correction'
def compute_sky_correction(config_in):
night_dict = from_config_get_nights(config_in)
for night in night_dict:
try:
processed = load_from_cpickle('skycorrected_fibA', config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations and calibration data for fiber B, if they exist"""
try:
input_data_B = load_from_cpickle('input_dataset_fibB', config_in['output'], night)
calib_data_B = load_from_cpickle('calibration_fibB', config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Skipped'))
continue
""" Retrieving the observations and calibration data for fiber A"""
print()
print(" Retrieving the data for night ", night)
input_data_A = load_from_cpickle('input_dataset_fibA', config_in['output'], night)
calib_data_A = load_from_cpickle('calibration_fibA', config_in['output'], night)
map_orders_A = calib_data_A['fibAB_orders_match']
map_orders_B = calib_data_B['fibAB_orders_match']
"""map_orders_A = [0,1,2,3] = map_orders_B"""
processed = {
'subroutine': 'sky_correction'
}
for obs in lists['observations']:
processed[obs] = {}
" computing the ratio between the lamp flux of fiber A and B"
print()
print(" Computing the ratio between the lamp flux of fiber A and B")
processed['ratioAB'] = calib_data_A['lamp'][map_orders_A, :]/calib_data_B['lamp'][map_orders_B, :]
first_obs = lists['observations'][0]
wave_difference = \
input_data_A[first_obs]['wave'][map_orders_A, :] - input_data_B[first_obs]['wave'][map_orders_B, :]
print()
print(" Wavelength difference between fiber A and B: ", \
np.average(wave_difference), " +- ", np.std(wave_difference), " \AA")
        if np.abs(np.average(wave_difference)) > 0.006 or np.std(wave_difference) > 0.006:
            raise ValueError("Wavelength solutions of fiber A and B differ: realignment is not implemented yet")
else:
""" We assume that the relative RV shift between finber A and fiber B in the pixel scale is
is minimal """
for obs in lists['observations']:
processed[obs]['sky_fibA'] = np.zeros([input_data_A['n_orders'], input_data_A['n_pixels']])
processed[obs]['sky_fibA'][map_orders_A, :] = \
processed['ratioAB'] * input_data_B[obs]['e2ds'][map_orders_B, :]
processed[obs]['e2ds'] = input_data_A[obs]['e2ds'] - processed[obs]['sky_fibA']
processed[obs]['e2ds_err'] = np.sqrt(
input_data_A[obs]['e2ds_err'][map_orders_A, :] ** 2 +
(processed['ratioAB'] * input_data_B[obs]['e2ds_err'][map_orders_B, :]) ** 2)
""" Zero or negative values are identified, flagged and substituted with another value """
#replacement = 0.1
#processed[obs]['null'] = (processed[obs]['e2ds'] <= replacement)
#processed[obs]['e2ds'][processed[obs]['null']] = replacement
save_to_cpickle('skycorrected_fibA', processed, config_in['output'], night)
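
# Minimal numerical sketch (never called by the pipeline) of the sky
# subtraction performed above: the fiber-B sky spectrum is scaled to
# fiber A through the lamp-flux ratio, subtracted from the target
# spectrum, and the errors are combined in quadrature. All arrays are
# synthetic placeholders.
def _sketch_sky_subtraction():
    lamp_A = np.full(10, 2.0)                      # lamp flux, fiber A
    lamp_B = np.full(10, 1.0)                      # lamp flux, fiber B
    ratioAB = lamp_A / lamp_B                      # relative throughput
    e2ds_A = np.full(10, 100.0)                    # target + sky on fiber A
    e2ds_B = np.full(10, 3.0)                      # sky on fiber B
    e2ds_A_err = np.sqrt(e2ds_A)
    e2ds_B_err = np.sqrt(e2ds_B)
    sky_fibA = ratioAB * e2ds_B                    # sky as seen by fiber A
    e2ds = e2ds_A - sky_fibA
    e2ds_err = np.sqrt(e2ds_A_err ** 2 + (ratioAB * e2ds_B_err) ** 2)
    return e2ds, e2ds_err
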
def plot_sky_correction(config_in, night_input=''):
night_dict = from_config_get_nights(config_in)
if night_input=='':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
for night in night_list:
print("plot_sky_correction Night: ", night)
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations and calibration data for fiber B, if they exist"""
try:
input_data_B = load_from_cpickle('input_dataset_fibB', config_in['output'], night)
calib_data_B = load_from_cpickle('calibration_fibB', config_in['output'], night)
except:
print("No fiber_B dataset available, skipping sky correction plot")
continue
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
input_data_A = load_from_cpickle('input_dataset_fibA', config_in['output'], night)
processed = load_from_cpickle('skycorrected_fibA', config_in['output'], night)
colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(lists, observational_pams)
fig, gs, cbax1, ax1, ax2, ax3 = grid_3plot_small()
for i, obs in enumerate(lists['observations']):
for k in range(0, input_data_B[obs]['n_orders']):
if i == 0 and k == 0:
ax2.scatter(input_data_B[obs]['wave'][k, :], input_data_B[obs]['e2ds'][k, :],
c=colors_scatter['mBJD'][obs], s=2, alpha=0.5, label='Sky observations (ORF)')
else:
ax2.scatter(input_data_B[obs]['wave'][k, :], input_data_B[obs]['e2ds'][k, :],
c=colors_scatter['mBJD'][obs], s=2, alpha=0.5)
for k in range(0, input_data_A[obs]['n_orders']):
if i == 0 and k == 0:
ax1.scatter(input_data_A[obs]['wave'][k, :], input_data_A[obs]['e2ds'][k, :],
c=colors_scatter['mBJD'][obs], s=1, alpha=0.5, label='Target observations (ORF)')
else:
ax1.scatter(input_data_A[obs]['wave'][k, :], input_data_A[obs]['e2ds'][k, :],
c=colors_scatter['mBJD'][obs], s=1, alpha=0.5)
ax3.scatter(input_data_A[obs]['wave'][k, :], processed[obs]['e2ds'][k, :],
c=colors_scatter['mBJD'][obs], s=1, alpha=0.5)
ax1.set_title('Night: {0:s} \n Input spectra'.format(night))
ax1.legend(loc=1)
ax2.set_title('Sky spectrum from fiber B')
ax3.set_title('After Sky correction')
        ax3.set_xlabel(r'$\lambda$ [$\AA$]')
sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
plt.show()
| 7,124 | 43.53125 | 113 | py |
SLOPpy | SLOPpy-main/SLOPpy/write_output_spectra.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines.clv_rm_subroutines import *
__all__ = ['write_output_spectra']
def write_output_spectra(config_in):
subroutine_name = 'write_output_spectra'
clv_rm_correction = True
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
system_dict = from_config_get_system(config_in)
planet_dict = from_config_get_planet(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
lightcurve_dict = from_config_get_transmission_lightcurve(config_in)
for night in night_dict:
clv_rm_correction = True
try:
clv_rm_modelling = load_from_cpickle('clv_rm_modelling', config_in['output'], night)
except:
clv_rm_correction = False
message = 'Computing'
try:
output_spectra = load_from_cpickle(subroutine_name, config_in['output'], night)
if clv_rm_correction and not output_spectra['clv_rm_correction']:
message = 'Updating with CLV-corrected spectra'
raise ValueError()
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, message))
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations( config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
output_spectra = {
'subroutine': subroutine_name,
'clv_rm_correction': clv_rm_correction
}
""" Adding the C-bands arrays to the dictionary"""
if clv_rm_correction:
clv_rm_modelling = load_from_cpickle('clv_rm_modelling', config_in['output'], night)
for n_obs, obs in enumerate( lists['observations']):
output_spectra[obs] = {}
output_spectra[obs]['BJD'] = input_data[obs]['BJD']
preserve_flux = input_data[obs].get('absolute_flux', True)
output_spectra[obs]['SRF_rebinned'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds'],
calib_data['blaze'],
shared_data['coadd']['wave'],
shared_data['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])
output_spectra[obs]['SRF_rebinned_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds_err'],
calib_data['blaze'],
shared_data['coadd']['wave'],
shared_data['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])
output_spectra[obs]['SRF_rescaling'], \
output_spectra[obs]['SRF_rescaled'], \
output_spectra[obs]['SRF_rescaled_err'] = perform_rescaling(
shared_data['coadd']['wave'], output_spectra[obs]['SRF_rebinned'], output_spectra[obs]['SRF_rebinned_err'],
observational_pams['wavelength_rescaling'])
if clv_rm_correction:
rv_shift = 0.0 # we always stay in SRF
correction, _ = clv_rm_correction_factor_computation(
clv_rm_modelling, shared_data['coadd']['wave'], shared_data['coadd']['step'], rv_shift, obs)
output_spectra[obs]['SRF_clv_rm_correction'] = correction
output_spectra[obs]['SRF_clv_rm_rebinned'] = output_spectra[obs]['SRF_rebinned'] / correction
output_spectra[obs]['SRF_clv_rm_rebinned_err'] = output_spectra[obs]['SRF_rebinned_err'] / correction
output_spectra[obs]['SRF_clv_rm_rescaled'] = output_spectra[obs]['SRF_rescaled'] / correction
output_spectra[obs]['SRF_clv_rm_rescaled_err'] = output_spectra[obs]['SRF_rescaled_err'] / correction
try:
output_spectra[obs]['phase'] = \
(observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'][0])/planet_dict['period'][0]
            except (TypeError, IndexError):
                # time_of_transit may be stored as a scalar rather than as an array
output_spectra[obs]['phase'] = \
(observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'])/planet_dict['period'][0]
save_to_cpickle(subroutine_name, output_spectra, config_in['output'], night)
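
# Minimal sketch of the phase computation above: the orbital phase is the
# time elapsed since mid-transit in units of the orbital period. The
# numbers below are illustrative only.
def _sketch_orbital_phase():
    bjd = 2458800.60               # hypothetical observation time [BJD]
    time_of_transit = 2458800.55   # hypothetical mid-transit time [BJD]
    period = 3.52                  # hypothetical orbital period [days]
    phase = (bjd - time_of_transit) / period
    return phase                   # ~0.0142, i.e. shortly after mid-transit
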
| 5,351 | 43.97479 | 123 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/rebin_subroutines.py | from __future__ import print_function, division
import numpy as np
from scipy.interpolate import interp1d
from SLOPpy.subroutines.constants import *
def shift_wavelength(wave, step, rv_shift):
wave_shift = rv_shift / speed_of_light_km + 1.00000
return wave * wave_shift, step * wave_shift
def shift_wavelength_array(wave, rv_shift):
wave_shift = rv_shift / speed_of_light_km + 1.00000
return wave * wave_shift
def shift_wavelength_to_rest(wave, step, rv_shift):
inverse_wave_shift = (-rv_shift) / speed_of_light_km + 1.00000
return wave / inverse_wave_shift, step / inverse_wave_shift
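
# Minimal sketch of the classical Doppler shift applied above: a positive
# rv_shift (in km/s) stretches the wavelength grid by the factor
# (1 + rv/c). The numbers are illustrative only; shift_wavelength and
# speed_of_light_km come from this module.
def _sketch_doppler_shift():
    wave = np.array([5000.0, 6000.0])              # [Angstrom]
    step = np.array([0.01, 0.01])
    rv_shift = 30.0                                # [km/s], receding source
    wave_shifted, step_shifted = shift_wavelength(wave, step, rv_shift)
    # 5000 A * (1 + 30 / 299792.458) ~ 5000.5 A
    assert np.allclose(wave_shifted, wave * (1.0 + rv_shift / speed_of_light_km))
    return wave_shifted, step_shifted
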
def rebin_exact_flux(wave_in, step_in, flux_in, wave_out, step_out,
quadrature=False,
preserve_flux=True):
"""
Previously named rebin_order
:param wave_in:
:param step_in:
:param flux_in:
:param wave_out:
:param step_out:
:param quadrature:
:param preserve_flux:
:return:
Spectral rebinning with flux conservation
"""
if quadrature:
flux_in = flux_in**2.
flux_out = np.zeros(np.shape(wave_out), dtype=np.double)
n1 = np.size(wave_in)
n2 = np.size(wave_out)
ns_prv = 0
for i in range(0, n2):
# print i, ' of ', n2
# Starting and ending point of the bin
wlb = wave_out[i] - step_out[i] / 2.000
wle = wave_out[i] + step_out[i] / 2.000
# Normalized flux value within the bin
fl_nm = 0.00
# b->blue and r->red side of the original spectrum which include the bin
# ib and ie are initialized with values close to the ones of the last iteration to save time
ib = ns_prv
ir = ns_prv
for ns in range(ns_prv, n1 - 1):
# simple algorithm to search the closest indexes near the bin boundaries
if wave_in[ib] + step_in[ib] / 2.00 < wlb: ib += 1
if wave_in[ir] + step_in[ir] / 2.00 < wle: ir += 1
# when we are close to the boundary of the spectra, we stop
if ir < ns - 3: break
# Fail-safe checks
if ib > ns_prv: ns_prv = ib - 3
if ib < 0 or ir > n1: continue
if ib > ir: continue
if ns_prv < 0: ns_prv = 0
# Now the true rebinning section
if ib == ir:
pix_s = (wle - wlb) / step_in[ib] # fraction
pix_e = 0.
flux_out[i] += pix_s * flux_in[ib]
fl_nm += pix_s
elif ib + 1 == ir:
pix_s = (wave_in[ib] + step_in[ib] * 0.5 - wlb) / step_in[ib]
pix_e = (wle - (wave_in[ir] - step_in[ir] * 0.5)) / step_in[ir]
flux_out[i] += (pix_s * flux_in[ib] + pix_e * flux_in[ir])
fl_nm += (pix_s + pix_e)
else:
pix_s = (wave_in[ib] + step_in[ib] * 0.5 - wlb) / step_in[ib]
pix_e = (wle - (wave_in[ir] - step_in[ir] * 0.5)) / step_in[ir]
flux_out[i] += (pix_s * flux_in[ib] + pix_e * flux_in[ir])
fl_nm += (pix_s + pix_e)
for j in range(ib + 1, ir):
flux_out[i] += flux_in[j]
fl_nm += 1.00
if (not preserve_flux) and fl_nm > 0.0:
if quadrature:
fl_nm *= fl_nm
flux_out[i] /= fl_nm
if quadrature:
return np.sqrt(flux_out)
else:
return flux_out
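
# Minimal sketch of the flux-conserving behaviour of rebin_exact_flux:
# rebinning a flat spectrum onto a twice-coarser grid with
# preserve_flux=True roughly doubles the counts per bin (photons are
# summed), while preserve_flux=False returns the bin-averaged value.
# The grids below are synthetic.
def _sketch_exact_flux_rebinning():
    wave_in = np.arange(5000.0, 5010.0, 0.1)
    step_in = np.full_like(wave_in, 0.1)
    flux_in = np.full_like(wave_in, 1.0)           # 1 count per 0.1 A pixel
    wave_out = np.arange(5001.0, 5009.0, 0.2)
    step_out = np.full_like(wave_out, 0.2)
    summed = rebin_exact_flux(wave_in, step_in, flux_in, wave_out, step_out,
                              preserve_flux=True)
    averaged = rebin_exact_flux(wave_in, step_in, flux_in, wave_out, step_out,
                                preserve_flux=False)
    # each 0.2 A output bin covers two input pixels: ~2 counts summed, ~1 averaged
    return summed, averaged
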
def rebin_with_interpolation(wave_in, step_in, flux_in, wave_out, step_out,
                             quadrature=False,
                             preserve_flux=True,
                             interp_kind='cubic'):
    """
    Spectral rebinning through interpolation of the cumulative flux: the flux
    is integrated, the integral is interpolated at the output bin edges, and
    the difference across each output bin gives the rebinned flux.
    """
ndata = len(wave_in)
normalization_factor = 1.0
if preserve_flux:
step_in_internal = np.ones(ndata)
step_out_internal = np.ones(len(step_out))
else:
step_in_internal = step_in
step_out_internal = step_out
if quadrature:
normalization_factor = (np.median(step_out) / np.median(step_in))
if quadrature:
flux_in = np.power(flux_in, 2.)
wave_in_cumul = np.zeros(ndata+1)
flux_in_cumul = np.zeros(ndata+1)
flux_in_cumul[0] = 0.0
wave_in_cumul[0] = wave_in[0] - step_in[0] / 2.0
for i in range(1, ndata):
flux_in_cumul[i] = flux_in_cumul[i - 1] + flux_in[i - 1] * step_in_internal[i - 1]
# wave_in_cumul[i] = wave_in[i]-step_in[i]/2.
wave_in_cumul[i] = wave_in[i] - (wave_in[i] - wave_in[i - 1]) / 2.
flux_in_cumul[ndata] = flux_in_cumul[ndata - 1] + flux_in[ndata - 1] * step_in_internal[ndata - 1]
wave_in_cumul[ndata] = wave_in[ndata - 1] + step_in[ndata - 1] / 2.
flux_cumul_interp1d = interp1d(wave_in_cumul, flux_in_cumul, kind=interp_kind, bounds_error=False, fill_value=0.000)
flux_out = (flux_cumul_interp1d(wave_out + step_out / 2.) - flux_cumul_interp1d(
wave_out - step_out / 2.)) / step_out_internal
if quadrature:
return np.sqrt(flux_out) / normalization_factor
else:
return flux_out
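
# Minimal sketch of the cumulative-flux idea implemented above: the flux is
# integrated, the integral is interpolated at the output bin edges, and the
# difference across each bin recovers the rebinned flux, conserving flux
# even when the bin edges fall between input pixels. The grids are synthetic.
def _sketch_cumulative_rebinning():
    wave_in = np.arange(5000.0, 5010.0, 0.1)
    step_in = np.full_like(wave_in, 0.1)
    flux_in = 1.0 + 0.1 * np.sin(wave_in)          # smooth synthetic spectrum
    wave_out = np.arange(5001.0, 5009.0, 0.25)
    step_out = np.full_like(wave_out, 0.25)
    flux_out = rebin_with_interpolation(wave_in, step_in, flux_in,
                                        wave_out, step_out,
                                        preserve_flux=False)
    # with preserve_flux=False the rebinned values track the local flux level
    return flux_out
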
def rebin_1d_to_1d(wave_in, step_in, flux_in, wave_out, step_out,
rv_shift=None,
is_error=False,
quadrature=False,
preserve_flux=True,
method='cubic_interpolation',
reference_value=None):
if is_error:
quadrature = True
        method = 'exact_flux'
if rv_shift:
wave_in, step_in = shift_wavelength(wave_in, step_in, rv_shift)
if method == 'exact_flux':
flux_out = rebin_exact_flux(wave_in, step_in, flux_in, wave_out, step_out,
quadrature=quadrature, preserve_flux=preserve_flux)
elif method == 'cubic_interpolation':
flux_out = rebin_with_interpolation(wave_in, step_in, flux_in, wave_out, step_out,
quadrature=quadrature, preserve_flux=preserve_flux, interp_kind='cubic')
else:
raise ValueError("method ", method, 'not supported by rebinning subroutine')
    if reference_value:
        wave_sel = (wave_out <= wave_in[0] + 0.005) | (wave_out >= wave_in[-1] - 0.005)
        flux_out[wave_sel] = reference_value
return flux_out
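
# Minimal usage sketch for the dispatcher above: error arrays force the
# quadrature + exact_flux path, while plain fluxes default to cubic
# interpolation of the cumulative flux. Arrays are synthetic.
def _sketch_rebin_dispatch():
    wave_in = np.arange(5000.0, 5010.0, 0.1)
    step_in = np.full_like(wave_in, 0.1)
    flux_in = np.full_like(wave_in, 1.0)
    flux_err = np.full_like(wave_in, 0.1)
    wave_out = np.arange(5001.0, 5009.0, 0.2)
    step_out = np.full_like(wave_out, 0.2)
    flux_out = rebin_1d_to_1d(wave_in, step_in, flux_in, wave_out, step_out)
    err_out = rebin_1d_to_1d(wave_in, step_in, flux_err, wave_out, step_out,
                             is_error=True)
    return flux_out, err_out
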
def rebin_2d_to_1d(wave_in, step_in, flux_in, blaze_in, wave_out, step_out,
rv_shift=None,
is_error=False,
quadrature=False,
preserve_flux=True,
skip_blaze_correction=False,
method='cubic_interpolation',
reference_value=None):
"""
:param wave_in:
:param step_in:
:param flux_in:
:param blaze_in:
:param wave_out:
:param step_out:
:param rv_shift:
:param is_error:
:param quadrature:
:param preserve_flux:
:param skip_blaze_correction:
:param method:
:return: flux_out
"""
if rv_shift:
wave_in, step_in = shift_wavelength(wave_in, step_in, rv_shift)
o_axis, f_axis = np.shape(wave_in)
n_rebin = np.size(wave_out)
if skip_blaze_correction or blaze_in is None:
flux_deblazed_in = flux_in
else:
flux_deblazed_in = flux_in / blaze_in
if is_error:
quadrature = True
method = 'exact_flux'
# Rebinning of the individual orders. We keep track of starting
# and ending points of each order in the rebinned solution
flux_rebin_pix = np.zeros([o_axis, n_rebin], dtype=np.double)
counter_is = np.zeros(o_axis, dtype=np.int64) - 1
counter_ie = np.zeros(o_axis, dtype=np.int64) - 1
skip_order = np.ones(o_axis, dtype=bool)
for ii in range(0, o_axis):
counter_is[ii] = np.argmin(abs(wave_in[ii, 0] - wave_out))
counter_ie[ii] = np.argmin(abs(wave_in[ii, -1] - wave_out))
if wave_in[ii, 0] > np.amax(wave_out) or wave_in[ii, -1] < np.amin(wave_out):
continue
skip_order[ii] = False
i = counter_is[ii]
j = counter_ie[ii]+1
flux_rebin_pix[ii, i:j] = rebin_1d_to_1d(wave_in[ii, :],
step_in[ii, :],
flux_deblazed_in[ii, :],
wave_out[i:j],
step_out[i:j],
quadrature=quadrature,
is_error=is_error,
preserve_flux=preserve_flux,
method=method,
reference_value=reference_value)
flux_out = np.zeros(n_rebin, dtype=np.double)
if reference_value:
flux_out += reference_value
if quadrature or is_error:
flux_rebin_pix = np.power(flux_rebin_pix, 2)
flux_out[counter_is[0]:counter_ie[0]] = flux_rebin_pix[0, counter_is[0]:counter_ie[0]]
for ii in range(1, o_axis):
if skip_order[ii]: continue
p_ie = counter_ie[ii - 1]
j_is = counter_is[ii]
j_ie = counter_ie[ii] + 1 # adding one because it is used in interval definition - Python quirks
if p_ie > j_is:
nr_joint = float(p_ie - j_is)
ij = np.arange(j_is, p_ie, 1, dtype=np.int64)
ij_fraction = np.power((ij-j_is) / nr_joint, 4)
flux_out[ij] = flux_rebin_pix[ii,ij] * ij_fraction + flux_rebin_pix[ii-1,ij] * (1. - ij_fraction)
flux_out[p_ie:j_ie] = flux_rebin_pix[ii, p_ie:j_ie]
else:
flux_out[j_is:j_ie] = flux_rebin_pix[ii, j_is:j_ie]
return np.sqrt(flux_out)
else:
flux_out[counter_is[0]:counter_ie[0]] = flux_rebin_pix[0, counter_is[0]:counter_ie[0]]
for ii in range(1, o_axis):
if skip_order[ii]: continue
p_ie = counter_ie[ii - 1]
j_is = counter_is[ii]
j_ie = counter_ie[ii] + 1
if p_ie > j_is:
nr_joint = float(p_ie - j_is)
ij = np.arange(j_is, p_ie, 1, dtype=np.int64)
ij_fraction = np.power((ij-j_is) / nr_joint, 2.)
flux_out[ij] = flux_rebin_pix[ii,ij] * ij_fraction + flux_rebin_pix[ii-1,ij] * (1. - ij_fraction)
flux_out[p_ie:j_ie] = flux_rebin_pix[ii, p_ie:j_ie]
else:
flux_out[j_is:j_ie] = flux_rebin_pix[ii, j_is:j_ie]
return flux_out
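
# Minimal sketch of the order-stitching weights used above: across the
# overlap between two consecutive orders, the weight of the redder order
# ramps up as ((i - i_start) / n_overlap)**2 (power 4 on the squared
# fluxes in the quadrature branch) while the bluer order fades out with
# the complementary weight. Numbers are illustrative.
def _sketch_order_blending():
    n_overlap = 10.0
    ij = np.arange(0, 10, dtype=np.int64)
    ij_fraction = np.power(ij / n_overlap, 2.)     # weight of the redder order
    blue = np.full(10, 1.0)                        # flux from the bluer order
    red = np.full(10, 2.0)                         # flux from the redder order
    blended = red * ij_fraction + blue * (1. - ij_fraction)
    return blended                                 # ramps smoothly from 1 toward 2
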
def rebin_1d_to_2d(wave_in, step_in, flux_in, wave_out, step_out,
rv_shift=None,
is_error=False,
quadrature=False,
preserve_flux=True,
method='cubic_interpolation',
reference_value=None):
"""
:param wave_in:
:param step_in:
:param flux_in:
:param wave_out:
:param step_out:
:param rv_shift:
:param is_error:
:param quadrature:
:param preserve_flux:
:param method:
:return:
"""
if rv_shift:
wave_in, step_in = shift_wavelength(wave_in, step_in, rv_shift)
if is_error:
quadrature = True
        method = 'exact_flux'
o_axis, f_axis = np.shape(wave_out)
flux_out = np.zeros([o_axis, f_axis], dtype=np.double)
if reference_value:
flux_out += reference_value
for ii in range(0, o_axis):
flux_out[ii, :]= rebin_1d_to_1d(wave_in,
step_in,
flux_in,
wave_out[ii, :],
step_out[ii, :],
quadrature=quadrature,
is_error=is_error,
preserve_flux=preserve_flux,
method=method,
reference_value=reference_value)
return flux_out
def rebin_2d_to_2d(wave_in, step_in, flux_in, wave_out, step_out,
rv_shift=None,
is_error=False,
quadrature=False,
preserve_flux=True,
method='cubic_interpolation',
reference_value=None):
"""
    :param wave_in: 2D (order-by-order) wavelength array of the input spectrum
    :param step_in: 2D (order-by-order) step-size array of the input spectrum
    :param flux_in: 2D (order-by-order) flux array of the input spectrum
    :param wave_out: 2D (order-by-order) wavelength array of the rebinned spectrum
    :param step_out: 2D (order-by-order) step-size array of the rebinned spectrum
    :param rv_shift: radial velocity shift [km/s] applied to the input wavelengths
    :param is_error: if True, the input is treated as an error array (quadrature + exact_flux)
    :param quadrature: if True, input values are combined in quadrature
    :param preserve_flux: if True, the counts within each bin are summed instead of averaged
    :param method: rebinning method, either 'cubic_interpolation' or 'exact_flux'
:return: flux_out, 2D (order-by-order) flux array, with same size as wave_out
"""
if rv_shift:
wave_in, step_in = shift_wavelength(wave_in, step_in, rv_shift)
if is_error:
quadrature = True
        method = 'exact_flux'
o_axis_input, f_axis_input = np.shape(wave_in)
o_axis, f_axis = np.shape(wave_out)
if o_axis_input != o_axis:
raise ValueError("Mismatch between input and output number of orders in rebin_2d_to_2d")
flux_out = np.zeros([o_axis, f_axis], dtype=np.double)
if reference_value:
flux_out += reference_value
for ii in range(0, o_axis):
flux_out[ii, :] = rebin_1d_to_1d(wave_in[ii, :],
step_in[ii, :],
flux_in[ii, :],
wave_out[ii, :],
step_out[ii, :],
quadrature=quadrature,
is_error=is_error,
preserve_flux=preserve_flux,
method=method,
reference_value=reference_value)
return flux_out | 13,958 | 33.8975 | 120 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/plot_subroutines.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
def make_color_array(lists, input_data):
""" Creation of the color array, based on the BJD of the observations
"""
bjd = []
am = []
for obs in lists['observations']:
bjd.append(input_data[obs]['BJD'] - 2450000.0)
am.append(input_data[obs]['AIRMASS'])
colors = np.asarray(bjd)
cmap = plt.cm.viridis
#cmap = plt.cm.Spectral
line_colors = cmap(np.linspace(0, 1, len(lists['observations'])))
return colors, cmap, line_colors
def make_color_array_matplotlib3(lists, input_data):
""" Creation of the color array, based on the BJD of the observations
"""
bjd = []
mbjd = []
am = []
for obs in lists['observations']:
bjd.append(input_data[obs]['BJD'])
mbjd.append(input_data[obs]['mBJD'])
am.append(input_data[obs]['AIRMASS'])
color_cmap = plt.cm.viridis
color_bjd_norm = plt.Normalize(vmin=bjd[0], vmax=bjd[-1])
colors_bjd = color_cmap(color_bjd_norm(np.asarray(bjd)))
color_am_norm = plt.Normalize(vmin=np.amin(am), vmax=np.amax(am))
colors_am = color_cmap(color_am_norm(np.asarray(am)))
colors_plot = {
'BJD' : {},
'mBJD' : {},
'AIRMASS' : {}
}
colors_scatter = {
'BJD' : {},
'mBJD' : {},
'AIRMASS' : {}
}
colors_properties = {
'norm' : {
'BJD': plt.Normalize(vmin=bjd[0], vmax=bjd[-1]),
'mBJD': plt.Normalize(vmin=mbjd[0], vmax=mbjd[-1]),
'AIRMASS': plt.Normalize(vmin=np.amin(am), vmax=np.amax(am))
},
'cmap' : plt.cm.viridis
}
for obs in lists['observations']:
colors_plot['mBJD'][obs] = colors_properties['cmap'](
colors_properties['norm']['mBJD'](input_data[obs]['mBJD']))
colors_plot['BJD'][obs] = colors_properties['cmap'](
colors_properties['norm']['BJD'](input_data[obs]['BJD']))
colors_plot['AIRMASS'][obs] = colors_properties['cmap'](
colors_properties['norm']['AIRMASS'](input_data[obs]['AIRMASS']))
colors_scatter['mBJD'][obs] = [colors_properties['cmap'](
colors_properties['norm']['mBJD'](input_data[obs]['mBJD']))[:-1]]
colors_scatter['BJD'][obs] = [colors_properties['cmap'](
colors_properties['norm']['BJD'](input_data[obs]['BJD']))[:-1]]
colors_scatter['AIRMASS'][obs] = [colors_properties['cmap'](
colors_properties['norm']['AIRMASS'](input_data[obs]['AIRMASS']))[:-1]]
return colors_properties, colors_plot, colors_scatter
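
# Minimal usage sketch for the color helpers above, with a fake
# two-observation dataset: line plots take the RGBA tuple from colors_plot,
# scatter plots take the single-row array from colors_scatter, and the
# ScalarMappable built from colors_properties feeds the colorbar.
def _sketch_color_arrays():
    lists = {'observations': ['obs1', 'obs2']}
    input_data = {
        'obs1': {'BJD': 2458800.1, 'mBJD': 58799.6, 'AIRMASS': 1.2},
        'obs2': {'BJD': 2458800.2, 'mBJD': 58799.7, 'AIRMASS': 1.4},
    }
    colors_properties, colors_plot, colors_scatter = \
        make_color_array_matplotlib3(lists, input_data)
    sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'],
                               norm=colors_properties['norm']['BJD'])
    sm.set_array([])
    return colors_plot['BJD']['obs1'], colors_scatter['BJD']['obs2'], sm
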
def grid_1plot():
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(1, 2, width_ratios=[50, 1])
ax = plt.subplot(gs[0, 0])
cbax1 = plt.subplot(gs[0, 1])
fig.subplots_adjust(wspace=0.04, hspace=0.25)
return fig, gs, cbax1, ax
def grid_2plot(sharex=True, sharey=True):
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
if sharey and sharex:
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
elif sharex:
ax2 = plt.subplot(gs[1, 0], sharex=ax1)
elif sharey:
ax2 = plt.subplot(gs[1, 0], sharey=ax1)
else:
ax2 = plt.subplot(gs[1, 0])
cbax1 = plt.subplot(gs[:, 1])
fig.subplots_adjust(wspace=0.04, hspace=0.25)
return fig, gs, cbax1, ax1, ax2
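
# Minimal usage sketch for grid_2plot: two stacked panels sharing both axes
# plus a thin colorbar axis on the right. The plotted data are placeholders.
def _sketch_grid_2plot():
    fig, gs, cbax1, ax1, ax2 = grid_2plot()
    x = np.linspace(0., 1., 50)
    ax1.plot(x, x)
    ax2.plot(x, 1. - x)
    sm = plt.cm.ScalarMappable(cmap=plt.cm.viridis, norm=plt.Normalize(0., 1.))
    sm.set_array([])
    fig.colorbar(sm, cax=cbax1, label='placeholder scale')
    plt.show()
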
def grid_3plot_small(sharex=False, sharey=False, partial_share=True):
fig = plt.figure(figsize=(12, 9))
gs = GridSpec(3, 2, width_ratios=[50, 1], height_ratios = [3, 1, 3])
ax1 = plt.subplot(gs[0, 0])
if sharey and sharex:
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
ax3 = plt.subplot(gs[2, 0], sharex=ax1, sharey=ax1)
elif sharex:
ax2 = plt.subplot(gs[1, 0], sharex=ax1)
ax3 = plt.subplot(gs[2, 0], sharex=ax1)
elif sharey:
ax2 = plt.subplot(gs[1, 0], sharey=ax1)
ax3 = plt.subplot(gs[2, 0], sharey=ax1)
elif partial_share:
ax2 = plt.subplot(gs[1, 0], sharex=ax1)
ax3 = plt.subplot(gs[2, 0], sharex=ax1, sharey=ax1)
else:
ax2 = plt.subplot(gs[1, 0])
ax3 = plt.subplot(gs[2, 0])
cbax1 = plt.subplot(gs[:, 1])
fig.subplots_adjust(wspace=0.04, hspace=0.25)
return fig, gs, cbax1, ax1, ax2, ax3 | 4,351 | 31 | 83 | py |