id (string, length 1-265) | text (string, length 6-5.19M) | dataset_id (7 classes)
---|---|---|
4805318 | <reponame>ian0/ARC
"""NUI Galway CT5132/CT5148 Programming and Tools for AI (<NAME>)
Solution for Assignment 3: File ed36ccf7.json
Student name(s): <NAME>
Student ID(s): 12100610
"""
import numpy as np
import sys
from common_utils import load_file, print_grid
def solve(grid):
"""
Given the input grid from any training or evaluation pair in the input json file,
solve should return the correct output grid in the same format.
    Allowed formats are: 1. a JSON string containing a list of lists; 2. a Python list of lists;
    or 3. a NumPy 2D array of type int.
:param grid: the input grid
:return: the modified grid
>>> ig = [[0, 0, 0], [5, 0, 0], [0, 5, 5]]
>>> solve(ig)
array([[0, 0, 5],
[0, 0, 5],
[0, 5, 0]])
"""
grid = np.asarray(grid)
return np.rot90(grid)
def main():
"""
    Main method: reads in the file specified on the command line and calls the
    solve function on each grid, printing the output.
"""
inputs = load_file(sys.argv[1])
for grid in inputs:
output = solve(grid)
print_grid(output)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
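    # Example invocation (script and data file names are illustrative assumptions):
    #   python solve_ed36ccf7.py data/training/ed36ccf7.json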
| StarcoderdataPython |
133876 | <filename>raft/node.py
import os
import json
import time
import random
import logging
from .log import Log
from .rpc import Rpc
from .config import config
# logging.basicConfig(level=logging.INFO,
# format='%(asctime)s %(levelname)s %(name)s %(funcName)s [line:%(lineno)d] %(message)s')
logger = logging.getLogger(__name__)
logger.propagate = False
env = os.environ.get("env")
conf = config[env] if env else config['DEV']
class Node(object):
"""
raft node
"""
def __init__(self, meta):
self.role = 'follower'
self.group_id = meta['group_id']
self.id = meta['id']
self.addr = meta['addr']
self.peers = meta['peers']
self.path = conf.node_path
if not os.path.exists(self.path):
os.makedirs(self.path)
# persistent state
self.current_term = 0
self.voted_for = None
# init persistent state
self.load()
logname = self.path+self.group_id + '_' + self.id + "_log.json"
self.log = Log(logname)
# volatile state
# rule 1, 2
self.commit_index = 0
self.last_applied = 0
# volatile state on leaders
# rule 1, 2
self.next_index = {_id: self.log.last_log_index + 1 for _id in self.peers}
self.match_index = {_id: -1 for _id in self.peers}
# append entries
self.leader_id = None
# request vote
self.vote_ids = {_id: 0 for _id in self.peers}
# client request
self.client_addr = None
# tick
self.wait_ms = (10, 20)
self.next_leader_election_time = time.time() + random.randint(*self.wait_ms)
self.next_heartbeat_time = 0
# rpc
self.rpc_endpoint = Rpc(self.addr, timeout=2)
# log
fmt = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(funcName)s [line:%(lineno)d] %(message)s')
handler = logging.FileHandler(self.path + self.group_id + '_' + self.id + '.log', 'a')
handler.setFormatter(fmt)
logger.addHandler(handler)
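    # Illustrative shape of the `meta` dict expected by __init__ above (the values and the
    # exact addr/peer format are assumptions based on how rpc_endpoint.send() is used):
    #   meta = {'group_id': '0', 'id': '1', 'addr': ('127.0.0.1', 10001),
    #           'peers': {'2': ('127.0.0.1', 10002), '3': ('127.0.0.1', 10003)}}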
def load(self):
filename = self.path + self.group_id + "_" + self.id + '_persistent.json'
if os.path.exists(filename):
with open(filename, 'r') as f:
data = json.load(f)
self.current_term = data['current_term']
self.voted_for = data['voted_for']
else:
self.save()
def save(self):
data = {'current_term': self.current_term,
'voted_for': self.voted_for,
}
filename = self.path + self.group_id + "_" + self.id + '_persistent.json'
with open(filename, 'w') as f:
json.dump(data, f, indent=4)
def redirect(self, data, addr):
if data == None:
return None
if data['type'] == 'client_append_entries':
if self.role != 'leader':
if self.leader_id:
logger.info('redirect: client_append_entries to leader')
self.rpc_endpoint.send(data, self.peers[self.leader_id])
return None
else:
self.client_addr = (addr[0], conf.cport)
# logger.info("client addr " + self.client_addr[0] +'_' +str(self.client_addr[1]))
return data
if data['dst_id'] != self.id:
logger.info('redirect: to ' + data['dst_id'])
            # logger.info('redirect to leader')
self.rpc_endpoint.send(data, self.peers[data['dst_id']])
return None
else:
return data
return data
def append_entries(self, data):
'''
append entries rpc
only used in follower state
'''
response = {'type': 'append_entries_response',
'src_id': self.id,
'dst_id': data['src_id'],
'term': self.current_term,
'success': False
}
# append_entries: rule 1
if data['term'] < self.current_term:
logger.info(' 2. smaller term')
logger.info(' 3. success = False: smaller term')
logger.info(' 4. send append_entries_response to leader ' + data['src_id'])
response['success'] = False
self.rpc_endpoint.send(response, self.peers[data['src_id']])
return
self.leader_id = data['leader_id']
# heartbeat
if data['entries'] == []:
logger.info(' 4. heartbeat')
return
prev_log_index = data['prev_log_index']
prev_log_term = data['prev_log_term']
tmp_prev_log_term = self.log.get_log_term(prev_log_index)
# append_entries: rule 2, 3
# append_entries: rule 3
if tmp_prev_log_term != prev_log_term:
logger.info(' 4. success = False: index not match or term not match')
logger.info(' 5. send append_entries_response to leader ' + data['src_id'])
logger.info(' 6. log delete_entries')
logger.info(' 6. log save')
response['success'] = False
self.rpc_endpoint.send(response, self.peers[data['src_id']])
self.log.delete_entries(prev_log_index)
# append_entries rule 4
else:
logger.info(' 4. success = True')
logger.info(' 5. send append_entries_response to leader ' + data['src_id'])
logger.info(' 6. log append_entries')
logger.info(' 7. log save')
response['success'] = True
self.rpc_endpoint.send(response, self.peers[data['src_id']])
self.log.append_entries(prev_log_index, data['entries'])
# append_entries rule 5
leader_commit = data['leader_commit']
if leader_commit > self.commit_index:
commit_index = min(leader_commit, self.log.last_log_index)
self.commit_index = commit_index
logger.info(' 8. commit_index = ' + str(commit_index))
return
def request_vote(self, data):
'''
request vote rpc
only used in follower state
'''
response = {'type': 'request_vote_response',
'src_id': self.id,
'dst_id': data['src_id'],
'term': self.current_term,
'vote_granted': False
}
# request vote: rule 1
if data['term'] < self.current_term:
logger.info(' 2. smaller term')
logger.info(' 3. success = False')
logger.info(' 4. send request_vote_response to candidate ' + data['src_id'])
response['vote_granted'] = False
self.rpc_endpoint.send(response, self.peers[data['src_id']])
return
logger.info(' 2. same term')
candidate_id = data['candidate_id']
last_log_index = data['last_log_index']
last_log_term = data['last_log_term']
if self.voted_for == None or self.voted_for == candidate_id:
            # Raft (section 5.4.1): the candidate's log is "at least as up-to-date" if its last
            # term is higher, or the terms are equal and its log is at least as long.
            if last_log_term > self.log.last_log_term or \
                    (last_log_term == self.log.last_log_term and last_log_index >= self.log.last_log_index):
self.voted_for = data['src_id']
self.save()
response['vote_granted'] = True
self.rpc_endpoint.send(response, self.peers[data['src_id']])
logger.info(' 3. success = True: candidate log is newer')
logger.info(' 4. send request_vote_response to candidate ' + data['src_id'])
else:
self.voted_for = None
self.save()
response['vote_granted'] = False
self.rpc_endpoint.send(response, self.peers[data['src_id']])
logger.info(' 3. success = False: candidate log is older')
logger.info(' 4. send request_vote_response to candidate ' + data['src_id'])
else:
response['vote_granted'] = False
self.rpc_endpoint.send(response, self.peers[data['src_id']])
            logger.info(' 3. success = False: has voted for ' + self.voted_for)
logger.info(' 4. send request_vote_response to candidate ' + data['src_id'])
return
def all_do(self, data):
'''
all servers: rule 1, 2
'''
logger.info('-------------------------------all------------------------------------------')
t = time.time()
if self.commit_index > self.last_applied:
self.last_applied = self.commit_index
logger.info('all: 1. last_applied = ' + str(self.last_applied))
if data == None:
return
if data['type'] == 'client_append_entries':
return
if data['term'] > self.current_term:
logger.info( f'all: 1. bigger term: { data["term"]} > {self.current_term}' )
logger.info(' 2. become follower')
self.next_leader_election_time = t + random.randint(*self.wait_ms)
self.role = 'follower'
self.current_term = data['term']
self.voted_for = None
self.save()
return
def follower_do(self, data):
'''
rules for servers: follower
'''
logger.info('-------------------------------follower-------------------------------------')
t = time.time()
# follower rules: rule 1
if data != None:
if data['type'] == 'append_entries':
logger.info('follower: 1. recv append_entries from leader ' + data['src_id'])
if data['term'] == self.current_term:
logger.info(' 2. same term')
logger.info(' 3. reset next_leader_election_time')
self.next_leader_election_time = t + random.randint(*self.wait_ms)
self.append_entries(data)
elif data['type'] == 'request_vote':
logger.info('follower: 1. recv request_vote from candidate ' + data['src_id'])
self.request_vote(data)
# follower rules: rule 2
if t > self.next_leader_election_time:
logger.info('follower:1. become candidate')
self.next_leader_election_time = t + random.randint(*self.wait_ms)
self.role = 'candidate'
self.current_term += 1
self.voted_for = self.id
self.save()
self.vote_ids = {_id: 0 for _id in self.peers}
return
def candidate_do(self, data):
'''
rules for servers: candidate
'''
logger.info('-------------------------------candidate------------------------------------')
t = time.time()
# candidate rules: rule 1
for dst_id in self.peers:
if self.vote_ids[dst_id] == 0:
logger.info('candidate: 1. send request_vote to peer ' + dst_id)
request = {
'type': 'request_vote',
'src_id': self.id,
'dst_id': dst_id,
'term': self.current_term,
'candidate_id': self.id,
'last_log_index': self.log.last_log_index,
'last_log_term': self.log.last_log_term
}
# logger.info(request)
self.rpc_endpoint.send(request, self.peers[dst_id])
# if data != None and data['term'] < self.current_term:
# logger.info('candidate: 1. smaller term from ' + data['src_id'])
# logger.info(' 2. ignore')
# return
if data != None and data['term'] == self.current_term:
# candidate rules: rule 2
if data['type'] == 'request_vote_response':
logger.info('candidate: 1. recv request_vote_response from follower ' + data['src_id'])
self.vote_ids[data['src_id']] = data['vote_granted']
vote_count = sum(list(self.vote_ids.values()))
                # majority of the full cluster (peers plus this candidate itself)
                if 2 * (vote_count + 1) > len(self.peers) + 1:
logger.info(' 2. become leader')
self.role = 'leader'
self.voted_for = None
self.save()
self.next_heartbeat_time = 0
self.next_index = {_id: self.log.last_log_index + 1 for _id in self.peers}
self.match_index = {_id: 0 for _id in self.peers}
return
# candidate rules: rule 3
elif data['type'] == 'append_entries':
logger.info('candidate: 1. recv append_entries from leader ' + data['src_id'])
logger.info(' 2. become follower')
self.next_leader_election_time = t + random.randint(*self.wait_ms)
self.role = 'follower'
self.voted_for = None
self.save()
return
# candidate rules: rule 4
if t > self.next_leader_election_time:
logger.info('candidate: 1. leader_election timeout')
logger.info(' 2. become candidate')
self.next_leader_election_time = t + random.randint(*self.wait_ms)
self.role = 'candidate'
self.current_term += 1
self.voted_for = self.id
self.save()
self.vote_ids = {_id: 0 for _id in self.peers}
return
def leader_do(self, data):
'''
rules for servers: leader
'''
logger.info('-------------------------------leader---------------------------------------')
# leader rules: rule 1, 3
t = time.time()
if t > self.next_heartbeat_time:
self.next_heartbeat_time = t + random.randint(0, 5)
for dst_id in self.peers:
logger.info('leader:1. send append_entries to peer ' + dst_id)
request = {'type': 'append_entries',
'src_id': self.id,
'dst_id': dst_id,
'term': self.current_term,
'leader_id': self.id,
'prev_log_index': self.next_index[dst_id] - 1,
'prev_log_term': self.log.get_log_term(self.next_index[dst_id] - 1),
'entries': self.log.get_entries(self.next_index[dst_id]),
'leader_commit': self.commit_index
}
self.rpc_endpoint.send(request, self.peers[dst_id])
# leader rules: rule 2
if data != None and data['type'] == 'client_append_entries':
data['term'] = self.current_term
self.log.append_entries(self.log.last_log_index, [data])
logger.info('leader:1. recv append_entries from client')
logger.info(' 2. log append_entries')
logger.info(' 3. log save')
return
# leader rules: rule 3.1, 3.2
if data != None and data['term'] == self.current_term:
if data['type'] == 'append_entries_response':
logger.info('leader:1. recv append_entries_response from follower ' + data['src_id'])
if data['success'] == False:
self.next_index[data['src_id']] -= 1
logger.info(' 2. success = False')
logger.info(' 3. next_index - 1')
else:
self.match_index[data['src_id']] = self.next_index[data['src_id']]
self.next_index[data['src_id']] = self.log.last_log_index + 1
logger.info(' 2. success = True')
logger.info(' 3. match_index = ' + str(self.match_index[data['src_id']]) + ' next_index = ' + str(self.next_index[data['src_id']]))
# leader rules: rule 4
while True:
N = self.commit_index + 1
count = 0
for _id in self.match_index:
if self.match_index[_id] >= N:
count += 1
            # majority of the full cluster (followers plus the leader itself)
            if 2 * (count + 1) > len(self.peers) + 1:
self.commit_index = N
logger.info('leader:1. commit + 1')
if self.client_addr:
response = {'index': self.commit_index}
self.rpc_endpoint.send(response, self.client_addr)
break
else:
logger.info('leader:2. commit = ' + str(self.commit_index))
break
def run(self):
# data = {
# "type": "create_node_success",
# "group_id": self.group_id,
# "id": self.id
# }
# self.rpc_endpoint.send(data, (conf.ip, conf.cport))
# data = {
# "type": "create_group_node_success",
# "group_id": self.group_id,
# "id": self.id
# }
# self.rpc_endpoint.send(data, (conf.ip, conf.cport))
while True:
try:
try:
data, addr = self.rpc_endpoint.recv()
except Exception as e:
data, addr = None, None
data = self.redirect(data, addr)
self.all_do(data)
if self.role == 'follower':
self.follower_do(data)
if self.role == 'candidate':
self.candidate_do(data)
if self.role == 'leader':
self.leader_do(data)
except Exception as e:
logger.info(e)
# self.rpc_endpoint.close() | StarcoderdataPython |
3362947 | <filename>ail/wrapper/vev_norm_wrapper.py
import math
from copy import deepcopy
from typing import Dict, Union
import gym
import numpy as np
from ail.common.running_stats import RunningMeanStd
from ail.common.type_alias import GymEnv, GymStepReturn
class VecNormalize(gym.Wrapper):
"""
    A moving-average normalizing wrapper for a gym environment.
    :param env: the environment to wrap
    :param training: whether or not to update the moving averages
    :param norm_obs: whether to normalize observations (default: True)
    :param norm_reward: whether to normalize rewards (default: True)
    :param clip_obs: max absolute value for observations
    :param clip_reward: max absolute value for the (discounted) reward
    :param gamma: discount factor
    :param epsilon: small constant to avoid division by zero
"""
__slots__ = [
"ret_rms",
"ret",
"gamma",
"epsilon",
"training",
"norm_obs",
"norm_reward",
"clip_obs",
"clip_reward",
]
def __init__(
self,
env: GymEnv,
training: bool = True,
norm_obs: bool = True,
norm_reward: bool = True,
clip_obs: float = 10.0,
clip_reward: float = 10.0,
gamma: float = 0.99,
epsilon: float = 1e-8,
):
assert isinstance(
env.observation_space, (gym.spaces.Box, gym.spaces.Dict)
), "VecNormalize only support `gym.spaces.Box` and `gym.spaces.Dict` observation spaces"
        params = (clip_obs, clip_reward, gamma, epsilon)
        for param in params:
            assert isinstance(param, float)
super().__init__(env)
if isinstance(self.observation_space, gym.spaces.Dict):
self.obs_keys = set(self.observation_space.spaces.keys())
self.obs_spaces = self.observation_space.spaces
self.obs_rms = {
key: RunningMeanStd(shape=space.shape)
for key, space in self.obs_spaces.items()
}
else:
self.obs_keys, self.obs_spaces = None, None
self.obs_rms = RunningMeanStd(shape=self.observation_space.shape)
self.ret_rms = RunningMeanStd(shape=())
self.clip_obs = clip_obs
self.clip_rew = clip_reward
# Returns: discounted rewards
# * Currently only support one env
self.num_envs = 1
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
self.training = training
self.norm_obs = norm_obs
self.norm_reward = norm_reward
self.old_obs = np.array([])
self.old_reward = np.array([])
def step(self, action: np.ndarray) -> GymStepReturn:
"""
        Apply the action to the wrapped environment, then normalize the resulting
        observation and reward.
        Returns ``(observations, rewards, dones, infos)``, where ``dones`` indicates
        whether each episode has ended.
"""
obs, rewards, dones, infos = self.env.step(action)
self.old_obs = obs
self.old_reward = rewards
if self.training:
if isinstance(obs, dict) and isinstance(self.obs_rms, dict):
for key in self.obs_rms.keys():
self.obs_rms[key].update(obs[key])
else:
self.obs_rms.update(obs)
obs = self.normalize_obs(obs)
if self.training:
self._update_reward(rewards)
rewards = self.normalize_reward(rewards)
# # Normalize the terminal observations
# for idx, done in enumerate(dones):
# if not done:
# continue
# if "terminal_observation" in infos[idx]:
# infos[idx]["terminal_observation"] = self.normalize_obs(infos[idx]["terminal_observation"])
return obs, rewards, dones, infos
def _update_reward(self, reward: np.ndarray) -> None:
"""Update reward normalization statistics."""
self.ret = self.ret * self.gamma + reward
self.ret_rms.update(self.ret)
def _normalize_obs(self, obs: np.ndarray, obs_rms: RunningMeanStd) -> np.ndarray:
"""
Helper to normalize observation.
:param obs:
:param obs_rms: associated statistics
:return: normalized observation
"""
norm_obs = (obs - obs_rms.mean) / np.sqrt(obs_rms.var + self.epsilon)
if not math.isinf(self.clip_obs):
np.clip(norm_obs, -self.clip_obs, self.clip_obs, out=norm_obs)
return norm_obs
def _unnormalize_obs(self, obs: np.ndarray, obs_rms: RunningMeanStd) -> np.ndarray:
"""
Helper to unnormalize observation.
:param obs:
:param obs_rms: associated statistics
:return: unnormalized observation
"""
return (obs * np.sqrt(obs_rms.var + self.epsilon)) + obs_rms.mean
def normalize_obs(
self, obs: Union[np.ndarray, Dict[str, np.ndarray]]
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
"""
Normalize observations using this VecNormalize's observations statistics.
Calling this method does not update statistics.
"""
# Avoid modifying by reference the original object
obs_ = deepcopy(obs)
if self.norm_obs:
if isinstance(obs, dict) and isinstance(self.obs_rms, dict):
for key in self.obs_rms.keys():
obs_[key] = self._normalize_obs(obs[key], self.obs_rms[key]).astype(
np.float32
)
else:
obs_ = self._normalize_obs(obs, self.obs_rms).astype(np.float32)
return obs_
def normalize_reward(self, reward: np.ndarray) -> np.ndarray:
"""
`"Incorrect"` Normalize rewards using this VecNormalize's rewards statistics.
Calling this method does not update statistics.
Incorrect in the sense that we
1. update return
2. divide reward by std(return) *without* subtracting and adding back mean
See: https://openreview.net/attachment?id=r1etN1rtPB&name=original_pdf
"""
if self.norm_reward:
if math.isinf(self.clip_rew):
norm_rew = reward / np.sqrt(self.ret_rms.var + self.epsilon)
else:
norm_rew = np.clip(
reward / np.sqrt(self.ret_rms.var + self.epsilon),
-self.clip_rew,
self.clip_rew,
)
else:
norm_rew = reward
return norm_rew
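    # Worked example of the scaling above (numbers are made up): with ret_rms.var == 4.0 and a
    # negligible epsilon, a raw reward of 3.0 becomes 3.0 / sqrt(4.0) = 1.5, i.e. rewards are
    # divided by the std of the discounted return, with no mean subtracted or added back.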
def unnormalize_obs(
self, obs: Union[np.ndarray, Dict[str, np.ndarray]]
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
# Avoid modifying by reference the original object
obs_ = deepcopy(obs)
if self.norm_obs:
if isinstance(obs, dict) and isinstance(self.obs_rms, dict):
for key in self.obs_rms.keys():
obs_[key] = self._unnormalize_obs(obs[key], self.obs_rms[key])
else:
obs_ = self._unnormalize_obs(obs, self.obs_rms)
return obs_
def unnormalize_reward(self, reward: np.ndarray) -> np.ndarray:
if self.norm_reward:
return reward * np.sqrt(self.ret_rms.var + self.epsilon)
return reward
def get_original_obs(self) -> Union[np.ndarray, Dict[str, np.ndarray]]:
"""
Returns an unnormalized version of the observations from the most recent
step or reset.
"""
return deepcopy(self.old_obs)
def get_original_reward(self) -> np.ndarray:
"""
Returns an unnormalized version of the rewards from the most recent step.
"""
return self.old_reward.copy()
def reset(self) -> Union[np.ndarray, Dict[str, np.ndarray]]:
"""
Reset all environments
:return: first observation of the episode
"""
obs = self.env.reset()
self.old_obs = obs
self.ret = np.zeros(self.num_envs)
if self.training:
self._update_reward(self.ret)
return self.normalize_obs(obs)
@property
def _max_episode_steps(self) -> int:
return self.env._max_episode_steps
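# Minimal usage sketch (the environment id is only an illustration, not something this
# module references):
#
#     env = VecNormalize(gym.make("CartPole-v1"), norm_obs=True, norm_reward=True)
#     obs = env.reset()
#     obs, reward, done, info = env.step(env.action_space.sample())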
| StarcoderdataPython |
3311378 | from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from .models import *
from .forms import ToolForm
# Create your views here.
@login_required()
def index(request):
    # Handle GET requests
tools = Tool.objects.all()
params = {
'tools': tools,
}
return render(request, 'tool/index.htm', params)
@login_required()
def show(request,id=-1):
if id==-1:
return redirect(to='/tool')
tool = Tool.objects.filter(id=id).first()
params={
'tool':tool
}
return render(request,'tool/show.htm',params)
@login_required()
def create(request):
params = {
'form': ToolForm()
}
    # Handle POST requests
if request.method == 'POST':
obj = Tool()
obj.editor = request.user
        tool = ToolForm(request.POST, request.FILES, instance=obj)
        # validate before saving so invalid submissions fall through to the form page
        if tool.is_valid():
            tool.save()
            return redirect(to='/tool')
    # Handle GET requests
return render(request, 'tool/create.htm', params)
| StarcoderdataPython |
3286607 | <reponame>yurithebest1/vision4j-collection<filename>external/keras-vgg16-classification/classifier.py
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
from time import time
from PIL import Image
import numpy as np
import cv2
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
def read_image_from_response(data, shape):
    nparr = np.frombuffer(data, np.uint8)  # np.fromstring is deprecated for binary data
img = cv2.cvtColor(cv2.imdecode(nparr, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB).reshape(shape).astype(np.float64)
return img
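# Note: `Vgg16Classifier.predict` below expects a request object exposing `image_data`
# (encoded image bytes), `width`, `height` and `channels` -- presumably a message from the
# vision4j gRPC interface; the exact message type is not defined in this file.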
class Vgg16Classifier(object):
def __init__(self):
self.model = VGG16()
self.model._make_predict_function()
def predict(self, request):
img = read_image_from_response(request.image_data, (request.width, request.height, request.channels))
img = np.expand_dims(preprocess_input(img), axis=0)
out = self.model.predict(img)
idx = np.argmax(out)
return idx
def predict_image(self, img):
img = np.expand_dims(preprocess_input(img), axis=0)
out = self.model.predict(img)
return out
def main():
model = Vgg16Classifier()
total = 0.
n = 100
for i in range(n):
start = time()
img_path = '/home/hvrigazov/vision4j/vision4j-collection/img/cheetah.resized.jpg'
img = image.load_img(img_path, target_size=(224, 224))
img = preprocess_input(image.img_to_array(img))
print(img)
print(np.argmax(model.predict_image(img)))
total += (time() - start)
break
print(str(total / n))
if __name__ == '__main__':
main() | StarcoderdataPython |
from flask import Flask, render_template, request, session, g
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from flask_babel import Babel
import os
from .config import DevelopmentConfig, TestConfig, ProductionConfig, DigitalocenDEV
# database handle
db = SQLAlchemy(session_options={"autoflush": False})
# encryptor handle
bcrypt = Bcrypt()
# manage user login
login_manager = LoginManager()
# endpoint name of the login view that unauthenticated users are redirected to
login_manager.login_view = 'users.login'
def run_install(app_ctx):
from fellowcrm.install.routes import install
app_ctx.register_blueprint(install)
return app_ctx
def create_app(config_class=ProductionConfig):
app = Flask(__name__, instance_relative_config=True)
if os.getenv('FLASK_ENV') == 'development':
config_class = DevelopmentConfig()
elif os.getenv('FLASK_ENV') == 'DigitalocenDEV':
config_class = DigitalocenDEV()
elif os.getenv('FLASK_ENV') == 'production':
config_class = ProductionConfig()
elif os.getenv('FLASK_ENV') == 'testing':
config_class = TestConfig()
app.config.from_object(config_class)
configure_extensions(app)
with app.app_context():
# check if the config table exists, otherwise run install
engine = db.get_engine(app)
if not engine.dialect.has_table(engine, 'app_config'):
return run_install(app)
else:
from fellowcrm.settings.models import AppConfig
row = AppConfig.query.first()
if not row:
return run_install(app)
# application is installed so extends the config
from fellowcrm.settings.models import AppConfig, Currency, TimeZone
app_cfg = AppConfig.query.first()
app.config['def_currency'] = Currency.get_currency_by_id(app_cfg.default_currency)
app.config['def_tz'] = TimeZone.get_tz_by_id(app_cfg.default_timezone)
# include the routes
# from fellowcrm import routes
from fellowcrm.main.routes import main
from fellowcrm.users.routes import users
from fellowcrm.leads.routes import leads
from fellowcrm.accounts.routes import accounts
from fellowcrm.contacts.routes import contacts
from fellowcrm.deals.routes import deals
from fellowcrm.activities.routes import activities
from fellowcrm.settings.routes import settings
from fellowcrm.settings.app_routes import app_config
from fellowcrm.reports.routes import reports
from fellowcrm.picklists.routes import picklists
from fellowcrm.upgrade.routes import upgrade
# register routes with blueprint
app.register_blueprint(main)
app.register_blueprint(users)
app.register_blueprint(settings)
app.register_blueprint(app_config)
app.register_blueprint(leads)
app.register_blueprint(accounts)
app.register_blueprint(contacts)
app.register_blueprint(deals)
app.register_blueprint(activities)
app.register_blueprint(reports)
app.register_blueprint(picklists)
app.register_blueprint(upgrade)
return app
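# Minimal usage sketch (wsgi.py style; assumes this module is fellowcrm/__init__.py and
# FLASK_ENV selects one of the config classes above):
#
#     from fellowcrm import create_app
#     app = create_app()
#     if __name__ == "__main__":
#         app.run()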
def configure_extensions(app):
"""configure flask extensions"""
#jwt.init_app(app)
app.url_map.strict_slashes = False
app.jinja_env.globals.update(zip=zip)
migrate = Migrate(app, db)
babel = Babel(app)
@babel.localeselector
def get_locale():
if 'language' in session:
if session['language'] is not None:
return session['language']
return 'en'
else:
return 'en'
@babel.timezoneselector
def get_timezone():
        user = getattr(g, 'user', None)
        if user is not None:
            return user.timezone
manager = Manager(app)
manager.add_command('db', MigrateCommand)
login_manager.init_app(app)
db.init_app(app)
bcrypt.init_app(app)
| StarcoderdataPython |
3234662 | class Solution:
def divide(self, dividend: int, divisor: int) -> int:
if divisor == 0:
return None
diff_sign = (divisor < 0) ^ (dividend < 0)
dividend = abs(dividend)
divisor = abs(divisor)
result = 0
max_divisor = divisor
shift_count = 1
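        # Worked example: 22 / 3 -> max_divisor doubles 3, 6, 12 (shift_count 1, 2, 4);
        # then 22 - 12 = 10 (result 4), 10 - 6 = 4 (result 6), 4 - 3 = 1 (result 7).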
while dividend >= (max_divisor << 1):
max_divisor <<= 1
shift_count <<= 1
while shift_count >= 1:
if dividend >= max_divisor:
dividend -= max_divisor
result += shift_count
shift_count >>= 1
max_divisor >>= 1
if diff_sign:
result = -result
return max(min(result, 2**31-1), -2**31) | StarcoderdataPython |
1621489 | import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import gridspec
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from drosoph_vae.data_loading import get_3d_columns_names
from drosoph_vae.settings import config, skeleton
from drosoph_vae.settings.config import SetupConfig
def save_figure(func):
"""Decorator for saving figures. Suptitle must be set."""
def clean_string(s):
_replacements_ = [("\'", ""), (" ", "-"), (",", "-"), ("\n", ""), ("(", "_"), (")", "")]
for m, r in _replacements_:
s = s.replace(m, r)
return s.lower()
def wrapper(*args, **kwargs):
fig = func(*args, **kwargs)
if fig is None:
return fig
s = clean_string(fig._suptitle.get_text())
fig.savefig(f"{SetupConfig.value('figures_root_path')}/{s}.png")
return fig
return wrapper
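# Illustrative use of the decorator above (function name is hypothetical): any plotting
# function that sets a suptitle and returns its figure is saved automatically, e.g.
#
#     @save_figure
#     def plot_something(...):
#         fig = plt.figure()
#         ...
#         fig.suptitle("some title")
#         return fig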
def _get_feature_name_(tracking_id):
return str(skeleton.tracked_points[tracking_id])[len('Tracked.'):]
def _get_feature_id_(leg_id, tracking_point_id):
if leg_id < 3:
return leg_id * 5 + tracking_point_id
else:
return (leg_id - 5) * 5 + tracking_point_id + 19
def _get_leg_name_(leg_id):
__LEG_NAMES__ = ['foreleg', 'middle leg', 'hind leg']
return __LEG_NAMES__[leg_id]
def ploting_frames(joint_positions):
# TODO move this into one single plot
# TODO provide decorator which saves the figure
for leg in config.LEGS:
fig, axs = plt.subplots(1, config.NB_OF_AXIS, sharex=True, figsize=(20, 10))
for tracked_point in range(config.NB_TRACKED_POINTS):
for axis in range(config.NB_OF_AXIS):
cur_ax = axs[axis]
cur_ax.plot(joint_positions[:, _get_feature_id_(leg, tracked_point), axis], label = f"{_get_feature_name_(tracked_point)}_{('x' if axis == 0 else 'y')}")
if axis == 0:
cur_ax.set_ylabel('x pos')
else:
cur_ax.set_ylabel('y pos')
cur_ax.legend(loc='upper right')
cur_ax.set_xlabel('frame')
#plt.xlabel('frame')
#plt.legend(loc='lower right')
plt.suptitle(_get_leg_name_(leg))
@save_figure
def plot_comparing_joint_position_with_reconstructed(real_joint_positions, reconstructed_joint_positions, validation_cut_off=None, exp_desc=None):
fig, axs = plt.subplots(3, 2 * 2, sharex=True, figsize=(25, 10))
for idx_leg, leg in enumerate(SetupConfig.value('legs')):
for axis in range(SetupConfig.value('n_axis')):
cur_ax = axs[idx_leg][axis * 3]
rec_ax = axs[idx_leg][axis * 3 + 1]
#gen_ax = axs[idx_leg][axis * 3 + 2]
if validation_cut_off is not None:
for a in [cur_ax, rec_ax]:
a.axvline(validation_cut_off, label='validation cut off', linestyle='--')
for tracked_point in range(SetupConfig.value('n_tracked_points')):
_label_ = f"{_get_feature_name_(tracked_point)}_{('x' if axis == 0 else 'y')}"
cur_ax.plot(real_joint_positions[:, _get_feature_id_(leg, tracked_point), axis], label=_label_)
rec_ax.plot(reconstructed_joint_positions[:, _get_feature_id_(leg, tracked_point), axis], label=_label_)
#gen_ax.plot(generated_positions[:, _get_feature_id_(leg, tracked_point), axis], label=_label_)
cur_ax.get_shared_y_axes().join(cur_ax, rec_ax)
#cur_ax.get_shared_y_axes().join(cur_ax, gen_ax)
rec_ax.set_yticks([])
#gen_ax.set_yticks([])
for i in range(config.NB_OF_AXIS):
axs[0][i * 3].set_title('input data')
axs[0][i * 3 + 1].set_title('reconstructed data')
#axs[0][i * 3 + 2].set_title('generated data')
axs[-1][i * 3].set_xlabel('frames')
axs[-1][i * 3 + 1].set_xlabel('frames')
#axs[-1][i * 3 + 2].set_xlabel('frames')
for i in range(len(config.LEGS)):
        axs[i][0].set_ylabel(f"{_get_leg_name_(i)}: x pos")
        axs[i][3].set_ylabel(f"{_get_leg_name_(i)}: y pos")
_, labels = axs[0][0].get_legend_handles_labels()
fig.legend(labels, loc='upper right')
fig.suptitle(f"Comparing input and reconstruction\n({exp_desc})")
fig.align_ylabels(axs)
plt.tight_layout()
plt.subplots_adjust(top=0.9)
return fig
@save_figure
def plot_losses(train_loss, test_loss, exp_desc):
fig = plt.figure(figsize=(15, 8))
plt.plot(train_loss, label='train')
plt.plot(test_loss, label='test')
plt.xlabel('epochs')
plt.ylabel('loss (ELBO)')
plt.legend()
fig.suptitle(f"Loss (ELBO)\n({exp_desc})")
plt.tight_layout()
plt.subplots_adjust(top=0.9)
return fig
def plot_losses_v0(losses, legend=None, title=None):
"""the version for the SOM-VAE model"""
plt.figure(figsize=(15, 8))
if legend is None:
legend = ['train', 'test', 'test_recon']
fig, ax1 = plt.subplots()
for i, l in enumerate(losses[:-1]):
ax1.plot(l, label=legend[i])
ax1.tick_params(axis='y')
ax2 = ax1.twinx()
ax2.plot(losses[-1], label=legend[-1], color='green')
ax2.tick_params(axis='y', labelcolor='green')
ax2.set_xlabel('epoch')
fig.legend()
fig.tight_layout()
plt.title('loss')
if title is not None:
plt.title(f"loss with {title}")
return fig
def plot_latent_frame_distribution(latent_assignments, nb_bins):
plt.figure()
plt.hist(latent_assignments, bins=nb_bins)
plt.title('distribution of latent-space-assignments')
plt.xlabel('latent-space')
plt.ylabel('nb of frames in latent-space')
def plot_cluster_assignment_over_time(cluster_assignments):
plt.figure()
plt.plot(cluster_assignments)
plt.title("cluster assignments over time")
plt.ylabel("index of SOM-embeddings")
plt.xlabel("frame")
def plot_reconstructed_angle_data(real_data, reconstructed_data, columns, fix_ylim=False):
_colors = sns.color_palette(n_colors=2)
fig, axs = plt.subplots(nrows=real_data.shape[1], ncols=1, figsize=(5, 30))
for a in range(real_data.shape[1]):
axs[a].plot(real_data[:,a], c=_colors[0], label='real')
axs[a].plot(reconstructed_data[:,a], c=_colors[1], label='reconstructed')
axs[a].set_title(f"col: {columns[a]}")
if fix_ylim:
axs[a].set_ylim(-np.pi, np.pi)
axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True)
fig.suptitle('real vs reconstructed angle data')
plt.tight_layout()
plt.subplots_adjust(top=0.96)
return fig
def plot_angle_columns(data, columns):
fig, axs = plt.subplots(ncols=1, nrows=len(columns), figsize=(5, 3 * len(columns)))
for i, c in enumerate(columns):
axs[i].set_title(c)
axs[i].plot(data[:, i])
axs[i].set_xlabel('time')
axs[i].set_ylabel('[radians]')
fig.suptitle('Angle data')
plt.tight_layout()
plt.subplots_adjust(top=0.97) # necessary for the title not to be in the first plot
return fig
def plot_tnse(X, y, title='t-SNE'):
"""X is really the data
y is a pandas dataframe with a column called `label`, which are of type _BehaviorLabel_
"""
X_embedded = TSNE(n_components=2, random_state=42).fit_transform(X)
seen_labels = y.label.unique()
_cs = sns.color_palette(n_colors=len(seen_labels))
fig = plt.figure(figsize=(10, 10))
behaviour_colours = dict(zip(seen_labels, _cs))
for l, c in behaviour_colours.items():
_d = X_embedded[y['label'] == l]
# c=[c] since matplotlib asks for it
plt.scatter(_d[:, 0], _d[:,1], c=[c], label=l.name, marker='.')
plt.legend()
plt.title(title)
return fig
@save_figure
def plot_2d_distribution(X_train, X_test, n_legs=3, exp_desc=None):
fig, ax = plt.subplots(nrows=n_legs, ncols=2, figsize=(10, 8))
for leg_idx in range(n_legs):
for j in range(5 * 2):
cur_col = leg_idx * 10 + j
sns.distplot(X_train[:, cur_col],
ax=ax[leg_idx][0],
bins=50)
sns.distplot(X_test[:, cur_col],
ax=ax[leg_idx][1],
bins=50)
ax[0][0].set_title('training data')
ax[0][1].set_title('testing data')
plt.suptitle(f"distribution of input\n({exp_desc})")
plt.tight_layout()
plt.subplots_adjust(top=0.89) # necessary for the title not to be in the first plot
return fig
@save_figure
def plot_distribution_of_angle_data(data, run_config):
"""
Args:
=====
data: [(exp_id, exp_data)]
run_config: the full run_config (used to fill in the title)
"""
# it's highly unlikely that the selection will change
selected_cols = np.where(np.var(data[0][1], axis=0) > 0.0)[0]
column_names = get_3d_columns_names(selected_cols)
def _get_limb_id(s):
return int(s[len('limb: '):len('limb: x')])
t = np.unique(np.array([_get_limb_id(s) for s in column_names]))
col_name_to_ax = dict(zip(t, np.arange(len(t))))
# This will take some time... you can set `sharey=False` to speed it up.
fig, axs = plt.subplots(nrows=len(data), ncols=len(col_name_to_ax), figsize=(20, len(data)), sharey=False, sharex=True)
for i, (exp_id, data_set) in enumerate(data):
for s, cn, ax_idx in zip(selected_cols, column_names, [col_name_to_ax[_get_limb_id(s)] for s in column_names]):
sns.distplot(data_set[:, s], label=cn, ax=axs[i][ax_idx])
axs[i][0].set_ylabel(exp_id, rotation=0)
plt.suptitle(f"distribution of angled data\n({config.config_description(run_config)})")
plt.tight_layout()
plt.subplots_adjust(top=0.96)
for i, ax in enumerate(axs[0]):
ax.set_title(f"limb {i}")
return fig
@save_figure
def plot_3d_angle_data_distribution(X_train, X_test, selected_columns, exp_desc):
fig, axs = plt.subplots(nrows=X_train.shape[-1] // 3, ncols=2, figsize=(10, 6), sharex=True, sharey=True)
col_names = get_3d_columns_names(selected_columns)
for c in range(X_train.shape[-1]):
sns.distplot(X_train[:, c],ax=axs[c // 3][0])
sns.distplot(X_test[:, c], ax=axs[c // 3][1])
for i, a in enumerate(axs):
a[0].set_xlabel(col_names[i * 3][:len('limb: 0')])
plt.suptitle(f"distribution of train and test data\n({exp_desc})")
axs[0][0].set_title('train')
axs[0][1].set_title('test')
# order of these two calls is important, sadly
plt.tight_layout()
plt.subplots_adjust(top=0.84)
return fig
def _equalize_ylim(ax0, ax1):
ymin0, ymax0 = ax0.get_ylim()
ymin1, ymax1 = ax1.get_ylim()
min_ = min(ymin0, ymin1)
max_ = max(ymax0, ymax1)
ax0.set_ylim((min_, max_))
ax1.set_ylim((min_, max_))
def plot_reconstruction_comparision_pos_2d(real, reconstructed, run_desc, epochs):
fig, axs = plt.subplots(3 * 2, real.shape[2], sharex=True, figsize=(25, 10))
x_axis_values = np.arange(real.shape[0]) / SetupConfig.value('frames_per_second') / 60.
for dim in range(2):
for leg in range(3):
for limb in range(5):
axs[2 * leg][dim].plot(x_axis_values, real[:, limb + leg * 5, dim])
axs[2 * leg + 1][dim].plot(x_axis_values, reconstructed[:, limb + leg * 5, dim])
axs[0][0].set_title('x')
axs[0][1].set_title('y')
for leg in range(3):
axs[2*leg][0].set_ylabel(f"input\n{_get_leg_name_(leg)}")
axs[2*leg + 1][0].set_ylabel(f"reconstructed\n{_get_leg_name_(leg)}")
#axs[2*leg][0].get_shared_y_axes().join(axs[2*leg][0], axs[2*leg + 1][0])
#axs[2*leg][1].get_shared_y_axes().join(axs[2*leg][1], axs[2*leg + 1][1])
_equalize_ylim(axs[2 * leg][0], axs[2 * leg + 1][0])
_equalize_ylim(axs[2 * leg][1], axs[2 * leg + 1][1])
#axs[2*leg][1].set_yticks([])
#axs[2*leg + 1][1].set_yticks([])
axs[0][0].legend([tp.name for tp in skeleton.tracked_points[:5]], loc='upper left')
axs[-1][0].set_xlabel('time [min]')
axs[-1][1].set_xlabel('time [min]')
fig.align_ylabels(axs)
fig.suptitle(f"Comparing input and reconstruction")
plt.tight_layout()
plt.subplots_adjust(top=0.9)
figure_path = f"{SetupConfig.value('figures_root_path')}/{run_desc}_e-{epochs}_input_gen_recon_comparision.png"
plt.savefig(figure_path)
return figure_path
def plot_reconstruction_comparision_angle_3d(X_eval, X_hat_eval, epochs, selected_columns=None, run_desc=None):
xticks = np.arange(0, len(X_eval)) / SetupConfig.value('frames_per_second') / 60.
fig, axs = plt.subplots(nrows=X_eval.shape[1], ncols=1, figsize=(20, 30), sharex=True, sharey=True)
for i, cn in enumerate(get_3d_columns_names(selected_columns)):
_idx_ = np.s_[:, i]
axs[i].plot(xticks, X_eval[_idx_], label='input')
axs[i].plot(xticks, X_hat_eval[_idx_], label='reconstructed')
axs[i].set_title(cn)
axs[-1].set_xlabel('time [min]')
axs[0].legend(loc='upper left')
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.suptitle(f"Comparision of selection of data\n({run_desc}_e-{epochs})")
plt.tight_layout()
plt.subplots_adjust(top=0.94)
figure_path = f"{SetupConfig.value('figures_root_path')}/{run_desc}_e-{epochs}_input_gen_recon_comparision.png"
plt.savefig(figure_path)
return figure_path
def plot_latent_space(X_latent, X_latent_mean_tsne_proj, y, cluster_assignments, run_desc, epochs):
cluster_colors = sns.color_palette(n_colors=len(np.unique(cluster_assignments)))
fig = plt.figure(figsize=(15, 12))
gs = gridspec.GridSpec(3, 2, figure=fig)
ax1 = plt.subplot(gs[:2, :])
ax2 = plt.subplot(gs[-1:, :1])
ax3 = plt.subplot(gs[-1:, 1:])
plot_data = pd.DataFrame(X_latent_mean_tsne_proj, columns=['latent_0', 'latent_1'])
plot_data['Cluster'] = cluster_assignments
plot_data['Class'] = y
plot_data['mean_0'], plot_data['mean_1'] = X_latent.mean[:, 0], X_latent.mean[:, 1]
plot_data['var_0'], plot_data['var_1'] = X_latent.var[:, 0], X_latent.var[:, 1]
sns.scatterplot(data=plot_data, x='latent_0', y='latent_1', style='Class', hue='Cluster', ax=ax1, palette=cluster_colors)
sns.scatterplot(data=plot_data, x='mean_0', y='mean_1', style='Class', hue='Cluster', ax=ax2, palette=cluster_colors)
sns.scatterplot(data=plot_data, x='var_0', y='var_1', style='Class', hue='Cluster', ax=ax3, palette=cluster_colors)
ax1.set_title('T-SNE projection of latent space (mean & var stacked)')
ax2.set_title('mean')
ax2.legend(loc='lower left')
ax3.set_title('var')
ax3.legend(loc='lower right')
figure_path = f"{SetupConfig.value('figures_root_path')}/{run_desc}_e-{epochs}_latent_space_tsne.png"
plt.savefig(figure_path)
return figure_path
| StarcoderdataPython |
1790203 | <filename>src/regnet/tests/test_train.py<gh_stars>0
# from src.regnet.models.train_model import train_with_params
# def test_train():
# train_with_params(epochs=1)
| StarcoderdataPython |
180018 | # -*- coding: utf-8 -*-
import json # jsonfy search result
import psycopg2
import sys # sys.exit()
import ldap3 as ldap # ldap connection request
from ldap3 import Server,Connection , NTLM, ALL, MODIFY_ADD, MODIFY_REPLACE
try:
con_db = psycopg2.connect(database='localmap-dev', user='localmap-dev', host='laura.dev.klee.lan.net', password='<PASSWORD>')
except:
print "I am unable to connect to the database"
cur=con_db.cursor()
cur.execute('SELECT firstname, lastname, "Desk".name, "Site".name,"Company".name '+
'FROM "Person" '+
'JOIN "Desk" ON "Desk".person_id="Person".per_id '+
'JOIN "Site" ON "Site".sit_id="Desk".site_id '+
'JOIN "BusinessUnit" ON "BusinessUnit".bus_id="Person"."businessUnit_id" '+
'JOIN "Company" ON "Company".com_id="BusinessUnit".company_id;')
res=cur.fetchall()
#print(res)
######################################################
with open('../config/config-ldap.json') as data_file:
settings = json.load(data_file)
Serv = settings['url']
BaseDN = settings['baseDN']
BaseDNDesactives = settings['BaseDNDesactives']
User = settings['username']
Password = settings['password']
#######################################################
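# Example structure of ../config/config-ldap.json (keys taken from the reads above; all
# values are illustrative placeholders):
# {
#     "url": "ldap://dc01.example.lan",
#     "baseDN": "...",
#     "BaseDNDesactives": "...",
#     "username": "...",
#     "password": "..."
# }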
base_dn = BaseDN
server = Server(Serv, get_info=ALL)
con=Connection(server, user=BaseDN+'\\'+User, password=Password,authentication = NTLM ,return_empty_attributes=True)
if con.bind():
#for each person we have to check if the office name in the active directory is up-to-date with the database
for i in range(0,len(res)):
name=(res[i][0]+" "+res[i][1]).decode("utf-8")
#if res[i][3]=="La Boursidière" and res[i][2]!="aucun":
# officeName=("La Boursidière : "+res[i][2]).decode("utf-8")
if res[i][3]=="La Boursidière":
if res[i][2]!="aucun":
officeName=("La Boursidière : "+res[i][2]).decode("utf-8")
else:
officeName="nochange"
else:
officeName=res[i][3].decode("utf-8")
company=res[i][4].decode("utf-8")
base='cn='+name+',ou='+company+','+base_dn.decode("utf-8")
#print (base)
con.search(base,'(cn='+name+')',attributes=['physicalDeliveryOfficeName'])
if con.entries:
if (not con.entries[0].physicalDeliveryOfficeName):
print(name.encode('utf-8'),"add new office", officeName.encode('utf-8'))
con.modify(base,{'physicalDeliveryOfficeName':[(MODIFY_ADD,[officeName])]})
else:
if officeName=="nochange":
print("pas de changement")
elif (officeName=="Issy-les-Moulineaux" and con.entries[0].physicalDeliveryOfficeName.value.find("Issy-les-Moulineaux")==-1) or (officeName!="Issy-les-Moulineaux" and not con.entries[0].physicalDeliveryOfficeName==officeName):
print(name.encode('utf-8'),"change my office",con.entries[0].physicalDeliveryOfficeName,officeName.encode('utf-8'))
con.modify(base,{'physicalDeliveryOfficeName':[(MODIFY_REPLACE,[officeName])]})
con.unbind()
sys.exit()
| StarcoderdataPython |
138889 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle.fluid as fluid
import unittest
from paddle.fluid.dygraph.jit import dygraph_to_static_output
np.random.seed(1)
def dyfunc_with_if_else(x_v):
if fluid.layers.mean(x_v).numpy()[0] > 5:
x_v = x_v - 1
else:
x_v = x_v + 1
return x_v
def dyfunc_with_if_else2(x):
i, j = 0, 0
if fluid.layers.reduce_mean(x).numpy()[0] > x.numpy()[i][j]:
y = fluid.layers.relu(x)
else:
x_pow = fluid.layers.pow(x, 2)
y = fluid.layers.tanh(x_pow)
return y
def nested_if_else(x_v):
batch_size = x_v.shape[0]
feat_size = x_v.shape[-1]
bias = fluid.layers.fill_constant([feat_size], dtype='float32', value=1)
if fluid.layers.mean(x_v).numpy()[0] < 0:
y = x_v + bias
w = fluid.layers.fill_constant([feat_size], dtype='float32', value=10)
if y.numpy()[0] < 10:
tmp = y * w
y = fluid.layers.relu(tmp)
if fluid.layers.mean(y).numpy()[0] < batch_size:
y = fluid.layers.abs(y)
else:
tmp = fluid.layers.fill_constant(
[feat_size], dtype='float32', value=-1)
y = y - tmp
else:
y = x_v - bias
return y
class TestDygraphIfElse(unittest.TestCase):
"""
TestCase for the transformation from control flow `if/else`
dependent on tensor in Dygraph into Static `fluid.layers.cond`.
"""
def setUp(self):
self.x = np.random.random([10, 16]).astype('float32')
self.dyfunc = dyfunc_with_if_else
def _run_static(self):
main_program = fluid.Program()
with fluid.program_guard(main_program):
x_v = fluid.layers.assign(self.x)
# Transform into static graph
out = dygraph_to_static_output(self.dyfunc)(x_v)
exe = fluid.Executor(fluid.CPUPlace())
ret = exe.run(main_program, fetch_list=out)
return ret
def _run_dygraph(self):
with fluid.dygraph.guard():
x_v = fluid.dygraph.to_variable(self.x)
ret = self.dyfunc(x_v)
return ret.numpy()
def test_ast_to_func(self):
self.assertTrue((self._run_dygraph() == self._run_static()).all())
class TestDygraphIfElse2(TestDygraphIfElse):
def setUp(self):
self.x = np.random.random([10, 16]).astype('float32')
self.dyfunc = dyfunc_with_if_else2
class TestDygraphIfElse3(TestDygraphIfElse):
def setUp(self):
self.x = np.random.random([10, 16]).astype('float32')
self.dyfunc = nested_if_else
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1768174 | import pytest
from solution import solution
@pytest.mark.parametrize(
["A", "res"],
[
([2, 1, 4, 5, 6, 7, 9, 8], 3),
([1, 2, 4, 5, 3, 9, 8, 7], 6)
]
)
def test_solution(A, res):
assert solution(A) == res
| StarcoderdataPython |
186563 | import logging
from django.core.management.base import BaseCommand
from product.models import Shop
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'This command create sample instances of model `Shop` into DB.'
def get_model(self):
model = Shop
return model
def get_sample_infos(self):
sample_infos = [
{'name': 'UM'},
{'name': 'MS'},
{'name': 'PS'},
]
return sample_infos
def handle(self, *args, **kwargs):
model = self.get_model()
sample_info = self.get_sample_infos()
created_amount = 0
for info in sample_info:
instance, created = model.objects.get_or_create(**info)
if created:
created_amount += 1
logger.warning(f'{created_amount = }')
| StarcoderdataPython |
3342809 | """
AWS API-Gateway Authorizer
==========================
This authorizer is designed to be attached to an AWS API-Gateway, as a
Lambda authorizer. It assumes that AWS Cognito is used to authenticate
a client (UI) and then API requests will pass a JSON Web Token to be
validated for authorization of API method calls. The initial designs
for authorization are very limited in scope.
This auth module is using a recent release of jwcrypto for several reasons:
- jwcrypto supports all JOSE features (see jwt.io libs for python)
- jwcrypto has well designed and documented APIs (python-jose does not)
- it can generate keys as well as other functions for JOSE
.. seealso::
- https://jwcrypto.readthedocs.io/en/latest/index.html
- https://auth0.com/docs/tokens/concepts/jwts
- https://jwt.io/
- https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-jwt-authorizer.html
- https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-verifying-a-jwt.html
- https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html
License
*******
This auth module is a derivative of various sources of JWT documentation and
source code samples that are covered by the Apache License, Version 2.0.
Copyright 2015-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use
this file except in compliance with the License. A copy of the License is
located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions and
limitations under the License.
"""
# WARNING: moto provides python-jose as a dev-dep, which is not part of
# the app-deps and should not be used in this auth module, that is,
# do not use imports like these:
# from jose import jwt
# from jose import jwk
import json
import os
import re
from typing import Dict
import jwcrypto
import jwcrypto.jwk
import jwcrypto.jwt
import requests
from dataclasses import dataclass
from example_app.logger import get_logger
LOGGER = get_logger(__name__)
API_ADMIN_EMAILS = [
email.strip() for email in os.getenv("API_ADMIN_EMAILS", "").split(",")
]
COGNITO_REGION = os.getenv("API_COGNITO_REGION", "us-west-2")
COGNITO_CLIENT_ID = os.getenv("API_COGNITO_CLIENT_ID")
COGNITO_POOL_ID = os.getenv("API_COGNITO_POOL_ID")
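# Example environment configuration read above (all values are illustrative placeholders):
#   API_COGNITO_REGION=us-west-2
#   API_COGNITO_POOL_ID=us-west-2_XXXXXXXXX
#   API_COGNITO_CLIENT_ID=xxxxxxxxxxxxxxxxxxxxxxxxxx
#   API_ADMIN_EMAILS=admin@example.com,ops@example.com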
@dataclass
class AuthError(Exception):
error: str
status_code: int
@dataclass
class CognitoPool:
id: str
client_id: str
region: str
_jwks: Dict = None
@property
def jwks_uri(self) -> str:
return "https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json".format(
self.region, self.id
)
@property
def jwks(self) -> Dict:
if self._jwks is None:
LOGGER.debug(self.jwks_uri)
response = requests.get(self.jwks_uri)
LOGGER.debug(response)
response.raise_for_status()
# use jwcrypto to parse the JWKS (it takes a json string)
jwks = jwcrypto.jwk.JWKSet.from_json(response.text)
self._jwks = json.loads(jwks.export())
LOGGER.debug(self._jwks)
return self._jwks
@staticmethod
def jwt_decode(jwt_token: str):
try:
jwt_headers, jwt_payload, jwt_signature = jwt_token.split(".")
if not isinstance(jwt_headers, str):
raise AuthError("Unauthorized - JWT is malformed", 401)
if not isinstance(jwt_payload, str):
raise AuthError("Unauthorized - JWT is malformed", 401)
if not isinstance(jwt_signature, str):
raise AuthError("Unauthorized - JWT is malformed", 401)
unverified_token = jwcrypto.jwt.JWT(jwt=jwt_token)
jwt_headers = unverified_token.token.jose_header
if not isinstance(jwt_headers, dict):
raise AuthError("Unauthorized - JWT has malformed headers", 401)
if not jwt_headers.get("alg"):
raise AuthError("Unauthorized - JWT-alg is not in headers", 401)
if not jwt_headers.get("kid"):
raise AuthError("Unauthorized - JWT-kid is not in headers", 401)
jwt_payload = unverified_token.token.objects["payload"].decode("utf-8")
jwt_payload = json.loads(jwt_payload)
if not isinstance(jwt_payload, dict):
raise AuthError("Unauthorized - JWT has malformed payload", 401)
if not jwt_payload.get("token_use") in ["id", "access"]:
raise AuthError("Unauthorized - JWT has malformed payload", 401)
return jwt_headers, jwt_payload, jwt_signature
except Exception as err:
LOGGER.error(err)
raise AuthError("Unauthorized - JWT is malformed", 401)
def jwt_public_key(self, jwt_token: str):
unverified_token = jwcrypto.jwt.JWT(jwt=jwt_token)
jwt_headers = unverified_token.token.jose_header
kid = jwt_headers.get("kid")
if kid is None:
raise AuthError("Unauthorized - JWT-kid is missing", 401)
LOGGER.debug(kid)
for pub_key in self.jwks.get("keys"):
if kid == pub_key.get("kid"):
LOGGER.info("JWT-kid has matching public-kid")
return pub_key
raise AuthError("Unauthorized - JWT-kid has no matching public-kid", 401)
def jwt_claims(self, jwt_token: str):
try:
public_key = self.jwt_public_key(jwt_token)
public_jwk = jwcrypto.jwk.JWK(**public_key)
verified_token = jwcrypto.jwt.JWT(
key=public_jwk, jwt=jwt_token, algs=[public_key["alg"]]
)
return json.loads(verified_token.claims)
except Exception as err:
LOGGER.error(err)
raise AuthError("Unauthorized - token failed to verify", 401)
COGNITO_POOL = CognitoPool(
region=COGNITO_REGION, client_id=COGNITO_CLIENT_ID, id=COGNITO_POOL_ID
)
if os.getenv("AWS_EXECUTION_ENV"):
# instead of re-downloading the public keys every time, memoize them only on cold start
# https://aws.amazon.com/blogs/compute/container-reuse-in-lambda/
# https://docs.aws.amazon.com/lambda/latest/dg/runtimes-context.html
assert COGNITO_POOL.jwks
@dataclass
class APIGateway:
aws_region: str
aws_account_id: str
api_gateway_arn: str
rest_api_id: str
rest_api_stage: str
@staticmethod
def from_method_arn(method_arn):
tmp = method_arn.split(":")
api_gateway_arn = tmp[5].split("/")
return APIGateway(
aws_region=tmp[3],
aws_account_id=tmp[4],
api_gateway_arn=tmp[5],
rest_api_id=api_gateway_arn[0],
rest_api_stage=api_gateway_arn[1],
)
def get_auth_policy(self, principal_id: str):
policy = AuthPolicy(principal_id, self.aws_account_id)
policy.restApiId = self.rest_api_id
policy.stage = self.rest_api_stage
policy.region = self.aws_region
return policy
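# Example (illustrative ARN): "arn:aws:execute-api:us-west-2:123456789012:a1b2c3/dev/GET/api/healthz"
# APIGateway.from_method_arn parses this to aws_region="us-west-2", aws_account_id="123456789012",
# rest_api_id="a1b2c3", rest_api_stage="dev".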
def aws_auth_handler(event, context):
"""AWS Authorizer for JWT tokens provided by AWS Cognito
event should have this form:
{
"type": "TOKEN",
"authorizationToken": "{caller-supplied-token}",
"methodArn": "arn:aws:execute-api:{regionId}:{accountId}:{apiId}/{stage}/{httpVerb}/[{resource}/[{child-resources}]]"
}
.. seealso::
- https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html
"""
LOGGER.debug("event: %s", event)
LOGGER.debug("context: %s", context)
try:
# validate the incoming token
# and produce the principal user identifier associated with the token
# this could be accomplished in a number of ways:
# 1. Call out to OAuth provider
# 2. Decode a JWT token inline
# 3. Lookup in a self-managed DB
# TODO: try 2. Decode a JWT token inline
# https://docs.authlib.org/en/stable/jose/index.html
# https://aws.amazon.com/premiumsupport/knowledge-center/decode-verify-cognito-json-token/
# https://github.com/awslabs/aws-support-tools/tree/master/Cognito/decode-verify-jwt
# there are flask plugins for this, but the API-Gateway solution is different
# https://flask-jwt-extended.readthedocs.io/en/stable/basic_usage/
# https://auth0.com/docs/quickstart/backend/python
token = event.get("authorizationToken")
if token is None:
raise AuthError("Unauthorized - authorizationToken is missing", 401)
if token.startswith("Bearer"):
token = token.strip("Bearer").strip()
# TODO: handle a SigV4 token?
# 'authorizationToken': 'AWS<PASSWORD>56
# Credential=<secret_id>/20200529/us-west-2/execute-api/aws4_request,
# Signature=xyz'
claims = COGNITO_POOL.jwt_claims(token) # also validates JWT
issuer = claims.get("iss")
if not (COGNITO_POOL.region in issuer and COGNITO_POOL.id in issuer):
raise AuthError("Unauthorized - invalid issuer in JWT claims", 403)
if claims["token_use"] == "id":
audience = claims.get("aud")
if audience != COGNITO_POOL.client_id:
raise AuthError("Unauthorized - invalid client-id in JWT claims", 403)
elif claims["token_use"] == "access":
client_id = claims.get("client_id")
if client_id != COGNITO_POOL.client_id:
raise AuthError("Unauthorized - invalid client-id in JWT claims", 403)
else:
# token validation should check this already, so should not get here
raise AuthError("Unauthorized - invalid client-id in JWT claims", 403)
if claims["token_use"] == "id":
principle_id = claims.get("email")
if not principle_id:
raise AuthError(
"Unauthorized - invalid principle-id in JWT claims", 403
)
if not claims.get("email_verified"):
raise AuthError(
"Unauthorized - email is not verified in JWT claims", 403
)
elif claims["token_use"] == "access":
principle_id = claims.get("username")
if not principle_id:
raise AuthError(
"Unauthorized - invalid principle-id in JWT claims", 403
)
else:
# token validation should check this already, so should not get here
raise AuthError("Unauthorized - invalid principle-id in JWT claims", 403)
# if the token is valid, a policy must be generated which will allow or deny
# access to the client
# if access is denied, the client will receive a 403 Access Denied response
# if access is allowed, API Gateway will proceed with the backend
# integration configured on the method that was called
# this function must generate a policy that is associated with the
# recognized principal user identifier. depending on your use case, you
# might store policies in a DB, or generate them on the fly
# keep in mind, the policy is cached for 5 minutes by default (TTL is
# configurable in the authorizer) and will apply to subsequent calls to any
# method/resource in the RestApi made with the same token
# the example policy below denies access to all resources in the RestApi
LOGGER.info("Method ARN: %s", event["methodArn"])
api_gateway = APIGateway.from_method_arn(event.get("methodArn"))
policy = api_gateway.get_auth_policy(principle_id)
policy.allowAllMethods() # a valid signed JWT is sufficient
#
# TODO: use cognito-groups with an JWT-access token?
#
if principle_id not in API_ADMIN_EMAILS:
policy.denyMethod(HttpVerb.GET, "/api/healthz")
# TODO: restrict the policy by additional options:
# #: The API Gateway API id. By default this is set to '*'
# restApiId = "*"
# #: The region where the API is deployed. By default this is set to '*'
# region = "*"
# #: The name of the stage used in the policy. By default this is set to '*'
# stage = "*"
# Finally, build the policy
auth_response = policy.build()
# # Add additional key-value pairs associated with the authenticated principal
# # these are made available by API-GW like so: $context.authorizer.<key>
# # additional context is cached
# context = {"key": "value", "number": 1, "bool": True} # $context.authorizer.key -> value
# # context['arr'] = ['foo'] <- this is invalid, API-GW will not accept it
# # context['obj'] = {'foo':'bar'} <- also invalid
# auth_response["context"] = context
# TODO: use "usageIdentifierKey": "{api-key}" for API-key use plans, if any.
return auth_response
except AuthError as auth_error:
if auth_error.status_code == 403:
api_gateway = APIGateway.from_method_arn(event.get("methodArn"))
policy = api_gateway.get_auth_policy("nobody")
policy.denyAllMethods()
auth_response = policy.build()
auth_response["error"] = auth_error.error
return auth_response
# API-GW requires the message text to be only "Unauthorized" for a 401
raise Exception("Unauthorized")
class HttpVerb:
GET = "GET"
POST = "POST"
PUT = "PUT"
PATCH = "PATCH"
HEAD = "HEAD"
DELETE = "DELETE"
OPTIONS = "OPTIONS"
ALL = "*"
class AuthPolicy(object):
#: The AWS account id the policy will be generated for.
#: This is used to create the method ARNs.
awsAccountId = ""
#: The principal used for the policy, this should be a unique identifier for the end user.
principalId = ""
#: The policy version used for the evaluation. This should always be '2012-10-17'
version = "2012-10-17"
#: The regular expression used to validate resource paths for the policy
    pathRegex = r"^[/.a-zA-Z0-9-\*]+$"
#: These are the internal lists of allowed and denied methods. These are lists
#: of objects and each object has 2 properties: A resource ARN and a nullable
#: conditions statement. The build method processes these lists and generates
#: the appropriate statements for the final policy
allowMethods = []
denyMethods = []
#: The API Gateway API id. By default this is set to '*'
restApiId = "*"
#: The region where the API is deployed. By default this is set to '*'
region = "*"
#: The name of the stage used in the policy. By default this is set to '*'
stage = "*"
def __init__(self, principal, awsAccountId):
self.awsAccountId = awsAccountId
self.principalId = principal
self.allowMethods = []
self.denyMethods = []
def _addMethod(self, effect, verb, resource, conditions):
"""Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null."""
if verb != "*" and not hasattr(HttpVerb, verb):
raise NameError(
"Invalid HTTP verb " + verb + ". Allowed verbs in HttpVerb class"
)
resourcePattern = re.compile(self.pathRegex)
if not resourcePattern.match(resource):
raise NameError(
"Invalid resource path: "
+ resource
+ ". Path should match "
+ self.pathRegex
)
if resource[:1] == "/":
resource = resource[1:]
resourceArn = (
"arn:aws:execute-api:"
+ self.region
+ ":"
+ self.awsAccountId
+ ":"
+ self.restApiId
+ "/"
+ self.stage
+ "/"
+ verb
+ "/"
+ resource
)
if effect.lower() == "allow":
self.allowMethods.append(
{"resourceArn": resourceArn, "conditions": conditions}
)
elif effect.lower() == "deny":
self.denyMethods.append(
{"resourceArn": resourceArn, "conditions": conditions}
)
def _getEmptyStatement(self, effect):
"""Returns an empty statement object prepopulated with the correct action and the
desired effect."""
statement = {
"Action": "execute-api:Invoke",
"Effect": effect[:1].upper() + effect[1:].lower(),
"Resource": [],
}
return statement
def _getStatementForEffect(self, effect, methods):
"""This function loops over an array of objects containing a resourceArn and
conditions statement and generates the array of statements for the policy."""
statements = []
if len(methods) > 0:
statement = self._getEmptyStatement(effect)
for curMethod in methods:
if curMethod["conditions"] is None or len(curMethod["conditions"]) == 0:
statement["Resource"].append(curMethod["resourceArn"])
else:
conditionalStatement = self._getEmptyStatement(effect)
conditionalStatement["Resource"].append(curMethod["resourceArn"])
conditionalStatement["Condition"] = curMethod["conditions"]
statements.append(conditionalStatement)
statements.append(statement)
return statements
def allowAllMethods(self):
"""Adds a '*' allow to the policy to authorize access to all methods of an API"""
self._addMethod("Allow", HttpVerb.ALL, "*", [])
def denyAllMethods(self):
"""Adds a '*' allow to the policy to deny access to all methods of an API"""
self._addMethod("Deny", HttpVerb.ALL, "*", [])
def allowMethod(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods for the policy"""
self._addMethod("Allow", verb, resource, [])
def denyMethod(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods for the policy"""
self._addMethod("Deny", verb, resource, [])
def allowMethodWithConditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide
/reference_policies_elements.html#Condition"""
self._addMethod("Allow", verb, resource, conditions)
def denyMethodWithConditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide
/reference_policies_elements.html#Condition"""
self._addMethod("Deny", verb, resource, conditions)
def build(self):
"""Generates the policy document based on the internal lists of allowed and denied
conditions. This will generate a policy with two main statements for the effect:
one statement for Allow and one statement for Deny.
Methods that includes conditions will have their own statement in the policy."""
if (self.allowMethods is None or len(self.allowMethods) == 0) and (
self.denyMethods is None or len(self.denyMethods) == 0
):
raise NameError("No statements defined for the policy")
policy = {
"principalId": self.principalId,
"policyDocument": {"Version": self.version, "Statement": []},
}
policy["policyDocument"]["Statement"].extend(
self._getStatementForEffect("Allow", self.allowMethods)
)
policy["policyDocument"]["Statement"].extend(
self._getStatementForEffect("Deny", self.denyMethods)
)
return policy
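# Hedged usage sketch (added for illustration; the account id, API id, region, stage and
# principal below are made-up values, not ones used by this service):
if __name__ == "__main__":
    demo_policy = AuthPolicy("user@example.com", "123456789012")
    demo_policy.restApiId = "abc123def4"   # assumed API id
    demo_policy.region = "us-west-2"       # assumed region
    demo_policy.stage = "dev"              # assumed stage
    demo_policy.allowMethod(HttpVerb.GET, "/api/healthz")
    demo_policy.denyMethod(HttpVerb.DELETE, "/api/*")
    print(demo_policy.build())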
| StarcoderdataPython |
3204263 | # import tensorflow as tf
#
# embedding_table = tf.Variable(initial_value=None,name="embedding_table")
# # Add ops to save and restore all the variables.
# saver = tf.compat.v1.train.Saver({"embedding_table": embedding_table})
#
# # Later, launch the model, use the saver to restore variables from disk, and
# # do some work with the model.
# with tf.compat.v1.Session() as sess:
# # Restore variables from disk.
# saver.restore(sess, "/cs/labs/gabis/bareluz/nematus/output_translate.ckpt")
# print(embedding_table)
print("in debias manager")
import numpy as np
import pickle
import json
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from debiaswe.debiaswe import we
from debiaswe.debiaswe.debias import debias
import sys
from sklearn.decomposition import PCA
import sklearn
import random
from sklearn.svm import LinearSVC, SVC
sys.path.append("..") # Adds higher directory to python modules path.
from consts import get_debias_files_from_config, EMBEDDING_SIZE, DEFINITIONAL_FILE, PROFESSIONS_FILE, \
GENDER_SPECIFIC_FILE, EQUALIZE_FILE
sys.path.append("../..") # Adds higher directory to python modules path.
from nullspace_projection.src.debias import load_word_vectors, project_on_gender_subspaces, get_vectors, get_debiasing_projection
np.set_printoptions(suppress=True)
class DebiasManager():
def __init__(self, consts_config_str):
self.E = None
self.non_debiased_embeddings = None
self.DICT_SIZE, self.ENG_DICT_FILE, self.OUTPUT_TRANSLATE_FILE, self.EMBEDDING_TABLE_FILE, \
self.EMBEDDING_DEBIASWE_FILE, self.DEBIASED_TARGET_FILE = get_debias_files_from_config(consts_config_str)
def __check_all_lines_exist(self):
"""
checks that each line in the embedding table, printed in translate run, exists (since the lines are iterated with threads
and are printed in random order)
"""
lines_count = np.zeros(self.DICT_SIZE)
with open(self.OUTPUT_TRANSLATE_FILE, "r") as output_translate_file:
while True:
line = output_translate_file.readline()
if not line:
break
if line.__contains__("enc_inputs for word"):
a = line.split("enc_inputs for word")
for i in a:
if i.__contains__("[") and not i.__contains__("embedding_table shape"):
line_num = i.split("[")[0]
lines_count[int(line_num)] += 1
# for i in range(len(lines_count)):
# print("line num "+str(i)+": "+str(lines_count[i]))
print("all lines exist?: " + str(not lines_count.__contains__(0)))
return not lines_count.__contains__(0)
def __get_non_debiased_embedding_table(self):
"""
if the embedding table , printed in translate run, contains all lines, creates a matrix with the right order of
lines of the embedding matrix learned during the train phase.
then it saves the matrix to pickle and returns it
:return:
the embedding table as an numpy array
"""
if not self.__check_all_lines_exist():
raise Exception("not all lines exist in the embedding table")
        embedding_matrix = (np.zeros((self.DICT_SIZE, EMBEDDING_SIZE))).astype(str)
lines_count = np.zeros(self.DICT_SIZE)
with open(self.OUTPUT_TRANSLATE_FILE, "r") as output_translate_file:
while True:
line = output_translate_file.readline()
if not line:
break
if line.__contains__("enc_inputs for word"):
a = line.split("enc_inputs for word")
for i in a:
if i.__contains__("[") and not i.__contains__("embedding_table shape"):
line_num = int(i.split("[")[0])
if lines_count[line_num] > 0:
continue
lines_count[line_num] += 1
row = i[i.find("[") + 1:i.rfind("]")]
row = row.split(" ")
embedding_matrix[line_num, :] = row
embedding_matrix = np.array(embedding_matrix, dtype=np.double)
with open(self.EMBEDDING_TABLE_FILE, 'wb') as file_:
pickle.dump(embedding_matrix, file_)
self.non_debiased_embeddings = embedding_matrix
return embedding_matrix
def __prepare_data_to_debias(self, inlp=False):
"""
given path to dictionary, the path to the embedding table saved in get_embedding_table() and the file name to save the data,
it prepares the embedding table in the format of <word> <embedding>/n , this is the format that debias() in debiaswe, uses.
saves the embedding with the desired format to self.EMBEDDING_DEBIASWE_FILE
"""
with open(self.ENG_DICT_FILE, 'r') as dict_file, open(self.EMBEDDING_DEBIASWE_FILE, 'w') as dest_file:
eng_dictionary = json.load(dict_file)
if inlp:
s = np.shape(self.non_debiased_embeddings)
dest_file.write(str(s[0])+" "+str(s[1]) +"\n")
for w, i in eng_dictionary.items():
dest_file.write(w + " " + ' '.join(map(str, self.non_debiased_embeddings[i, :])) + "\n")
def debias_inlp(self, by_pca):
model, vecs, words = load_word_vectors(fname=self.EMBEDDING_DEBIASWE_FILE)
num_vectors_per_class = 7500
if by_pca:
pairs = [("male", "female"), ("masculine", "feminine"), ("he", "she"), ("him", "her")]
gender_vecs = [model[p[0]] - model[p[1]] for p in pairs]
pca = PCA(n_components=1)
pca.fit(gender_vecs)
gender_direction = pca.components_[0]
else:
gender_direction = model["he"] - model["she"]
gender_unit_vec = gender_direction / np.linalg.norm(gender_direction)
masc_words_and_scores, fem_words_and_scores, neut_words_and_scores = project_on_gender_subspaces(
gender_direction, model, n=num_vectors_per_class)
masc_words, masc_scores = list(zip(*masc_words_and_scores))
neut_words, neut_scores = list(zip(*neut_words_and_scores))
fem_words, fem_scores = list(zip(*fem_words_and_scores))
masc_vecs, fem_vecs = get_vectors(masc_words, model), get_vectors(fem_words, model)
neut_vecs = get_vectors(neut_words, model)
n = min(3000, num_vectors_per_class)
all_significantly_biased_words = masc_words[:n] + fem_words[:n]
all_significantly_biased_vecs = np.concatenate((masc_vecs[:n], fem_vecs[:n]))
all_significantly_biased_labels = np.concatenate((np.ones(n, dtype=int),
np.zeros(n, dtype=int)))
all_significantly_biased_words, all_significantly_biased_vecs, all_significantly_biased_labels = sklearn.utils.shuffle(
all_significantly_biased_words, all_significantly_biased_vecs, all_significantly_biased_labels)
# print(np.random.choice(masc_words, size = 75))
print("TOP MASC")
print(masc_words[:50])
# print("LAST MASC")
# print(masc_words[-120:])
print("-------------------------")
# print(np.random.choice(fem_words, size = 75))
print("TOP FEM")
print(fem_words[:50])
# print("LAST FEM")
# print(fem_words[-120:])
print("-------------------------")
# print(np.random.choice(neut_words, size = 75))
print(neut_words[:50])
print(masc_scores[:10])
print(masc_scores[-10:])
print(neut_scores[:10])
random.seed(0)
np.random.seed(0)
X = np.concatenate((masc_vecs, fem_vecs, neut_vecs), axis=0)
# X = (X - np.mean(X, axis = 0, keepdims = True)) / np.std(X, axis = 0)
y_masc = np.ones(masc_vecs.shape[0], dtype=int)
y_fem = np.zeros(fem_vecs.shape[0], dtype=int)
y_neut = -np.ones(neut_vecs.shape[0], dtype=int)
# y = np.concatenate((masc_scores, fem_scores, neut_scores))#np.concatenate((y_masc, y_fem))
y = np.concatenate((y_masc, y_fem, y_neut))
X_train_dev, X_test, y_train_dev, Y_test = sklearn.model_selection.train_test_split(X, y, test_size=0.3,
random_state=0)
X_train, X_dev, Y_train, Y_dev = sklearn.model_selection.train_test_split(X_train_dev, y_train_dev,
test_size=0.3,
random_state=0)
print("Train size: {}; Dev size: {}; Test size: {}".format(X_train.shape[0], X_dev.shape[0], X_test.shape[0]))
gender_clf = LinearSVC
# gender_clf = SGDClassifier
# gender_clf = LogisticRegression
# gender_clf = LinearDiscriminantAnalysis
# gender_clf = Perceptron
params_svc = {'fit_intercept': False, 'class_weight': None, "dual": False, 'random_state': 0}
params_sgd = {'fit_intercept': False, 'class_weight': None, 'max_iter': 1000, 'random_state': 0}
params = params_svc
# params = {'loss': 'hinge', 'n_jobs': 16, 'penalty': 'l2', 'max_iter': 2500, 'random_state': 0}
# params = {}
n = 35
min_acc = 0
is_autoregressive = True
dropout_rate = 0
P, rowspace_projs, Ws = get_debiasing_projection(gender_clf, params, n, 256, is_autoregressive, min_acc,
X_train, Y_train, X_dev, Y_dev,
Y_train_main=None, Y_dev_main=None,
by_class=False, dropout_rate=dropout_rate)
return (P.dot(vecs.T)).T, gender_direction
def __debias_bolukbasi(self, debiased_target_file=None):
"""
debiases the nematus embedding table that was created through the learning phase and saved in prepare_data_to_debias()
saves the
"""
self.E = we.WordEmbedding(self.EMBEDDING_DEBIASWE_FILE)
if debiased_target_file is None:
debiased_target_file = self.DEBIASED_TARGET_FILE
with open(DEFINITIONAL_FILE, "r") as f:
defs = json.load(f)
print("definitional", defs)
with open(EQUALIZE_FILE, "r") as f:
equalize_pairs = json.load(f)
with open(GENDER_SPECIFIC_FILE, "r") as f:
gender_specific_words = json.load(f)
print("gender specific", len(gender_specific_words), gender_specific_words[:10])
if self.E is None:
raise Exception("WordEmbedding E was not created")
print("Debiasing...")
debias(self.E, gender_specific_words, defs, equalize_pairs)
print("Saving to file...")
        if self.EMBEDDING_DEBIASWE_FILE.endswith(".bin"):
self.E.save_w2v(debiased_target_file)
else:
self.E.save(debiased_target_file)
def load_debias_format_to_array(self, filename=None):
"""
loads a debiased embedding from filename and transforms it to numpy array
:return: the debiased embedding table as numpy array
"""
if filename is None:
filename = self.DEBIASED_TARGET_FILE
embedding_table = []
with open(filename, "rb") as f:
while True:
line = f.readline()
if not line:
break
line = line.decode("utf-8")
embedding = line.split(" ")[1:]
embedding_table.append(embedding)
if (np.shape(embedding_table)[0] != self.DICT_SIZE):
embedding_table = np.vstack([embedding_table, self.non_debiased_embeddings[-1]])
return np.array(embedding_table).astype(np.float32)
def __print_bias_amount(self, word, gender_direction, debiased_embedding, orig_embedding):
if self.E is None:
with open(self.ENG_DICT_FILE, 'r') as dict_file:
index = json.load(dict_file)
else:
index = self.E.index
if word in index:
word_index = index[word]
            bias_before = '{:.20f}'.format(np.dot(orig_embedding[word_index], gender_direction))
            bias_after = '{:.20f}'.format(np.dot(debiased_embedding[word_index], gender_direction))
            print(word + ": bias before debias= " + bias_before + ". bias after debias= " + bias_after)
def debias_sanity_check(self, embedding_table_file=None, debiased_embedding_table=None, gender_direction=None):
print("*******************sanity check**************************")
if embedding_table_file is None:
embedding_table_file = self.EMBEDDING_TABLE_FILE
        if debiased_embedding_table is not None:
debiased_embedding = debiased_embedding_table
else:
debiased_embedding = self.load_debias_format_to_array(self.DEBIASED_TARGET_FILE)
debiased_embedding = debiased_embedding.astype('float32')
with open(DEFINITIONAL_FILE, "r") as f:
defs = json.load(f)
if gender_direction is None:
if self.E is None:
raise Exception("WordEmbedding E was not created")
gender_direction = we.doPCA(defs, self.E).components_[0]
with open(PROFESSIONS_FILE, "r") as f:
professions = json.load(f)
with open(embedding_table_file, 'rb') as embedding_file:
orig_embedding = pickle.load(embedding_file)
orig_embedding = orig_embedding.astype('float32')
print("--------professions--------")
for p in professions:
self.__print_bias_amount(p[0], gender_direction, debiased_embedding, orig_embedding)
with open(DEFINITIONAL_FILE, "r") as f:
defs = json.load(f)
print("--------gender specific--------")
for a, b in defs:
self.__print_bias_amount(a, gender_direction, debiased_embedding, orig_embedding)
self.__print_bias_amount(b, gender_direction, debiased_embedding, orig_embedding)
print("********************************************************")
def load_and_debias(self,inlp):
embedding_matrix = self.__get_non_debiased_embedding_table()
self.__prepare_data_to_debias(inlp)
# self.__debias_bolukbasi()
# return self.load_debias_format_to_array()
return self.debias_inlp(False)
if __name__ == '__main__':
CONSTS_CONFIG_STR = "{'USE_DEBIASED': 0, 'LANGUAGE': 0, 'COLLECT_EMBEDDING_TABLE': 0, 'PRINT_LINE_NUMS': 0}"
debias_manager = DebiasManager(CONSTS_CONFIG_STR)
debiased_embedding, gender_direction = debias_manager.load_and_debias(inlp=True)
print(np.shape(debiased_embedding))
print(debiased_embedding)
debias_manager.debias_sanity_check(debiased_embedding_table=debiased_embedding, gender_direction=gender_direction)
| StarcoderdataPython |
19920 | <gh_stars>0
__author__ = 'sibirrer'
# this file contains a class to make a Moffat profile
__all__ = ['Moffat']
class Moffat(object):
"""
this class contains functions to evaluate a Moffat surface brightness profile
.. math::
I(r) = I_0 * (1 + (r/\\alpha)^2)^{-\\beta}
with :math:`I_0 = amp`.
"""
def __init__(self):
self.param_names = ['amp', 'alpha', 'beta', 'center_x', 'center_y']
self.lower_limit_default = {'amp': 0, 'alpha': 0, 'beta': 0, 'center_x': -100, 'center_y': -100}
self.upper_limit_default = {'amp': 100, 'alpha': 10, 'beta': 10, 'center_x': 100, 'center_y': 100}
def function(self, x, y, amp, alpha, beta, center_x=0, center_y=0):
"""
2D Moffat profile
:param x: x-position (angle)
:param y: y-position (angle)
:param amp: normalization
:param alpha: scale
:param beta: exponent
:param center_x: x-center
:param center_y: y-center
:return: surface brightness
"""
x_shift = x - center_x
y_shift = y - center_y
return amp * (1. + (x_shift**2+y_shift**2)/alpha**2)**(-beta)
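# Hedged usage sketch (added for illustration; the parameter values below are arbitrary):
if __name__ == "__main__":
    moffat = Moffat()
    # Surface brightness at (x, y) = (0.5, 0.0) for a unit-amplitude profile.
    print(moffat.function(0.5, 0.0, amp=1.0, alpha=1.0, beta=2.5))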
| StarcoderdataPython |
3384726 | <reponame>rtraas/morpy<gh_stars>0
import rebound as rb
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def perspective(sim,savename=None):
#fig = rb.OrbitPlot(sim,slices=0.5,xlim=[-5.,5],ylim=[-5.,5])
fig = rb.OrbitPlot(sim,slices=0.5)
if savename is not None:
plt.savefig(savename,bbox_inches='tight',dpi=300)
| StarcoderdataPython |
3353327 | import tensorflow as tf
import tensorflow.contrib.layers as layers
import numpy as np
import chess
from chess_env import FULL_CHESS_INPUT_SHAPE
# A convolutional block as described in AlphaGo Zero
def conv_block(tensor, specs):
tensor = layers.convolution2d(tensor,
num_outputs=specs['num_outputs'],
kernel_size=specs['kernel_size'],
stride=specs['stride'],
activation_fn=None)
tensor = tf.layers.batch_normalization(tensor)
tensor = tf.nn.relu(tensor)
return tensor
# A residual block as described in AlphaGo Zero
def residual_block(tensor, specs):
input_tensor = tensor
tensor = conv_block(tensor, specs)
tensor = layers.convolution2d(tensor,
num_outputs=specs['num_outputs'],
kernel_size=specs['kernel_size'],
stride=specs['stride'],
activation_fn=None)
tensor = tf.layers.batch_normalization(tensor)
tensor += input_tensor
tensor = tf.nn.relu(tensor)
return tensor
def build_model(board_placeholder,
legality_mask_placeholder,
scope,
shared_layers,
policy_head,
value_head):
"""
Returns the output tensors for an model based on the layers in shared_layers,
policy_head, and value_head.
shared_layers is a list of dicts, each dict representing a layer.
- Convolutional layers:
- d['layer'] <- 'conv'
- d['num_outputs'], d['kernel_size'], d['stride'] should be ints
- d['activation_fn'] is a function
- Residual layers:
- d['layer'] <- 'residual'
- other keys same as convolutional
- Fully connected layers:
- d['layer'] <- 'fc'
- d['num_outputs'] is an int
- d['activation_fn'] is a function
policy_head and value_head have the same structure as above but represent
the layers for the policy head and value head, respectively.
returns the policy output and the value output in a tuple
"""
out = board_placeholder
for specs in shared_layers:
if specs['layer'] == 'conv':
out = conv_block(out, specs)
elif specs['layer'] == 'residual':
out = residual_block(out, specs)
elif specs['layer'] == 'fc':
out = layers.flatten(out)
out = layers.fully_connected(out,
num_outputs=specs['num_outputs'],
activation_fn=specs['activation_fn'])
# Policy head
policy_out = out
for specs in policy_head:
if specs['layer'] == 'conv':
policy_out = conv_block(policy_out, specs)
elif specs['layer'] == 'fc':
policy_out = layers.flatten(policy_out)
policy_out = layers.fully_connected(policy_out,
num_outputs=specs['num_outputs'],
activation_fn=specs['activation_fn'])
x = tf.exp(policy_out) * legality_mask_placeholder
# Needs reshape to broadcast properly
policy_out = x / tf.reshape(tf.reduce_sum(x, axis=1), shape=((tf.shape(x)[0],) + (1,)))
# Value head
value_out = out
for specs in value_head:
if specs['layer'] == 'conv':
value_out = conv_block(value_out, specs)
elif specs['layer'] == 'fc':
value_out = layers.flatten(value_out)
value_out = layers.fully_connected(value_out,
num_outputs=specs['num_outputs'],
activation_fn=specs['activation_fn'])
return policy_out, value_out
class DualNet(object):
def __init__(self,
sess,
env,
learning_rate=0.01,
regularization_mult=0.01,
n_residual_layers=2,
#input_shape=FULL_CHESS_INPUT_SHAPE,
#action_size=POSITION_POSITION_ACTION_SIZE,
num_convolutional_filters=256
):
"""
sess: tensorflow session
env: environment to determine move legality with
learning_rate: learning rate for gradient descent
regularization_mult: multiplier for weight regularization loss
n_residual_layers: how many residual layers to add, as described in
AlphaGo Zero.
num_convolutional_filters: how many convolutional filters to have in
each convolutional layer
"""
self.action_size = env.action_size
self.board_placeholder = tf.placeholder(tf.float32, [None] + list(env.input_shape))
self.env = env
shared_layers = [{'layer': 'conv', 'num_outputs':
num_convolutional_filters, 'stride': 3,
'kernel_size': 1, 'activation_fn': tf.nn.relu}]
# add n_residual_layers to the shared layers
shared_layers += n_residual_layers*[{'layer': 'residual',
'num_outputs': num_convolutional_filters,
'stride': 1, 'kernel_size': 3,
'activation_fn': tf.nn.relu}]
policy_layers = [{'layer': 'conv', 'num_outputs': 2, 'stride': 1,
'kernel_size': 1, 'activation_fn': tf.nn.relu},
{'layer': 'fc', 'num_outputs': self.action_size,
'activation_fn': None}]
value_layers = [{'layer': 'conv', 'num_outputs': 1, 'stride': 1,
'kernel_size': 1, 'activation_fn': tf.nn.relu},
{'layer': 'fc', 'num_outputs': num_convolutional_filters,
'activation_fn': tf.nn.relu},
{'layer': 'fc', 'num_outputs': 1,
'activation_fn': tf.nn.tanh}]
self.boards = None
self.move_legality_mask = tf.placeholder(tf.float32, [None, self.action_size])
self.policy_predict, self.value_predict = build_model(self.board_placeholder,
self.move_legality_mask,
scope='net',
shared_layers=shared_layers,
policy_head=policy_layers,
value_head=value_layers)
self.z = tf.placeholder(tf.float32, [None])
# Reshape z for proper broadcasting
reshaped_z = tf.reshape(self.z, [tf.shape(self.z)[0], 1])
self.pi = tf.placeholder(tf.float32, [None, self.action_size])
self.value_diff = self.value_predict - reshaped_z
self.value_loss = tf.reduce_sum(tf.square(self.value_diff))
        # when the 0s become a small epsilon (0.0001) for illegal actions, we are counting on the fact that they are
        # nullified by the corresponding index of self.pi being 0
self.policy_loss = tf.reduce_sum(tf.multiply(self.pi, tf.log(self.policy_predict + 0.0001)))
self.regularization_loss = layers.apply_regularization(layers.l2_regularizer(regularization_mult),
weights_list=tf.trainable_variables())
self.loss = self.value_loss - self.policy_loss + tf.reduce_sum(self.regularization_loss)
self.update_op = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)
self.sess = sess
def __call__(self, inp):
"""
Gets a feed-forward prediction for a batch of input boards of shape set
during initialization.
"""
move_legality_mask = np.zeros(shape=(inp.shape[0], self.action_size))
for i in range(inp.shape[0]):
move_legality_mask[i] = self.env.get_legality_mask(inp[i])
policy, value = self.sess.run([self.policy_predict, self.value_predict],
feed_dict={self.board_placeholder: inp,
self.move_legality_mask: move_legality_mask})
return policy, value
def train(self, states, pi, z, token_legality_mask=None):
"""
Performs one step of gradient descent based on a batch of input boards,
MCTS policies, and rewards of shape [None, 1]. Shapes of inputs and policies
should match input_shape and action_size as set during initialization.
returns the batch loss
The token_legality_mask is just for test purposes so we can input a mask of our choosing
Otherwise, it gets the legality_mask from the environment
"""
if token_legality_mask is None:
move_legality_mask = np.zeros(shape=(len(states), self.action_size))
for i in range(len(states)):
move_legality_mask[i] = self.env.get_legality_mask(states[i])
else:
move_legality_mask = token_legality_mask
_, loss = self.sess.run([self.update_op, self.loss], feed_dict={self.board_placeholder: states,
self.pi: pi,
self.z: z,
self.move_legality_mask: move_legality_mask})
return loss
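# Hedged usage sketch (added for illustration). `_DummyEnv` is an assumption standing in
# for the real chess environment, which must expose `input_shape`, `action_size` and
# `get_legality_mask`; the shapes and sizes below are made up, and TF 1.x is assumed
# since this module relies on tensorflow.contrib.
if __name__ == "__main__":
    class _DummyEnv(object):
        input_shape = (8, 8, 13)
        action_size = 64 * 64
        def get_legality_mask(self, state):
            return np.ones(self.action_size)
    with tf.Session() as demo_sess:
        demo_net = DualNet(demo_sess, _DummyEnv(), n_residual_layers=1,
                           num_convolutional_filters=8)
        demo_sess.run(tf.global_variables_initializer())
        demo_policy, demo_value = demo_net(np.zeros((1, 8, 8, 13)))
        print(demo_policy.shape, demo_value.shape)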
| StarcoderdataPython |
1644568 | <reponame>rgerganov/mykioxi
#!/usr/bin/env python3
import datetime
import asyncio
import argparse
import sys
import bleak
CHARACTERISTIC_UUID = "49535343-1e4d-4bd9-ba61-23c647249616"
class DataPrinter:
def __init__(self):
self.last_bpm = -1
self.last_spo2 = -1
self.last_pleth = -1
def multiline(self, pleth, bpm, spo2):
if bpm != self.last_bpm or spo2 != self.last_spo2:
print_bpm = '---' if bpm == 255 else str(bpm).rjust(3)
print_spo2 = '---' if spo2 == 127 else str(spo2).rjust(3)
now = datetime.datetime.now()
timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
print("[{}]\tBPM: {}\tSpO2: {}".format(timestamp, print_bpm, print_spo2))
self.last_bpm, self.last_spo2 = bpm, spo2
def oneline(self, pleth, bpm, spo2):
if pleth != self.last_pleth or bpm != self.last_bpm or spo2 != self.last_spo2:
print_bpm = '---' if bpm == 255 else str(bpm).rjust(3)
print_spo2 = '---' if spo2 == 127 else str(spo2).rjust(3)
print_pleth = ('*' * (pleth // 10)).ljust(10)
sys.stdout.write("BPM:{} [{}] SpO2:{}\r".format(print_bpm, print_pleth, print_spo2))
self.last_pleth, self.last_bpm, self.last_spo2 = pleth, bpm, spo2
def make_handler(user_handler):
def raw_handler(sender, data):
pleth = data[1]
bpm = data[3] | ((data[2] & 64) << 1)
spo2 = data[4]
user_handler(pleth, bpm, spo2)
return raw_handler
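# Hedged decoding example (illustrative byte values, not a real captured frame):
# for data = bytes([0x81, 60, 0x00, 72, 98]) the handler above yields
#   pleth = data[1] = 60
#   bpm   = data[3] | ((data[2] & 64) << 1) = 72   (bit 6 of data[2] supplies the high bit for BPM values above 127)
#   spo2  = data[4] = 98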
async def discover():
devices = await bleak.discover()
for d in devices:
if d.name == "BerryMed" or d.address.startswith("00:A0:50"):
return d
async def read_data(address, done, handler):
async with bleak.BleakClient(address) as client:
x = await client.is_connected()
print("Connected: {0}".format(x))
notification_handler = make_handler(handler)
await client.start_notify(CHARACTERISTIC_UUID, notification_handler)
await done.wait()
print('Disconnecting ...')
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--device", help="Device address")
parser.add_argument("--multiline", help="Multiline data output", action="store_true")
args = parser.parse_args()
loop = asyncio.get_event_loop()
if not args.device:
dev = loop.run_until_complete(discover())
if dev is None:
print("No device found")
sys.exit(1)
print("Found: {} ({})".format(dev.address, dev.name))
address = dev.address
else:
address = args.device
dp = DataPrinter()
printer = dp.multiline if args.multiline else dp.oneline
done = asyncio.Event()
task = loop.create_task(read_data(address, done, printer))
try:
loop.run_until_complete(task)
except KeyboardInterrupt:
done.set()
loop.run_until_complete(task)
if __name__ == "__main__":
main()
| StarcoderdataPython |
1773806 | # Google
import argparse
from googletrans import Translator
# How to use it: in main: get_main_args = analyzer_input_args()
# then input_excel_name = get_main_args.input_excelname, etc.
def analyzer_input_args():
parser = argparse.ArgumentParser(description='Game Simulation Parameters')
parser.add_argument('--input_excelname', type=str,default="default", help="Default name: 'setting.xlsx'")
parser.add_argument('--output_folder', type=str,default='output', help="Default: current path")
parser.add_argument('--show_info', type=bool, default=False,help="Default: False, setting 'True' to printout info")
parser.add_argument('--debug_mode', type=bool, default=False,help="Default: False, setting 'True' to printout debug detail information via print method in code")
parser.add_argument('--show_estimate_time', type=bool,default=False, help="Estimate the processing cnts")
parser.add_argument('--draw_line', type=int,default=False, help="SLOT Sumulator Used Only")
parser.add_argument('--draw_fig', type=bool,default=False, help="Draw support fig.png during running")
parser.add_argument('--unit_test', type=bool,default=False, help="Initial the unit_test for specific funciton in each project")
parser.add_argument('--unit_test__case_number', type=int, default=0, help="Unit_test_case_number: You must set --unit_test True first.")
parser.add_argument('--language',type=str, default='en',help='Default language is English: support German, French, Japanese, Korean Spanish, Russian.....: de, fr, ko, ja,es,ru, zh-TW, zh-CN')
#Get Args:
args = parser.parse_args()
return args
# This class is used to translate text, for example:
# Input: 'Je vais bien'
# Output: Translated(src=fr, dest=en, text=I'm fine, pronunciation=I'm fine, extra_data="{'translat...")
class language_translator():
def __init__(self):
self.name = 'language_translater'
self.language_dict = {'zh-TW': "Traditional Chinese" , 'zh-CN':"Simplified Chinese" ,'ja': "Japanese" ,'fr': "French",'de':"German",'ko':"Korean",'ru':"Russian",'es':"Spanish",'en':'English'}
self.translator = Translator()
def set_language_code(self, apply_language_code):
        # Starts as 'en' by default; it will change after the setting check below
        if apply_language_code in self.language_dict:
            # Change the language_code here
self.language_code = apply_language_code
        else:
            try:
                # The code is not in the default list above, but it may still be applicable
                # in the google lib, so verify it by translating with it before accepting it.
                _res = self.translator.translate("This language code is not in the default list description, but it is applicable in the google lib", dest=apply_language_code)
                print(_res.extra_data['translation'][0][0])
                self.language_code = apply_language_code
            except:
                print("Error: The code is not supported, please check.... code: %2s" % apply_language_code)
def __obtenir_language_full_norm__(self,input_code):
msg = "Current apply language is " + self.language_code + "\n"
msg += "Description is %20s"%self.language_dict[self.language_code] + "\n"
return msg
def show_info(self):
msg = self.name + "\n"
for any_lan in self.language_dict:
_description = self.language_dict[any_lan]
msg += "This translator support: %4s as %20s"%(any_lan,_description)
msg += "\n"
try:
msg += self.__obtenir_language_full_norm__(self.language_code)
except:
print("[INFO]: No setting for self.language_code")
return msg
def translate_par_les_mots(self,input_str):
__result = self.translator.translate(input_str)
        # Get the text:
text = __result.extra_data['translation'][0][0]
return text
def translate_par_les_mots_et_dest(self,input_str,input_des='en'):
try:
__result = self.translator.translate(input_str,dest= self.language_code)
except:
__result = self.translator.translate(input_str,dest=input_des)
        # Get the text:
text = __result.extra_data['translation'][0][0]
return text
def print(self,input_str,input_dest='en'):
return self.translate_par_les_mots_et_dest(input_str,input_dest)
if __name__ == '__main__':
######################## TEST Zone for laugnage_translator() ###############
print("######################## TEST Zone for laugnage_translator() ###############\n\n")
translator = language_translator()
_info = translator.show_info()
print(_info)
print("\n")
print("load pickle example:")
print("-----------------------------------------")
print("\n\n")
print("try:")
print(" ja_lan_df = pd.read_pickle('ja_lan_env.pkl')")
print(" apply_lan = ja_lan_df['ja_lan'][0]")
print(" print(\"[INFO]: Your apply language is {%s}\"%apply_lan)")
print("except:")
print(" print(\"[INFO]: No ja_lan_env.pkl found !\")")
print(" print(\"Set language as default 'English' \")")
print("\n")
print("---------------------------------------------")
print("\n\n")
for any_language in translator.language_dict:
# Input words
my_words = "Start to convert format png to jpg, Please type the folder name: 'work_tem' for example"
print("Translating .... %30s with Language Code: %4s\n"%(my_words,any_language))
_info = translator.print(my_words,any_language)
print(_info)
print("--------------------------------------------------------------------------------------------\n\n")
| StarcoderdataPython |
3396671 | <filename>scripts/check_latex_spelling.py
import argparse, os
# incorrect -> correct
corrections = {
"pointcloud" : "point cloud",
"Pointcloud" : "Point cloud",
"voxelgrid" : "voxel grid",
"Voxelgrid" : "Voxel grid",
"levelset" : "level set",
"Levelset" : "Level set",
"ray-trac" : "raytrac", # ray-trac(ed|ing|er)
"Ray-trac" : "Raytrac",
"hyper-network" : "hypernetwork",
"Hyper-network" : "Hypernetwork",
"data-set" : "dataset",
"Data-set" : "Dataset",
"data set" : "dataset",
"Data set" : "Dataset",
"edit-able" : "editable",
"Edit-able" : "Editable",
"tri-linear" : "trilinear",
"Tri-linear" : "Trilinear",
"hyper-parameter" : "hyperparameter",
"Hyper-parameter" : "Hyperparameter",
"parametriz" : "parameteriz", # parameteriz(ing|ed)
"Parametriz" : "Parameteriz",
"underparameteriz" : "under-parameteriz",
"Underparameteriz" : "Under-parameteriz",
"overparameteriz" : "over-parameteriz",
"Overparameteriz" : "Over-parameteriz",
"auto-encod" : "autoencod", # auto-encod(er|ing|ed)
"Auto-encod" : "Autoencod",
"finetun" : "fine-tun", # fine-tun(ing|es)
"Finetun" : "Fine-tun",
"pytorch" : "PyTorch",
"Pytorch" : "PyTorch",
"tensorflow" : "TensorFlow",
"Tensorflow" : "TensorFlow",
"feedforward" : "feed-forward", # Based on 1994 paper IEEE
"Feedforward" : "Feed-forward",
"up-sampling": "upsampling",
"backpropagat": "back-propagat", # 1986 paper
"Backpropagat": "Back-propagat",
"realtime": "real-time", # verified
}
warning = ['--']
excluded_filenames = [
"macros.tex",
"archived_tex",
"fixed_tex",
]
def fix(text):
for incorrect, correct in corrections.items():
text = text.replace(incorrect, correct)
return text
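# Hedged example (illustrative sentence): fix("a pointcloud data-set") returns
# "a point cloud dataset" after the replacements above are applied in order.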
def fix_one_file(input_fname, output):
print("Fixing one file: {} --> {}".format(input_fname, output))
f = open(input_fname, mode='r')
text = f.read()
f.close()
text = fix(text)
if output:
o = open(output, mode='w+', encoding="utf-8")
else:
o = open(input_fname, mode='w+', encoding="utf-8")
    o.write(text)
o.close()
def fix_by_item(text, user_approval=True):
no_fix = True
for incorrect, correct in corrections.items():
ind = 0
find_ind = text[ind:].find(incorrect)
lo = find_ind + ind
hi = lo + len(incorrect)
while find_ind >= 0:
print(text[lo-30:lo] + "[" + text[lo:hi] + "]" + text[hi:hi+30:])
print(text[lo-30:lo] + "[" + correct + "]" + text[hi:hi+30:])
action = input('Enter your approval (enter/n/replacement):')
if action == "":
replacement = correct
no_fix = False
elif action == "n":
replacement = incorrect
else:
replacement = action
no_fix = False
# Replace
text = text[:lo] + replacement + text[hi:]
hi = lo + len(replacement)
# Find next
ind = hi
find_ind = text[ind:].find(incorrect)
lo = find_ind + ind
hi = lo + len(incorrect)
return text, no_fix
if __name__ == "__main__":
"""
Usage:
python fix.py --output_dir fixed_tex --all --by_item --approval
"""
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str)
parser.add_argument('--output', type=str)
parser.add_argument('--output_dir', type=str)
parser.add_argument('--all', action="store_true", default=False)
parser.add_argument('--by_item', action="store_true", default=False)
parser.add_argument('--approval', action="store_true", default=False)
args = parser.parse_args()
if args.output_dir and (not os.path.exists(args.output_dir)):
os.mkdir(args.output_dir)
if args.all:
listOfFiles = list()
for (dirpath, dirnames, filenames) in os.walk("."):
listOfFiles += [os.path.join(dirpath, file) for file in filenames]
to_check = list()
for input_fname in listOfFiles:
add = True
if (".tex" == input_fname[-4:]):
for exclude in excluded_filenames:
if exclude in input_fname:
add = False
if add: to_check.append(input_fname)
print("Files to fix ({}): {}".format(len(to_check), "\n".join(to_check)))
for input_fname in to_check:
if args.output_dir:
output = os.path.join(args.output_dir, os.path.basename(input_fname))
else:
output = input_fname
if args.by_item:
print("===================================")
print("Fixing file by item: ", input_fname)
with open(input_fname, mode='r') as f:
text = f.read()
text, no_fix = fix_by_item(text, args.approval)
if not no_fix:
with open(output, mode='w+', encoding="utf-8") as f:
f.write(text)
else:
fix_one_file(input_fname, output)
else:
        if args.output_dir:
            output = os.path.join(args.output_dir, os.path.basename(args.input))
        else:
            output = args.output
        fix_one_file(args.input, output)
| StarcoderdataPython |
76079 | """
Splits the given Manga Volume CBZ into the CBZs of the individual chapters
"""
import re
import shutil
import zipfile
from collections import defaultdict
from operator import attrgetter
from pathlib import Path
root = Path("/path/to/volumes/")
output = root / "chapters"
CHAP_PAT = re.compile(r"c(?P<num>\d{3})")
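# Hedged example (the page name below is made up): for a page called
# "vol01/c013_p004.jpg", CHAP_PAT.search(...) captures num == "013", which is the
# key that page is grouped under in process_volume below.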
def process_volume(archive: zipfile.ZipFile):
chapters: dict[str, list[zipfile.ZipInfo]] = defaultdict(list)
for page in archive.filelist:
m = CHAP_PAT.search(page.filename)
assert m is not None, page
chapters[m['num']].append(page)
for chapter, pages in chapters.items():
chap_dir = output / chapter
print(chap_dir)
chap_dir.mkdir(exist_ok=True, parents=True)
archive.extractall(chap_dir, map(attrgetter("filename"), pages))
with zipfile.ZipFile(chap_dir.with_suffix(".cbz"), "w") as chap_zip:
chap_zip.write(chap_dir, chapter)
# for page in chap_dir.iterdir():
# chap_zip.write(page, page.name)
shutil.rmtree(chap_dir)
for file in root.iterdir():
if file.suffix != ".cbz":
continue
with zipfile.ZipFile(file) as archive:
process_volume(archive)
| StarcoderdataPython |
167988 | class Constants:
ha2kcalmol = 627.509 # Hartee^-1 kcal mol^-1
ha2kJmol = 2625.50 # Hartree^-1 kJ mol^-1
eV2ha = 0.0367493 # Hartree ev^-1
a02ang = 0.529177 # Å bohr^-1
ang2a0 = 1.0 / a02ang # bohr Å^-1
kcal2kJ = 4.184 # kJ kcal^-1
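# Hedged usage example (added for illustration; the bond length is an arbitrary value):
if __name__ == "__main__":
    print(1.0 * Constants.ha2kcalmol)   # 1 Ha expressed in kcal mol^-1
    print(1.4 * Constants.a02ang)       # a 1.4 bohr distance expressed in Angstroms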
| StarcoderdataPython |
4822177 | import unittest
from jump_game_iv import Solution
class Test(unittest.TestCase):
def test_1(self):
solution = Solution()
self.assertEqual(
solution.minJumps([100, -23, -23, 404, 100, 23, 23, 23, 3, 404]), 3
)
def test_2(self):
solution = Solution()
self.assertEqual(solution.minJumps([7]), 0)
def test_3(self):
solution = Solution()
self.assertEqual(solution.minJumps([7, 6, 9, 6, 9, 6, 9, 7]), 1)
def test_4(self):
solution = Solution()
self.assertEqual(solution.minJumps([6, 1, 9]), 2)
def test_5(self):
solution = Solution()
self.assertEqual(solution.minJumps([11, 22, 7, 7, 7, 7, 7, 7, 7, 22, 13]), 3)
def test_6(self):
solution = Solution()
self.assertEqual(
solution.minJumps(
[-76, 3, 66, -32, 64, 2, -19, -8, -5, -93, 80, -5, -76, -78, 64, 2, 16]
),
5,
)
def test_7(self):
solution = Solution()
self.assertEqual(solution.minJumps([7] * (5 * 10 ** 4) + [11]), 2)
def test_8(self):
solution = Solution()
self.assertEqual(
solution.minJumps(
[7, 6, 8, 6, 8, 6, 8, 6, 8, 6, 8, 6, 8, 8, 8, 6, 6, 5, 6]
+ [7] * (5 * 10 ** 4)
+ [8, 6, 8, 6, 7]
+ [11]
),
2,
)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3256324 | <filename>Core/Logic/FJudgementCompiler.py
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
import Core.Common.FGlobals as FGlobals
from Core.Logic.FJudgement import *
class FJudgementCompiler:
""" Used to generate badge earned statements.
This class compiles together all the received badge judgements
into one coherent badge earned statement. """
def __init__(self):
self.__badgesStatus = []
for i in range(len(FGlobals.badgeLevels)):
# Create a new tally list for each badge level.
tally = []
for i in range(FJudgement.STATUS_COUNT): tally.append(0)
self.__badgesStatus.append(tally)
def ProcessJudgement(self, badgeIndex, badgeResult):
""" Considers a local judgement for the given badge.
@param badgeIndex The integer index of the badge that
this judgement relates to.
@param badgeResult The local judgement result to consider
for the given badge level. """
# Just increment the total for this result value in the tally for this badge level.
tally = self.__badgesStatus[badgeIndex]
tally[badgeResult] = tally[badgeResult] + 1
def RemoveJudgement(self, badgeIndex, badgeResult):
""" In order to support partial refreshes, this function allows the UI to remove one
judgement from the current tally.
@param badgeIndex The integer index of the badge that
this judgement relates to.
@param badgeResult The local judgement result to remove. """
# Decrement the total for this result value in the tally for this badge level.
tally = self.__badgesStatus[badgeIndex]
tally[badgeResult] = tally[badgeResult] - 1
def GenerateStatement(self):
""" Generates the badges earned statement.
All the processed judgements are considered. The final statement
will reflect the badges which contain only positive judgements.
@return A string to contains the badges earned statement. """
# For a badge to be earned, you must not have any of the MISSING_DATA and FAILED results.
# And you must have at least one PASSED. The number of NO_SCRIPT results is not relevant.
# Process the badges status, looking for earned badges.
text = ""
for i in range(len(FGlobals.badgeLevels)):
tally = self.__badgesStatus[i]
missingData = tally[FJudgement.MISSING_DATA]
failed = tally[FJudgement.FAILED]
passed = tally[FJudgement.PASSED]
if (passed > 0) and (failed + missingData == 0):
# This badge is earned!
if (len(text) > 0): text += ", "
text += FGlobals.badgeLevels[i]
return text
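# Hedged worked example (illustrative tallies; assumes FGlobals.badgeLevels has been
# configured elsewhere, e.g. to ["Baseline", "Superior"]): a badge whose tally holds
# PASSED=3 and NO_SCRIPT=1 with no FAILED or MISSING_DATA entries is earned, while a
# badge with even one FAILED is not, so GenerateStatement() would report only the first.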
| StarcoderdataPython |
1625572 | <reponame>sylvielamythepaut/climetlab
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os
from collections import defaultdict
import yaml
MAGICS_KEYS = None
MAGICS_DEF = None
MAGICS_PARAMS = None
_inited = False
def init():
global _inited, MAGICS_KEYS, MAGICS_DEF, MAGICS_PARAMS
if not _inited:
MAGICS_KEYS = defaultdict(set)
MAGICS_PARAMS = defaultdict(dict)
with open(os.path.join(os.path.dirname(__file__), "magics.yaml")) as f:
MAGICS_DEF = yaml.load(f, Loader=yaml.SafeLoader)
for action, params in MAGICS_DEF.items():
for param in params:
name = param["name"]
MAGICS_KEYS[name].add(action)
MAGICS_PARAMS[action][name] = param
_inited = True
def magics_keys_to_actions():
init()
return MAGICS_KEYS
def magics_keys_definitions():
init()
return MAGICS_DEF
def magics_keys_parameters(name):
init()
return MAGICS_PARAMS[name]
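# Hedged usage sketch (added for illustration; assumes magics.yaml ships next to this module):
if __name__ == "__main__":
    actions_by_key = magics_keys_to_actions()
    print(len(actions_by_key), "magics parameter names loaded")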
| StarcoderdataPython |
num = int(input('Enter a natural number (up to the thousands): '))
n = str(num).zfill(4)  # zero-pad so the units/tens/hundreds/thousands digits always exist
print("""
Analysing the number: {}
Units: {}
Tens: {}
Hundreds: {}
Thousands: {}
""".format(num, n[3], n[2], n[1], n[0]))
| StarcoderdataPython |
3367251 | from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='testfunc',
version='0.1.1',
description='A sample tool for testing functions',
long_description=long_description,
url='https://github.com/yanxurui/testfunc',
# Author details
author="<NAME>",
author_email="<EMAIL>",
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Topic :: Software Development :: Testing',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
],
keywords='test',
py_modules=["testfunc"],
install_requires=['texttable>=1.1.1']
)
| StarcoderdataPython |
1654581 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from DataJoin.common import common_pb2 as DataJoin_dot_common_dot_common__pb2
from DataJoin.common import data_join_service_pb2 as DataJoin_dot_common_dot_data__join__service__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class DataJoinMasterServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetDataSource = channel.unary_unary(
'/DataJoin.common.DataJoinMasterService/GetDataSource',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=DataJoin_dot_common_dot_common__pb2.DataSource.FromString,
)
self.GetDataSourceStatus = channel.unary_unary(
'/DataJoin.common.DataJoinMasterService/GetDataSourceStatus',
request_serializer=DataJoin_dot_common_dot_data__join__service__pb2.DataSourceRequest.SerializeToString,
response_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.DataSourceStatus.FromString,
)
self.AbortDataSource = channel.unary_unary(
'/DataJoin.common.DataJoinMasterService/AbortDataSource',
request_serializer=DataJoin_dot_common_dot_data__join__service__pb2.DataSourceRequest.SerializeToString,
response_deserializer=DataJoin_dot_common_dot_common__pb2.Status.FromString,
)
self.RequestJoinPartition = channel.unary_unary(
'/DataJoin.common.DataJoinMasterService/RequestJoinPartition',
request_serializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataRequest.SerializeToString,
response_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataResponse.FromString,
)
self.FinishJoinPartition = channel.unary_unary(
'/DataJoin.common.DataJoinMasterService/FinishJoinPartition',
request_serializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataRequest.SerializeToString,
response_deserializer=DataJoin_dot_common_dot_common__pb2.Status.FromString,
)
self.QueryRawDataManifest = channel.unary_unary(
'/DataJoin.common.DataJoinMasterService/QueryRawDataManifest',
request_serializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataRequest.SerializeToString,
response_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataManifest.FromString,
)
self.FinishRawData = channel.unary_unary(
'/DataJoin.common.DataJoinMasterService/FinishRawData',
request_serializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataRequest.SerializeToString,
response_deserializer=DataJoin_dot_common_dot_common__pb2.Status.FromString,
)
self.AddRawData = channel.unary_unary(
'/DataJoin.common.DataJoinMasterService/AddRawData',
request_serializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataRequest.SerializeToString,
response_deserializer=DataJoin_dot_common_dot_common__pb2.Status.FromString,
)
class DataJoinMasterServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def GetDataSource(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetDataSourceStatus(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AbortDataSource(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RequestJoinPartition(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FinishJoinPartition(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def QueryRawDataManifest(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FinishRawData(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddRawData(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_DataJoinMasterServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetDataSource': grpc.unary_unary_rpc_method_handler(
servicer.GetDataSource,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=DataJoin_dot_common_dot_common__pb2.DataSource.SerializeToString,
),
'GetDataSourceStatus': grpc.unary_unary_rpc_method_handler(
servicer.GetDataSourceStatus,
request_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.DataSourceRequest.FromString,
response_serializer=DataJoin_dot_common_dot_data__join__service__pb2.DataSourceStatus.SerializeToString,
),
'AbortDataSource': grpc.unary_unary_rpc_method_handler(
servicer.AbortDataSource,
request_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.DataSourceRequest.FromString,
response_serializer=DataJoin_dot_common_dot_common__pb2.Status.SerializeToString,
),
'RequestJoinPartition': grpc.unary_unary_rpc_method_handler(
servicer.RequestJoinPartition,
request_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataRequest.FromString,
response_serializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataResponse.SerializeToString,
),
'FinishJoinPartition': grpc.unary_unary_rpc_method_handler(
servicer.FinishJoinPartition,
request_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataRequest.FromString,
response_serializer=DataJoin_dot_common_dot_common__pb2.Status.SerializeToString,
),
'QueryRawDataManifest': grpc.unary_unary_rpc_method_handler(
servicer.QueryRawDataManifest,
request_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataRequest.FromString,
response_serializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataManifest.SerializeToString,
),
'FinishRawData': grpc.unary_unary_rpc_method_handler(
servicer.FinishRawData,
request_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataRequest.FromString,
response_serializer=DataJoin_dot_common_dot_common__pb2.Status.SerializeToString,
),
'AddRawData': grpc.unary_unary_rpc_method_handler(
servicer.AddRawData,
request_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.RawDataRequest.FromString,
response_serializer=DataJoin_dot_common_dot_common__pb2.Status.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'DataJoin.common.DataJoinMasterService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class DataJoinServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.StartPartition = channel.unary_unary(
'/DataJoin.common.DataJoinService/StartPartition',
request_serializer=DataJoin_dot_common_dot_data__join__service__pb2.StartPartitionRequest.SerializeToString,
response_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.StartPartitionResponse.FromString,
)
self.SyncPartition = channel.unary_unary(
'/DataJoin.common.DataJoinService/SyncPartition',
request_serializer=DataJoin_dot_common_dot_data__join__service__pb2.SyncPartitionRequest.SerializeToString,
response_deserializer=DataJoin_dot_common_dot_common__pb2.Status.FromString,
)
self.FinishPartition = channel.unary_unary(
'/DataJoin.common.DataJoinService/FinishPartition',
request_serializer=DataJoin_dot_common_dot_data__join__service__pb2.FinishPartitionRequest.SerializeToString,
response_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.FinishPartitionResponse.FromString,
)
class DataJoinServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def StartPartition(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SyncPartition(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FinishPartition(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_DataJoinServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'StartPartition': grpc.unary_unary_rpc_method_handler(
servicer.StartPartition,
request_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.StartPartitionRequest.FromString,
response_serializer=DataJoin_dot_common_dot_data__join__service__pb2.StartPartitionResponse.SerializeToString,
),
'SyncPartition': grpc.unary_unary_rpc_method_handler(
servicer.SyncPartition,
request_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.SyncPartitionRequest.FromString,
response_serializer=DataJoin_dot_common_dot_common__pb2.Status.SerializeToString,
),
'FinishPartition': grpc.unary_unary_rpc_method_handler(
servicer.FinishPartition,
request_deserializer=DataJoin_dot_common_dot_data__join__service__pb2.FinishPartitionRequest.FromString,
response_serializer=DataJoin_dot_common_dot_data__join__service__pb2.FinishPartitionResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'DataJoin.common.DataJoinService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
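# Illustrative client-side usage of the stub above (the target address is an assumption,
# not something defined in this generated module):
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = DataJoinServiceStub(channel)
#   response = stub.StartPartition(
#       DataJoin_dot_common_dot_data__join__service__pb2.StartPartitionRequest())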
| StarcoderdataPython |
9822 | <filename>pywallet/network.py
class BitcoinGoldMainNet(object):
"""Bitcoin Gold MainNet version bytes. """
NAME = "Bitcoin Gold Main Net"
COIN = "BTG"
SCRIPT_ADDRESS = 0x17 # int(0x17) = 23
PUBKEY_ADDRESS = 0x26 # int(0x26) = 38 # Used to create payment addresses
SECRET_KEY = 0x80 # int(0x80) = 128 # Used for WIF format
EXT_PUBLIC_KEY = 0x0488b21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/0'/0'/"
class BitcoinCashMainNet(object):
"""Bitcoin Cash MainNet version bytes."""
NAME = "Bitcoin Cash Main Net"
COIN = "BCH"
SCRIPT_ADDRESS = 0x28 # int(0x28) = 40
    PUBKEY_ADDRESS = 0x1C # int(0x1C) = 28 # Used to create payment addresses
SECRET_KEY = 0x80 # int(0x80) = 128 # Used for WIF format
EXT_PUBLIC_KEY = 0x0488b21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/145'/0'/"
class DashMainNet(object):
"""Dash MainNet version bytes."""
NAME = "Dash Main Net"
COIN = "DASH"
SCRIPT_ADDRESS = 0x10 # int(0x10) = 16
PUBKEY_ADDRESS = 0x4C # int(0x4C) = 76 # Used to create payment addresses
SECRET_KEY = 0xCC # int(0xCC) = 204 # Used for WIF format
EXT_PUBLIC_KEY = 0X0488B21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0X0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/5'/0'/"
class DashTestNet(object):
"""Dash TestNet version bytes."""
NAME = "Dash Test Net"
COIN = "DASH"
SCRIPT_ADDRESS = 0x13 # int(0x13) = 19
PUBKEY_ADDRESS = 0x8C # int(0x8C) = 140 # Used to create payment addresses
SECRET_KEY = 0xEF # int(0xEF) = 239 # Used for WIF format
EXT_PUBLIC_KEY = <KEY> # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x04358394 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/1'/0'/"
class MarteXMainNet(object):
"""MarteX MainNet version bytes."""
NAME = "MarteX Main Net"
COIN = "MXT"
SCRIPT_ADDRESS = 0x05 # int(0x05) = 05
PUBKEY_ADDRESS = 0x32 # int(0x32) = 50 # Used to create payment addresses
SECRET_KEY = 0xB2 # int(0xB2) = 178 # Used for WIF format
EXT_PUBLIC_KEY = 0X0488B21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0X0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/180'/0'/"
class MarteXTestNet(object):
"""MarteX TestNet version bytes."""
NAME = "MarteX Test Net"
COIN = "MXT"
SCRIPT_ADDRESS = 0xC4 # int(0xC4) = 196
    PUBKEY_ADDRESS = 0x6C # int(0x6C) = 108 # Used to create payment addresses
SECRET_KEY = 0x144 # int(0x144) = 324 # Used for WIF format
EXT_PUBLIC_KEY = 0x043587CF # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x04358394 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/1'/0'/"
class OmniMainNet(object):
"""Bitcoin MainNet version bytes.
From https://github.com/OmniLayer/omnicore/blob/develop/src/chainparams.cpp
"""
NAME = "Omni Main Net"
COIN = "USDT"
SCRIPT_ADDRESS = 0x00 # int(0x00) = 0
PUBKEY_ADDRESS = 0x05 # int(0x05) = 5 # Used to create payment addresses
SECRET_KEY = 0x80 # int(0x80) = 128 # Used for WIF format
EXT_PUBLIC_KEY = 0x0488B21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/0'/0'/"
class OmniTestNet(object):
"""Bitcoin MainNet version bytes.
From https://github.com/OmniLayer/omnicore/blob/develop/src/chainparams.cpp
"""
NAME = "Omni Test Net"
COIN = "USDT"
SCRIPT_ADDRESS = 0x6f # int(0x6f) = 111
PUBKEY_ADDRESS = 0xc4 # int(0xc4) = 196 # Used to create payment addresses
SECRET_KEY = 0xef # int(0xef) = 239 # Used for WIF format
EXT_PUBLIC_KEY = 0x043587CF # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x04358394 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/0'/0'/"
class BitcoinMainNet(object):
"""Bitcoin MainNet version bytes.
From https://github.com/bitcoin/bitcoin/blob/v0.9.0rc1/src/chainparams.cpp
"""
NAME = "Bitcoin Main Net"
COIN = "BTC"
SCRIPT_ADDRESS = 0x05 # int(0x05) = 5
PUBKEY_ADDRESS = 0x00 # int(0x00) = 0 # Used to create payment addresses
SECRET_KEY = 0x80 # int(0x80) = 128 # Used for WIF format
EXT_PUBLIC_KEY = 0x0488B21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/0'/0'/"
class FeathercoinMainNet(object):
"""Feathercoin MainNet version bytes.
From https://github.com/FeatherCoin/Feathercoin/blob/master-0.13/src/chainparams.cpp
"""
NAME = "Feathercoin Main Net"
COIN = "FTC"
SCRIPT_ADDRESS = 0x05 # int(0x05) = 5
PUBKEY_ADDRESS = 0x0E # int(0x0E) = 14 # Used to create payment addresses
SECRET_KEY = 0x8E # int(0x8E) = 142 # Used for WIF format
EXT_PUBLIC_KEY = 0x0488BC26 # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x0488DAEE # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/4'/0'/"
class BitcoinTestNet(object):
"""Bitcoin TestNet version bytes.
From https://github.com/bitcoin/bitcoin/blob/v0.9.0rc1/src/chainparams.cpp
"""
NAME = "Bitcoin Test Net"
COIN = "BTC"
SCRIPT_ADDRESS = 0xc4 # int(0xc4) = 196
PUBKEY_ADDRESS = 0x6f # int(0x6f) = 111
SECRET_KEY = 0xEF # int(0xef) = 239
EXT_PUBLIC_KEY = 0x043587CF
EXT_SECRET_KEY = 0x04358394
BIP32_PATH = "m/44'/1'/0'/"
class LitecoinMainNet(object):
"""Litecoin MainNet version bytes
Primary version bytes from:
https://github.com/litecoin-project/litecoin/blob/master-0.8/src/base58.h
Unofficial extended version bytes from
https://bitcointalk.org/index.php?topic=453395.0
"""
NAME = "Litecoin Main Net"
COIN = "LTC"
SCRIPT_ADDRESS = 0x05 # int(0x05) = 5
PUBKEY_ADDRESS = 0x30 # int(0x30) = 48
SECRET_KEY = PUBKEY_ADDRESS + 128 # = int(0xb0) = 176
# Unofficial extended version bytes taken from
# https://bitcointalk.org/index.php?topic=453395.0
# EXT_PUBLIC_KEY = <KEY>
# EXT_SECRET_KEY = <KEY>
# same as Bitcoin's
# https://github.com/ranaroussi/pywallet/issues/6
EXT_PUBLIC_KEY = 0x0488B21E
EXT_SECRET_KEY = 0x0488ADE4
BIP32_PATH = "m/44'/2'/0'/"
class LitecoinTestNet(object):
"""Litecoin TestNet version bytes
Primary version bytes from:
https://github.com/litecoin-project/litecoin/blob/master-0.8/src/base58.h
Unofficial extended version bytes from
https://bitcointalk.org/index.php?topic=453395.0
"""
NAME = "Litecoin Test Net"
COIN = "LTC"
SCRIPT_ADDRESS = 0xc4 # int(0xc4) = 196
PUBKEY_ADDRESS = 0x6f # int(0x6f) = 111
SECRET_KEY = PUBKEY_ADDRESS + 128 # = int(0xef) = 239
# Unofficial extended version bytes taken from
# https://bitcointalk.org/index.php?topic=453395.0
# EXT_PUBLIC_KEY = 0x0436f6e1
# EXT_SECRET_KEY = 0x0436ef7d
# same as Bitcoin's
# https://github.com/ranaroussi/pywallet/issues/6
EXT_PUBLIC_KEY = 0x043587CF
EXT_SECRET_KEY = 0x04358394
BIP32_PATH = "m/44'/1'/0'/"
class DogecoinMainNet(object):
"""Dogecoin MainNet version bytes
Primary version bytes from:
https://github.com/dogecoin/dogecoin/blob/1.5.2/src/base58.h
Unofficial extended version bytes from
https://bitcointalk.org/index.php?topic=409731
"""
NAME = "Dogecoin Main Net"
COIN = "DOGE"
SCRIPT_ADDRESS = 0x16 # int(0x16) = 22
PUBKEY_ADDRESS = 0x1e # int(0x1e) = 30
SECRET_KEY = PUBKEY_ADDRESS + 128 # int(0x9e) = 158
# Unofficial extended version bytes taken from
# https://bitcointalk.org/index.php?topic=409731
EXT_PUBLIC_KEY = 0x02facafd
EXT_SECRET_KEY = 0x02fac398
BIP32_PATH = "m/44'/3'/0'/"
class DogecoinTestNet(object):
"""Dogecoin TestNet version bytes
Primary version bytes from:
https://github.com/dogecoin/dogecoin/blob/1.5.2/src/base58.h
Unofficial extended version bytes from
https://bitcointalk.org/index.php?topic=409731
"""
NAME = "Dogecoin Test Net"
COIN = "DOGE"
SCRIPT_ADDRESS = 0xc4 # int(0xc4) = 196
PUBKEY_ADDRESS = 0x71 # int(0x71) = 113
SECRET_KEY = PUBKEY_ADDRESS + 128 # int(0xf1) = 241
# Unofficial extended version bytes taken from
# https://bitcointalk.org/index.php?topic=409731
EXT_PUBLIC_KEY = 0x0432a9a8
EXT_SECRET_KEY = 0x0432a243
BIP32_PATH = "m/44'/1'/0'/"
class BlockCypherTestNet(object):
"""BlockCypher TestNet version bytes.
From http://dev.blockcypher.com/#testing
"""
NAME = "BlockCypher Test Net"
COIN = "BlockCypher"
SCRIPT_ADDRESS = 0x1f # int(0x1f) = 31
PUBKEY_ADDRESS = 0x1b # int(0x1b) = 27 # Used to create payment addresses
SECRET_KEY = 0x49 # int(0x49) = 73 # Used for WIF format
EXT_PUBLIC_KEY = 0x2d413ff # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x2d40fc3 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/1'/0'/"
class QtumMainNet(object):
"""Qtum MainNet version bytes
Primary version bytes from:
https://github.com/qtumproject/qtum/blob/master/src/chainparams.cpp
"""
NAME = "Qtum Main Net"
COIN = "QTUM"
SCRIPT_ADDRESS = 0x32 # int(0x32) = 50
PUBKEY_ADDRESS = 0x3A # int(0x3A) = 58 # Used to create payment addresses
SECRET_KEY = 0x80 # int(0x80) = 128 # Used for WIF format
EXT_PUBLIC_KEY = 0x0488B21E # Used to serialize public BIP32 addresses
EXT_SECRET_KEY = 0x0488ADE4 # Used to serialize private BIP32 addresses
BIP32_PATH = "m/44'/88'/0'/"
class QtumTestNet(object):
"""Qtum TestNet version bytes
Primary version bytes from:
https://github.com/qtumproject/qtum/blob/master/src/chainparams.cpp
"""
NAME = "Qtum Test Net"
COIN = "QTUM"
SCRIPT_ADDRESS = 0x6E # int(0x6e) = 110
PUBKEY_ADDRESS = 0x78 # int(0x78) = 120
SECRET_KEY = 0xEF # int(0xef) = 239
EXT_PUBLIC_KEY = 0x043587CF
EXT_SECRET_KEY = 0x04358394
BIP32_PATH = "m/44'/88'/0'/"
| StarcoderdataPython |
4801806 | import collections
fruit = collections.Counter(['apple', 'orange', 'pear', 'apple', 'orange', 'apple'])
print(fruit)
print(fruit['orange'])
print('fruit.most_common(1):', fruit.most_common(1))
fruit1 = collections.Counter(['apple', 'orange', 'pear', 'orange'])
fruit2 = collections.Counter(['banana', 'apple', 'apple'])
print('fruit1:', fruit1)
print('fruit2:', fruit2)
print('fruit1 + fruit2:', fruit1 + fruit2)
print('fruit1 - fruit2:', fruit1 - fruit2)
# Union (max(fruit1[n], fruit2[n]))
print('fruit1 | fruit2:', fruit1 | fruit2)
# Intersection (min(fruit1[n], fruit2[n]))
print('fruit1 & fruit2:', fruit1 & fruit2)
| StarcoderdataPython |
3322310 | """Module for development/debugging execution of the Web Service
In production the service will probably be executed using asgi in
a proper webserving environment in a container.
This file can be used for development/testing/debugging of the
webservice using uvicorn as development web server.
Usage: Call with activated virtual environment via
python main.py
from project directory.
"""
import os
# pylint: disable=wrong-import-order
import logging
if __name__ == "__main__":
if os.environ.get("HD_RUNTIME_ENVIRONMENT_FILE", None) is None:
# if this script is called directly, default to local dev setup.
os.environ["HD_RUNTIME_ENVIRONMENT_FILE"] = "local_dev.env"
from hetdesrun import configure_logging
logger = logging.getLogger(__name__)
configure_logging(logger)
# must be after logging config:
from hetdesrun.webservice.application import app
from hetdesrun.webservice.config import runtime_config
def detect_in_memory_db() -> bool:
from hetdesrun.persistence import get_db_engine
engine = get_db_engine()
backend_name = engine.url.get_backend_name()
# driver_name = engine.url.get_driver_name() # pysqlite
database = engine.url.database # ":memory:"
if backend_name.lower() == "sqlite" and (
(database is None) or database.lower() in (":memory:",)
):
return True
return False
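# For illustration: an SQLAlchemy URL such as "sqlite+pysqlite:///:memory:" (or a plain
# "sqlite://" with no database path) is classified as an in-memory DB by the check above.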
def run_migrations(
alembic_dir: str = "./alembic",
connection_url=runtime_config.sqlalchemy_connection_string,
) -> None:
"""Runs alembic migrations from within Python code
Should only be used for local development server. Not recommended
for multi-process/thread production servers.
Note: The docker container runs migrations via prestart.sh script in the
production setup.
"""
from hetdesrun import migrations_invoked_from_py
migrations_invoked_from_py = True
from alembic.config import Config
from alembic import command
from pydantic import SecretStr
import hetdesrun.persistence.dbmodels
from hetdesrun.persistence import get_db_engine
engine = get_db_engine()
logger.info("Using DB engine driver: %s", str(engine.url.drivername))
if isinstance(connection_url, SecretStr):
connection_url_to_use = connection_url.get_secret_value()
else:
connection_url_to_use = connection_url
logger.info("Running DB migrations in %s", alembic_dir)
alembic_cfg = Config()
alembic_cfg.set_main_option("script_location", alembic_dir)
# alembic_cfg.set_main_option("sqlalchemy.url", connection_url_to_use)
# alembic_cfg.set_section_option("logger_root", "level", "DEBUG")
# alembic_cfg.set_section_option("logger_alembic", "level", "DEBUG")
# alembic_cfg.set_section_option("logger_sqlalchemy", "level", "DEBUG")
command.upgrade(alembic_cfg, "head")
logger.info("Finished running migrations.")
in_memory_db = detect_in_memory_db()
if in_memory_db:
logger.info(
"Detected in-memory db usage: Running migrations during importing of main.py."
)
run_migrations()
if __name__ == "__main__":
if not in_memory_db:
logger.info(
"Running migrations from main.py since main.py was invoked directly."
)
run_migrations()
import os
import uvicorn
host = os.environ.get("HOST", "127.0.0.1")
port = int(os.environ.get("PORT", 8000))
logger.info("Start app as host %s with port %s", str(host), str(port))
uvicorn.run(
"hetdesrun.webservice.application:app",
debug=True,
reload=True,
host=host,
port=port,
)
| StarcoderdataPython |
3365697 | # Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# Date: October 20, 2011
"""
This is the scons rules helper module which should be
imported by Scons script
"""
import os
import shutil
import signal
import string
import subprocess
import sys
import tempfile
import SCons
import SCons.Action
import SCons.Builder
import SCons.Scanner
import SCons.Scanner.Prog
import blade_util
import console
# option_verbose to indicate print verbose or not
option_verbose = False
# linking tmp dir
linking_tmp_dir = ''
def generate_python_binary(target, source, env):
setup_file = ''
if not str(source[0]).endswith('setup.py'):
        console.warning('setup.py does not exist to generate target %s, '
'blade will generate a default one for you' %
str(target[0]))
else:
setup_file = str(source[0])
init_file = ''
source_index = 2
if not setup_file:
source_index = 1
init_file = str(source[0])
else:
init_file = str(source[1])
init_file_dir = os.path.dirname(init_file)
dep_source_list = []
for s in source[source_index:]:
dep_source_list.append(str(s))
target_file = str(target[0])
target_file_dir_list = target_file.split('/')
target_profile = target_file_dir_list[0]
target_dir = '/'.join(target_file_dir_list[0:-1])
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if setup_file:
shutil.copyfile(setup_file, os.path.join(target_dir, 'setup.py'))
else:
target_name = os.path.basename(init_file_dir)
if not target_name:
console.error_exit('invalid package for target %s' % str(target[0]))
# generate default setup.py for user
setup_str = """
#!/usr/bin/env python
# This file was generated by blade
from setuptools import find_packages, setup
setup(
name='%s',
version='0.1.0',
packages=find_packages(),
zip_safe=True
)
""" % target_name
default_setup_file = open(os.path.join(target_dir, 'setup.py'), 'w')
default_setup_file.write(setup_str)
default_setup_file.close()
package_dir = os.path.join(target_profile, init_file_dir)
if os.path.exists(package_dir):
shutil.rmtree(package_dir, ignore_errors=True)
cmd = 'cp -r %s %s' % (init_file_dir, target_dir)
p = subprocess.Popen(
cmd,
env={},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
std_out, std_err = p.communicate()
if p.returncode:
console.info(std_out)
console.info(std_err)
console.error_exit('failed to copy source files from %s to %s' % (
init_file_dir, target_dir))
return p.returncode
# copy file to package_dir
for f in dep_source_list:
dep_file_basename = os.path.basename(f)
dep_file_dir = os.path.dirname(f)
sub_dir = ''
sub_dir_list = dep_file_dir.split('/')
if len(sub_dir_list) > 1:
sub_dir = '/'.join(dep_file_dir.split('/')[1:])
if sub_dir:
package_sub_dir = os.path.join(package_dir, sub_dir)
if not os.path.exists(package_sub_dir):
os.makedirs(package_sub_dir)
sub_init_file = os.path.join(package_sub_dir, '__init__.py')
if not os.path.exists(sub_init_file):
sub_f = open(sub_init_file, 'w')
sub_f.close()
shutil.copyfile(f, os.path.join(package_sub_dir, dep_file_basename))
make_egg_cmd = 'python setup.py bdist_egg'
p = subprocess.Popen(
make_egg_cmd,
env={},
cwd=target_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
std_out, std_err = p.communicate()
if p.returncode:
console.info(std_out)
console.info(std_err)
console.error_exit('failed to generate python binary in %s' % target_dir)
return p.returncode
return 0
def generate_resource_index(target, source, env):
res_source_path = str(target[0])
res_header_path = str(target[1])
if not os.path.exists(os.path.dirname(res_header_path)):
os.mkdir(os.path.dirname(res_header_path))
h = open(res_header_path, 'w')
c = open(res_source_path, 'w')
source_path = env["SOURCE_PATH"]
full_name = blade_util.regular_variable_name("%s/%s" % (source_path, env["TARGET_NAME"]))
guard_name = 'BLADE_RESOURCE_%s_H' % full_name.upper()
print >>h, '#ifndef %s\n#define %s' % (guard_name, guard_name)
print >>h, '''
// This file was automatically generated by blade
#ifdef __cplusplus
extern "C" {
#endif
#ifndef BLADE_RESOURCE_TYPE_DEFINED
#define BLADE_RESOURCE_TYPE_DEFINED
struct BladeResourceEntry {
const char* name;
const char* data;
unsigned int size;
};
#endif
'''
res_index_name = 'RESOURCE_INDEX_%s' % full_name
print >>c, '// This file was automatically generated by blade\n'
print >>c, '#include "%s"\n' % res_header_path
print >>c, 'const struct BladeResourceEntry %s[] = {' % res_index_name
for s in source:
src = str(s)
var_name = blade_util.regular_variable_name(src)
org_src = blade_util.relative_path(src, source_path)
print >>h, '// %s' % org_src
print >>h, 'extern const char RESOURCE_%s[%d];' % (var_name, s.get_size())
print >>h, 'extern const unsigned RESOURCE_%s_len;\n' % var_name
print >>c, ' { "%s", RESOURCE_%s, %s },' % (org_src, var_name, s.get_size())
print >>c, '};'
print >>c, 'const unsigned %s_len = %s;' % (res_index_name, len(source))
print >>h, '// Resource index'
print >>h, 'extern const struct BladeResourceEntry %s[];' % res_index_name
print >>h, 'extern const unsigned %s_len;' % res_index_name
print >>h, '\n#ifdef __cplusplus\n} // extern "C"\n#endif\n'
print >>h, '\n#endif // %s' % guard_name
c.close()
h.close()
def generate_resource_file(target, source, env):
src_path = str(source[0])
new_src_path = str(target[0])
cmd = ('xxd -i %s | sed -e "s/^unsigned char /const char RESOURCE_/g" '
'-e "s/^unsigned int /const unsigned int RESOURCE_/g"> %s') % (
src_path, new_src_path)
p = subprocess.Popen(
cmd,
env={},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
std_out, std_err = p.communicate()
if p.returncode or std_err:
error = 'failed to generate resource file'
if std_err:
error = error + ': ' + std_err
console.error_exit(error)
return p.returncode
def MakeAction(cmd, cmdstr):
global option_verbose
if option_verbose:
return SCons.Action.Action(cmd)
else:
return SCons.Action.Action(cmd, cmdstr)
_ERRORS = [': error:', ': fatal error:', ': undefined reference to',
': cannot find ', ': ld returned 1 exit status',
' is not defined'
]
_WARNINGS = [': warning:', ': note: ', '] Warning: ']
def error_colorize(message):
colored_message = []
for t in message.splitlines(True):
color = 'cyan'
# For clang column indicator, such as '^~~~~~'
if t.strip().startswith('^'):
color = 'green'
else:
for w in _WARNINGS:
if w in t:
color = 'yellow'
break
for w in _ERRORS:
if w in t:
color = 'red'
break
colored_message.append(console.colors(color))
colored_message.append(t)
colored_message.append(console.colors('end'))
return console.inerasable(''.join(colored_message))
def echospawn(sh, escape, cmd, args, env):
# convert env from unicode strings
asciienv = {}
for key, value in env.iteritems():
asciienv[key] = str(value)
cmdline = ' '.join(args)
p = subprocess.Popen(
cmdline,
env=asciienv,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True,
universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
if p.returncode != -signal.SIGINT:
# Error
sys.stdout.write(error_colorize(stdout))
sys.stderr.write(error_colorize(stderr))
else:
if stderr:
# Only warnings
sys.stdout.write(error_colorize(stdout))
sys.stderr.write(error_colorize(stderr))
else:
sys.stdout.write(stdout)
return p.returncode
def _blade_action_postfunc(closing_message):
"""To do post jobs if blade's own actions failed to build. """
console.info(closing_message)
# Remember to write the dblite incase of re-linking once fail to
# build last time. We should elaborate a way to avoid rebuilding
# after failure of our own builders or actions.
SCons.SConsign.write()
def _fast_link_helper(target, source, env, link_com):
"""fast link helper function. """
target_file = str(target[0])
prefix_str = 'blade_%s' % target_file.replace('/', '_').replace('.', '_')
fd, temporary_file = tempfile.mkstemp(suffix='xianxian',
prefix=prefix_str,
dir=linking_tmp_dir)
os.close(fd)
sources = []
for s in source:
sources.append(str(s))
link_com_str = link_com.substitute(
FL_TARGET=temporary_file,
FL_SOURCE=' '.join(sources))
p = subprocess.Popen(
link_com_str,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
std_out, std_err = p.communicate()
if std_out:
print std_out
if std_err:
print std_err
if p.returncode == 0:
shutil.move(temporary_file, target_file)
if not os.path.exists(target_file):
            console.warning('failed to generate %s in link on tmpfs mode' % target_file)
else:
_blade_action_postfunc('failed while fast linking')
return p.returncode
def fast_link_sharelib_action(target, source, env):
# $SHLINK -o $TARGET $SHLINKFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS
link_com = string.Template('%s -o $FL_TARGET %s %s $FL_SOURCE %s %s' % (
env.subst('$SHLINK'),
env.subst('$SHLINKFLAGS'),
env.subst('$__RPATH'),
env.subst('$_LIBDIRFLAGS'),
env.subst('$_LIBFLAGS')))
return _fast_link_helper(target, source, env, link_com)
def fast_link_prog_action(target, source, env):
# $LINK -o $TARGET $LINKFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS
link_com = string.Template('%s -o $FL_TARGET %s %s $FL_SOURCE %s %s' % (
env.subst('$LINK'),
env.subst('$LINKFLAGS'),
env.subst('$__RPATH'),
env.subst('$_LIBDIRFLAGS'),
env.subst('$_LIBFLAGS')))
return _fast_link_helper(target, source, env, link_com)
def create_fast_link_prog_builder(env):
"""
This is the function to create blade fast link
program builder. It will overwrite the program
builder of top level env if user specifies an
option to apply fast link method that they want
to place the blade output to distributed file
    system to avoid the random read write of linker
largely degrades building performance.
"""
new_link_action = MakeAction(fast_link_prog_action, '$LINKCOMSTR')
program = SCons.Builder.Builder(action=new_link_action,
emitter='$PROGEMITTER',
prefix='$PROGPREFIX',
suffix='$PROGSUFFIX',
src_suffix='$OBJSUFFIX',
src_builder='Object',
target_scanner=SCons.Scanner.Prog.ProgramScanner())
env['BUILDERS']['Program'] = program
def create_fast_link_sharelib_builder(env):
"""
This is the function to create blade fast link
sharelib builder. It will overwrite the sharelib
builder of top level env if user specifies an
option to apply fast link method that they want
to place the blade output to distributed file
    system to avoid the random read write of linker
largely degrades building performance.
"""
new_link_actions = []
new_link_actions.append(SCons.Defaults.SharedCheck)
new_link_actions.append(MakeAction(fast_link_sharelib_action, '$SHLINKCOMSTR'))
sharedlib = SCons.Builder.Builder(action=new_link_actions,
emitter='$SHLIBEMITTER',
prefix='$SHLIBPREFIX',
suffix='$SHLIBSUFFIX',
target_scanner=SCons.Scanner.Prog.ProgramScanner(),
src_suffix='$SHOBJSUFFIX',
src_builder='SharedObject')
env['BUILDERS']['SharedLibrary'] = sharedlib
def create_fast_link_builders(env):
"""Creates fast link builders - Program and SharedLibrary. """
# Check requirement
acquire_temp_place = "df | grep tmpfs | awk '{print $5, $6}'"
p = subprocess.Popen(
acquire_temp_place,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
std_out, std_err = p.communicate()
# Do not try to overwrite builder with error
if p.returncode:
        console.warning('you have link on tmp enabled, but the requirements are not fulfilled to make it.')
return
# No tmpfs to do fastlink, will not overwrite the builder
if not std_out:
console.warning('you have link on tmp enabled, but there is no tmpfs to make it.')
return
# Use the first one
global linking_tmp_dir
usage, linking_tmp_dir = tuple(std_out.splitlines(False)[0].split())
# Do not try to do that if there is no memory space left
usage = int(usage.replace('%', ''))
if usage > 90:
console.warning('you have link on tmp enabled, '
'but there is not enough space on %s to make it.' %
linking_tmp_dir)
return
console.info('building in link on tmpfs mode')
create_fast_link_sharelib_builder(env)
create_fast_link_prog_builder(env)
| StarcoderdataPython |
1649042 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
THEME = "themes/Flex"
AUTHOR = "<NAME>"
SITENAME = "<NAME>"
DOMAIN = "http://localhost:8000"
PATH = "content"
TIMEZONE = "Asia/Kathmandu"
DEFAULT_LANG = "en"
THEME_COLOR_ENABLE_USER_OVERRIDE = True
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
CC_LICENSE = True
DEFAULT_PAGINATION = 5
# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True
# Pelican plugins
PLUGIN_PATHS = ["pelican-plugins"]
PLUGINS = [
"assets",
"sitemap",
"post_stats",
"feed_summary",
"share_post",
"related_posts",
"render_math",
]
STATIC_PATHS = ["images", "extras", "extra"]
extras = ["CNAME", "favicon.ico", "robots.txt"]
EXTRA_PATH_METADATA = {"extra/%s" % file: {"path": file} for file in extras}
DEFAULT_DATE = "fs"
DEFAULT_DATE_FORMAT = "%B %d, %Y"
DEFAULT_PAGINATION = 5
PAGE_EXCLUDES = ["404.html"]
SITEURL = "http://localhost:8000"
SITETITLE = "Aaditya Chapagain" # Replace with your name
SITESUBTITLE = (
'notiones a solis ortu usque ad occasum <br /> "Ideas from sunrise to Sunsets"'
)
# Sitemap Settings
SITEMAP = {
"format": "xml",
"priorities": {
"articles": 0.6,
"indexes": 0.6,
"pages": 0.5,
},
"changefreqs": {
"articles": "monthly",
"indexes": "daily",
"pages": "monthly",
},
}
# Add a link to your social media accounts
SOCIAL = (
("github", "https://github.com/aadityachapagain"),
("linkedin", "https://www.linkedin.com/in/aaditya-chapagain-b5170a104/"),
("twitter", "https://twitter.com/chapagainA"),
("facebook", "https://www.facebook.com/aaditya.chapagain"),
)
SITELOGO = "/images/profile.jpg"
FAVICON = "/images/favicon.ico"
LINKS = [
("Resume", "https://aadityachapagain.com/AboutMe/"),
("Sapiens", "https://aadityachapagain.com/sapiens"),
]
MARKDOWN = {
"extension_configs": {
"markdown.extensions.codehilite": {"css_class": "highlight"},
"markdown.extensions.extra": {},
"markdown.extensions.meta": {},
},
"output_format": "html5",
}
# Main Menu Items
MAIN_MENU = True
MENUITEMS = (
("All Posts", "/archives"),
("Categories", "/categories"),
("Tags", "/tags"),
)
# Code highlighting the theme
PYGMENTS_STYLE = "emacs"
PYGMENTS_STYLE_DARK = "monokai"
ARTICLE_URL = "{date:%Y}/{date:%m}/{slug}/"
ARTICLE_SAVE_AS = ARTICLE_URL + "index.html"
PAGE_URL = "{slug}/"
PAGE_SAVE_AS = PAGE_URL + "index.html"
ARCHIVES_SAVE_AS = "archives.html"
YEAR_ARCHIVE_SAVE_AS = "{date:%Y}/index.html"
MONTH_ARCHIVE_SAVE_AS = "{date:%Y}/{date:%m}/index.html"
# Disable authors (common for all themes)
DIRECT_TEMPLATES = ["index", "archives", "categories", "tags"]
AUTHOR_SAVE_AS = ""
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = "feeds/all.atom.xml"
CATEGORY_FEED_ATOM = "feeds/%s.atom.xml"
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# HOME_HIDE_TAGS = True
FEED_USE_SUMMARY = True
DISQUS_SITENAME = "aadityachapagain"
GOOGLE_ANALYTICS = "UA-175516093-1"
| StarcoderdataPython |
117467 | # -*- coding: utf-8 -*-
"""
canteen
~~~~~~~
a minimal web framework for the modern web
:author: <NAME> <<EMAIL>>
:copyright: (c) <NAME>, 2014
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
"""
debug, __version__ = __debug__, (0, 3)
# stdlib
import __builtin__; export = None
# yay modules!
from . import core
from . import rpc
from . import util
from . import test
from . import base
from . import model
from . import logic
from . import runtime
from . import dispatch
from . import exceptions
# canteen :)
from .rpc import *
from .core import *
from .util import *
from .test import *
from .base import *
from .logic import *
from .model import *
from .runtime import *
from .dispatch import *
from .exceptions import *
__all__ = [export for export in globals() if (
export not in __builtin__.__dict__ and (
not export.startswith('__')))] # export all the things!
| StarcoderdataPython |
161358 | <reponame>teddy4445/lecture_website_app_generator
import zipfile
import sys
import os
from web_logic.github_pages_manager import GithubPagesManager
def create_new_user(user_name) -> None:
# TODO: insert user to DB
pass
manager = GithubPagesManager()
dir_path = '\\'.join([manager.users_websites_folder, user_name])
try:
# create new folder for the user in the users directory
os.mkdir(dir_path)
# download template code into the user's directory
manager.login(user_name='username', password='password')
manager.download_template(dir_path)
file_path = ''
# get the zip file path
for file in os.listdir(dir_path):
if file.endswith('.zip'):
file_path = '\\'.join([dir_path, file])
if file_path != '':
# extract files from zip
with zipfile.ZipFile(file_path, 'r') as zip_ref:
zip_ref.extractall(dir_path)
os.remove(file_path)
# get data from user
# TODO: get the data from the user
data = {'f1.txt': 'dude', 'f2.txt': 'dude2'}
# update local user's directory
for file_name, content in data.items():
# create the path to the file inside the user's directory
file_path = '/'.join([dir_path, file_name])
with open(file_path, 'w') as f:
f.write(content)
# TODO: continue to git...
pass
except Exception as e:
print(e, file=sys.stderr)
def update_user_data(user_name):
# get the path to folder of the user from the users directory
dir_path = '/'.join([GithubPagesManager.users_websites_folder, user_name])
# get data from user
# TODO: get the data from the user
data = {'f1.txt': '<PASSWORD>', 'f2.txt': '<PASSWORD>'}
# update local user's directory
for file_name, content in data.items():
# create the path to the file inside the user's directory
file_path = '/'.join([dir_path, file_name])
with open(file_path, 'w') as f:
f.write(content)
print(f'file updated: {f.name}')
# TODO: continue to git...
pass
| StarcoderdataPython |
3304640 | #!/usr/bin/env python3
"""
Data-transformer-app.
1. Grub CSV files located in ./data/original_data folder with
SaveEcoBot structure (device_id,phenomenon,value,logged_at,value_text).
2. Separate CSV files per device_id and sensor type (phenomenon)
and write result to ./data/csv/*.csv files.
3. Transform data from ./data/csv to InfluxDB format
and write result to ./data/influx/*.influx files.
Recommended way to run:
docker build -t data-transformer ./data-transformer-app
docker run -v $PWD/data/:/app/data/ --rm data-transformer
"""
import csv
import errno
import logging
import sys
import time
from datetime import datetime
from os import listdir
import configs as conf
import pandas as pd
try:
from typeguard import typechecked # noqa: WPS433
except ModuleNotFoundError:
def typechecked(func=None): # noqa: WPS440
"""Skip runtime type checking on the function arguments."""
return func
else:
from types import MappingProxyType # noqa: WPS433 pylint: disable=unused-import
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logs = logging.StreamHandler(sys.stdout)
logger.addHandler(logs)
#######################################################################
# F U N C T I O N S #
#######################################################################
@typechecked
def process_chunk_rows(
dataframe: pd.core.frame.DataFrame,
filename: str,
sensor: str,
):
"""Split sensors data to separate file and sort it.
Args:
dataframe: (pandas.core.frame.DataFrame) Data chunk from CSV file.
filename: (str) Filename of processed file.
sensor: (str) Sensor name what will be proccessed.
"""
dataframe.loc[
# Choose only rows where with 'phenomenon' colum == sensor name
dataframe['phenomenon'] == sensor
].sort_values(
by=['logged_at'],
# Make valid numbers for data sheets
).to_csv(
# Name file as sensor name
f'data/csv/{filename}.csv',
# Save only time and value
columns=['device_id', 'logged_at', 'value'],
# Don't wrote doc num colum
index=False,
# Don't wrote header name - it writes on each DataFrame Iteration
header=False,
# Append data to file
mode='a',
)
@typechecked
def remove_duplicate_rows(filename: str, extention: str = '.csv'):
"""Remove duplicate rows from provided file.
Args:
filename: (str) Filename of processed file.
extention (str): File extention. Default to ".csv"
"""
with open(f'data/csv/{filename}{extention}', 'r+') as csv_file:
# Get unique rows
lines = set(csv_file.readlines())
# Cleanup file
csv_file.seek(0)
csv_file.truncate()
# Write unique rows
csv_file.writelines(lines)
@typechecked
def write_influx_data(filename: str, collection: set):
"""Append file with data in InfluxDB format.
Args:
filename: (str) Filename.
collection: (set) Data for file append.
"""
with open(f'data/influx/{filename}.influx', mode='a') as influx_file:
influx_file.writelines(element for element in collection)
@typechecked
def find_csv_filenames(path_to_dir: str, suffix: str = '.csv'):
"""Find all files with specified extention.
Args:
path_to_dir (str): Path to dir where files where to look for files
suffix (str): File extention. Default to ".csv"
Returns:
array
"""
filenames = listdir(path_to_dir)
return [filename for filename in filenames if filename.endswith(suffix)]
@typechecked
def calculate_aqi(aqi: 'MappingProxyType[str, dict]', sensor: str, concentration: float) -> int:
"""Calculate Air Quality Index.
Calculations based on:
https://www.airnow.gov/sites/default/files/2018-05/aqi-technical-assistance-document-may2016.pdf
Args:
aqi: (MappingProxyType[str, dict]) Nested dictionary with values for AQI calculation.
sensor: (str) Sensor name for which it will AQI count.
concentration: (float) Raw data from sensor.
Returns:
int: Air Quality Index value.
"""
for upper_bound, _ in aqi[sensor].items():
if concentration < float(upper_bound):
aqi_value = (
(_['aqi_high'] - _['aqi_low'])
/ (_['pollutant_high'] - _['pollutant_low'])
* (concentration - _['pollutant_low'])
+ _['aqi_low']
)
break
return round(aqi_value)
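# A worked example of the interpolation above (illustrative only; the real breakpoints
# live in conf.AQI, which is not shown here). Assuming the EPA PM2.5 breakpoint
# 35.5-55.4 ug/m3 maps to AQI 101-150, a concentration of 45.0 gives
#   (150 - 101) / (55.4 - 35.5) * (45.0 - 35.5) + 101 = 124.4, rounded to 124.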
@typechecked
def transform_date_to_nanoseconds(date) -> int:
"""Get date from string and return it in UNIX nanoseconds format.
Args:
date: (str) Datetime string in `%Y-%m-%d %H:%M:%S` format.
Returns:
int: Date in UNIX nanoseconds.
"""
date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S').timetuple() # noqa: WPS323
date = time.mktime(date) * 10 ** 9
return int(date)
#######################################################################
# M A I N #
#######################################################################
@typechecked
def main() -> None: # pylint: disable=R0914 # noqa: WPS210, WPS213, WPS231
"""Logic."""
files = find_csv_filenames(conf.PATH)
if not files:
logger.error( # pylint: disable=logging-not-lazy
'CSV-files not found. Did you add any in `./data/original_data` as it specified in '
+ 'https://github.com/MaxymVlasov/eco-data-visualizer#quick-start ?',
)
sys.exit(errno.ENOENT)
logger.info(f'Found next files: {files}')
for filename in files:
for sensor, human_readable_sensor_name in conf.SENSORS.items():
logs.setFormatter(
logging.Formatter(
'\n{asctime} - {message}', datefmt='%H:%M:%S', style='{',
),
)
logger.info(
f'Start work on "{human_readable_sensor_name}" sensor data from {filename}',
)
logs.setFormatter(
logging.Formatter(
'{asctime} ----- {message}', datefmt='%H:%M:%S', style='{',
),
)
sensor_file = f'{filename}-{sensor}'
#
# Split sensors data to separate file and sort it
#
# Cleanup previous data
open(f'data/csv/{sensor_file}.csv', 'w').close() # noqa: WPS515
pandas_csv = pd.read_csv(
f'{conf.PATH}/{filename}',
chunksize=conf.CHUNKSIZE,
delimiter=',',
dtype=str,
)
for chunk in pandas_csv:
logger.info(f'Proccess chunk rows: {conf.CHUNKSIZE}')
process_chunk_rows(chunk, sensor_file, sensor)
logger.info('Get unique rows')
remove_duplicate_rows(sensor_file)
#
# Get data for Influx
#
logger.info('Transform data to Database format')
# Cleanup previous data
with open(f'data/influx/{sensor_file}.influx', 'w') as influx_file:
influx_file.write("""
# DDL
CREATE DATABASE sensors
# DML
# CONTEXT-DATABASE: sensors
""")
influx_data = set()
can_calculate_aqi = sensor in conf.AQI
with open(f'data/csv/{sensor_file}.csv', mode='r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
device_id = row[0]
date = transform_date_to_nanoseconds(row[1])
concentration = round(float(row[2]), 1)
if can_calculate_aqi:
aqi = calculate_aqi(conf.AQI, sensor, concentration) # noqa: WPS220
influx_data.add( # noqa: WPS220
f'{human_readable_sensor_name},device_id={device_id},have_aqi=true '
+ f'aqi={aqi},concentration={concentration} {date}\n',
)
else:
influx_data.add( # noqa: WPS220
f'{human_readable_sensor_name},device_id={device_id},have_aqi=false '
+ f'concentration={concentration} {date}\n',
)
write_influx_data(sensor_file, influx_data)
if __name__ == '__main__':
# execute only if run as a script
main()
| StarcoderdataPython |
3399978 | <reponame>mikofski/solar-data-tools
# -*- coding: utf-8 -*-
''' Utilities Module
This module contains utility function used by other modules.
'''
import sys
import numpy as np
import cvxpy as cvx
def total_variation_filter(signal, C=5):
'''
This function performs total variation filtering or denoising on a 1D signal. This filter is implemented as a
convex optimization problem which is solved with cvxpy.
(https://en.wikipedia.org/wiki/Total_variation_denoising)
:param signal: A 1d numpy array (must support boolean indexing) containing the signal of interest
:param C: The regularization parameter to control the total variation in the final output signal
:return: A 1d numpy array containing the filtered signal
'''
s_hat = cvx.Variable(len(signal))
mu = cvx.Constant(value=C)
index_set = ~np.isnan(signal)
objective = cvx.Minimize(cvx.sum(cvx.huber(signal[index_set] - s_hat[index_set]))
+ mu * cvx.norm1(cvx.diff(s_hat, k=1)))
problem = cvx.Problem(objective=objective)
try:
problem.solve(solver='MOSEK')
except Exception as e:
print(e)
print('Trying ECOS solver')
problem.solve(solver='ECOS')
return s_hat.value
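# In math form, the problem solved above is (illustrative notation):
#   minimize_x  sum_i huber(y_i - x_i) + C * sum_i |x_{i+1} - x_i|
# i.e. a Huber data-fit term plus an l1 penalty on the first differences of the estimate.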
def total_variation_plus_seasonal_filter(signal, c1=10, c2=500,
residual_weights=None, tv_weights=None,
use_ixs=None, periodic_detector=False,
transition_locs=None,
seas_max=None):
'''
This performs total variation filtering with the addition of a seasonal baseline fit. This introduces a new
signal to the model that is smooth and periodic on a yearly time frame. This does a better job of describing real,
multi-year solar PV power data sets, and therefore does an improved job of estimating the discretely changing
signal.
:param signal: A 1d numpy array (must support boolean indexing) containing the signal of interest
:param c1: The regularization parameter to control the total variation in the final output signal
:param c2: The regularization parameter to control the smoothness of the seasonal signal
:return: A 1d numpy array containing the filtered signal
'''
if residual_weights is None:
residual_weights = np.ones_like(signal)
if tv_weights is None:
tv_weights = np.ones(len(signal) - 1)
if use_ixs is None:
index_set = ~np.isnan(signal)
else:
index_set = np.logical_and(use_ixs, ~np.isnan(signal))
s_hat = cvx.Variable(len(signal))
s_seas = cvx.Variable(len(signal))
s_error = cvx.Variable(len(signal))
c1 = cvx.Constant(value=c1)
c2 = cvx.Constant(value=c2)
#w = len(signal) / np.sum(index_set)
if transition_locs is None:
objective = cvx.Minimize(
# (365 * 3 / len(signal)) * w *
# cvx.sum(cvx.huber(cvx.multiply(residual_weights, s_error)))
10 * cvx.norm(cvx.multiply(residual_weights, s_error))
+ c1 * cvx.norm1(cvx.multiply(tv_weights, cvx.diff(s_hat, k=1)))
+ c2 * cvx.norm(cvx.diff(s_seas, k=2))
# + c2 * .1 * cvx.norm(cvx.diff(s_seas, k=1))
)
else:
objective = cvx.Minimize(
10 * cvx.norm(cvx.multiply(residual_weights, s_error))
+ c2 * cvx.norm(cvx.diff(s_seas, k=2))
)
constraints = [
signal[index_set] == s_hat[index_set] + s_seas[index_set] + s_error[index_set],
cvx.sum(s_seas[:365]) == 0
]
if len(signal) > 365:
constraints.append(s_seas[365:] - s_seas[:-365] == 0)
if periodic_detector:
constraints.append(s_hat[365:] - s_hat[:-365] == 0)
if transition_locs is not None:
loc_mask = np.ones(len(signal) - 1, dtype=bool)
loc_mask[transition_locs] = False
# loc_mask[transition_locs + 1] = False
constraints.append(cvx.diff(s_hat, k=1)[loc_mask] == 0)
if seas_max is not None:
constraints.append(s_seas <= seas_max)
problem = cvx.Problem(objective=objective, constraints=constraints)
problem.solve()
return s_hat.value, s_seas.value
def local_median_regression_with_seasonal(signal, use_ixs=None, c1=1e3, solver='ECOS'):
'''
for a list of available solvers, see:
https://www.cvxpy.org/tutorial/advanced/index.html#solve-method-options
:param signal: 1d numpy array
:param use_ixs: optional index set to apply cost function to
:param c1: float
:param solver: string
:return: median fit with seasonal baseline removed
'''
if use_ixs is None:
use_ixs = np.arange(len(signal))
x = cvx.Variable(len(signal))
objective = cvx.Minimize(
cvx.norm1(signal[use_ixs] - x[use_ixs]) + c1 * cvx.norm(cvx.diff(x, k=2))
)
if len(signal) > 365:
constraints = [
x[365:] == x[:-365]
]
else:
constraints = []
prob = cvx.Problem(objective, constraints=constraints)
prob.solve(solver=solver)
return x.value
def local_quantile_regression_with_seasonal(signal, use_ixs=None, tau=0.75,
c1=1e3, solver='ECOS',
residual_weights=None,
tv_weights=None):
'''
https://colab.research.google.com/github/cvxgrp/cvx_short_course/blob/master/applications/quantile_regression.ipynb
:param signal: 1d numpy array
:param use_ixs: optional index set to apply cost function to
:param tau: float, parameter for quantile regression
:param c1: float
:param solver: string
:return: median fit with seasonal baseline removed
'''
if use_ixs is None:
use_ixs = np.arange(len(signal))
x = cvx.Variable(len(signal))
r = signal[use_ixs] - x[use_ixs]
objective = cvx.Minimize(
cvx.sum(0.5 * cvx.abs(r) + (tau - 0.5) * r) + c1 * cvx.norm(cvx.diff(x, k=2))
)
if len(signal) > 365:
constraints = [
x[365:] == x[:-365]
]
else:
constraints = []
prob = cvx.Problem(objective, constraints=constraints)
prob.solve(solver=solver)
return x.value
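# Note: the residual term 0.5*|r| + (tau - 0.5)*r used above is algebraically equal to the
# usual pinball (quantile) loss max(tau*r, (tau - 1)*r), so the fit tracks the tau-quantile.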
def total_variation_plus_seasonal_quantile_filter(signal, use_ixs=None, tau=0.995,
c1=1e3, c2=1e2, c3=1e2,
solver='ECOS',
residual_weights=None,
tv_weights=None):
'''
This performs total variation filtering with the addition of a seasonal baseline fit. This introduces a new
signal to the model that is smooth and periodic on a yearly time frame. This does a better job of describing real,
multi-year solar PV power data sets, and therefore does an improved job of estimating the discretely changing
signal.
:param signal: A 1d numpy array (must support boolean indexing) containing the signal of interest
:param c1: The regularization parameter to control the total variation in the final output signal
:param c2: The regularization parameter to control the smoothness of the seasonal signal
:return: A 1d numpy array containing the filtered signal
'''
n = len(signal)
if residual_weights is None:
residual_weights = np.ones_like(signal)
if tv_weights is None:
tv_weights = np.ones(len(signal) - 1)
if use_ixs is None:
use_ixs = np.ones(n, dtype=np.bool)
# selected_days = np.arange(n)[index_set]
# np.random.shuffle(selected_days)
# ix = 2 * n // 3
# train = selected_days[:ix]
# validate = selected_days[ix:]
# train.sort()
# validate.sort()
s_hat = cvx.Variable(n)
s_seas = cvx.Variable(max(n, 366))
s_error = cvx.Variable(n)
s_linear = cvx.Variable(n)
c1 = cvx.Parameter(value=c1, nonneg=True)
c2 = cvx.Parameter(value=c2, nonneg=True)
c3 = cvx.Parameter(value=c3, nonneg=True)
tau = cvx.Parameter(value=tau)
# w = len(signal) / np.sum(index_set)
beta = cvx.Variable()
objective = cvx.Minimize(
# (365 * 3 / len(signal)) * w * cvx.sum(0.5 * cvx.abs(s_error) + (tau - 0.5) * s_error)
2 * cvx.sum(0.5 * cvx.abs(cvx.multiply(residual_weights, s_error))
+ (tau - 0.5) * cvx.multiply(residual_weights, s_error))
+ c1 * cvx.norm1(cvx.multiply(tv_weights, cvx.diff(s_hat, k=1)))
+ c2 * cvx.norm(cvx.diff(s_seas, k=2))
+ c3 * beta ** 2
)
constraints = [
signal[use_ixs] == s_hat[use_ixs] + s_seas[:n][use_ixs] + s_error[use_ixs],
cvx.sum(s_seas[:365]) == 0
]
if True:
constraints.append(s_seas[365:] - s_seas[:-365] == beta)
constraints.extend([beta <= 0.01, beta >= -0.1])
problem = cvx.Problem(objective=objective, constraints=constraints)
problem.solve(solver='MOSEK')
return s_hat.value, s_seas.value[:n]
def basic_outlier_filter(x, outlier_constant=1.5):
'''
Applies an outlier filter based on the interquartile range definition:
any data point more than 1.5 interquartile ranges (IQRs) below the
first quartile or above the third quartile
Function returns a boolean mask for entries in the input array that are
not outliers.
:param x: ndarray
:param outlier_constant: float, multiplier constant on IQR
:return: boolean mask
'''
a = np.array(x)
upper_quartile = np.percentile(a, 75)
lower_quartile = np.percentile(a, 25)
iqr = (upper_quartile - lower_quartile) * outlier_constant
quartile_set = (lower_quartile - iqr, upper_quartile + iqr)
mask = np.logical_and(
a >= quartile_set[0],
a <= quartile_set[1]
)
return mask
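# Worked example (values computed by hand): for x = [1, 2, 3, 4, 100] the quartiles are
# 2.0 and 4.0, the widened range is (-1.0, 7.0), and the returned mask is
# [True, True, True, True, False], i.e. only 100 is flagged as an outlier.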
def progress(count, total, status='', bar_length=60):
"""
Python command line progress bar in less than 10 lines of code. · GitHub
https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
:param count: the current count, int
:param total: to total count, int
:param status: a message to display
:return:
"""
bar_len = bar_length
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', status))
sys.stdout.flush()
def find_runs(x):
"""Find runs of consecutive items in an array.
https://gist.github.com/alimanfoo/c5977e87111abe8127453b21204c1065"""
# ensure array
x = np.asanyarray(x)
if x.ndim != 1:
raise ValueError('only 1D array supported')
n = x.shape[0]
# handle empty array
if n == 0:
return np.array([]), np.array([]), np.array([])
else:
# find run starts
loc_run_start = np.empty(n, dtype=bool)
loc_run_start[0] = True
np.not_equal(x[:-1], x[1:], out=loc_run_start[1:])
run_starts = np.nonzero(loc_run_start)[0]
# find run values
run_values = x[loc_run_start]
# find run lengths
run_lengths = np.diff(np.append(run_starts, n))
return run_values, run_starts, run_lengths
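if __name__ == '__main__':
    # Minimal illustration of find_runs (added as an example; not part of the upstream module).
    values, starts, lengths = find_runs(np.array([1, 1, 2, 2, 2, 3]))
    print(values, starts, lengths)  # -> [1 2 3] [0 2 5] [2 3 1]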
| StarcoderdataPython |
4839751 | <gh_stars>0
from unet.unet_base_binary_arch import *
IMG_HEIGHT = IMG_WIDTH = 256
IMG_CHANNELS = 3
def retMask(img, weights_path):
"""Return mask given image aand weights path"""
# Below line needed only if weights are to be loaded and not the entire model.
#model = uNet()
model=load_model(weights_path, custom_objects={'getIOU':getIOU})
X_test = np.zeros((1, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)
X_test[0] = img
preds_test=model.predict(X_test, verbose=1)
preds_test = (preds_test > 0.5).astype(np.uint8)
mask=preds_test[0]
for i in range(mask.shape[0]):
for j in range(mask.shape[1]):
if mask[i][j] == 1:
mask[i][j] = 255
else:
mask[i][j] = 0
merged_image = cv2.merge((mask,mask,mask))
return merged_image
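# Illustrative call (file names and preprocessing are placeholders, not from this repo):
#   image = cv2.resize(cv2.imread('sample.png'), (IMG_WIDTH, IMG_HEIGHT))
#   mask = retMask(image, 'unet_weights.h5')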
| StarcoderdataPython |
40100 | # -*- coding: utf-8 -*-
# Copyright 2013-2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Authentication and Authorization models
"""
import base64
import logging
import urllib2
import xml
from beaker.cache import cache_region
from boto import ec2
from boto.ec2.connection import EC2Connection
# uncomment to enable boto request logger. Use only for development (see ref in _euca_connection)
#from boto.requestlog import RequestLogger
import boto.ec2.autoscale
import boto.ec2.cloudwatch
import boto.ec2.elb
import boto.iam
from boto.handler import XmlHandler as BotoXmlHandler
from boto.regioninfo import RegionInfo
from boto.sts.credentials import Credentials
from pyramid.security import Authenticated, authenticated_userid
class User(object):
"""Authenticated/Anonymous User object for Pyramid Auth."""
def __init__(self, user_id=None):
self.user_id = user_id
@classmethod
def get_auth_user(cls, request):
"""Get an authenticated user. Note that self.user_id = None if not authenticated.
See: http://docs.pylonsproject.org/projects/pyramid_cookbook/en/latest/auth/user_object.html
"""
user_id = authenticated_userid(request)
return cls(user_id=user_id)
def is_authenticated(self):
"""user_id will be None if the user isn't authenticated"""
return self.user_id
@staticmethod
def get_account_id(iam_conn=None, request=None):
"""Get 12-digit account ID for the currently signed-in user's account"""
from ..views import boto_error_handler
if iam_conn and request:
with boto_error_handler(request):
user = iam_conn.get_user()
if user and user.arn:
return user.arn.split(':')[4]
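# Illustrative: an IAM user ARN such as "arn:aws:iam::123456789012:user/alice" splits on
# ':' into six fields, and index 4 ("123456789012") is the account ID returned above.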
class ConnectionManager(object):
"""Returns connection objects, pulling from Beaker cache when available"""
@staticmethod
def aws_connection(region, access_key, secret_key, token, conn_type):
"""Return AWS EC2 connection object
Pulls from Beaker cache on subsequent calls to avoid connection overhead
:type region: string
:param region: region name (e.g. 'us-east-1')
:type access_key: string
:param access_key: AWS access key
:type secret_key: string
:param secret_key: AWS secret key
:type conn_type: string
:param conn_type: Connection type ('ec2', 'autoscale', 'cloudwatch', or 'elb')
"""
cache_key = 'aws_connection_cache_{conn_type}_{region}'.format(conn_type=conn_type, region=region)
# @cache_region('short_term', cache_key)
def _aws_connection(_region, _access_key, _secret_key, _token, _conn_type):
conn = None
if conn_type == 'ec2':
conn = ec2.connect_to_region(
_region, aws_access_key_id=_access_key, aws_secret_access_key=_secret_key, security_token=_token)
elif conn_type == 'autoscale':
conn = ec2.autoscale.connect_to_region(
_region, aws_access_key_id=_access_key, aws_secret_access_key=_secret_key, security_token=_token)
elif conn_type == 'cloudwatch':
conn = ec2.cloudwatch.connect_to_region(
_region, aws_access_key_id=_access_key, aws_secret_access_key=_secret_key, security_token=_token)
if conn_type == 'elb':
conn = ec2.elb.connect_to_region(
_region, aws_access_key_id=_access_key, aws_secret_access_key=_secret_key, security_token=_token)
return conn
return _aws_connection(region, access_key, secret_key, token, conn_type)
@staticmethod
def euca_connection(clchost, port, access_id, secret_key, token, conn_type):
"""Return Eucalyptus connection object
Pulls from Beaker cache on subsequent calls to avoid connection overhead
:type clchost: string
:param clchost: FQDN or IP of Eucalyptus CLC (cloud controller)
:type port: int
:param port: Port of Eucalyptus CLC (usually 8773)
:type access_id: string
:param access_id: Euca access id
:type secret_key: string
:param secret_key: Eucalyptus secret key
:type conn_type: string
:param conn_type: Connection type ('ec2', 'autoscale', 'cloudwatch', or 'elb')
"""
cache_key = 'euca_connection_cache_{conn_type}_{clchost}_{port}'.format(
conn_type=conn_type, clchost=clchost, port=port
)
# @cache_region('short_term', cache_key)
def _euca_connection(_clchost, _port, _access_id, _secret_key, _token, _conn_type):
region = RegionInfo(name='eucalyptus', endpoint=_clchost)
path = '/services/Eucalyptus'
conn_class = EC2Connection
api_version = '2012-12-01'
# Configure based on connection type
if conn_type == 'autoscale':
api_version = '2011-01-01'
conn_class = boto.ec2.autoscale.AutoScaleConnection
path = '/services/AutoScaling'
elif conn_type == 'cloudwatch':
path = '/services/CloudWatch'
conn_class = boto.ec2.cloudwatch.CloudWatchConnection
elif conn_type == 'elb':
path = '/services/LoadBalancing'
conn_class = boto.ec2.elb.ELBConnection
elif conn_type == 'iam':
path = '/services/Euare'
conn_class = boto.iam.IAMConnection
if conn_type == 'sts':
conn = EucaAuthenticator(_clchost, _port)
elif conn_type != 'iam':
conn = conn_class(
_access_id, _secret_key, region=region, port=_port, path=path, is_secure=True, security_token=_token
)
else:
conn = conn_class(
_access_id, _secret_key, host=_clchost, port=_port, path=path, is_secure=True, security_token=_token
)
# AutoScaling service needs additional auth info
if conn_type == 'autoscale':
conn.auth_region_name = 'Eucalyptus'
if conn_type != 'sts': # this is the only non-boto connection
setattr(conn, 'APIVersion', api_version)
conn.https_validate_certificates = False
conn.http_connection_kwargs['timeout'] = 30
# uncomment to enable boto request logger. Use only for development
#conn.set_request_hook(RequestLogger())
return conn
return _euca_connection(clchost, port, access_id, secret_key, token, conn_type)
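# Illustrative call (endpoint and credentials are placeholders, not from this project):
#   ec2_conn = ConnectionManager.euca_connection(
#       'clc.example.com', 8773, access_id, secret_key, token, 'ec2')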
def groupfinder(user_id, request):
if user_id is not None:
return [Authenticated]
return []
class EucaAuthenticator(object):
"""Eucalyptus cloud token authenticator"""
TEMPLATE = 'https://{host}:{port}/services/Tokens?Action=GetAccessToken&DurationSeconds={dur}&Version=2011-06-15'
def __init__(self, host, port):
"""
Configure connection to Eucalyptus STS service to authenticate with the CLC (cloud controller)
:type host: string
:param host: IP address or FQDN of CLC host
:type port: integer
:param port: port number to use when making the connection
"""
self.host = host
self.port = port
def authenticate(self, account, user, passwd, new_passwd=None, timeout=15, duration=3600):
if user == 'admin' and duration > 3600: # admin cannot have more than 1 hour duration
duration = 3600
# because of the variability, we need to keep this here, not in __init__
self.auth_url = self.TEMPLATE.format(
host=self.host,
port=self.port,
dur=duration,
)
req = urllib2.Request(self.auth_url)
if new_passwd:
auth_string = "{user}@{account};{pw}@{new_pw}".format(
user=base64.b64encode(user),
account=base64.b64encode(account),
pw=base64.b64encode(passwd),
new_pw=new_passwd
)
else:
auth_string = "{user}@{account}:{pw}".format(
user=base64.b64encode(user),
account=base64.b64encode(account),
pw=passwd
)
encoded_auth = base64.b64encode(auth_string)
req.add_header('Authorization', "Basic %s" % encoded_auth)
response = urllib2.urlopen(req, timeout=timeout)
body = response.read()
# parse AccessKeyId, SecretAccessKey and SessionToken
creds = Credentials()
h = BotoXmlHandler(creds, None)
xml.sax.parseString(body, h)
logging.info("Authenticated Eucalyptus user: " + account + "/" + user)
return creds
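# Illustrative usage sketch (added for clarity, not part of the original module;
# the host, account and credential values below are placeholders):
#
#     authenticator = EucaAuthenticator('clc.example.com', 8773)
#     creds = authenticator.authenticate(
#         account='myaccount', user='myuser', passwd='secret', duration=3600)
#     # 'creds' is the boto Credentials object whose access key, secret key and
#     # session token are then handed to euca_connection() above.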
class AWSAuthenticator(object):
def __init__(self, package):
"""
Configure connection to AWS STS service
:type package: string
:param package: a pre-signed request string for the STS GetSessionToken call
"""
self.endpoint = 'https://sts.amazonaws.com'
self.package = package
def authenticate(self, timeout=20):
""" Make authentication request to AWS STS service
Timeout defaults to 20 seconds"""
req = urllib2.Request(self.endpoint, data=self.package)
response = urllib2.urlopen(req, timeout=timeout)
body = response.read()
# parse AccessKeyId, SecretAccessKey and SessionToken
creds = Credentials()
h = BotoXmlHandler(creds, None)
xml.sax.parseString(body, h)
logging.info("Authenticated AWS user")
return creds
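# Illustrative usage sketch (added for clarity, not part of the original module;
# the pre-signed GetSessionToken request body is assumed to be built elsewhere):
#
#     aws_auth = AWSAuthenticator(package=presigned_sts_request_body)
#     creds = aws_auth.authenticate(timeout=20)
#     # 'creds' carries the temporary AccessKeyId/SecretAccessKey/SessionToken
#     # parsed from the STS response.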
| StarcoderdataPython |
3308174 | import requests
from connection.connection_variables import link_for_rebill
from parameters.subscription.case_1.subsciption_params import *
from parameters.rebill.case_1.rebill_params import *
from parameters.cancel.case_1.cancel_params import *
def case_one_subscription():
return requests.get(link_for_rebill, case_1_subscription_params)
def case_one_first_rebill():
return requests.get(link_for_rebill, case_1_first_rebill_params)
def case_one_second_rebill():
return requests.get(link_for_rebill, case_1_second_rebill_params)
def case_one_third_rebill():
return requests.get(link_for_rebill, case_1_third_rebill_params)
def case_one_fourth_rebill():
return requests.get(link_for_rebill, case_1_fourth_rebill_params)
def case_one_cancel():
return requests.get(link_for_rebill, case_1_cancel_params)
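# Added sketch (not part of the original module): the helpers above can be chained
# to exercise the full case-1 lifecycle; the function name below is hypothetical.
def case_one_full_flow():
    responses = [
        case_one_subscription(),
        case_one_first_rebill(),
        case_one_second_rebill(),
        case_one_third_rebill(),
        case_one_fourth_rebill(),
        case_one_cancel(),
    ]
    return [response.status_code for response in responses]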
| StarcoderdataPython |
1735158 | """
Cubicle is a high-level language for describing the structure, formatting,
and boilerplate for tabular reports (and perhaps eventually also charts),
combined with an API for populating and emitting these via xlsxwriter.
"""
from . import compiler, dynamic, runtime, version
from .version import __version__, __version_info__
| StarcoderdataPython |
3360434 | from os import environ
import sys
from dotenv import load_dotenv
from explorerClient.eth import ETHExplorerClient
if __name__ == "__main__":
load_dotenv()
    # client = ETHExplorerClient.create(rpc_endpoint=str(environ.get("APP_API_CLIENT_INFURA_RPC_ENDPOINT")))
client = ETHExplorerClient.create(rpc_endpoint=str(environ.get("APP_API_CLIENT_BIFINANCE_RPC_ENDPOINT")))
print(client.get_transaction(sys.argv[1]).json())
| StarcoderdataPython |
190038 | <filename>servi-bench/benchmarks_old/cpustress/google/nodejs/cpustress_benchmark.py
import os
from timeit import default_timer as timer
def setup():
os.system('sls deploy')
# TODO: need a harness-supported way of injecting credentials for all sls calls
# os.system('sls deploy --credentials=/path/to/credentials.json')
def pre_execute():
print('needed?')
def execute():
print('executing')
os.system('sls invoke --function google-cpustress-nodejs --data \'{ "level": 2 }\'')
def post_execute():
os.system('cat response.json')
def cleanup():
os.system('sls remove')
def main():
setup()
print('Cold start')
start = timer()
execute()
end = timer()
print(end - start)
print('Warm start')
start = timer()
execute()
end = timer()
print(end - start)
cleanup()
if __name__ == "__main__":
main()
| StarcoderdataPython |
3333558 | from flask_wtf import FlaskForm
from wtforms import SubmitField, SelectField
class TagSetupForm0(FlaskForm):
submit = SubmitField("Parse Part 1")
class TagSetupForm1(FlaskForm):
submit = SubmitField("Parse Part 2")
class NewNationForm(FlaskForm):
select = SelectField("Add Nation")
submit = SubmitField("Confirm")
| StarcoderdataPython |
27434 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 13:44:34 2018
@author: Moha-Thinkpad
"""
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model
import datetime
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow.keras
import argparse
import tensorflow as tf
from tensorflow.keras import backend as K
#cfg = K.tf.ConfigProto()
#cfg.gpu_options.allow_growth = True
#K.set_session(K.tf.Session(config=cfg))
####################################
########################################################################
####################################
def custom_loss_seg (y_true, y_pred):
#A = tensorflow.keras.losses.mean_squared_error(y_true, y_pred)
B = tensorflow.keras.losses.mean_absolute_error(y_true, y_pred)
return(B)
from tensorflow.keras.layers import Lambda
sum_dim_channel = Lambda(lambda xin: K.sum(xin, axis=3))
def lrelu(x): #from pix2pix code
a=0.2
# adding these together creates the leak part and linear part
# then cancels them out by subtracting/adding an absolute value term
# leak: a*x/2 - a*abs(x)/2
# linear: x/2 + abs(x)/2
# this block looks like it has 2 inputs on the graph unless we do this
x = tf.identity(x)
return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)
def lrelu_output_shape(input_shape):
shape = list(input_shape)
return tuple(shape)
layer_lrelu=Lambda(lrelu, output_shape=lrelu_output_shape)
def PreProcess(InputImages):
#output=np.zeros(InputImages.shape,dtype=np.float)
InputImages=InputImages.astype(np.float)
for i in range(InputImages.shape[0]):
try:
InputImages[i,:,:,:]=InputImages[i,:,:,:]/np.max(InputImages[i,:,:,:])
# output[i,:,:,:] = (output[i,:,:,:]* 2)-1
except:
InputImages[i,:,:]=InputImages[i,:,:]/np.max(InputImages[i,:,:])
# output[i,:,:] = (output[i,:,:]* 2) -1
return InputImages
####################################
########################################################################
####################################
parser = argparse.ArgumentParser()
parser.add_argument("--mode", choices=["train", "test", "export"])
parser.add_argument("--input_dir", help="path to folder containing images")
parser.add_argument("--target_dir", help="where to")
parser.add_argument("--checkpoint", help="where to ")
parser.add_argument("--output_dir", help="where to p")
parser.add_argument("--landmarks", help=" -,-,-")
parser.add_argument("--lr", help="adam learning rate")
parser.add_argument("--ngf", type=int, default=64, help="number of generator filters in first conv layer")
# export options
a = parser.parse_args()
a.batch_size=40
a.max_epochs_seg=1
a.lr_seg=0.0001
a.beta1=0.5
a.ngf=64
#a.seed=1
# a.mode="train"
# a.input_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_train_png/'
# a.target_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_train_lm/'
# a.checkpoint='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
# a.output_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
# a.landmarks='43,43,43'
#a.mode="test"
#a.batch_size=1
#a.input_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_test_png/'
#a.target_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/temp_test_lm/'
#a.checkpoint='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
#a.output_dir='C:\\Users\\User\\Desktop\\Example_LoSoCo_Inputs_3_large_heatmaps/Models_lm/'
#a.landmarks='43,43,43'
######## ------------ Config
#Ind_impo_landmarks_matlab=np.array([5, 6, 15,16,17,18,20,21,22,23,24,25,26,27,28,29,30,32,33,34,35,36,37,38,41])
#Ind_impo_landmarks_python=Ind_impo_landmarks_matlab-1
#Num_landmarks=25
# 33,23,16 - 29,15, - 30,20,26 - 5,18,21 - 44,17,41 - 28,22,34, - 27,43,37
StrLandmarks=a.landmarks
StrLandmarks=StrLandmarks.split(",")
Ind_impo_landmarks_matlab=np.array([0,0,0])
Ind_impo_landmarks_matlab[0]=int(StrLandmarks[0])
Ind_impo_landmarks_matlab[1]=int(StrLandmarks[1])
Ind_impo_landmarks_matlab[2]=int(StrLandmarks[2])
Ind_impo_landmarks_python=Ind_impo_landmarks_matlab-1
Num_landmarks=3
print('============================')
print('============================')
print(datetime.datetime.now())
print('============================')
print('============================')
#########----------------------DATA
from os import listdir
ImageFileNames=[]
FileNames=listdir(a.input_dir)
for names in FileNames:
if names.endswith(".png"):
ImageFileNames.append(names)
#LMFileNames=listdir(a.target_dir)
from skimage import io as ioSK
from numpy import genfromtxt
Images=np.zeros((len(ImageFileNames),256,256,3),dtype=np.uint8)
#Images_seg=np.zeros((len(ImageFileNames),256,256),dtype=np.uint8)
LandmarkLocations=np.zeros((len(ImageFileNames),2,44),dtype=np.uint8)
for i in range(len(ImageFileNames)):
Image = ioSK.imread(a.input_dir+'/'+ImageFileNames[i])
Images[i,:,:,:]=Image
FileName=ImageFileNames[i]
FileName=FileName[:-4]
# Image = ioSK.imread(a.target_dir_seg+'/'+ImageFileNames[i])
# Images_seg[i,:,:]=Image
Landmarks0 = genfromtxt(a.target_dir+'/'+FileName+'.csv', delimiter=',')
Landmarks0 = Landmarks0.astype(int)
LandmarkLocations[i,0,:]=Landmarks0[:,0]
LandmarkLocations[i,1,:]=Landmarks0[:,1]
#Landmarks = np.flip(Landmarks0, axis=1)
#plt.figure()
#plt.imshow(Images[100,:,:,:])
#plt.scatter(LandmarkLocations[100,0,:],LandmarkLocations[100,1,:])
X_train = PreProcess(Images)
del Images
import gc
gc.collect()
LandmarkLocations_row=LandmarkLocations[:,0,:]
LandmarkLocations_col=LandmarkLocations[:,1,:]
LandmarkLocations_row=LandmarkLocations_row[:,Ind_impo_landmarks_python]
LandmarkLocations_col=LandmarkLocations_col[:,Ind_impo_landmarks_python]
from scipy.ndimage import gaussian_filter
Images_HeatMaps=np.zeros((X_train.shape[0],X_train.shape[1],X_train.shape[2],Num_landmarks),dtype=np.float)
Image_heatmap=np.zeros((256,256),dtype=np.float)
for i in range(X_train.shape[0]):
for k in range(Num_landmarks):
# h=np.argwhere(Images_seg[i,:,:]==2*Ind_impo_landmarks_matlab[k])
lms_1=LandmarkLocations_row[i,k]
lms_2=LandmarkLocations_col[i,k]
Image_heatmap[:,:]=0
Image_heatmap[lms_2,lms_1]=1
Image_heatmap=gaussian_filter(Image_heatmap, sigma=10)
Image_heatmap=(Image_heatmap/np.max(Image_heatmap))
Images_HeatMaps[i,:,:,k]=Image_heatmap
gc.collect()
#plt.figure()
#plt.imshow(np.squeeze(Images_HeatMaps[2,:,:,5]), cmap='gray')
#plt.imshow(Images[2,:,:,:],cmap='jet', alpha=0.5)
#plt.show()
Y_train_heatmap = PreProcess(Images_HeatMaps)
del Images_HeatMaps
gc.collect()
# del Images_seg
import os
if not os.path.exists(a.checkpoint):
os.makedirs(a.checkpoint)
if not os.path.exists(a.output_dir):
os.makedirs(a.output_dir)
if a.mode=='test':
checkpoint_model_file=a.checkpoint+'LandMarkModel'
from tensorflow.keras.models import load_model
print('loading model ...')
model_final=load_model(checkpoint_model_file+'_weights.h5', custom_objects={
'custom_loss_seg': custom_loss_seg,
'layer_lrelu':layer_lrelu,
'lrelu':lrelu,
'lrelu_output_shape':lrelu_output_shape,
'tf': tf})
print('model is loaded ')
Images=np.zeros((len(ImageFileNames),256,256,3),dtype=np.float)
newLandmarks=np.zeros((Num_landmarks,2),dtype=np.float16)
Y_test_heatmap=Y_train_heatmap
X_test=X_train
# fig = plt.figure()
# plt.imshow(X_train[0,:,:,:],cmap='gray', alpha=0.95)
# plt.imshow(Y_train_heatmap[0,:,:,:],cmap='jet', alpha=0.5)
# plt.grid(True)
pred_example_heatmaps=model_final.predict(X_test[:,:,:,:])
print('writing results ...')
for i in range(len(ImageFileNames)):
# print(i)
FileName=ImageFileNames[i]
FileName=FileName[:-4]
lms_pred_all=np.zeros((Num_landmarks,2),dtype=np.int)
lms_True_all=np.zeros((Num_landmarks,2),dtype=np.int)
for k in range(Num_landmarks):
# plt.figure()
# plt.imshow(example_segmentation[0,:,:,i], cmap='gray')
# plt.imshow(Y_train_heatmap[0,:,:,:],cmap='jet', alpha=0.5)
# plt.show()
True_chan=np.squeeze(Y_test_heatmap[i,:,:,k])
lms_True=np.unravel_index(np.argmax(True_chan, axis=None), True_chan.shape)
lms_True_all[k,:]=lms_True
Pred_chan=np.squeeze(pred_example_heatmaps[i,:,:,k])
lms_pred=np.unravel_index(np.argmax(Pred_chan, axis=None), Pred_chan.shape)
lms_pred_all[k,:]=lms_pred
# fig, ax = plt.subplots(1, 2)
# ax[0].imshow(Y_test_heatmap[i,:,:,i])
# ax[1].imshow(pred_example_heatmaps[i,:,:,i])
# plt.show()
np.savetxt(a.output_dir+FileName+'_pred.csv',
lms_pred_all , delimiter=",", fmt='%i')
np.savetxt(a.output_dir+FileName+'_true.csv',
lms_True_all , delimiter=",", fmt='%i')
fig = plt.figure()
plt.imshow(X_test[i,:,:,:],cmap='jet', alpha=0.9)
plt.scatter(lms_True_all[:,1],lms_True_all[:,0], marker='+', color='red')
plt.scatter(lms_pred_all[:,1],lms_pred_all[:,0], marker='x', color='blue')
# plt.grid(True)
fig.savefig(a.output_dir+FileName+'.png')
plt.close(fig)
if a.mode=='train':
# plt.figure()
# plt.imshow(X_train[90,:,:,:])
# plt.figure()
# plt.imshow(Y_train_heatmap[90,:,:,4])
try: # continue training
checkpoint_model_file=a.checkpoint+'LandMarkModel'
from tensorflow.keras.models import load_model
print('======== loading model ...')
model_4_heatmap=load_model(checkpoint_model_file+'_weights.h5', custom_objects={
'custom_loss_seg': custom_loss_seg,
'layer_lrelu':layer_lrelu,
'lrelu':lrelu,
'lrelu_output_shape':lrelu_output_shape,
'tf': tf})
print('======== continue training ...')
except: # new training
print('======== new training ...')
checkpoint_model_file=a.output_dir+'LandMarkModel'
########### network
kernelSize=(4,4)
InputLayer=tensorflow.keras.layers.Input(shape=(256,256,3))
e_1=tensorflow.keras.layers.Conv2D(a.ngf, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(InputLayer)
e_2=layer_lrelu(e_1)
e_2=tensorflow.keras.layers.Conv2D(a.ngf * 2, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_2)
e_2=tensorflow.keras.layers.BatchNormalization()(e_2)
e_3=layer_lrelu(e_2)
e_3=tensorflow.keras.layers.Conv2D(a.ngf * 4, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_3)
e_3=tensorflow.keras.layers.BatchNormalization()(e_3)
e_4=layer_lrelu(e_3)
e_4=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_4)
e_4=tensorflow.keras.layers.BatchNormalization()(e_4)
e_5=layer_lrelu(e_4)
e_5=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_5)
e_5=tensorflow.keras.layers.BatchNormalization()(e_5)
e_6=layer_lrelu(e_5)
e_6=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_6)
e_6=tensorflow.keras.layers.BatchNormalization()(e_6)
e_7=layer_lrelu(e_6)
e_7=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_7)
e_7=tensorflow.keras.layers.BatchNormalization()(e_7)
e_8=layer_lrelu(e_7)
e_8=tensorflow.keras.layers.Conv2D(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(e_8)
e_8=tensorflow.keras.layers.BatchNormalization()(e_8)
d_8=e_8
d_8=tensorflow.keras.layers.Activation('relu')(d_8)
d_8=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_8)
d_8=tensorflow.keras.layers.BatchNormalization()(d_8)
d_8=tensorflow.keras.layers.Dropout(0.5)(d_8)
d_7=tensorflow.keras.layers.concatenate(inputs=[d_8, e_7], axis=3)
d_7=tensorflow.keras.layers.Activation('relu')(d_7)
d_7=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_7)
d_7=tensorflow.keras.layers.BatchNormalization()(d_7)
d_7=tensorflow.keras.layers.Dropout(0.5)(d_7)
d_6=tensorflow.keras.layers.concatenate(inputs=[d_7, e_6], axis=3)
d_6=tensorflow.keras.layers.Activation('relu')(d_6)
d_6=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_6)
d_6=tensorflow.keras.layers.BatchNormalization()(d_6)
d_6=tensorflow.keras.layers.Dropout(0.5) (d_6)
d_5=tensorflow.keras.layers.concatenate(inputs=[d_6, e_5], axis=3)
d_5=tensorflow.keras.layers.Activation('relu')(d_5)
d_5=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 8, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_5)
d_5=tensorflow.keras.layers.BatchNormalization()(d_5)
d_5=tensorflow.keras.layers.Dropout(0.5) (d_5)
d_4=tensorflow.keras.layers.concatenate(inputs=[d_5, e_4], axis=3)
d_4=tensorflow.keras.layers.Activation('relu')(d_4)
d_4=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 4, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_4)
d_4=tensorflow.keras.layers.BatchNormalization()(d_4)
d_3=tensorflow.keras.layers.concatenate(inputs=[d_4, e_3], axis=3)
d_3=tensorflow.keras.layers.Activation('relu')(d_3)
d_3=tensorflow.keras.layers.Conv2DTranspose(a.ngf * 2, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_3)
d_3=tensorflow.keras.layers.BatchNormalization()(d_3)
d_2=tensorflow.keras.layers.concatenate(inputs=[d_3, e_2], axis=3)
d_2=tensorflow.keras.layers.Activation('relu')(d_2)
# d_2=tensorflow.keras.layers.Conv2DTranspose(a.ngf, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_2)
d_2=tensorflow.keras.layers.Conv2DTranspose(a.ngf, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_2)
d_2=tensorflow.keras.layers.BatchNormalization()(d_2)
d_1=tensorflow.keras.layers.concatenate(inputs=[d_2, e_1], axis=3)
d_1=tensorflow.keras.layers.Activation('relu')(d_1)
d_1=tensorflow.keras.layers.Conv2DTranspose(Num_landmarks, kernel_size=kernelSize, strides=(2, 2), dilation_rate=(1, 1), padding='same',)(d_1)
HeatMaps=tensorflow.keras.layers.Activation('sigmoid', name='last_layer_of_decoder')(d_1)
model_4_heatmap=Model(inputs=InputLayer, outputs=HeatMaps)
###########Train
print('trainable_count =',int(np.sum([K.count_params(p) for p in set(model_4_heatmap.trainable_weights)])))
print('non_trainable_count =', int(np.sum([K.count_params(p) for p in set(model_4_heatmap.non_trainable_weights)])))
# fix random seed for reproducibility
seed = 1
import random
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
#### compile and train the model
UsedOptimizer=optimizers.Adam(lr=a.lr_seg, beta_1=a.beta1)
model_4_heatmap.compile(loss=custom_loss_seg, optimizer=UsedOptimizer)
History=model_4_heatmap.fit(X_train, Y_train_heatmap,
batch_size=a.batch_size, shuffle=True, validation_split=0.05,
epochs=a.max_epochs_seg,
verbose=1)
plt.plot(History.history['loss'])
plt.plot(History.history['val_loss'])
plt.grid()
plt.savefig(a.output_dir+'History_'+str(a.lr)+'.png')
plt.close()
import pickle
Dict={'History_loss_train':History.history['loss'],
'History_loss_val':History.history['val_loss'],}
pickle.dump( Dict, open(a.output_dir+'History_'+str(a.lr)+'.pkl', "wb" ) )
# show an exemplary result
Num_example_train=0
pred_example_heatmaps=model_4_heatmap.predict(X_train[Num_example_train:Num_example_train+1,:,:,:])
lms_pred_all=np.zeros((Num_landmarks,2),dtype=np.int)
lms_True_all=np.zeros((Num_landmarks,2),dtype=np.int)
for i in range(Num_landmarks):
# plt.figure()
# plt.imshow(example_segmentation[0,:,:,i], cmap='gray')
# plt.imshow(X_train[0,:,:,:],cmap='jet', alpha=0.5)
# plt.show()
Pred_chan=np.squeeze(pred_example_heatmaps[0,:,:,i])
lms_pred=np.unravel_index(np.argmax(Pred_chan, axis=None), Pred_chan.shape)
lms_pred_all[i,:]=lms_pred
True_chan=np.squeeze(Y_train_heatmap[Num_example_train,:,:,i])
lms_True=np.unravel_index(np.argmax(True_chan, axis=None), True_chan.shape)
lms_True_all[i,:]=lms_True
# fig, ax = plt.subplots(1, 2)
# ax[0].imshow(Y_train_heatmap[Num_example_train,:,:,i])
# ax[1].imshow(pred_example_heatmaps[0,:,:,i])
# plt.show()
fig = plt.figure()
plt.imshow(X_train[Num_example_train,:,:,:],cmap='jet', alpha=0.9)
plt.scatter(lms_True_all[:,1],lms_True_all[:,0], marker='+', color='red')
plt.scatter(lms_pred_all[:,1],lms_pred_all[:,0], marker='x', color='blue')
plt.grid(True)
# fig.savefig('scatter-result'+str(i)+'_pred.png')
plt.close(fig)
print('===========training done=================')
print('============================')
print(datetime.datetime.now())
print('============================')
print('============================')
print('Saving model ...')
model_4_heatmap.save(checkpoint_model_file+'_weights.h5')
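# Illustrative command lines (added sketch; the script filename and data directory
# paths are placeholders, the arguments match the argparse setup above):
#
#     python landmark_heatmaps.py --mode train --input_dir data/train_png \
#         --target_dir data/train_lm --checkpoint models/ --output_dir models/ \
#         --landmarks 43,43,43 --lr 0.0001
#
#     python landmark_heatmaps.py --mode test --input_dir data/test_png \
#         --target_dir data/test_lm --checkpoint models/ --output_dir results/ \
#         --landmarks 43,43,43 --lr 0.0001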
| StarcoderdataPython |
1747194 | <gh_stars>10-100
# ==BEGIN LICENSE==
#
# MIT License
#
# Copyright (c) 2018 SRI Lab, ETH Zurich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ==END LICENSE==
import numpy as np
import math
from dpfinder.logging import logger
from dpfinder.searcher.searcher import Searcher
from dpfinder.algorithms.algorithms import Algorithm
from dpfinder.searcher.statistics.confidence_interval import get_confidence_interval
from dpfinder.utils.utils import my_to_str
from dpfinder.utils.timer import time_measure
n_opt_steps = 50
min_p = 1e-2
class TensorFlowSearcher(Searcher):
def __init__(self, confirming, confirmer, min_n_samples, max_n_samples, confidence, eps_err_goal, alg: Algorithm):
super().__init__(confirming, confirmer, alg)
self.min_n_samples = min_n_samples
self.n_samples = self.min_n_samples
self.max_n_samples = max_n_samples
self.confidence = confidence
self.eps_err_goal = eps_err_goal
self.alg = alg
# set seed for randomness
np.random.seed(0)
# build graph
with time_measure('build_graph'):
self.imp = alg.get_tensorflow_implementation()
self.imp.build_fresh_graph()
self.imp.fresh_randomness(self.n_samples)
with time_measure('init_optimizer'):
logger.info("Started setting up optimizer")
self.optimizer = self.imp.get_optimizer(n_opt_steps, min_p)
logger.info("Finished setting up optimizer")
# internal variables
self.s = None
def step_internal(self, s):
if s % 2 == 0:
with time_measure('random'):
self.random_start(s)
else:
with time_measure('optimize'):
self.optimize(s)
return self.s.a, self.s.b, self.s.o, self.s.eps
def random_start(self, s):
self.alg.set_random_start(self.imp)
self.check_error()
logger.data('n_samples', self.n_samples)
logger.info("Result after step (random,%s):\n%s", s, self.current_state())
return self.s.eps
def current_state(self):
a_str = my_to_str(self.s.a)
b_str = my_to_str(self.s.b)
o_str = my_to_str(self.s.o)
return "\ta={}\n\tb={}\n\to={}\n\teps={}".format(a_str, b_str, o_str, self.s.eps)
def check_error(self):
while True:
self.imp.fresh_randomness(self.n_samples)
self.s = self.imp.run_all()
error = get_confidence_interval(self.s.pas, self.s.pbs, self.confidence, self.eps_err_goal)
if error * 4 < self.eps_err_goal and self.n_samples / 1.4 >= self.min_n_samples:
self.n_samples = int(self.n_samples / 1.4)
logger.debug("Error too small:%s, decreasing size of network to %s...", error, self.n_samples)
elif error > self.eps_err_goal and self.n_samples < self.max_n_samples:
self.n_samples = self.n_samples * 2
logger.debug("Error too large:%s, increasing size of network to %s...", error, self.n_samples)
elif math.isnan(error):
logger.warning("Error is nan, resetting size of network to %s...", self.n_samples)
break
else:
break
logger.info("Tensorflow: eps=%s+-%s", self.s.eps, error)
def optimize(self, s):
if np.isnan(self.s.a).any() or np.isnan(self.s.d).any() or np.isnan(self.s.o).any():
logger.warning("Parameters contain 'nan', will not run gradient descent. Returning 0.0 instead...")
elif np.isnan(self.s.eps):
logger.warning("eps is 'nan', will not run gradient descent. Returning 0.0 instead...")
elif np.isinf(self.s.eps):
logger.warning("eps is already 'inf', will not run gradient descent....")
else:
logger.debug("Starting optimization step")
self.imp.minimize(self.optimizer)
logger.debug("Finished optimization step")
self.check_error()
logger.data('n_samples', self.n_samples)
logger.info("Result after step (optimized,%s):\n%s", s, self.current_state())
return self.s.eps
def close(self):
self.imp.close() | StarcoderdataPython |
131228 | # -*- coding: utf-8 -*-
from app.constants import S_OK, S_ERR
import random
import math
import base64
import time
import ujson as json
import sys
import argparse
from app import cfg
from app import util
def cron_chiayi_city():
pass
def parse_args():
''' '''
parser = argparse.ArgumentParser(description='roadpin_backend')
parser.add_argument('-i', '--ini', type=str, required=True, help="ini filename")
args = parser.parse_args()
return (S_OK, args)
if __name__ == '__main__':
(error_code, args) = parse_args()
cfg.init({"ini_filename": args.ini})
cron_chiayi_city()
| StarcoderdataPython |
156366 | <reponame>PsiPhiTheta/LeetCode
class Solution:
def reverseString(self, s):
"""
:type s: str
:rtype: str
"""
output = list(s)
output = output[::-1]
output = "".join(output)
return output
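        # Added note: Python strings support extended slicing directly, so an
        # equivalent one-liner would be `return s[::-1]`.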
| StarcoderdataPython |
1675258 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
new_task.py to allow arbitrary messages to be sent from the command line. This program will schedule tasks to our work queue.
The main idea behind Work Queues is to avoid doing a resource-intensive task immediately and having to wait for it to complete.
Instead we schedule the task to be done later. We encapsulate a task as a message and send it to the queue. A worker
process running in the background will pop the tasks and eventually execute the job. When you run many workers, the tasks
will be shared between them.
This concept is especially useful in web applications where it is impossible to handle a complex task during a short HTTP request window.
"""
# Pika is a pure-Python implementation of the AMQP 0-9-1 protocol
import pika
import sys
# guest user can only connect via localhost
#credentials = pika.PlainCredentials('guest', 'guest')
credentials = pika.PlainCredentials('pi', 'macintosh')
connection = pika.BlockingConnection(pika.ConnectionParameters(host='192.168.31.156',
port=5672,
virtual_host='/',
credentials=credentials))
channel = connection.channel()
channel.queue_declare(queue='hello')
message = ' '.join(sys.argv[1:]) or "Hello World!"
channel.basic_publish(exchange='',
routing_key='hello',
body=message)
print("[x] Sent 'Hello World!'")
connection.close()
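# Added sketch of the matching worker (consumer) side, illustrative only and based
# on the pika 1.x callback API; it would run as a separate process:
#
#     def callback(ch, method, properties, body):
#         print(" [x] Received %r" % body)
#
#     channel.basic_consume(queue='hello', on_message_callback=callback, auto_ack=True)
#     channel.start_consuming()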
"""
Please keep in mind that this and other tutorials are, well, tutorials. They demonstrate one new concept at a time and may
intentionally oversimplify some things and leave out others. For example topics such as connection management, error handling,
connection recovery, concurrency and metric collection are largely omitted for the sake of brevity. Such simplified code
should not be considered production ready.
""" | StarcoderdataPython |
3288852 | # ***** BEGIN GPL LICENSE BLOCK *****
#
# Script copyright (C) <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# --------------------------------------------------------------------------
# <pep8 compliant>
# Contributor(s): <NAME>" Boshoff, <NAME>, <NAME>
def get_vcolor_layer_data(me):
for lay in me.vertex_colors:
if lay.active:
return lay.data
lay = me.vertex_colors.new()
lay.active = True
return lay.data
def applyVertexDirt(me, blur_iterations, blur_strength, clamp_dirt, clamp_clean, dirt_only):
from mathutils import Vector
from math import acos
import array
vert_tone = array.array("f", [0.0]) * len(me.vertices)
# create lookup table for each vertex's connected vertices (via edges)
con = [[] for i in range(len(me.vertices))]
# add connected verts
for e in me.edges:
con[e.vertices[0]].append(e.vertices[1])
con[e.vertices[1]].append(e.vertices[0])
for i, v in enumerate(me.vertices):
vec = Vector()
no = v.normal
co = v.co
# get the direction of the vectors between the vertex and it's connected vertices
for c in con[i]:
vec += (me.vertices[c].co - co).normalized()
# average the vector by dividing by the number of connected verts
tot_con = len(con[i])
if tot_con == 0:
continue
vec /= tot_con
# angle is the acos() of the dot product between normal and connected verts.
# > 90 degrees: convex
# < 90 degrees: concave
ang = acos(no.dot(vec))
# enforce min/max
ang = max(clamp_dirt, ang)
if not dirt_only:
ang = min(clamp_clean, ang)
vert_tone[i] = ang
# blur tones
for i in range(blur_iterations):
# backup the original tones
orig_vert_tone = vert_tone[:]
# use connected verts look up for blurring
for j, c in enumerate(con):
for v in c:
vert_tone[j] += blur_strength * orig_vert_tone[v]
vert_tone[j] /= len(c) * blur_strength + 1
del orig_vert_tone
min_tone = min(vert_tone)
max_tone = max(vert_tone)
tone_range = max_tone - min_tone
if tone_range < 0.0001:
# weak, don't cancel, see T43345
tone_range = 0.0
else:
tone_range = 1.0 / tone_range
active_col_layer = get_vcolor_layer_data(me)
if not active_col_layer:
return {'CANCELLED'}
use_paint_mask = me.use_paint_mask
for i, p in enumerate(me.polygons):
if not use_paint_mask or p.select:
for loop_index in p.loop_indices:
loop = me.loops[loop_index]
v = loop.vertex_index
col = active_col_layer[loop_index].color
tone = vert_tone[v]
tone = (tone - min_tone) * tone_range
if dirt_only:
tone = min(tone, 0.5) * 2.0
col[0] = tone * col[0]
col[1] = tone * col[1]
col[2] = tone * col[2]
me.update()
return {'FINISHED'}
import bpy
from bpy.types import Operator
from bpy.props import FloatProperty, IntProperty, BoolProperty
from math import pi
class VertexPaintDirt(Operator):
bl_idname = "paint.vertex_color_dirt"
bl_label = "Dirty Vertex Colors"
bl_options = {'REGISTER', 'UNDO'}
blur_strength: FloatProperty(
name="Blur Strength",
description="Blur strength per iteration",
min=0.01, max=1.0,
default=1.0,
)
blur_iterations: IntProperty(
name="Blur Iterations",
description="Number of times to blur the colors (higher blurs more)",
min=0, max=40,
default=1,
)
clean_angle: FloatProperty(
name="Highlight Angle",
description="Less than 90 limits the angle used in the tonal range",
min=0.0, max=pi,
default=pi,
unit='ROTATION',
)
dirt_angle: FloatProperty(
name="Dirt Angle",
description="Less than 90 limits the angle used in the tonal range",
min=0.0, max=pi,
default=0.0,
unit='ROTATION',
)
dirt_only: BoolProperty(
name="Dirt Only",
description="Don't calculate cleans for convex areas",
default=False,
)
@classmethod
def poll(cls, context):
obj = context.object
return (obj and obj.type == 'MESH')
def execute(self, context):
obj = context.object
mesh = obj.data
ret = applyVertexDirt(mesh, self.blur_iterations, self.blur_strength, self.dirt_angle, self.clean_angle, self.dirt_only)
return ret
classes = (
VertexPaintDirt,
)
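# Added usage sketch (illustrative only): once the operator class is registered via
# bpy.utils.register_class, it can be invoked on the active mesh object, e.g.:
#
#     bpy.ops.paint.vertex_color_dirt(blur_strength=1.0, blur_iterations=1, dirt_only=False)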
| StarcoderdataPython |
62835 | <reponame>netrack/bayes
import aiofiles
import asyncio
import io
import pathlib
import tarfile
import shutil
from typing import IO
def run(main):
loop = asyncio.new_event_loop()
try:
return loop.run_until_complete(main)
finally:
loop.close()
async def reader(path: pathlib.Path, chunk_size=64*1024) -> bytes:
async with aiofiles.open(str(path), "rb") as f:
chunk = await f.read(chunk_size)
while len(chunk):
yield chunk
chunk = await f.read(chunk_size)
class AsyncIO:
def __init__(self, io: IO):
self.io = io
async def read(self, size=-1):
return self.io.read(size)
async def write(self, b):
return self.io.write(b)
async def extract_tar(fileobj: io.IOBase, dest: str) -> None:
"""Extract content of the TAR archive into the given directory."""
with tarfile.open(fileobj=fileobj, mode="r") as tf:
tf.extractall(dest)
async def create_tar(fileobj: io.IOBase, path: str) -> None:
"""Create TAR archive with the data specified by path."""
with tarfile.open(fileobj=fileobj, mode="w") as tf:
tf.add(path, arcname="")
async def remove_dir(path: pathlib.Path, ignore_errors: bool = False):
shutil.rmtree(path, ignore_errors=ignore_errors)
class _AsyncContextManager:
def __init__(self, async_generator):
self.agen = async_generator.__aiter__()
async def __aenter__(self):
return await self.agen.__anext__()
async def __aexit__(self, typ, value, traceback):
try:
await self.agen.__anext__()
except StopAsyncIteration:
return False
def asynccontextmanager(func):
"""Simple implementation of async context manager decorator."""
def _f(*args, **kwargs):
async_generator = func(*args, **kwargs)
return _AsyncContextManager(async_generator)
return _f
# Prefer the run function from the standard library over the custom
# implementation.
run = asyncio.run if hasattr(asyncio, "run") else run
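# Added usage sketch (illustrative only; the paths are placeholders):
#
#     @asynccontextmanager
#     async def packed(path):
#         buf = io.BytesIO()
#         await create_tar(buf, path)
#         buf.seek(0)
#         yield buf
#
#     async def repack(src, dest):
#         async with packed(src) as buf:
#             await extract_tar(buf, dest)
#
#     run(repack("some/dir", "unpacked/dir"))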
| StarcoderdataPython |
31492 | <reponame>wallisyan/alibabacloud-python-sdk-v2<gh_stars>0
# Copyright 2018 Alibaba Cloud Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
from alibabacloud.endpoint import EndpointResolver
from alibabacloud.endpoint.chained_endpoint_resolver import ChainedEndpointResolver
from alibabacloud.endpoint.local_config_global_endpoint_resolver \
import LocalConfigGlobalEndpointResolver
from alibabacloud.endpoint.local_config_regional_endpoint_resolver \
import LocalConfigRegionalEndpointResolver
from alibabacloud.endpoint.location_service_endpoint_resolver \
import LocationServiceEndpointResolver
from alibabacloud.endpoint.user_customized_endpoint_resolver import UserCustomizedEndpointResolver
class DefaultEndpointResolver(EndpointResolver):
"""
    `Alibaba Cloud Python` endpoint resolution chain
.. note::
Deprecated use for add_endpoint and modify_endpoint
Not recommended
"""
predefined_endpoint_resolver = UserCustomizedEndpointResolver()
def __init__(self, config, credentials_provider, user_config=None):
self._user_customized_endpoint_resolver = UserCustomizedEndpointResolver()
endpoint_resolvers = [
self.predefined_endpoint_resolver,
self._user_customized_endpoint_resolver,
LocalConfigRegionalEndpointResolver(user_config),
LocalConfigGlobalEndpointResolver(user_config),
LocationServiceEndpointResolver(config, credentials_provider),
]
self._resolver = ChainedEndpointResolver(endpoint_resolvers)
def resolve(self, request):
return self._resolver.resolve(request)
def put_endpoint_entry(self, region_id, product_code, endpoint):
self._user_customized_endpoint_resolver.put_endpoint_entry(region_id, product_code,
endpoint)
| StarcoderdataPython |
3230337 | import unittest
from User import User
class TestUser(unittest.TestCase):
def setUp(self):
self.new_user = User("John","Paul")
def tearDown(self):
'''
clean up after each test to prevent errors
'''
User.userList = []
#2nd test
def test__init(self):
'''
        check if the class is initializing as expected
'''
self.assertEqual(self.new_user.username, "John")
self.assertEqual(self.new_user.password, "<PASSWORD>")
def test_saveUser(self):
'''
check whether the user information can be saved
in the user list
'''
self.new_user.saveUser()
self.assertEqual(len(User.userList), 1)
#3rd test - adding multiple users
def test_add_mutliple_users(self):
'''
check whether you can store more than one user
'''
self.new_user.saveUser()
test_user = User("Tony", "Tryne")
test_user.saveUser()
self.assertEqual(len(User.userList), 2)
#4th test - Deleting user
def test_delUser(self):
'''
check whether one can delete a user account
'''
self.new_user.saveUser()
test_user = User("Tony", "Tryne")
test_user.saveUser()
self.new_user.delUser()
self.assertEqual(len(User.userList), 1)
#5th test
def test_search_by_name(self):
'''
find a user using name
'''
self.new_user.saveUser()
test_user = User("Tony", "Tryne")
test_user.saveUser()
found_user = User.search_by_name("Tryne")
self.assertEqual(found_user, self.new_user.username)
def test_display_users(self):
"""Method that returns a list of all users"""
self.assertEqual(User.display_users(),User.userList)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
187726 | <filename>flashnrf.py
"""
This file contains example code meant to be used in order to test the
pynrfjprog API and Hex modules. If multiple devices are connected, a pop-up will appear.
Sample program: program_hex.py
Requires nrf51-DK or nrf52-DK for visual confirmation (LEDs).
Run from command line:
python program_hex.py
or if imported as "from pynrfjprog import examples"
examples.program_hex.run()
Program flow:
0. API is opened and checked to see if correct family type is used
1. Memory is erased
2. test_program_path is parsed and written to memory
3. Device is reset and application is run
"""
from __future__ import division
from __future__ import print_function
from builtins import int
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
# Import pynrfjprog API module and HEX parser module
from pynrfjprog import API, Hex
import os # Used to create path to .hex file
class myHandler(PatternMatchingEventHandler):
patterns = ["*.hex"]
ignore_patterns = ["_OTA"]
def process(self, event):
print (event.dest_path, event.event_type)
def on_moved(self, event):
self.process(event)
print('# pynrfjprog program hex example started... ')
device_family = API.DeviceFamily.NRF51 # Start out with nrf51, will be checked and changed if needed
# Init API with NRF51, open, connect, then check if NRF51 is correct
print('# Opening API with device %s, checking if correct ' % device_family)
api = API.API(device_family) # Initializing API with correct NRF51 family type (will be checked later if correct)
api.open() # Open the dll with the set family type
        api.connect_to_emu_without_snr()  # Connect to emulator; if multiple are connected, a pop-up will appear
# Check if family used was correct or else change
try:
device_version = api.read_device_version()
except API.APIError as e:
if e.err_code == API.NrfjprogdllErr.WRONG_FAMILY_FOR_DEVICE:
device_family = API.DeviceFamily.NRF52
print('# Closing API and re-opening with device %s ' % device_family)
api.close() # Close API so that correct family can be used to open
# Re-Init API, open, connect, and erase device
api = API.API(device_family) # Initializing API with correct family type [API.DeviceFamily.NRF51 or ...NRF52]
api.open() # Open the dll with the set family type
                api.connect_to_emu_without_snr()  # Connect to emulator; if multiple are connected, a pop-up will appear
else:
raise e
print('# Erasing all... ')
api.erase_all() # Erase memory of device
# Find path to test hex file
#module_dir, module_file = os.path.split(__file__)
#hex_file_path = os.path.join(os.path.abspath(module_dir), device_family.name + '_dk_blinky.hex')
# Parse hex, program to device
print('# Parsing hex file into segments ')
program = Hex.Hex(event.dest_path) # Parse .hex file into segments
print('# Writing %s to device ' % event.dest_path)
for segment in program:
api.write(segment.address, segment.data, True)
# Reset device, run
api.sys_reset() # Reset device
api.go() # Run application
print('# Application running ')
# Close API
api.close() # Close the dll
print('# done... ')
os.system('say "done"')
def run():
args = sys.argv[1:]
observer = Observer()
observer.schedule(myHandler(), path=args[0] if args else '.')
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
if __name__ == '__main__':
run()
| StarcoderdataPython |
38870 | <reponame>qorrect/sisy
'''
Created on Feb 7, 2017
@author: julien
'''
import unittest
from keras.layers.core import Dense
from minos.experiment.experiment import ExperimentParameters, Experiment,\
check_experiment_parameters, InvalidParametersException
from minos.experiment.training import Training
from minos.model.parameter import random_param_value, int_param, float_param,\
string_param
from minos.model.parameters import reference_parameters,\
register_custom_activation, register_custom_layer
class ParametersTest(unittest.TestCase):
def test_parameters(self):
experiment_parameters = ExperimentParameters(use_default_values=False)
for layer in reference_parameters['layers'].keys():
for name, _value in reference_parameters['layers'][layer].items():
self.assertIsNotNone(
experiment_parameters.get_layer_parameter('%s.%s' % (layer, name)),
'Parameter %s should exist for layer %s' % (name, layer))
def test_custom_parameters(self):
experiment_parameters = ExperimentParameters()
experiment_parameters.layout_parameter('blocks', int_param(1, 10))
param = experiment_parameters.get_layout_parameter('blocks')
self.assertTrue(
1 == param.lo and 10 == param.hi,
'Should have set values')
experiment_parameters.layout_parameter('layers', int_param(1, 3))
param = experiment_parameters.get_layout_parameter('layers')
self.assertTrue(
1 == param.lo and 3 == param.hi,
'Should have set values')
experiment_parameters.layer_parameter('Dense.activation', string_param(['relu', 'tanh']))
param = experiment_parameters.get_layer_parameter('Dense.activation')
self.assertTrue(
'relu' == param.values[0] and 'tanh' == param.values[1],
'Should have set values')
def test_random_value(self):
param = int_param(values=list(range(10)))
val = random_param_value(param)
self.assertTrue(
isinstance(val, int),
'Should be an int')
self.assertTrue(
val in param.values,
'Value should be in predefined values')
param = float_param(values=[i * 0.1 for i in range(10)])
val = random_param_value(param)
self.assertTrue(
isinstance(val, float),
'Should be a float')
self.assertTrue(
val in param.values,
'Value should be in predefined values')
param = float_param(lo=.5, hi=.7)
for _ in range(100):
val = random_param_value(param)
self.assertTrue(
isinstance(val, float),
'Should be a float')
self.assertTrue(
val <= param.hi and val >= param.lo,
'Value should be in range')
param = {
'a': float_param(optional=False),
'b': float_param(optional=False)}
for _ in range(10):
val = random_param_value(param)
self.assertTrue(
isinstance(val, dict),
'Should be a dict')
self.assertEqual(
len(param), len(val),
'Should respect non optional setting')
param = {
'a': float_param(optional=True),
'b': float_param(optional=True)}
for _ in range(10):
val = random_param_value(param)
self.assertTrue(
isinstance(val, dict),
'Should be a dict')
self.assertTrue(
len(val) >= 0 and len(val) <= len(param),
'Should respect non optional setting')
def test_search_parameters(self):
experiment = Experiment(
label='test',
parameters=ExperimentParameters(use_default_values=False))
valid_parameters = True
try:
check_experiment_parameters(experiment)
except InvalidParametersException:
valid_parameters = False
self.assertFalse(valid_parameters, 'Should have raised an exception')
def test_custom_definitions(self):
def custom_activation(x):
return x
register_custom_activation('custom_activation', custom_activation)
register_custom_layer('Dense2', Dense, dict(test='test'))
experiment_parameters = ExperimentParameters(use_default_values=False)
custom_params = experiment_parameters.get_layer_parameter('Dense2')
self.assertIsNotNone(
custom_params,
'Should have registered custom layer')
self.assertTrue(
'test' in custom_params,
'Should have registered custom layer params')
activations = experiment_parameters.get_layer_parameter('Dense.activation')
self.assertTrue(
'custom_activation' in activations.values,
'Should have registered custom_activation')
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| StarcoderdataPython |
4840524 | import datetime
def return_dif(target_year, target_month, target_day):
    today = datetime.date.today()
    target = datetime.date(int(target_year), int(target_month), int(target_day))
    if target < today:
        output_day = today - target
        print('This date has already come and gone', output_day.days)
        return 'none at all, this date has already come and gone'
    elif target == today:
        return 'none, because this day is today'
    else:
        output_day = target - today
        return output_day.days | StarcoderdataPython |
160175 | <filename>setup.py<gh_stars>0
from distutils.core import setup
setup(
name='Model Generator',
version='1',
packages=['generator'],
license='MIT',
long_description='Project to generate model for Whisper product'
) | StarcoderdataPython |
3361944 | from __future__ import absolute_import, unicode_literals
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import SetPasswordForm
from django.contrib.auth.models import Group
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.urls import reverse, reverse_lazy
from django.utils.translation import ungettext, ugettext_lazy as _
from common.views import (
AssignRemoveView, MultipleObjectConfirmActionView,
MultipleObjectFormActionView, SingleObjectCreateView,
SingleObjectDeleteView, SingleObjectEditView, SingleObjectListView
)
from .forms import UserForm
from .permissions import (
permission_group_create, permission_group_delete, permission_group_edit,
permission_group_view, permission_user_create, permission_user_delete,
permission_user_edit, permission_user_view
)
class GroupCreateView(SingleObjectCreateView):
extra_context = {'title': _('Create new group')}
fields = ('name',)
model = Group
post_action_redirect = reverse_lazy('user_management:group_list')
view_permission = permission_group_create
class GroupEditView(SingleObjectEditView):
fields = ('name',)
model = Group
object_permission = permission_group_edit
post_action_redirect = reverse_lazy('user_management:group_list')
def get_extra_context(self):
return {
'object': self.get_object(),
'title': _('Edit group: %s') % self.get_object(),
}
class GroupListView(SingleObjectListView):
extra_context = {
'hide_link': True,
'title': _('Groups'),
}
model = Group
object_permission = permission_group_view
class GroupDeleteView(SingleObjectDeleteView):
model = Group
object_permission = permission_group_delete
post_action_redirect = reverse_lazy('user_management:group_list')
def get_extra_context(self):
return {
'object': self.get_object(),
'title': _('Delete the group: %s?') % self.get_object(),
}
class GroupMembersView(AssignRemoveView):
decode_content_type = True
left_list_title = _('Available users')
right_list_title = _('Users in group')
object_permission = permission_group_edit
@staticmethod
def generate_choices(choices):
results = []
for choice in choices:
ct = ContentType.objects.get_for_model(choice)
label = choice.get_full_name() if choice.get_full_name() else choice
results.append(('%s,%s' % (ct.model, choice.pk), '%s' % (label)))
# Sort results by the label not the key value
return sorted(results, key=lambda x: x[1])
def add(self, item):
self.get_object().user_set.add(item)
def get_extra_context(self):
return {
'object': self.get_object(),
'title': _('Users of group: %s') % self.get_object()
}
def get_object(self):
return get_object_or_404(Group, pk=self.kwargs['pk'])
def left_list(self):
return GroupMembersView.generate_choices(
get_user_model().objects.exclude(
groups=self.get_object()
).exclude(is_staff=True).exclude(is_superuser=True)
)
def right_list(self):
return GroupMembersView.generate_choices(
self.get_object().user_set.all()
)
def remove(self, item):
self.get_object().user_set.remove(item)
class UserCreateView(SingleObjectCreateView):
extra_context = {
'title': _('Create new user'),
}
form_class = UserForm
view_permission = permission_user_create
def form_valid(self, form):
user = form.save(commit=False)
user.set_unusable_password()
user.save()
messages.success(
self.request, _('User "%s" created successfully.') % user
)
return HttpResponseRedirect(
reverse('user_management:user_set_password', args=(user.pk,))
)
class UserDeleteView(MultipleObjectConfirmActionView):
model = get_user_model()
object_permission = permission_user_delete
success_message = _('User delete request performed on %(count)d user')
success_message_plural = _(
'User delete request performed on %(count)d users'
)
def get_extra_context(self):
queryset = self.get_queryset()
result = {
'title': ungettext(
'Delete user',
'Delete users',
queryset.count()
)
}
if queryset.count() == 1:
result.update(
{
'object': queryset.first(),
'title': _('Delete user: %s') % queryset.first()
}
)
return result
def object_action(self, form, instance):
try:
if instance.is_superuser or instance.is_staff:
messages.error(
self.request,
_(
'Super user and staff user deleting is not '
'allowed, use the admin interface for these cases.'
)
)
else:
instance.delete()
messages.success(
self.request, _(
'User "%s" deleted successfully.'
) % instance
)
except Exception as exception:
messages.error(
self.request, _(
'Error deleting user "%(user)s": %(error)s'
) % {'user': instance, 'error': exception}
)
class UserEditView(SingleObjectEditView):
fields = ('username', 'first_name', 'last_name', 'email', 'is_active',)
object_permission = permission_user_edit
post_action_redirect = reverse_lazy('user_management:user_list')
queryset = get_user_model().objects.filter(
is_superuser=False, is_staff=False
)
def get_extra_context(self):
return {
'object': self.get_object(),
'title': _('Edit user: %s') % self.get_object(),
}
class UserGroupsView(AssignRemoveView):
decode_content_type = True
left_list_title = _('Available groups')
right_list_title = _('Groups joined')
object_permission = permission_user_edit
def add(self, item):
item.user_set.add(self.get_object())
def get_extra_context(self):
return {
'object': self.get_object(),
'title': _('Groups of user: %s') % self.get_object()
}
def get_object(self):
return get_object_or_404(get_user_model(), pk=self.kwargs['pk'])
def left_list(self):
return AssignRemoveView.generate_choices(
Group.objects.exclude(user=self.get_object())
)
def right_list(self):
return AssignRemoveView.generate_choices(
Group.objects.filter(user=self.get_object())
)
def remove(self, item):
item.user_set.remove(self.get_object())
class UserListView(SingleObjectListView):
object_permission = permission_user_view
def get_extra_context(self):
return {
'hide_link': True,
'title': _('Users'),
}
def get_object_list(self):
return get_user_model().objects.exclude(
is_superuser=True
).exclude(is_staff=True).order_by('last_name', 'first_name')
class UserSetPasswordView(MultipleObjectFormActionView):
form_class = SetPasswordForm
model = get_user_model()
object_permission = permission_user_edit
success_message = _('Password change request performed on %(count)d user')
success_message_plural = _(
'Password change request performed on %(count)d users'
)
def get_extra_context(self):
queryset = self.get_queryset()
result = {
'submit_label': _('Submit'),
'title': ungettext(
'Change user password',
'Change users passwords',
queryset.count()
)
}
if queryset.count() == 1:
result.update(
{
'object': queryset.first(),
'title': _('Change password for user: %s') % queryset.first()
}
)
return result
def get_form_extra_kwargs(self):
queryset = self.get_queryset()
result = {}
if queryset:
result['user'] = queryset.first()
return result
else:
raise PermissionDenied
def object_action(self, form, instance):
try:
if instance.is_superuser or instance.is_staff:
messages.error(
self.request,
_(
'Super user and staff user password '
                        'resetting is not allowed, use the admin '
'interface for these cases.'
)
)
else:
instance.set_password(form.cleaned_data['<PASSWORD>'])
instance.save()
messages.success(
self.request, _(
'Successful password reset for user: %s.'
) % instance
)
except Exception as exception:
messages.error(
self.request, _(
                    'Error resetting password for user "%(user)s": %(error)s'
) % {
'user': instance, 'error': exception
}
)
| StarcoderdataPython |
26900 | <reponame>rancp/ducktape-docs
# Copyright 2016 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import psutil
import shutil
import tempfile
from ducktape.tests.loggermaker import LoggerMaker, close_logger
class DummyFileLoggerMaker(LoggerMaker):
def __init__(self, log_dir, n_handles):
"""Create a logger with n_handles file handles, with files in log_dir"""
self.log_dir = log_dir
self.n_handles = n_handles
@property
def logger_name(self):
return "a.b.c"
def configure_logger(self):
for i in range(self.n_handles):
fh = logging.FileHandler(os.path.join(self.log_dir, "log-" + str(i)))
self._logger.addHandler(fh)
def open_files():
# current process
p = psutil.Process()
return p.open_files()
class CheckLogger(object):
def setup_method(self, _):
self.temp_dir = tempfile.mkdtemp()
def check_close_logger(self):
"""Check that calling close_logger properly cleans up resources."""
initial_open_files = open_files()
n_handles = 100
l = DummyFileLoggerMaker(self.temp_dir, n_handles)
# accessing logger attribute lazily triggers configuration of logger
the_logger = l.logger
assert len(open_files()) == len(initial_open_files) + n_handles
close_logger(the_logger)
assert len(open_files()) == len(initial_open_files)
def teardown_method(self, _):
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
| StarcoderdataPython |
9639 | <reponame>cirobarradov/kafka-connect-hdfs-datalab<filename>kafka-connect-azblob/docs/autoreload.py
#!/usr/bin/env python
from livereload import Server, shell
server = Server()
server.watch('*.rst', shell('make html'))
server.serve()
| StarcoderdataPython |
4813204 | <filename>ProblemSet1/FileCreator/create2Drules.py
f = open("ProblemSet1/gameOfLife.txt", "w+")
# Render an integer as an n-digit, zero-padded binary string.
get_bin = lambda x, n: format(x, 'b').zfill(n)
# Enumerate all 512 possible 3x3 neighbourhood states. Index 4 of the 9-bit
# string is the centre cell, so counting '1's includes the centre itself.
for i in range(512):
    bistr = get_bin(i, 9)
    numneig = bistr.count('1')
    if bistr[4] == '1':
        # Live centre cell: it survives with 2 or 3 live neighbours,
        # i.e. 3 or 4 ones in total (centre included).
        if numneig < 3 or numneig > 4:
            bistr = bistr + " 0\n"
        else:
            bistr = bistr + " 1\n"
    else:
        # Dead centre cell: it becomes alive with exactly 3 live neighbours.
        if numneig == 3:
            bistr = bistr + " 1\n"
        else:
            bistr = bistr + " 0\n"
    f.write(bistr)
f.close() | StarcoderdataPython |
1659810 | <reponame>jiupinjia/neural-magic-eye
import argparse
import numpy as np
import matplotlib.pyplot as plt
import datasets
from neural_decoder import *
# settings
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='mnist', metavar='str',
                    help='dataset name from [mnist, shapenet, watermarking] (default: mnist)')
parser.add_argument('--batch_size', type=int, default=16, metavar='N',
                    help='input batch size for training (default: 16)')
parser.add_argument('--net_G', type=str, default='unet_256', metavar='str',
                    help='net_G: resnet18fcn or resnet50fcn or unet_64 or unet_128 or unet_256 (default: unet_256)')
parser.add_argument('--norm_type', type=str, default='batch', metavar='str',
help='norm_type: instance or batch or none (default: batch)')
parser.add_argument('--with_disparity_conv', action='store_true', default=False,
help='insert a disparity convolution layer at the input end of the network')
parser.add_argument('--with_skip_connection', action='store_true', default=False,
help='using unet-fashion skip-connection at prediction layers')
parser.add_argument('--in_size', type=int, default=256, metavar='N',
                    help='input image size for training (default: 256)')
parser.add_argument('--checkpoint_dir', type=str, default=r'./checkpoints', metavar='str',
help='dir to save checkpoints (default: ./checkpoints)')
parser.add_argument('--vis_dir', type=str, default=r'./val_out', metavar='str',
help='dir to save results during training (default: ./val_out)')
parser.add_argument('--lr', type=float, default=2e-4,
help='learning rate (default: 0.0002)')
parser.add_argument('--max_num_epochs', type=int, default=100, metavar='N',
                    help='max number of training epochs (default 100)')
parser.add_argument('--scheduler_step_size', type=int, default=50, metavar='N',
                    help='after m epochs, reduce lr to 0.1*lr (default 50)')
args = parser.parse_args()
if __name__ == '__main__':
# # How to check if the data is loading correctly?
# dataloaders = datasets.get_loaders(args)
# for i in range(100):
# data = next(iter(dataloaders['train']))
# vis_A = utils.make_numpy_grid(data['stereogram'])
# vis_B = utils.make_numpy_grid(data['dmap'])
# vis = np.concatenate([vis_A, vis_B], axis=0)
# plt.imshow(vis)
# plt.show()
dataloaders = datasets.get_loaders(args)
nn_decoder = Decoder(args=args, dataloaders=dataloaders)
nn_decoder.train_models()
| StarcoderdataPython |
1612298 | <gh_stars>0
"""
pearlmemory is a variation of french-genanki-jupyter made for German
learners.
Copyright (C) 2020 errbufferoverfl.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
MODEL_TEMPLATES = [
{
"name": "German to English",
# Front template format
"qfmt": """
{{Wort_DE}}{{Audio_Wort}}
""",
# Back template Format
"afmt": """
{{#Artikel}}{{Artikel}}{{/Artikel}}
{{Wort_DE}}
{{#Plural}}{{Plural}}{{/Plural}}
{{Audio_Wort}}
<div style='font-family: Arial; font-size: 16px;'>
{{#Verbformen}}<br>Verbformen: {{Verbformen}}{{/Verbformen}}
{{#Hinweis}}<br>Hinweis: {{Hinweis}}{{/Hinweis}}
</div>
<hr id=answer>
{{Wort_EN}}
<hr>
<div style="display:none">[sound:_LongSilence.mp3]</div>
{{#Satz1_DE}}
<div style='font-family: Arial; font-size: 16px;'>{{Satz1_DE}}{{Audio_S1}}</div>
<div style='font-family: Arial; font-size: 14px;'>{{hint:Satz1_EN}}</div><br>
{{/Satz1_DE}}
{{#Satz2_DE}}
<div style='font-family: Arial; font-size: 16px;'>{{Satz2_DE}}{{Audio_S2}}</div>
<div style='font-family: Arial; font-size: 14px;'>{{hint:Satz2_EN}}</div><br>
{{/Satz2_DE}}
{{#Satz3_DE}}
<div style='font-family: Arial; font-size: 16px;'>{{Satz3_DE}}{{Audio_S3}}</div>
<div style='font-family: Arial; font-size: 14px;'>{{hint:Satz3_EN}}</div><br>
{{/Satz3_DE}}
{{#Satz4_DE}}
<div style='font-family: Arial; font-size: 16px;'>{{Satz4_DE}}{{Audio_S4}}</div>
<div style='font-family: Arial; font-size: 14px;'>{{hint:Satz4_EN}}</div><br>
{{/Satz4_DE}}
"""
},
]
| StarcoderdataPython |
1730604 | from unittest import TestCase
from parameterized import parameterized
from src.util.load_data import load_data
from src.year2021.day22 import Cuboid, part_1, part_2, prepare_data
from test.decorators import sample
data = load_data(2021, 22)
@sample
class Test2021Day22Samples(TestCase):
prepared_data: list[list[Cuboid]]
@classmethod
def setUpClass(cls) -> None:
cls.prepared_data = [prepare_data(s) for s in data.samples]
@parameterized.expand(((0, 39), (1, 590784), (2, 474140)))
def test_part_1(self, i, expected) -> None:
self.assertEqual(expected, part_1(self.prepared_data[i]))
def test_part_2(self) -> None:
self.assertEqual(2758514936282235, part_2(self.prepared_data[2]))
class Test2021Day22(TestCase):
prepared_data: list[Cuboid]
@classmethod
def setUpClass(cls) -> None:
cls.prepared_data = prepare_data(data.input)
def test_part_1(self) -> None:
self.assertEqual(620241, part_1(self.prepared_data))
def test_part_2(self) -> None:
self.assertEqual(1284561759639324, part_2(self.prepared_data))
| StarcoderdataPython |
1725868 | <filename>rssant_common/blacklist.py<gh_stars>0
import re
from urllib.parse import urlparse
def _parse_blacklist(text):
lines = set()
for line in text.strip().splitlines():
if line.strip():
lines.add(line.strip())
items = []
for line in list(sorted(lines)):
items.append(r'((.*\.)?{})'.format(line))
pattern = re.compile('|'.join(items), re.I)
return pattern
def compile_url_blacklist(text):
black_re = _parse_blacklist(text)
def is_in_blacklist(url):
url = urlparse(url)
return black_re.fullmatch(url.netloc) is not None
return is_in_blacklist
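# Minimal usage sketch (not part of the original module; the blacklist entries
# and URLs below are made-up examples).
if __name__ == "__main__":
    is_blacklisted = compile_url_blacklist("example.com\ntracker.net")
    print(is_blacklisted("https://sub.example.com/page"))  # expected: True
    print(is_blacklisted("https://other.org/"))  # expected: False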
| StarcoderdataPython |
75987 | from src.data_loader.data_set import Data_Set
from src.utils import read_json
from easydict import EasyDict as edict
from src.constants import TRAINING_CONFIG_PATH
from src.data_loader.utils import error_in_conversion, get_data
from tqdm import tqdm
def main():
train_param = edict(read_json(TRAINING_CONFIG_PATH))
data = get_data(
Data_Set,
train_param,
sources=["youtube"],
experiment_type="supervised",
split="train",
)
for id in tqdm(range(len(data))):
sample = data[id]
# joints25D = sample["joints"]
# scale = sample["scale"]
# K = sample["K"]
true_joints_3D = sample["joints3D"]
cal_joints_3D = sample["joints3D_recreated"]
error = error_in_conversion(true_joints_3D, cal_joints_3D)
if error > 1e-3:
            print(f"High conversion error found: {error}")
break
if __name__ == "__main__":
main()
| StarcoderdataPython |
1677787 | <reponame>torrotitans/torro_community
#!/usr/bin/python
# -*- coding: UTF-8 -*
from flask_restful import Api
from api.form.interface_base_form import interfaceBaseForm
from api.form.interface_detail_form import interfaceDetailForm, interfaceDetailFormList
from api.form.interface_edit_form import interfaceEditForm
from api.form.interface_field_template import interfaceFieldTemplate
from api.workflow.interface_stages import interfaceStages
from api.workflow.interface_edit_workflow import interfaceEditWorkflow
from api.workflow.interface_base_workflow import interfaceBaseWorkflow
from api.workflow.interface_details_workflow import interfaceDetailsWorkflow
from api.user.interface_user_login import interfaceUserLogin
from api.org.interface_org_setting import interfaceOrgSetting
from api.workspace.interface_workspace_info import interfaceWorkspaceInfo
from api.workspace.interface_workspace_setting import interfaceWorkspaceSetting
from api.workspace.interface_usecase_resource import interfaceUsecaseResource
from api.usecase.interface_usecase_info import interfaceUseCaseInfo
from api.usecase.interface_usecase_setting import interfaceUseCaseSetting
from api.login.interface_login import interfaceLogin
# from api.login.interface_offine import interfaceOffine
from api.it.interface_debug import interfaceDebug
from api.it.interface_torro_config import interfaceTorroConfig
from api.org.interface_role_info import interfaceRoleInfo
from api.gcp.interface_gcp_execute import interfaceGCPExecute
from api.gcp.interface_table_schema import interfaceTableSchema
from api.dashboard.interface_dashboard import interfaceDashboard
from api.input_form.interface_input_form import interfaceInputForm, interfaceInputFormList
from api.input_form.interface_input_form_details import interfaceInputFormDetails, interfaceInputFormDetailsList
from api.governance.interface_governance import interfaceGovernance, interfaceGovernanceBatch
from api.user.interface_user_info import interfaceUserInfo
from api.dashboard.interface_options import interfaceOptions
from api.input_form.interface_comment import interfaceComment
from api.workspace.interface_policy_tags_info import interfacePolicyTagsList
from api.workspace.interface_tag_template_info import interfaceTagTemplateList
from api.system.interface_system_trigger import interfaceSystemTrigger
from api.system.interface_system_notify import interfaceSystemNotify
api = Api()
api.add_resource(
interfaceUsecaseResource,
'/api/usecaseResource',
)
api.add_resource(
interfaceSystemTrigger,
'/api/systemTrigger',
)
api.add_resource(
interfaceSystemNotify,
'/api/systemNotify',
)
api.add_resource(
interfaceTagTemplateList,
'/api/getTagTemplateList',
)
api.add_resource(
interfacePolicyTagsList,
'/api/getPolicyTagsList',
)
api.add_resource(
interfaceDashboard,
'/api/getInputFormInfo',
)
api.add_resource(
interfaceComment,
'/api/inputFormComment',
)
api.add_resource(
interfaceInputFormDetails,
'/api/getInputFormDetails',
)
api.add_resource(
interfaceInputFormDetailsList,
'/api/getInputFormDetailsList',
)
api.add_resource(
interfaceTorroConfig,
'/api/torroConfig/<string:configName>',
)
api.add_resource(
interfaceBaseForm,
'/api/getFormList',
'/api/getFormList/<int:system>',
)
# get
api.add_resource(
interfaceOptions,
'/api/getDashboardOptions',
)
api.add_resource(
interfaceDetailForm,
'/api/getFormData',
)
api.add_resource(
interfaceDetailFormList,
'/api/getFormDataList',
)
# post: {'id': int}
api.add_resource(
interfaceEditForm,
'/api/postFormData'
)
api.add_resource(
interfaceFieldTemplate,
'/api/getFieldTemplate'
)
# post: {'style': int}
api.add_resource(
interfaceStages,
'/api/getAllStages',
)
api.add_resource(
interfaceEditWorkflow,
'/api/postWorkflowData',
)
api.add_resource(
interfaceBaseWorkflow,
'/api/getBaseWorkflowListByFormId',
)
api.add_resource(
interfaceDetailsWorkflow,
'/api/getDetailsWorkflowDataById',
)
api.add_resource(
interfaceUserLogin,
'/api/userLogin',
)
api.add_resource(
interfaceUserInfo,
'/api/getUserInfo',
)
api.add_resource(
interfaceOrgSetting,
'/api/orgSetting',
)
api.add_resource(
interfaceLogin,
'/api/login',
)
# api.add_resource(
# interfaceOffine,
# '/api/offline',
# )
api.add_resource(
interfaceWorkspaceSetting,
'/api/workspaceSetting',
)
api.add_resource(
interfaceWorkspaceInfo,
'/api/workspaceInfo',
)
api.add_resource(
interfaceUseCaseSetting,
'/api/usecaseSetting',
)
api.add_resource(
interfaceUseCaseInfo,
'/api/usecaseInfo',
)
api.add_resource(
interfaceDebug,
'/api/debug',
)
api.add_resource(
interfaceRoleInfo,
'/api/getRolesInfo',
)
api.add_resource(
interfaceGCPExecute,
'/api/taskExecute'
)
api.add_resource(
interfaceTableSchema,
'/api/tableSchema'
)
api.add_resource(
interfaceInputForm,
'/api/inputFormData',
)
api.add_resource(
interfaceInputFormList,
'/api/inputFormDataList',
)
api.add_resource(
interfaceGovernance,
'/api/changeStatus',
)
api.add_resource(
interfaceGovernanceBatch,
'/api/changeStatusList',
)
# tbc: org admin, group checking
# workspace:sa, group checking | StarcoderdataPython |
1682365 | #! /usr/bin/env python
"""
File: plot_sin_eps.py
Copyright (c) 2016 <NAME>
License: MIT
Course: PHYS227
Assignment: B.2
Date: March 17th, 2016
Email: <EMAIL>
Name: <NAME>
Description: Studies a function for different parameter values
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
def sin_graph(eps, n):
fig = plt.figure(1)
x = np.linspace(0, 1, n + 1)
y = np.sin(1 / (x + eps))
plt.plot(x, y, 'b-')
plt.xlabel('x')
plt.ylabel('y')
plt.title('f(x)')
plt.axis([-0.2, 1.2, -1.2, 1.2])
plt.show()
def multigraph(eps, n1):
n2 = n1 + 10
fig = plt.figure(1)
x1 = np.linspace(0, 1, n1)
y1 = np.sin(1 / (x1 + eps))
x2 = np.linspace(0, 1, n2)
y2 = np.sin(1 / (x2 + eps))
plt.plot(x1, y1, 'b-')
plt.plot(x2, y2, 'r-')
plt.xlabel('x')
plt.ylabel('y')
plt.title('f(x)')
plt.axis([-0.2, 1.2, -1.2, 1.2])
plt.show()
def choose_n(eps):
"""
    Finds the smallest n1 such that the difference between the max of the function sampled with n1 points and with n1 + 10 points is less than 0.1
"""
n1 = 1
n2 = n1 + 10
x1 = np.linspace(0, 1, n1)
y1 = np.sin(1 / (x1 + eps))
x2 = np.linspace(0, 1, n2)
y2 = np.sin(1 / (x2 + eps))
while (abs(max(y2) - max(y1)) >= 0.1):
n1 += 1
n2 += 1
x1 = np.linspace(0, 1, n1)
y1 = np.sin(1 / (x1 + eps))
x2 = np.linspace(0, 1, n2)
y2 = np.sin(1 / (x2 + eps))
return n1 | StarcoderdataPython |
3263893 | <reponame>ffffff0x/python-hacker<filename>com/binghe/hacker/tools/script/network/loic/analysis_loic_online.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- coding: gbk -*-
# Date: 2019/2/17
# Created by 冰河
# Description: real-time detection of DDoS attacks
# To identify an attack, set a threshold for an abnormal number of packets: if the number of
# packets one user sends to a given address exceeds this threshold, treat it as an attack and
# investigate further, although this is not absolute proof that the user launched the attack deliberately.
# Blog: https://blog.csdn.net/l1028386804
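# Usage sketch (illustrative; 'attack.pcap' is a made-up capture file name, and the
# script as written targets Python 2):
#   f = open('attack.pcap')
#   pcap = dpkt.pcap.Reader(f)
#   findAttack(pcap)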
import dpkt
import socket
THRESH = 10000
def findAttack(pcap):
pktCount = {}
for (ts, buf) in pcap:
try:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
src = socket.inet_ntoa(ip.src)
dst = socket.inet_ntoa(ip.dst)
tcp = ip.data
dport = tcp.dport
if dport == 80:
stream = src + ':' + dst
                if stream in pktCount:
pktCount[stream] = pktCount[stream] + 1
else:
pktCount[stream] = 1
except:
pass
for stream in pktCount:
pktsSent = pktCount[stream]
if pktsSent > THRESH:
src = stream.split(':')[0]
dst = stream.split(':')[1]
print '[+] ' + src + ' attacked ' + dst + ' with ' + str(pktsSent) + ' pkts.' | StarcoderdataPython |
46104 | <gh_stars>1-10
#!/usr/bin/env python3
"""Module for parsing tem setting files"""
from base.rcon import echo
import base.settings
class TemParseError(Exception):
"""Tem Parser Exception"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return repr(self.value)
def parse_error(err_type: str, err_msg: str):
"""Throw parser error"""
echo("[ERROR:settings] " + str(err_type) + ": " + str(err_msg))
raise TemParseError(err_type + ": " + err_msg)
def parse_bool(val: str, line_num: int) -> bool:
"""Parse boolean"""
val = val.lower()
if val in ("0", "false"):
return False
if val in ("1", "true"):
return True
parse_error("BoolError", "cannot parse bool " + str(line_num) + ":'" + str(val) + "'")
return False
def parse_list_dyn(val: str):
"""Parse dynamic list type"""
if val is None or val == "" or val == ",":
return None
return val.split(',')
def parse_list(sett: str, val: str, line_num: int):
"""Parse list type"""
raw_list = base.settings.Settings().settings_dict[sett][0]
raw_list = raw_list[1:-1]
list_vals = raw_list.split(',')
if val in list_vals:
return str(val)
parse_error("ListError", str(line_num) + ":'" + str(val) + "' not in list " + str(list_vals))
return ""
def read_settings_line(line: str, line_num: int):
"""Parse single line of tem settings file"""
split = line.find("=")
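    # Expected line shape (inferred from the slicing below): a three-character
    # prefix, the setting name, then '=value'; the prefix (line[:3]) is dropped
    # before the dictionary lookup.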
sett = line[3:split]
val = line[split+1:].strip()
settings = base.settings.Settings()
if sett not in settings.settings_dict:
parse_error(
"UnkownSetting",
"line[" + str(line_num) + "] setting[" + str(sett) + "] value[" + str(val) + "]")
# make sure file_database is a folder
if sett == "file_database":
if val[-1] != "/":
val += "/"
if settings.settings_dict[sett][0] == "str":
settings.settings_dict[sett][1] = str(val)
elif settings.settings_dict[sett][0] == "int":
settings.settings_dict[sett][1] = int(val)
elif settings.settings_dict[sett][0] == "bool":
settings.settings_dict[sett][1] = parse_bool(val, line_num)
elif settings.settings_dict[sett][0][0] == "[":
if settings.settings_dict[sett][0][1] == "]": # empty list ( no limit )
settings.settings_dict[sett][1] = parse_list_dyn(val)
else: # pre defined allowed values in list
settings.settings_dict[sett][1] = parse_list(sett, val, line_num)
else:
parse_error(
"TypeError",
"invalid type " + str(line_num) + ":'" + str(settings.settings_dict[sett][0]) + "'")
def read_settings_file(file: str):
"""Parse settings file given a filepath"""
line_num = 0
with open(file, encoding='UTF-8') as file_io:
for line in file_io:
line_num += 1
if line[0] == "#":
continue # ignore comments
if line[:3] == "sh_":
continue # ignore shell settings
if not line.strip():
continue # ignore empty lines
read_settings_line(line, line_num)
| StarcoderdataPython |
1763295 | import torch
import torch.nn as nn
import numpy as np
from model.layers import PointNet, GeneralKNNFusionModule, EquivariantLayer, InstanceBranch, JointBranch2
import index_max
class OMAD_PriorNet(nn.Module):
def __init__(self,
surface_normal_len=0,
basis_num=10,
node_num=16,
part_num=2,
k=1,
node_knn_k_1=3,
symtype='shape',
device=torch.device('cuda:0'),
init_n_pl=None,
init_basis=None
):
super(OMAD_PriorNet, self).__init__()
self.surface_normal_len = surface_normal_len
self.basis_num = basis_num
self.node_num = node_num
self.part_num = part_num
self.joint_num = part_num - 1
self.k = k
self.node_knn_k_1 = node_knn_k_1
self.symtype = symtype # defines the symmetric deformation space "shape" or "basis" (default: {"shape"})
assert self.symtype in ('shape', 'basis', 'none')
self.device = device
assert self.node_num % self.part_num == 0, 'node number should be devided by part number'
if self.symtype == 'shape':
assert self.node_num % 2 == 0, 'node number must be an even number'
assert (self.node_num // 2) % self.part_num == 0
# ---- Nodes branch definition ----
self.C1 = 128
self.C2 = 512
input_channels = self.C1 + self.C2
output_channels = 4 # 3 coordinates + sigma
assert self.node_knn_k_1 >= 2
self.first_pointnet = PointNet(3 + self.surface_normal_len,
[int(self.C1 / 2), int(self.C1 / 2), int(self.C1 / 2)],
activation='relu',
normalization='batch',
momentum=0.1,
bn_momentum_decay_step=None,
bn_momentum_decay=1.0)
self.second_pointnet = PointNet(self.C1, [self.C1, self.C1],
activation='relu',
normalization='batch',
momentum=0.1,
bn_momentum_decay_step=None,
bn_momentum_decay=1.0)
self.knnlayer_1 = GeneralKNNFusionModule(3 + self.C1, (int(self.C2 / 2), int(self.C2 / 2), int(self.C2 / 2)),
(self.C2, self.C2),
activation='relu',
normalization='batch',
momentum=0.1,
bn_momentum_decay_step=None,
bn_momentum_decay=1.0)
self.node_mlp1 = EquivariantLayer(input_channels, 512,
activation='relu', normalization='batch',
momentum=0.1,
bn_momentum_decay_step=None,
bn_momentum_decay=1.0)
self.node_mlp2 = EquivariantLayer(512, 256,
activation='relu', normalization='batch',
momentum=0.1,
bn_momentum_decay_step=None,
bn_momentum_decay=1.0)
self.node_mlp3 = EquivariantLayer(256, output_channels, activation=None, normalization=None)
# ---- Joint branch defination ---
self.C0 = 64
self.joint_net = JointBranch2(self.basis_num, [self.C0, self.C0, self.joint_num * 6],
activation='relu', normalization='batch',
momentum=0.1,
bn_momentum_decay_step=None,
bn_momentum_decay=1.0)
# ---- Pose and coefficients branch definition ----
self.third_pointnet2 = InstanceBranch(3 + self.surface_normal_len,
[int(self.C1 / 2), self.C1, self.basis_num],
self.basis_num,
activation='relu',
normalization='batch',
momentum=0.1,
bn_momentum_decay_step=None,
bn_momentum_decay=1.0)
# ---- Additional learnable parameters ----
if self.symtype == 'shape':
self.basis = torch.nn.Parameter(
(torch.rand(1, 3, self.node_num // 2, self.basis_num) - 0.5).to(device), requires_grad=True) # 1x3xM/2xK
elif self.symtype == 'basis':
raise NotImplementedError
elif self.symtype == 'none':
if init_basis is not None:
self.basis = torch.nn.Parameter(init_basis.to(device)) # 1x3xMxK
else:
self.basis = torch.nn.Parameter((torch.rand(1, 3, self.node_num, self.basis_num) - 0.5).to(device)) # 1x3xMxK
if init_n_pl is None:
self.n_pl = torch.nn.Parameter(torch.rand(1, 2).to(device), requires_grad=True)
else:
self.n_pl = torch.nn.Parameter(torch.tensor(init_n_pl).to(device), requires_grad=True)
def forward(self, x, sn, node, epoch=None):
'''
:param x: BxNx3 Tensor
:param sn: BxNx3 Tensor
:param node: BxMx3 FloatTensor
:return:
'''
bs = x.size(0)
x = x.transpose(1, 2)
if sn is not None:
sn = sn.transpose(1, 2)
node = node.transpose(1, 2)
# modify the x according to the nodes, minus the center
mask, mask_row_max, min_idx = self.query_topk(node, x, node.size()[2],
k=self.k) # BxkNxnode_num, Bxnode_num, BxkN
mask_row_sum = torch.sum(mask, dim=1) # Bxnode_num
mask = mask.unsqueeze(1) # Bx1xkNxnode_num
# if necessary, stack the x
x_stack = x.repeat(1, 1, self.k)
if self.surface_normal_len >= 1:
sn_stack = sn.repeat(1, 1, self.k)
x_stack_data_unsqueeze = x_stack.unsqueeze(3) # BxCxkNx1
x_stack_data_masked = x_stack_data_unsqueeze * mask.float() # BxCxkNxnode_num
cluster_mean = torch.sum(x_stack_data_masked, dim=2) / (
mask_row_sum.unsqueeze(1).float() + 1e-5).detach() # BxCxnode_num
som_node_cluster_mean = cluster_mean
B, N, kN, M = x.size()[0], x.size()[2], x_stack.size()[2], som_node_cluster_mean.size()[2]
# assign each point with a center
node_expanded = som_node_cluster_mean.unsqueeze(2) # BxCx1xnode_num, som.node is BxCxnode_num
centers = torch.sum(mask.float() * node_expanded, dim=3).detach() # BxCxkN
x_decentered = (x_stack - centers).detach() # Bx3xkN
if self.surface_normal_len >= 1:
x_augmented = torch.cat((x_decentered, sn_stack), dim=1) # Bx6xkN
# ---- Nodes branch ----
# First PointNet
if self.surface_normal_len >= 1:
first_pn_out = self.first_pointnet(x_augmented, epoch)
else:
first_pn_out = self.first_pointnet(x_decentered, epoch)
with torch.cuda.device(first_pn_out.get_device()):
first_gather_index = index_max.forward_cuda_shared_mem(first_pn_out.detach(), min_idx.int(),
M).detach().long()
first_pn_out_masked_max = first_pn_out.gather(dim=2, index=first_gather_index) * mask_row_max.unsqueeze(
1).float() # BxCxM
# scatter the masked_max back to the kN points
scattered_first_masked_max = torch.gather(first_pn_out_masked_max,
dim=2,
index=min_idx.unsqueeze(1).expand(B, first_pn_out.size()[1],
kN)) # BxCxkN
first_pn_out_fusion = torch.cat((first_pn_out, scattered_first_masked_max), dim=1) # Bx2CxkN
# Second PointNet
second_pn_out = self.second_pointnet(first_pn_out_fusion, epoch)
with torch.cuda.device(second_pn_out.get_device()):
second_gather_index = index_max.forward_cuda_shared_mem(second_pn_out, min_idx.int(), M).detach().long()
second_pn_out_masked_max = second_pn_out.gather(dim=2, index=second_gather_index) * mask_row_max.unsqueeze(
1).float() # BxCxM
# knn search on nodes
knn_feature_1 = self.knnlayer_1(query=som_node_cluster_mean,
database=som_node_cluster_mean,
x=second_pn_out_masked_max,
K=self.node_knn_k_1,
epoch=epoch)
node_feature_aggregated = torch.cat((second_pn_out_masked_max, knn_feature_1), dim=1) # Bx(C1+C2)xM
# mlp to calculate the per-node keypoint
y = self.node_mlp1(node_feature_aggregated)
point_descriptor = self.node_mlp2(y)
keypoint_sigma = self.node_mlp3(point_descriptor) # Bx(3+1)xkN
nodes = keypoint_sigma[:, 0:3, :] + som_node_cluster_mean # Bx3xM
nodes = nodes.transpose(1, 2).contiguous() # BxMx3
part_nodes = nodes.reshape(bs, self.part_num, -1, 3) # (B, K, M/K, 3)
# -- Pose and coefficients branch --
if self.surface_normal_len >= 1:
x_init_augmented = torch.cat((x_stack, sn_stack), dim=1)
coefs = self.third_pointnet2(x_init_augmented, epoch)
else:
coefs = self.third_pointnet2(x_stack, epoch)
coefs_expand = coefs.clone().unsqueeze(2)
joint_params = self.joint_net(coefs_expand).reshape(B, self.joint_num, 6)
joint_loc = joint_params[:, :, :3]
joint_axis = joint_params[:, :, 3:]
return part_nodes, coefs, joint_loc, joint_axis
def get_transformed_pred_keypoints(self, c, gt_r, gt_t):
"""The category-specific symmetric 3D keypoints are computed with the deformation function.
(transformed based on gt_r and gt_t)
Arguments:
c {torch.Tensor} -- predicted def coefficients - BxK
gt_r {torch.Tensor} -- ground truth rotation - Bx3x3
gt_t {torch.Tensor} -- ground truth translation - Bx1x3
Returns:
torch.Tensor -- kpts: category-specific symmetric 3D keypoints - BXpart_numx(M/part_num)X3
"""
refl_mat = self.get_reflection_operator(self.n_pl)
if self.symtype != "none":
basis_half = self.basis
else:
basis = self.basis
c = c.unsqueeze_(1).unsqueeze_(1) # (B, 1, 1, K)
bs = c.size(0)
if self.symtype == "shape":
refl_batch = refl_mat.repeat(c.shape[0], 1, 1) # (B, 3, 3)
kpts_half = torch.sum(c * basis_half, 3) # (B, 3, M/2)
kpts_half_reflected = torch.matmul(refl_batch, kpts_half) # (B, 3, M/2)
part_kpts_half = kpts_half.reshape(bs, 3, self.part_num, -1) # (B, 3, part_num, M/2/part_num)
part_kpts_half_reflected = kpts_half_reflected.reshape(bs, 3, self.part_num, -1) # (B, 3, part_num, M/2/part_num)
part_kpts = torch.cat((part_kpts_half, part_kpts_half_reflected), dim=-1) # (B, 3, part_num, M/part_num)
kpts = part_kpts.reshape(bs, 3, -1) # (B, 3, M)
elif self.symtype == "basis":
raise NotImplementedError
elif self.symtype == "none":
kpts = torch.sum(c * basis, 3) # (B, 3, M)
else:
raise NotImplementedError
kpts = torch.bmm(gt_r, kpts).transpose(1, 2) + gt_t # (BxMx3)
part_kpts = kpts.reshape(bs, self.part_num, -1, 3) # (B, part_num, M/part_num, 3)
return part_kpts
@staticmethod
def get_transformed_joint_params(pred_joint_loc, pred_joint_axis, gt_r, gt_t):
"""
transform predicted joint params based on gt_r and gt_t
:param pred_joint_loc: joint location, BxJx3 Tensor
:param pred_joint_axis: joint axis, BxJx3 Tensor
:param gt_r: ground truth rotation matrix, Bx3x3 Tensor
:param gt_t: ground truth translation, Bx1x3 Tensor
:return:
trans_joint_loc: transformed joint location
trans_joint_axis: transformed joint axis
"""
trans_joint_loc = torch.bmm(gt_r, pred_joint_loc.transpose(1, 2)).transpose(1, 2) + gt_t
trans_joint_axis = torch.bmm(gt_r, pred_joint_axis.transpose(1, 2)).transpose(1, 2) + gt_t
return trans_joint_loc, trans_joint_axis
@staticmethod
def get_reflection_operator(n_pl):
""" The reflection operator is parametrized by the normal vector
of the plane of symmetry passing through the origin. """
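        # Added note: the matrix below is the Householder reflection R = I - 2 * n * n^T
        # for the unit normal n = (n_x, n_y, n_z); n_y is fixed to 0, so the symmetry
        # plane always contains the y-axis.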
norm_npl = torch.norm(n_pl, 2)
n_x = n_pl[0, 0] / norm_npl # torch.tensor(1.0).cuda()
n_y = torch.tensor(0.0).cuda()
n_z = n_pl[0, 1] / norm_npl
refl_mat = torch.stack(
[
1 - 2 * n_x * n_x,
-2 * n_x * n_y,
-2 * n_x * n_z,
-2 * n_x * n_y,
1 - 2 * n_y * n_y,
-2 * n_y * n_z,
-2 * n_x * n_z,
-2 * n_y * n_z,
1 - 2 * n_z * n_z,
],
dim=0,
).reshape(1, 3, 3)
return refl_mat
@staticmethod
def query_topk(node, x, M, k):
'''
:param node: SOM node of BxCxM tensor
:param x: input data BxCxN tensor
:param M: number of SOM nodes
:param k: topk
:return: mask: Nxnode_num
'''
# ensure x, and other stored tensors are in the same device
device = x.device
node = node.to(x.device)
node_idx_list = torch.from_numpy(np.arange(M).astype(np.int64)).to(device) # node_num LongTensor
# expand as BxCxNxnode_num
node = node.unsqueeze(2).expand(x.size(0), x.size(1), x.size(2), M)
x_expanded = x.unsqueeze(3).expand_as(node)
# calcuate difference between x and each node
diff = x_expanded - node # BxCxNxnode_num
diff_norm = (diff ** 2).sum(dim=1) # BxNxnode_num
# find the nearest neighbor
_, min_idx = torch.topk(diff_norm, k=k, dim=2, largest=False, sorted=False) # BxNxk
min_idx_expanded = min_idx.unsqueeze(2).expand(min_idx.size()[0], min_idx.size()[1], M, k) # BxNxnode_numxk
node_idx_list = node_idx_list.unsqueeze(0).unsqueeze(0).unsqueeze(3).expand_as(
min_idx_expanded).long() # BxNxnode_numxk
mask = torch.eq(min_idx_expanded, node_idx_list).int() # BxNxnode_numxk
# mask = torch.sum(mask, dim=3) # BxNxnode_num
# debug
B, N, M = mask.size()[0], mask.size()[1], mask.size()[2]
mask = mask.permute(0, 2, 3, 1).contiguous().view(B, M, k * N).permute(0, 2,
1).contiguous() # BxMxkxN -> BxMxkN -> BxkNxM
min_idx = min_idx.permute(0, 2, 1).contiguous().view(B, k * N)
mask_row_max, _ = torch.max(mask, dim=1) # Bxnode_num, this indicates whether the node has nearby x
return mask, mask_row_max, min_idx | StarcoderdataPython |
1696860 | <reponame>hsoft/hscommon<filename>util.py<gh_stars>1-10
# Created By: <NAME>
# Created On: 2011-01-11
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import sys
import os
import os.path as op
import re
from math import ceil
import glob
import shutil
from datetime import timedelta
from .path import Path, pathify, log_io_error
def nonone(value, replace_value):
"""Returns ``value`` if ``value`` is not ``None``. Returns ``replace_value`` otherwise.
"""
if value is None:
return replace_value
else:
return value
def tryint(value, default=0):
    """Tries to convert ``value`` to an ``int`` and returns ``default`` if it fails.
"""
try:
return int(value)
except (TypeError, ValueError):
return default
def minmax(value, min_value, max_value):
"""Returns `value` or one of the min/max bounds if `value` is not between them.
"""
return min(max(value, min_value), max_value)
#--- Sequence related
def dedupe(iterable):
"""Returns a list of elements in ``iterable`` with all dupes removed.
The order of the elements is preserved.
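    Example (added for illustration)::
        dedupe([1, 2, 1, 3, 2]) -> [1, 2, 3]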
"""
result = []
seen = {}
for item in iterable:
if item in seen:
continue
seen[item] = 1
result.append(item)
return result
def flatten(iterables, start_with=None):
"""Takes a list of lists ``iterables`` and returns a list containing elements of every list.
If ``start_with`` is not ``None``, the result will start with ``start_with`` items, exactly as
if ``start_with`` would be the first item of lists.
"""
result = []
if start_with:
result.extend(start_with)
for iterable in iterables:
result.extend(iterable)
return result
def first(iterable):
"""Returns the first item of ``iterable``.
"""
try:
return next(iter(iterable))
except StopIteration:
return None
def stripfalse(seq):
"""Returns a sequence with all false elements stripped out of seq.
"""
return [x for x in seq if x]
def extract(predicate, iterable):
"""Separates the wheat from the shaft (`predicate` defines what's the wheat), and returns both.
"""
wheat = []
shaft = []
for item in iterable:
if predicate(item):
wheat.append(item)
else:
shaft.append(item)
return wheat, shaft
def allsame(iterable):
"""Returns whether all elements of 'iterable' are the same.
"""
it = iter(iterable)
try:
first_item = next(it)
except StopIteration:
raise ValueError("iterable cannot be empty")
return all(element == first_item for element in it)
def trailiter(iterable, skipfirst=False):
"""Yields (prev_element, element), starting with (None, first_element).
If skipfirst is True, there will be no (None, item1) element and we'll start
directly with (item1, item2).
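    Example (added for illustration)::
        list(trailiter('ab')) -> [(None, 'a'), ('a', 'b')]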
"""
it = iter(iterable)
if skipfirst:
try:
prev = next(it)
except StopIteration:
return
else:
prev = None
for item in it:
yield prev, item
prev = item
def iterconsume(seq, reverse=True):
"""Iterate over ``seq`` and pops yielded objects.
Because we use the ``pop()`` method, we reverse ``seq`` before proceeding. If you don't need
to do that, set ``reverse`` to ``False``.
    This is useful in tight memory situations where you are looping over a sequence of objects that
are going to be discarded afterwards. If you're creating other objects during that iteration
you might want to use this to avoid ``MemoryError``.
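    Example (added for illustration; ``make_item`` and ``process`` are hypothetical)::
        items = [make_item(i) for i in range(1000)]
        for item in iterconsume(items):
            process(item)  # ``items`` shrinks as we iterate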
"""
if reverse:
seq.reverse()
while seq:
yield seq.pop()
#--- String related
def escape(s, to_escape, escape_with='\\'):
"""Returns ``s`` with characters in ``to_escape`` all prepended with ``escape_with``.
"""
return ''.join((escape_with + c if c in to_escape else c) for c in s)
def get_file_ext(filename):
"""Returns the lowercase extension part of filename, without the dot.
"""
pos = filename.rfind('.')
if pos > -1:
return filename[pos + 1:].lower()
else:
return ''
def rem_file_ext(filename):
"""Returns the filename without extension.
"""
pos = filename.rfind('.')
if pos > -1:
return filename[:pos]
else:
return filename
def pluralize(number, word, decimals=0, plural_word=None):
"""Returns a pluralized string with ``number`` in front of ``word``.
    Adds an 's' to ``word`` if ``number`` > 1.
    ``number``: The number to go in front of ``word``
``word``: The word to go after number
``decimals``: The number of digits after the dot
``plural_word``: If the plural rule for word is more complex than adding a 's', specify a plural
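    Examples (added for illustration)::
        pluralize(1, 'song') -> '1 song'
        pluralize(5, 'child', plural_word='children') -> '5 children'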
"""
number = round(number, decimals)
format = "%%1.%df %%s" % decimals
if number > 1:
if plural_word is None:
word += 's'
else:
word = plural_word
return format % (number, word)
def format_time(seconds, with_hours=True):
"""Transforms seconds in a hh:mm:ss string.
If ``with_hours`` if false, the format is mm:ss.
"""
minus = seconds < 0
if minus:
seconds *= -1
m, s = divmod(seconds, 60)
if with_hours:
h, m = divmod(m, 60)
r = '%02d:%02d:%02d' % (h, m, s)
else:
r = '%02d:%02d' % (m,s)
if minus:
return '-' + r
else:
return r
def format_time_decimal(seconds):
"""Transforms seconds in a strings like '3.4 minutes'.
"""
minus = seconds < 0
if minus:
seconds *= -1
if seconds < 60:
r = pluralize(seconds, 'second', 1)
elif seconds < 3600:
r = pluralize(seconds / 60.0, 'minute', 1)
elif seconds < 86400:
r = pluralize(seconds / 3600.0, 'hour', 1)
else:
r = pluralize(seconds / 86400.0, 'day', 1)
if minus:
return '-' + r
else:
return r
SIZE_DESC = ('B','KB','MB','GB','TB','PB','EB','ZB','YB')
SIZE_VALS = tuple(1024 ** i for i in range(1,9))
def format_size(size, decimal=0, forcepower=-1, showdesc=True):
"""Transform a byte count in a formatted string (KB, MB etc..).
``size`` is the number of bytes to format.
``decimal`` is the number digits after the dot.
``forcepower`` is the desired suffix. 0 is B, 1 is KB, 2 is MB etc.. if kept at -1, the suffix
will be automatically chosen (so the resulting number is always below 1024).
if ``showdesc`` is ``True``, the suffix will be shown after the number.
Usage example::
>>> format_size(1234, decimal=2, showdesc=True)
'1.21 KB'
"""
if forcepower < 0:
i = 0
while size >= SIZE_VALS[i]:
i += 1
else:
i = forcepower
if i > 0:
div = SIZE_VALS[i-1]
else:
div = 1
format = '%%%d.%df' % (decimal,decimal)
negative = size < 0
divided_size = ((0.0 + abs(size)) / div)
if decimal == 0:
divided_size = ceil(divided_size)
else:
divided_size = ceil(divided_size * (10 ** decimal)) / (10 ** decimal)
if negative:
divided_size *= -1
result = format % divided_size
if showdesc:
result += ' ' + SIZE_DESC[i]
return result
_valid_xml_range = '\x09\x0A\x0D\x20-\uD7FF\uE000-\uFFFD'
if sys.maxunicode > 0x10000:
_valid_xml_range += '%s-%s' % (chr(0x10000), chr(min(sys.maxunicode, 0x10FFFF)))
RE_INVALID_XML_SUB = re.compile('[^%s]' % _valid_xml_range, re.U).sub
def remove_invalid_xml(s, replace_with=' '):
return RE_INVALID_XML_SUB(replace_with, s)
def multi_replace(s, replace_from, replace_to=''):
"""A function like str.replace() with multiple replacements.
``replace_from`` is a list of things you want to replace. Ex: ['a','bc','d']
``replace_to`` is a list of what you want to replace to.
If ``replace_to`` is a list and has the same length as ``replace_from``, ``replace_from``
items will be translated to corresponding ``replace_to``. A ``replace_to`` list must
have the same length as ``replace_from``
If ``replace_to`` is a string, all ``replace_from`` occurence will be replaced
by that string.
``replace_from`` can also be a str. If it is, every char in it will be translated
as if ``replace_from`` would be a list of chars. If ``replace_to`` is a str and has
the same length as ``replace_from``, it will be transformed into a list.
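    Example (added for illustration)::
        multi_replace('a-b_c', '-_', ' ') -> 'a b c'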
"""
if isinstance(replace_to, str) and (len(replace_from) != len(replace_to)):
replace_to = [replace_to for r in replace_from]
if len(replace_from) != len(replace_to):
raise ValueError('len(replace_from) must be equal to len(replace_to)')
replace = list(zip(replace_from, replace_to))
for r_from, r_to in [r for r in replace if r[0] in s]:
s = s.replace(r_from, r_to)
return s
#--- Date related
# It might seem like needless namespace pollution, but the speedup gained by this constant is
# significant, so it stays.
ONE_DAY = timedelta(1)
def iterdaterange(start, end):
"""Yields every day between ``start`` and ``end``.
"""
date = start
while date <= end:
yield date
date += ONE_DAY
#--- Files related
@pathify
def modified_after(first_path: Path, second_path: Path):
"""Returns ``True`` if first_path's mtime is higher than second_path's mtime.
If one of the files doesn't exist or is ``None``, it is considered "never modified".
"""
try:
first_mtime = first_path.stat().st_mtime
except (EnvironmentError, AttributeError):
return False
try:
second_mtime = second_path.stat().st_mtime
except (EnvironmentError, AttributeError):
return True
return first_mtime > second_mtime
def find_in_path(name, paths=None):
"""Search for `name` in all directories of `paths` and return the absolute path of the first
occurrence. If `paths` is None, $PATH is used.
"""
if paths is None:
paths = os.environ['PATH']
if isinstance(paths, str): # if it's not a string, it's already a list
paths = paths.split(os.pathsep)
for path in paths:
if op.exists(op.join(path, name)):
return op.join(path, name)
return None
@log_io_error
@pathify
def delete_if_empty(path: Path, files_to_delete=[]):
"""Deletes the directory at 'path' if it is empty or if it only contains files_to_delete.
"""
if not path.exists() or not path.isdir():
return
contents = path.listdir()
if any(p for p in contents if (p.name not in files_to_delete) or p.isdir()):
return False
for p in contents:
p.remove()
path.rmdir()
return True
def open_if_filename(infile, mode='rb'):
"""If ``infile`` is a string, it opens and returns it. If it's already a file object, it simply returns it.
This function returns ``(file, should_close_flag)``. The should_close_flag is True is a file has
effectively been opened (if we already pass a file object, we assume that the responsibility for
closing the file has already been taken). Example usage::
fp, shouldclose = open_if_filename(infile)
dostuff()
if shouldclose:
fp.close()
"""
if isinstance(infile, Path):
return (infile.open(mode), True)
if isinstance(infile, str):
return (open(infile, mode), True)
else:
return (infile, False)
def ensure_folder(path):
"Create `path` as a folder if it doesn't exist."
if not op.exists(path):
os.makedirs(path)
def ensure_file(path):
"Create `path` as an empty file if it doesn't exist."
if not op.exists(path):
open(path, 'w').close()
def delete_files_with_pattern(folder_path, pattern, recursive=True):
"""Delete all files (or folders) in `folder_path` that match the glob `pattern`.
"""
to_delete = glob.glob(op.join(folder_path, pattern))
for fn in to_delete:
if op.isdir(fn):
shutil.rmtree(fn)
else:
os.remove(fn)
if recursive:
subpaths = [op.join(folder_path, fn) for fn in os.listdir(folder_path)]
subfolders = [p for p in subpaths if op.isdir(p)]
for p in subfolders:
delete_files_with_pattern(p, pattern, True)
class FileOrPath:
"""Does the same as :func:`open_if_filename`, but it can be used with a ``with`` statement.
Example::
with FileOrPath(infile):
dostuff()
"""
def __init__(self, file_or_path, mode='rb'):
self.file_or_path = file_or_path
self.mode = mode
self.mustclose = False
self.fp = None
def __enter__(self):
self.fp, self.mustclose = open_if_filename(self.file_or_path, self.mode)
return self.fp
def __exit__(self, exc_type, exc_value, traceback):
if self.fp and self.mustclose:
self.fp.close()
| StarcoderdataPython |
197079 | <filename>app/core/events.py
from hq2redis import HQ2Redis
from loguru import logger
from app import settings, state
from app.core.logging import init_logger
from app.db.events import (
close_db_connection,
close_redis_connection,
connect_to_db,
connect_to_redis,
)
from app.schedulers import load_jobs_with_lock, stop_jobs
from app.services.events import close_engine, start_engine
async def connect_to_quotes_api():
quotes_api = HQ2Redis(
redis_host=settings.hq2redis_host,
redis_port=settings.hq2redis_port,
redis_db=settings.hq2redis_db,
redis_password=<PASSWORD>,
)
await quotes_api.startup()
state.quotes_api = quotes_api
async def close_quotes_api_conn():
await state.quotes_api.shutdown()
async def start_app() -> None:
await init_logger()
await connect_to_db()
await connect_to_redis()
await connect_to_quotes_api()
await start_engine()
await load_jobs_with_lock()
@logger.catch
async def stop_app() -> None:
await stop_jobs()
await close_engine()
await close_redis_connection()
await close_db_connection()
await close_quotes_api_conn()
| StarcoderdataPython |
3276184 | <filename>neon_api_proxy/api_connector.py
# NEON AI (TM) SOFTWARE, Software Development Kit & Application Development System
# All trademark and other rights reserved by their respective owners
# Copyright 2008-2021 Neongecko.com Inc.
# BSD-3
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pika.channel
from typing import Optional
from neon_utils import LOG
from neon_utils.socket_utils import b64_to_dict, dict_to_b64
from neon_mq_connector.connector import MQConnector
from neon_api_proxy.controller import NeonAPIProxyController
class NeonAPIMQConnector(MQConnector):
"""Adapter for establishing connection between Neon API and MQ broker"""
def __init__(self, config: Optional[dict], service_name: str, proxy: NeonAPIProxyController):
"""
Additionally accepts message bus connection properties
:param config: dictionary containing MQ configuration data
:param service_name: name of the service instance
"""
super().__init__(config, service_name)
self.vhost = '/neon_api'
self.proxy = proxy
def handle_api_input(self,
channel: pika.channel.Channel,
method: pika.spec.Basic.Deliver,
_: pika.spec.BasicProperties,
body: bytes):
"""
Handles input requests from MQ to Neon API
:param channel: MQ channel object (pika.channel.Channel)
:param method: MQ return method (pika.spec.Basic.Deliver)
:param _: MQ properties (pika.spec.BasicProperties)
:param body: request body (bytes)
"""
message_id = None
try:
if body and isinstance(body, bytes):
request = b64_to_dict(body)
tokens = self.extract_agent_tokens(request)
message_id = tokens.pop('message_id', request.get("message_id", None))
LOG.info(f"request={request}; message_id={message_id}")
respond = self.proxy.resolve_query(request)
LOG.info(f"message={message_id} status={respond.get('status_code')}")
try:
respond['content'] = bytes(respond.get('content', b'')).decode(encoding='utf-8')
except Exception as e:
LOG.error(e)
respond = {**respond, **tokens}
LOG.debug(f"respond={respond}")
data = dict_to_b64(respond)
routing_key = request.get('routing_key', 'neon_api_output')
# queue declare is idempotent, just making sure queue exists
channel.queue_declare(queue=routing_key)
channel.basic_publish(exchange='',
routing_key=routing_key,
body=data,
properties=pika.BasicProperties(expiration='1000')
)
channel.basic_ack(method.delivery_tag)
else:
raise TypeError(f'Invalid body received, expected: bytes string; got: {type(body)}')
except Exception as e:
LOG.error(f"message_id={message_id}")
LOG.error(e)
@staticmethod
def extract_agent_tokens(msg_data: dict) -> dict:
"""
Extracts tokens from msg data based on received "agent"
:param msg_data: desired message data
:return: dictionary containing tokens dedicated to resolved agent
"""
tokens = dict()
request_agent = msg_data.pop('agent', 'undefined')
if 'klatchat' in request_agent:
LOG.info('Resolved agent is "klatchat"')
tokens['cid'] = msg_data.pop("cid", None)
tokens['message_id'] = tokens['replied_message'] = msg_data.get('messageID', None)
else:
LOG.warning('Failed to resolve an agent from the message data')
return tokens
def handle_error(self, thread, exception):
LOG.error(f"{exception} occurred in {thread}")
LOG.info(f"Restarting Consumers")
self.stop()
self.run()
def pre_run(self, **kwargs):
self.register_consumer("neon_api_consumer", self.vhost, 'neon_api_input', self.handle_api_input, auto_ack=False)
self.register_consumer("neon_api_consumer_targeted",
self.vhost,
f'neon_api_input_{self.service_id}',
self.handle_api_input, auto_ack=False)
| StarcoderdataPython |
1669887 | """
Кастомизация админки FastAPI Admin
https://fastapi-admin.github.io/reference/resource/
"""
from fastapi_admin.app import app
from fastapi_admin.resources import Model
from app.models import Booking, Hotel, User
@app.register
class HotelResource(Model):
label = "Hotels"
model = Hotel
@app.register
class UserResource(Model):
label = "Users"
model = User
fields = ["email", "first_name", "middle_name", "last_name", "birthdate"]
@app.register
class BookingResource(Model):
label = "Bookings"
model = Booking
| StarcoderdataPython |
1667956 | <filename>squadron/libraries/apt/test_apt.py
import json
from . import schema, verify, apply
import wrap_apt
from mock import MagicMock
import os
def set_test_hook_if_not_root(hook=True):
if(os.geteuid() != 0):
wrap_apt.FAKE_RETURN = hook
def test_schema():
assert len(schema()) > 0
def test_verify_fail():
set_test_hook_if_not_root(False)
inputh = ["idonotexist"]
assert len(verify(inputh)) != 0
def test_verify():
inputh = ["git"]
set_test_hook_if_not_root()
assert len(verify(inputh)) == 0
def test_apply():
set_test_hook_if_not_root()
inputh = ["git"]
assert len(apply(inputh, "")) == 0
def test_apply_new():
set_test_hook_if_not_root()
wrap_apt.uninstall_package("zivot")
inputh = ["zivot"]
assert len(apply(inputh, "")) == 0
| StarcoderdataPython |
3290814 | <reponame>johannvk/ProjectEuler
import operator
# Integer Right Triangles
# Implementing stupid brute force trials:
def brute_force_triangle_check(max_perim):
    # Count integer right triangles {a, b, c} with a + b + c = p, for each p <= max_perim.
    solutions = {}
    for p in range(3, max_perim + 1):
        for a in range(1, p - 2):
            for b in range(a, p - a - 1):
                c = p - a - b
                if a * a + b * b == c * c:
                    solutions[p] = solutions.get(p, 0) + 1
    return solutions
def is_perfect_square(num):
return (int(num**(1/2)))**2 == num
# perimeter_length_solutions = {}
#
#
# def len_hyp(a, b):
# return (a**2 + b**2)**(1/2)
#
#
# def check_right_triangle(kat1, kat2, hyp):
# return kat1**2 + kat2**2 - hyp**2 == 0
#
#
# base_pairs = []
# triangle_perimeter = 125
#
# # Real part of the imaginary number
# a = 2
#
#
# while a**2 + a < triangle_perimeter - 2:
# # Complex part of the imaginary number
# b = 1
# while b < a:
# if (a, b) not in base_pairs and (b, a) not in base_pairs:
# base_pairs.append((a, b))
# base_pairs.append((b, a))
# perim = (a**2 - b**2) + 2*a*b + (len_hyp(a, b))
# if perim in perimeter_length_solutions.keys():
# perimeter_length_solutions[perim] += 1
# else:
# perimeter_length_solutions[perim] = 1
#
# b += 1
#
# # Want to implement scaling of the triplets formed by multiplying (a + ib) by itself.
#
# a += 1
#
#
# max_key = max(perimeter_length_solutions.keys(), key=lambda k: perimeter_length_solutions[k])
#
# print("The perimeter value with the most amount of solutions was: {}\nWith {} solutions"
# .format(max_key, perimeter_length_solutions[max_key]))
| StarcoderdataPython |
3300947 | <filename>shared/gast_to_code/gast_to_code_router.py
import shared.gast_to_code.general_helpers as general_helpers
from shared.gast_to_code.converter_registry import ConverterRegistry
def gast_to_code(gast, out_lang, lvl=0):
"""
    gast router that takes a generic AST and the output language
    that the gast needs to be converted to, and executes the
    conversion recursively.
    out_lang corresponds to the language codes defined in the datastructure:
javascript: js
python: py
"""
converter = ConverterRegistry.get_converter(out_lang)
if type(gast) == list:
return general_helpers.list_helper(gast, out_lang)
# Primitives
elif gast["type"] == "num":
return str(gast["value"])
elif gast["type"] == "arr":
return converter.handle_arr(gast)
elif gast["type"] == "str":
return '"' + gast["value"] + '"'
elif gast["type"] == "bool":
return converter.handle_bool(gast)
elif gast["type"] == "if":
return converter.handle_if(gast, lvl)
elif gast["type"] == "none":
return converter.handle_none(gast)
# Loops
elif gast["type"] == "whileStatement":
return converter.handle_while(gast, lvl)
elif gast["type"] == "forRangeStatement":
return converter.handle_for_range(gast, lvl)
elif gast["type"] == "forOfStatement":
return converter.handle_for_of(gast, lvl)
# Other
elif gast["type"] == "root":
return converter.handle_root(gast)
elif gast["type"] == "break":
return "break"
elif gast["type"] == "continue":
return "continue"
elif gast["type"] == "logStatement":
return converter.handle_log_statement(gast)
elif gast["type"] == "varAssign":
return converter.handle_var_assign(gast)
elif gast["type"] == "augAssign":
return converter.handle_aug_assign(gast)
elif gast["type"] == "funcCall":
return converter.handle_func_call(gast)
elif gast["type"] == "subscript":
return converter.handle_subscript(gast)
elif gast["type"] == "name":
return converter.handle_name(gast)
elif gast["type"] == "attribute":
return converter.handle_attribute(gast)
elif gast["type"] == "builtInAttribute":
return converter.handle_built_in_attribute(gast)
elif gast["type"] == "dict":
return converter.handle_dict(gast)
elif gast["type"] == "property":
return converter.handle_property(gast)
elif gast["type"] == "binOp":
return general_helpers.gast_to_node_bin_op_helper(gast, out_lang)
elif gast["type"] == "boolOp":
return converter.handle_bool_op(gast)
elif gast["type"] == "unaryOp":
return converter.handle_unary_op(gast)
elif gast["type"] == "functionDeclaration":
return converter.handle_function_declaration(gast, lvl)
elif gast["type"] == "returnStatement":
return converter.handle_return_statement(gast)
elif gast["type"] == "assignPattern":
return converter.handle_assign_pattern(gast)
elif gast["type"] == "arrowExpression":
return converter.handle_arrow_func(gast)
elif gast["type"] == "error" and gast["value"] == "unsupported":
error_string = converter.get_error_handler().unsupported_feature()
return error_string
else:
return converter.get_error_handler().unknown_error(
"shared/gast_to_code/gast_to_code_router.py", gast)
| StarcoderdataPython |
3202957 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for module json_summary_combiner.py"""
import filecmp
import os
import shutil
import tempfile
import unittest
import json_summary_combiner
class TestJsonSummaryCombiner(unittest.TestCase):
def setUp(self):
self._test_data_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'test_data', 'combiner')
self._actual_html_dir = tempfile.mkdtemp()
self._absolute_url = 'http://dummy-link.foobar/'
self._render_pictures_args = '--test1=test --test2=test --test3'
self._nopatch_gpu = 'False'
self._withpatch_gpu = 'True'
def tearDown(self):
shutil.rmtree(self._actual_html_dir)
def test_CombineJsonSummaries_WithDifferences(self):
worker_name_to_info = json_summary_combiner.CombineJsonSummaries(
os.path.join(self._test_data_dir, 'differences'))
for worker_name, worker_info in worker_name_to_info.items():
worker_num = worker_name[-1]
file_count = 0
for file_info in worker_info.failed_files:
file_count += 1
self.assertEquals(file_info.file_name,
'file%s_%s.png' % (worker_name, file_count))
self.assertEquals(file_info.skp_location,
'http://storage.cloud.google.com/dummy-bucket/skps'
'/%s/file%s_.skp' % (worker_name, worker_name))
self.assertEquals(file_info.num_pixels_differing,
int('%s%s1' % (worker_num, file_count)))
self.assertEquals(file_info.percent_pixels_differing,
int('%s%s2' % (worker_num, file_count)))
self.assertEquals(file_info.max_diff_per_channel,
int('%s%s4' % (worker_num, file_count)))
self.assertEquals(
worker_info.skps_location,
'gs://dummy-bucket/skps/%s' % worker_name)
self.assertEquals(
worker_info.files_location_nopatch,
'gs://dummy-bucket/output-dir/%s/nopatch-images' % worker_name)
self.assertEquals(
worker_info.files_location_diffs,
'gs://dummy-bucket/output-dir/%s/diffs' % worker_name)
self.assertEquals(
worker_info.files_location_whitediffs,
'gs://dummy-bucket/output-dir/%s/whitediffs' % worker_name)
def test_CombineJsonSummaries_NoDifferences(self):
worker_name_to_info = json_summary_combiner.CombineJsonSummaries(
os.path.join(self._test_data_dir, 'no_output'))
self.assertEquals(worker_name_to_info, {})
def _get_test_worker_name_to_info(self):
worker_name_to_info = {
'worker1': json_summary_combiner.WorkerInfo(
worker_name='worker1',
failed_files=[
json_summary_combiner.FileInfo(
'fileworker1_1.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker1/'
'fileworker1_.skp',
111, 112, 114, 115),
json_summary_combiner.FileInfo(
'fileworker1_2.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker1/'
'fileworker1_.skp',
121, 122, 124, 125)],
skps_location='gs://dummy-bucket/skps/worker1',
files_location_diffs='gs://dummy-bucket/worker1/diffs',
files_location_whitediffs='gs://dummy-bucket/worker1/whitediffs',
files_location_nopatch='gs://dummy-bucket/worker1/nopatch',
files_location_withpatch='gs://dummy-bucket/worker1/withpatch'),
'worker2': json_summary_combiner.WorkerInfo(
worker_name='worker2',
failed_files=[
json_summary_combiner.FileInfo(
'fileworker2_1.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker2/'
'fileworker2_.skp',
211, 212, 214, 215)],
skps_location='gs://dummy-bucket/skps/worker2',
files_location_diffs='gs://dummy-bucket/worker2/diffs',
files_location_whitediffs='gs://dummy-bucket/worker2/whitediffs',
files_location_nopatch='gs://dummy-bucket/worker2/nopatch',
files_location_withpatch='gs://dummy-bucket/worker2/withpatch'),
'worker3': json_summary_combiner.WorkerInfo(
worker_name='worker3',
failed_files=[
json_summary_combiner.FileInfo(
'fileworker3_1.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker3/'
'fileworker3_.skp',
311, 312, 314, 315),
json_summary_combiner.FileInfo(
'fileworker3_2.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker3/'
'fileworker3_.skp',
321, 322, 324, 325),
json_summary_combiner.FileInfo(
'fileworker3_3.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker3/'
'fileworker3_.skp',
331, 332, 334, 335),
json_summary_combiner.FileInfo(
'fileworker3_4.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker3/'
'fileworker3_.skp',
341, 342, 344, 345)],
skps_location='gs://dummy-bucket/skps/worker3',
files_location_diffs='gs://dummy-bucket/worker3/diffs',
files_location_whitediffs='gs://dummy-bucket/worker3/whitediffs',
files_location_nopatch='gs://dummy-bucket/worker3/nopatch',
files_location_withpatch='gs://dummy-bucket/worker3/withpatch')
}
return worker_name_to_info
def test_OutputToHTML_WithDifferences_WithAbsoluteUrl(self):
worker_name_to_info = self._get_test_worker_name_to_info()
json_summary_combiner.OutputToHTML(
worker_name_to_info=worker_name_to_info,
output_html_dir=self._actual_html_dir,
absolute_url=self._absolute_url,
render_pictures_args=self._render_pictures_args,
nopatch_gpu=self._nopatch_gpu,
withpatch_gpu=self._withpatch_gpu)
html_expected_dir = os.path.join(self._test_data_dir, 'html_outputs',
'differences_with_url')
for html_file in ('index.html', 'list_of_all_files.html',
'fileworker1_1.png.html', 'fileworker1_2.png.html',
'fileworker2_1.png.html', 'fileworker3_1.png.html',
'fileworker3_2.png.html', 'fileworker3_3.png.html',
'fileworker3_4.png.html'):
self.assertTrue(
filecmp.cmp(os.path.join(html_expected_dir, html_file),
os.path.join(self._actual_html_dir, html_file)))
def test_OutputToHTML_WithDifferences_WithNoUrl(self):
worker_name_to_info = self._get_test_worker_name_to_info()
json_summary_combiner.OutputToHTML(
worker_name_to_info=worker_name_to_info,
output_html_dir=self._actual_html_dir,
absolute_url='',
render_pictures_args=self._render_pictures_args,
nopatch_gpu=self._nopatch_gpu,
withpatch_gpu=self._withpatch_gpu)
html_expected_dir = os.path.join(self._test_data_dir, 'html_outputs',
'differences_no_url')
for html_file in ('index.html', 'list_of_all_files.html',
'fileworker1_1.png.html', 'fileworker1_2.png.html',
'fileworker2_1.png.html', 'fileworker3_1.png.html',
'fileworker3_2.png.html', 'fileworker3_3.png.html',
'fileworker3_4.png.html'):
self.assertTrue(
filecmp.cmp(os.path.join(html_expected_dir, html_file),
os.path.join(self._actual_html_dir, html_file)))
def test_OutputToHTML_NoDifferences(self):
json_summary_combiner.OutputToHTML(
worker_name_to_info={},
output_html_dir=self._actual_html_dir,
absolute_url='',
render_pictures_args=self._render_pictures_args,
nopatch_gpu=self._nopatch_gpu,
withpatch_gpu=self._withpatch_gpu)
html_expected_dir = os.path.join(self._test_data_dir, 'html_outputs',
'nodifferences')
self.assertTrue(
filecmp.cmp(os.path.join(html_expected_dir, 'index.html'),
os.path.join(self._actual_html_dir, 'index.html')))
if __name__ == '__main__':
unittest.main()

# File: oving_02/oppg2.py
#!/usr/bin/env python3
# O<NAME> <NAME>
# 2.a
# x = 10
print("{:<21}{}".format("x = 10 :", "The data type of x is int"))
# x = 10 + 10
print("{:<21}{}".format("x = 10 + 10 :", "The data type of x is int"))
# x = 5.5
print("{:<21}{}".format("x = 5.5 :", "The data type of x is float"))
# x = 10 + 5.5
print("{:<21}{}".format("x = 10 + 5.5 :", "The data type of x is float"))
# x = 5.5 + 5.5
print("{:<21}{}".format("x = 5.5 + 5.5 :", "The data type of x is float"))
# x = "abc"
print("{:<21}{}".format("x = \"abc\" :", "The data type of x is str"))
# x = "abc" + "5"
print("{:<21}{}".format("x = \"abc\" + \"5\" :", "The data type of x is str"))
# x = "abc" + 5
print("{:<21}{}".format("x = \"abc\" + 5 :", "It is not possible to concatenate a string (\"abc\") and an integer (5)."))
# x = "abc" + str(5)
print("{:<21}{}".format("x = \"abc\" + str(5) :", "The data type of x is str"))
# x = "5" + 5
print("{:<21}{}".format("x = \"5\" + 5 :", "It is not possible to concatenate a string (\"5\") and an integer (5)."))
# x = int("5") + 5
print("{:<21}{}".format("x = int(\"5\") + 5 :", "The data type of x is int"))

# -*- coding: utf-8 -*-
from cwr.acknowledgement import AcknowledgementRecord, MessageRecord
from cwr.agreement import AgreementRecord, AgreementTerritoryRecord, \
InterestedPartyForAgreementRecord
from cwr.group import Group, GroupHeader, GroupTrailer
from cwr.info import AdditionalRelatedInfoRecord
from cwr.parser.decoder.common import Decoder
from cwr.interested_party import IPTerritoryOfControlRecord, Publisher, \
PublisherRecord, Writer, PublisherForWriterRecord, WriterRecord
from cwr.non_roman_alphabet import NonRomanAlphabetAgreementPartyRecord, \
NonRomanAlphabetOtherWriterRecord, NonRomanAlphabetPerformanceDataRecord, \
NonRomanAlphabetPublisherNameRecord, NonRomanAlphabetTitleRecord, \
NonRomanAlphabetWorkRecord, NonRomanAlphabetWriterNameRecord
from cwr.transmission import Transmission, TransmissionTrailer, \
TransmissionHeader
from cwr.work import RecordingDetailRecord, ComponentRecord, \
AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, \
InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, \
WorkRecord
from cwr.file import CWRFile, FileTag
from cwr.other import AVIKey, VISAN
from cwr.table_value import MediaTypeValue, TableValue, InstrumentValue
"""
Classes for transforming dictionaries into instances of the CWR model.
There is a decoder for each of the model classes, and all of them expect a
dictionary with a key for each field, named after the field and mapping to a
valid value.
The values in the dictionary should already be of the expected type; for
example, if an integer is expected, the dictionary should contain an integer,
so the entries do not need any further parsing.
These decoders are useful for handling JSON transmissions or Mongo databases.
"""
__author__ = '<NAME>'
__license__ = 'MIT'
__status__ = 'Development'
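# Illustrative usage sketch (the literal values below are placeholders; only the
# key names are taken from GroupHeaderDictionaryDecoder defined further down):
#
#   decoder = GroupHeaderDictionaryDecoder()
#   header = decoder.decode({'record_type': 'GRH',
#                            'group_id': 1,
#                            'transaction_type': 'NWR',
#                            'version_number': '02.10',
#                            'batch_request_id': 1})
#   # 'header' is now a cwr.group.GroupHeader instance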
class TransactionRecordDictionaryDecoder(Decoder):
def __init__(self):
super(TransactionRecordDictionaryDecoder, self).__init__()
self._decoders = {}
self._decoders['ACK'] = AcknowledgementDictionaryDecoder()
self._decoders['AGR'] = AgreementDictionaryDecoder()
self._decoders['TER'] = AgreementTerritoryDictionaryDecoder()
self._decoders['ARI'] = AdditionalRelatedInformationDictionaryDecoder()
self._decoders['ALT'] = AlternateTitleDictionaryDecoder()
self._decoders['EWT'] = AuthoredWorkDictionaryDecoder()
self._decoders['VER'] = AuthoredWorkDictionaryDecoder()
self._decoders['COM'] = ComponentDictionaryDecoder()
self._decoders['IPA'] = InterestedPartyForAgreementDictionaryDecoder()
self._decoders['SPT'] = IPTerritoryOfControlDictionaryDecoder()
self._decoders['SWT'] = IPTerritoryOfControlDictionaryDecoder()
self._decoders['IND'] = InstrumentationDetailDictionaryDecoder()
self._decoders['INS'] = InstrumentationSummaryDictionaryDecoder()
self._decoders['MSG'] = MessageDictionaryDecoder()
self._decoders['PER'] = PerformingArtistDictionaryDecoder()
self._decoders['PWR'] = PublisherForWriterDictionaryDecoder()
self._decoders['REC'] = RecordingDetailDictionaryDecoder()
self._decoders['EXC'] = WorkDictionaryDecoder()
self._decoders['ISW'] = WorkDictionaryDecoder()
self._decoders['NWR'] = WorkDictionaryDecoder()
self._decoders['REV'] = WorkDictionaryDecoder()
self._decoders['ORN'] = WorkOriginDictionaryDecoder()
self._decoders['SWR'] = WriterRecordDictionaryDecoder()
        self._decoders['OWR'] = WriterRecordDictionaryDecoder()
self._decoders[
'NPA'] = NonRomanAlphabetAgreementPartyDictionaryDecoder()
self._decoders['NOW'] = NonRomanAlphabetOtherWriterDictionaryDecoder()
self._decoders[
'NPR'] = NonRomanAlphabetPerformanceDataDictionaryDecoder()
self._decoders['NPN'] = NonRomanAlphabetPublisherNameDictionaryDecoder()
self._decoders['NAT'] = NonRomanAlphabetTitleDictionaryDecoder()
self._decoders['NET'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NCT'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NVT'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NWN'] = NonRomanAlphabetWriterNameDictionaryDecoder()
self._decoders['SPU'] = PublisherRecordDictionaryDecoder()
self._decoders['OPU'] = PublisherRecordDictionaryDecoder()
def decode(self, data):
return self._decoders[data['record_type']].decode(data)
class AcknowledgementDictionaryDecoder(Decoder):
def __init__(self):
super(AcknowledgementDictionaryDecoder, self).__init__()
def decode(self, data):
return AcknowledgementRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
original_group_id=data[
'original_group_id'],
original_transaction_sequence_n=data[
'original_transaction_sequence_n'],
original_transaction_type=data[
'original_transaction_type'],
transaction_status=data[
'transaction_status'],
creation_date_time=data[
'creation_date_time'],
processing_date=data['processing_date'],
creation_title=data['creation_title'],
submitter_creation_n=data[
'submitter_creation_n'],
recipient_creation_n=data[
'recipient_creation_n'])
class AgreementDictionaryDecoder(Decoder):
def __init__(self):
super(AgreementDictionaryDecoder, self).__init__()
def decode(self, data):
return AgreementRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
submitter_agreement_n=data[
'submitter_agreement_n'],
agreement_type=data['agreement_type'],
agreement_start_date=data[
'agreement_start_date'],
prior_royalty_status=data[
'prior_royalty_status'],
post_term_collection_status=data[
'post_term_collection_status'],
number_of_works=data['number_of_works'],
society_assigned_agreement_n=data[
'society_assigned_agreement_n'],
international_standard_code=data[
'international_standard_code'],
sales_manufacture_clause=data[
'sales_manufacture_clause'],
agreement_end_date=data['agreement_end_date'],
date_of_signature=data['date_of_signature'],
retention_end_date=data['retention_end_date'],
prior_royalty_start_date=data[
'prior_royalty_start_date'],
post_term_collection_end_date=data[
'post_term_collection_end_date'],
shares_change=data['shares_change'],
advance_given=data['advance_given'])
class AgreementTerritoryDictionaryDecoder(Decoder):
def __init__(self):
super(AgreementTerritoryDictionaryDecoder, self).__init__()
def decode(self, data):
return AgreementTerritoryRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
tis_numeric_code=data[
'tis_numeric_code'],
inclusion_exclusion_indicator=data[
'inclusion_exclusion_indicator'])
class AdditionalRelatedInformationDictionaryDecoder(Decoder):
def __init__(self):
super(AdditionalRelatedInformationDictionaryDecoder, self).__init__()
def decode(self, data):
return AdditionalRelatedInfoRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
society_n=data['society_n'],
type_of_right=data['type_of_right'],
work_n=data['work_n'],
subject_code=data['subject_code'],
note=data['note'])
class AlternateTitleDictionaryDecoder(Decoder):
def __init__(self):
super(AlternateTitleDictionaryDecoder, self).__init__()
def decode(self, data):
return AlternateTitleRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
alternate_title=data['alternate_title'],
title_type=data['title_type'],
language_code=data['language_code'])
class AuthoredWorkDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(AuthoredWorkDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_1 = self._ipi_base_decoder.decode(data[
'writer_1_ipi_base_n'])
ipi_base_2 = self._ipi_base_decoder.decode(data[
'writer_2_ipi_base_n'])
return AuthoredWorkRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
title=data['title'],
submitter_work_n=data['submitter_work_n'],
writer_1_first_name=data[
'writer_1_first_name'],
writer_1_last_name=data['writer_1_last_name'],
writer_2_first_name=data[
'writer_2_first_name'],
writer_2_last_name=data['writer_2_last_name'],
writer_1_ipi_base_n=ipi_base_1,
writer_1_ipi_name_n=data[
'writer_1_ipi_name_n'],
writer_2_ipi_base_n=ipi_base_2,
writer_2_ipi_name_n=data[
'writer_2_ipi_name_n'],
source=data['source'],
language_code=data['language_code'],
iswc=data['iswc'])
class ComponentDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(ComponentDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_1 = self._ipi_base_decoder.decode(data['writer_1_ipi_base_n'])
ipi_base_2 = self._ipi_base_decoder.decode(data['writer_2_ipi_base_n'])
return ComponentRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
title=data['title'],
submitter_work_n=data['submitter_work_n'],
writer_1_last_name=data['writer_1_last_name'],
writer_1_first_name=data['writer_1_first_name'],
writer_2_last_name=data['writer_2_last_name'],
writer_2_first_name=data['writer_2_first_name'],
writer_1_ipi_base_n=ipi_base_1,
writer_1_ipi_name_n=data['writer_1_ipi_name_n'],
writer_2_ipi_base_n=ipi_base_2,
writer_2_ipi_name_n=data['writer_2_ipi_name_n'],
iswc=data['iswc'],
duration=data['duration'])
class GroupHeaderDictionaryDecoder(Decoder):
def __init__(self):
super(GroupHeaderDictionaryDecoder, self).__init__()
def decode(self, data):
return GroupHeader(record_type=data['record_type'],
group_id=data['group_id'],
transaction_type=data['transaction_type'],
version_number=data['version_number'],
batch_request_id=data['batch_request_id'])
class GroupTrailerDictionaryDecoder(Decoder):
def __init__(self):
super(GroupTrailerDictionaryDecoder, self).__init__()
def decode(self, data):
total_monetary_value = None
if 'total_monetary_value' in data:
total_monetary_value = data['total_monetary_value']
currency_indicator = None
if 'currency_indicator' in data:
currency_indicator = data['currency_indicator']
return GroupTrailer(record_type=data['record_type'],
group_id=data['group_id'],
transaction_count=data['transaction_count'],
record_count=data['record_count'],
currency_indicator=currency_indicator,
total_monetary_value=total_monetary_value,
)
class InterestedPartyForAgreementDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(InterestedPartyForAgreementDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = self._ipi_base_decoder.decode(data['ipi_base_n'])
return InterestedPartyForAgreementRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
ip_n=data['ip_n'],
ip_last_name=data['ip_last_name'],
agreement_role_code=data['agreement_role_code'],
ip_writer_first_name=data['ip_writer_first_name'],
ipi_name_n=data['ipi_name_n'], ipi_base_n=ipi_base,
pr_society=data['pr_society'], pr_share=data['pr_share'],
mr_society=data['mr_society'], mr_share=data['mr_share'],
sr_society=data['sr_society'], sr_share=data['sr_share'])
class IPTerritoryOfControlDictionaryDecoder(Decoder):
def __init__(self):
super(IPTerritoryOfControlDictionaryDecoder, self).__init__()
def decode(self, data):
record = IPTerritoryOfControlRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
ip_n=data['ip_n'],
inclusion_exclusion_indicator=data[
'inclusion_exclusion_indicator'],
tis_numeric_code=data[
'tis_numeric_code'],
sequence_n=data['sequence_n'],
pr_collection_share=data[
'pr_collection_share'],
mr_collection_share=data[
'mr_collection_share'],
shares_change=data['shares_change'])
if 'sr_collection_share' in data:
record.sr_collection_share = data['sr_collection_share']
return record
class InstrumentationDetailDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentationDetailDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentationDetailRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
instrument_code=data[
'instrument_code'],
number_players=data[
'number_players'])
class InstrumentationSummaryDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentationSummaryDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentationSummaryRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
number_voices=data['number_voices'],
standard_instrumentation_type=data['standard_instrumentation_type'],
instrumentation_description=data['instrumentation_description'])
class MessageDictionaryDecoder(Decoder):
def __init__(self):
super(MessageDictionaryDecoder, self).__init__()
def decode(self, data):
return MessageRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
message_type=data['message_type'],
message_text=data['message_text'],
original_record_sequence_n=data[
'original_record_sequence_n'],
message_record_type=data['message_record_type'],
message_level=data['message_level'],
validation_n=data['validation_n'])
class PerformingArtistDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(PerformingArtistDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = None
if 'performing_artist_ipi_base_n' in data:
ipi_base = self._ipi_base_decoder.decode(data['performing_artist_ipi_base_n'])
performing_artist_first_name = None
if 'performing_artist_first_name' in data:
performing_artist_first_name = data['performing_artist_first_name']
performing_artist_ipi_name_n = None
if 'performing_artist_ipi_name_n' in data:
performing_artist_ipi_name_n = data['performing_artist_ipi_name_n']
return PerformingArtistRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
performing_artist_last_name=data[
'performing_artist_last_name'],
performing_artist_first_name=performing_artist_first_name,
performing_artist_ipi_name_n=performing_artist_ipi_name_n,
performing_artist_ipi_base_n=ipi_base)
class PublisherForWriterDictionaryDecoder(Decoder):
def __init__(self):
super(PublisherForWriterDictionaryDecoder, self).__init__()
def decode(self, data):
publisher_name = None
if 'publisher_name' in data:
publisher_name = data['publisher_name']
return PublisherForWriterRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
publisher_ip_n=data['publisher_ip_n'],
publisher_name=publisher_name,
writer_ip_n=data['writer_ip_n'],
submitter_agreement_n=data[
'submitter_agreement_n'],
society_assigned_agreement_n=data[
'society_assigned_agreement_n'])
class RecordingDetailDictionaryDecoder(Decoder):
def __init__(self):
super(RecordingDetailDictionaryDecoder, self).__init__()
def decode(self, data):
media_type = None
if 'media_type' in data:
media_type = data['media_type']
return RecordingDetailRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
first_release_date=data[
'first_release_date'],
first_release_duration=data[
'first_release_duration'],
first_album_title=data[
'first_album_title'],
first_album_label=data[
'first_album_label'],
first_release_catalog_n=data[
'first_release_catalog_n'],
ean=data['ean'],
isrc=data['isrc'],
recording_format=data['recording_format'],
recording_technique=data[
'recording_technique'],
media_type=media_type)
class FileDictionaryDecoder(Decoder):
def __init__(self):
super(FileDictionaryDecoder, self).__init__()
self._tag_decoder = FileTagDictionaryDecoder()
self._transmission_decoder = TransmissionDictionaryDecoder()
def decode(self, data):
tag = data['tag']
if isinstance(tag, dict):
tag = self._tag_decoder.decode(tag)
transmission = data['transmission']
if isinstance(transmission, dict):
transmission = self._transmission_decoder.decode(transmission)
return CWRFile(tag, transmission)
class TransmissionDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionDictionaryDecoder, self).__init__()
self._header_decoder = TransmissionHeaderDictionaryDecoder()
self._trailer_decoder = TransmissionTrailerDictionaryDecoder()
self._group_decoder = GroupDictionaryDecoder()
def decode(self, data):
header = data['header']
if isinstance(header, dict):
header = self._header_decoder.decode(header)
trailer = data['trailer']
if isinstance(trailer, dict):
trailer = self._trailer_decoder.decode(trailer)
groups = []
if len(data['groups']) > 0:
if isinstance(data['groups'][0], dict):
for group in data['groups']:
groups.append(self._group_decoder.decode(group))
else:
groups = data['groups']
return Transmission(header, trailer, groups)
class GroupDictionaryDecoder(Decoder):
def __init__(self):
super(GroupDictionaryDecoder, self).__init__()
self._header_decoder = GroupHeaderDictionaryDecoder()
self._trailer_decoder = GroupTrailerDictionaryDecoder()
self._transaction_decoder = TransactionRecordDictionaryDecoder()
def decode(self, data):
header = data['group_header']
if isinstance(header, dict):
header = self._header_decoder.decode(header)
trailer = data['group_trailer']
if isinstance(trailer, dict):
trailer = self._trailer_decoder.decode(trailer)
transactions = []
if len(data['transactions']) > 0:
if isinstance(data['transactions'][0][0], dict):
for transaction in data['transactions']:
transaction_records = []
for record in transaction:
transaction_records.append(
self._transaction_decoder.decode(record))
transactions.append(transaction_records)
else:
transactions = data['transactions']
return Group(header, trailer, transactions)
class TransmissionHeaderDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionHeaderDictionaryDecoder, self).__init__()
def decode(self, data):
header = TransmissionHeader(record_type=data['record_type'],
sender_id=data['sender_id'],
sender_name=data['sender_name'],
sender_type=data['sender_type'],
creation_date_time=data[
'creation_date_time'],
transmission_date=data['transmission_date'],
edi_standard=data['edi_standard'])
if 'character_set' in data:
header.character_set = data['character_set']
return header
class TransmissionTrailerDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionTrailerDictionaryDecoder, self).__init__()
def decode(self, data):
return TransmissionTrailer(record_type=data['record_type'],
group_count=data['group_count'],
transaction_count=data['transaction_count'],
record_count=data['record_count'])
class WorkDictionaryDecoder(Decoder):
def __init__(self):
super(WorkDictionaryDecoder, self).__init__()
def decode(self, data):
catalogue_number = None
if 'catalogue_number' in data:
catalogue_number = data['catalogue_number']
exceptional_clause = None
if 'exceptional_clause' in data:
exceptional_clause = data['exceptional_clause']
opus_number = None
if 'opus_number' in data:
opus_number = data['opus_number']
priority_flag = None
if 'priority_flag' in data:
priority_flag = data['priority_flag']
return WorkRecord(record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
submitter_work_n=data['submitter_work_n'],
title=data['title'],
version_type=data['version_type'],
musical_work_distribution_category=data[
'musical_work_distribution_category'],
date_publication_printed_edition=data[
'date_publication_printed_edition'],
text_music_relationship=data[
'text_music_relationship'],
language_code=data['language_code'],
copyright_number=data['copyright_number'],
copyright_date=data['copyright_date'],
music_arrangement=data['music_arrangement'],
lyric_adaptation=data['lyric_adaptation'],
excerpt_type=data['excerpt_type'],
composite_type=data['composite_type'],
composite_component_count=data[
'composite_component_count'],
iswc=data['iswc'],
work_type=data['work_type'],
duration=data['duration'],
catalogue_number=catalogue_number,
opus_number=opus_number,
contact_id=data['contact_id'],
contact_name=data['contact_name'],
recorded_indicator=data['recorded_indicator'],
priority_flag=priority_flag,
exceptional_clause=exceptional_clause,
grand_rights_indicator=data['grand_rights_indicator'])
class WorkOriginDictionaryDecoder(Decoder):
def __init__(self):
super(WorkOriginDictionaryDecoder, self).__init__()
def decode(self, data):
return WorkOriginRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
intended_purpose=data['intended_purpose'],
production_title=data['production_title'],
cd_identifier=data['cd_identifier'],
cut_number=data['cut_number'],
library=data['library'],
bltvr=data['bltvr'],
visan=data['visan'],
production_n=data['production_n'],
episode_title=data['episode_title'],
episode_n=data['episode_n'],
year_production=data['year_production'],
audio_visual_key=data['audio_visual_key'])
class WriterDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(WriterDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_n = self._ipi_base_decoder.decode(data['ipi_base_n'])
return Writer(ip_n=data['ip_n'],
personal_number=data['personal_number'],
ipi_base_n=ipi_base_n,
writer_first_name=data['writer_first_name'],
writer_last_name=data['writer_last_name'],
tax_id=data['tax_id'],
ipi_name_n=data['ipi_name_n'])
class WriterRecordDictionaryDecoder(Decoder):
def __init__(self):
super(WriterRecordDictionaryDecoder, self).__init__()
self._writer_decoder = WriterDictionaryDecoder()
def decode(self, data):
writer = self._writer_decoder.decode(data['writer'])
usa_license = None
if 'usa_license' in data:
usa_license = data['usa_license']
return WriterRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
writer=writer,
writer_designation=data['writer_designation'],
work_for_hire=data['work_for_hire'],
writer_unknown=data['writer_unknown'],
reversionary=data['reversionary'],
first_recording_refusal=data[
'first_recording_refusal'],
usa_license=usa_license,
pr_society=data['pr_society'],
pr_ownership_share=data['pr_ownership_share'],
mr_society=data['mr_society'],
mr_ownership_share=data['mr_ownership_share'],
sr_society=data['sr_society'],
sr_ownership_share=data['sr_ownership_share'])
class NonRomanAlphabetAgreementPartyDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetAgreementPartyDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetAgreementPartyRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
ip_name=data['ip_name'],
ip_writer_name=data['ip_writer_name'],
ip_n=data['ip_n'],
language_code=data['language_code'])
class NonRomanAlphabetOtherWriterDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetOtherWriterDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetOtherWriterRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
writer_first_name=data['writer_first_name'],
writer_name=data['writer_name'],
position=data['position'],
language_code=data['language_code'])
class NonRomanAlphabetPerformanceDataDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(NonRomanAlphabetPerformanceDataDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = self._ipi_base_decoder.decode(
data['performing_artist_ipi_base_n'])
return NonRomanAlphabetPerformanceDataRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
performing_artist_first_name=data['performing_artist_first_name'],
performing_artist_name=data['performing_artist_name'],
performing_artist_ipi_name_n=data['performing_artist_ipi_name_n'],
performing_artist_ipi_base_n=ipi_base,
language_code=data['language_code'],
performance_language=data['performance_language'],
performance_dialect=data['performance_dialect'])
class NonRomanAlphabetPublisherNameDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetPublisherNameDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetPublisherNameRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
publisher_sequence_n=data['publisher_sequence_n'],
ip_n=data['ip_n'],
publisher_name=data['publisher_name'],
language_code=data['language_code'])
class NonRomanAlphabetTitleDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetTitleDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetTitleRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
title=data['title'],
title_type=data['title_type'],
language_code=data['language_code'])
class NonRomanAlphabetWorkDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetWorkDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetWorkRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
title=data['title'],
language_code=data['language_code'])
class NonRomanAlphabetWriterNameDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetWriterNameDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetWriterNameRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
writer_first_name=data[
'writer_first_name'],
writer_last_name=data[
'writer_last_name'],
ip_n=data['ip_n'],
language_code=data[
'language_code'])
class PublisherDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(PublisherDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
if 'ipi_base_n' in data:
ipi_base = self._ipi_base_decoder.decode(data['ipi_base_n'])
else:
ipi_base = None
return Publisher(ip_n=data['ip_n'],
publisher_name=data['publisher_name'],
ipi_name_n=data['ipi_name_n'],
ipi_base_n=ipi_base,
tax_id=data['tax_id'])
class PublisherRecordDictionaryDecoder(Decoder):
def __init__(self):
super(PublisherRecordDictionaryDecoder, self).__init__()
self._publisher_decoder = PublisherDictionaryDecoder()
def decode(self, data):
publisher = self._publisher_decoder.decode(data['publisher'])
special_agreements = None
if 'special_agreements' in data:
special_agreements = data['special_agreements']
first_recording_refusal = None
if 'first_recording_refusal' in data:
first_recording_refusal = data['first_recording_refusal']
agreement_type = None
if 'agreement_type' in data:
agreement_type = data['agreement_type']
usa_license = None
if 'usa_license' in data:
usa_license = data['usa_license']
international_standard_code = None
if 'international_standard_code' in data:
international_standard_code = data['international_standard_code']
society_assigned_agreement_n = None
if 'society_assigned_agreement_n' in data:
society_assigned_agreement_n = data['society_assigned_agreement_n']
return PublisherRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
publisher=publisher,
publisher_sequence_n=data['publisher_sequence_n'],
submitter_agreement_n=data['submitter_agreement_n'],
publisher_type=data['publisher_type'],
publisher_unknown=data['publisher_unknown'],
pr_society=data['pr_society'],
pr_ownership_share=data['pr_ownership_share'],
mr_society=data['mr_society'],
mr_ownership_share=data['mr_ownership_share'],
sr_society=data['sr_society'],
sr_ownership_share=data['sr_ownership_share'],
special_agreements=special_agreements,
first_recording_refusal=first_recording_refusal,
international_standard_code=international_standard_code,
society_assigned_agreement_n=society_assigned_agreement_n,
agreement_type=agreement_type,
usa_license=usa_license)
class TableValueDictionaryDecoder(Decoder):
def __init__(self):
super(TableValueDictionaryDecoder, self).__init__()
def decode(self, data):
return TableValue(code=data['code'],
name=data['name'],
description=data['description'])
class MediaTypeValueDictionaryDecoder(Decoder):
def __init__(self):
super(MediaTypeValueDictionaryDecoder, self).__init__()
def decode(self, data):
return MediaTypeValue(code=data['code'],
name=data['name'],
media_type=data['media_type'],
duration_max=data['duration_max'],
works_max=data['works_max'],
fragments_max=data['fragments_max'])
class InstrumentValueDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentValueDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentValue(code=data['code'],
name=data['name'],
family=data['family'],
description=data['description'])
class FileTagDictionaryDecoder(Decoder):
def __init__(self):
super(FileTagDictionaryDecoder, self).__init__()
def decode(self, data):
return FileTag(data['year'],
data['sequence_n'],
data['sender'],
data['receiver'],
data['version'])
class AVIKeyDictionaryDecoder(Decoder):
def __init__(self):
super(AVIKeyDictionaryDecoder, self).__init__()
def decode(self, data):
return AVIKey(data['society_code'],
data['av_number'])
class IPIBaseDictionaryDecoder(Decoder):
def __init__(self):
super(IPIBaseDictionaryDecoder, self).__init__()
def decode(self, data):
if data:
result = data
else:
result = None
return result
class ISWCDictionaryDecoder(Decoder):
def __init__(self):
super(ISWCDictionaryDecoder, self).__init__()
def decode(self, data):
if data:
result = data
else:
result = None
return result
class VISANDictionaryDecoder(Decoder):
def __init__(self):
super(VISANDictionaryDecoder, self).__init__()
def decode(self, data):
return data

# Repo: hylang/comphyle, file: comphyle/jinja.py
#!/usr/bin/env python
from email.utils import formatdate
from jinja2 import Template
import time
import os
def rfc_2822(dateobj):
return formatdate(time.mktime(dateobj.timetuple()))
def render_fd(fpath, ctx):
    # Render the Jinja2 template in place: write the result next to the template
    # (same name without the .jinja2 suffix) and remove the template afterwards.
    output_name = fpath.replace(".jinja2", "")
    with open(fpath, "r") as template_file:
        t = Template(template_file.read())
    ctx['rfc_2822_date'] = rfc_2822(ctx['when'])
    with open(output_name, "w") as rendered_file:
        rendered_file.write(t.render(**ctx))
    os.unlink(fpath)
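# Example (illustrative sketch; the template name and context are placeholders,
# assuming the caller provides a datetime under 'when' as rfc_2822 expects):
#
#   import datetime
#   render_fd("feed.xml.jinja2", {"when": datetime.datetime.now()})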

velocidade = float(input('What is the current speed of the car? '))
if velocidade > 80:
    print('FINED! You exceeded the speed limit of 80 km/h')
    multa = (velocidade - 80) * 7
    print('You must pay a fine of R$ {:.2f}'.format(multa))
    print('Have a good day and drive safely!')

# <NAME>
# todo mov not working
import nuke
from PySide import QtGui
def run(node):
clipboard = QtGui.QApplication.clipboard()
filename = node['file'].evaluate()
filesplit = filename.rsplit('.',-2)
filesplit[1] = '%0'+str(len(filesplit[1]))+'d'
filep = '.'.join(filesplit)
filenameFrame = nuke.getFileNameList(os.path.dirname(filep))[0].rsplit(' ',-1)[1]
clipboard.setText(( filep+" "+filenameFrame))
nuke.nodePaste("%clipboard%")
#run(nuke.selectedNode())
import contextlib
import itertools
import re
import subprocess
import sys
from array import array
from collections import namedtuple
from gopro_overlay.common import temporary_file
from gopro_overlay.dimensions import dimension_from, Dimension
def run(cmd, **kwargs):
return subprocess.run(cmd, check=True, **kwargs)
def invoke(cmd, **kwargs):
try:
return run(cmd, **kwargs, text=True, capture_output=True)
except subprocess.CalledProcessError as e:
raise IOError(f"Error: {cmd}\n stdout: {e.stdout}\n stderr: {e.stderr}")
StreamInfo = namedtuple("StreamInfo", ["audio", "video", "meta", "video_dimension"])
def cut_file(input, output, start, duration):
streams = find_streams(input)
maps = list(itertools.chain.from_iterable(
[["-map", f"0:{stream}"] for stream in [streams.video, streams.audio, streams.meta]]))
args = ["ffmpeg",
"-y",
"-i", input,
"-map_metadata", "0",
*maps,
"-copy_unknown",
"-ss", str(start),
"-t", str(duration),
"-c", "copy",
output]
print(args)
run(args)
def join_files(filepaths, output):
"""only for joining parts of same trip"""
streams = find_streams(filepaths[0])
maps = list(itertools.chain.from_iterable(
[["-map", f"0:{stream}"] for stream in [streams.video, streams.audio, streams.meta]]))
with temporary_file() as commandfile:
with open(commandfile, "w") as f:
for path in filepaths:
f.write(f"file '{path}\n")
args = ["ffmpeg",
"-y",
"-f", "concat",
"-safe", "0",
"-i", commandfile,
"-map_metadata", "0",
*maps,
"-copy_unknown",
"-c", "copy",
output]
print(f"Running {args}")
run(args)
def find_streams(filepath, invoke=invoke):
ffprobe_output = str(invoke(["ffprobe", "-hide_banner", filepath]).stderr)
video_re = re.compile(r"Stream #\d+:(\d+)\(.+\): Video.*, (\d+x\d+)")
audio_re = re.compile(r"Stream #\d+:(\d+)\(.+\): Audio")
meta_re = re.compile(r"Stream #\d+:(\d+)\(.+\): Data: bin_data \(gpmd")
video_stream = None
video_dimension = None
audio_stream = None
meta_stream = None
for line in ffprobe_output.split("\n"):
video_match = video_re.search(line)
if video_match:
video_stream = int(video_match.group(1))
video_dimension = dimension_from(video_match.group(2))
audio_match = audio_re.search(line)
if audio_match:
audio_stream = int(audio_match.group(1))
meta_match = meta_re.search(line)
if meta_match:
meta_stream = int(meta_match.group(1))
if video_stream is None or audio_stream is None or meta_stream is None or video_dimension is None:
raise IOError("Invalid File? The data stream doesn't seem to contain GoPro audio, video & metadata ")
return StreamInfo(audio_stream, video_stream, meta_stream, video_dimension)
def load_gpmd_from(filepath):
track = find_streams(filepath).meta
if track:
cmd = ["ffmpeg", '-y', '-i', filepath, '-codec', 'copy', '-map', '0:%d' % track, '-f', 'rawvideo', "-"]
result = run(cmd, capture_output=True, timeout=10)
if result.returncode != 0:
raise IOError(f"ffmpeg failed code: {result.returncode} : {result.stderr.decode('utf-8')}")
arr = array("b")
arr.frombytes(result.stdout)
return arr
def ffmpeg_is_installed():
try:
invoke(["ffmpeg", "-version"])
return True
except FileNotFoundError:
return False
def ffmpeg_libx264_is_installed():
output = invoke(["ffmpeg", "-v", "quiet", "-codecs"]).stdout
libx264s = [x for x in output.split('\n') if "libx264" in x]
return len(libx264s) > 0
class FFMPEGGenerate:
def __init__(self, output, overlay_size: Dimension, popen=subprocess.Popen):
self.output = output
self.overlay_size = overlay_size
self.popen = popen
@contextlib.contextmanager
def generate(self):
cmd = [
"ffmpeg",
"-y",
"-loglevel", "info",
"-f", "rawvideo",
"-framerate", "10.0",
"-s", f"{self.overlay_size.x}x{self.overlay_size.y}",
"-pix_fmt", "rgba",
"-i", "-",
"-r", "30",
"-vcodec", "libx264",
"-preset", "veryfast",
self.output
]
process = self.popen(cmd, stdin=subprocess.PIPE, stdout=None, stderr=None)
try:
yield process.stdin
finally:
process.stdin.flush()
process.stdin.close()
process.wait(10)
class FFMPEGOverlay:
def __init__(self, input, output, overlay_size: Dimension, vsize=1080, redirect=None):
self.output = output
self.input = input
self.overlay_size = overlay_size
self.vsize = vsize
self.redirect = redirect
@contextlib.contextmanager
def generate(self):
if self.vsize == 1080:
filter_extra = ""
else:
filter_extra = f",scale=-1:{self.vsize}"
cmd = [
"ffmpeg",
"-y",
"-loglevel", "info",
"-i", self.input,
"-f", "rawvideo",
"-framerate", "10.0",
"-s", f"{self.overlay_size.x}x{self.overlay_size.y}",
"-pix_fmt", "rgba",
"-i", "-",
"-filter_complex", f"[0:v][1:v]overlay{filter_extra}",
"-vcodec", "libx264",
"-preset", "veryfast",
self.output
]
try:
if self.redirect:
with open(self.redirect, "w") as std:
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=std, stderr=std)
else:
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=None, stderr=None)
try:
yield process.stdin
finally:
process.stdin.flush()
process.stdin.close()
# really long wait as FFMPEG processes all the mpeg input file - not sure how to prevent this atm
process.wait(5 * 60)
except FileNotFoundError:
raise IOError("Unable to start the 'ffmpeg' process - is FFMPEG installed?") from None
except BrokenPipeError:
if self.redirect:
print("FFMPEG Output:")
with open(self.redirect) as f:
print("".join(f.readlines()), file=sys.stderr)
raise IOError("FFMPEG reported an error - can't continue") from None
if __name__ == "__main__":
print(ffmpeg_libx264_is_installed())
| StarcoderdataPython |
1682421 | #
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>
#
import unittest
import os
import time
import torch
from fast_transformers.aggregate import aggregate_cpu, broadcast_cpu
class TestAggregateCPU(unittest.TestCase):
def test_aggregate(self):
N = 1
H = 1
L = 40
E = 2
C = 4
x = torch.rand((N, H, L, E))
g = (torch.arange(L) % C).view(1, 1, L).repeat(N, H, 1).int()
c = (.1*torch.ones(N, H, C))
y = torch.zeros(N, H, C, E)
aggregate_cpu(x, g, c, y)
for i in range(C):
self.assertLess(
torch.abs(
x[:, :, i::C, :].mean(2) - y[:, :, i, :]
).max().item(),
1e-6
)
def test_broadcast(self):
N = 10
H = 3
L = 40
E = 32
C = 4
y = torch.rand(N, H, C, E)
g = (torch.arange(L) % C).view(1, 1, L).repeat(N, H, 1).int()
c = (.1*torch.ones(N, H, C))
x = torch.rand((N, H, L, E))
broadcast_cpu(y, g, c, x)
for i in range(C):
self.assertTrue(
torch.all(x[:, :, i::C] == 0.1*y[:, :, i:i+1, :])
)
def test_both(self):
N = 10
H = 3
L = 40
E = 32
C = 4
x_start = torch.rand(N, H, L, E)
x_end = torch.rand(N, H, L, E)
g = (torch.rand(N, H, L)*C).int()
c = torch.zeros(N, H, C)
y = torch.zeros((N, H, C, E))
# Aggregating ones should give us the counts
aggregate_cpu(
torch.ones(N, H, L, 1),
g,
torch.ones(N, H, C),
c
)
for i in range(C):
self.assertTrue(torch.all((g == i).sum(2) == c[:, :, i].long()))
# Aggregating into averages twice should be a noop
aggregate_cpu(x_start, g, 1/c, y)
broadcast_cpu(y, g, torch.ones(N, H, C), x_start)
y.zero_()
aggregate_cpu(x_start, g, 1/c, y)
broadcast_cpu(y, g, torch.ones(N, H, C), x_end)
self.assertLess(
torch.abs(x_start-x_end).max().item(),
1e-6
)
def test_aggregate_masked(self):
N = 10
H = 3
L = 40
E = 32
C = 4
x = torch.rand((N, H, L, E))
g = (torch.arange(L) % C).view(1, 1, L).repeat(N, H, 1).int()
g[:, :, -4:] = -1
c = torch.ones(N, H, C)/9.
y = torch.zeros(N, H, C, E)
aggregate_cpu(x, g, c, y)
for i in range(C):
self.assertLess(
torch.abs(
x[:, :, i::C, :][:, :, :-1, :].mean(2) - y[:, :, i, :]
).max().item(),
1e-6
)
@unittest.skipUnless(os.getenv("BENCHMARK_TESTS", ""), "no benchmarks")
def test_aggregate_benchmark(self):
N = 12
H = 8
L = 1000
S = 1000
E = 32
C = 100
x = torch.rand((N, H, L, E))
g = (torch.arange(L) % C).view(L, 1, 1).repeat(1, N, H).int()
c = 0.1*torch.ones(C, N, H)
y = torch.zeros((C, N, H, E))
s = time.time()
for i in range(100):
aggregate_cpu(x, g, c, y)
e = time.time()
t_aggregate = e - s
print('Aggregate Time: {}'.format(t_aggregate))
@unittest.skipUnless(os.getenv("BENCHMARK_TESTS", ""), "no benchmarks")
def test_broadcast_benchmark(self):
N = 12
H = 8
L = 1000
S = 1000
E = 32
C = 100
y = torch.rand((N, H, C, E))
g = (torch.arange(L) % C).view(1, 1, L).repeat(N, H, 1).int()
c = 0.1*torch.ones(N, H, C)
x = torch.zeros((N, H, L, E))
s = time.time()
for i in range(100):
broadcast_cpu(y, g, c, x)
e = time.time()
t_broadcast = e - s
print('Broadcast Time: {}'.format(t_broadcast))
if __name__ == "__main__":
unittest.main()

# Repo: blackbat13/stv
import sys
from stv.models import SimpleVotingModel
from stv.comparing_strats import StrategyComparer
no_voters = int(sys.argv[3])
no_candidates = int(sys.argv[4])
heuristic = int(sys.argv[5])
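# Positional command line arguments (as read above): sys.argv[3] = number of
# voters, sys.argv[4] = number of candidates, sys.argv[5] = heuristic id
# (0 = basic, 1 = control, 2 = epistemic, 3 = visited states, matching the
# branches below).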
simple_voting = SimpleVotingModel(no_candidates, no_voters)
simple_voting.generate()
print(simple_voting.model.js_dump_model())
winning = []
state_id = -1
voter_number = 0
for state in simple_voting.states:
state_id += 1
if state['finish'][voter_number] == 1 and state['coercer_actions'][voter_number] != 'pun' and state['voted'][
voter_number] != 1:
winning.append(state_id)
agents = [1]
strategy_comparer = StrategyComparer(simple_voting.model, simple_voting.get_actions()[1])
if heuristic == 0:
(result, strategy) = strategy_comparer.domino_dfs(0, set(winning), agents, strategy_comparer.basic_h)
elif heuristic == 1:
(result, strategy) = strategy_comparer.domino_dfs(0, set(winning), agents, strategy_comparer.control_h)
elif heuristic == 2:
(result, strategy) = strategy_comparer.domino_dfs(0, set(winning), agents, strategy_comparer.epistemic_h)
elif heuristic == 3:
(result, strategy) = strategy_comparer.domino_dfs(0, set(winning), agents, strategy_comparer.visited_states_h)
if result:
print("1")
else:
print("0")
print(simple_voting.model.js_dump_strategy_objective(strategy))
#!/usr/bin/env python3
import sys
TILE = [' ', 'U', 'R', 'D', 'L', '#', 'S', 'S', 'S', 'S']
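# TILE maps the digit characters '0'-'9' (ASCII 48-57) in the input file to tile
# glyphs; every other character is passed through unchanged by the expression below.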
with open(sys.argv[1], 'r') as fd:
txt = fd.read()
print(*[(TILE[ord(i) - 48] if 48 <=ord(i) < 58 else i) for i in txt], sep='')

from mgt.datamanagers.data_manager import Dictionary
class DictionaryGenerator(object):
@staticmethod
def create_dictionary() -> Dictionary:
"""
Creates a dictionary for a REMI-like mapping of midi events.
"""
dictionary = [{}, {}]
def append_to_dictionary(word):
if word not in dictionary[0]:
offset = len(dictionary[0])
dictionary[0].update({word: offset})
dictionary[1].update({offset: word})
# First word is reserved for padding
append_to_dictionary("pad")
append_to_dictionary("mask")
append_to_dictionary("start-track")
append_to_dictionary("end-track")
# Instrument indicates the midi instrument program value 0-127
# and value 128 reserved for instruments with is_drum = true
for i in range(129):
append_to_dictionary(f"program_{i}")
# Midi pitch value between 0-127
for i in range(128):
append_to_dictionary(f"note_{i}")
# Duration indicates the duration of a note in 1/32th note intervals (1-128)
for i in range(128):
append_to_dictionary(f"duration_{i + 1}")
# Time shift in 1/32th note intervals (1-128)
for i in range(128):
append_to_dictionary(f"time-shift_{i + 1}")
# Velocity is a value between 0-127, which we divide into 32 bins
for i in range(32):
append_to_dictionary(f"velocity_{i}")
# Tempo is a value between 10-200 divided into bins of 5 (so 1-40)
# for i in range(20):
# append_to_dictionary(f"tempo_{i + 1}")
return Dictionary(dictionary[0], dictionary[1])
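if __name__ == '__main__':
    # Minimal usage sketch: build the REMI-like vocabulary defined above. The
    # mgt Dictionary API beyond its constructor is not shown here, so the object
    # is only created and printed.
    vocabulary = DictionaryGenerator.create_dictionary()
    print(vocabulary)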

from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
name = models.CharField(_("Name of User"), blank=True, max_length=255)
class Meta:
verbose_name = _("user")
verbose_name_plural = _("users")
def __str__(self):
return self.username
def get_absolute_url(self):
return reverse("user_detail", kwargs={"username": self.username})

# Repo: anisayari/pywikibot, file: pywikibot/families/commons_family.py
# -*- coding: utf-8 -*-
"""Family module for Wikimedia Commons."""
#
# (C) Pywikibot team, 2005-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
from pywikibot import family
# The Wikimedia Commons family
class Family(family.WikimediaFamily):
"""Family class for Wikimedia Commons."""
name = 'commons'
langs = {
'commons': 'commons.wikimedia.org',
'beta': 'commons.wikimedia.beta.wmflabs.org'
}
interwiki_forward = 'wikipedia'
# Templates that indicate a category redirect
# Redirects to these templates are automatically included
category_redirect_templates = {
'_default': (
'Category redirect',
'Synonym taxon category redirect',
'Invalid taxon category redirect',
'Monotypic taxon category redirect',
'Endashcatredirect',
),
}
# Subpages for documentation.
doc_subpages = {
'_default': (('/doc', ), ['commons']),
}

from __future__ import division
import glob
import numpy as NP
from functools import reduce
import numpy.ma as MA
import progressbar as PGB
import h5py
import healpy as HP
import warnings
import copy
import astropy.cosmology as CP
from astropy.time import Time, TimeDelta
from astropy.io import fits
from astropy import units as U
from astropy import constants as FCNST
from scipy import interpolate
from astroutils import DSP_modules as DSP
from astroutils import constants as CNST
from astroutils import nonmathops as NMO
from astroutils import mathops as OPS
from astroutils import lookup_operations as LKP
import prisim
from prisim import interferometry as RI
from prisim import primary_beams as PB
from prisim import delay_spectrum as DS
try:
from pyuvdata import UVBeam
except ImportError:
uvbeam_module_found = False
else:
uvbeam_module_found = True
prisim_path = prisim.__path__[0]+'/'
cosmoPlanck15 = CP.Planck15 # Planck 2015 cosmology
cosmo100 = cosmoPlanck15.clone(name='Modified Planck 2015 cosmology with h=1.0', H0=100.0) # Modified Planck 2015 cosmology with h=1.0, H= 100 km/s/Mpc
################################################################################
def write_PRISim_bispectrum_phase_to_npz(infile_prefix, outfile_prefix,
triads=None, bltriplet=None,
hdf5file_prefix=None, infmt='npz',
datakey='noisy', blltol=0.1):
"""
----------------------------------------------------------------------------
Write closure phases computed in a PRISim simulation to a NPZ file with
appropriate format for further analysis.
Inputs:
infile_prefix
[string] HDF5 file or NPZ file created by a PRISim simulation or
its replication respectively. If infmt is specified as 'hdf5',
then hdf5file_prefix will be ignored and all the observing
info will be read from here. If infmt is specified as 'npz',
then hdf5file_prefix needs to be specified in order to read the
observing parameters.
triads [list or numpy array or None] Antenna triads given as a list of
3-element lists or a ntriads x 3 array. Each element in the
inner list is an antenna label. They will be converted to
strings internally. If set to None, then all triads determined
by bltriplet will be used. If specified, then inputs in blltol
and bltriplet will be ignored.
bltriplet [numpy array or None] 3x3 numpy array containing the 3 baseline
vectors. The first axis denotes the three baselines, the second
axis denotes the East, North, Up coordinates of the baseline
vector. Units are in m. Will be used only if triads is set to
None.
outfile_prefix
[string] Prefix of the NPZ file. It will be appended by
'_noiseless', '_noisy', and '_noise' and further by extension
'.npz'
infmt [string] Format of the input file containing visibilities.
Accepted values are 'npz' (default), and 'hdf5'. If infmt is
specified as 'npz', then hdf5file_prefix also needs to be
specified for reading the observing parameters
datakey [string] Specifies which -- 'noiseless', 'noisy' (default), or
'noise' -- visibilities are to be written to the output. If set
to None, and infmt is 'hdf5', then all three sets of
visibilities are written. The datakey string will also be added
as a suffix in the output file.
blltol [scalar] Baseline length tolerance (in m) for matching baseline
vectors in triads. It must be a scalar. Default = 0.1 m. Will
be used only if triads is set to None and bltriplet is to be
used.
----------------------------------------------------------------------------
"""
if not isinstance(infile_prefix, str):
raise TypeError('Input infile_prefix must be a string')
if not isinstance(outfile_prefix, str):
raise TypeError('Input outfile_prefix must be a string')
if (triads is None) and (bltriplet is None):
raise ValueError('One of triads or bltriplet must be set')
if triads is None:
if not isinstance(bltriplet, NP.ndarray):
raise TypeError('Input bltriplet must be a numpy array')
if not isinstance(blltol, (int,float)):
raise TypeError('Input blltol must be a scalar')
if bltriplet.ndim != 2:
raise ValueError('Input bltriplet must be a 2D numpy array')
if bltriplet.shape[0] != 3:
raise ValueError('Input bltriplet must contain three baseline vectors')
if bltriplet.shape[1] != 3:
            raise ValueError('Input bltriplet must contain baseline vectors along three coordinates in the ENU frame')
else:
if not isinstance(triads, (list, NP.ndarray)):
raise TypeError('Input triads must be a list or numpy array')
triads = NP.asarray(triads).astype(str)
if not isinstance(infmt, str):
raise TypeError('Input infmt must be a string')
if infmt.lower() not in ['npz', 'hdf5']:
raise ValueError('Input file format must be npz or hdf5')
if infmt.lower() == 'npz':
if not isinstance(hdf5file_prefix, str):
raise TypeError('If infmt is npz, then hdf5file_prefix needs to be specified for observing parameters information')
if datakey is None:
datakey = ['noisy']
if isinstance(datakey, str):
datakey = [datakey]
elif not isinstance(datakey, list):
raise TypeError('Input datakey must be a list')
for dkey in datakey:
if dkey.lower() not in ['noiseless', 'noisy', 'noise']:
raise ValueError('Invalid input found in datakey')
if infmt.lower() == 'hdf5':
fullfnames_with_extension = glob.glob(infile_prefix + '*' + infmt.lower())
fullfnames_without_extension = [fname.split('.hdf5')[0] for fname in fullfnames_with_extension]
else:
fullfnames_without_extension = [infile_prefix]
if len(fullfnames_without_extension) == 0:
raise IOError('No input files found with pattern {0}'.format(infile_prefix))
try:
if infmt.lower() == 'hdf5':
simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[0])
else:
simvis = RI.InterferometerArray(None, None, None, init_file=hdf5file_prefix)
except:
raise IOError('Input PRISim file does not contain a valid PRISim output')
latitude = simvis.latitude
longitude = simvis.longitude
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
last = simvis.lst / 15.0 / 24.0 # from degrees to fraction of day
last = last.reshape(-1,1)
daydata = NP.asarray(simvis.timestamp[0]).ravel()
if infmt.lower() == 'npz':
simvisinfo = NP.load(fullfnames_without_extension[0]+'.'+infmt.lower())
skyvis = simvisinfo['noiseless'][0,...]
vis = simvisinfo['noisy']
noise = simvisinfo['noise']
n_realize = vis.shape[0]
else:
n_realize = len(fullfnames_without_extension)
cpdata = {}
outfile = {}
for fileind in range(n_realize):
if infmt.lower() == 'npz':
simvis.vis_freq = vis[fileind,...]
simvis.vis_noise_freq = noise[fileind,...]
else:
simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[fileind])
if fileind == 0:
if triads is None:
triads, bltriplets = simvis.getThreePointCombinations(unique=False)
# triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3)
# bltriplets = NP.asarray(prisim_BSP_info['baseline_triplets'])
triads = NP.asarray(triads).reshape(-1,3)
bltriplets = NP.asarray(bltriplets)
blinds = []
matchinfo = LKP.find_NN(bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol)
revind = []
for blnum in NP.arange(bltriplet.shape[0]):
if len(matchinfo[0][blnum]) == 0:
revind += [blnum]
if len(revind) > 0:
flip_factor = NP.ones(3, dtype=NP.float)
flip_factor[NP.array(revind)] = -1
rev_bltriplet = bltriplet * flip_factor.reshape(-1,1)
matchinfo = LKP.find_NN(rev_bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol)
for blnum in NP.arange(bltriplet.shape[0]):
if len(matchinfo[0][blnum]) == 0:
raise ValueError('Some baselines in the triplet are not found in the model triads')
triadinds = []
for blnum in NP.arange(bltriplet.shape[0]):
triadind, blind = NP.unravel_index(NP.asarray(matchinfo[0][blnum]), (bltriplets.shape[0], bltriplets.shape[1]))
triadinds += [triadind]
triadind_intersection = NP.intersect1d(triadinds[0], NP.intersect1d(triadinds[1], triadinds[2]))
if triadind_intersection.size == 0:
raise ValueError('Specified triad not found in the PRISim model. Try other permutations of the baseline vectors and/or reverse individual baseline vectors in the triad before giving up.')
triads = triads[triadind_intersection,:]
selected_bltriplets = bltriplets[triadind_intersection,:,:].reshape(-1,3,3)
prisim_BSP_info = simvis.getClosurePhase(antenna_triplets=triads.tolist(),
delay_filter_info=None,
specsmooth_info=None,
spectral_window_info=None,
unique=False)
if fileind == 0:
            triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3) # Re-establish the triads returned after the first iteration (to account for any order flips)
for outkey in datakey:
if fileind == 0:
outfile[outkey] = outfile_prefix + '_{0}.npz'.format(outkey)
if outkey == 'noiseless':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...]
cpdata[outkey] = prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...]), axis=0)
if outkey == 'noisy':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...]
cpdata[outkey] = prisim_BSP_info['closure_phase_vis'][NP.newaxis,...]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_vis'][NP.newaxis,...]), axis=0)
if outkey == 'noise':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:]
cpdata[outkey] = prisim_BSP_info['closure_phase_noise'][NP.newaxis,:,:]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_noise'][NP.newaxis,...]), axis=0)
for outkey in datakey:
cpdata[outkey] = NP.rollaxis(cpdata[outkey], 3, start=0)
flagsdata = NP.zeros(cpdata[outkey].shape, dtype=NP.bool)
NP.savez_compressed(outfile[outkey], closures=cpdata[outkey],
flags=flagsdata, triads=triads,
last=last+NP.zeros((1,n_realize)),
days=daydata+NP.arange(n_realize))
################################################################################
def loadnpz(npzfile, longitude=0.0, latitude=0.0, lst_format='fracday'):
"""
----------------------------------------------------------------------------
Read an input NPZ file containing closure phase data output from CASA and
return a dictionary
Inputs:
npzfile [string] Input NPZ file including full path containing closure
phase data. It must have the following files/keys inside:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA units
which is MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is (nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard deviation
of closure phases across days. Shape is
(nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard deviation
of closure phases across triads. Shape is
(nlst,ndays,nchan)
latitude [scalar int or float] Latitude of site (in degrees).
Default=0.0 deg.
longitude [scalar int or float] Longitude of site (in degrees).
Default=0.0 deg.
lst_format [string] Specifies the format/units in which the 'last' key
is to be interpreted. If set to 'hourangle', the LST is in
units of hour angle. If set to 'fracday', the fractional
portion of the 'last' value is the LST in units of days.
Output:
cpinfo [dictionary] Contains one top-level key, namely, 'raw'
Under key 'raw' which holds a dictionary, the subkeys
include 'cphase' (nlst,ndays,ntriads,nchan),
'triads' (ntriads,3), 'lst' (nlst,ndays), and 'flags'
(nlst,ndays,ntriads,nchan), and some other optional keys
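Example:
An illustrative call (the NPZ path below is hypothetical); it returns the
dictionary structure described above:
    cpinfo = loadnpz('/path/to/closure_phases.npz', longitude=21.4278,
                     latitude=-30.7224, lst_format='fracday')
    cph = cpinfo['raw']['cphase']    # shape (nlst,ndays,ntriads,nchan)
    lstHA = cpinfo['raw']['lst']     # LST in hours, shape (nlst,ndays)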
----------------------------------------------------------------------------
"""
npzdata = NP.load(npzfile)
cpdata = npzdata['closures']
triadsdata = npzdata['triads']
flagsdata = npzdata['flags']
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd', location=location)
# lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD
if lst_format.lower() == 'hourangle':
lstHA = npzdata['last']
lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s)
elif lst_format.lower() == 'fracday':
lstfrac, lstint = NP.modf(npzdata['last'])
lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD
lstHA = lstfrac * 24.0 # in hours
else:
raise ValueError('Input lst_format invalid')
cp = cpdata.astype(NP.float64)
flags = flagsdata.astype(NP.bool)
# Optional derived products (parallels npz2hdf5()); required by the
# 'dayavg', 'std_triads', and 'std_lst' keys populated below
if 'averaged_closures' in npzdata:
    cp_dayavg = npzdata['averaged_closures'].astype(NP.float64)
if 'std_dev_triad' in npzdata:
    cp_std_triads = npzdata['std_dev_triad'].astype(NP.float64)
if 'std_dev_lst' in npzdata:
    cp_std_lst = npzdata['std_dev_lst'].astype(NP.float64)
cpinfo = {}
datapool = ['raw']
for dpool in datapool:
cpinfo[dpool] = {}
if dpool == 'raw':
qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days', 'dayavg', 'std_triads', 'std_lst']
for qty in qtys:
if qty == 'cphase':
cpinfo[dpool][qty] = NP.copy(cp)
elif qty == 'triads':
cpinfo[dpool][qty] = NP.copy(triadsdata)
elif qty == 'flags':
cpinfo[dpool][qty] = NP.copy(flags)
elif qty == 'lst':
cpinfo[dpool][qty] = NP.copy(lstHA)
elif qty == 'lst-day':
cpinfo[dpool][qty] = NP.copy(lstday.jd)
elif qty == 'days':
cpinfo[dpool][qty] = NP.copy(daydata.jd)
elif qty == 'dayavg':
if 'averaged_closures' in npzdata:
cpinfo[dpool][qty] = NP.copy(cp_dayavg)
elif qty == 'std_triads':
if 'std_dev_triad' in npzdata:
cpinfo[dpool][qty] = NP.copy(cp_std_triads)
elif qty == 'std_lst':
if 'std_dev_lst' in npzdata:
cpinfo[dpool][qty] = NP.copy(cp_std_lst)
return cpinfo
################################################################################
def npz2hdf5(npzfile, hdf5file, longitude=0.0, latitude=0.0,
lst_format='fracday'):
"""
----------------------------------------------------------------------------
Read an input NPZ file containing closure phase data output from CASA and
save it to HDF5 format
Inputs:
npzfile [string] Input NPZ file including full path containing closure
phase data. It must have the following files/keys inside:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA units
which is MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is (nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard deviation
of closure phases across days. Shape is
(nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard deviation
of closure phases across triads. Shape is
(nlst,ndays,nchan)
hdf5file [string] Output HDF5 file including full path.
latitude [scalar int or float] Latitude of site (in degrees).
Default=0.0 deg.
longitude [scalar int or float] Longitude of site (in degrees).
Default=0.0 deg.
lst_format [string] Specifies the format/units in which the 'last' key
is to be interpreted. If set to 'hourangle', the LST is in
units of hour angle. If set to 'fracday', the fractional
portion of the 'last' value is the LST in units of days.
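Example:
An illustrative call (both paths below are hypothetical) that converts a
CASA-style NPZ file into an HDF5 file with the 'raw' group layout described
above:
    npz2hdf5('/path/to/closure_phases.npz', '/path/to/closure_phases.hdf5',
             longitude=21.4278, latitude=-30.7224, lst_format='fracday')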
----------------------------------------------------------------------------
"""
npzdata = NP.load(npzfile)
cpdata = npzdata['closures']
triadsdata = npzdata['triads']
flagsdata = npzdata['flags']
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd', location=location)
# lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD
if lst_format.lower() == 'hourangle':
lstHA = npzdata['last']
lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s)
elif lst_format.lower() == 'fracday':
lstfrac, lstint = NP.modf(npzdata['last'])
lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD
lstHA = lstfrac * 24.0 # in hours
else:
raise ValueError('Input lst_format invalid')
cp = cpdata.astype(NP.float64)
flags = flagsdata.astype(NP.bool)
if 'averaged_closures' in npzdata:
day_avg_cpdata = npzdata['averaged_closures']
cp_dayavg = day_avg_cpdata.astype(NP.float64)
if 'std_dev_triad' in npzdata:
std_triads_cpdata = npzdata['std_dev_triad']
cp_std_triads = std_triads_cpdata.astype(NP.float64)
if 'std_dev_lst' in npzdata:
std_lst_cpdata = npzdata['std_dev_lst']
cp_std_lst = std_lst_cpdata.astype(NP.float64)
with h5py.File(hdf5file, 'w') as fobj:
datapool = ['raw']
for dpool in datapool:
if dpool == 'raw':
qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days', 'dayavg', 'std_triads', 'std_lst']
for qty in qtys:
data = None
if qty == 'cphase':
data = NP.copy(cp)
elif qty == 'triads':
data = NP.copy(triadsdata)
elif qty == 'flags':
data = NP.copy(flags)
elif qty == 'lst':
data = NP.copy(lstHA)
elif qty == 'lst-day':
data = NP.copy(lstday.jd)
elif qty == 'days':
data = NP.copy(daydata.jd)
elif qty == 'dayavg':
if 'averaged_closures' in npzdata:
data = NP.copy(cp_dayavg)
elif qty == 'std_triads':
if 'std_dev_triad' in npzdata:
data = NP.copy(cp_std_triads)
elif qty == 'std_lst':
if 'std_dev_lst' in npzdata:
data = NP.copy(cp_std_lst)
if data is not None:
dset = fobj.create_dataset('{0}/{1}'.format(dpool, qty), data=data, compression='gzip', compression_opts=9)
################################################################################
def save_CPhase_cross_power_spectrum(xcpdps, outfile):
"""
----------------------------------------------------------------------------
Save cross-power spectrum information in a dictionary to a HDF5 file
Inputs:
xcpdps [dictionary] This dictionary is essentially an output of the
member function compute_power_spectrum() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains one or more of the following keys named
'whole', 'submodel', 'residual', and 'errinfo' each of which is
a dictionary. 'whole' contains power spectrum info about the
input closure phases. 'submodel' contains power spectrum info
about the model that will have been subtracted (as closure
phase) from the 'whole' model. 'residual' contains power
spectrum info about the closure phases obtained as a difference
between 'whole' and 'submodel'. It contains the following keys
and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
outfile [string] Full path to the external HDF5 file where the cross-
power spectrum information provided in xcpdps will be saved
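Example:
An illustrative call (the output path is hypothetical), where xcpdps is a
dictionary returned by compute_power_spectrum() of class
ClosurePhaseDelaySpectrum:
    save_CPhase_cross_power_spectrum(xcpdps, '/path/to/xcpdps.hdf5')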
----------------------------------------------------------------------------
"""
if not isinstance(xcpdps, dict):
raise TypeError('Input xcpdps must be a dictionary')
with h5py.File(outfile, 'w') as fileobj:
hdrgrp = fileobj.create_group('header')
hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in hdrkeys:
dset = hdrgrp.create_dataset(key, data=xcpdps[key])
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in xcpdps:
smplgrp = fileobj.create_group(smplng)
for key in sampling_keys:
dset = smplgrp.create_dataset(key, data=xcpdps[smplng][key])
for dpool in dpool_keys:
if dpool in xcpdps[smplng]:
dpoolgrp = smplgrp.create_group(dpool)
keys = ['diagoffsets', 'diagweights', 'axesmap', 'nsamples_incoh', 'nsamples_coh']
for key in keys:
if key in xcpdps[smplng][dpool]:
if isinstance(xcpdps[smplng][dpool][key], dict):
subgrp = dpoolgrp.create_group(key)
for subkey in xcpdps[smplng][dpool][key]:
dset = subgrp.create_dataset(str(subkey), data=xcpdps[smplng][dpool][key][subkey])
else:
dset = dpoolgrp.create_dataset(key, data=xcpdps[smplng][dpool][key])
for stat in ['mean', 'median']:
if stat in xcpdps[smplng][dpool]:
if isinstance(xcpdps[smplng][dpool][stat], list):
for ii in range(len(xcpdps[smplng][dpool][stat])):
dset = dpoolgrp.create_dataset(stat+'/diagcomb_{0}'.format(ii), data=xcpdps[smplng][dpool][stat][ii].si.value)
dset.attrs['units'] = str(xcpdps[smplng][dpool][stat][ii].si.unit)
else:
dset = dpoolgrp.create_dataset(stat, data=xcpdps[smplng][dpool][stat].si.value)
dset.attrs['units'] = str(xcpdps[smplng][dpool][stat].si.unit)
################################################################################
def read_CPhase_cross_power_spectrum(infile):
"""
----------------------------------------------------------------------------
Read information about cross power spectrum from an external HDF5 file into
a dictionary. This is the counterpart to save_CPhase_cross_power_spectrum()
Input:
infile [string] Full path to the external HDF5 file that contains info
about cross-power spectrum.
Output:
xcpdps [dictionary] This dictionary has structure the same as output
of the member function compute_power_spectrum() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains one or more of the following keys named
'whole', 'submodel', 'residual', and 'errinfo' each of which is
a dictionary. 'whole' contains power spectrum info about the
input closure phases. 'submodel' contains power spectrum info
about the model that will have been subtracted (as closure
phase) from the 'whole' model. 'residual' contains power
spectrum info about the closure phases obtained as a difference
between 'whole' and 'submodel'. It contains the following keys
and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
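Example:
An illustrative call (the input path is hypothetical) that recovers the
dictionary written by save_CPhase_cross_power_spectrum():
    xcpdps = read_CPhase_cross_power_spectrum('/path/to/xcpdps.hdf5')
    ps = xcpdps['resampled']['whole']['mean']   # if present in the file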
----------------------------------------------------------------------------
"""
if not isinstance(infile, str):
raise TypeError('Input infile must be a string')
xcpdps = {}
with h5py.File(infile, 'r') as fileobj:
hdrgrp = fileobj['header']
hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in hdrkeys:
xcpdps[key] = hdrgrp[key].value
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in fileobj:
smplgrp = fileobj[smplng]
xcpdps[smplng] = {}
for key in sampling_keys:
xcpdps[smplng][key] = smplgrp[key].value
for dpool in dpool_keys:
if dpool in smplgrp:
xcpdps[smplng][dpool] = {}
dpoolgrp = smplgrp[dpool]
keys = ['diagoffsets', 'diagweights', 'axesmap', 'nsamples_incoh', 'nsamples_coh']
for key in keys:
if key in dpoolgrp:
if isinstance(dpoolgrp[key], h5py.Group):
xcpdps[smplng][dpool][key] = {}
for subkey in dpoolgrp[key]:
xcpdps[smplng][dpool][key][int(subkey)] = dpoolgrp[key][subkey].value
elif isinstance(dpoolgrp[key], h5py.Dataset):
xcpdps[smplng][dpool][key] = dpoolgrp[key].value
else:
raise TypeError('Invalid h5py data type encountered')
for stat in ['mean', 'median']:
if stat in dpoolgrp:
if isinstance(dpoolgrp[stat], h5py.Dataset):
valunits = dpoolgrp[stat].attrs['units']
xcpdps[smplng][dpool][stat] = dpoolgrp[stat].value * U.Unit(valunits)
elif isinstance(dpoolgrp[stat], h5py.Group):
xcpdps[smplng][dpool][stat] = []
for diagcomb_ind in range(len(dpoolgrp[stat].keys())):
if 'diagcomb_{0}'.format(diagcomb_ind) in dpoolgrp[stat]:
valunits = dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)].attrs['units']
xcpdps[smplng][dpool][stat] += [dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)].value * U.Unit(valunits)]
return xcpdps
################################################################################
def incoherent_cross_power_spectrum_average(xcpdps, excpdps=None, diagoffsets=None):
"""
----------------------------------------------------------------------------
Perform incoherent averaging of cross power spectrum along specified axes
Inputs:
xcpdps [dictionary or list of dictionaries] If provided as a list of
dictionaries, each dictionary consists of cross power spectral
information coming possibly from different sources, and they
will be averaged incoherently. If a single
dictionary is provided instead of a list of dictionaries, the
said averaging does not take place. Each dictionary is
essentially an output of the member function
compute_power_spectrum() of class ClosurePhaseDelaySpectrum. It
has the following key-value structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and
'residual' each of which is a dictionary. 'whole' contains power
spectrum info about the input closure phases. 'submodel'
contains power spectrum info about the model that will have been
subtracted (as closure phase) from the 'whole' model. 'residual'
contains power spectrum info about the closure phases obtained
as a difference between 'whole' and 'submodel'. It contains the
following keys and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
excpdps [dictionary or list of dictionaries] If provided as a list of
dictionaries, each dictionary consists of cross power spectral
information of subsample differences coming possibly from
different sources, and they will be averaged
incoherently. This is optional. If not set (default=None), no
incoherent averaging happens. If a single dictionary is provided
instead of a list of dictionaries, the said averaging does not
take place. Each dictionary is essentially an output of the
member function compute_power_spectrum_uncertainty() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndaycomb,) array), 'day_ind' ((ndaycomb,)
array), 'dday' ((ndaycomb,) array), 'oversampled' and
'resampled' corresponding to whether resample was set to False
or True in call to member function FT(). Values under keys
'triads_ind' and 'lst_ind' are numpy array corresponding to
triad and time indices used in selecting the data. Values under
keys 'oversampled' and 'resampled' each contain a dictionary
with the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz) of
the frequency subbands of the subband delay spectra. It
is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform.
It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains a key named 'errinfo' which is a dictionary.
It contains information about power spectrum uncertainties
obtained from subsample differences. It contains the following
keys and values:
'mean' [numpy array] Delay power spectrum uncertainties
incoherently estimated over the axes specified in
xinfo['axes'] using the 'mean' key in input cpds or
attribute cPhaseDS['errinfo']['dspec']. It has shape
that depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum uncertainties
incoherently averaged over the axes specified in incohax
using the 'median' key in input cpds or attribute
cPhaseDS['errinfo']['dspec']. It has shape that depends
on the combination of input parameters. See examples
below. If both collapse_axes and avgcov are not set,
those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal offsets
for those axes. If 'avgcov' was set, those entries will
be removed from 'diagoffsets' since all the leading
diagonal elements have been collapsed (averaged) further.
Value under each key is a numpy array where each element
in the array corresponds to the index of that leading
diagonal. This should match the size of the output along
that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
diagoffsets [NoneType or dictionary or list of dictionaries] This info is
used for incoherent averaging along specified diagonals along
specified axes. This incoherent averaging is performed after
incoherently averaging multiple cross-power spectra (if any).
If set to None, this incoherent averaging is not performed.
Many combinations of axes and diagonals can be specified as
individual dictionaries in a list. If only one dictionary is
specified, then it assumed that only one combination of axes
and diagonals is requested. If a list of dictionaries is given,
each dictionary in the list specifies a different combination
for incoherent averaging. Each dictionary should have the
following key-value pairs. The key is the axis number (allowed
values are 1, 2, 3) that denote the axis type (1=LST, 2=Days,
3=Triads to be averaged), and the value under these keys is a
list or numpy array of diagonals to be averaged incoherently.
These axes-diagonal combinations apply to both the inputs
xcpdps and excpdps, except axis=2 does not apply to excpdps
(since it is made of subsample differences already) and will be
skipped.
Outputs:
A tuple consisting of two dictionaries. The first dictionary contains the
incoherent averaging of xcpdps as specified by the inputs, while the second
consists of the incoherent average of excpdps as specified by the inputs.
The structure
of these dictionaries are practically the same as the dictionary inputs
xcpdps and excpdps respectively. The only differences in dictionary
structure are:
* Under key ['oversampled'/'resampled']['whole'/'submodel'/'residual'
/'errinfo']['mean'/'median'] is a list of numpy arrays, where each
array in the list corresponds to the dictionary in the list in input
diagoffsets that defines the axes-diagonal combination.
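Example:
An illustrative call (variable names are hypothetical). Two cross power
spectra (and their uncertainty counterparts) are first averaged incoherently
with each other, and then averaged over the zeroth diagonal of the LST
(axis 1) and triad (axis 3) axes:
    diagoffsets = [{1: NP.asarray([0]), 3: NP.asarray([0])}]
    avg_xcpdps, avg_excpdps = incoherent_cross_power_spectrum_average(
        [xcpdps1, xcpdps2], excpdps=[excpdps1, excpdps2],
        diagoffsets=diagoffsets)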
----------------------------------------------------------------------------
"""
if isinstance(xcpdps, dict):
xcpdps = [xcpdps]
if not isinstance(xcpdps, list):
raise TypeError('Invalid data type provided for input xcpdps')
if excpdps is not None:
if isinstance(excpdps, dict):
excpdps = [excpdps]
if not isinstance(excpdps, list):
raise TypeError('Invalid data type provided for input excpdps')
if len(xcpdps) != len(excpdps):
raise ValueError('Inputs xcpdps and excpdps found to have unequal number of values')
out_xcpdps = {'triads': xcpdps[0]['triads'], 'triads_ind': xcpdps[0]['triads_ind'], 'lst': xcpdps[0]['lst'], 'lst_ind': xcpdps[0]['lst_ind'], 'dlst': xcpdps[0]['dlst'], 'days': xcpdps[0]['days'], 'day_ind': xcpdps[0]['day_ind'], 'dday': xcpdps[0]['dday']}
out_excpdps = None
if excpdps is not None:
out_excpdps = {'triads': excpdps[0]['triads'], 'triads_ind': excpdps[0]['triads_ind'], 'lst': excpdps[0]['lst'], 'lst_ind': excpdps[0]['lst_ind'], 'dlst': excpdps[0]['dlst'], 'days': excpdps[0]['days'], 'day_ind': excpdps[0]['day_ind'], 'dday': excpdps[0]['dday']}
for smplng in ['oversampled', 'resampled']:
if smplng in xcpdps[0]:
out_xcpdps[smplng] = {'z': xcpdps[0][smplng]['z'], 'kprll': xcpdps[0][smplng]['kprll'], 'lags': xcpdps[0][smplng]['lags'], 'freq_center': xcpdps[0][smplng]['freq_center'], 'bw_eff': xcpdps[0][smplng]['bw_eff'], 'shape': xcpdps[0][smplng]['shape'], 'freq_wts': xcpdps[0][smplng]['freq_wts'], 'lag_corr_length': xcpdps[0][smplng]['lag_corr_length']}
if excpdps is not None:
out_excpdps[smplng] = {'z': excpdps[0][smplng]['z'], 'kprll': excpdps[0][smplng]['kprll'], 'lags': excpdps[0][smplng]['lags'], 'freq_center': excpdps[0][smplng]['freq_center'], 'bw_eff': excpdps[0][smplng]['bw_eff'], 'shape': excpdps[0][smplng]['shape'], 'freq_wts': excpdps[0][smplng]['freq_wts'], 'lag_corr_length': excpdps[0][smplng]['lag_corr_length']}
for dpool in ['whole', 'submodel', 'residual']:
if dpool in xcpdps[0][smplng]:
out_xcpdps[smplng][dpool] = {'diagoffsets': xcpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': xcpdps[0][smplng][dpool]['axesmap']}
for stat in ['mean', 'median']:
if stat in xcpdps[0][smplng][dpool]:
out_xcpdps[smplng][dpool][stat] = {}
arr = []
diagweights = []
for i in range(len(xcpdps)):
arr += [xcpdps[i][smplng][dpool][stat].si.value]
arr_units = xcpdps[i][smplng][dpool][stat].si.unit
if isinstance(xcpdps[i][smplng][dpool]['diagweights'], dict):
diagwts = 1.0
diagwts_shape = NP.ones(xcpdps[i][smplng][dpool][stat].ndim, dtype=NP.int)
for ax in xcpdps[i][smplng][dpool]['diagweights']:
tmp_shape = NP.copy(diagwts_shape)
tmp_shape[xcpdps[i][smplng][dpool]['axesmap'][ax]] = xcpdps[i][smplng][dpool]['diagweights'][ax].size
diagwts = diagwts * xcpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape))
elif isinstance(xcpdps[i][smplng][dpool]['diagweights'], NP.ndarray):
diagwts = NP.copy(xcpdps[i][smplng][dpool]['diagweights'])
else:
raise TypeError('Diagonal weights in input must be a dictionary or a numpy array')
diagweights += [diagwts]
diagweights = NP.asarray(diagweights)
arr = NP.asarray(arr)
arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units
diagweights = NP.nansum(diagweights, axis=0)
out_xcpdps[smplng][dpool][stat] = arr
out_xcpdps[smplng][dpool]['diagweights'] = diagweights
for dpool in ['errinfo']:
if dpool in excpdps[0][smplng]:
out_excpdps[smplng][dpool] = {'diagoffsets': excpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': excpdps[0][smplng][dpool]['axesmap']}
for stat in ['mean', 'median']:
if stat in excpdps[0][smplng][dpool]:
out_excpdps[smplng][dpool][stat] = {}
arr = []
diagweights = []
for i in range(len(excpdps)):
arr += [excpdps[i][smplng][dpool][stat].si.value]
arr_units = excpdps[i][smplng][dpool][stat].si.unit
if isinstance(excpdps[i][smplng][dpool]['diagweights'], dict):
diagwts = 1.0
diagwts_shape = NP.ones(excpdps[i][smplng][dpool][stat].ndim, dtype=NP.int)
for ax in excpdps[i][smplng][dpool]['diagweights']:
tmp_shape = NP.copy(diagwts_shape)
tmp_shape[excpdps[i][smplng][dpool]['axesmap'][ax]] = excpdps[i][smplng][dpool]['diagweights'][ax].size
diagwts = diagwts * excpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape))
elif isinstance(excpdps[i][smplng][dpool]['diagweights'], NP.ndarray):
diagwts = NP.copy(excpdps[i][smplng][dpool]['diagweights'])
else:
raise TypeError('Diagonal weights in input must be a dictionary or a numpy array')
diagweights += [diagwts]
diagweights = NP.asarray(diagweights)
arr = NP.asarray(arr)
arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units
diagweights = NP.nansum(diagweights, axis=0)
out_excpdps[smplng][dpool][stat] = arr
out_excpdps[smplng][dpool]['diagweights'] = diagweights
if diagoffsets is not None:
if isinstance(diagoffsets, dict):
diagoffsets = [diagoffsets]
if not isinstance(diagoffsets, list):
raise TypeError('Input diagoffsets must be a list of dictionaries')
for ind in range(len(diagoffsets)):
for ax in diagoffsets[ind]:
if not isinstance(diagoffsets[ind][ax], (list, NP.ndarray)):
raise TypeError('Values in input dictionary diagoffsets must be a list or numpy array')
diagoffsets[ind][ax] = NP.asarray(diagoffsets[ind][ax])
for smplng in ['oversampled', 'resampled']:
if smplng in out_xcpdps:
for dpool in ['whole', 'submodel', 'residual']:
if dpool in out_xcpdps[smplng]:
masks = []
for ind in range(len(diagoffsets)):
mask_ones = NP.ones(out_xcpdps[smplng][dpool]['diagweights'].shape, dtype=NP.bool)
mask_agg = None
for ax in diagoffsets[ind]:
mltdim_slice = [slice(None)] * mask_ones.ndim
mltdim_slice[out_xcpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_xcpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0]
mask_tmp = NP.copy(mask_ones)
mask_tmp[tuple(mltdim_slice)] = False
if mask_agg is None:
mask_agg = NP.copy(mask_tmp)
else:
mask_agg = NP.logical_or(mask_agg, mask_tmp)
masks += [NP.copy(mask_agg)]
diagwts = NP.copy(out_xcpdps[smplng][dpool]['diagweights'])
out_xcpdps[smplng][dpool]['diagweights'] = []
for stat in ['mean', 'median']:
if stat in out_xcpdps[smplng][dpool]:
arr = NP.copy(out_xcpdps[smplng][dpool][stat].si.value)
arr_units = out_xcpdps[smplng][dpool][stat].si.unit
out_xcpdps[smplng][dpool][stat] = []
for ind in range(len(diagoffsets)):
masked_diagwts = MA.array(diagwts, mask=masks[ind])
axes_to_avg = tuple([out_xcpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind]])
out_xcpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units]
if len(out_xcpdps[smplng][dpool]['diagweights']) < len(diagoffsets):
out_xcpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)]
if excpdps is not None:
for smplng in ['oversampled', 'resampled']:
if smplng in out_excpdps:
for dpool in ['errinfo']:
if dpool in out_excpdps[smplng]:
masks = []
for ind in range(len(diagoffsets)):
mask_ones = NP.ones(out_excpdps[smplng][dpool]['diagweights'].shape, dtype=NP.bool)
mask_agg = None
for ax in diagoffsets[ind]:
if ax != 2:
mltdim_slice = [slice(None)] * mask_ones.ndim
mltdim_slice[out_excpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_excpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0]
mask_tmp = NP.copy(mask_ones)
mask_tmp[tuple(mltdim_slice)] = False
if mask_agg is None:
mask_agg = NP.copy(mask_tmp)
else:
mask_agg = NP.logical_or(mask_agg, mask_tmp)
masks += [NP.copy(mask_agg)]
diagwts = NP.copy(out_excpdps[smplng][dpool]['diagweights'])
out_excpdps[smplng][dpool]['diagweights'] = []
for stat in ['mean', 'median']:
if stat in out_excpdps[smplng][dpool]:
arr = NP.copy(out_excpdps[smplng][dpool][stat].si.value)
arr_units = out_excpdps[smplng][dpool][stat].si.unit
out_excpdps[smplng][dpool][stat] = []
for ind in range(len(diagoffsets)):
masked_diagwts = MA.array(diagwts, mask=masks[ind])
axes_to_avg = tuple([out_excpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind] if ax!=2])
out_excpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units]
if len(out_excpdps[smplng][dpool]['diagweights']) < len(diagoffsets):
out_excpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)]
return (out_xcpdps, out_excpdps)
################################################################################
def incoherent_kbin_averaging(xcpdps, kbins=None, num_kbins=None, kbintype='log'):
"""
----------------------------------------------------------------------------
Averages the power spectrum incoherently by binning in bins of k. Returns
the power spectrum in units of both standard power spectrum and \Delta^2
Inputs:
xcpdps [dictionary] A dictionary that contains the incoherent averaged
power spectrum along LST and/or triads axes. This dictionary is
essentially the one(s) returned as the output of the function
incoherent_cross_power_spectrum_average()
kbins [NoneType, list or numpy array] Bins in k. If set to None
(default), it will be determined automatically based on the
inputs in num_kbins, and kbintype. If num_kbins is None and
kbintype='linear', the negative and positive values of k are
folded into a one-sided power spectrum. In this case, the
bins will approximately have the same resolution as the k-values
in the input power spectrum for all the spectral windows.
num_kbins [NoneType or integer] Number of k-bins. Used only if kbins is
set to None. If kbintype is set to 'linear', the negative and
positive values of k are folded into a one-sided power spectrum.
In this case, the bins will approximately have the same
resolution as the k-values in the input power spectrum for all
the spectral windows.
kbintype [string] Specifies the type of binning, used only if kbins is
set to None. Accepted values are 'linear' and 'log' for linear
and logarithmic bins respectively.
Outputs:
Dictionary containing the power spectrum information. At the top level, it
contains keys specifying the sampling to be 'oversampled' or 'resampled'.
Under each of these keys is another dictionary containing the following
keys:
'z' [numpy array] Redshifts corresponding to the band centers in
'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,).
'freq_center'
[numpy array] contains the center frequencies (in Hz) of the
frequency subbands of the subband delay spectra. It is of size
n_win. It is roughly equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform. It is
of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz) of the
subbands being delay transformed. It is of size n_win. It is
roughly equivalent to width in redshift or along line-of-sight
'shape' [string] shape of the frequency window function applied. Usual
values are 'rect' (rectangular), 'bhw' (Blackman-Harris),
'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was raised.
The value must be a positive scalar with default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in pixels) of
the subband delay spectra. It is proportional to inverse of
effective bandwidth. It is of size n_win. The unit size of a
pixel is determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is effectively inverse
of the effective bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and 'residual'
or one key named 'errinfo' each of which is a dictionary. 'whole'
contains power spectrum info about the input closure phases. 'submodel'
contains power spectrum info about the model that will have been
subtracted (as closure phase) from the 'whole' model. 'residual'
contains power spectrum info about the closure phases obtained as a
difference between 'whole' and 'submodel'. 'errinfo' contains power
spectrum information about the subsample differences. There is also
another dictionary under key 'kbininfo' that contains information about
k-bins. These dictionaries contain the following keys and values:
'whole'/'submodel'/'residual'/'errinfo'
[dictionary] It contains the following keys and values:
'mean' [dictionary] Delay power spectrum information under the
'mean' statistic incoherently obtained by averaging the
input power spectrum in bins of k. It contains output power
spectrum expressed as two quantities each of which is a
dictionary with the following key-value pairs:
'PS' [list of numpy arrays] Standard power spectrum in
units of 'K2 Mpc3'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'Del2' [list of numpy arrays] power spectrum in Delta^2
units of 'K2'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'median'
[dictionary] Delay power spectrum information under the
'median' statistic incoherently obtained by averaging the
input power spectrum in bins of k. It contains output power
spectrum expressed as two quantities each of which is a
dictionary with the following key-value pairs:
'PS' [list of numpy arrays] Standard power spectrum in
units of 'K2 Mpc3'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'Del2' [list of numpy arrays] power spectrum in Delta^2
units of 'K2'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'kbininfo'
[dictionary] Contains the k-bin information. It contains the
following key-value pairs:
'counts'
[list] List of numpy arrays where each numpy array stores
the counts in the determined k-bins. Each numpy array in the
list corresponds to a spectral window (redshift subband). The
shape of each numpy array is (nkbins,)
'kbin_edges'
[list] List of numpy arrays where each numpy array contains the
k-bin edges. Each array in the list corresponds to a spectral
window (redshift subband). The shape of each array is
(nkbins+1,).
'kbinnum'
[list] List of numpy arrays containing the bin number under
which the k value falls. Each array in the list corresponds to
a spectral window (redshift subband). The shape of each array
is (nlags,).
'ri'
[list] List of numpy arrays containing the reverse indices for
each k-bin. Each array in the list corresponds to a spectral
window (redshift subband). The shape of each array is
(nlags+nkbins+1,).
'whole'/'submodel'/'residual' or 'errinfo' [dictionary] k-bin info
estimated for the different datapools under different stats
and PS definitions. It has the keys 'mean' and 'median' for the
mean and median statistic respectively. Each of them contain a
dictionary with the following key-value pairs:
'PS' [list] List of numpy arrays where each numpy array
contains a standard power spectrum typically in units of
'K2 Mpc3'. Its shape is the same as input power spectrum
except the k-axis which now has nkbins number of
elements.
'Del2' [list] List of numpy arrays where each numpy array
contains a Delta^2 power spectrum typically in units of
'K2'. Its shape is the same as input power spectrum
except the k-axis which now has nkbins number of
elements.
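Example:
An illustrative call (the variable name is hypothetical), where avg_xcpdps is
an output of incoherent_cross_power_spectrum_average(). It bins the power
spectrum into 10 logarithmically spaced k-bins:
    psinfo = incoherent_kbin_averaging(avg_xcpdps, kbins=None, num_kbins=10,
                                       kbintype='log')
    Del2 = psinfo['resampled']['whole']['mean']['Del2']   # list of arrays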
----------------------------------------------------------------------------
"""
if not isinstance(xcpdps, dict):
raise TypeError('Input xcpdps must be a dictionary')
if kbins is not None:
if not isinstance(kbins, (list,NP.ndarray)):
raise TypeError('Input kbins must be a list or numpy array')
else:
if not isinstance(kbintype, str):
raise TypeError('Input kbintype must be a string')
if kbintype.lower() not in ['linear', 'log']:
raise ValueError('Input kbintype must be set to "linear" or "log"')
if kbintype.lower() == 'log':
if num_kbins is None:
num_kbins = 10
psinfo = {}
keys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in keys:
psinfo[key] = xcpdps[key]
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in xcpdps:
psinfo[smplng] = {}
for key in sampling_keys:
psinfo[smplng][key] = xcpdps[smplng][key]
kprll = xcpdps[smplng]['kprll']
lags = xcpdps[smplng]['lags']
eps = 1e-10
if kbins is None:
dkprll = NP.max(NP.mean(NP.diff(kprll, axis=-1), axis=-1))
if kbintype.lower() == 'linear':
bins_kprll = NP.linspace(eps, NP.abs(kprll).max()+eps, num=kprll.shape[1]//2+1, endpoint=True)
else:
bins_kprll = NP.geomspace(eps, NP.abs(kprll).max()+eps, num=num_kbins+1, endpoint=True)
bins_kprll = NP.insert(bins_kprll, 0, -eps)
else:
bins_kprll = NP.asarray(kbins)
num_kbins = bins_kprll.size - 1
psinfo[smplng]['kbininfo'] = {'counts': [], 'kbin_edges': [], 'kbinnum': [], 'ri': []}
for spw in range(kprll.shape[0]):
counts, kbin_edges, kbinnum, ri = OPS.binned_statistic(NP.abs(kprll[spw,:]), statistic='count', bins=bins_kprll)
counts = counts.astype(NP.int)
psinfo[smplng]['kbininfo']['counts'] += [NP.copy(counts)]
psinfo[smplng]['kbininfo']['kbin_edges'] += [kbin_edges / U.Mpc]
psinfo[smplng]['kbininfo']['kbinnum'] += [NP.copy(kbinnum)]
psinfo[smplng]['kbininfo']['ri'] += [NP.copy(ri)]
for dpool in dpool_keys:
if dpool in xcpdps[smplng]:
psinfo[smplng][dpool] = {}
psinfo[smplng]['kbininfo'][dpool] = {}
keys = ['diagoffsets', 'diagweights', 'axesmap']
for key in keys:
psinfo[smplng][dpool][key] = xcpdps[smplng][dpool][key]
for stat in ['mean', 'median']:
if stat in xcpdps[smplng][dpool]:
psinfo[smplng][dpool][stat] = {'PS': [], 'Del2': []}
psinfo[smplng]['kbininfo'][dpool][stat] = []
for combi in range(len(xcpdps[smplng][dpool][stat])):
outshape = NP.asarray(xcpdps[smplng][dpool][stat][combi].shape)
outshape[-1] = num_kbins
tmp_dps = NP.full(tuple(outshape), NP.nan, dtype=NP.complex) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit)
tmp_Del2 = NP.full(tuple(outshape), NP.nan, dtype=NP.complex) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit / U.Mpc**3)
tmp_kprll = NP.full(tuple(outshape), NP.nan, dtype=NP.float) / U.Mpc
for spw in range(kprll.shape[0]):
counts = NP.copy(psinfo[smplng]['kbininfo']['counts'][spw])
ri = NP.copy(psinfo[smplng]['kbininfo']['ri'][spw])
print('Processing datapool={0}, stat={1}, LST-Day-Triad combination={2:0d}, spw={3:0d}...'.format(dpool, stat, combi, spw))
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} k-bins '.format(num_kbins), PGB.ETA()], maxval=num_kbins).start()
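# ri holds reverse indices from the binned statistic:
# ri[ri[b]:ri[b+1]] gives the indices of |kprll| values falling in bin b.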
for binnum in range(num_kbins):
if counts[binnum] > 0:
ind_kbin = ri[ri[binnum]:ri[binnum+1]]
tmp_dps[spw,...,binnum] = NP.nanmean(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1)
k_shape = NP.ones(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1).ndim, dtype=NP.int)
k_shape[-1] = -1
tmp_Del2[spw,...,binnum] = NP.nanmean(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc)**3 * NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1) / (2*NP.pi**2)
tmp_kprll[spw,...,binnum] = NP.nansum(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc) * NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1) / NP.nansum(NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1)
progress.update(binnum+1)
progress.finish()
psinfo[smplng][dpool][stat]['PS'] += [copy.deepcopy(tmp_dps)]
psinfo[smplng][dpool][stat]['Del2'] += [copy.deepcopy(tmp_Del2)]
psinfo[smplng]['kbininfo'][dpool][stat] += [copy.deepcopy(tmp_kprll)]
return psinfo
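# Illustrative sketch (not part of the module API) of how the returned psinfo
# dictionary can be consumed downstream. The key structure follows what is
# built above; smplng, dpool, stat and combi below are placeholder names:
#     ps_binned = psinfo[smplng][dpool][stat]['PS'][combi]           # k-binned cross power spectrum
#     del2_binned = psinfo[smplng][dpool][stat]['Del2'][combi]       # k-binned Delta^2
#     kprll_binned = psinfo[smplng]['kbininfo'][dpool][stat][combi]  # weighted k_parallel per bin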
################################################################################
class ClosurePhase(object):
"""
----------------------------------------------------------------------------
Class to hold and operate on Closure Phase information.
It has the following attributes and member functions.
Attributes:
extfile [string] Full path to external file containing information
of ClosurePhase instance. The file is in HDF5 format
cpinfo [dictionary] Contains the following top level keys,
namely, 'raw', 'processed', and 'errinfo'
Under key 'raw' which holds a dictionary, the subkeys
include 'cphase' (nlst,ndays,ntriads,nchan),
'triads' (ntriads,3), 'lst' (nlst,ndays), and 'flags'
(nlst,ndays,ntriads,nchan).
Under the 'processed' key are more subkeys, namely,
'native', 'prelim', and optionally 'submodel' and 'residual'
each holding a dictionary.
Under 'native' dictionary, the subsubkeys for further
dictionaries are 'cphase' (masked array:
(nlst,ndays,ntriads,nchan)), 'eicp' (complex masked
array: (nlst,ndays,ntriads,nchan)), and 'wts' (masked
array: (nlst,ndays,ntriads,nchan)).
Under 'prelim' dictionary, the subsubkeys for further
dictionaries are 'tbins' (numpy array of tbin centers
after smoothing), 'dtbins' (numpy array of tbin
intervals), 'wts' (masked array:
(ntbins,ndays,ntriads,nchan)), 'eicp' and 'cphase'.
The dictionaries under 'eicp' are indexed by keys
'mean' (complex masked array:
(ntbins,ndays,ntriads,nchan)), and 'median' (complex
masked array: (ntbins,ndays,ntriads,nchan)).
The dictionaries under 'cphase' are indexed by keys
'mean' (masked array: (ntbins,ndays,ntriads,nchan)),
'median' (masked array: (ntbins,ndays,ntriads,nchan)),
'rms' (masked array: (ntbins,ndays,ntriads,nchan)), and
'mad' (masked array: (ntbins,ndays,ntriads,nchan)). The
last one denotes Median Absolute Deviation.
Under 'submodel' dictionary, the subsubkeys for further
dictionaries are 'cphase' (masked array:
(nlst,ndays,ntriads,nchan)), and 'eicp' (complex masked
array: (nlst,ndays,ntriads,nchan)).
Under 'residual' dictionary, the subsubkeys for further
dictionaries are 'cphase' and 'eicp'. These are
dictionaries too. The dictionaries under 'eicp' are
indexed by keys 'mean' (complex masked array:
(ntbins,ndays,ntriads,nchan)), and 'median' (complex
masked array: (ntbins,ndays,ntriads,nchan)).
The dictionaries under 'cphase' are indexed by keys
'mean' (masked array: (ntbins,ndays,ntriads,nchan)),
and 'median' (masked array:
(ntbins,ndays,ntriads,nchan)).
Under key 'errinfo', it contains the following keys and
values:
'list_of_pair_of_pairs'
List of pair of pairs for which differences of
complex exponentials have been computed, where the
elements are bins of days. The number of elements
in the list is ncomb. And each element is a smaller
(4-element) list of pair of pairs
'eicp_diff'
Difference of complex exponentials between pairs
of day bins. This will be used in evaluating noise
properties in power spectrum. It is a dictionary
with two keys '0' and '1' where each contains the
difference from a pair of subsamples. Each of these
keys contains a numpy array of shape
(nlstbins,ncomb,2,ntriads,nchan)
'wts' Weights in difference of complex exponentials
obtained by sum of squares of weights that are
associated with the pair that was used in the
differencing. It is a dictionary with two keys '0'
and '1' where each contains the weights associated
with the respective differenced pair of day bins.
It is of shape (nlstbins,ncomb,2,ntriads,nchan)
Member functions:
__init__() Initialize an instance of class ClosurePhase
expicp() Compute and return complex exponential of the closure phase
as a masked array
smooth_in_tbins()
Smooth the complex exponentials of closure phases in LST
bins. Both mean and median smoothing is produced.
subtract() Subtract complex exponential of the bispectrum phase
from the current instance and updates the cpinfo attribute
subsample_differencing()
Create subsamples and differences between subsamples to
evaluate noise properties from the data set.
save() Save contents of attribute cpinfo in external HDF5 file
----------------------------------------------------------------------------
"""
def __init__(self, infile, freqs, infmt='npz'):
"""
------------------------------------------------------------------------
Initialize an instance of class ClosurePhase
Inputs:
infile [string] Input file including full path. It could be a NPZ
with raw data, or a HDF5 file that could contain raw or
processed data. The input file format is specified in the
input infmt. If it is a NPZ file, it must contain the
following keys/files:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA
units which is MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is
(nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard
deviation of closure phases across days. Shape
is (nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard
deviation of closure phases across triads.
Shape is (nlst,ndays,nchan)
freqs [numpy array] Frequencies (in Hz) in the input. Size is
nchan.
infmt [string] Input file format. Accepted values are 'npz'
(default) and 'hdf5'.
------------------------------------------------------------------------
"""
if not isinstance(infile, str):
raise TypeError('Input infile must be a string')
if not isinstance(freqs, NP.ndarray):
raise TypeError('Input freqs must be a numpy array')
freqs = freqs.ravel()
if not isinstance(infmt, str):
raise TypeError('Input infmt must be a string')
if infmt.lower() not in ['npz', 'hdf5']:
raise ValueError('Input infmt must be "npz" or "hdf5"')
if infmt.lower() == 'npz':
infilesplit = infile.split('.npz')
infile_noext = infilesplit[0]
self.cpinfo = loadnpz(infile)
# npz2hdf5(infile, infile_noext+'.hdf5')
self.extfile = infile_noext + '.hdf5'
else:
# if not isinstance(infile, h5py.File):
# raise TypeError('Input infile is not a valid HDF5 file')
self.extfile = infile
self.cpinfo = NMO.load_dict_from_hdf5(self.extfile)
if freqs.size != self.cpinfo['raw']['cphase'].shape[-1]:
raise ValueError('Input frequencies do not match with dimensions of the closure phase data')
self.f = freqs
self.df = freqs[1] - freqs[0]
force_expicp = False
if 'processed' not in self.cpinfo:
force_expicp = True
else:
if 'native' not in self.cpinfo['processed']:
force_expicp = True
self.expicp(force_action=force_expicp)
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
self.cpinfo['errinfo'] = {}
############################################################################
def expicp(self, force_action=False):
"""
------------------------------------------------------------------------
Compute the complex exponential of the closure phase as a masked array
Inputs:
force_action [boolean] If set to False (default), the complex
exponential is computed only if it has not been done so
already. Otherwise the computation is forced.
------------------------------------------------------------------------
"""
if 'processed' not in self.cpinfo:
self.cpinfo['processed'] = {}
force_action = True
if 'native' not in self.cpinfo['processed']:
self.cpinfo['processed']['native'] = {}
force_action = True
if 'cphase' not in self.cpinfo['processed']['native']:
self.cpinfo['processed']['native']['cphase'] = MA.array(self.cpinfo['raw']['cphase'].astype(NP.float64), mask=self.cpinfo['raw']['flags'])
force_action = True
if not force_action:
if 'eicp' not in self.cpinfo['processed']['native']:
self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase'])
self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(NP.float), mask=self.cpinfo['raw']['flags'])
else:
self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase'])
self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(NP.float), mask=self.cpinfo['raw']['flags'])
############################################################################
def smooth_in_tbins(self, daybinsize=None, ndaybins=None, lstbinsize=None):
"""
------------------------------------------------------------------------
Smooth the complex exponentials of closure phases in time bins. Both
mean and median smoothing is produced.
Inputs:
daybinsize [NoneType or scalar] Day bin size (in days) over which mean
and median are estimated across different days for a fixed
LST bin. If set to None, it will look for value in input
ndaybins. If both are None, no smoothing is performed. Only
one of daybinsize or ndaybins must be set to non-None value.
ndaybins [NoneType or integer] Number of bins along day axis. Only
if daybinsize is set to None. It produces bins that roughly
consist of equal number of days in each bin regardless of
how much the days in each bin are separated from each other.
If both are None, no smoothing is performed. Only one of
daybinsize or ndaybins must be set to non-None value.
lstbinsize [NoneType or scalar] LST bin size (in seconds) over which
mean and median are estimated across the LST. If set to
None, no smoothing is performed
------------------------------------------------------------------------
"""
if (ndaybins is not None) and (daybinsize is not None):
raise ValueError('Only one of daybinsize or ndaybins should be set')
if (daybinsize is not None) or (ndaybins is not None):
if daybinsize is not None:
if not isinstance(daybinsize, (int,float)):
raise TypeError('Input daybinsize must be a scalar')
dres = NP.diff(self.cpinfo['raw']['days']).min() # in days
dextent = self.cpinfo['raw']['days'].max() - self.cpinfo['raw']['days'].min() + dres # in days
if daybinsize > dres:
daybinsize = NP.clip(daybinsize, dres, dextent)
eps = 1e-10
daybins = NP.arange(self.cpinfo['raw']['days'].min(), self.cpinfo['raw']['days'].max() + dres + eps, daybinsize)
ndaybins = daybins.size
daybins = NP.concatenate((daybins, [daybins[-1]+daybinsize+eps]))
if ndaybins > 1:
daybinintervals = daybins[1:] - daybins[:-1]
daybincenters = daybins[:-1] + 0.5 * daybinintervals
else:
daybinintervals = NP.asarray(daybinsize).reshape(-1)
daybincenters = daybins[0] + 0.5 * daybinintervals
counts, daybin_edges, daybinnum, ri = OPS.binned_statistic(self.cpinfo['raw']['days'], statistic='count', bins=daybins)
counts = counts.astype(NP.int)
# if 'prelim' not in self.cpinfo['processed']:
# self.cpinfo['processed']['prelim'] = {}
# self.cpinfo['processed']['prelim']['eicp'] = {}
# self.cpinfo['processed']['prelim']['cphase'] = {}
# self.cpinfo['processed']['prelim']['daybins'] = daybincenters
# self.cpinfo['processed']['prelim']['diff_dbins'] = daybinintervals
wts_daybins = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
eicp_dmean = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
eicp_dmedian = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
cp_drms = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
cp_dmad = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
for binnum in range(counts.size):
ind_daybin = ri[ri[binnum]:ri[binnum+1]]
wts_daybins[:,binnum,:,:] = NP.sum(self.cpinfo['processed']['native']['wts'][:,ind_daybin,:,:].data, axis=1)
eicp_dmean[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.mean(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:], axis=1)))
eicp_dmedian[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].real, axis=1) + 1j * MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].imag, axis=1)))
cp_drms[:,binnum,:,:] = MA.std(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:], axis=1).data
cp_dmad[:,binnum,:,:] = MA.median(NP.abs(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:] - NP.angle(eicp_dmedian[:,binnum,:,:][:,NP.newaxis,:,:])), axis=1).data
# mask = wts_daybins <= 0.0
# self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_daybins, mask=mask)
# self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_dmean, mask=mask)
# self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_dmedian, mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_dmean), mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_dmedian), mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_drms, mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_dmad, mask=mask)
else:
if not isinstance(ndaybins, int):
raise TypeError('Input ndaybins must be an integer')
if ndaybins <= 0:
raise ValueError('Input ndaybins must be positive')
days_split = NP.array_split(self.cpinfo['raw']['days'], ndaybins)
daybincenters = NP.asarray([NP.mean(days) for days in days_split])
daybinintervals = NP.asarray([days.max()-days.min() for days in days_split])
counts = NP.asarray([days.size for days in days_split])
wts_split = NP.array_split(self.cpinfo['processed']['native']['wts'].data, ndaybins, axis=1)
# mask_split = NP.array_split(self.cpinfo['processed']['native']['wts'].mask, ndaybins, axis=1)
wts_daybins = NP.asarray([NP.sum(wtsitem, axis=1) for wtsitem in wts_split]) # ndaybins x nlst x ntriads x nchan
wts_daybins = NP.moveaxis(wts_daybins, 0, 1) # nlst x ndaybins x ntriads x nchan
mask_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].mask, ndaybins, axis=1)
eicp_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].data, ndaybins, axis=1)
eicp_dmean = MA.array([MA.mean(MA.array(eicp_split[i], mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmean = NP.exp(1j * NP.angle(eicp_dmean))
eicp_dmean = NP.moveaxis(eicp_dmean, 0, 1) # nlst x ndaybins x ntriads x nchan
eicp_dmedian = MA.array([MA.median(MA.array(eicp_split[i].real, mask=mask_split[i]), axis=1) + 1j * MA.median(MA.array(eicp_split[i].imag, mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmedian = NP.exp(1j * NP.angle(eicp_dmedian))
eicp_dmedian = NP.moveaxis(eicp_dmedian, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_split = NP.array_split(self.cpinfo['processed']['native']['cphase'].data, ndaybins, axis=1)
cp_drms = NP.array([MA.std(MA.array(cp_split[i], mask=mask_split[i]), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_drms = NP.moveaxis(cp_drms, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_dmad = NP.array([MA.median(NP.abs(cp_split[i] - NP.angle(eicp_dmedian[:,[i],:,:])), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_dmad = NP.moveaxis(cp_dmad, 0, 1) # nlst x ndaybins x ntriads x nchan
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
self.cpinfo['processed']['prelim']['eicp'] = {}
self.cpinfo['processed']['prelim']['cphase'] = {}
self.cpinfo['processed']['prelim']['daybins'] = daybincenters
self.cpinfo['processed']['prelim']['diff_dbins'] = daybinintervals
mask = wts_daybins <= 0.0
self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_daybins, mask=mask)
self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_dmean, mask=mask)
self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_dmedian, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_dmean), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_dmedian), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_drms, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_dmad, mask=mask)
rawlst = NP.degrees(NP.unwrap(NP.radians(self.cpinfo['raw']['lst'] * 15.0), discont=NP.pi, axis=0)) / 15.0 # in hours but unwrapped to have no discontinuities
if NP.any(rawlst > 24.0):
rawlst -= 24.0
if rawlst.shape[0] > 1: # LST bin only if there are multiple LST
if lstbinsize is not None:
if not isinstance(lstbinsize, (int,float)):
raise TypeError('Input lstbinsize must be a scalar')
lstbinsize = lstbinsize / 3.6e3 # in hours
tres = NP.diff(rawlst[:,0]).min() # in hours
textent = rawlst[:,0].max() - rawlst[:,0].min() + tres # in hours
eps = 1e-10
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
no_change_in_lstbins = False
if lstbinsize > tres:
lstbinsize = NP.clip(lstbinsize, tres, textent)
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + tres + eps, lstbinsize)
nlstbins = lstbins.size
lstbins = NP.concatenate((lstbins, [lstbins[-1]+lstbinsize+eps]))
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
lstbincenters = lstbins[0] + 0.5 * lstbinintervals
self.cpinfo['processed']['prelim']['lstbins'] = lstbincenters
self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals
no_change_in_lstbins = False
else:
# Perform no binning and keep the current LST resolution, data and weights
warnings.warn('LST bin size found to be smaller than the LST resolution in the data. No LST binning/averaging will be performed.')
lstbinsize = tres
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize)
nlstbins = lstbins.size - 1
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals
self.cpinfo['processed']['prelim']['lstbins'] = lstbins[:-1]
# Ensure that the LST bins are inside the min/max envelope to
# allow error-free interpolation later
self.cpinfo['processed']['prelim']['lstbins'][0] += eps
self.cpinfo['processed']['prelim']['lstbins'][-1] -= eps
no_change_in_lstbins = True
counts, lstbin_edges, lstbinnum, ri = OPS.binned_statistic(rawlst[:,0], statistic='count', bins=lstbins)
counts = counts.astype(NP.int)
if 'wts' not in self.cpinfo['processed']['prelim']:
outshape = (counts.size, self.cpinfo['processed']['native']['eicp'].shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
else:
outshape = (counts.size, self.cpinfo['processed']['prelim']['wts'].shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
wts_lstbins = NP.zeros(outshape)
eicp_tmean = NP.zeros(outshape, dtype=NP.complex128)
eicp_tmedian = NP.zeros(outshape, dtype=NP.complex128)
cp_trms = NP.zeros(outshape)
cp_tmad = NP.zeros(outshape)
for binnum in range(counts.size):
if no_change_in_lstbins:
ind_lstbin = [binnum]
else:
ind_lstbin = ri[ri[binnum]:ri[binnum+1]]
if 'wts' not in self.cpinfo['processed']['prelim']:
indict = self.cpinfo['processed']['native']
else:
indict = self.cpinfo['processed']['prelim']
wts_lstbins[binnum,:,:,:] = NP.sum(indict['wts'][ind_lstbin,:,:,:].data, axis=0)
if 'wts' not in self.cpinfo['processed']['prelim']:
eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(indict['eicp'][ind_lstbin,:,:,:], axis=0)))
eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(indict['eicp'][ind_lstbin,:,:,:].real, axis=0) + 1j * MA.median(indict['eicp'][ind_lstbin,:,:,:].imag, axis=0)))
cp_trms[binnum,:,:,:] = MA.std(indict['cphase'][ind_lstbin,:,:,:], axis=0).data
cp_tmad[binnum,:,:,:] = MA.median(NP.abs(indict['cphase'][ind_lstbin,:,:,:] - NP.angle(eicp_tmedian[binnum,:,:,:][NP.newaxis,:,:,:])), axis=0).data
else:
eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(NP.exp(1j*indict['cphase']['mean'][ind_lstbin,:,:,:]), axis=0)))
eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(NP.cos(indict['cphase']['median'][ind_lstbin,:,:,:]), axis=0) + 1j * MA.median(NP.sin(indict['cphase']['median'][ind_lstbin,:,:,:]), axis=0)))
cp_trms[binnum,:,:,:] = MA.std(indict['cphase']['mean'][ind_lstbin,:,:,:], axis=0).data
cp_tmad[binnum,:,:,:] = MA.median(NP.abs(indict['cphase']['median'][ind_lstbin,:,:,:] - NP.angle(eicp_tmedian[binnum,:,:,:][NP.newaxis,:,:,:])), axis=0).data
mask = wts_lstbins <= 0.0
self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_lstbins, mask=mask)
if 'eicp' not in self.cpinfo['processed']['prelim']:
self.cpinfo['processed']['prelim']['eicp'] = {}
if 'cphase' not in self.cpinfo['processed']['prelim']:
self.cpinfo['processed']['prelim']['cphase'] = {}
self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_tmean, mask=mask)
self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_tmedian, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_tmean), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_tmedian), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_trms, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_tmad, mask=mask)
# else:
# # Perform no binning and keep the current LST resolution, data and weights
# warnings.warn('LST bin size found to be smaller than the LST resolution in the data. No LST binning/averaging will be performed.')
# lstbinsize = tres
# lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize)
# nlstbins = lstbins.size - 1
# if nlstbins > 1:
# lstbinintervals = lstbins[1:] - lstbins[:-1]
# lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals
# else:
# lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
# lstbincenters = lstbins[0] + 0.5 * lstbinintervals
# if 'prelim' not in self.cpinfo['processed']:
# self.cpinfo['processed']['prelim'] = {}
# self.cpinfo['processed']['prelim']['lstbins'] = lstbincenters
# self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals
if (rawlst.shape[0] <= 1) or (lstbinsize is None):
nlstbins = rawlst.shape[0]
lstbins = NP.mean(rawlst, axis=1)
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
self.cpinfo['processed']['prelim']['lstbins'] = lstbins
if lstbinsize is not None:
self.cpinfo['processed']['prelim']['dlstbins'] = NP.asarray(lstbinsize).reshape(-1)
else:
self.cpinfo['processed']['prelim']['dlstbins'] = NP.zeros(1)
############################################################################
def subtract(self, cphase):
"""
------------------------------------------------------------------------
Subtract complex exponential of the bispectrum phase from the current
instance and updates the cpinfo attribute
Inputs:
cphase [masked array] Bispectrum phase array as a masked array. It
must be of same size as freqs along the axis specified in
input axis.
Action: Updates 'submodel' and 'residual' keys under attribute
cpinfo under key 'processed'
------------------------------------------------------------------------
"""
if not isinstance(cphase, NP.ndarray):
raise TypeError('Input cphase must be a numpy array')
if not isinstance(cphase, MA.MaskedArray):
cphase = MA.array(cphase, mask=NP.isnan(cphase))
if not OPS.is_broadcastable(cphase.shape, self.cpinfo['processed']['prelim']['cphase']['median'].shape):
raise ValueError('Input cphase has shape incompatible with that in instance attribute')
else:
minshape = tuple(NP.ones(self.cpinfo['processed']['prelim']['cphase']['median'].ndim - cphase.ndim, dtype=NP.int)) + cphase.shape
cphase = cphase.reshape(minshape)
# cphase = NP.broadcast_to(cphase, minshape)
eicp = NP.exp(1j*cphase)
self.cpinfo['processed']['submodel'] = {}
self.cpinfo['processed']['submodel']['cphase'] = cphase
self.cpinfo['processed']['submodel']['eicp'] = eicp
self.cpinfo['processed']['residual'] = {'eicp': {}, 'cphase': {}}
for key in ['mean', 'median']:
eicpdiff = self.cpinfo['processed']['prelim']['eicp'][key] - eicp
eicpratio = self.cpinfo['processed']['prelim']['eicp'][key] / eicp
self.cpinfo['processed']['residual']['eicp'][key] = eicpdiff
self.cpinfo['processed']['residual']['cphase'][key] = MA.array(NP.angle(eicpratio.data), mask=self.cpinfo['processed']['residual']['eicp'][key].mask)
############################################################################
def subsample_differencing(self, daybinsize=None, ndaybins=4, lstbinsize=None):
"""
------------------------------------------------------------------------
Create subsamples and differences between subsamples to evaluate noise
properties from the data set.
Inputs:
daybinsize [NoneType or scalar] Day bin size (in days) over which mean
and median are estimated across different days for a fixed
LST bin. If set to None, it will look for value in input
ndaybins. If both are None, no smoothing is performed. Only
one of daybinsize or ndaybins must be set to non-None value.
Must yield greater than or equal to 4 bins
ndaybins [NoneType or integer] Number of bins along day axis. Only
if daybinsize is set to None. It produces bins that roughly
consist of equal number of days in each bin regardless of
how much the days in each bin are separated from each other.
If both are None, no smoothing is performed. Only one of
daybinsize or ndaybins must be set to non-None value. If set,
it must be set to greater than or equal to 4
lstbinsize [NoneType or scalar] LST bin size (in seconds) over which
mean and median are estimated across the LST. If set to
None, no smoothing is performed
------------------------------------------------------------------------
"""
if (ndaybins is not None) and (daybinsize is not None):
raise ValueError('Only one of daybinsize or ndaybins should be set')
if (daybinsize is not None) or (ndaybins is not None):
if daybinsize is not None:
if not isinstance(daybinsize, (int,float)):
raise TypeError('Input daybinsize must be a scalar')
dres = NP.diff(self.cpinfo['raw']['days']).min() # in days
dextent = self.cpinfo['raw']['days'].max() - self.cpinfo['raw']['days'].min() + dres # in days
if daybinsize > dres:
daybinsize = NP.clip(daybinsize, dres, dextent)
eps = 1e-10
daybins = NP.arange(self.cpinfo['raw']['days'].min(), self.cpinfo['raw']['days'].max() + dres + eps, daybinsize)
ndaybins = daybins.size
daybins = NP.concatenate((daybins, [daybins[-1]+daybinsize+eps]))
if ndaybins >= 4:
daybinintervals = daybins[1:] - daybins[:-1]
daybincenters = daybins[:-1] + 0.5 * daybinintervals
else:
raise ValueError('Could not find at least 4 bins along repeating days. Adjust binning interval.')
counts, daybin_edges, daybinnum, ri = OPS.binned_statistic(self.cpinfo['raw']['days'], statistic='count', bins=daybins)
counts = counts.astype(NP.int)
wts_daybins = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
eicp_dmean = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
eicp_dmedian = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
cp_drms = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
cp_dmad = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
for binnum in range(counts.size):
ind_daybin = ri[ri[binnum]:ri[binnum+1]]
wts_daybins[:,binnum,:,:] = NP.sum(self.cpinfo['processed']['native']['wts'][:,ind_daybin,:,:].data, axis=1)
eicp_dmean[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.mean(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:], axis=1)))
eicp_dmedian[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].real, axis=1) + 1j * MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].imag, axis=1)))
cp_drms[:,binnum,:,:] = MA.std(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:], axis=1).data
cp_dmad[:,binnum,:,:] = MA.median(NP.abs(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:] - NP.angle(eicp_dmedian[:,binnum,:,:][:,NP.newaxis,:,:])), axis=1).data
else:
if not isinstance(ndaybins, int):
raise TypeError('Input ndaybins must be an integer')
if ndaybins < 4:
raise ValueError('Input ndaybins must be greater than or equal to 4')
days_split = NP.array_split(self.cpinfo['raw']['days'], ndaybins)
daybincenters = NP.asarray([NP.mean(days) for days in days_split])
daybinintervals = NP.asarray([days.max()-days.min() for days in days_split])
counts = NP.asarray([days.size for days in days_split])
wts_split = NP.array_split(self.cpinfo['processed']['native']['wts'].data, ndaybins, axis=1)
# mask_split = NP.array_split(self.cpinfo['processed']['native']['wts'].mask, ndaybins, axis=1)
wts_daybins = NP.asarray([NP.sum(wtsitem, axis=1) for wtsitem in wts_split]) # ndaybins x nlst x ntriads x nchan
wts_daybins = NP.moveaxis(wts_daybins, 0, 1) # nlst x ndaybins x ntriads x nchan
mask_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].mask, ndaybins, axis=1)
eicp_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].data, ndaybins, axis=1)
eicp_dmean = MA.array([MA.mean(MA.array(eicp_split[i], mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmean = NP.exp(1j * NP.angle(eicp_dmean))
eicp_dmean = NP.moveaxis(eicp_dmean, 0, 1) # nlst x ndaybins x ntriads x nchan
eicp_dmedian = MA.array([MA.median(MA.array(eicp_split[i].real, mask=mask_split[i]), axis=1) + 1j * MA.median(MA.array(eicp_split[i].imag, mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmedian = NP.exp(1j * NP.angle(eicp_dmedian))
eicp_dmedian = NP.moveaxis(eicp_dmedian, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_split = NP.array_split(self.cpinfo['processed']['native']['cphase'].data, ndaybins, axis=1)
cp_drms = NP.array([MA.std(MA.array(cp_split[i], mask=mask_split[i]), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_drms = NP.moveaxis(cp_drms, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_dmad = NP.array([MA.median(NP.abs(cp_split[i] - NP.angle(eicp_dmedian[:,[i],:,:])), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_dmad = NP.moveaxis(cp_dmad, 0, 1) # nlst x ndaybins x ntriads x nchan
mask = wts_daybins <= 0.0
wts_daybins = MA.array(wts_daybins, mask=mask)
cp_dmean = MA.array(NP.angle(eicp_dmean), mask=mask)
cp_dmedian = MA.array(NP.angle(eicp_dmedian), mask=mask)
self.cpinfo['errinfo']['daybins'] = daybincenters
self.cpinfo['errinfo']['diff_dbins'] = daybinintervals
self.cpinfo['errinfo']['wts'] = {'{0}'.format(ind): None for ind in range(2)}
self.cpinfo['errinfo']['eicp_diff'] = {'{0}'.format(ind): {} for ind in range(2)}
rawlst = NP.degrees(NP.unwrap(NP.radians(self.cpinfo['raw']['lst'] * 15.0), discont=NP.pi, axis=0)) / 15.0 # in hours but unwrapped to have no discontinuities
if NP.any(rawlst > 24.0):
rawlst -= 24.0
if rawlst.shape[0] > 1: # LST bin only if there are multiple LST
if lstbinsize is not None:
if not isinstance(lstbinsize, (int,float)):
raise TypeError('Input lstbinsize must be a scalar')
lstbinsize = lstbinsize / 3.6e3 # in hours
tres = NP.diff(rawlst[:,0]).min() # in hours
textent = rawlst[:,0].max() - rawlst[:,0].min() + tres # in hours
eps = 1e-10
no_change_in_lstbins = False
if lstbinsize > tres:
lstbinsize = NP.clip(lstbinsize, tres, textent)
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + tres + eps, lstbinsize)
nlstbins = lstbins.size
lstbins = NP.concatenate((lstbins, [lstbins[-1]+lstbinsize+eps]))
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
lstbincenters = lstbins[0] + 0.5 * lstbinintervals
self.cpinfo['errinfo']['lstbins'] = lstbincenters
self.cpinfo['errinfo']['dlstbins'] = lstbinintervals
no_change_in_lstbins = False
else:
# Perform no binning and keep the current LST resolution
warnings.warn('LST bin size found to be smaller than the LST resolution in the data. No LST binning/averaging will be performed.')
lstbinsize = tres
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize)
nlstbins = lstbins.size - 1
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
self.cpinfo['errinfo']['dlstbins'] = lstbinintervals
self.cpinfo['errinfo']['lstbins'] = lstbins[:-1]
# Ensure that the LST bins are inside the min/max envelope to
# allow error-free interpolation later
self.cpinfo['errinfo']['lstbins'][0] += eps
self.cpinfo['errinfo']['lstbins'][-1] -= eps
no_change_in_lstbins = True
counts, lstbin_edges, lstbinnum, ri = OPS.binned_statistic(rawlst[:,0], statistic='count', bins=lstbins)
counts = counts.astype(NP.int)
outshape = (counts.size, wts_daybins.shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
wts_lstbins = NP.zeros(outshape)
eicp_tmean = NP.zeros(outshape, dtype=NP.complex128)
eicp_tmedian = NP.zeros(outshape, dtype=NP.complex128)
cp_trms = NP.zeros(outshape)
cp_tmad = NP.zeros(outshape)
for binnum in range(counts.size):
if no_change_in_lstbins:
ind_lstbin = [binnum]
else:
ind_lstbin = ri[ri[binnum]:ri[binnum+1]]
wts_lstbins[binnum,:,:,:] = NP.sum(wts_daybins[ind_lstbin,:,:,:].data, axis=0)
eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(NP.exp(1j*cp_dmean[ind_lstbin,:,:,:]), axis=0)))
eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(NP.cos(cp_dmedian[ind_lstbin,:,:,:]), axis=0) + 1j * MA.median(NP.sin(cp_dmedian[ind_lstbin,:,:,:]), axis=0)))
mask = wts_lstbins <= 0.0
wts_lstbins = MA.array(wts_lstbins, mask=mask)
eicp_tmean = MA.array(eicp_tmean, mask=mask)
eicp_tmedian = MA.array(eicp_tmedian, mask=mask)
else:
wts_lstbins = MA.copy(wts_daybins)
mask = wts_lstbins.mask
eicp_tmean = MA.array(NP.exp(1j*NP.angle(NP.exp(1j*cp_dmean))), mask=mask)
eicp_tmedian = MA.array(NP.exp(1j*NP.angle(NP.cos(cp_dmedian) + 1j * NP.sin(cp_dmedian))), mask=mask)
if (rawlst.shape[0] <= 1) or (lstbinsize is None):
nlstbins = rawlst.shape[0]
lstbins = NP.mean(rawlst, axis=1)
self.cpinfo['errinfo']['lstbins'] = lstbins
if lstbinsize is not None:
self.cpinfo['errinfo']['dlstbins'] = NP.asarray(lstbinsize).reshape(-1)
else:
self.cpinfo['errinfo']['dlstbins'] = NP.zeros(1)
ncomb = NP.sum(NP.asarray([(ndaybins-i-1)*(ndaybins-i-2)*(ndaybins-i-3)/2 for i in range(ndaybins-3)])).astype(int)
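# For example, ndaybins=4 gives ncomb = 3*2*1/2 = 3, and ndaybins=5 gives
# ncomb = 4*3*2/2 + 3*2*1/2 = 15, i.e., the number of unordered pairs of
# disjoint day-bin pairs enumerated in the loops below.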
diff_outshape = (nlstbins, ncomb, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
for diffind in range(2):
self.cpinfo['errinfo']['eicp_diff']['{0}'.format(diffind)]['mean'] = MA.empty(diff_outshape, dtype=NP.complex)
self.cpinfo['errinfo']['eicp_diff']['{0}'.format(diffind)]['median'] = MA.empty(diff_outshape, dtype=NP.complex)
self.cpinfo['errinfo']['wts']['{0}'.format(diffind)] = MA.empty(diff_outshape, dtype=NP.float)
ind = -1
self.cpinfo['errinfo']['list_of_pair_of_pairs'] = []
list_of_pair_of_pairs = []
for i in range(ndaybins-1):
for j in range(i+1,ndaybins):
for k in range(ndaybins-1):
if (k != i) and (k != j):
for m in range(k+1,ndaybins):
if (m != i) and (m != j):
pair_of_pairs = [set([i,j]), set([k,m])]
if (pair_of_pairs not in list_of_pair_of_pairs) and (pair_of_pairs[::-1] not in list_of_pair_of_pairs):
ind += 1
list_of_pair_of_pairs += [copy.deepcopy(pair_of_pairs)]
self.cpinfo['errinfo']['list_of_pair_of_pairs'] += [[i,j,k,m]]
for stat in ['mean', 'median']:
if stat == 'mean':
self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmean[:,j,:,:].data - eicp_tmean[:,i,:,:].data), mask=NP.logical_or(eicp_tmean[:,j,:,:].mask, eicp_tmean[:,i,:,:].mask))
self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmean[:,m,:,:].data - eicp_tmean[:,k,:,:].data), mask=NP.logical_or(eicp_tmean[:,m,:,:].mask, eicp_tmean[:,k,:,:].mask))
self.cpinfo['errinfo']['wts']['0'][:,ind,:,:] = MA.array(NP.sqrt(wts_lstbins[:,j,:,:].data**2 + wts_lstbins[:,i,:,:].data**2), mask=NP.logical_or(wts_lstbins[:,j,:,:].mask, wts_lstbins[:,i,:,:].mask))
self.cpinfo['errinfo']['wts']['1'][:,ind,:,:] = MA.array(NP.sqrt(wts_lstbins[:,m,:,:].data**2 + wts_lstbins[:,k,:,:].data**2), mask=NP.logical_or(wts_lstbins[:,m,:,:].mask, wts_lstbins[:,k,:,:].mask))
# self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = 0.5 * (eicp_tmean[:,j,:,:] - eicp_tmean[:,i,:,:])
# self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = 0.5 * (eicp_tmean[:,m,:,:] - eicp_tmean[:,k,:,:])
# self.cpinfo['errinfo']['wts']['0'][:,ind,:,:] = NP.sqrt(wts_lstbins[:,j,:,:]**2 + wts_lstbins[:,i,:,:]**2)
# self.cpinfo['errinfo']['wts']['1'][:,ind,:,:] = NP.sqrt(wts_lstbins[:,m,:,:]**2 + wts_lstbins[:,k,:,:]**2)
else:
self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmedian[:,j,:,:].data - eicp_tmedian[:,i,:,:].data), mask=NP.logical_or(eicp_tmedian[:,j,:,:].mask, eicp_tmedian[:,i,:,:].mask))
self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmedian[:,m,:,:].data - eicp_tmedian[:,k,:,:].data), mask=NP.logical_or(eicp_tmedian[:,m,:,:].mask, eicp_tmedian[:,k,:,:].mask))
# self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = 0.5 * (eicp_tmedian[:,j,:,:] - eicp_tmedian[:,i,:,:])
# self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = 0.5 * (eicp_tmedian[:,m,:,:] - eicp_tmedian[:,k,:,:])
mask0 = self.cpinfo['errinfo']['wts']['0'] <= 0.0
mask1 = self.cpinfo['errinfo']['wts']['1'] <= 0.0
self.cpinfo['errinfo']['eicp_diff']['0'][stat] = MA.array(self.cpinfo['errinfo']['eicp_diff']['0'][stat], mask=mask0)
self.cpinfo['errinfo']['eicp_diff']['1'][stat] = MA.array(self.cpinfo['errinfo']['eicp_diff']['1'][stat], mask=mask1)
self.cpinfo['errinfo']['wts']['0'] = MA.array(self.cpinfo['errinfo']['wts']['0'], mask=mask0)
self.cpinfo['errinfo']['wts']['1'] = MA.array(self.cpinfo['errinfo']['wts']['1'], mask=mask1)
############################################################################
def save(self, outfile=None):
"""
------------------------------------------------------------------------
Save contents of attribute cpinfo in external HDF5 file
Inputs:
outfile [NoneType or string] Output file (HDF5) to save contents to.
If set to None (default), it will be saved in the file
pointed to by the extfile attribute of class ClosurePhase
------------------------------------------------------------------------
"""
if outfile is None:
outfile = self.extfile
NMO.save_dict_to_hdf5(self.cpinfo, outfile, compressinfo={'compress_fmt': 'gzip', 'compress_opts': 9})
################################################################################
class ClosurePhaseDelaySpectrum(object):
"""
----------------------------------------------------------------------------
Class to hold and operate on Closure Phase information.
It has the following attributes and member functions.
Attributes:
cPhase [instance of class ClosurePhase] Instance of class
ClosurePhase
f [numpy array] Frequencies (in Hz) in closure phase spectra
df [float] Frequency resolution (in Hz) in closure phase
spectra
cPhaseDS [dictionary] Possibly oversampled Closure Phase Delay
Spectrum information.
cPhaseDS_resampled
[dictionary] Resampled Closure Phase Delay Spectrum
information.
Member functions:
__init__() Initialize instance of class ClosurePhaseDelaySpectrum
FT() Fourier transform of complex closure phase spectra mapping
from frequency axis to delay axis.
subset() Return triad and time indices to select a subset of
processed data
compute_power_spectrum()
Compute power spectrum of closure phase data. It is in units
of Mpc/h.
rescale_power_spectrum()
Rescale power spectrum to dimensional quantity by converting
the ratio given visibility amplitude information
average_rescaled_power_spectrum()
Average the rescaled power spectrum with physical units
along certain axes with inverse variance or regular
averaging
beam3Dvol() Compute three-dimensional volume of the antenna power
pattern along two transverse axes and one LOS axis.
----------------------------------------------------------------------------
"""
def __init__(self, cPhase):
"""
------------------------------------------------------------------------
Initialize instance of class ClosurePhaseDelaySpectrum
Inputs:
cPhase [class ClosurePhase] Instance of class ClosurePhase
------------------------------------------------------------------------
"""
if not isinstance(cPhase, ClosurePhase):
raise TypeError('Input cPhase must be an instance of class ClosurePhase')
self.cPhase = cPhase
self.f = self.cPhase.f
self.df = self.cPhase.df
self.cPhaseDS = None
self.cPhaseDS_resampled = None
############################################################################
def FT(self, bw_eff, freq_center=None, shape=None, fftpow=None, pad=None,
datapool='prelim', visscaleinfo=None, method='fft', resample=True,
apply_flags=True):
"""
------------------------------------------------------------------------
Fourier transform of complex closure phase spectra mapping from
frequency axis to delay axis.
Inputs:
bw_eff [scalar or numpy array] effective bandwidths (in Hz) on the
selected frequency windows for subband delay transform of
closure phases. If a scalar value is provided, the same
will be applied to all frequency windows
freq_center [scalar, list or numpy array] frequency centers (in Hz) of
the selected frequency windows for subband delay transform
of closure phases. The value can be a scalar, list or numpy
array. If a scalar is provided, the same will be applied to
all frequency windows. Default=None uses the center
frequency from the class attribute named channels
shape [string] frequency window shape for subband delay transform
of closure phases. Accepted values for the string are
'rect' or 'RECT' (for rectangular), 'bnw' and 'BNW' (for
Blackman-Nuttall), and 'bhw' or 'BHW' (for
Blackman-Harris). Default=None sets it to 'rect'
(rectangular window)
fftpow [scalar] the power to which the FFT of the window will be
raised. The value must be a positive scalar. Default = 1.0
pad [scalar] padding fraction relative to the number of
frequency channels for closure phases. Value must be a
non-negative scalar. For example, a pad of 1.0 pads the
frequency axis with zeros of the same width as the number
of channels. After the delay transform, the transformed
closure phases are downsampled by a factor of 1+pad. If a
negative value is specified, delay transform will be
performed with no padding. Default=None sets to padding
factor to 1.0
datapool [string] Specifies which data set is to be Fourier
transformed
visscaleinfo
[dictionary] Dictionary containing reference visibilities
based on which the closure phases will be scaled to units
of visibilities. It contains the following keys and values:
'vis' [numpy array or instance of class
InterferometerArray] Reference visibilities from the
baselines that form the triad. It can be an instance
of class RI.InterferometerArray or a numpy array.
If an instance of class InterferometerArray, the
baseline triplet must be set in key 'bltriplet'
and value in key 'lst' will be ignored. If the
value under this key 'vis' is set to a numpy array,
it must be of shape (nbl=3, nlst_vis, nchan). In
this case the value under key 'bltriplet' will be
ignored. The nearest LST will be looked up and
applied after smoothing along LST based on the
smoothing parameter 'smooth'
'bltriplet'
[Numpy array] Will be used in searching for matches
to these three baseline vectors if the value under
key 'vis' is set to an instance of class
InterferometerArray. However, if value under key
'vis' is a numpy array, this key 'bltriplet' will
be ignored.
'lst' [numpy array] Reference LST (in hours). It is of
shape (nlst_vis,). It will be used only if value
under key 'vis' is a numpy array, otherwise it will
be ignored and read from the instance of class
InterferometerArray passed under key 'vis'. If the
specified LST range does not cover the data LST
range, those LST will contain NaN in the delay
spectrum
'smoothinfo'
[dictionary] Dictionary specifying smoothing and/or
interpolation parameters. It has the following keys
and values:
'op_type' [string] Specifies the interpolating
operation. Must be specified (no
default). Accepted values are
'interp1d' (scipy.interpolate),
'median' (skimage.filters), 'tophat'
(astropy.convolution) and 'gaussian'
(astropy.convolution)
'interp_kind' [string (optional)] Specifies the
interpolation kind (if 'op_type' is
set to 'interp1d'). For accepted
values, see
scipy.interpolate.interp1d()
'window_size' [integer (optional)] Specifies the
size of the interpolating/smoothing
kernel. Only applies when 'op_type'
is set to 'median', 'tophat' or
'gaussian'. The kernel is a tophat
function when 'op_type' is set to
'median' or 'tophat'. It refers to
the FWHM when 'op_type' is set to
'gaussian'
resample [boolean] If set to True (default), resample the delay
spectrum axis to independent samples along delay axis. If
set to False, return the results as is even if they may be
oversampled and not all samples may be independent
method [string] Specifies the Fourier transform method to be used.
Accepted values are 'fft' (default) for FFT and 'nufft' for
non-uniform FFT
apply_flags [boolean] If set to True (default), weights determined from
flags will be applied. If False, no weights from flagging
will be applied, and thus even flagged data will be included
Outputs:
A dictionary that contains the oversampled (if resample=False) or
resampled (if resample=True) delay spectrum information. It has the
following keys and values:
'freq_center' [numpy array] contains the center frequencies
(in Hz) of the frequency subbands of the subband
delay spectra. It is of size n_win. It is roughly
equivalent to redshift(s)
'freq_wts' [numpy array] Contains frequency weights applied
on each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff' [numpy array] contains the effective bandwidths
(in Hz) of the subbands being delay transformed. It
is of size n_win. It is roughly equivalent to width
in redshift or along line-of-sight
'shape' [string] shape of the window function applied.
Accepted values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow' [scalar] the power to which the FFT of the window was
raised. The value is a positive scalar with
default = 1.0
'npad' [scalar] Number of zero-padded channels before
performing the subband delay transform.
'lags' [numpy array] lags of the subband delay spectra
after padding in frequency during the transform. It
is of size nlags=nchan+npad if resample=False, where
npad is the number of frequency channels padded
specified under the key 'npad'. If resample=True,
nlags = number of delays after resampling to only
independent delays. The lags roughly correspond to
k_parallel.
'lag_kernel' [numpy array] delay transform of the frequency
weights under the key 'freq_wts'. It is of size
n_win x nlst x ndays x ntriads x nlags.
nlags=nchan+npad if resample=False, where npad is the
number of frequency channels padded specified under
the key 'npad'. If resample=True, nlags = number of
delays after resampling to only independent delays.
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is
proportional to inverse of effective bandwidth. It
is of size n_win. The unit size of a pixel is
determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is
effectively inverse of the effective bandwidth of
the subband specified in bw_eff
'whole' [dictionary] Delay spectrum results corresponding to
bispectrum phase in 'prelim' key of attribute cpinfo.
Contains the following keys and values:
'dspec' [dictionary] Contains the following keys and
values:
'twts' [numpy array] Weights from time-based
flags that went into time-averaging.
Shape=(nlst,ndays,ntriads,nchan)
'mean' [numpy array] Delay spectrum of closure
phases based on their mean across time
intervals.
Shape=(nspw,nlst,ndays,ntriads,nlags)
'median'
[numpy array] Delay spectrum of closure
phases based on their median across time
intervals.
Shape=(nspw,nlst,ndays,ntriads,nlags)
'submodel' [dictionary] Delay spectrum results corresponding to
bispectrum phase in 'submodel' key of attribute cpinfo.
Contains the following keys and values:
'dspec' [numpy array] Delay spectrum of closure phases
Shape=(nspw,nlst,ndays,ntriads,nlags)
'residual' [dictionary] Delay spectrum results corresponding to
bispectrum phase in 'residual' key of attribute cpinfo
after subtracting 'submodel' bispectrum phase from that
of 'prelim'. It contains the following keys and values:
'dspec' [dictionary] Contains the following keys and
values:
'twts' [numpy array] Weights from time-based
flags that went into time-averaging.
Shape=(nlst,ndays,ntriads,nchan)
'mean' [numpy array] Delay spectrum of closure
phases based on their mean across time
intervals.
Shape=(nspw,nlst,ndays,ntriads,nlags)
'median'
[numpy array] Delay spectrum of closure
phases based on their median across time
intervals.
Shape=(nspw,nlst,ndays,ntriads,nlags)
'errinfo' [dictionary] It has two keys 'dspec0' and 'dspec1' each
of which are dictionaries with the following keys and
values:
'twts' [numpy array] Weights for the subsample
difference. It is of shape (nlst, ndays,
ntriads, nchan)
'mean' [numpy array] Delay spectrum of the
subsample difference obtained by using the
mean statistic. It is of shape (nspw, nlst,
ndays, ntriads, nlags)
'median'
[numpy array] Delay spectrum of the subsample
difference obtained by using the median
statistic. It is of shape (nspw, nlst, ndays,
ntriads, nlags)
------------------------------------------------------------------------
"""
try:
bw_eff
except NameError:
raise NameError('Effective bandwidth must be specified')
else:
if not isinstance(bw_eff, (int, float, list, NP.ndarray)):
raise TypeError('Value of effective bandwidth must be a scalar, list or numpy array')
bw_eff = NP.asarray(bw_eff).reshape(-1)
if NP.any(bw_eff <= 0.0):
raise ValueError('All values in effective bandwidth must be strictly positive')
if freq_center is None:
freq_center = NP.asarray(self.f[self.f.size//2]).reshape(-1)
elif isinstance(freq_center, (int, float, list, NP.ndarray)):
freq_center = NP.asarray(freq_center).reshape(-1)
if NP.any((freq_center <= self.f.min()) | (freq_center >= self.f.max())):
raise ValueError('Value(s) of frequency center(s) must lie strictly inside the observing band')
else:
raise TypeError('Values(s) of frequency center must be scalar, list or numpy array')
if (bw_eff.size == 1) and (freq_center.size > 1):
bw_eff = NP.repeat(bw_eff, freq_center.size)
elif (bw_eff.size > 1) and (freq_center.size == 1):
freq_center = NP.repeat(freq_center, bw_eff.size)
elif bw_eff.size != freq_center.size:
raise ValueError('Effective bandwidth(s) and frequency center(s) must have same number of elements')
if shape is not None:
if not isinstance(shape, str):
raise TypeError('Window shape must be a string')
if shape not in ['rect', 'bhw', 'bnw', 'RECT', 'BHW', 'BNW']:
raise ValueError('Invalid value for window shape specified.')
else:
shape = 'rect'
if fftpow is None:
fftpow = 1.0
else:
if not isinstance(fftpow, (int, float)):
raise TypeError('Power to raise window FFT by must be a scalar value.')
if fftpow < 0.0:
raise ValueError('Power for raising FFT of window by must be positive.')
if pad is None:
pad = 1.0
else:
if not isinstance(pad, (int, float)):
raise TypeError('pad fraction must be a scalar value.')
if pad < 0.0:
pad = 0.0
warnings.warn('Pad fraction found to be negative. Resetting to 0.0 (no padding will be applied).')
if not isinstance(datapool, str):
raise TypeError('Input datapool must be a string')
if datapool.lower() not in ['prelim']:
raise ValueError('Specified datapool not supported')
if visscaleinfo is not None:
if not isinstance(visscaleinfo, dict):
raise TypeError('Input visscaleinfo must be a dictionary')
if 'vis' not in visscaleinfo:
raise KeyError('Input visscaleinfo does not contain key "vis"')
if not isinstance(visscaleinfo['vis'], RI.InterferometerArray):
if 'lst' not in visscaleinfo:
raise KeyError('Input visscaleinfo does not contain key "lst"')
lst_vis = visscaleinfo['lst'] * 15.0
if not isinstance(visscaleinfo['vis'], (NP.ndarray,MA.MaskedArray)):
raise TypeError('Input visibilities must be a numpy or a masked array')
if not isinstance(visscaleinfo['vis'], MA.MaskedArray):
visscaleinfo['vis'] = MA.array(visscaleinfo['vis'], mask=NP.isnan(visscaleinfo['vis']))
vistriad = MA.copy(visscaleinfo['vis'])
else:
if 'bltriplet' not in visscaleinfo:
raise KeyError('Input dictionary visscaleinfo does not contain key "bltriplet"')
blind, blrefind, dbl = LKP.find_1NN(visscaleinfo['vis'].baselines, visscaleinfo['bltriplet'], distance_ULIM=0.2, remove_oob=True)
if blrefind.size != 3:
blind_missing = NP.setdiff1d(NP.arange(3), blind, assume_unique=True)
blind_next, blrefind_next, dbl_next = LKP.find_1NN(visscaleinfo['vis'].baselines, -1*visscaleinfo['bltriplet'][blind_missing,:], distance_ULIM=0.2, remove_oob=True)
if blind_next.size + blind.size != 3:
raise ValueError('Exactly three baselines were not found in the reference baselines')
else:
blind = NP.append(blind, blind_missing[blind_next])
blrefind = NP.append(blrefind, blrefind_next)
else:
blind_missing = []
vistriad = NP.transpose(visscaleinfo['vis'].skyvis_freq[blrefind,:,:], (0,2,1))
if len(blind_missing) > 0:
vistriad[-blrefind_next.size:,:,:] = vistriad[-blrefind_next.size:,:,:].conj()
vistriad = MA.array(vistriad, mask=NP.isnan(vistriad))
lst_vis = visscaleinfo['vis'].lst
viswts = MA.array(NP.ones_like(vistriad.data), mask=vistriad.mask, dtype=NP.float)
lst_out = self.cPhase.cpinfo['processed']['prelim']['lstbins'] * 15.0
if lst_vis.size == 1: # Apply the visibility scaling from one reference LST to all LST
vis_ref = vistriad * NP.ones(lst_out.size).reshape(1,-1,1)
wts_ref = viswts * NP.ones(lst_out.size).reshape(1,-1,1)
else:
vis_ref, wts_ref = OPS.interpolate_masked_array_1D(vistriad, viswts, 1, visscaleinfo['smoothinfo'], inploc=lst_vis, outloc=lst_out)
if not isinstance(method, str):
raise TypeError('Input method must be a string')
if method.lower() not in ['fft', 'nufft']:
raise ValueError('Specified FFT method not supported')
if not isinstance(apply_flags, bool):
raise TypeError('Input apply_flags must be boolean')
flagwts = 1.0
visscale = 1.0
if datapool.lower() == 'prelim':
if method.lower() == 'fft':
freq_wts = NP.empty((bw_eff.size, self.f.size), dtype=NP.float_) # nspw x nchan
frac_width = DSP.window_N2width(n_window=None, shape=shape, fftpow=fftpow, area_normalize=False, power_normalize=True)
window_loss_factor = 1 / frac_width
n_window = NP.round(window_loss_factor * bw_eff / self.df).astype(NP.int)
ind_freq_center, ind_channels, dfrequency = LKP.find_1NN(self.f.reshape(-1,1), freq_center.reshape(-1,1), distance_ULIM=0.51*self.df, remove_oob=True)
sortind = NP.argsort(ind_channels)
ind_freq_center = ind_freq_center[sortind]
ind_channels = ind_channels[sortind]
dfrequency = dfrequency[sortind]
n_window = n_window[sortind]
for i,ind_chan in enumerate(ind_channels):
window = NP.sqrt(frac_width * n_window[i]) * DSP.window_fftpow(n_window[i], shape=shape, fftpow=fftpow, centering=True, peak=None, area_normalize=False, power_normalize=True)
window_chans = self.f[ind_chan] + self.df * (NP.arange(n_window[i]) - int(n_window[i]/2))
ind_window_chans, ind_chans, dfreq = LKP.find_1NN(self.f.reshape(-1,1), window_chans.reshape(-1,1), distance_ULIM=0.51*self.df, remove_oob=True)
sind = NP.argsort(ind_window_chans)
ind_window_chans = ind_window_chans[sind]
ind_chans = ind_chans[sind]
dfreq = dfreq[sind]
window = window[ind_window_chans]
window = NP.pad(window, ((ind_chans.min(), self.f.size-1-ind_chans.max())), mode='constant', constant_values=((0.0,0.0)))
freq_wts[i,:] = window
npad = int(self.f.size * pad)
lags = DSP.spectral_axis(self.f.size + npad, delx=self.df, use_real=False, shift=True)
result = {'freq_center': freq_center, 'shape': shape, 'freq_wts': freq_wts, 'bw_eff': bw_eff, 'fftpow': fftpow, 'npad': npad, 'lags': lags, 'lag_corr_length': self.f.size / NP.sum(freq_wts, axis=-1), 'whole': {'dspec': {'twts': self.cPhase.cpinfo['processed'][datapool]['wts']}}, 'residual': {'dspec': {'twts': self.cPhase.cpinfo['processed'][datapool]['wts']}}, 'errinfo': {'dspec0': {'twts': self.cPhase.cpinfo['errinfo']['wts']['0']}, 'dspec1': {'twts': self.cPhase.cpinfo['errinfo']['wts']['1']}}, 'submodel': {}}
if visscaleinfo is not None:
visscale = NP.nansum(NP.transpose(vis_ref[NP.newaxis,NP.newaxis,:,:,:], axes=(0,3,1,2,4)) * freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) / NP.nansum(freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) # nspw x nlst x (ndays=1) x (nbl=3) x (nchan=1)
visscale = NP.sqrt(1.0/NP.nansum(1/NP.abs(visscale)**2, axis=-2, keepdims=True)) # nspw x nlst x (ndays=1) x (ntriads=1) x (nchan=1)
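# The band-averaged reference visibility amplitude of each of the three
# baselines is combined in inverse quadrature above, yielding one effective
# scaling amplitude per spectral window and LST that multiplies the closure
# phase spectra further below.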
for dpool in ['errinfo', 'prelim', 'submodel', 'residual']:
if dpool.lower() == 'errinfo':
for diffind in range(2):
if apply_flags:
flagwts = NP.copy(self.cPhase.cpinfo['errinfo']['wts']['{0}'.format(diffind)].data)
flagwts = flagwts[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
flagwts = 1.0 * flagwts / NP.mean(flagwts, axis=-1, keepdims=True) # (nspw=1) x nlst x ndays x ntriads x nchan
for stat in self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)]:
eicp = NP.copy(self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].data) # Minimum shape as stored
# eicp = NP.copy(self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].filled(0.0)) # Minimum shape as stored
eicp = NP.broadcast_to(eicp, self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].shape) # Broadcast to final shape
eicp = eicp[NP.newaxis,...] # nlst x ndayscomb x ntriads x nchan --> (nspw=1) x nlst x ndayscomb x ntriads x nchan
ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)]
result[dpool]['dspec{0}'.format(diffind)][stat] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
else:
if dpool in self.cPhase.cpinfo['processed']:
if apply_flags:
flagwts = NP.copy(self.cPhase.cpinfo['processed'][datapool]['wts'].data)
flagwts = flagwts[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
flagwts = 1.0 * flagwts / NP.mean(flagwts, axis=-1, keepdims=True) # (nspw=1) x nlst x ndays x ntriads x nchan
if dpool == 'submodel':
eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'].data) # Minimum shape as stored
# eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'].filled(1.0)) # Minimum shape as stored
eicp = NP.broadcast_to(eicp, self.cPhase.cpinfo['processed'][datapool]['eicp']['mean'].shape) # Broadcast to final shape
eicp = eicp[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)]
result[dpool]['dspec'] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
else:
for key in self.cPhase.cpinfo['processed'][dpool]['eicp']:
eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'][key].data)
# eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'][key].filled(1.0))
eicp = eicp[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)]
if dpool == 'prelim':
result['whole']['dspec'][key] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
else:
result[dpool]['dspec'][key] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
result['lag_kernel'] = DSP.FT1D(NP.pad(flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
self.cPhaseDS = result
if resample:
result_resampled = copy.deepcopy(result)
downsample_factor = NP.min((self.f.size + npad) * self.df / bw_eff)
result_resampled['lags'] = DSP.downsampler(result_resampled['lags'], downsample_factor, axis=-1, method='interp', kind='linear')
result_resampled['lag_kernel'] = DSP.downsampler(result_resampled['lag_kernel'], downsample_factor, axis=-1, method='interp', kind='linear')
for dpool in ['errinfo', 'prelim', 'submodel', 'residual']:
if dpool.lower() == 'errinfo':
for diffind in self.cPhase.cpinfo[dpool]['eicp_diff']:
for key in self.cPhase.cpinfo[dpool]['eicp_diff'][diffind]:
result_resampled[dpool]['dspec'+diffind][key] = DSP.downsampler(result_resampled[dpool]['dspec'+diffind][key], downsample_factor, axis=-1, method='FFT')
if dpool in self.cPhase.cpinfo['processed']:
if dpool == 'submodel':
result_resampled[dpool]['dspec'] = DSP.downsampler(result_resampled[dpool]['dspec'], downsample_factor, axis=-1, method='FFT')
else:
for key in self.cPhase.cpinfo['processed'][datapool]['eicp']:
if dpool == 'prelim':
result_resampled['whole']['dspec'][key] = DSP.downsampler(result_resampled['whole']['dspec'][key], downsample_factor, axis=-1, method='FFT')
else:
result_resampled[dpool]['dspec'][key] = DSP.downsampler(result_resampled[dpool]['dspec'][key], downsample_factor, axis=-1, method='FFT')
self.cPhaseDS_resampled = result_resampled
return result_resampled
else:
return result
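# A minimal usage sketch (hypothetical call, for illustration only), assuming
# this method is the FT() member referenced in the docstrings below and that
# `cpDS` is an instance of this class holding processed closure phases:
#
#     ds_info = cpDS.FT(bw_eff, freq_center=None, shape='bhw', fftpow=2,
#                       pad=1.0, datapool='prelim', method='fft',
#                       resample=True, apply_flags=True)
#
# The argument names mirror the variables used in this method body; the values
# shown are placeholders, not recommendations.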
############################################################################
def subset(self, selection=None):
"""
------------------------------------------------------------------------
Return triad and time indices to select a subset of processed data
Inputs:
selection [NoneType or dictionary] Selection parameters based on which
triad, LST, and day indices will be returned. If set to None
(default), all triad, LST, and day indices will be returned.
Otherwise it must be a dictionary with the following keys
and values:
'triads' [NoneType or list of 3-element tuples] If set
to None (default), indices of all triads are
returned. Otherwise, the specific triads must
be specified such as [(1,2,3), (1,2,4), ...]
and their indices will be returned
'lst' [NoneType, list or numpy array] If set to None
(default), indices of all LST are returned.
Otherwise must be a list or numpy array
containing indices to LST.
'days' [NoneType, list or numpy array] If set to None
(default), indices of all days are returned.
Otherwise must be a list or numpy array
containing indices to days.
Outputs:
Tuple (triad_ind, lst_ind, day_ind, day_ind_eicpdiff) containing the
triad, LST, day, and day-pair (for subsample differences) indices,
each as a numpy array
------------------------------------------------------------------------
"""
if selection is None:
selection = {}
else:
if not isinstance(selection, dict):
raise TypeError('Input selection must be a dictionary')
triads = list(map(tuple, self.cPhase.cpinfo['raw']['triads']))
if 'triads' not in selection:
selection['triads'] = triads
if selection['triads'] is None:
selection['triads'] = triads
triad_ind = [triads.index(triad) for triad in selection['triads']]
triad_ind = NP.asarray(triad_ind)
lst_ind = None
if 'lst' not in selection:
if 'prelim' in self.cPhase.cpinfo['processed']:
lst_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0])
else:
if selection['lst'] is None:
if 'prelim' in self.cPhase.cpinfo['processed']:
lst_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0])
elif isinstance(selection['lst'], (list,NP.ndarray)):
if 'prelim' in self.cPhase.cpinfo['processed']:
lst_ind = selection['lst']
if NP.any(NP.logical_or(lst_ind < 0, lst_ind >= self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0])):
raise ValueError('Input processed lst indices out of bounds')
else:
raise TypeError('Wrong type for processed lst indices')
if lst_ind is None:
raise ValueError('LST index selection could not be performed')
day_ind = None
day_ind_eicpdiff = None
if 'days' not in selection:
if 'prelim' in self.cPhase.cpinfo['processed']:
day_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1])
if 'errinfo' in self.cPhase.cpinfo:
day_ind_eicpdiff = NP.arange(len(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs']))
else:
if selection['days'] is None:
if 'prelim' in self.cPhase.cpinfo['processed']:
day_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1])
if 'errinfo' in self.cPhase.cpinfo:
day_ind_eicpdiff = NP.arange(len(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs']))
elif isinstance(selection['days'], (list,NP.ndarray)):
if 'prelim' in self.cPhase.cpinfo['processed']:
day_ind = selection['days']
if NP.any(NP.logical_or(day_ind < 0, day_ind >= self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1])):
raise ValueError('Input processed day indices out of bounds')
if 'errinfo' in self.cPhase.cpinfo:
day_ind_eicpdiff = [i for i,item in enumerate(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs']) if len(set(item)-set(selection['days']))==0]
else:
raise TypeError('Wrong type for processed day indices')
if day_ind is None:
raise ValueError('Day index selection could not be performed')
return (triad_ind, lst_ind, day_ind, day_ind_eicpdiff)
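# A minimal usage sketch (hypothetical triad labels, for illustration only),
# assuming `cps` is an instance of this class:
#
#     selection = {'triads': [(0,1,2), (1,2,3)], 'lst': NP.arange(10),
#                  'days': None}
#     triad_ind, lst_ind, day_ind, day_ind_eicpdiff = cps.subset(selection=selection)
#
# The triad tuples must match entries in cpinfo['raw']['triads'] for their
# indices to be found.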
############################################################################
def compute_power_spectrum(self, cpds=None, selection=None, autoinfo=None,
xinfo=None, cosmo=cosmo100, units='K', beamparms=None):
"""
------------------------------------------------------------------------
Compute power spectrum of closure phase data. Comoving length scales are in units of Mpc/h
Inputs:
cpds [dictionary] A dictionary that contains the 'oversampled' (if
resample=False) and/or 'resampled' (if resample=True) delay
spectrum information. If it is not specified the attributes
cPhaseDS['processed'] and cPhaseDS_resampled['processed'] are
used. Under each of these keys, it holds a dictionary that has
the following keys and values:
'freq_center' [numpy array] contains the center frequencies
(in Hz) of the frequency subbands of the subband
delay spectra. It is of size n_win. It is
roughly equivalent to redshift(s)
'freq_wts' [numpy array] Contains frequency weights applied
on each frequency sub-band during the subband
delay transform. It is of size n_win x nchan.
'bw_eff' [numpy array] contains the effective bandwidths
(in Hz) of the subbands being delay transformed.
It is of size n_win. It is roughly equivalent to
width in redshift or along line-of-sight
'shape' [string] shape of the window function applied.
Accepted values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow' [scalar] the power to which the FFT of the window
was raised. The value must be a positive scalar
with default = 1.0
'npad' [scalar] Number of zero-padded channels before
performing the subband delay transform.
'lags' [numpy array] lags of the subband delay spectra
after padding in frequency during the transform.
It is of size nlags. The lags roughly correspond
to k_parallel.
'lag_kernel' [numpy array] delay transform of the frequency
weights under the key 'freq_wts'. It is of size
n_bl x n_win x nlags x n_t.
'lag_corr_length'
[numpy array] It is the correlation timescale
(in pixels) of the subband delay spectra. It is
proportional to inverse of effective bandwidth.
It is of size n_win. The unit size of a pixel is
determined by the difference between adjacent
pixels in lags under key 'lags' which in turn is
effectively inverse of the effective bandwidth
of the subband specified in bw_eff
'processed' [dictionary] Contains the following keys and
values:
'dspec' [dictionary] Contains the following keys
and values:
'twts' [numpy array] Weights from
time-based flags that went into
time-averaging.
Shape=(ntriads,npol,nchan,nt)
'mean' [numpy array] Delay spectrum of
closure phases based on their
mean across time intervals.
Shape=(nspw,npol,nt,ntriads,nlags)
'median'
[numpy array] Delay spectrum of
closure phases based on their
median across time intervals.
Shape=(nspw,npol,nt,ntriads,nlags)
selection [NoneType or dictionary] Selection parameters based on which
triad, LST, and day indices will be returned. If set to None
(default), all triad, LST, and day indices will be returned.
Otherwise it must be a dictionary with the following keys
and values:
'triads' [NoneType or list of 3-element tuples] If set
to None (default), indices of all triads are
returned. Otherwise, the specific triads must
be specified such as [(1,2,3), (1,2,4), ...]
and their indices will be returned
'lst' [NoneType, list or numpy array] If set to None
(default), indices of all LST are returned.
Otherwise must be a list or numpy array
containing indices to LST.
'days' [NoneType, list or numpy array] If set to None
(default), indices of all days are returned.
Otherwise must be a list or numpy array
containing indices to days.
autoinfo
[NoneType or dictionary] Specifies parameters for processing
before power spectrum in auto or cross modes. If set to None,
a dictionary will be created with the default values as
described below. The dictionary must have the following keys
and values:
'axes' [NoneType/int/list/tuple/numpy array] Axes that will
be averaged coherently before squaring (for auto) or
cross-multiplying (for cross) power spectrum. If set
to None (default), no axes are averaged coherently.
If set to int, list, tuple or numpy array, those axes
will be averaged coherently after applying the weights
specified under key 'wts' along those axes. 1=lst,
2=days, 3=triads.
'wts' [NoneType/list/numpy array] If not provided (equivalent
to setting it to None) or set to None (default), it is
set to a one element list which is a one element numpy
array of unity. Otherwise, it must be a list of same
number of elements as in key 'axes' and each of these
must be a numpy broadcast compatible array corresponding
to each of the axis specified in 'axes'
xinfo [NoneType or dictionary] Specifies parameters for processing
cross power spectrum. If set to None, a dictionary will be
created with the default values as described below. The
dictionary must have the following keys and values:
'axes' [NoneType/int/list/tuple/numpy array] Axes over which
power spectrum will be computed incoherently by cross-
multiplication. If set to None (default), no cross-
power spectrum is computed. If set to int, list, tuple
or numpy array, cross-power over those axes will be
computed incoherently by cross-multiplication. The
cross-spectrum over these axes will be computed after
applying the pre- and post- cross-multiplication
weights specified in key 'wts'. 1=lst, 2=days,
3=triads.
'collapse_axes'
[list] The axes that will be collapsed after the
cross-power matrix is produced by cross-multiplication.
If this key is not set, it will be initialized to an
empty list (default), in which case none of the axes
is collapsed and the full cross-power matrix will be
output. it must be a subset of values under key 'axes'.
This will reduce it from a square matrix along that axis
to collapsed values along each of the leading diagonals.
1=lst, 2=days, 3=triads.
'dlst' [scalar] LST interval (in mins) or difference between LST
pairs which will be determined and used for
cross-power spectrum. Will only apply if values under
'axes' contains the LST axis(=1).
'dlst_range'
[scalar, numpy array, or NoneType] Specifies the LST
difference(s) in minutes that are to be used in the
computation of cross-power spectra. If a scalar, only
the diagonal consisting of pairs with that LST
difference will be computed. If a numpy array, those
diagonals consisting of pairs with that LST difference
will be computed. If set to None (default), the main
diagonal (LST difference of 0) and the first off-main
diagonal (LST difference of 1 unit) corresponding to
pairs with 0 and 1 unit LST difference are computed.
Applies only if key 'axes' contains LST axis (=1).
'avgcov'
[boolean] It specifies whether the collapsed square
covariance matrix is to be averaged further to a single
number after applying 'postX' weights. If not set or
set to False (default), this late stage collapse will
not be performed. Otherwise, it will be averaged in a
weighted average sense where the 'postX' weights would
have already been applied during the collapsing
operation
'wts' [NoneType or Dictionary] If not set, a default
dictionary (see default values below) will be created.
It must have the following keys and values:
'preX' [list of numpy arrays] It contains pre-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
is set None, 'preX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'preXnorm'
[boolean] If False (default), no normalization
is done after the application of weights. If
set to True, the delay spectrum will be
normalized by the sum of the weights.
'postX' [list of numpy arrays] It contains post-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
is set None, 'postX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'postXnorm'
[boolean] If False (default), no normalization
is done after the application of postX weights.
If set to True, the delay cross power spectrum
will be normalized by the sum of the weights.
cosmo [instance of cosmology class from astropy] An instance of class
FLRW or default_cosmology of astropy cosmology module. Default
uses Planck 2015 cosmology, with H0=100 h km/s/Mpc
units [string] Specifies the units of output power spectrum. Accepted
values are 'Jy' and 'K' (default) and the power spectrum will
be in corresponding squared units.
Output:
Dictionary with the keys 'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array),
'days' ((ndays,) array), 'day_ind' ((ndays,) array), 'dday'
((ndays,) array), 'oversampled' and 'resampled' corresponding to whether
resample was set to False or True in call to member function FT().
Values under keys 'triads_ind' and 'lst_ind' are numpy array
corresponding to triad and time indices used in selecting the data.
Values under keys 'oversampled' and 'resampled' each contain a
dictionary with the following keys and values:
'z' [numpy array] Redshifts corresponding to the band centers in
'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,).
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to
'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz) of the
frequency subbands of the subband delay spectra. It is of size
n_win. It is roughly equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform. It is
of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz) of the
subbands being delay transformed. It is of size n_win. It is
roughly equivalent to width in redshift or along line-of-sight
'shape' [string] shape of the frequency window function applied. Usual
values are 'rect' (rectangular), 'bhw' (Blackman-Harris),
'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was raised.
The value must be a positive scalar with default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in pixels) of
the subband delay spectra. It is proportional to inverse of
effective bandwidth. It is of size n_win. The unit size of a
pixel is determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is effectively inverse
of the effective bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and 'residual'
each of which is a dictionary. 'whole' contains power spectrum info
about the input closure phases. 'submodel' contains power spectrum info
about the model that will have been subtracted (as closure phase) from
the 'whole' model. 'residual' contains power spectrum info about the
closure phases obtained as a difference between 'whole' and 'submodel'.
It contains the following keys and values:
'mean' [numpy array] Delay power spectrum incoherently estimated over
the axes specified in xinfo['axes'] using the 'mean' key in input
cpds or attribute cPhaseDS['processed']['dspec']. It has shape
that depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not set,
those axes will be replaced with square covariance matrices. If
collapse_axes is provided but avgcov is False, those axes will be
of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged over
the axes specified in incohax using the 'median' key in input
cpds or attribute cPhaseDS['processed']['dspec']. It has shape
that depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not set,
those axes will be replaced with square covariance matrices. If
collapse_axes is provided but avgcov is False, those axes will be
of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal offsets for
those axes. If 'avgcov' was set, those entries will be removed
from 'diagoffsets' since all the leading diagonal elements have
been collapsed (averaged) further. Value under each key is a
numpy array where each element in the array corresponds to the
index of that leading diagonal. This should match the size of
the output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in collapse_axes and
the value is a numpy array of weights corresponding to the
diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated but is
not collapsed, the number of dimensions in the output will have
changed. This parameter tracks where the original axis is now
placed. The keys are the original axes that are involved in
incoherent cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the power
spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the power
spectrum
Examples:
(1)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': None, 'avgcov': False, 'collapse_axes': [],
'wts':{'preX': None, 'preXnorm': False,
'postX': None, 'postXnorm': False}}
Output delay power spectrum has shape (Nspw, Nlst, 1, Ntriads, Nlags)
(2)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [],
'wts':{'preX': None, 'preXnorm': False,
'postX': None, 'postXnorm': False},
'dlst_range': None}
Output delay power spectrum has shape
(Nspw, 2, Nlst, 1, Ntriads, Ntriads, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range)},
axesmap = {1: [1,2], 3: [4,5]}
(3)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [3],
'dlst_range': [0.0, 1.0, 2.0]}
Output delay power spectrum has shape
(Nspw, 3, Nlst, 1, 2*Ntriads-1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range),
3: NP.arange(-Ntriads,Ntriads)},
axesmap = {1: [1,2], 3: [4]}
(4)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [1,3],
'dlst_range': [1.0, 2.0, 3.0, 4.0]}
Output delay power spectrum has shape
(Nspw, 4, Ndays, 2*Ntriads-1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range),
3: NP.arange(-Ntriads,Ntriads)},
axesmap = {1: [1], 3: [3]}
(5)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': [3],
'dlst_range': None}
Output delay power spectrum has shape
(Nspw, 2, Nlst, Ndays, 1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range)}, axesmap = {1: [1,2], 3: [4]}
(6)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': []}
Output delay power spectrum has shape
(Nspw, 1, Ndays, 1, Nlags)
diagoffsets = {}, axesmap = {1: [1], 3: [3]}
------------------------------------------------------------------------
"""
if not isinstance(units,str):
raise TypeError('Input parameter units must be a string')
if units.lower() == 'k':
if not isinstance(beamparms, dict):
raise TypeError('Input beamparms must be a dictionary')
if 'freqs' not in beamparms:
beamparms['freqs'] = self.f
beamparms_orig = copy.deepcopy(beamparms)
if autoinfo is None:
autoinfo = {'axes': None, 'wts': [NP.ones(1, dtype=NP.float)]}
elif not isinstance(autoinfo, dict):
raise TypeError('Input autoinfo must be a dictionary')
if 'axes' not in autoinfo:
autoinfo['axes'] = None
else:
if autoinfo['axes'] is not None:
if not isinstance(autoinfo['axes'], (list,tuple,NP.ndarray,int)):
raise TypeError('Value under key axes in input autoinfo must be an integer, list, tuple or numpy array')
else:
autoinfo['axes'] = NP.asarray(autoinfo['axes']).reshape(-1)
if 'wts' not in autoinfo:
if autoinfo['axes'] is not None:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] * len(autoinfo['axes'])
else:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)]
else:
if autoinfo['axes'] is not None:
if not isinstance(autoinfo['wts'], list):
raise TypeError('wts in input autoinfo must be a list of numpy arrays')
else:
if len(autoinfo['wts']) != len(autoinfo['axes']):
raise ValueError('Input list of wts must be same as length of autoinfo axes')
else:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)]
if xinfo is None:
xinfo = {'axes': None, 'wts': {'preX': [NP.ones(1, dtype=NP.float)], 'postX': [NP.ones(1, dtype=NP.float)], 'preXnorm': False, 'postXnorm': False}}
elif not isinstance(xinfo, dict):
raise TypeError('Input xinfo must be a dictionary')
if 'axes' not in xinfo:
xinfo['axes'] = None
else:
if not isinstance(xinfo['axes'], (list,tuple,NP.ndarray,int)):
raise TypeError('Value under key axes in input xinfo must be an integer, list, tuple or numpy array')
else:
xinfo['axes'] = NP.asarray(xinfo['axes']).reshape(-1)
if 'wts' not in xinfo:
xinfo['wts'] = {}
for xkey in ['preX', 'postX']:
if xinfo['axes'] is not None:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] * len(xinfo['axes'])
else:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)]
xinfo['wts']['preXnorm'] = False
xinfo['wts']['postXnorm'] = False
else:
if xinfo['axes'] is not None:
if not isinstance(xinfo['wts'], dict):
raise TypeError('wts in input xinfo must be a dictionary')
for xkey in ['preX', 'postX']:
if not isinstance(xinfo['wts'][xkey], list):
raise TypeError('{0} wts in input xinfo must be a list of numpy arrays'.format(xkey))
else:
if len(xinfo['wts'][xkey]) != len(xinfo['axes']):
raise ValueError('Input list of {0} wts must be same as length of xinfo axes'.format(xkey))
else:
for xkey in ['preX', 'postX']:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)]
if 'preXnorm' not in xinfo['wts']:
xinfo['wts']['preXnorm'] = False
if 'postXnorm' not in xinfo['wts']:
xinfo['wts']['postXnorm'] = False
if not isinstance(xinfo['wts']['preXnorm'], NP.bool):
raise TypeError('preXnorm in input xinfo must be a boolean')
if not isinstance(xinfo['wts']['postXnorm'], NP.bool):
raise TypeError('postXnorm in input xinfo must be a boolean')
if 'avgcov' not in xinfo:
xinfo['avgcov'] = False
if not isinstance(xinfo['avgcov'], NP.bool):
raise TypeError('avgcov under input xinfo must be boolean')
if 'collapse_axes' not in xinfo:
xinfo['collapse_axes'] = []
if not isinstance(xinfo['collapse_axes'], (int,list,tuple,NP.ndarray)):
raise TypeError('collapse_axes under input xinfo must be an integer, tuple, list or numpy array')
else:
xinfo['collapse_axes'] = NP.asarray(xinfo['collapse_axes']).reshape(-1)
if (autoinfo['axes'] is not None) and (xinfo['axes'] is not None):
if NP.intersect1d(autoinfo['axes'], xinfo['axes']).size > 0:
raise ValueError("Inputs autoinfo['axes'] and xinfo['axes'] must have no intersection")
cohax = autoinfo['axes']
if cohax is None:
cohax = []
incohax = xinfo['axes']
if incohax is None:
incohax = []
if selection is None:
selection = {'triads': None, 'lst': None, 'days': None}
else:
if not isinstance(selection, dict):
raise TypeError('Input selection must be a dictionary')
if cpds is None:
cpds = {}
sampling = ['oversampled', 'resampled']
for smplng in sampling:
if smplng == 'oversampled':
cpds[smplng] = copy.deepcopy(self.cPhaseDS)
else:
cpds[smplng] = copy.deepcopy(self.cPhaseDS_resampled)
triad_ind, lst_ind, day_ind, day_ind_eicpdiff = self.subset(selection=selection)
result = {'triads': self.cPhase.cpinfo['raw']['triads'][triad_ind], 'triads_ind': triad_ind, 'lst': self.cPhase.cpinfo['processed']['prelim']['lstbins'][lst_ind], 'lst_ind': lst_ind, 'dlst': self.cPhase.cpinfo['processed']['prelim']['dlstbins'][lst_ind], 'days': self.cPhase.cpinfo['processed']['prelim']['daybins'][day_ind], 'day_ind': day_ind, 'dday': self.cPhase.cpinfo['processed']['prelim']['diff_dbins'][day_ind]}
dlstbin = NP.mean(self.cPhase.cpinfo['processed']['prelim']['dlstbins'])
if 'dlst_range' in xinfo:
if xinfo['dlst_range'] is None:
dlst_range = None
lstshifts = NP.arange(2) # LST index offsets of 0 and 1 are only estimated
else:
dlst_range = NP.asarray(xinfo['dlst_range']).ravel() / 60.0 # Difference in LST between a pair of LST (in hours)
if dlst_range.size == 1:
dlst_range = NP.insert(dlst_range, 0, 0.0)
lstshifts = NP.arange(max([0, NP.ceil(1.0*dlst_range.min()/dlstbin).astype(NP.int)]), min([NP.ceil(1.0*dlst_range.max()/dlstbin).astype(NP.int), result['lst'].size]))
else:
dlst_range = None
lstshifts = NP.arange(2) # LST index offsets of 0 and 1 are only estimated
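# lstshifts holds the integer LST-bin offsets (in units of the mean LST bin
# width dlstbin) at which the LST-LST covariance diagonals will be evaluated.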
result['lstXoffsets'] = lstshifts * dlstbin # LST interval corresponding to diagonal offsets created by the LST covariance
for smplng in sampling:
result[smplng] = {}
wl = FCNST.c / (cpds[smplng]['freq_center'] * U.Hz)
z = CNST.rest_freq_HI / cpds[smplng]['freq_center'] - 1
dz = CNST.rest_freq_HI / cpds[smplng]['freq_center']**2 * cpds[smplng]['bw_eff']
dkprll_deta = DS.dkprll_deta(z, cosmo=cosmo)
kprll = dkprll_deta.reshape(-1,1) * cpds[smplng]['lags']
rz_los = cosmo.comoving_distance(z) # in Mpc/h
drz_los = FCNST.c * cpds[smplng]['bw_eff']*U.Hz * (1+z)**2 / (CNST.rest_freq_HI * U.Hz) / (cosmo.H0 * cosmo.efunc(z)) # in Mpc/h
if units == 'Jy':
jacobian1 = 1 / (cpds[smplng]['bw_eff'] * U.Hz)
jacobian2 = drz_los / (cpds[smplng]['bw_eff'] * U.Hz)
temperature_from_fluxdensity = 1.0
elif units == 'K':
beamparms = copy.deepcopy(beamparms_orig)
omega_bw = self.beam3Dvol(beamparms, freq_wts=cpds[smplng]['freq_wts'])
jacobian1 = 1 / (omega_bw * U.Hz) # The steradian is present but not explicitly assigned
jacobian2 = rz_los**2 * drz_los / (cpds[smplng]['bw_eff'] * U.Hz)
temperature_from_fluxdensity = wl**2 / (2*FCNST.k_B)
else:
raise ValueError('Input value for units invalid')
factor = jacobian1 * jacobian2 * temperature_from_fluxdensity**2
result[smplng]['z'] = z
result[smplng]['kprll'] = kprll
result[smplng]['lags'] = NP.copy(cpds[smplng]['lags'])
result[smplng]['freq_center'] = cpds[smplng]['freq_center']
result[smplng]['bw_eff'] = cpds[smplng]['bw_eff']
result[smplng]['shape'] = cpds[smplng]['shape']
result[smplng]['freq_wts'] = cpds[smplng]['freq_wts']
result[smplng]['lag_corr_length'] = cpds[smplng]['lag_corr_length']
for dpool in ['whole', 'submodel', 'residual']:
if dpool in cpds[smplng]:
result[smplng][dpool] = {}
inpshape = list(cpds[smplng]['whole']['dspec']['mean'].shape)
inpshape[1] = lst_ind.size
inpshape[2] = day_ind.size
inpshape[3] = triad_ind.size
if len(cohax) > 0:
nsamples_coh = NP.prod(NP.asarray(inpshape)[NP.asarray(cohax)])
else:
nsamples_coh = 1
if len(incohax) > 0:
nsamples = NP.prod(NP.asarray(inpshape)[NP.asarray(incohax)])
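# The incoherent sample count below is the number of ordered cross-
# multiplication pairs along the incoherent axes, excluding self-products.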
nsamples_incoh = nsamples * (nsamples - 1)
else:
nsamples_incoh = 1
twts_multidim_idx = NP.ix_(lst_ind,day_ind,triad_ind,NP.arange(1)) # shape=(nlst,ndays,ntriads,1)
dspec_multidim_idx = NP.ix_(NP.arange(wl.size),lst_ind,day_ind,triad_ind,NP.arange(inpshape[4])) # shape=(nspw,nlst,ndays,ntriads,nchan)
max_wt_in_chan = NP.max(NP.sum(cpds[smplng]['whole']['dspec']['twts'].data, axis=(0,1,2)))
select_chan = NP.argmax(NP.sum(cpds[smplng]['whole']['dspec']['twts'].data, axis=(0,1,2)))
twts = NP.copy(cpds[smplng]['whole']['dspec']['twts'].data[:,:,:,[select_chan]]) # shape=(nlst,ndays,ntriads,nlags=1)
if nsamples_coh > 1:
awts_shape = tuple(NP.ones(cpds[smplng]['whole']['dspec']['mean'].ndim, dtype=NP.int))
awts = NP.ones(awts_shape, dtype=NP.complex)
awts_shape = NP.asarray(awts_shape)
for caxind,caxis in enumerate(cohax):
curr_awts_shape = NP.copy(awts_shape)
curr_awts_shape[caxis] = -1
awts = awts * autoinfo['wts'][caxind].reshape(tuple(curr_awts_shape))
for stat in ['mean', 'median']:
if dpool == 'submodel':
dspec = NP.copy(cpds[smplng][dpool]['dspec'][dspec_multidim_idx])
else:
dspec = NP.copy(cpds[smplng][dpool]['dspec'][stat][dspec_multidim_idx])
if nsamples_coh > 1:
if stat == 'mean':
dspec = NP.sum(twts[twts_multidim_idx][NP.newaxis,...] * awts * dspec[dspec_multidim_idx], axis=cohax, keepdims=True) / NP.sum(twts[twts_multidim_idx][NP.newaxis,...] * awts, axis=cohax, keepdims=True)
else:
dspec = NP.median(dspec[dspec_multidim_idx], axis=cohax, keepdims=True)
if nsamples_incoh > 1:
expandax_map = {}
wts_shape = tuple(NP.ones(dspec.ndim, dtype=NP.int))
preXwts = NP.ones(wts_shape, dtype=NP.complex)
wts_shape = NP.asarray(wts_shape)
for incaxind,incaxis in enumerate(xinfo['axes']):
curr_wts_shape = NP.copy(wts_shape)
curr_wts_shape[incaxis] = -1
preXwts = preXwts * xinfo['wts']['preX'][incaxind].reshape(tuple(curr_wts_shape))
dspec1 = NP.copy(dspec)
dspec2 = NP.copy(dspec)
preXwts1 = NP.copy(preXwts)
preXwts2 = NP.copy(preXwts)
for incax in NP.sort(incohax)[::-1]:
dspec1 = NP.expand_dims(dspec1, axis=incax)
preXwts1 = NP.expand_dims(preXwts1, axis=incax)
if incax == 1:
preXwts1_outshape = list(preXwts1.shape)
preXwts1_outshape[incax+1] = dspec1.shape[incax+1]
preXwts1_outshape = tuple(preXwts1_outshape)
preXwts1 = NP.broadcast_to(preXwts1, preXwts1_outshape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
preXwts2_tmp = NP.expand_dims(preXwts2, axis=incax)
preXwts2_shape = NP.asarray(preXwts2_tmp.shape)
preXwts2_shape[incax] = lstshifts.size
preXwts2_shape[incax+1] = preXwts1_outshape[incax+1]
preXwts2_shape = tuple(preXwts2_shape)
preXwts2 = NP.broadcast_to(preXwts2_tmp, preXwts2_shape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
dspec2_tmp = NP.expand_dims(dspec2, axis=incax)
dspec2_shape = NP.asarray(dspec2_tmp.shape)
dspec2_shape[incax] = lstshifts.size
# dspec2_shape = NP.insert(dspec2_shape, incax, lstshifts.size)
dspec2_shape = tuple(dspec2_shape)
dspec2 = NP.broadcast_to(dspec2_tmp, dspec2_shape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
for lstshiftind, lstshift in enumerate(lstshifts):
dspec2[:,lstshiftind,...] = NP.roll(dspec2_tmp[:,0,...], lstshift, axis=incax)
dspec2[:,lstshiftind,:lstshift,...] = NP.nan
preXwts2[:,lstshiftind,...] = NP.roll(preXwts2_tmp[:,0,...], lstshift, axis=incax)
preXwts2[:,lstshiftind,:lstshift,...] = NP.nan
else:
dspec2 = NP.expand_dims(dspec2, axis=incax+1)
preXwts2 = NP.expand_dims(preXwts2, axis=incax+1)
expandax_map[incax] = incax + NP.arange(2)
for ekey in expandax_map:
if ekey > incax:
expandax_map[ekey] += 1
result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec1.ndim-1, dtype=NP.int))) * (dspec1*U.Unit('Jy Hz') * preXwts1) * (dspec2*U.Unit('Jy Hz') * preXwts2).conj()
if xinfo['wts']['preXnorm']:
result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(preXwts1 * preXwts2.conj(), axis=tuple(NP.where(NP.logical_or(NP.asarray(preXwts1.shape)>1, NP.asarray(preXwts2.shape)>1))[0]), keepdims=True) # Normalize by summing the weights over the expanded axes
if (len(xinfo['collapse_axes']) > 0) or (xinfo['avgcov']):
# if any one of collapsing of incoherent axes or
# averaging of full covariance is requested
diagoffsets = {} # Stores the correlation index difference along each axis.
diagweights = {} # Stores the number of points summed in the trace along the offset diagonal
for colaxind, colax in enumerate(xinfo['collapse_axes']):
if colax == 1:
shp = NP.ones(dspec.ndim, dtype=NP.int)
shp[colax] = lst_ind.size
multdim_idx = tuple([NP.arange(axdim) for axdim in shp])
diagweights[colax] = NP.sum(NP.logical_not(NP.isnan(dspec[multdim_idx]))) - lstshifts
# diagweights[colax] = result[smplng][dpool][stat].shape[expandax_map[colax][-1]] - lstshifts
if stat == 'mean':
result[smplng][dpool][stat] = NP.nanmean(result[smplng][dpool][stat], axis=expandax_map[colax][-1])
else:
result[smplng][dpool][stat] = NP.nanmedian(result[smplng][dpool][stat], axis=expandax_map[colax][-1])
diagoffsets[colax] = lstshifts
else:
pspec_unit = result[smplng][dpool][stat].si.unit
result[smplng][dpool][stat], offsets, diagwts = OPS.array_trace(result[smplng][dpool][stat].si.value, offsets=None, axis1=expandax_map[colax][0], axis2=expandax_map[colax][1], outaxis='axis1')
diagwts_shape = NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int)
diagwts_shape[expandax_map[colax][0]] = diagwts.size
diagoffsets[colax] = offsets
diagweights[colax] = NP.copy(diagwts)
result[smplng][dpool][stat] = result[smplng][dpool][stat] * pspec_unit / diagwts.reshape(diagwts_shape)
for ekey in expandax_map:
if ekey > colax:
expandax_map[ekey] -= 1
expandax_map[colax] = NP.asarray(expandax_map[colax][0]).ravel()
wts_shape = tuple(NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int))
postXwts = NP.ones(wts_shape, dtype=NP.complex)
wts_shape = NP.asarray(wts_shape)
for colaxind, colax in enumerate(xinfo['collapse_axes']):
curr_wts_shape = NP.copy(wts_shape)
curr_wts_shape[expandax_map[colax]] = -1
postXwts = postXwts * xinfo['wts']['postX'][colaxind].reshape(tuple(curr_wts_shape))
result[smplng][dpool][stat] = result[smplng][dpool][stat] * postXwts
axes_to_sum = tuple(NP.asarray([expandax_map[colax] for colax in xinfo['collapse_axes']]).ravel()) # for post-X normalization and collapse of covariance matrix
if xinfo['wts']['postXnorm']:
result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(postXwts, axis=axes_to_sum, keepdims=True) # Normalize by summing the weights over the collapsed axes
if xinfo['avgcov']:
# collapse the axes further (postXwts have already
# been applied)
diagoffset_weights = 1.0
for colaxind in list(zip(*sorted(zip(NP.arange(xinfo['collapse_axes'].size), xinfo['collapse_axes']), reverse=True)))[0]:
# It is important to sort the collapsable axes in
# reverse order before deleting elements below,
# otherwise the axes ordering may get messed up
diagoffset_weights_shape = NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int)
diagoffset_weights_shape[expandax_map[xinfo['collapse_axes'][colaxind]][0]] = diagweights[xinfo['collapse_axes'][colaxind]].size
diagoffset_weights = diagoffset_weights * diagweights[xinfo['collapse_axes'][colaxind]].reshape(diagoffset_weights_shape)
del diagoffsets[xinfo['collapse_axes'][colaxind]]
result[smplng][dpool][stat] = NP.nansum(result[smplng][dpool][stat]*diagoffset_weights, axis=axes_to_sum, keepdims=True) / NP.nansum(diagoffset_weights, axis=axes_to_sum, keepdims=True)
else:
result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec.ndim-1, dtype=NP.int))) * NP.abs(dspec * U.Jy)**2
diagoffsets = {}
expandax_map = {}
if units == 'Jy':
result[smplng][dpool][stat] = result[smplng][dpool][stat].to('Jy2 Mpc')
elif units == 'K':
result[smplng][dpool][stat] = result[smplng][dpool][stat].to('K2 Mpc3')
else:
raise ValueError('Input value for units invalid')
result[smplng][dpool]['diagoffsets'] = diagoffsets
result[smplng][dpool]['diagweights'] = diagweights
result[smplng][dpool]['axesmap'] = expandax_map
result[smplng][dpool]['nsamples_incoh'] = nsamples_incoh
result[smplng][dpool]['nsamples_coh'] = nsamples_coh
return result
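# A minimal usage sketch (hypothetical inputs, for illustration only), assuming
# `cps` is an instance of this class on which FT() has already been called and
# `beamparms` is a dictionary accepted by beam3Dvol():
#
#     autoinfo = {'axes': 2, 'wts': None}
#     xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [3],
#              'dlst_range': None}
#     xcpdps = cps.compute_power_spectrum(selection=None, autoinfo=autoinfo,
#                                         xinfo=xinfo, units='K',
#                                         beamparms=beamparms)
#
# See the Examples in the docstring above for the resulting array shapes.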
############################################################################
def compute_power_spectrum_uncertainty(self, cpds=None, selection=None,
autoinfo=None,xinfo=None,
cosmo=cosmo100, units='K',
beamparms=None):
"""
------------------------------------------------------------------------
Compute uncertainty in the power spectrum of closure phase data. Comoving
length scales are in units of Mpc/h
Inputs:
cpds [dictionary] A dictionary that contains the 'oversampled' (if
resample=False) and/or 'resampled' (if resample=True) delay
spectrum information on the key 'errinfo'. If it is not
specified the attributes cPhaseDS['errinfo'] and
cPhaseDS_resampled['errinfo'] are used. Under each of these
sampling keys, it holds a dictionary that has the following
keys and values:
'freq_center' [numpy array] contains the center frequencies
(in Hz) of the frequency subbands of the subband
delay spectra. It is of size n_win. It is
roughly equivalent to redshift(s)
'freq_wts' [numpy array] Contains frequency weights applied
on each frequency sub-band during the subband
delay transform. It is of size n_win x nchan.
'bw_eff' [numpy array] contains the effective bandwidths
(in Hz) of the subbands being delay transformed.
It is of size n_win. It is roughly equivalent to
width in redshift or along line-of-sight
'shape' [string] shape of the window function applied.
Accepted values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow' [scalar] the power to which the FFT of the window
was raised. The value must be a positive scalar
with default = 1.0
'npad' [scalar] Number of zero-padded channels before
performing the subband delay transform.
'lags' [numpy array] lags of the subband delay spectra
after padding in frequency during the transform.
It is of size nlags. The lags roughly correspond
to k_parallel.
'lag_kernel' [numpy array] delay transform of the frequency
weights under the key 'freq_wts'. It is of size
n_bl x n_win x nlags x n_t.
'lag_corr_length'
[numpy array] It is the correlation timescale
(in pixels) of the subband delay spectra. It is
proportional to inverse of effective bandwidth.
It is of size n_win. The unit size of a pixel is
determined by the difference between adjacent
pixels in lags under key 'lags' which in turn is
effectively inverse of the effective bandwidth
of the subband specified in bw_eff
'errinfo' [dictionary] It has two keys 'dspec0' and
'dspec1' each of which are dictionaries with
the following keys and values:
'twts' [numpy array] Weights for the subsample
difference. It is of shape (nlst, ndays,
ntriads, nchan)
'mean' [numpy array] Delay spectrum of the
subsample difference obtained by using
the mean statistic. It is of shape
(nspw, nlst, ndays, ntriads, nlags)
'median'
[numpy array] Delay spectrum of the
subsample difference obtained by using
the median statistic. It is of shape
(nspw, nlst, ndays, ntriads, nlags)
selection [NoneType or dictionary] Selection parameters based on which
triad, LST, and day indices will be returned. If set to None
(default), all triad, LST, and day indices will be returned.
Otherwise it must be a dictionary with the following keys
and values:
'triads' [NoneType or list of 3-element tuples] If set
to None (default), indices of all triads are
returned. Otherwise, the specific triads must
be specified such as [(1,2,3), (1,2,4), ...]
and their indices will be returned
'lst' [NoneType, list or numpy array] If set to None
(default), indices of all LST are returned.
Otherwise must be a list or numpy array
containing indices to LST.
'days' [NoneType, list or numpy array] If set to None
(default), indices of all days are returned.
Otherwise must be a list or numpy array
containing indices to days.
autoinfo
[NoneType or dictionary] Specifies parameters for processing
before power spectrum in auto or cross modes. If set to None,
a dictionary will be created with the default values as
described below. The dictionary must have the following keys
and values:
'axes' [NoneType/int/list/tuple/numpy array] Axes that will
be averaged coherently before squaring (for auto) or
cross-multiplying (for cross) power spectrum. If set
to None (default), no axes are averaged coherently.
If set to int, list, tuple or numpy array, those axes
will be averaged coherently after applying the weights
specified under key 'wts' along those axes. 1=lst,
3=triads. Value of 2 for axes is not allowed since
that denotes repeated days and it is along this axis
that cross-power is computed regardless.
'wts' [NoneType/list/numpy array] If not provided (equivalent
to setting it to None) or set to None (default), it is
set to a one element list which is a one element numpy
array of unity. Otherwise, it must be a list of same
number of elements as in key 'axes' and each of these
must be a numpy broadcast compatible array corresponding
to each of the axis specified in 'axes'
xinfo [NoneType or dictionary] Specifies parameters for processing
cross power spectrum. If set to None, a dictionary will be
created with the default values as described below. The
dictionary must have the following keys and values:
'axes' [NoneType/int/list/tuple/numpy array] Axes over which
power spectrum will be computed incoherently by cross-
multiplication. If set to None (default), no cross-
power spectrum is computed. If set to int, list, tuple
or numpy array, cross-power over those axes will be
computed incoherently by cross-multiplication. The
cross-spectrum over these axes will be computed after
applying the pre- and post- cross-multiplication
weights specified in key 'wts'. 1=lst, 3=triads. Value
of 2 for axes is not allowed since that denotes
repeated days and it is along this axis that
cross-power is computed regardless.
'collapse_axes'
[list] The axes that will be collapsed after the
cross-power matrix is produced by cross-multiplication.
If this key is not set, it will be initialized to an
empty list (default), in which case none of the axes
is collapsed and the full cross-power matrix will be
output. it must be a subset of values under key 'axes'.
This will reduce it from a square matrix along that axis
to collapsed values along each of the leading diagonals.
1=lst, 3=triads.
'dlst' [scalar] LST interval (in mins) or difference between LST
pairs which will be determined and used for
cross-power spectrum. Will only apply if values under
'axes' contains the LST axis(=1).
'dlst_range'
[scalar, numpy array, or NoneType] Specifies the LST
difference(s) in minutes that are to be used in the
computation of cross-power spectra. If a scalar, only
the diagonal consisting of pairs with that LST
difference will be computed. If a numpy array, those
diagonals consisting of pairs with that LST difference
will be computed. If set to None (default), the main
diagonal (LST difference of 0) and the first off-main
diagonal (LST difference of 1 unit) corresponding to
pairs with 0 and 1 unit LST difference are computed.
Applies only if key 'axes' contains LST axis (=1).
'avgcov'
[boolean] It specifies whether the collapsed square
covariance matrix is to be averaged further to a single
number after applying 'postX' weights. If not set or
set to False (default), this late stage collapse will
not be performed. Otherwise, it will be averaged in a
weighted average sense where the 'postX' weights would
have already been applied during the collapsing
operation
'wts' [NoneType or Dictionary] If not set, a default
dictionary (see default values below) will be created.
It must have the following keys and values:
'preX' [list of numpy arrays] It contains pre-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
is set None, 'preX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'preXnorm'
[boolean] If False (default), no normalization
is done after the application of weights. If
set to True, the delay spectrum will be
normalized by the sum of the weights.
'postX' [list of numpy arrays] It contains post-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
is set None, 'postX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'postXnorm'
[boolean] If False (default), no normalization
is done after the application of postX weights.
If set to True, the delay cross power spectrum
will be normalized by the sum of the weights.
cosmo [instance of cosmology class from astropy] An instance of class
FLRW or default_cosmology of astropy cosmology module. Default
uses Planck 2015 cosmology, with H0=100 h km/s/Mpc
units [string] Specifies the units of output power spectrum. Accepted
values are 'Jy' and 'K' (default) and the power spectrum will
be in corresponding squared units.
Output:
Dictionary with the keys 'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array),
'days' ((ndaycomb,) array), 'day_ind' ((ndaycomb,) array), 'dday'
((ndaycomb,) array), 'oversampled' and 'resampled' corresponding to
whether resample was set to False or True in call to member function
FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy array
corresponding to triad and time indices used in selecting the data.
Values under keys 'oversampled' and 'resampled' each contain a
dictionary with the following keys and values:
'z' [numpy array] Redshifts corresponding to the band centers in
'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,).
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to
'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz) of the
frequency subbands of the subband delay spectra. It is of size
n_win. It is roughly equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform. It is
of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz) of the
subbands being delay transformed. It is of size n_win. It is
roughly equivalent to width in redshift or along line-of-sight
'shape' [string] shape of the frequency window function applied. Usual
values are 'rect' (rectangular), 'bhw' (Blackman-Harris),
'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was raised.
The value must be a positive scalar with default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in pixels) of
the subband delay spectra. It is proportional to inverse of
effective bandwidth. It is of size n_win. The unit size of a
pixel is determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is effectively inverse
of the effective bandwidth of the subband specified in bw_eff
It further contains a key named 'errinfo' which is a dictionary. It
contains information about power spectrum uncertainties obtained from
subsample differences. It contains the following keys and values:
'mean' [numpy array] Delay power spectrum uncertainties incoherently
estimated over the axes specified in xinfo['axes'] using the
'mean' key in input cpds or attribute
cPhaseDS['errinfo']['dspec']. It has shape that depends on the
combination of input parameters. See examples below. If both
collapse_axes and avgcov are not set, those axes will be
replaced with square covariance matrices. If collapse_axes is
provided but avgcov is False, those axes will be of shape
2*Naxis-1.
'median'
[numpy array] Delay power spectrum uncertainties incoherently
averaged over the axes specified in incohax using the 'median'
key in input cpds or attribute cPhaseDS['errinfo']['dspec'].
It has shape that depends on the combination of input
parameters. See examples below. If both collapse_axes and
avgcov are not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal offsets for
those axes. If 'avgcov' was set, those entries will be removed
from 'diagoffsets' since all the leading diagonal elements have
been collapsed (averaged) further. Value under each key is a
numpy array where each element in the array corresponds to the
index of that leading diagonal. This should match the size of
the output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in collapse_axes and
the value is a numpy array of weights corresponding to the
diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated but is
not collapsed, the number of dimensions in the output will have
changed. This parameter tracks where the original axis is now
placed. The keys are the original axes that are involved in
incoherent cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the power
spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the power
spectrum
Examples:
(1)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': None, 'avgcov': False, 'collapse_axes': [],
'wts':{'preX': None, 'preXnorm': False,
'postX': None, 'postXnorm': False}}
This will not do anything because axes cannot include value 2 which
denotes the 'days' axis and the uncertainties are obtained through
subsample differencing along days axis regardless.
Output delay power spectrum has shape (Nspw, Nlst, Ndaycomb, Ntriads,
Nlags)
(2)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [],
'wts':{'preX': None, 'preXnorm': False,
'postX': None, 'postXnorm': False},
'dlst_range': None}
This will not do anything about coherent averaging along axis=2 because
axes cannot include value 2 which denotes the 'days' axis and the
uncertainties are obtained through subsample differencing along days
axis regardless.
Output delay power spectrum has shape
(Nspw, 2, Nlst, Ndaycomb, Ntriads, Ntriads, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range)},
axesmap = {1: [1,2], 3: [4,5]}
(3)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [3],
'dlst_range': [0.0, 1.0, 2.0]}
This will not do anything about coherent averaging along axis=2 because
axes cannot include the value 2, which denotes the 'days' axis, and the
uncertainties are obtained through subsample differencing along the days
axis regardless.
Output delay power spectrum has shape
(Nspw, 3, Nlst, 1, 2*Ntriads-1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range),
3: NP.arange(-Ntriads,Ntriads)},
axesmap = {1: [1,2], 3: [4]}
(4)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [1,3],
'dlst_range': [1.0, 2.0, 3.0, 4.0]}
Output delay power spectrum has shape
(Nspw, 4, Ndaycomb, 2*Ntriads-1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range),
3: NP.arange(-Ntriads,Ntriads)},
axesmap = {1: [1], 3: [3]}
(5)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': [3],
'dlst_range': None}
Output delay power spectrum has shape
(Nspw, 2, Nlst, Ndays, 1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range)}, axesmap = {1: [1,2], 3: [4]}
(6)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': []}
Output delay power spectrum has shape
(Nspw, 1, Ndays, 1, Nlags)
diagoffsets = {}, axesmap = {1: [1], 3: [3]}
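    Added illustrative note (not one of the original examples): the entries
    under 'diagoffsets' and 'axesmap' locate a given correlation offset in
    the output. Assuming a hypothetical output dictionary named xcpdps with
    the structure of example (3) above, the zero-offset (auto) term along
    the collapsed triad axis could be picked out roughly as
        ps = xcpdps['resampled']['errinfo']['mean']
        ind0 = NP.where(xcpdps['resampled']['errinfo']['diagoffsets'][3] == 0)[0][0]
        ps_triad_auto = ps[:,:,:,:,ind0,:]
    where the position of the offset axis (here the fifth axis) follows from
    axesmap = {1: [1,2], 3: [4]}. Treat this only as a sketch.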
------------------------------------------------------------------------
"""
if not isinstance(units,str):
raise TypeError('Input parameter units must be a string')
if units.lower() == 'k':
if not isinstance(beamparms, dict):
raise TypeError('Input beamparms must be a dictionary')
if 'freqs' not in beamparms:
beamparms['freqs'] = self.f
beamparms_orig = copy.deepcopy(beamparms)
if autoinfo is None:
autoinfo = {'axes': None, 'wts': [NP.ones(1, dtype=NP.float)]}
elif not isinstance(autoinfo, dict):
raise TypeError('Input autoinfo must be a dictionary')
if 'axes' not in autoinfo:
autoinfo['axes'] = None
else:
if autoinfo['axes'] is not None:
if not isinstance(autoinfo['axes'], (list,tuple,NP.ndarray,int)):
raise TypeError('Value under key axes in input autoinfo must be an integer, list, tuple or numpy array')
else:
autoinfo['axes'] = NP.asarray(autoinfo['axes']).reshape(-1)
if 'wts' not in autoinfo:
if autoinfo['axes'] is not None:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] * len(autoinfo['axes'])
else:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)]
else:
if autoinfo['axes'] is not None:
if not isinstance(autoinfo['wts'], list):
raise TypeError('wts in input autoinfo must be a list of numpy arrays')
else:
if len(autoinfo['wts']) != len(autoinfo['axes']):
raise ValueError('Input list of wts must be same as length of autoinfo axes')
else:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)]
if xinfo is None:
xinfo = {'axes': None, 'wts': {'preX': [NP.ones(1, dtype=NP.float)], 'postX': [NP.ones(1, dtype=NP.float)], 'preXnorm': False, 'postXnorm': False}}
elif not isinstance(xinfo, dict):
raise TypeError('Input xinfo must be a dictionary')
if 'axes' not in xinfo:
xinfo['axes'] = None
else:
if not isinstance(xinfo['axes'], (list,tuple,NP.ndarray,int)):
raise TypeError('Value under key axes in input xinfo must be an integer, list, tuple or numpy array')
else:
xinfo['axes'] = NP.asarray(xinfo['axes']).reshape(-1)
if 'wts' not in xinfo:
xinfo['wts'] = {}
for xkey in ['preX', 'postX']:
if xinfo['axes'] is not None:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] * len(xinfo['axes'])
else:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)]
xinfo['wts']['preXnorm'] = False
xinfo['wts']['postXnorm'] = False
else:
if xinfo['axes'] is not None:
if not isinstance(xinfo['wts'], dict):
raise TypeError('wts in input xinfo must be a dictionary')
for xkey in ['preX', 'postX']:
if not isinstance(xinfo['wts'][xkey], list):
raise TypeError('{0} wts in input xinfo must be a list of numpy arrays'.format(xkey))
else:
if len(xinfo['wts'][xkey]) != len(xinfo['axes']):
raise ValueError('Input list of {0} wts must be same as length of xinfo axes'.format(xkey))
else:
for xkey in ['preX', 'postX']:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)]
if 'preXnorm' not in xinfo['wts']:
xinfo['wts']['preXnorm'] = False
if 'postXnorm' not in xinfo['wts']:
xinfo['wts']['postXnorm'] = False
if not isinstance(xinfo['wts']['preXnorm'], NP.bool):
raise TypeError('preXnorm in input xinfo must be a boolean')
if not isinstance(xinfo['wts']['postXnorm'], NP.bool):
raise TypeError('postXnorm in input xinfo must be a boolean')
if 'avgcov' not in xinfo:
xinfo['avgcov'] = False
if not isinstance(xinfo['avgcov'], NP.bool):
raise TypeError('avgcov under input xinfo must be boolean')
if 'collapse_axes' not in xinfo:
xinfo['collapse_axes'] = []
if not isinstance(xinfo['collapse_axes'], (int,list,tuple,NP.ndarray)):
raise TypeError('collapse_axes under input xinfo must be an integer, tuple, list or numpy array')
else:
xinfo['collapse_axes'] = NP.asarray(xinfo['collapse_axes']).reshape(-1)
if (autoinfo['axes'] is not None) and (xinfo['axes'] is not None):
if NP.intersect1d(autoinfo['axes'], xinfo['axes']).size > 0:
raise ValueError("Inputs autoinfo['axes'] and xinfo['axes'] must have no intersection")
cohax = autoinfo['axes']
if cohax is None:
cohax = []
if 2 in cohax: # Remove axis=2 from cohax
if isinstance(cohax, list):
cohax.remove(2)
if isinstance(cohax, NP.ndarray):
cohax = cohax.tolist()
cohax.remove(2)
cohax = NP.asarray(cohax)
incohax = xinfo['axes']
if incohax is None:
incohax = []
if 2 in incohax: # Remove axis=2 from incohax
if isinstance(incohax, list):
incohax.remove(2)
if isinstance(incohax, NP.ndarray):
incohax = incohax.tolist()
incohax.remove(2)
incohax = NP.asarray(incohax)
if selection is None:
selection = {'triads': None, 'lst': None, 'days': None}
else:
if not isinstance(selection, dict):
raise TypeError('Input selection must be a dictionary')
if cpds is None:
cpds = {}
sampling = ['oversampled', 'resampled']
for smplng in sampling:
if smplng == 'oversampled':
cpds[smplng] = copy.deepcopy(self.cPhaseDS)
else:
cpds[smplng] = copy.deepcopy(self.cPhaseDS_resampled)
triad_ind, lst_ind, day_ind, day_ind_eicpdiff = self.subset(selection=selection)
result = {'triads': self.cPhase.cpinfo['raw']['triads'][triad_ind], 'triads_ind': triad_ind, 'lst': self.cPhase.cpinfo['errinfo']['lstbins'][lst_ind], 'lst_ind': lst_ind, 'dlst': self.cPhase.cpinfo['errinfo']['dlstbins'][lst_ind], 'days': self.cPhase.cpinfo['errinfo']['daybins'][day_ind], 'day_ind': day_ind_eicpdiff, 'dday': self.cPhase.cpinfo['errinfo']['diff_dbins'][day_ind]}
dlstbin = NP.mean(self.cPhase.cpinfo['errinfo']['dlstbins'])
if 'dlst_range' in xinfo:
if xinfo['dlst_range'] is None:
dlst_range = None
lstshifts = NP.arange(2) # only LST index offsets of 0 and 1 are estimated
else:
dlst_range = NP.asarray(xinfo['dlst_range']).ravel() / 60.0 # Difference in LST between a pair of LST (in hours)
if dlst_range.size == 1:
dlst_range = NP.insert(dlst_range, 0, 0.0)
lstshifts = NP.arange(max([0, NP.ceil(1.0*dlst_range.min()/dlstbin).astype(NP.int)]), min([NP.ceil(1.0*dlst_range.max()/dlstbin).astype(NP.int), result['lst'].size]))
else:
dlst_range = None
lstshifts = NP.arange(2) # only LST index offsets of 0 and 1 are estimated
result['lstXoffsets'] = lstshifts * dlstbin # LST interval corresponding to diagonal offsets created by the LST covariance
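# Added illustrative sketch (not part of the original code): the mapping from
# 'dlst_range' to integer LST-bin shifts can be reasoned about as follows,
# assuming, say, dlstbin ~ 0.1 hr and dlst_range = [0.0, 1.0, 2.0] minutes:
#   dlst_range_hr = NP.asarray([0.0, 1.0, 2.0]) / 60.0   # -> [0.0, 0.0167, 0.0333] hr
#   lstshifts = NP.arange(NP.ceil(dlst_range_hr.min()/0.1).astype(NP.int),
#                         NP.ceil(dlst_range_hr.max()/0.1).astype(NP.int))  # -> [0]
# so only the zero-offset LST diagonal would be estimated in this hypothetical
# case; larger 'dlst_range' values or finer LST bins yield more offsets.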
for smplng in sampling:
result[smplng] = {}
wl = FCNST.c / (cpds[smplng]['freq_center'] * U.Hz)
z = CNST.rest_freq_HI / cpds[smplng]['freq_center'] - 1
dz = CNST.rest_freq_HI / cpds[smplng]['freq_center']**2 * cpds[smplng]['bw_eff']
dkprll_deta = DS.dkprll_deta(z, cosmo=cosmo)
kprll = dkprll_deta.reshape(-1,1) * cpds[smplng]['lags']
rz_los = cosmo.comoving_distance(z) # in Mpc/h
drz_los = FCNST.c * cpds[smplng]['bw_eff']*U.Hz * (1+z)**2 / (CNST.rest_freq_HI * U.Hz) / (cosmo.H0 * cosmo.efunc(z)) # in Mpc/h
if units == 'Jy':
jacobian1 = 1 / (cpds[smplng]['bw_eff'] * U.Hz)
jacobian2 = drz_los / (cpds[smplng]['bw_eff'] * U.Hz)
temperature_from_fluxdensity = 1.0
elif units == 'K':
beamparms = copy.deepcopy(beamparms_orig)
omega_bw = self.beam3Dvol(beamparms, freq_wts=cpds[smplng]['freq_wts'])
jacobian1 = 1 / (omega_bw * U.Hz) # The steradian is present but not explicitly assigned
jacobian2 = rz_los**2 * drz_los / (cpds[smplng]['bw_eff'] * U.Hz)
temperature_from_fluxdensity = wl**2 / (2*FCNST.k_B)
else:
raise ValueError('Input value for units invalid')
factor = jacobian1 * jacobian2 * temperature_from_fluxdensity**2
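# Added note (my reading of the factors above, not an original comment):
# for units='K' the pieces combine to the standard delay-spectrum
# normalization (e.g. Parsons et al. 2012),
#   factor ~ (wl**2 / (2*k_B))**2 * rz_los**2 * drz_los / (bw_eff * omega_bw)
# so that factor * |V(tau)|**2 carries units of K**2 Mpc**3 (converted later
# via .to('K2 Mpc3')), while for units='Jy' the solid-angle and temperature
# conversions drop out and only the bandwidth-to-comoving-distance Jacobian
# remains, giving Jy**2 Mpc.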
result[smplng]['z'] = z
result[smplng]['kprll'] = kprll
result[smplng]['lags'] = NP.copy(cpds[smplng]['lags'])
result[smplng]['freq_center'] = cpds[smplng]['freq_center']
result[smplng]['bw_eff'] = cpds[smplng]['bw_eff']
result[smplng]['shape'] = cpds[smplng]['shape']
result[smplng]['freq_wts'] = cpds[smplng]['freq_wts']
result[smplng]['lag_corr_length'] = cpds[smplng]['lag_corr_length']
dpool = 'errinfo'
if dpool in cpds[smplng]:
result[smplng][dpool] = {}
inpshape = list(cpds[smplng][dpool]['dspec0']['mean'].shape)
inpshape[1] = lst_ind.size
inpshape[2] = day_ind_eicpdiff.size
inpshape[3] = triad_ind.size
if len(cohax) > 0:
nsamples_coh = NP.prod(NP.asarray(inpshape)[NP.asarray(cohax)])
else:
nsamples_coh = 1
if len(incohax) > 0:
nsamples = NP.prod(NP.asarray(inpshape)[NP.asarray(incohax)])
nsamples_incoh = nsamples * (nsamples - 1)
else:
nsamples_incoh = 1
twts_multidim_idx = NP.ix_(lst_ind,day_ind_eicpdiff,triad_ind,NP.arange(1)) # shape=(nlst,ndays,ntriads,1)
dspec_multidim_idx = NP.ix_(NP.arange(wl.size),lst_ind,day_ind_eicpdiff,triad_ind,NP.arange(inpshape[4])) # shape=(nspw,nlst,ndays,ntriads,nchan)
max_wt_in_chan = NP.max(NP.sum(cpds[smplng]['errinfo']['dspec0']['twts'].data, axis=(0,1,2,3)))
select_chan = NP.argmax(NP.sum(cpds[smplng]['errinfo']['dspec0']['twts'].data, axis=(0,1,2,3)))
twts = {'0': NP.copy(cpds[smplng]['errinfo']['dspec0']['twts'].data[:,:,:,[select_chan]]), '1': NP.copy(cpds[smplng]['errinfo']['dspec1']['twts'].data[:,:,:,[select_chan]])}
if nsamples_coh > 1:
awts_shape = tuple(NP.ones(cpds[smplng]['errinfo']['dspec0']['mean'].ndim, dtype=NP.int)) # use 'dspec0' since 'errinfo' stores dspec0/dspec1
awts = NP.ones(awts_shape, dtype=NP.complex)
awts_shape = NP.asarray(awts_shape)
for caxind,caxis in enumerate(cohax):
curr_awts_shape = NP.copy(awts_shape)
curr_awts_shape[caxis] = -1
awts = awts * autoinfo['wts'][caxind].reshape(tuple(curr_awts_shape))
for stat in ['mean', 'median']:
dspec0 = NP.copy(cpds[smplng][dpool]['dspec0'][stat][dspec_multidim_idx])
dspec1 = NP.copy(cpds[smplng][dpool]['dspec1'][stat][dspec_multidim_idx])
if nsamples_coh > 1:
if stat == 'mean':
dspec0 = NP.sum(twts['0'][NP.newaxis,...] * awts * dspec0, axis=cohax, keepdims=True) / NP.sum(twts['0'][twts_multidim_idx][NP.newaxis,...] * awts, axis=cohax, keepdims=True)
dspec1 = NP.sum(twts['1'][NP.newaxis,...] * awts * dspec1, axis=cohax, keepdims=True) / NP.sum(twts['1'][twts_multidim_idx][NP.newaxis,...] * awts, axis=cohax, keepdims=True)
else:
dspec0 = NP.median(dspec0, axis=cohax, keepdims=True)
dspec1 = NP.median(dspec1, axis=cohax, keepdims=True)
if nsamples_incoh > 1:
expandax_map = {}
wts_shape = tuple(NP.ones(dspec0.ndim, dtype=NP.int))
preXwts = NP.ones(wts_shape, dtype=NP.complex)
wts_shape = NP.asarray(wts_shape)
for incaxind,incaxis in enumerate(xinfo['axes']):
curr_wts_shape = NP.copy(wts_shape)
curr_wts_shape[incaxis] = -1
preXwts = preXwts * xinfo['wts']['preX'][incaxind].reshape(tuple(curr_wts_shape))
preXwts0 = NP.copy(preXwts)
preXwts1 = NP.copy(preXwts)
for incax in NP.sort(incohax)[::-1]:
dspec0 = NP.expand_dims(dspec0, axis=incax)
preXwts0 = NP.expand_dims(preXwts0, axis=incax)
if incax == 1:
preXwts0_outshape = list(preXwts0.shape)
preXwts0_outshape[incax+1] = dspec0.shape[incax+1]
preXwts0_outshape = tuple(preXwts0_outshape)
preXwts0 = NP.broadcast_to(preXwts0, preXwts0_outshape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
preXwts1_tmp = NP.expand_dims(preXwts1, axis=incax)
preXwts1_shape = NP.asarray(preXwts1_tmp.shape)
preXwts1_shape[incax] = lstshifts.size
preXwts1_shape[incax+1] = preXwts0_outshape[incax+1]
preXwts1_shape = tuple(preXwts1_shape)
preXwts1 = NP.broadcast_to(preXwts1_tmp, preXwts1_shape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
dspec1_tmp = NP.expand_dims(dspec1, axis=incax)
dspec1_shape = NP.asarray(dspec1_tmp.shape)
dspec1_shape[incax] = lstshifts.size
# dspec1_shape = NP.insert(dspec1_shape, incax, lstshifts.size)
dspec1_shape = tuple(dspec1_shape)
dspec1 = NP.broadcast_to(dspec1_tmp, dspec1_shape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
for lstshiftind, lstshift in enumerate(lstshifts):
dspec1[:,lstshiftind,...] = NP.roll(dspec1_tmp[:,0,...], lstshift, axis=incax)
dspec1[:,lstshiftind,:lstshift,...] = NP.nan
preXwts1[:,lstshiftind,...] = NP.roll(preXwts1_tmp[:,0,...], lstshift, axis=incax)
preXwts1[:,lstshiftind,:lstshift,...] = NP.nan
else:
dspec1 = NP.expand_dims(dspec1, axis=incax+1)
preXwts1 = NP.expand_dims(preXwts1, axis=incax+1)
expandax_map[incax] = incax + NP.arange(2)
for ekey in expandax_map:
if ekey > incax:
expandax_map[ekey] += 1
result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec0.ndim-1, dtype=NP.int))) * (dspec0*U.Unit('Jy Hz') * preXwts0) * (dspec1*U.Unit('Jy Hz') * preXwts1).conj()
if xinfo['wts']['preXnorm']:
result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(preXwts0 * preXwts1.conj(), axis=NP.union1d(NP.where(NP.logical_or(NP.asarray(preXwts0.shape)>1, NP.asarray(preXwts1.shape)>1))), keepdims=True) # Normalize by summing the weights over the expanded axes
if (len(xinfo['collapse_axes']) > 0) or (xinfo['avgcov']):
# Remove axis=2 if present
if 2 in xinfo['collapse_axes']:
# Remove axis=2 from cohax
if isinstance(xinfo['collapse_axes'], list):
xinfo['collapse_axes'].remove(2)
if isinstance(xinfo['collapse_axes'], NP.ndarray):
xinfo['collapse_axes'] = xinfo['collapse_axes'].tolist()
xinfo['collapse_axes'].remove(2)
xinfo['collapse_axes'] = NP.asarray(xinfo['collapse_axes'])
if (len(xinfo['collapse_axes']) > 0) or (xinfo['avgcov']):
# if either collapsing of incoherent axes or
# averaging of the full covariance is requested
diagoffsets = {} # Stores the correlation index difference along each axis.
diagweights = {} # Stores the number of points summed in the trace along the offset diagonal
for colaxind, colax in enumerate(xinfo['collapse_axes']):
if colax == 1:
shp = NP.ones(cpds[smplng][dpool]['dspec0'][stat].ndim, dtype=NP.int)
shp[colax] = lst_ind.size
multdim_idx = tuple([NP.arange(axdim) for axdim in shp])
diagweights[colax] = NP.sum(NP.logical_not(NP.isnan(cpds[smplng][dpool]['dspec0'][stat][dspec_multidim_idx][multdim_idx]))) - lstshifts
# diagweights[colax] = result[smplng][dpool][stat].shape[expandax_map[colax][-1]] - lstshifts
if stat == 'mean':
result[smplng][dpool][stat] = NP.nanmean(result[smplng][dpool][stat], axis=expandax_map[colax][-1])
else:
result[smplng][dpool][stat] = NP.nanmedian(result[smplng][dpool][stat], axis=expandax_map[colax][-1])
diagoffsets[colax] = lstshifts
else:
pspec_unit = result[smplng][dpool][stat].si.unit
result[smplng][dpool][stat], offsets, diagwts = OPS.array_trace(result[smplng][dpool][stat].si.value, offsets=None, axis1=expandax_map[colax][0], axis2=expandax_map[colax][1], outaxis='axis1')
diagwts_shape = NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int)
diagwts_shape[expandax_map[colax][0]] = diagwts.size
diagoffsets[colax] = offsets
diagweights[colax] = NP.copy(diagwts)
result[smplng][dpool][stat] = result[smplng][dpool][stat] * pspec_unit / diagwts.reshape(diagwts_shape)
for ekey in expandax_map:
if ekey > colax:
expandax_map[ekey] -= 1
expandax_map[colax] = NP.asarray(expandax_map[colax][0]).ravel()
wts_shape = tuple(NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int))
postXwts = NP.ones(wts_shape, dtype=NP.complex)
wts_shape = NP.asarray(wts_shape)
for colaxind, colax in enumerate(xinfo['collapse_axes']):
curr_wts_shape = NP.copy(wts_shape)
curr_wts_shape[expandax_map[colax]] = -1
postXwts = postXwts * xinfo['wts']['postX'][colaxind].reshape(tuple(curr_wts_shape))
result[smplng][dpool][stat] = result[smplng][dpool][stat] * postXwts
axes_to_sum = tuple(NP.asarray([expandax_map[colax] for colax in xinfo['collapse_axes']]).ravel()) # for post-X normalization and collapse of covariance matrix
if xinfo['wts']['postXnorm']:
result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(postXwts, axis=axes_to_sum, keepdims=True) # Normalize by summing the weights over the collapsed axes
if xinfo['avgcov']:
# collapse the axes further (postXwts have already
# been applied)
diagoffset_weights = 1.0
result[smplng][dpool][stat] = NP.nanmean(result[smplng][dpool][stat], axis=axes_to_sum, keepdims=True)
for colaxind in list(zip(*sorted(zip(NP.arange(xinfo['collapse_axes'].size), xinfo['collapse_axes']), reverse=True)))[0]:
# It is important to sort the collapsible axes in
# reverse order before deleting elements below,
# otherwise the axes ordering may get messed up
diagoffset_weights_shape = NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int)
diagoffset_weights_shape[expandax_map[xinfo['collapse_axes'][colaxind]][0]] = diagweights[xinfo['collapse_axes'][colaxind]].size
diagoffset_weights = diagoffset_weights * diagweights[xinfo['collapse_axes'][colaxind]].reshape(diagoffset_weights_shape)
del diagoffsets[xinfo['collapse_axes'][colaxind]]
result[smplng][dpool][stat] = NP.nansum(result[smplng][dpool][stat]*diagoffset_weights, axis=axes_to_sum, keepdims=True) / NP.nansum(diagoffset_weights, axis=axes_to_sum, keepdims=True)
else:
result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec0.ndim-1, dtype=NP.int))) * (dspec0*U.Unit('Jy Hz')) * (dspec1*U.Unit('Jy Hz')).conj() # dspec0/dspec1 are the differenced halves; 'dspec' is not defined in this branch
diagoffsets = {}
expandax_map = {}
if units == 'Jy':
result[smplng][dpool][stat] = result[smplng][dpool][stat].to('Jy2 Mpc')
elif units == 'K':
result[smplng][dpool][stat] = result[smplng][dpool][stat].to('K2 Mpc3')
else:
raise ValueError('Input value for units invalid')
result[smplng][dpool]['diagoffsets'] = diagoffsets
result[smplng][dpool]['diagweights'] = diagweights
result[smplng][dpool]['axesmap'] = expandax_map
result[smplng][dpool]['nsamples_incoh'] = nsamples_incoh
result[smplng][dpool]['nsamples_coh'] = nsamples_coh
return result
############################################################################
def rescale_power_spectrum(self, cpdps, visfile, blindex, visunits='Jy'):
"""
------------------------------------------------------------------------
Rescale power spectrum to a dimensional quantity by converting the
dimensionless ratio using the given visibility amplitude information
Inputs:
cpdps [dictionary] Dictionary with the keys 'triads',
'triads_ind', 'lstbins', 'lst', 'dlst', 'lst_ind',
'oversampled' and 'resampled' corresponding to whether
resample was set to False or True in call to member function
FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy
array corresponding to triad and time indices used in
selecting the data. Values under keys 'oversampled' and
'resampled' each contain a dictionary with the following keys
and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has
shape=(nlags,).
'kprll' [numpy array] k_parallel modes (in h/Mpc)
corresponding to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in
Hz) of the frequency subbands of the subband delay
spectra. It is of size n_win. It is roughly
equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in
Hz) of the subbands being delay transformed. It is
of size n_win. It is roughly equivalent to width in
redshift or along line-of-sight
'shape' [string] shape of the frequency window function
applied. Usual values are 'rect' (rectangular),
'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window
was raised.
The value must be a positive scalar with default = 1.0
'mean' [numpy array] Delay power spectrum incoherently
averaged over the axes specified in incohax using
the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has
shape=(nspw,nlst,ndays,ntriads,nchan). It has units
of Mpc/h. If incohax was set, those axes will be set
to 1.
'median'
[numpy array] Delay power spectrum incoherently
averaged over the axes specified in incohax using
the 'median' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has
shape=(nspw,nlst,ndays,ntriads,nchan). It has units
of Mpc/h. If incohax was set, those axes will be set
to 1.
visfile [string] Full path to the visibility file in NPZ format that
consists of the following keys and values:
'vis' [numpy array] Complex visibilities averaged over
all redundant baselines of different classes of
baselines. It is of shape (nlst,nbl,nchan)
'last' [numpy array] Array of times in units of days where
the fractional part is the LST expressed as a fraction of a day.
blindex [numpy array] 3-element array of baseline indices to use in
selecting the triad corresponding to closure phase power
spectrum in cpdps. It will index into the 'vis' array in
NPZ file visfile
visunits [string] Units of visibility in visfile. Accepted values
are 'Jy' (default; for Jansky) and 'K' (for Kelvin)
Outputs:
Same dictionary as input cpdps except it has the following additional
keys and values. Under 'resampled' and 'oversampled' keys, there are
now new keys called 'mean-absscale' and 'median-absscale' keys which
are each dictionaries with the following keys and values:
'converted' [numpy array] Values of power (in units of visunits^2) with
same shape as the values under 'mean' and 'median' keys --
(nspw,nlst,ndays,ntriads,nchan) unless some of those axes
have already been averaged coherently or incoherently
'units' [string] Units of power in key 'converted'. Its values are
square of the input visunits -- 'Jy^2' or 'K^2'
------------------------------------------------------------------------
"""
if not isinstance(cpdps, dict):
raise TypeError('Input cpdps must be a dictionary')
if not isinstance(visfile, str):
raise TypeError('Input visfile must be a string containing full file path')
if not isinstance(blindex, NP.ndarray):
raise TypeError('Input blindex must be a numpy array')
if blindex.size != 3:
raise ValueError('Input blindex must be a 3-element array')
if not isinstance(visunits, str):
raise TypeError('Input visunits must be a string')
if visunits not in ['Jy', 'K']:
raise ValueError('Input visunits currently not accepted')
datapool = []
for dpool in ['resampled', 'oversampled']:
if dpool in cpdps:
datapool += [dpool]
scaleinfo = NP.load(visfile)
vis = scaleinfo['vis'][:,blindex,:] # shape=(nlst,nbl,nchan)
vis_lstfrac, vis_lstint = NP.modf(scaleinfo['last']) # shape=(nlst,)
vis_lstHA = vis_lstfrac * 24.0 # in hours
vis_lstdeg = vis_lstHA * 15.0 # in degrees
cpdps_lstdeg = 15.0*cpdps['lst'] # in degrees
lstmatrix = cpdps_lstdeg.reshape(-1,1) - vis_lstdeg.reshape(1,-1)
lstmatrix[NP.abs(lstmatrix) > 180.0] -= 360.0
ind_minlstsep = NP.argmin(NP.abs(lstmatrix), axis=1)
vis_nearestLST = vis[ind_minlstsep,:,:] # nlst x nbl x nchan; the baselines were already selected above
for dpool in datapool:
freq_wts = cpdps[dpool]['freq_wts'] # nspw x nchan
freqwtd_avgvis_nearestLST = NP.sum(freq_wts[:,NP.newaxis,NP.newaxis,:] * vis_nearestLST[NP.newaxis,:,:,:], axis=-1, keepdims=True) / NP.sum(freq_wts[:,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) # nspw x nlst x nbl x (nchan=1)
vis_square_multscalar = 1 / NP.sum(1/NP.abs(freqwtd_avgvis_nearestLST)**2, axis=2, keepdims=True) # nspw x nlst x (nbl=1) x (nchan=1)
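# Added hedged illustration (not an original comment): the multiplicative
# scalar above is the inverse of the sum of inverse squared visibility
# amplitudes over the three triad baselines, e.g. for |V1|,|V2|,|V3| = 10, 10, 5 Jy:
#   1.0 / (1/10**2 + 1/10**2 + 1/5**2) = 1.0 / 0.06 ~ 16.7 Jy**2
# which is the quantity the dimensionless closure-phase power spectrum gets
# multiplied by so that it carries units of visunits**2.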
for stat in ['mean', 'median']:
cpdps[dpool][stat+'-absscale'] = {}
cpdps[dpool][stat+'-absscale']['converted'] = cpdps[dpool][stat] * vis_square_multscalar[:,:,NP.newaxis,:,:] # nspw x nlst x ndays x ntriads x nlags
cpdps[dpool][stat+'-absscale']['units'] = '{0}^2'.format(visunits)
return cpdps
############################################################################
def average_rescaled_power_spectrum(rcpdps, avgax, kprll_llim=None):
"""
------------------------------------------------------------------------
Average the rescaled power spectrum with physical units along certain
axes with inverse variance or regular averaging
Inputs:
rcpdps [dictionary] Dictionary with the keys 'triads',
'triads_ind', 'lstbins', 'lst', 'dlst', 'lst_ind',
'oversampled' and 'resampled' corresponding to whether
resample was set to False or True in call to member function
FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy
array corresponding to triad and time indices used in
selecting the data. Values under keys 'oversampled' and
'resampled' each contain a dictionary with the following keys
and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has
shape=(nlags,).
'kprll' [numpy array] k_parallel modes (in h/Mpc)
corresponding to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in
Hz) of the frequency subbands of the subband delay
spectra. It is of size n_win. It is roughly
equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in
Hz) of the subbands being delay transformed. It is
of size n_win. It is roughly equivalent to width in
redshift or along line-of-sight
'shape' [string] shape of the frequency window function
applied. Usual values are 'rect' (rectangular),
'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window
was raised.
The value must be a positive scalar with default = 1.0
'mean' [numpy array] Delay power spectrum incoherently
averaged over the axes specified in incohax using
the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has
shape=(nspw,nlst,ndays,ntriads,nchan). It has units
of Mpc/h. If incohax was set, those axes will be set
to 1.
'median'
[numpy array] Delay power spectrum incoherently
averaged over the axes specified in incohax using
the 'median' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has
shape=(nspw,nlst,ndays,ntriads,nchan). It has units
of Mpc/h. If incohax was set, those axes will be set
to 1.
'mean-absscale' and 'median-absscale'
[dictionary] Each dictionary consists of the
following keys and values:
'converted' [numpy array] Values of power (in units
of value in key 'units') with same shape
as the values under 'mean' and 'median'
keys -- (nspw,nlst,ndays,ntriads,nchan)
unless some of those axes have already
been averaged coherently or incoherently
'units' [string] Units of power in key
'converted'. Its values are square of
either 'Jy^2' or 'K^2'
avgax [int, list, tuple] Specifies the axes over which the power
in absolute scale (with physical units) should be averaged.
This counts as incoherent averaging. The averaging is done
with inverse-variance weighting if the input kprll_llim is
set to choose the range of kprll from which the variance
and inverse variance will be determined. Otherwise, a
regular averaging is performed.
kprll_llim [float] Lower limit of absolute value of kprll (in h/Mpc)
beyond which the variance will be determined in order to
estimate the inverse variance weights. If set to None, the
weights are uniform. If set to a value, values beyond this
kprll_llim are used to estimate the variance and hence the
inverse-variance weights.
Outputs:
Dictionary with the same structure as the input dictionary rcpdps except
with the following additional keys and values. Under the dictionaries
under keys 'mean-absscale' and 'median-absscale', there is an additional
key-value pair:
'avg' [numpy array] Values of power (in units of value in key 'units')
with same shape as the values under 'converted' --
(nspw,nlst,ndays,ntriads,nchan) except those axes which were
averaged in this member function, and those axes will be
retained but with axis size=1.
------------------------------------------------------------------------
"""
if not isinstance(rcpdps, dict):
raise TypeError('Input rcpdps must be a dictionary')
if isinstance(avgax, int):
if avgax >= 4:
raise ValueError('Input avgax has a value greater than the maximum axis number over which averaging can be performed')
avgax = NP.asarray(avgax)
elif isinstance(avgax, (list,tuple)):
avgax = NP.asarray(avgax)
if NP.any(avgax >= 4):
raise ValueError('Input avgax contains a value greater than the maximum axis number over which averaging can be performed')
else:
raise TypeError('Input avgax must be an integer, list, or tuple')
if kprll_llim is not None:
if not isinstance(kprll_llim, (int,float)):
raise TypeError('Input kprll_llim must be a scalar')
kprll_llim = NP.abs(kprll_llim)
datapool = []
for dpool in ['oversampled', 'resampled']:
    if dpool in rcpdps:
        datapool += [dpool]
for dpool in datapool:
for stat in ['mean', 'median']:
wts = NP.ones((1,1,1,1,1))
if kprll_llim is not None:
kprll_ind = NP.abs(rcpdps[dpool]['kprll']) >= kprll_llim # nspw x nlags
if NP.any(kprll_ind):
if rcpdps[dpool]['z'].size > 1:
indsets = [NP.where(kprll_ind[i,:])[0] for i in range(rcpdps[dpool]['z'].size)]
common_kprll_ind = reduce(NP.intersect1d, indsets)
multidim_idx = NP.ix_(NP.arange(rcpdps[dpool]['freq_center'].size), NP.arange(rcpdps['lst'].size), NP.arange(rcpdps['days'].size), NP.arange(rcpdps['triads'].size), common_kprll_ind)
else:
multidim_idx = NP.ix_(NP.arange(rcpdps[dpool]['freq_center'].size), NP.arange(rcpdps['lst'].size), NP.arange(rcpdps['days'].size), NP.arange(rcpdps['triads'].size), kprll_ind[0,:])
else:
multidim_idx = NP.ix_(NP.arange(rcpdps[dpool]['freq_center'].size), NP.arange(rcpdps['lst'].size), NP.arange(rcpdps['days'].size), NP.arange(rcpdps['triads'].size), NP.arange(rcpdps[dpool]['lags'].size))
wts = 1 / NP.var(rcpdps[dpool][stat+'-absscale']['converted'][multidim_idx], axis=avgax, keepdims=True)
rcpdps[dpool][stat+'-absscale']['avg'] = NP.sum(wts * rcpdps[dpool][stat+'-absscale']['converted'], axis=avgax, keepdims=True) / NP.sum(wts, axis=avgax, keepdims=True)
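# Added sketch of the weighting above (not an original comment): with
# kprll_llim set, the weights are w = 1/sigma**2 estimated from the
# high-|kprll| region, so the average is the usual inverse-variance mean,
# e.g. for two measurements p1, p2 with variances v1, v2:
#   avg = (p1/v1 + p2/v2) / (1/v1 + 1/v2)
# With kprll_llim=None the weights are uniform and the expression is
# intended as a regular average (per the docstring).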
return rcpdps
############################################################################
def beam3Dvol(self, beamparms, freq_wts=None):
"""
------------------------------------------------------------------------
Compute three-dimensional (transverse-LOS) volume of the beam in units
of "Sr Hz".
Inputs:
beamparms [dictionary] Contains beam information. It contains the
following keys and values:
'beamfile' [string] If set to string, should contain the
filename relative to default path or absolute
path containing the power pattern. If both
'beamfile' and 'telescope' are set, the
'beamfile' will be used. The latter is used for
determining analytic beam.
'filepathtype'
[string] Specifies if the beamfile is to be
found at the 'default' location or a 'custom'
location. If set to 'default', the PRISim path
is searched for the beam file. Only applies if
'beamfile' key is set.
'filefmt' [string] External file format of the beam.
Accepted values are 'uvbeam', 'fits' and 'hdf5'
'telescope' [dictionary] Information used to analytically
determine the power pattern. used only if
'beamfile' is not set or set to None. This
specifies the type of element, its size and
orientation. It consists of the following keys
and values:
'id' [string] If set, will ignore the other keys
and use telescope details for known
telescopes. Accepted values are 'mwa',
'vla', 'gmrt', 'hera', 'paper', 'hirax',
and 'chime'
'shape' [string] Shape of antenna element. Accepted
values are 'dipole', 'delta', 'dish',
'gaussian', 'rect' and 'square'. Will be
ignored if key 'id' is set. 'delta' denotes
a delta function for the antenna element
which has an isotropic radiation pattern.
'delta' is the default when keys 'id' and
'shape' are not set.
'size' [scalar or 2-element list/numpy array]
Diameter of the telescope dish (in meters)
if the key 'shape' is set to 'dish', side
of the square aperture (in meters) if the
key 'shape' is set to 'square', 2-element
sides if key 'shape' is set to 'rect', or
length of the dipole if key 'shape' is set
to 'dipole'. Will be ignored if key 'shape'
is set to 'delta'. Will be ignored if key
'id' is set and a preset value used for the
diameter or dipole.
'orientation'
[list or numpy array] If key 'shape' is set
to dipole, it refers to the orientation of
the dipole element unit vector whose
magnitude is specified by length. If key
'shape' is set to 'dish', it refers to the
position on the sky to which the dish is
pointed. For a dipole, this unit vector must
be provided in the local ENU coordinate
system aligned with the direction cosines
coordinate system or in the Alt-Az
coordinate system. This will be used only
when key 'shape' is set to 'dipole'. This
could be a 2-element vector (transverse
direction cosines) where the third
(line-of-sight) component is determined, or
a 3-element vector specifying all three
direction cosines or a two-element
coordinate in Alt-Az system. If not provided
it defaults to an eastward pointing dipole.
If key 'shape' is set to 'dish' or
'gaussian', the orientation refers to the
pointing center of the dish on the sky. It
can be provided in Alt-Az system as a
two-element vector or in the direction
cosine coordinate system as a two- or
three-element vector. If not set in the case
of a dish element, it defaults to zenith.
This is not to be confused with the key
'pointing_center' in dictionary
'pointing_info' which refers to the
beamformed pointing center of the array. The
coordinate system is specified by the key
'ocoords'
'ocoords' [string] specifies the coordinate system
for key 'orientation'. Accepted values are
'altaz' and 'dircos'.
'element_locs'
[2- or 3-column array] Element locations that
constitute the tile. Each row specifies
location of one element in the tile. The
locations must be specified in local ENU
coordinate system. First column specifies along
local east, second along local north and the
third along local up. If only two columns are
specified, the third column is assumed to be
zeros. If 'element_locs' is not provided, it is
assumed to be a one-element system and not a
phased array as far as determination of the
primary beam is concerned.
'groundplane'
[scalar] height of telescope element above
the ground plane (in meters). Default=None
will denote no ground plane effects.
'ground_modify'
[dictionary] contains specifications to
modify the analytically computed ground
plane pattern. If absent, the ground plane
computed will not be modified. If set, it
may contain the following keys:
'scale' [scalar] positive value to scale the
modifying factor with. If not set, the
scale factor to the modification is
unity.
'max' [scalar] positive value to clip the
modified and scaled values to. If not
set, there is no upper limit
'freqs' [numpy array] Numpy array denoting frequencies
(in Hz) at which beam integrals are to be
evaluated. If set to None, it will automatically
be set from the class attribute.
'nside' [integer] NSIDE parameter for determining and
interpolating the beam. If not set, it will be
set to 64 (default).
'chromatic' [boolean] If set to true, a chromatic power
pattern is used. If false, an achromatic power
pattern is used based on a reference frequency
specified in 'select_freq'.
'select_freq'
[scalar] Selected frequency for the achromatic
beam. If not set, it will be determined to be
mean of the array in 'freqs'
'spec_interp'
[string] Method to perform spectral
interpolation. Accepted values are those
accepted in scipy.interpolate.interp1d() and
'fft'. Default='cubic'.
freq_wts [numpy array] Frequency weights centered on different
spectral windows or redshifts. Its shape is (nwin,nchan)
and should match the number of spectral channels in input
parameter 'freqs' under 'beamparms' dictionary
Output:
omega_bw [numpy array] Integral of the square of the power pattern
over transverse and spectral axes. Its shape is (nwin,)
------------------------------------------------------------------------
"""
if not isinstance(beamparms, dict):
raise TypeError('Input beamparms must be a dictionary')
if ('beamfile' not in beamparms) and ('telescope' not in beamparms):
raise KeyError('Input beamparms does not contain either "beamfile" or "telescope" keys')
if 'freqs' not in beamparms:
raise KeyError('Key "freqs" not found in input beamparms')
if not isinstance(beamparms['freqs'], NP.ndarray):
raise TypeError('Key "freqs" in input beamparms must contain a numpy array')
if 'nside' not in beamparms:
beamparms['nside'] = 64
if not isinstance(beamparms['nside'], int):
raise TypeError('"nside" parameter in input beamparms must be an integer')
if 'chromatic' not in beamparms:
beamparms['chromatic'] = True
else:
if not isinstance(beamparms['chromatic'], bool):
raise TypeError('Beam chromaticity parameter in input beamparms must be a boolean')
theta, phi = HP.pix2ang(beamparms['nside'], NP.arange(HP.nside2npix(beamparms['nside'])))
theta_phi = NP.hstack((theta.reshape(-1,1), phi.reshape(-1,1)))
if beamparms.get('beamfile') is not None: # 'beamfile' may be absent when only 'telescope' is given
if 'filepathtype' in beamparms:
if beamparms['filepathtype'] == 'default':
beamparms['beamfile'] = prisim_path+'data/beams/'+beamparms['beamfile']
if 'filefmt' not in beamparms:
raise KeyError('Input beam file format must be specified for an external beam')
if beamparms['filefmt'].lower() in ['hdf5', 'fits', 'uvbeam']:
beamparms['filefmt'] = beamparms['filefmt'].lower()
else:
raise ValueError('Invalid beam file format specified')
if 'pol' not in beamparms:
raise KeyError('Beam polarization must be specified')
if not beamparms['chromatic']:
if 'select_freq' not in beamparms:
raise KeyError('Input reference frequency for achromatic behavior must be specified')
if beamparms['select_freq'] is None:
beamparms['select_freq'] = NP.mean(beamparms['freqs'])
if 'spec_interp' not in beamparms:
beamparms['spec_interp'] = 'cubic'
if beamparms['filefmt'] == 'fits':
external_beam = fits.getdata(beamparms['beamfile'], extname='BEAM_{0}'.format(beamparms['pol']))
external_beam_freqs = fits.getdata(beamparms['beamfile'], extname='FREQS_{0}'.format(beamparms['pol'])) # in MHz
external_beam = external_beam.reshape(-1,external_beam_freqs.size) # npix x nfreqs
elif beamparms['filefmt'] == 'uvbeam':
if uvbeam_module_found:
uvbm = UVBeam()
uvbm.read_beamfits(beamparms['beamfile'])
axis_vec_ind = 0 # for power beam
spw_ind = 0 # spectral window index
if beamparms['pol'].lower() in ['x', 'e']:
beam_pol_ind = 0
else:
beam_pol_ind = 1
external_beam = uvbm.data_array[axis_vec_ind,spw_ind,beam_pol_ind,:,:].T # npix x nfreqs
external_beam_freqs = uvbm.freq_array.ravel() # nfreqs (in Hz)
else:
raise ImportError('uvbeam module not installed/found')
if NP.abs(NP.abs(external_beam).max() - 1.0) > 1e-10:
external_beam /= NP.abs(external_beam).max()
else:
raise ValueError('Specified beam file format not currently supported')
if beamparms['chromatic']:
if beamparms['spec_interp'] == 'fft':
external_beam = external_beam[:,:-1]
external_beam_freqs = external_beam_freqs[:-1]
interp_logbeam = OPS.healpix_interp_along_axis(NP.log10(external_beam), theta_phi=theta_phi, inloc_axis=external_beam_freqs, outloc_axis=beamparms['freqs'], axis=1, kind=beamparms['spec_interp'], assume_sorted=True)
else:
nearest_freq_ind = NP.argmin(NP.abs(external_beam_freqs - beamparms['select_freq']))
interp_logbeam = OPS.healpix_interp_along_axis(NP.log10(NP.repeat(external_beam[:,nearest_freq_ind].reshape(-1,1), beamparms['freqs'].size, axis=1)), theta_phi=theta_phi, inloc_axis=beamparms['freqs'], outloc_axis=beamparms['freqs'], axis=1, assume_sorted=True)
interp_logbeam_max = NP.nanmax(interp_logbeam, axis=0)
interp_logbeam_max[interp_logbeam_max <= 0.0] = 0.0
interp_logbeam_max = interp_logbeam_max.reshape(1,-1)
interp_logbeam = interp_logbeam - interp_logbeam_max
beam = 10**interp_logbeam
else:
altaz = NP.array([90.0, 0.0]).reshape(1,-1) + NP.array([-1,1]).reshape(1,-1) * NP.degrees(theta_phi)
if beamparms['chromatic']:
beam = PB.primary_beam_generator(altaz, beamparms['freqs'], beamparms['telescope'], skyunits='altaz', pointing_info=None, pointing_center=None, freq_scale='Hz', east2ax1=0.0)
else:
beam = PB.primary_beam_generator(altaz, beamparms['select_freq'], beamparms['telescope'], skyunits='altaz', pointing_info=None, pointing_center=None, freq_scale='Hz', east2ax1=0.0)
beam = beam.reshape(-1,1) * NP.ones(beamparms['freqs'].size).reshape(1,-1)
omega_bw = DS.beam3Dvol(beam, beamparms['freqs'], freq_wts=freq_wts, hemisphere=True)
return omega_bw
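# Added illustrative (hypothetical) usage of beam3Dvol, assuming an analytic
# beam; the 'telescope' dictionary keys follow the docstring above, and
# cpDSobj stands for an instance of this class (name assumed here):
#   beamparms = {'telescope': {'id': 'hera'}, 'beamfile': None,
#                'freqs': NP.linspace(150e6, 160e6, 64),
#                'nside': 64, 'chromatic': True}
#   omega_bw = cpDSobj.beam3Dvol(beamparms, freq_wts=freq_wts)
# where freq_wts has shape (nwin, nchan); the result has shape (nwin,) in Sr Hz.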
############################################################################
from __future__ import absolute_import
from django.core.exceptions import ImproperlyConfigured
from django.db.models.loading import get_app
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
from .models import Empty
class EmptyModelTests(TestCase):
def test_empty(self):
m = Empty()
self.assertEqual(m.id, None)
m.save()
Empty.objects.create()
self.assertEqual(len(Empty.objects.all()), 2)
self.assertTrue(m.id is not None)
existing = Empty(m.id)
existing.save()
class NoModelTests(TestCase):
"""
Test for #7198 to ensure that the proper error message is raised
when attempting to load an app with no models.py file.
Because the test runner won't currently load a test module with no
models.py file, this TestCase instead lives in this module.
It seemed like an appropriate home for it.
"""
@override_settings(INSTALLED_APPS=("modeltests.empty.no_models",))
def test_no_models(self):
with six.assertRaisesRegex(self, ImproperlyConfigured,
'App with label no_models is missing a models.py module.'):
get_app('no_models')
import arrow
from api.serializers import TenureSerializer, UserInvitationSerializer
from rest_framework import serializers
from rest_framework.test import APITestCase
from ..handlers import get_new_tentative_end_date
from .utils import (create_fake_society, create_tenure, get_deadline,
get_fake_user)
class TenureTests(APITestCase):
def test_tenure_creation(self):
"""
Ensure we can signup a new user with valid credentials.
"""
society = create_fake_society()
inviter = society.admin
self.assertIsNone(society.active_tenure)
now = arrow.now('Africa/Lagos').date()
tenure = create_tenure(society, when=now)
self.assertIsNotNone(society.active_tenure)
self.assertEqual(society.active_tenure, tenure)
serializer = TenureSerializer()
serializer.society = society
# duplicate tenure
self.assertRaises(serializers.ValidationError, serializer.validate_start_date, now)
tenure.delete()
past = arrow.now('Africa/Lagos').shift(days=-1).date()
# tenure in the past
self.assertRaises(serializers.ValidationError, serializer.validate_start_date, past)
future = arrow.now('Africa/Lagos').shift(days=2).date()
tenure = create_tenure(society, when=future)
# Conflicting tenure
self.assertRaises(serializers.ValidationError, serializer.validate_start_date, now)
tenure.delete()
tenure = create_tenure(society)
when = get_deadline(tenure)
# Test Cannot start a new tenure during an active tenure
self.assertRaises(serializers.ValidationError, serializer.validate_start_date, when)
def test_tenure_update_after_user_join(self):
society = create_fake_society()
tenure = create_tenure(society)
inviter = society.admin
invitee = get_fake_user()
self.assertIsNone(invitee.society)
old_tentative_end_date = tenure.tentative_end_date
new_tentative_end_date = get_new_tentative_end_date(tenure.tentative_end_date)
last_schedule_before_new_user_joined = tenure.collection_schedules.order_by('-id').first()
UserInvitationSerializer().join_society(inviter, invitee)
self.assertIsNotNone(invitee.society)
self.assertEqual(invitee.society, society)
tenure.refresh_from_db()
# new member is last collector
last_schedule_after_new_user_joined = tenure.collection_schedules.order_by('-id').first()
self.assertTrue(last_schedule_after_new_user_joined.id > last_schedule_before_new_user_joined.id)
self.assertNotEqual(old_tentative_end_date, new_tentative_end_date)
self.assertEqual(tenure.tentative_end_date, new_tentative_end_date)
# coding: utf-8
import argparse
import time
from watchdog.observers import Observer
from pywatcher import PyWatcher
from logging import getLogger, Formatter, StreamHandler, DEBUG
logger = getLogger(__name__)
formatter = Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler = StreamHandler()
handler.setLevel(DEBUG)
handler.setFormatter(formatter)
logger.setLevel(DEBUG)
logger.addHandler(handler)
COMMAND_DESCRIPTION = """\
-----------------------------------------------------------------------
PyWatcher:
monitor file and reload process. like gulp watch
e.g:
pywatcher -t . -c 'ping localhost'
-> if some file on current dir changed, restart process 'ping localhost'.
-----------------------------------------------------------------------
"""
def init():
"""
arguments.
"""
parser = argparse.ArgumentParser(description=COMMAND_DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-t',
'--target-dir',
type=str,
required=True,
dest='target_dir_path',
help='target directory for watching.'
)
parser.add_argument(
'-c',
'--command',
type=str,
required=True,
dest='target_command_str',
help='target command. this command execute and restart when file changed.'
)
parser.add_argument(
'-s',
'--reload-interval-seconds',
type=int,
required=False,
default=5,
dest='reload_threshold_seconds',
help='reload threshold seconds.'
)
parser.add_argument(
'--reload-wait-seconds',
type=int,
required=False,
default=0,
dest='reload_wait_seconds',
help='reload wait seconds.'
)
parser.add_argument(
'--disable-capture-stdout',
required=False,
action='store_true',
default=False,
dest='is_disable_capture_stdout',
help='is_disable_capture_stdout'
)
parser.add_argument(
'-p',
'--pattern',
type=str,
nargs='*',
required=False,
dest='target_pattern_list',
help='target pattern for monitoring. default, all file match.',
metavar='TARGET_PATTERN',
)
parser.add_argument(
'--signal',
required=False,
type=str,
default='TERM',
choices=('TERM', 'KILL'),
dest='reload_signal',
help='reload_signal'
)
parser.add_argument(
'--is-use-shell',
required=False,
action='store_true',
default=False,
dest='is_use_shell',
help='use shell=True ?'
)
return parser.parse_args()
def main_action(target_dir, command, reload_threshold_seconds, watch_pattern_list,
reload_wait_seconds, is_use_shell, reload_signal, is_disable_capture_stdout):
while True:
event_handler = PyWatcher(
process_command=command,
reload_threshold_seconds=reload_threshold_seconds,
is_capture_subprocess_output=not is_disable_capture_stdout,
pattern_list=watch_pattern_list,
is_use_shell=is_use_shell,
reload_signal=reload_signal,
reload_wait_seconds=reload_wait_seconds,
logger=logger
)
observer = Observer()
observer.schedule(event_handler, target_dir, recursive=True)
observer.start()
try:
while True:
time.sleep(0.3)
except KeyboardInterrupt:
logger.info('stop watch request received.')
observer.stop()
logger.info('stop watch.')
break
observer.join()
def main():
args = init()
main_action(
target_dir=args.target_dir_path,
command=args.target_command_str,
reload_threshold_seconds=args.reload_threshold_seconds,
is_use_shell=args.is_use_shell,
watch_pattern_list=args.target_pattern_list,
reload_signal=args.reload_signal,
reload_wait_seconds=args.reload_wait_seconds,
is_disable_capture_stdout=args.is_disable_capture_stdout,
)
if __name__ == '__main__':
main()
import sys
import os
from collections import OrderedDict
from ttfautohint._compat import (
ensure_binary, ensure_text, basestring, open, IntEnum,
)
USER_OPTIONS = dict(
in_file=None,
in_buffer=None,
out_file=None,
control_file=None,
control_buffer=None,
reference_file=None,
reference_buffer=None,
reference_index=0,
reference_name=None,
hinting_range_min=8,
hinting_range_max=50,
hinting_limit=200,
hint_composites=False,
adjust_subglyphs=False,
increase_x_height=14,
x_height_snapping_exceptions="",
windows_compatibility=False,
default_script="latn",
fallback_script="none",
fallback_scaling=False,
symbol=False,
fallback_stem_width=0,
ignore_restrictions=False,
family_suffix=None,
detailed_info=False,
no_info=False,
TTFA_info=False,
dehint=False,
epoch=None,
debug=False,
verbose=False,
)
StemWidthMode = IntEnum("StemWidthMode",
[
"NATURAL", # -1
"QUANTIZED", # 0
"STRONG", # 1
],
start=-1)
STEM_WIDTH_MODE_OPTIONS = OrderedDict([
("gray_stem_width_mode", StemWidthMode.QUANTIZED),
("gdi_cleartype_stem_width_mode", StemWidthMode.STRONG),
("dw_cleartype_stem_width_mode", StemWidthMode.QUANTIZED),
])
USER_OPTIONS.update(STEM_WIDTH_MODE_OPTIONS)
# Deprecated; use stem width mode options
STRONG_STEM_WIDTH_OPTIONS = dict(
gdi_cleartype_strong_stem_width=True,
gray_strong_stem_width=False,
dw_cleartype_strong_stem_width=False,
)
PRIVATE_OPTIONS = frozenset([
"in_buffer_len",
"control_buffer_len",
"reference_buffer_len",
"out_buffer",
"out_buffer_len",
"error_string",
"alloc_func",
"free_func",
"info_callback",
"info_post_callback",
"info_callback_data",
"progress_callback",
"progress_callback_data",
"error_callback",
"error_callback_data",
])
ALL_OPTIONS = frozenset(USER_OPTIONS) | PRIVATE_OPTIONS
# used when the control file does not have a name on the filesystem
CONTROL_NAME_FALLBACK = u"<control-instructions>"
def validate_options(kwargs):
opts = {k: kwargs.pop(k, USER_OPTIONS[k]) for k in USER_OPTIONS}
if kwargs:
raise TypeError(
"unknown keyword argument%s: %s" % (
"s" if len(kwargs) > 1 else "",
", ".join(repr(k) for k in kwargs)))
if opts["no_info"] and opts["detailed_info"]:
raise ValueError("no_info and detailed_info are mutually exclusive")
in_file, in_buffer = opts.pop("in_file"), opts.pop("in_buffer")
if in_file is None and in_buffer is None:
raise ValueError("No input file or buffer provided")
elif in_file is not None and in_buffer is not None:
raise ValueError("in_file and in_buffer are mutually exclusive")
if in_file is not None:
try:
in_buffer = in_file.read()
except AttributeError:
with open(in_file, "rb") as f:
in_buffer = f.read()
if not isinstance(in_buffer, bytes):
raise TypeError("in_buffer type must be bytes, not %s"
% type(in_buffer).__name__)
opts['in_buffer'] = in_buffer
opts['in_buffer_len'] = len(in_buffer)
control_file = opts.pop('control_file')
control_buffer = opts.pop('control_buffer')
if control_file is not None:
if control_buffer is not None:
raise ValueError(
"control_file and control_buffer are mutually exclusive")
try:
control_buffer = control_file.read()
except AttributeError:
with open(control_file, "rt", encoding="utf-8") as f:
control_buffer = f.read()
opts["control_name"] = control_file
else:
try:
opts["control_name"] = control_file.name
except AttributeError:
pass
if control_buffer is not None:
opts['control_buffer'] = ensure_binary(control_buffer, "utf-8")
opts['control_buffer_len'] = len(control_buffer)
if "control_name" in opts:
opts["control_name"] = ensure_text(
opts["control_name"], encoding=sys.getfilesystemencoding())
else:
opts["control_name"] = CONTROL_NAME_FALLBACK
reference_file = opts.pop('reference_file')
reference_buffer = opts.pop('reference_buffer')
if reference_file is not None:
if reference_buffer is not None:
raise ValueError(
"reference_file and reference_buffer are mutually exclusive")
try:
reference_buffer = reference_file.read()
except AttributeError:
with open(reference_file, "rb") as f:
reference_buffer = f.read()
if opts["reference_name"] is None:
opts["reference_name"] = reference_file
else:
if opts["reference_name"] is None:
try:
opts["reference_name"] = reference_file.name
except AttributeError:
pass
if reference_buffer is not None:
if not isinstance(reference_buffer, bytes):
raise TypeError("reference_buffer type must be bytes, not %s"
% type(reference_buffer).__name__)
opts['reference_buffer'] = reference_buffer
opts['reference_buffer_len'] = len(reference_buffer)
if opts["reference_name"] is not None:
opts["reference_name"] = ensure_binary(
opts["reference_name"], encoding=sys.getfilesystemencoding())
for key in ('default_script', 'fallback_script',
'x_height_snapping_exceptions'):
opts[key] = ensure_binary(opts[key])
if opts['epoch'] is not None:
from ctypes import c_ulonglong
opts['epoch'] = c_ulonglong(opts['epoch'])
if opts["family_suffix"] is not None:
opts["family_suffix"] = ensure_text(opts["family_suffix"])
for mode_option in STEM_WIDTH_MODE_OPTIONS:
# raises ValueError if integer value is not a valid stem width mode
opts[mode_option] = StemWidthMode(opts[mode_option])
return opts
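# Added illustrative sketch (not part of the upstream module): validate_options
# is typically fed the keyword arguments coming from the ttfautohint() call; a
# minimal invocation with an in-memory font might look like
#   opts = validate_options({"in_buffer": font_bytes, "hinting_limit": 0})
# where font_bytes is assumed to be the bytes of a TTF read elsewhere; the
# returned dict carries derived fields such as 'in_buffer_len'.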
def format_varargs(**options):
items = sorted((k, v) for k, v in options.items()
if k in ALL_OPTIONS and v is not None)
format_string = b", ".join(ensure_binary(k.replace("_", "-"))
for k, v in items)
values = tuple(v for k, v in items)
return format_string, values
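# Worked example (added for illustration): with keyword arguments
#   format_varargs(hinting_limit=200, symbol=True)
# the items are sorted by key, so this returns
#   (b"hinting-limit, symbol", (200, True))
# i.e. a format string plus the matching values for the library call.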
def strong_stem_width(s):
if len(s) > 3:
import argparse
raise argparse.ArgumentTypeError(
"string can only contain up to 3 letters")
valid = {
"g": "gray_stem_width_mode",
"G": "gdi_cleartype_stem_width_mode",
"D": "dw_cleartype_stem_width_mode"}
chars = set(s)
invalid = chars - set(valid)
if invalid:
import argparse
raise argparse.ArgumentTypeError(
"invalid value: %s" % ", ".join(
repr(v) for v in sorted(invalid)))
result = {}
for char, opt_name in valid.items():
is_strong = char in chars
result[opt_name] = (StemWidthMode.STRONG if is_strong
else StemWidthMode.QUANTIZED)
return result
def stem_width_mode(s):
if len(s) != 3:
import argparse
raise argparse.ArgumentTypeError(
"Stem width mode string must consist of exactly three letters")
modes = {k[0].lower(): v
for k, v in StemWidthMode.__members__.items()}
result = {}
for i, option in enumerate(STEM_WIDTH_MODE_OPTIONS):
m = s[i]
if m not in modes:
import argparse
letters = sorted(repr(k) for k in modes)
raise argparse.ArgumentTypeError(
"Stem width mode letter for %s must be %s, or %s"
% (option, ", ".join(letters[:-1]), letters[-1]))
result[option] = modes[m]
return result
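# Example mapping (added for illustration): the default string "qsq" would
# translate to
#   {'gray_stem_width_mode': StemWidthMode.QUANTIZED,
#    'gdi_cleartype_stem_width_mode': StemWidthMode.STRONG,
#    'dw_cleartype_stem_width_mode': StemWidthMode.QUANTIZED}
# i.e. the same values as STEM_WIDTH_MODE_OPTIONS above.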
def stdin_or_input_path_type(s):
# the special argument "-" means sys.stdin
if s == "-":
try:
if sys.stdin.isatty(): # ignore if interactive
return None
return open(sys.stdin.fileno(), mode="rb", closefd=False)
except (AttributeError, IOError):
# if stdin was redirected (e.g. inside pytest), fileno may raise
# io.UnsupportedOperation
return None
return s
def stdout_or_output_path_type(s):
# the special argument "-" means sys.stdout
if s == "-":
try:
if sys.stdout.isatty(): # ignore if interactive
return None
return open(sys.stdout.fileno(), mode="wb", closefd=False)
except (AttributeError, IOError):
# if stdout was redirected (e.g. inside pytest), fileno may raise
# io.UnsupportedOperation
return None
return s
def parse_args(args=None):
"""Parse command line arguments and return a dictionary of options
for ttfautohint.ttfautohint function.
`args` can be either None, a list of strings, or a single string,
that is split into individual options with `shlex.split`.
When `args` is None, the console's default sys.argv are used, and any
SystemExit exceptions raised by argparse are propagated.
If args is a string list or a string, it is assumed that the function
was not called from a console script's `main` entry point, but from
other client code, and thus the SystemExit exceptions are muted and
a `None` value is returned.
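    Illustrative call (added example, not upstream documentation):
        options = parse_args(["-v", "-G", "0", "font.ttf", "hinted.ttf"])
    should return a dictionary suitable for ttfautohint(**options), with
    verbose=True, hinting_limit=0 and the two positional paths filled in.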
"""
import argparse
from ttfautohint import __version__, libttfautohint
from ttfautohint.cli import USAGE, DESCRIPTION, EPILOG
version_string = "ttfautohint-py %s (libttfautohint %s)" % (
__version__, libttfautohint.version_string)
if args is None:
capture_sys_exit = False
else:
capture_sys_exit = True
if isinstance(args, basestring):
import shlex
args = shlex.split(args)
parser = argparse.ArgumentParser(
prog="ttfautohint",
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"in_file", nargs="?", metavar="IN-FILE", default="-",
type=stdin_or_input_path_type,
help="input file (default: standard input)")
parser.add_argument(
"out_file", nargs="?", metavar="OUT-FILE", default="-",
type=stdout_or_output_path_type,
help="output file (default: standard output)")
parser.add_argument(
"--debug", action="store_true", help="print debugging information")
stem_width_group = parser.add_mutually_exclusive_group(required=False)
stem_width_group.add_argument(
"-a", "--stem-width-mode", type=stem_width_mode, metavar="S",
default=STEM_WIDTH_MODE_OPTIONS,
help=("select stem width mode for grayscale, GDI ClearType, and DW "
"ClearType, where S is a string of three letters with possible "
"values 'n' for natural, 'q' for quantized, and 's' for strong "
"(default: qsq)"))
stem_width_group.add_argument( # deprecated
"-w", "--strong-stem-width", type=strong_stem_width, metavar="S",
help=argparse.SUPPRESS)
parser.add_argument(
"-c", "--composites", dest="hint_composites", action="store_true",
help="hint glyph composites also")
parser.add_argument(
"-d", "--dehint", action="store_true", help="remove all hints")
parser.add_argument(
"-D", "--default-script", metavar="SCRIPT",
default=USER_OPTIONS["default_script"],
help="set default OpenType script (default: %(default)s)")
parser.add_argument(
"-f", "--fallback-script", metavar="SCRIPT",
default=USER_OPTIONS["fallback_script"],
help="set fallback script (default: %(default)s)")
parser.add_argument(
"-F", "--family-suffix", metavar="SUFFIX",
help="append SUFFIX to the family name string(s) in the `name' table")
parser.add_argument(
"-G", "--hinting-limit", type=int, metavar="PPEM",
default=USER_OPTIONS["hinting_limit"],
help=("switch off hinting above this PPEM value (default: "
"%(default)s); value 0 means no limit"))
parser.add_argument(
"-H", "--fallback-stem-width", type=int, metavar="UNITS",
default=USER_OPTIONS["fallback_stem_width"],
help=("set fallback stem width (default: %(default)s font units at "
"2048 UPEM)"))
parser.add_argument(
"-i", "--ignore-restrictions", action="store_true",
help="override font license restrictions")
parser.add_argument(
"-I", "--detailed-info", action="store_true",
help=("add detailed ttfautohint info to the version string(s) in "
"the `name' table"))
parser.add_argument(
"-l", "--hinting-range-min", type=int, metavar="PPEM",
default=USER_OPTIONS["hinting_range_min"],
help="the minimum PPEM value for hint sets (default: %(default)s)")
parser.add_argument(
"-m", "--control-file", metavar="FILE",
help="get control instructions from FILE")
parser.add_argument(
"-n", "--no-info", action="store_true",
help=("don't add ttfautohint info to the version string(s) in the "
"`name' table"))
parser.add_argument(
"-p", "--adjust-subglyphs", action="store_true",
help="handle subglyph adjustments in exotic fonts")
parser.add_argument(
"-r", "--hinting-range-max", type=int, metavar="PPEM",
default=USER_OPTIONS["hinting_range_max"],
help="the maximum PPEM value for hint sets (default: %(default)s)")
parser.add_argument(
"-R", "--reference", dest="reference_file", metavar="FILE",
help="derive blue zones from reference font FILE")
parser.add_argument(
"-s", "--symbol", action="store_true",
help="input is symbol font")
parser.add_argument(
"-S", "--fallback-scaling", action="store_true",
help="use fallback scaling, not hinting")
parser.add_argument(
"-t", "--ttfa-table", action="store_true", dest="TTFA_info",
help="add TTFA information table")
parser.add_argument(
"-T", "--ttfa-info", dest="show_TTFA_info", action="store_true",
help="display TTFA table in IN-FILE and exit")
parser.add_argument(
"-v", "--verbose", action="store_true",
help="show progress information")
parser.add_argument(
"-V", "--version", action="version",
version=version_string,
help="print version information and exit")
parser.add_argument(
"-W", "--windows-compatibility", action="store_true",
help=("add blue zones for `usWinAscent' and `usWinDescent' to avoid "
"clipping"))
parser.add_argument(
"-x", "--increase-x-height", type=int, metavar="PPEM",
default=USER_OPTIONS["increase_x_height"],
help=("increase x height for sizes in the range 6<=PPEM<=N; value "
"0 switches off this feature (default: %(default)s)"))
parser.add_argument(
"-X", "--x-height-snapping-exceptions", metavar="STRING",
default=USER_OPTIONS["x_height_snapping_exceptions"],
help=('specify a comma-separated list of x-height snapping exceptions'
', for example "-9, 13-17, 19" (default: "%(default)s")'))
parser.add_argument(
"-Z", "--reference-index", type=int, metavar="NUMBER",
default=USER_OPTIONS["reference_index"],
help="face index of reference font (default: %(default)s)")
try:
options = vars(parser.parse_args(args))
except SystemExit:
if capture_sys_exit:
return None
raise
# if either input/output are interactive, print help and exit
if (not capture_sys_exit and
(options["in_file"] is None or options["out_file"] is None)):
parser.print_help()
parser.exit(1)
# check SOURCE_DATE_EPOCH environment variable
source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
if source_date_epoch:
try:
options["epoch"] = int(source_date_epoch)
except ValueError:
import warnings
warnings.warn(
UserWarning("invalid SOURCE_DATE_EPOCH: %r" % source_date_epoch))
if options.pop("show_TTFA_info"):
# TODO use fonttools to dump TTFA table?
raise NotImplementedError()
stem_width_options = options.pop("stem_width_mode")
strong_stem_width_options = options.pop("strong_stem_width")
if strong_stem_width_options:
import warnings
warnings.warn(
UserWarning("Option '-w' is deprecated! Use option '-a' instead"))
stem_width_options = strong_stem_width_options
options.update(stem_width_options)
return options
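
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): it assumes the complete ttfautohint-py
# package, including the libttfautohint binding imported inside parse_args,
# is installed; the font file names are placeholders.
if __name__ == "__main__":
    opts = parse_args("-v -G 200 unhinted.ttf hinted.ttf")
    if opts is not None:
        # per the docstring above, the resulting dict is meant to be passed
        # to the ttfautohint.ttfautohint function
        print(sorted(opts))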
| StarcoderdataPython |
1643235 | <filename>www/controllers/api/session.py
from django.http import HttpResponse, HttpResponseNotFound
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from rest_framework import serializers, viewsets
from rest_framework.permissions import IsAuthenticated
from www.models.terminate_session import terminate_session
from datetime import datetime
import logging

@method_decorator(csrf_exempt, name='dispatch')
class session(View):
http_method_names = ['post']
def post(self, request, *args, **kwargs):
acceptable_actions = ['start']
action = kwargs['action']
if action not in acceptable_actions:
return HttpResponseNotFound("No action %s for sessions" % action)
        elif action == 'start':
            # TODO: create and start the session here; the original placeholder
            # call (session.getblah()) references a helper that does not exist,
            # so only the acknowledgement response is returned for now.
            return HttpResponse(action)
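
# Illustrative routing note: assuming a URL pattern such as
#   path('api/session/<str:action>/', session.as_view())
# a client would POST to /api/session/start/ to reach the branch above; any
# other action yields a 404. The URL pattern itself is an assumption and is
# not defined in this module.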

class session_serializer(serializers.HyperlinkedModelSerializer):
    # DRF input_formats expects strftime-style patterns (or the special 'iso-8601' value)
    cat_start_time = serializers.DateTimeField(input_formats=['iso-8601', '%Y/%m/%d %H:%M:%S'])
class Meta:
model = terminate_session
fields = ['id',
'skynet_start_time',
'cat_start_time',
'cat_version_requested',
'cat_version_reported',
'cat_end_time',
'cloud_app_href',
]

class session_viewset(viewsets.ModelViewSet):
permission_classes = (IsAuthenticated,)
queryset = terminate_session.objects.exclude(cat_end_time__isnull=False)
serializer_class = session_serializer
def partial_update(self, request, *args, **kwargs):
logger = logging.getLogger(__file__)
logger.debug("Patch Request.. "+str(request.data))
if 'cat_start_time' in request.data:
logger.debug("Attempting to update CAT Start Time")
if request.data['cat_start_time'] == 'now':
# request.data._mutable = True
request.data['cat_start_time'] = datetime.now().isoformat()
        # unpack args/kwargs so DRF's stock partial_update receives them correctly
        return super(session_viewset, self).partial_update(request, *args, **kwargs)
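
# Illustrative usage note: assuming session_viewset is registered with a DRF
# router under a prefix such as 'sessions', a client could report a start time
# with a request like
#   PATCH /sessions/<pk>/   {"cat_start_time": "now"}
# and partial_update above substitutes the current ISO timestamp before
# delegating to the stock ModelViewSet behaviour. The prefix and pk are
# assumptions; the router configuration is not shown in this module.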
| StarcoderdataPython |
1658415 | import math

def _get_distance(vec_a, vec_b):
return math.sqrt( ( vec_b[0] - vec_a[0] ) ** 2 + (vec_b[1] - vec_a[1]) ** 2 )

def get_points(vec_a, vec_b):
"""
return the points that intersect
"""
distance = _get_distance( vec_a, vec_b )
try:
# a = (r02 - r12 + d2 ) / (2 d)
a = (vec_a[2]**2 - vec_b[2]**2 + distance ** 2) / (2 * distance)
# h**2 = r0**2 - a**2
h = math.sqrt( vec_a[2] ** 2 - a ** 2 )
# p2 = p0 + a ( p1 - p0 ) / d
# where p0 = vec_a -and- p1 = vec_b
p2 = ( vec_a[0] + a*(vec_b[0] - vec_a[0]) / distance , vec_a[1] + a*(vec_b[1] - vec_a[1]) / distance )
x0 = p2[0] + h * (vec_b[1] - vec_a[1]) / distance
y0 = p2[1] - h * (vec_b[0] - vec_a[0]) / distance
x1 = p2[0] - h * (vec_b[1] - vec_a[1]) / distance
y1 = p2[1] + h * (vec_b[0] - vec_a[0]) / distance
p3 = (x0, y0)
p4 = (x1, y1)
        print(p3[0], p3[1])
        if p4 != p3:
            print(p4[0], p4[1])
    except (ValueError, ZeroDivisionError):
        # circles do not have intersection points (sqrt of a negative number)
        # or their centres coincide (division by zero)
        pass
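
# Worked example: two unit circles centred at (0, 0) and (1, 0), i.e.
# get_points((0.0, 0.0, 1.0), (1.0, 0.0, 1.0)), intersect at
# (0.5, -sqrt(3)/2) and (0.5, sqrt(3)/2), so the call prints roughly
# 0.5 -0.866 followed by 0.5 0.866.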

if __name__ == "__main__":
import sys
l = sys.argv[1:]
r = open(l[0],'r').read().split('\n')
r.remove('')
# print r
vec_a = [float(i) for i in r[0].split(' ')]
vec_b = [float(i) for i in r[1].split(' ')]
# print vec_a, vec_b
get_points(vec_a, vec_b)
| StarcoderdataPython |
1721497 | keys = input('Enter elements separated by ,(comma) for keys: ').split(',')
values = input('Enter elements separated by ,(comma) for values: ').split(',')
mydict = {keys[i]: values[i] for i in range(len(keys))}
newdict = {values[i]: keys[i] for i in range(len(values))}
print('Dict : ', mydict)
print('Inverted Dict :', newdict)
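# Sample run (illustrative): entering  a,b,c  at the keys prompt and  1,2,3
# at the values prompt prints
#   Dict :  {'a': '1', 'b': '2', 'c': '3'}
#   Inverted Dict : {'1': 'a', '2': 'b', '3': 'c'}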
| StarcoderdataPython |