metadata (dict) | text (string, lengths 60–3.49M) |
---|---|
{
"source": "joshua2352-cmis/joshua2352-cmis-cs2",
"score": 4
} |
#### File: joshua2352-cmis/joshua2352-cmis-cs2/countDOWN.py
```python
def countdown_from_to(start, stop):
    """Recursively count down from start to stop, then print 'Blastoff!'."""
    if start <= stop:
        print 'Blastoff!'
    else:
        print start
        countdown_from_to(start - 1, stop)

def countdown(n):
    """Count down from n to 1."""
    countdown_from_to(n, 1)

def main():
    start = int(raw_input("put a number to count down from:"))
    stop = int(raw_input("put the number you want it to stop at:"))
    countdown_from_to(start, stop)

main()
```
#### File: joshua2352-cmis/joshua2352-cmis-cs2/cs2Final.py
```python
import math
import random
#imports operators
def adder(total, total2):
    """Read numbers until a blank entry; total counts the entries, total2 sums them."""
    entry = raw_input("next number:")
    if entry == "":
        average = total2 / total
        print 'the average of the numbers was {}.'.format(average)
    else:
        adder(total + 1, total2 + float(entry))

def main():
    adder(0, 0.0)

main()
```
#### File: joshua2352-cmis/joshua2352-cmis-cs2/simplefunction.py
```python
import math
def div(no1,b,y,x):
answer=(float(b) * int(y) / int(x))
return answer
def user(b,y,x,no1):
actualanswer = div(no1,b,y,x)
out = """
print for (b) you put {}
for (y) you put {}
for (x) you put {}
the answer is {}
your comment was {}
""".format(b,y,x,actualanswer,no1)
return out
def main():
no1 = raw_input("this will find the answer to a simple equation any comments:")
b = raw_input("enter first numero this will be the number being divided i.e b:")
y = raw_input("enter second number this will be the number dividing i.e y:")
x = raw_input("enter third number this will be what b * y /x will equal enter any number")
function = div(no1,b,y,x)
function2 = user(b,y,x,no1)
print function2
main()
``` |
{
"source": "Joshua2504/OpenSourceHelpCommunity.github.io",
"score": 3
} |
#### File: oshc/authentication/regbackend.py
```python
from django.contrib.auth import get_user_model
class EmailLoginBackend(object):
'''
    This class checks whether the user can be authenticated via our backend;
    if that fails, the normal authentication backend is used instead.
'''
def authenticate(self, username=None, password=None):
user_cls = get_user_model()
try:
user = user_cls.objects.get(email=username)
if user.check_password(password):
return user
except user_cls.DoesNotExist:
return None
return None
def get_user(self, user_id):
user_cls = get_user_model()
try:
return user_cls.objects.get(pk=user_id)
except user_cls.DoesNotExist:
return None
```
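For Django to consult this email backend at login it has to be listed in the project settings. A minimal sketch, assuming the class is importable at the dotted path implied by the file location above (the exact path may differ in the real project):
```python
# settings.py -- sketch only; the dotted path below is inferred from the file
# location shown above and may not match the actual project layout.
AUTHENTICATION_BACKENDS = [
    'oshc.authentication.regbackend.EmailLoginBackend',  # try email login first
    'django.contrib.auth.backends.ModelBackend',         # fall back to the default backend
]
```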
#### File: oshc/authentication/views.py
```python
from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import JsonResponse
def profile(request):
return render(request, "profile.html")
def validate_username(request):
username = request.GET.get('username', None)
data = {
'is_present': User.objects.filter(username__iexact=username).exists()
}
return JsonResponse(data)
``` |
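`validate_username` is the sort of endpoint usually hit from an AJAX availability check, so it needs a URL route. A minimal wiring sketch, assuming a Django 2+ style `urls.py` next to this views module; the route paths and names are illustrative:
```python
# urls.py -- illustrative sketch; route paths and names are assumptions.
from django.urls import path

from . import views

urlpatterns = [
    path('profile/', views.profile, name='profile'),
    path('validate_username/', views.validate_username, name='validate_username'),
]
```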
{
"source": "Joshua3212/Dionysus",
"score": 3
} |
#### File: dionysus/adapters/dummy.py
```python
class DummyAdapter:
def __init__(self):
pass
def on_data(self, callback):
for _i in range(1, 100):
print("-- dummy adapter on_data")
callback("/dummy", {"a": "b", "b": {"c": "d"}})
def on_send(self, data):
print("-- dummy adapter on_send")
print(data)
```
#### File: dionysus/utils/schemata.py
```python
from schema import Schema
def validate_schema(schema, data):
return Schema(schema).validate(data)
def is_schema_valid(schema, data):
return Schema(schema).is_valid(data)
```
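Both helpers are thin wrappers around the `schema` package: `validate_schema` raises `SchemaError` on a mismatch, while `is_schema_valid` returns a boolean. A small usage sketch; the example schema and payloads are made up:
```python
# Usage sketch; the example schema and payloads are illustrative only.
from schema import SchemaError

from dionysus.utils.schemata import is_schema_valid, validate_schema

message_schema = {"a": str, "b": {"c": str}}

print(is_schema_valid(message_schema, {"a": "b", "b": {"c": "d"}}))  # True
try:
    validate_schema(message_schema, {"a": 1})  # wrong type and missing key "b"
except SchemaError as err:
    print("invalid payload:", err)
```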
#### File: Joshua3212/Dionysus/setup.py
```python
from setuptools import setup, find_packages
import dionysus
install_requires = ["redis>=4.1.0", "schema>=0.7.5"]
# Conditional dependencies:
def long_description():
with open("README.md", encoding="utf-8") as f:
return f.read()
setup(
name="dionysus",
version=dionysus.__version__,
description=dionysus.__description__,
long_description=long_description(),
long_description_content_type="text/markdown",
url="https://github.com/Joshua3212/dionysus",
author=dionysus.__author__,
author_email="<EMAIL>",
license="MIT",
packages=["dionysus", "dionysus.utils", "dionysus.adapters"],
python_requires=">=3.7",
install_requires=install_requires,
project_urls={
"GitHub": "https://github.com/Joshua3212/dionysus",
},
)
``` |
{
"source": "joshua5201/NATwitchPlay",
"score": 3
} |
#### File: joshua5201/NATwitchPlay/CMDParser.py
```python
import re
from collections import Counter
class CMDParser:
modes = {"normal", "violence", "democracy", "reverse"}
    @staticmethod
    def print_mode():
res = ""
for mode in CMDParser.modes:
res += (mode + " ")
return res
class NoModeError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def __init__(self, mode):
if mode not in self.modes:
raise self.NoModeError(mode)
        self.mode = mode
def parse(self, text, commands):
cmd_re = re.compile("|".join(commands))
input_cmds = cmd_re.findall(text)
if self.mode == "normal":
return input_cmds
elif self.mode == "reverse":
return self.reverse(input_cmds)
else :
max_count = 0
max_cmds = []
input_count = Counter(input_cmds)
for cmd in self.uniq(input_cmds):
if input_count[cmd] > max_count:
max_cmds = [cmd]
max_count = input_count[cmd]
elif input_count[cmd] == max_count:
max_cmds += [cmd]
if self.mode == "democracy":
return max_cmds
res_cmds = []
for max_cmd in max_cmds:
res_cmds += [max_cmd] * max_count
return res_cmds
def reverse(self, cmds):
reverse_dict = {"left":"right", "right":"left", "up":"down", "down":"up", "A":"B", "B":"A", "L":"R", "R":"L", "select":"start", "start":"select"}
reverse_cmds = []
for cmd in cmds:
reverse_cmds += [reverse_dict[cmd]]
return reverse_cmds
def uniq(self, li):
founded = set()
uniq_list = []
for entry in li :
if entry not in founded:
uniq_list += [entry]
founded.add(entry)
return uniq_list
``` |
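A short usage sketch of the parser, with invented chat text: `democracy` mode keeps only the most frequent command(s), and `reverse` flips each command through `reverse_dict`. The import path assumes the top-level `CMDParser.py` module shown above.
```python
# Usage sketch; the chat text and command list are illustrative.
from CMDParser import CMDParser

commands = ["left", "right", "up", "down", "A", "B"]

parser = CMDParser("democracy")
print(parser.parse("left left A right left", commands))  # ['left']

parser = CMDParser("reverse")
print(parser.parse("left up A", commands))               # ['right', 'down', 'B']

try:
    CMDParser("anarchy")
except CMDParser.NoModeError as err:
    print("unknown mode:", err, "available:", CMDParser.print_mode())
```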
{
"source": "Joshua56/covid-19-estimator-py",
"score": 4
} |
#### File: covid-19-estimator-py/src/estimator.py
```python
def number_of_days_in_period(periodType, timeToElapse):
""" Receives the period type and calculates the number of days in that period """
if periodType == "days":
return timeToElapse
elif periodType == "weeks":
return timeToElapse * 7
elif periodType == "months":
return timeToElapse * 30
else:
return 0
def estimator(data):
""" Receives data inputs and makes estimate calulations based on that data """
reported_cases = data['reportedCases']
# Currently infected calculations for mild and severe scenarios
    mild_currently_infected = reported_cases * 10
severe_currently_infected = reported_cases * 50
# Infections by requested time for both mild and severe scenarios
    mild_infections_by_requested_time = int(
        mild_currently_infected *
        2 ** (number_of_days_in_period(data["periodType"], data["timeToElapse"]) // 3))
    severe_infections_by_requested_time = int(
        severe_currently_infected *
        2 ** (number_of_days_in_period(data["periodType"], data["timeToElapse"]) // 3))
# Severe positive cases by requested time for both mild and severe scenarios
mild_severe_cases_by_requested_time = int(
0.15 * mild_infections_by_requested_time)
severe_severe_cases_by_requested_time = int(
0.15 * severe_infections_by_requested_time)
# Available hospital beds for severe cases for both mild and severe scenarios
    mild_hospital_beds_requested_time = int(
        (data["totalHospitalBeds"] * 0.35) - mild_severe_cases_by_requested_time)
    severe_hospital_beds_requested_time = int(
        (data["totalHospitalBeds"] * 0.35) - severe_severe_cases_by_requested_time)
# Severe positive cases that will require ICU for both mild and severe scenarios
mild_cases_for_ICU_by_requested_time = int(
0.05 * mild_infections_by_requested_time)
severe_cases_for_ICU_by_requested_time = int(0.05 *
severe_infections_by_requested_time)
# Severe positive cases that will require ventilators for both mild and severe scenarios
mild_cases_for_ventilators_by_requested_time = int(0.02 *
mild_infections_by_requested_time)
severe_cases_for_ventilators_by_requested_time = int(0.02 *
severe_infections_by_requested_time)
# Dollars in Flight for both mild and severe impact scenarios.
    mild_dollars_in_flight = int(
        (mild_infections_by_requested_time *
         data["region"]["avgDailyIncomePopulation"] *
         data["region"]["avgDailyIncomeInUSD"]) /
        number_of_days_in_period(data["periodType"], data["timeToElapse"]))
    severe_dollars_in_flight = int(
        (severe_infections_by_requested_time *
         data["region"]["avgDailyIncomePopulation"] *
         data["region"]["avgDailyIncomeInUSD"]) /
        number_of_days_in_period(data["periodType"], data["timeToElapse"]))
# Response data
results = {
"data": data,
"impact": {
"currentlyInfected": mild_currenty_infected,
"infectionsByRequestedTime": mild_infections_by_requested_time,
"hospitalBedsByRequestedTime": mild_hospital_beds_requested_time,
"severeCasesByRequestedTime": mild_severe_cases_by_requested_time,
"casesForICUByRequestedTime": mild_cases_for_ICU_by_requested_time,
"casesForVentilatorsByRequestedTime": mild_cases_for_ventilators_by_requested_time,
"dollarsInFlight": mild_dollars_in_flight
},
"severeImpact": {
"currentlyInfected": severe_currently_infected,
"infectionsByRequestedTime": severe_infections_by_requested_time,
"hospitalBedsByRequestedTime": severe_hospital_beds_requested_time,
"severeCasesByRequestedTime": severe_severe_cases_by_requested_time,
"casesForICUByRequestedTime": severe_cases_for_ICU_by_requested_time,
"casesForVentilatorsByRequestedTime": severe_cases_for_ventilators_by_requested_time,
"dollarsInFlight": severe_dollars_in_flight
}
}
return results
``` |
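A worked call against `estimator` (assumed in scope), using an invented payload shaped like the fields the function indexes (`reportedCases`, `periodType`, `timeToElapse`, `totalHospitalBeds`, and the nested `region` values):
```python
# Illustrative input; the figures are made up.
sample_data = {
    "reportedCases": 100,
    "periodType": "days",
    "timeToElapse": 30,
    "totalHospitalBeds": 1000,
    "region": {
        "avgDailyIncomePopulation": 0.7,
        "avgDailyIncomeInUSD": 5,
    },
}

result = estimator(sample_data)
# 100 reported cases * 10 = 1,000 currently infected (mild scenario);
# doubling every 3 days over 30 days gives 1,000 * 2**10 = 1,024,000 infections.
print(result["impact"]["currentlyInfected"])          # 1000
print(result["impact"]["infectionsByRequestedTime"])  # 1024000
```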
{
"source": "joshua-612/Chain_Reaction",
"score": 4
} |
#### File: joshua-612/Chain_Reaction/logic.py
```python
from typing import Dict, List, Any, Union
class GameLogic:
def __init__(self):
self.values_list = []
def logic(self, gameboard):
self.values_list = [[0] * len(gameboard[0]) for i in range(len(gameboard))]
corners_dict_list = []
# the four corners
for i in range(len(gameboard)):
for j in range((len(gameboard[i]))):
if (j == 0 or j == len(gameboard[i]) - 1) and (i == 0 or i == len(gameboard) - 1):
self.values_list[i][j] = 1
# the center square
for x in range(len(gameboard)):
            for y in range(len(gameboard[x])):
if (0 < x < len(gameboard) - 1) and (0 < y < len(gameboard[x]) - 1):
self.values_list[x][y] = 3
# for the sides
for x in range(len(gameboard)):
            for y in range(len(gameboard[x])):
if (x == 0 or x == len(gameboard) - 1) and (0 < y < len(gameboard[x]) - 1):
self.values_list[x][y] = 2
if (y == 0 or y == len(gameboard[x]) - 1) and (0 < x < len(gameboard) - 1):
self.values_list[x][y] = 2
def returnNeighbours(self, grid, x, y):
neighbours_list = []
not_available = ["","","",""]
# check the missing neighbours
if(x + 1 > len(grid) -1 ):
not_available[0] = "x"
if(x - 1 < 0):
not_available[1] = "x"
if(y + 1 > len(grid) - 1):
not_available[2] = "x"
if(y - 1 < 0):
not_available[3] = "x"
if(not_available[0] != "x"):
neighbours_list.append([x + 1, y])
if(not_available[1] != "x"):
neighbours_list.append([x - 1, y])
if(not_available[2] != "x"):
neighbours_list.append([x, y + 1])
if(not_available[3] != "x"):
neighbours_list.append([x, y - 1])
return neighbours_list
def getMaximum(self, x, y):
return self.values_list[x][y]
```
#### File: joshua-612/Chain_Reaction/main.py
```python
import sys
import logic
import pygame
BLACK = (0, 0, 0)
WHITE = (200, 200, 200)
BLOCK_SIZE = 70
WINDOW_HEIGHT = 0
WINDOW_WIDTH = 0
class Ball:
def __init__(self, neighbours, maximum, value, position):
self.color = "" # String : "Green" or "Red"
self.neighbours = neighbours # List : positions of neighbours
self.maximum = maximum # int : Maximum value
self.value = value # int : Current value
self.position = position # list : position of the ball
self.visible = False # Bool : True or false initially set to false
def updateValue(self):
if(self.value + 1 <= self.maximum):
self.visible = True
self.value += 1
return True
else:
self.visible = False
self.value = 0
self.color = ""
return False
def isVisible(self):
return self.visible
def updateColor(self, color):
self.color = color
def getNeighbours(self):
return self.neighbours
def getValue(self):
return self.value
def getColor(self):
return self.color
class GameOver(Exception):
pass
class ChainReaction:
def __init__(self, row, col):
pygame.init()
self.ROW = row
self.COL = col
self.grid = [[0] * self.COL for i in range(self.ROW)]
self.gl = logic.GameLogic()
self.gl.logic(self.grid)
self.balls_list = [[Ball] * self.COL for i in range(self.ROW)]
for x in range(row):
for y in range(col):
neighbours = self.gl.returnNeighbours(self.grid, x, y)
ball = Ball(neighbours, self.gl.getMaximum(x, y), 0, [x, y])
self.balls_list[x][y] = ball
def main(self):
WINDOW_HEIGHT = BLOCK_SIZE * self.ROW + 40
WINDOW_WIDTH = BLOCK_SIZE * self.COL
global SCREEN
SCREEN = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption("Chain Reaction")
SCREEN.fill(BLACK)
TURN_COUNTER = 0
while True:
self.drawGrid(self.grid)
for event in pygame.event.get():
# To Quit the game
if event.type == pygame.QUIT:
pygame.quit()
exit()
# sys.exit(0)
# Mousebutton click
if event.type == pygame.MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
x, y = pos[1] // BLOCK_SIZE, pos[0] // BLOCK_SIZE
color = ""
if TURN_COUNTER == 0:
color = "green"
else:
color = "red"
if (color == self.balls_list[x][y].getColor() or self.balls_list[x][y].getColor() == ""):
# Update turn counter
if TURN_COUNTER == 1:
TURN_COUNTER = 0
else:
TURN_COUNTER = 1
self.updateGrid(x, y, color)
pygame.display.update()
def isGameOver(self, color):
count = 0
red = -1
green = -1
for row in range(len(self.grid)):
for col in range(len(self.grid[0])):
if(self.balls_list[row][col].getColor() == "red"):
red += 1
if(self.balls_list[row][col].getColor() == "green"):
green += 1
if(self.balls_list[row][col].isVisible()):
count += 1
if count < 3:
return False
if(red == -1 and color != "red"):
print("red game over")
self.display_gameover_screen("green")
raise GameOver
# return True
if(green == -1 and color != "green"):
print("green game over")
self.display_gameover_screen("red")
raise GameOver
# return True
def updateGrid(self, x, y, color):
self.balls_list[x][y].updateColor(color)
result = self.balls_list[x][y].updateValue()
neighbours = self.balls_list[x][y].getNeighbours()
self.grid[x][y] = self.balls_list[x][y].getValue()
if not self.isGameOver(color):
if result:
pass
else:
for i in range(len(neighbours)):
self.updateGrid(neighbours[i][0], neighbours[i][1], color)
def display_gameover_screen(self, winner):
global SCREEN
WINDOW_HEIGHT = BLOCK_SIZE * self.ROW + 40
WINDOW_WIDTH = BLOCK_SIZE * self.COL
font = pygame.font.SysFont('Comic Sans MS', 35)
text = font.render( winner.capitalize() + " Won !!", False, (0, 255, 0))
SCREEN = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
SCREEN.fill((255,255,255))
SCREEN.blit(text, (10, (int(WINDOW_HEIGHT // 2) - 40)))
pygame.display.update()
def drawGrid(self, grid):
font = pygame.font.SysFont('Comic Sans MS', 35)
i, j = 0, 0
SCREEN.fill(BLACK)
for row in range(len(grid)):
i = 0
for col in range(len(grid[0])):
rect = pygame.Rect(i, j, BLOCK_SIZE, BLOCK_SIZE)
pygame.draw.rect(SCREEN, WHITE, rect, 1)
# Blank box for default
text = font.render(str(""), False, (0, 0, 0))
SCREEN.blit(text, (i + int(BLOCK_SIZE / 3), j + int(BLOCK_SIZE / 5)))
#red and green
if(self.balls_list[row][col].isVisible()):
if(self.balls_list[row][col].getColor() == "green"):
# text = font.render(str(grid[row][col]), False, (0, 255, 0))
# SCREEN.blit(text, (i + int(BLOCK_SIZE / 3), j + int(BLOCK_SIZE / 5)))
if(self.balls_list[row][col].getValue() == 1):
pygame.draw.circle(SCREEN, (0, 255, 0), (i + int(BLOCK_SIZE / 2), j + int(BLOCK_SIZE / 2)), 10)
if(self.balls_list[row][col].getValue() == 2):
pygame.draw.circle(SCREEN, (0, 255, 0), (i + int(BLOCK_SIZE / 2) - int(BLOCK_SIZE / 8), j + int(BLOCK_SIZE / 2)), 10)
pygame.draw.circle(SCREEN, (25, 215, 4), (i + int(BLOCK_SIZE / 2) + int(BLOCK_SIZE / 8), j + int(BLOCK_SIZE / 2)), 10)
if(self.balls_list[row][col].getValue() == 3):
pygame.draw.circle(SCREEN, (0, 255, 0), (i + int(BLOCK_SIZE / 2), j + int(BLOCK_SIZE / 3)), 10)
pygame.draw.circle(SCREEN, (25, 215, 4), (i + int(BLOCK_SIZE / 2) - int(BLOCK_SIZE / 8), j + int(BLOCK_SIZE / 2)), 10)
pygame.draw.circle(SCREEN, (19, 164, 3), (i + int(BLOCK_SIZE / 2) + int(BLOCK_SIZE / 8), j + int(BLOCK_SIZE / 2)), 10)
if(self.balls_list[row][col].getColor() == "red"):
# text = font.render(str(grid[row][col]), False, (255, 0, 0))
# SCREEN.blit(text, (i + int(BLOCK_SIZE / 3), j + int(BLOCK_SIZE / 5)))
if(self.balls_list[row][col].getValue() == 1):
pygame.draw.circle(SCREEN, (255, 0, 0), (i + int(BLOCK_SIZE / 2), j + int(BLOCK_SIZE / 2)), 10)
if(self.balls_list[row][col].getValue() == 2):
pygame.draw.circle(SCREEN, (255, 0, 0), (i + int(BLOCK_SIZE / 2) - int(BLOCK_SIZE / 8), j + int(BLOCK_SIZE / 2)), 10)
pygame.draw.circle(SCREEN, (210, 5, 5), (i + int(BLOCK_SIZE / 2) + int(BLOCK_SIZE / 8), j + int(BLOCK_SIZE / 2)), 10)
if(self.balls_list[row][col].getValue() == 3):
pygame.draw.circle(SCREEN, (255, 0, 0), (i + int(BLOCK_SIZE / 2), j + int(BLOCK_SIZE / 3)), 10)
pygame.draw.circle(SCREEN, (210, 5, 5), (i + int(BLOCK_SIZE / 2) - int(BLOCK_SIZE / 8), j + int(BLOCK_SIZE / 2)), 10)
pygame.draw.circle(SCREEN, (164, 5, 5), (i + int(BLOCK_SIZE / 2) + int(BLOCK_SIZE / 8), j + int(BLOCK_SIZE / 2)), 10)
i = i + BLOCK_SIZE
j = j + BLOCK_SIZE
if __name__ == "__main__":
print("Enter grid size row x col")
row = int(input())
col = int(input())
while True:
cr = ChainReaction(row, col)
try:
cr.main()
        except GameOver:
del cr
print("Play again ? y/n")
choice = input()
if choice.lower() == "y":
pass
else:
exit()
``` |
{
"source": "joshua655/v8cgi",
"score": 3
} |
#### File: GL/glesbindings/make_json.py
```python
import re
import json
PATH_GL = 'gl2.h'
FILE_JSON = 'glesbind.json'
#constant and function patterns
constant_pattern = """
.+define[\s]+ #define C/C++ keyword
(?P<name>GL_[^\s]+) #Constant name
[\s]+
(?P<value>[^\s]+) #Constant value
[\s]*
"""
function_pattern = """
[\s]*GL_APICALL[\s]+ #GLAPICALL typedef
(?P<return_type>[^\s]+) #Function return type
[\s]+GL_APIENTRY[\s]+ #GLAPIENTRY typedef
(?P<name>gl[A-Za-z0-9]+) #Function name
[\s]*
\((?P<parameters>.*)\); #Function parameters
"""
#precompile regexps
constant = re.compile(constant_pattern, re.VERBOSE)
function = re.compile(function_pattern, re.VERBOSE)
def main():
json_out = []
constants = []
#open input file
with open(PATH_GL, 'r') as fin:
#line accumulator. Function definitions
#can be spread into multiple lines
l = ''
#get function/constant prototype
for cont in fin:
l += cont.replace('\n', '')
if not balanced(l):
continue
#is constant declaration
mat = re.match(constant, l)
if mat and not mat.group('name') in constants:
constants.append(mat.group('name'))
json_out.append(make_constant(mat))
else:
#is function declaration
mat = re.match(function, l)
if mat:
json_out.append(make_function(mat))
l = '' #empty line accumulator
#dump as JSON
with open(FILE_JSON, 'w') as fout:
fout.write(json.dumps(json_out, indent=4))
def make_constant(match):
"""Returns a Constant JSON Object"""
return {
'type': 'c', #c for constant, f for function
'name': match.group('name'),
'value': match.group('value') #is this necessary?
}
def make_function(match):
"""Returns a Function JSON Object"""
return {
'type': 'f', #f for function
'name': match.group('name'),
'return_type': match.group('return_type'),
'parameters': get_parameters(match.group('parameters'))
}
def get_parameters(params):
"""Returns an ordered list of parameter types"""
params_list = []
params_aux = params.split(',')
passed = False
for par in params_aux:
if passed and not balanced(params_list[-1]):
params_list[-1] += ',' + par
else:
#magic
param = ' '.join(par.strip().replace('*', '').split(' ')[:-1]) + ('*' * (par.count('*') + par.count('[')))
if param.strip() != '': params_list.append(param)
passed = True
return params_list
def balanced(l):
return l.count('(') == l.count(')')
if __name__ == '__main__': main()
``` |
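To see what the two patterns capture, here is a small sketch that runs them, with the module above in scope, against hand-written header lines rather than the real `gl2.h`:
```python
# Illustrative check of the two patterns against hand-written header lines.
sample_constant = "#define GL_DEPTH_BUFFER_BIT               0x00000100"
sample_function = "GL_APICALL void GL_APIENTRY glClear (GLbitfield mask);"

m = re.match(constant, sample_constant)
print(make_constant(m))
# {'type': 'c', 'name': 'GL_DEPTH_BUFFER_BIT', 'value': '0x00000100'}

m = re.match(function, sample_function)
print(make_function(m))
# {'type': 'f', 'name': 'glClear', 'return_type': 'void', 'parameters': ['GLbitfield']}
```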
{
"source": "Joshua-AC-Curry/Covid-19-Dashboard",
"score": 3
} |
#### File: Covid-19-Dashboard/source_code/covid_news_handling.py
```python
import json
from requests.api import get
import logging
from retrieve_config_data import news_apikey
from retrieve_config_data import covid_terms
def news_API_request(covid_terms = covid_terms):
"""argument string
the phrases which to retreive articles with from the api
gets news articles from the api which contain the correct terms
return list(dictionary)
the correct news articles from api
"""
key = news_apikey ##change this through config
url = 'https://newsapi.org/v2/everything?q=' + covid_terms + "&apiKey=" + key
logging.info("getting data from api")
data = get(url).json()['articles']
logging.info("got data from api")
return data
##key : 447b46b598644e9c83d9f2ae23b1ba20
##note: when submitting, insert the API key in the config file
def update_news(covid_terms = covid_terms, articles = []):
"""argument list(dictionary)
recalls the api and updates the news articles
return list(dictionary)
updated list of news articles
"""
for art in news_API_request(covid_terms):
try:
articles.append(art)
except:
logging.error("articles is not callable")
logging.debug("type of articles is" + str(type(articles)))
return articles
``` |
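A short usage sketch. Note that `articles = []` is a mutable default argument, so results accumulate across calls that rely on the default; passing an explicit list, as below, sidesteps that. The query string is an example, and a valid `news_apikey` in the config file is assumed:
```python
# Usage sketch; assumes news_apikey is set in the config file read by
# retrieve_config_data. The query string is an example only.
articles = []
articles = update_news("Covid COVID-19 coronavirus", articles=articles)
for article in articles[:5]:
    print(article["title"])
```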
{
"source": "joshuaadampoirier/jp-migrator",
"score": 2
} |
#### File: migrator/database/SQLite3Database.py
```python
import logging
import pkg_resources
from sqlite3 import OperationalError
from migrator.database.BaseDatabase import BaseDatabase
logging.basicConfig(
filename='SQLite3Database.log',
level=logging.INFO,
format='|'
'%(asctime)-18s|'
'%(levelname)-4s|'
'%(module)-18s|'
'%(filename)-18s:%(lineno)-4s|'
'%(funcName)-18s|'
'%(message)-32s|',
datefmt='%Y-%m-%d %H:%M:%S'
)
class SQLite3Database(BaseDatabase):
"""SQLite3 database class.
Parameters
----------
cnxn : database server connection
Connection to the database.
"""
def __init__(self, cnxn):
logging.info('Creating database object')
self.cnxn = cnxn
self.__migrations_run()
def __migrations_run(self):
"""If it does not exist, create a table to track the migrations executed
against the database.
Parameters
----------
None
Returns
-------
None
"""
logging.info('Creating _migrationsrun table')
# open sql file
path = 'sqlite3/_MigrationsRun.sql'
filepath = pkg_resources.resource_filename(__name__, path)
f = open(filepath, 'r')
cursor = self.cnxn.cursor()
# run sql command
sql = f.read()
cursor.execute(sql)
self.cnxn.commit()
# cleanup
f.close()
cursor.close()
def check_migration(self, migration: str) -> bool:
"""Checks if a given migration script name has already been executed
against this database.
Parameters
----------
migration : str
Path to the migration file being investigated.
Returns
-------
exists : bool
True if it has already been executed, otherwise False
Notes
-----
We determine this by checking if the file exists in the _MigrationsRun
table.
SQLite3 does not support stored procedures, so we must dynamically build
the SQL query here.
"""
# create database cursor
cursor = self.cnxn.cursor()
# build sql query to determine if migration has been run
sql = '''
SELECT COUNT(1)
FROM _MigrationsRun
WHERE Migration = '{m}'
'''.format(m=migration)
# run the sql query
cursor.execute(sql)
# if non-zero returned, migration exists; otherwise not
if cursor.fetchone()[0] > 0:
exists = True
else:
exists = False
return exists
def update_migrations_run(self, migration: str):
"""Insert the given migration into the _MigrationsRun table.
Parameters
----------
migration : str
Pathname for the migration script.
Returns
-------
None
"""
# create database cursor
cursor = self.cnxn.cursor()
# build sql query to update _MigrationsRun
sql = '''
INSERT INTO _MigrationsRun
(
Migration
,DateRun
)
VALUES
(
'{m}'
,DATETIME('now')
)
'''.format(m=migration)
try:
# run the sql query
cursor.execute(sql)
self.cnxn.commit()
except OperationalError:
logging.error('Problem updating _MigrationsRun for {m}'.format(
m=migration
))
            raise  # re-raise the original OperationalError with its message intact
```
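The docstring above points out that SQLite has no stored procedures, so the query is built dynamically; sqlite3's `?` placeholders achieve the same thing without splicing values into the SQL text. A sketch of `check_migration` written that way (same behaviour, parameter binding instead of `str.format`):
```python
# Sketch of check_migration using sqlite3 parameter binding; behaviour is
# otherwise identical to the method shown above.
def check_migration(self, migration: str) -> bool:
    cursor = self.cnxn.cursor()
    sql = '''
        SELECT COUNT(1)
        FROM _MigrationsRun
        WHERE Migration = ?
    '''
    cursor.execute(sql, (migration,))
    exists = cursor.fetchone()[0] > 0
    cursor.close()
    return exists
```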
#### File: database/tests/test_sqlite3.py
```python
import pkg_resources
import unittest
from migrator.database.SQLite3Database import SQLite3Database
from migrator.server.SQLite3Server import SQLite3Server
class SQLite3DatabaseTestCase(unittest.TestCase):
'''
Test class for SQLite3 database class.
'''
def test_database_type(self):
'''
Test to ensure creating a SQLite3 database object generates an object of
the expected type.
'''
server = SQLite3Server('TestServer')
database = server.get_database()
self.assertIsInstance(database, SQLite3Database)
def test_migrations_run(self):
'''
Test to ensure the _MigrationsRun table was created in the database.
'''
# build objects to test
server = SQLite3Server('TestServer')
cnxn = server.get_connection()
cursor = cnxn.cursor()
# build SQL query and execute
path = 'sqlite3/test_migrations_run.sql'
filepath = pkg_resources.resource_filename(__name__, path)
f = open(filepath, 'r')
sql = f.read()
cursor.execute(sql)
# run the test
self.assertEqual(cursor.fetchone()[0], '_MigrationsRun')
# cleanup
f.close()
cursor.close()
``` |
{
"source": "JoshuaAdrianJones/cs50x-project",
"score": 2
} |
#### File: JoshuaAdrianJones/cs50x-project/helpers.py
```python
import os
import logging
from flask import Flask, flash, redirect, render_template, request, session
from flask_session import Session
from werkzeug.utils import secure_filename
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from tempfile import mkdtemp
from functools import wraps
import sqlite3
def login_required(f):
"""
Decorate routes to require login.
https://flask.palletsprojects.com/en/1.1.x/patterns/viewdecorators/
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get("user_id") is None:
return redirect("/login")
return f(*args, **kwargs)
return decorated_function
def allowed_file(filename):
ALLOWED_EXTENSIONS = {"txt", "xml"}
return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS
def apology(message, code=400):
"""Render message as an apology to user."""
def escape(s):
"""
Escape special characters.
https://github.com/jacebrowning/memegen#special-characters
"""
for old, new in [
("-", "--"),
(" ", "-"),
("_", "__"),
("?", "~q"),
("%", "~p"),
("#", "~h"),
("/", "~s"),
('"', "''"),
]:
s = s.replace(old, new)
return s
return render_template("apology.html", top=code, bottom=escape(message)), code
def add_record_to_db(db, username, p_hash, user_type):
latest_UID = list(db.execute("SELECT count(id) FROM users;"))
new_uid = latest_UID[0][0] + 1
db.execute(
f"INSERT INTO users (id, name, hash, user_type ) VALUES({new_uid}, '{username}', '{p_hash}', '{user_type}');"
)
db.commit()
def add_file_to_db(db, file_name, file_path, uploader_id):
latest_UID = list(db.execute("SELECT count(file_id) FROM files;"))
new_uid = latest_UID[0][0] + 1
# record file data and location for later retrieval
db.execute(
f"INSERT INTO files (file_id, file_name, file_path, uploader_id ) VALUES({new_uid}, '{file_name}', '{file_path}', '{uploader_id}');"
)
# record file to access list with user ID
latest_UID_access = list(
db.execute("SELECT count(access_record_id) FROM file_access;")
)
new_uid_access = latest_UID_access[0][0] + 1
db.execute(
f"INSERT INTO file_access (access_record_id, file_id, user_id) VALUES({new_uid_access},{new_uid}, {uploader_id});"
)
db.commit()
def load_user_files(db, uid):
allowed_file_ids = list(
db.execute(f"SELECT file_id FROM file_access WHERE user_id={uid}")
)
if allowed_file_ids:
allowed_file_ids = tuple([row[0] for row in allowed_file_ids])
if len(allowed_file_ids) > 1:
user_files = list(
db.execute(f"SELECT * FROM files WHERE file_id IN {allowed_file_ids}")
)
else:
user_files = list(
db.execute(f"SELECT * FROM files WHERE file_id={allowed_file_ids[0]}")
)
return user_files
else:
return []
def add_user_file_association(db, file_to_add_user_to, user_id_to_add):
latest_UID = list(db.execute("SELECT count(access_record_id) FROM file_access"))
file_id = list(
db.execute(
f"SELECT file_id from files where file_name='{file_to_add_user_to}' "
)
)[0][0]
new_uid = latest_UID[0][0] + 1
db.execute(
f"INSERT INTO file_access (access_record_id, file_id, user_id) VALUES({new_uid}, {file_id},{user_id_to_add});"
)
db.commit()
``` |
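`login_required`, `allowed_file` and `secure_filename` are meant to be combined in an upload route. A minimal illustrative sketch; the Flask `app` object, route path, upload folder and redirect target are assumptions, not taken from the project:
```python
# Illustrative route sketch; the `app` object, route path, upload folder and
# redirect target are assumptions and not taken from the project.
UPLOAD_FOLDER = "/tmp/uploads"

@app.route("/upload", methods=["POST"])
@login_required
def upload():
    file = request.files.get("file")
    if file is None or file.filename == "":
        return apology("no file selected")
    if not allowed_file(file.filename):
        return apology("only .txt and .xml files are allowed")
    filename = secure_filename(file.filename)
    file.save(os.path.join(UPLOAD_FOLDER, filename))
    return redirect("/")
```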
{
"source": "Joshuaalbert/bayes_filter",
"score": 2
} |
#### File: bayes_filter/bayes_filter/callbacks.py
```python
from .datapack import DataPack
import tensorflow as tf
import numpy as np
import os
from . import logging
from .plotting import DatapackPlotter
import pylab as plt
def callback_sequence(callbacks, args, run_async=False):
    # 'async' became a reserved keyword in Python 3.7, so the flag is named run_async.
    if run_async:
ops = []
for arg, callback in zip(args, callbacks):
if not isinstance(arg, (tuple, list)):
arg = [arg]
ops.append(callback(*arg))
return tf.group(ops)
lock = [tf.no_op()]
store_ops = []
for arg, callback in zip(args, callbacks):
if not isinstance(arg, (tuple, list)):
arg = [arg]
with tf.control_dependencies(lock):
store_ops.append(callback(*arg))
lock = store_ops[-1]
with tf.control_dependencies(lock):
return tf.no_op()
class Callback(object):
def __init__(self, *args, controls=None, **kwargs):
self._output_dtypes = None
self._name = 'Callback'
self._squeeze = False
self.controls = controls
self.callback_func = self.generate(*args, **kwargs)
@property
def controls(self):
return self._controls
@controls.setter
def controls(self, value):
if value is None:
self._controls = None
return
if not isinstance(value, (list, tuple)):
value = [value]
self._controls = list(value)
@property
def squeeze(self):
return self._squeeze
@squeeze.setter
def squeeze(self, value):
self._squeeze = value
def generate(self, *args, **kwargs):
raise NotImplementedError("Must subclass")
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def output_dtypes(self):
if self._output_dtypes is None:
raise ValueError("Output dtype should be a list of output dtypes.")
return self._output_dtypes
@output_dtypes.setter
def output_dtypes(self, value):
if not isinstance(value, (list, tuple)):
raise ValueError("output dtypes must be a list or tuple")
self._output_dtypes = value
def __call__(self, *Tin):
squeeze = len(self.output_dtypes) == 1
def py_func(*Tin):
result = self.callback_func(*[t.numpy() for t in Tin])
if not isinstance(result, (list,tuple)):
result = [result]
if len(result) != len(self.output_dtypes):
raise ValueError("Len of py_function result {} not equal to number of output dtypes {}".format(len(result), len(self.output_dtypes)))
if squeeze and self.squeeze:
return result[0]
return result
if self.controls is not None:
with tf.control_dependencies(self.controls):
if squeeze and self.squeeze:
return tf.py_function(py_func, Tin, self.output_dtypes[0], name=self.name)
return tf.py_function(py_func, Tin, self.output_dtypes, name=self.name)
else:
if squeeze and self.squeeze:
return tf.py_function(py_func, Tin, self.output_dtypes[0], name=self.name)
return tf.py_function(py_func, Tin, self.output_dtypes, name=self.name)
class Chain(Callback):
    def __init__(self, *callbacks, run_async=False):
        for cb in callbacks:
            if not isinstance(cb, Callback):
                raise ValueError("All inputs should be Callbacks, got {}".format(type(cb)))
        self._callbacks = callbacks
        self._async = run_async
def __call__(self, *args):
if self._async:
ops = []
for arg, callback in zip(args, self._callbacks):
if not isinstance(arg, (tuple, list)):
arg = [arg]
ops.append(callback(*arg))
return tf.group(ops)
lock = [tf.no_op()]
store_ops = []
for arg, callback in zip(args, self._callbacks):
if not isinstance(arg, (tuple, list)):
arg = [arg]
with tf.control_dependencies(lock):
store_ops.append(callback(*arg))
lock = store_ops[-1]
with tf.control_dependencies(lock):
return tf.no_op()
class SummarySendCallback(Callback):
"""
Callback to submit summaries in-graph mode.
"""
def __init__(self, logdir):
super(SummarySendCallback, self).__init__(logdir=logdir)
def generate(self, logdir):
self.output_dtypes = [tf.int64]
self.name = 'SummarySendCallback'
filewriter = tf.summary.FileWriter(logdir,
graph=tf.get_default_graph(),
flush_secs=30)
def store(i, *summaries):
for summary in summaries:
filewriter.add_summary(summary, i)
return [np.array(len(summaries),dtype=np.int64)]
return store
class DatapackStoreCallback(Callback):
def __init__(self, datapack, solset, soltab, perm=(0,2,3,1),lock=None,index_map=None,**selection):
super(DatapackStoreCallback, self).__init__(datapack=datapack,
solset=solset,
soltab=soltab,
perm=perm,
lock=lock,
index_map=index_map,
**selection)
def generate(self, datapack, solset, soltab, perm, lock, index_map, **selection):
if not isinstance(datapack, str):
datapack = datapack.filename
selection.pop('time',None)
self.output_dtypes = [tf.int64]
self.name = 'DatapackStoreCallback'
def store(time_start, time_stop, array):
time_start = index_map[time_start]
time_stop = index_map[time_stop - 1] + 1
if lock is not None:
lock.acquire()
with DataPack(datapack,readonly=False) as dp:
dp.current_solset = solset
dp.select(time=slice(time_start, time_stop, 1), **selection)
dp.__setattr__(soltab, np.transpose(array, perm))#, dir=dir_sel, ant=ant_sel, freq=freq_sel, pol=pol_sel
if lock is not None:
lock.release()
return [np.array(array.__sizeof__(),dtype=np.int64)]
return store
class GetLearnIndices(Callback):
def __init__(self, dist_cutoff=0.3):
super(GetLearnIndices, self).__init__(dist_cutoff=dist_cutoff)
def generate(self, dist_cutoff):
self.output_dtypes = [tf.int64]
self.name = 'GetLearnIndices'
def get_learn_indices(X):
"""Get the indices of non-redundant antennas
:param X: np.array, float64, [N, 3]
Antenna locations
:param cutoff: float
Mark redundant if antennas within this in km
:return: np.array, int64
indices such that all antennas are at least cutoff apart
"""
N = X.shape[0]
Xa, inverse = np.unique(X, return_inverse=True, axis=0)
Na = len(Xa)
keep = []
for i in range(Na):
if np.all(np.linalg.norm(Xa[i:i + 1, :] - Xa[keep, :], axis=1) > dist_cutoff):
keep.append(i)
logging.info("Training on antennas: {}".format(keep))
return [(np.where(np.isin(inverse, keep, assume_unique=True))[0]).astype(np.int64)]
return get_learn_indices
class StoreHyperparameters(Callback):
def __init__(self, store_file):
super(StoreHyperparameters, self).__init__(store_file=store_file)
def generate(self, store_file):
if not isinstance(store_file, str):
raise ValueError("store_file should be str {}".format(type(store_file)))
store_file=os.path.abspath(store_file)
np.savez(store_file, times=np.array([]), amp=np.array([]), y_sigma=np.array([]), variance=np.array([]), lengthscales=np.array([]), a=np.array([]), b=np.array([]), timescale=np.array([]),
pert_amp=np.array([]), pert_dir_lengthscale=np.array([]), pert_ant_lengthscale=np.array([]))
self.output_dtypes = [tf.int64]
self.name = 'StoreHyperparameters'
def store(time, hyperparams, y_sigma, amp,pert_amp, pert_dir_lengthscale, pert_ant_lengthscale):
data = np.load(store_file)
#must match the order in the Target
variance, lengthscales, a, b, timescale,pert_amp, pert_dir_lengthscale, pert_ant_lengthscale = np.reshape(hyperparams, (-1,))
times = np.array([time] + list(data['times']))
y_sigma = np.array([np.reshape(y_sigma,(-1,))] + list(data['y_sigma']))
amp = np.array([np.reshape(amp, (-1,))] + list(data['amp']))
variance = np.array([variance] + list(data['variance']))
lengthscales = np.array([lengthscales] + list(data['lengthscales']))
a = np.array([a] + list(data['a']))
b = np.array([b] + list(data['b']))
timescale = np.array([timescale] + list(data['timescale']))
np.savez(store_file,
times=times,
y_sigma=y_sigma,
amp=amp,
variance=variance,
lengthscales=lengthscales,
a=a,
b=b,
timescale=timescale
)
return [np.array(len(times),dtype=np.int64)]
return store
class StoreHyperparametersV2(Callback):
def __init__(self, store_file):
super(StoreHyperparametersV2, self).__init__(store_file=store_file)
def generate(self, store_file):
if not isinstance(store_file, str):
raise ValueError("store_file should be str {}".format(type(store_file)))
store_file=os.path.abspath(store_file)
if not os.path.exists(store_file):
np.savez(store_file, times=np.array([]), amp=np.array([]), y_sigma=np.array([]), variance=np.array([]), lengthscales=np.array([]), a=np.array([]), b=np.array([]), timescale=np.array([]),
pert_amp=np.array([]), pert_dir_lengthscale=np.array([]), pert_ant_lengthscale=np.array([]))
self.output_dtypes = [tf.int64]
self.name = 'StoreHyperparametersV2'
def store(time, amp, lengthscales, a, b, timescale, y_sigma):
data = np.load(store_file)
times = np.array([time] + list(data['times']))
y_sigma = np.array([np.reshape(y_sigma,(-1,))] + list(data['y_sigma']))
amp = np.array([np.reshape(amp, (-1,))] + list(data['amp']))
lengthscales = np.array([lengthscales.reshape((-1,))] + list(data['lengthscales']))
a = np.array([a.reshape((-1,))] + list(data['a']))
b = np.array([b.reshape((-1,))] + list(data['b']))
timescale = np.array([timescale.reshape((-1,))] + list(data['timescale']))
np.savez(store_file,
times=times,
y_sigma=y_sigma,
amp=amp,
lengthscales=lengthscales,
a=a,
b=b,
timescale=timescale
)
return [np.array(len(times),dtype=np.int64)]
return store
class PlotResults(Callback):
def __init__(self, hyperparam_store, datapack, solset, lock=None, posterior_name='posterior', plot_directory='./plots', **selection):
super(PlotResults, self).__init__(hyperparam_store=hyperparam_store,
lock=lock,
datapack=datapack,
solset=solset,
posterior_name=posterior_name,
plot_directory=plot_directory,
**selection)
def generate(self, hyperparam_store, datapack, solset, lock, posterior_name, plot_directory, **selection):
self.output_dtypes = [tf.int64]
self.name = 'PlotResults'
if not isinstance(plot_directory, str):
raise ValueError("plot_directory should be str {}".format(type(plot_directory)))
plot_directory = os.path.abspath(plot_directory)
fig_directory = os.path.join(plot_directory,'phase_screens')
# bayes_directory = os.path.join(plot_directory, 'bayes_hyperparmeters')
os.makedirs(fig_directory,exist_ok=True)
# os.makedirs(bayes_directory, exist_ok=True)
dp = DatapackPlotter(datapack)
def plot_results(index_start, index_end):
"""Get the indices of non-redundant antennas
:param X: np.array, float64, [N, 3]
Antenna locations
:param cutoff: float
Mark redundant if antennas within this in km
:return: np.array, int64
indices such that all antennas are at least cutoff apart
"""
data = np.load(hyperparam_store)
keys = ['amp','y_sigma','variance', 'lengthscales', 'a', 'b', 'timescale']
if lock is not None:
lock.acquire()
fig, axs = plt.subplots(len(keys),1,sharex='all', figsize=(6,len(keys)*2))
for i,key in enumerate(keys):
ax = axs[i]
if key in ['amp','y_sigma']:
# for t,d in zip(data['times'],data['y_sigma']):
ax.boxplot(data[key].T,positions=data['times'])
ax.set_title(key)
else:
ax.scatter(data['times'], data[key], label=key)
ax.legend()
plt.savefig(os.path.join(plot_directory,'hyperparameters.png'))
plt.close('all')
if lock is not None:
lock.release()
# keys = ['amp', 'y_sigma']
# if lock is not None:
# lock.acquire()
# fig, axs = plt.subplots(len(keys), 1, figsize=(6, len(keys) * 2))
# for i, key in enumerate(keys):
# ax = axs[i]
# ax.hist(data[key][-1], bins=max(10, int(np.sqrt(np.size(data[key][-1])))), label=key)
# ax.legend()
# plt.savefig(os.path.join(bayes_directory, 'bayesian_hyperparameters_{:04d}_{:04d}.png'.format(index_start, index_end)))
# plt.close('all')
# if lock is not None:
# lock.release()
fignames = [os.path.join(fig_directory, "fig-{:04d}_{}_phase.png".format(i,solset)) for i in
range(index_start, index_end, 1)]
if lock is not None:
lock.acquire()
dp.plot(ant_sel=selection.get('ant',None),
time_sel=slice(index_start, index_end, 1),
freq_sel=selection.get('freq',None),
dir_sel=selection.get('dir',None),
pol_sel=selection.get('pol', slice(0,1,1)),
fignames=fignames,
observable='phase',
phase_wrap=True,
plot_facet_idx=True,
labels_in_radec=True,
solset=solset)
plt.close('all')
if lock is not None:
lock.release()
data_posterior = "data_{}".format(posterior_name)
fignames = [os.path.join(fig_directory, "fig-{:04d}_{}_phase.png".format(i, data_posterior)) for i in
range(index_start, index_end, 1)]
if lock is not None:
lock.acquire()
dp.plot(ant_sel=selection.get('ant', None),
time_sel=slice(index_start, index_end, 1),
freq_sel=selection.get('freq', None),
dir_sel=selection.get('dir', None),
pol_sel=selection.get('pol', slice(0, 1, 1)),
fignames=fignames,
observable='tec',
tec_eval_freq=160e6,
phase_wrap=True,
plot_facet_idx=True,
labels_in_radec=True,
solset=data_posterior)
plt.close('all')
if lock is not None:
lock.release()
screen_posterior = "screen_{}".format(posterior_name)
fignames = [os.path.join(fig_directory, "fig-{:04d}_{}_phase.png".format(i, screen_posterior)) for i in
range(index_start, index_end, 1)]
if lock is not None:
lock.acquire()
dp.plot(ant_sel=selection.get('ant', None),
time_sel=slice(index_start, index_end, 1),
freq_sel=selection.get('freq', None),
dir_sel=selection.get('dir', None),
pol_sel=selection.get('pol', slice(0, 1, 1)),
fignames=fignames,
observable='tec',
tec_eval_freq=160e6,
phase_wrap=True,
plot_facet_idx=True,
labels_in_radec=True,
solset=screen_posterior)
plt.close('all')
if lock is not None:
lock.release()
return [np.array(3).astype(np.int64)]
return plot_results
class PlotResultsV2(Callback):
def __init__(self, hyperparam_store, datapack, solset, index_map, lock=None, posterior_name='posterior', plot_directory='./plots', **selection):
super(PlotResultsV2, self).__init__(hyperparam_store=hyperparam_store,
lock=lock,
datapack=datapack,
solset=solset,
index_map=index_map,
posterior_name=posterior_name,
plot_directory=plot_directory,
**selection)
def generate(self, hyperparam_store, datapack, solset, index_map, lock, posterior_name, plot_directory, **selection):
self.output_dtypes = [tf.int64]
self.name = 'PlotResultsV2'
if not isinstance(plot_directory, str):
raise ValueError("plot_directory should be str {}".format(type(plot_directory)))
plot_directory = os.path.abspath(plot_directory)
fig_directory = os.path.join(plot_directory,'phase_screens')
# bayes_directory = os.path.join(plot_directory, 'bayes_hyperparmeters')
os.makedirs(fig_directory,exist_ok=True)
# os.makedirs(bayes_directory, exist_ok=True)
dp = DatapackPlotter(datapack)
def plot_results(index_start, index_end):
"""
Plot results.
:param index_start: int
Start index of results to plot relative to 0
:param index_end: int
End index of results to plot relative to 0
:return:
"""
index_start = index_map[index_start]
index_end = index_map[index_end-1] + 1
data = np.load(hyperparam_store)
keys = ['amp','y_sigma', 'lengthscales', 'a', 'b', 'timescale']
if lock is not None:
lock.acquire()
fig, axs = plt.subplots(len(keys),1,sharex='all', figsize=(9,len(keys)*2))
for i,key in enumerate(keys):
ax = axs[i]
ax.scatter(data['times'], data[key].flatten(),label=key)
# ax.boxplot(data[key].T,positions=data['times'])
# ax.set_title(key)
ax.legend()
plt.savefig(os.path.join(plot_directory,'hyperparameters.png'))
plt.close('all')
if lock is not None:
lock.release()
fignames = [os.path.join(fig_directory, "fig-{:04d}_{}_phase.png".format(i,solset)) for i in
range(index_start, index_end, 1)]
if lock is not None:
lock.acquire()
dp.plot(ant_sel=selection.get('ant',None),
time_sel=slice(index_start, index_end, 1),
freq_sel=selection.get('freq',None),
dir_sel=selection.get('dir',None),
pol_sel=selection.get('pol', slice(0,1,1)),
fignames=fignames,
observable='phase',
phase_wrap=True,
plot_facet_idx=True,
labels_in_radec=True,
solset=solset)
plt.close('all')
if lock is not None:
lock.release()
# fignames = [os.path.join(fig_directory, "fig-{:04d}_{}_tec_ground_truth.png".format(i, solset)) for i in
# range(index_start, index_end, 1)]
# if lock is not None:
# lock.acquire()
# dp.plot(ant_sel=selection.get('ant', None),
# time_sel=slice(index_start, index_end, 1),
# freq_sel=selection.get('freq', None),
# dir_sel=selection.get('dir', None),
# pol_sel=selection.get('pol', slice(0, 1, 1)),
# fignames=fignames,
# observable='tec',
# tec_eval_freq=140e6,
# phase_wrap=True,
# plot_facet_idx=True,
# labels_in_radec=True,
# solset=solset)
# plt.close('all')
# if lock is not None:
# lock.release()
# data_posterior = "data_{}".format(posterior_name)
# fignames = [os.path.join(fig_directory, "fig-{:04d}_{}_phase.png".format(i, data_posterior)) for i in
# range(index_start, index_end, 1)]
# if lock is not None:
# lock.acquire()
# dp.plot(ant_sel=selection.get('ant', None),
# time_sel=slice(index_start, index_end, 1),
# freq_sel=selection.get('freq', None),
# dir_sel=selection.get('dir', None),
# pol_sel=selection.get('pol', slice(0, 1, 1)),
# fignames=fignames,
# observable='phase',
# phase_wrap=True,
# plot_facet_idx=True,
# labels_in_radec=True,
# solset=data_posterior)
# plt.close('all')
# if lock is not None:
# lock.release()
data_posterior = "data_{}".format(posterior_name)
fignames = [os.path.join(fig_directory, "fig-{:04d}_{}_tec_144MHz.png".format(i, data_posterior)) for i in
range(index_start, index_end, 1)]
if lock is not None:
lock.acquire()
dp.plot(ant_sel=selection.get('ant', None),
time_sel=slice(index_start, index_end, 1),
freq_sel=selection.get('freq', None),
dir_sel=selection.get('dir', None),
pol_sel=selection.get('pol', slice(0, 1, 1)),
fignames=fignames,
observable='tec',
tec_eval_freq=144e6,
phase_wrap=True,
plot_facet_idx=True,
labels_in_radec=True,
solset=data_posterior)
plt.close('all')
if lock is not None:
lock.release()
# data_posterior = "data_{}".format(posterior_name)
# fignames = [os.path.join(fig_directory, "fig-{:04d}_{}_tec_144MHz_uncert.png".format(i, data_posterior)) for i in
# range(index_start, index_end, 1)]
# if lock is not None:
# lock.acquire()
# dp.plot(ant_sel=selection.get('ant', None),
# time_sel=slice(index_start, index_end, 1),
# freq_sel=selection.get('freq', None),
# dir_sel=selection.get('dir', None),
# pol_sel=selection.get('pol', slice(0, 1, 1)),
# fignames=fignames,
# observable='weights_tec',
# tec_eval_freq=144e6,
# phase_wrap=False,
# plot_facet_idx=True,
# labels_in_radec=True,
# solset=data_posterior)
# plt.close('all')
# if lock is not None:
# lock.release()
screen_posterior = "screen_{}".format(posterior_name)
fignames = [os.path.join(fig_directory, "fig-{:04d}_{}_tec_144MHz.png".format(i, screen_posterior)) for i in
range(index_start, index_end, 1)]
if lock is not None:
lock.acquire()
dp.plot(ant_sel=selection.get('ant', None),
time_sel=slice(index_start, index_end, 1),
freq_sel=selection.get('freq', None),
dir_sel=selection.get('dir', None),
pol_sel=selection.get('pol', slice(0, 1, 1)),
fignames=fignames,
observable='tec',
tec_eval_freq=144e6,
phase_wrap=True,
plot_facet_idx=True,
labels_in_radec=True,
solset=screen_posterior)
plt.close('all')
if lock is not None:
lock.release()
return [np.array(3).astype(np.int64)]
return plot_results
class PlotStepsizes(Callback):
def __init__(self, lock = None, plot_directory='./plots'):
super(PlotStepsizes, self).__init__(
lock=lock,
plot_directory=plot_directory
)
def generate(self, lock, plot_directory):
self.output_dtypes = [tf.int64]
self.name = 'PlotStepsizes'
if not isinstance(plot_directory, str):
raise ValueError("plot_directory should be str {}".format(type(plot_directory)))
plot_directory = os.path.join(os.path.abspath(plot_directory), 'diagnostics','stepsizes')
os.makedirs(plot_directory, exist_ok=True)
def plot_results(index_start, index_end, *stepsizesandlabels):
"""
Plot the performance results.
:param index_start: int
:param index_end: int
:param rhat: float array
num_coords gelman-rubin ratio
:param ess: float array
num_chains, num_coords effective sample size
:param log_accept_ratio: float array
num_samples, num_chains log acceptance ratio
:param step_size: float array
num_variables, num_samples step sizes
:return: int
dummy integer
"""
N = len(stepsizesandlabels)
if N % 2 != 0:
raise ValueError("Must be even number of rhats and labels.")
N = N >> 1
stepsizes = stepsizesandlabels[:N]
labels = stepsizesandlabels[N:]
for label,stepsize in zip(labels, stepsizes):
label = str(np.array(label,dtype=str))
if lock is not None:
lock.acquire()
fig, ax = plt.subplots(1,1,figsize=(4,4))
ax.hist(stepsize,
bins=max(10, int(np.sqrt(stepsize.size))), label=label)
ax.legend()
plt.savefig(os.path.join(plot_directory,'stepsize_{}_{}_{}.png'.format(label, index_start, index_end)))
plt.close('all')
if lock is not None:
lock.release()
return [np.array(1).astype(np.int64)]
return plot_results
class PlotAcceptRatio(Callback):
def __init__(self, lock=None, plot_directory='./plots'):
super(PlotAcceptRatio, self).__init__(
lock=lock,
plot_directory=plot_directory,
)
def generate(self, lock, plot_directory):
self.output_dtypes = [tf.int64]
self.name = 'PlotAcceptRatio'
if not isinstance(plot_directory, str):
raise ValueError("plot_directory should be str {}".format(type(plot_directory)))
plot_directory = os.path.join(os.path.abspath(plot_directory),'diagnostics', 'accept_ratio')
os.makedirs(plot_directory,exist_ok=True)
def plot_results(index_start, index_end, log_accept_ratio):
"""
Plot the performance results.
:param index_start: int
:param index_end: int
:param rhat: float array
num_coords gelman-rubin ratio
:param ess: float array
num_chains, num_coords effective sample size
:param log_accept_ratio: float array
num_samples, num_chains log acceptance ratio
:param step_size: float array
num_variables, num_samples step sizes
:return: int
dummy integer
"""
if lock is not None:
lock.acquire()
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
ax.hist(np.mean(np.exp(np.minimum(log_accept_ratio, 0.)), axis=-1),
bins=max(10, int(np.sqrt(log_accept_ratio.size))), label='acceptance_ratio')
ax.legend()
plt.savefig(os.path.join(plot_directory, 'acceptance_ratio_{}_{}.png'.format(index_start, index_end)))
plt.close('all')
if lock is not None:
lock.release()
return [np.array(1).astype(np.int64)]
return plot_results
class PlotEss(Callback):
def __init__(self, lock=None,plot_directory='./plots'):
super(PlotEss, self).__init__(lock=lock,
plot_directory=plot_directory,
)
def generate(self, lock, plot_directory):
self.output_dtypes = [tf.int64]
self.name = 'PlotEss'
if not isinstance(plot_directory, str):
raise ValueError("plot_directory should be str {}".format(type(plot_directory)))
plot_directory = os.path.join(os.path.abspath(plot_directory), 'diagnostics','ess')
os.makedirs(plot_directory,exist_ok=True)
def plot_results(index_start, index_end, *esssandlabels):
"""
Plot the performance results.
:param index_start: int
:param index_end: int
:param rhat: float array
num_coords gelman-rubin ratio
:param ess: float array
num_chains, num_coords effective sample size
:param log_accept_ratio: float array
num_samples, num_chains log acceptance ratio
:param step_size: float array
num_variables, num_samples step sizes
:return: int
dummy integer
"""
N = len(esssandlabels)
if N%2 != 0:
raise ValueError("Must be even number of rhats and labels.")
N = N >> 1
esss = esssandlabels[:N]
labels = esssandlabels[N:]
for label,ess in zip(labels, esss):
label = str(np.array(label,dtype=str))
if lock is not None:
lock.acquire()
fig, ax = plt.subplots(1,1,figsize=(4,4))
if len(ess.shape) == 2:
ess = np.mean(ess,axis=0)
idx = np.arange(np.size(ess))
ax.bar(idx, ess, label=label)
ax.legend()
plt.savefig(os.path.join(plot_directory,'ess_{}_{}_{}.png'.format(label, index_start, index_end)))
plt.close('all')
if lock is not None:
lock.release()
return [np.array(1).astype(np.int64)]
return plot_results
class PlotRhat(Callback):
def __init__(self, lock=None, plot_directory='./plots'):
super(PlotRhat, self).__init__(lock=lock,
plot_directory=plot_directory,
)
def generate(self, lock, plot_directory):
self.output_dtypes = [tf.int64]
self.name = 'PlotRhat'
if not isinstance(plot_directory, str):
raise ValueError("plot_directory should be str {}".format(type(plot_directory)))
plot_directory = os.path.join(os.path.abspath(plot_directory), 'diagnostics','rhat')
os.makedirs(plot_directory,exist_ok=True)
def plot_results(index_start, index_end, *rhatsandlabels):
"""
Plot the performance results.
:param index_start: int
:param index_end: int
:param rhat: float array
num_coords gelman-rubin ratio
:param ess: float array
num_chains, num_coords effective sample size
:param log_accept_ratio: float array
num_samples, num_chains log acceptance ratio
:param step_size: float array
num_variables, num_samples step sizes
:return: int
dummy integer
"""
N = len(rhatsandlabels)
if N%2 != 0:
raise ValueError("Must be even number of rhats and labels.")
N = N >> 1
rhats = rhatsandlabels[:N]
labels = rhatsandlabels[N:]
for label,rhat in zip(labels, rhats):
label = str(np.array(label,dtype=str))
if lock is not None:
lock.acquire()
fig, ax = plt.subplots(1,1,figsize=(4,4))
idx = np.arange(np.size(rhat))
ax.bar(idx, rhat, label=label)
ax.set_yscale('log')
ax.legend()
plt.savefig(os.path.join(plot_directory,'rhat_{}_{}_{}.png'.format(label, index_start, index_end)))
plt.close('all')
if lock is not None:
lock.release()
return [np.array(1).astype(np.int64)]
return plot_results
class PlotELBO(Callback):
def __init__(self, lock=None, plot_directory='./plots', index_map=None):
super(PlotELBO, self).__init__(lock=lock,
plot_directory=plot_directory,
index_map=index_map
)
def generate(self, lock, plot_directory, index_map):
self.output_dtypes = [tf.int64]
self.name = 'PlotELBO'
if not isinstance(plot_directory, str):
raise ValueError("plot_directory should be str {}".format(type(plot_directory)))
plot_directory = os.path.join(os.path.abspath(plot_directory), 'diagnostics', 'elbo')
os.makedirs(plot_directory, exist_ok=True)
def plot_results(index_start, index_end, elbo):
"""
Plot the performance results.
:param index_start: int
:param index_end: int
:param elbo: float array
num_steps
:return: int
dummy integer
"""
index_start = index_map[index_start]
index_end = index_map[index_end-1] + 1
if lock is not None:
lock.acquire()
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
idx = np.arange(np.size(elbo))
where_non_zero = elbo != 0.
ax.plot(idx[where_non_zero], elbo[where_non_zero], label='elbo')
ax.set_xlabel('iteration')
ax.legend()
plt.savefig(os.path.join(plot_directory, 'elbo_{:04d}_{:04d}.png'.format(index_start, index_end)))
plt.close('all')
if lock is not None:
lock.release()
return [np.array(1).astype(np.int64)]
return plot_results
class StorePerformance(Callback):
def __init__(self, store_file):
super(StorePerformance, self).__init__(store_file=store_file)
def generate(self, store_file):
if not isinstance(store_file, str):
raise ValueError("store_file should be str {}".format(type(store_file)))
store_file=os.path.abspath(store_file)
if not os.path.exists(store_file):
np.savez(store_file, index=np.array([]), loss= [], iter_time=np.array([]), num_steps=np.array([]))
self.output_dtypes = [tf.int64]
self.name = 'StorePerformance'
def store(index, loss, iter_time, num_steps):
data = np.load(store_file)
index = np.array([index] + list(data['index']))
loss = np.array([np.reshape(loss,(-1,))] + list(data['loss']))
iter_time = np.array([iter_time] + list(data['iter_time']))
num_steps = np.array([num_steps] + list(data['num_steps']))
np.savez(store_file,
index=index,
loss=loss,
iter_time=iter_time,
num_steps=num_steps
)
return [np.array(len(index),dtype=np.int64)]
return store
```
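Every concrete callback above follows the same pattern: `generate()` sets `output_dtypes` and `name` and returns a plain-Python function, which `__call__` then wraps in `tf.py_function` so it can run inside the TensorFlow graph. A minimal, hypothetical subclass written to that pattern, with the module above in scope (it is not part of the package):
```python
# Hypothetical example of the Callback pattern above; not part of bayes_filter.
class ArraySizeCallback(Callback):
    """Logs the size of the array it receives and returns it as an int64."""

    def __init__(self):
        super(ArraySizeCallback, self).__init__()

    def generate(self):
        self.output_dtypes = [tf.int64]
        self.name = 'ArraySizeCallback'

        def report(array):
            # Called with NumPy arrays (Callback.__call__ converts the tensors).
            logging.info("received array of size {}".format(array.size))
            return [np.array(array.size, dtype=np.int64)]

        return report

# Example use inside a graph: size_op = ArraySizeCallback()(tf.ones((3, 4)))
```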
#### File: bayes_filter/bayes_filter/coord_transforms.py
```python
import astropy.coordinates as ac
import astropy.time as at
from .frames import ENU
from .settings import dist_type, angle_type
import numpy as np
import tensorflow as tf
from .callbacks import Callback
from .settings import float_type
def tf_coord_transform(transform):
def tf_transform(X):
return tf.py_function(lambda X: transform(X.numpy()), [X], X.dtype)
return tf_transform
#TODO: make a callback
def itrs_to_enu_6D(X, ref_location=None):
"""
Convert the given coordinates from ITRS to ENU
:param X: float array [b0,...,bB,6]
The coordinates are ordered [time, ra, dec, itrs.x, itrs.y, itrs.z]
:param ref_location: float array [3]
Point about which to rotate frame.
:return: float array [b0,...,bB, 7]
        The transformed coordinates.
"""
time = np.unique(X[..., 0])
if time.size > 1:
raise ValueError("Times should be the same.")
shape = X.shape[:-1]
X = X.reshape((-1, 6))
if ref_location is None:
ref_location = X[0,3:]
obstime = at.Time(time / 86400., format='mjd')
location = ac.ITRS(x=ref_location[0] * dist_type, y=ref_location[1] * dist_type, z=ref_location[2] * dist_type)
enu = ENU(location=location, obstime=obstime)
antennas = ac.ITRS(x=X[:, 3] * dist_type, y=X[:, 4] * dist_type, z=X[:, 5] * dist_type, obstime=obstime)
antennas = antennas.transform_to(enu).cartesian.xyz.to(dist_type).value.T
directions = ac.ICRS(ra=X[:, 1] * angle_type, dec=X[:, 2] * angle_type)
directions = directions.transform_to(enu).cartesian.xyz.value.T
return np.concatenate([X[:,0:1], directions, antennas], axis=1).reshape(shape+(7,))
#TODO: make a callback
def itrs_to_enu_with_references(ref_antenna=None, ref_direction=None, ref_location=None):
"""
Wrapper that creates the function to convert the given coordinates from ITRS to ENU
:param ref_antenna: float array [3]
Location of reference antenna in ITRS.
:param ref_direction: float array [2]
RA and DEC of reference direction.
:param ref_location: float array [3]
Point about which to rotate frame.
"""
def transform(X, ref_location=ref_location):
"""
Convert the given coordinates from ITRS to ENU
:param X: float array [Nd, Na,6]
The coordinates are ordered [time, ra, dec, itrs.x, itrs.y, itrs.z]
:return: float array [Nd, Na, 7(10(13))]
The transforms coordinates.
"""
time = np.unique(X[..., 0])
if time.size > 1:
raise ValueError("Times should be the same.")
shape = X.shape[:-1]
X = X.reshape((-1, 6))
if ref_location is None:
ref_location = X[0,3:6]
obstime = at.Time(time / 86400., format='mjd')
location = ac.ITRS(x=ref_location[0] * dist_type, y=ref_location[1] * dist_type, z=ref_location[2] * dist_type)
ref_ant = ac.ITRS(x=ref_antenna[0] * dist_type, y=ref_antenna[1] * dist_type, z=ref_antenna[2] * dist_type, obstime=obstime)
ref_dir = ac.ICRS(ra=ref_direction[0] * angle_type, dec=ref_direction[1] * angle_type)
enu = ENU(location=location, obstime=obstime)
ref_ant = ref_ant.transform_to(enu).cartesian.xyz.to(dist_type).value.T
ref_dir = ref_dir.transform_to(enu).cartesian.xyz.value.T
antennas = ac.ITRS(x=X[:, 3] * dist_type, y=X[:, 4] * dist_type, z=X[:, 5] * dist_type, obstime=obstime)
antennas = antennas.transform_to(enu).cartesian.xyz.to(dist_type).value.T
directions = ac.ICRS(ra=X[:, 1] * angle_type, dec=X[:, 2] * angle_type)
directions = directions.transform_to(enu).cartesian.xyz.value.T
result = np.concatenate([X[:,0:1], directions, antennas], axis=1)#
if ref_antenna is not None:
result = np.concatenate([result, np.tile(ref_ant, (result.shape[0], 1))], axis=-1)
if ref_direction is not None:
result = np.concatenate([result, np.tile(ref_dir, (result.shape[0], 1))],axis=-1)
result = result.reshape(shape+result.shape[-1:])
return result
return transform
class ITRSToENUWithReferences(Callback):
def __init__(self,ref_antenna=None, ref_direction=None, ref_location=None):
"""
Wrapper that creates the function to convert the given coordinates from ITRS to ENU
:param ref_antenna: float array [3]
Location of reference antenna in ITRS.
:param ref_direction: float array [2]
RA and DEC of reference direction.
:param ref_location: float array [3]
Point about which to rotate frame.
"""
super(ITRSToENUWithReferences, self).__init__(ref_antenna=ref_antenna,
ref_direction=ref_direction,
ref_location=ref_location)
def generate(self, ref_antenna, ref_direction, ref_location):
self.output_dtypes = [float_type]
self.name = 'ITRSToENUWithReferences'
self.squeeze = True
def transform(X,ref_location=ref_location):
"""
Convert the given coordinates from ITRS to ENU
:param X: float array [Nd, Na,6]
The coordinates are ordered [time, ra, dec, itrs.x, itrs.y, itrs.z]
:return: float array [Nd, Na, 7(10(13))]
The transforms coordinates.
"""
time = np.unique(X[..., 0])
if time.size > 1:
raise ValueError("Times should be the same.")
shape = X.shape[:-1]
X = X.reshape((-1, 6))
if ref_location is None:
ref_location = X[0,3:6]
obstime = at.Time(time / 86400., format='mjd')
location = ac.ITRS(x=ref_location[0] * dist_type, y=ref_location[1] * dist_type, z=ref_location[2] * dist_type)
ref_ant = ac.ITRS(x=ref_antenna[0] * dist_type, y=ref_antenna[1] * dist_type, z=ref_antenna[2] * dist_type, obstime=obstime)
ref_dir = ac.ICRS(ra=ref_direction[0] * angle_type, dec=ref_direction[1] * angle_type)
enu = ENU(location=location, obstime=obstime)
ref_ant = ref_ant.transform_to(enu).cartesian.xyz.to(dist_type).value.T
ref_dir = ref_dir.transform_to(enu).cartesian.xyz.value.T
antennas = ac.ITRS(x=X[:, 3] * dist_type, y=X[:, 4] * dist_type, z=X[:, 5] * dist_type, obstime=obstime)
antennas = antennas.transform_to(enu).cartesian.xyz.to(dist_type).value.T
directions = ac.ICRS(ra=X[:, 1] * angle_type, dec=X[:, 2] * angle_type)
directions = directions.transform_to(enu).cartesian.xyz.value.T
result = np.concatenate([X[:,0:1], directions, antennas], axis=1)#
if ref_antenna is not None:
result = np.concatenate([result, np.tile(ref_ant, (result.shape[0], 1))], axis=-1)
if ref_direction is not None:
result = np.concatenate([result, np.tile(ref_dir, (result.shape[0], 1))],axis=-1)
result = result.reshape(shape+result.shape[-1:])
return result
return transform
```
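The coordinate transforms above rely on astropy ITRS/ICRS frames and the custom ENU frame. Purely for intuition, here is a minimal numpy sketch of the rotation an ENU frame applies to position offsets; it treats latitude/longitude as given inputs and ignores the geodetic subtleties and time-dependent direction handling that astropy takes care of:
```python
import numpy as np

def itrs_offset_to_enu(p, p_ref, lat, lon):
    """Rotate an ITRS offset (p - p_ref) into local East, North, Up components."""
    sl, cl = np.sin(lon), np.cos(lon)
    sp, cp = np.sin(lat), np.cos(lat)
    east = np.array([-sl, cl, 0.])
    north = np.array([-sp * cl, -sp * sl, cp])
    up = np.array([cp * cl, cp * sl, sp])
    d = np.asarray(p, dtype=float) - np.asarray(p_ref, dtype=float)
    return np.stack([east @ d, north @ d, up @ d])

# A point 1 km radially outward from a reference at lat=0, lon=0 maps to (E, N, U) = (0, 0, 1000).
print(itrs_offset_to_enu([6371e3 + 1e3, 0., 0.], [6371e3, 0., 0.], lat=0., lon=0.))
```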
#### File: bayes_filter/bayes_filter/parameters.py
```python
import tensorflow_probability as tfp
import tensorflow as tf
from .settings import float_type
class ConstrainedBijector(tfp.bijectors.Chain):
def __init__(self,a,b,validate_args=False):
"""
Create a bijector that constrains the value to (a,b)
:param a: float scalar
Lower limit
:param b: float scalar
Upper limit
:return: tfp.bijectors.Bijector
The chained affine and sigmoid that achieves the constraint.
"""
self.a = tf.convert_to_tensor(a, float_type)
self.b = tf.convert_to_tensor(b, float_type)
super(ConstrainedBijector, self).__init__(
[tfp.bijectors.AffineScalar(shift=self.a, scale=(self.b-self.a)),
tfp.bijectors.Sigmoid()],validate_args=validate_args, name='constrained')
class SphericalToCartesianBijector(tfp.bijectors.Bijector):
def __init__(self, validate_args=False, name='spherical'):
"""
A bijector that converts spherical to Cartesian coordinates.
:param validate_args: bool
:param name: optional
"""
super(SphericalToCartesianBijector, self).__init__(validate_args=validate_args,forward_min_event_ndims=0,
inverse_min_event_ndims=0, is_constant_jacobian=False,dtype=float_type,
name=name)
def _forward(self,sph):
"""
Convert spherical to cartesian along last dimension.
:param sph: float_type, Tensor, [b0,...,bB, 3]
Spherical coordinates (radial, azimuthal, polar)
:return: float_type, Tensor, [b0, ..., bB, 3]
Cartesian coordinates
"""
r = sph[...,0:1]
theta = sph[...,1]
phi = sph[...,2]
sinphi = tf.sin(phi)
x = tf.cos(theta) * sinphi
y = tf.sin(theta) * sinphi
z = tf.cos(phi)
xyz = r * tf.stack([x,y,z],axis=-1)
return xyz
def _inverse(self,car):
"""
Convert cartesian to spherical along last dimension
:param car: float_type, Tensor, [b0,...,bB, 3]
The cartesian coordinates
:return: float_type, Tensor, [b0, ... bB, 3]
The spherical coordinates
"""
x = car[...,0]
y = car[...,1]
z = car[...,2]
r = tf.sqrt(tf.square(x) + tf.square(y) + tf.square(z))
theta = tf.atan2(y, x)
phi = tf.acos(z/r)
sph = tf.stack([r,theta,phi],axis=-1)
return sph
def _forward_log_det_jacobian(self,sph):
"""
The forward jacobian of the transformation.
log |d y(x)/ d x|
:param sph: float_type, Tensor, [b0, ...,bB, 3]
The x of y(x)
:return: float_type, Tensor , [b0,...,bB]
The log det Jacobian of y(x)
"""
r = sph[..., 0]
theta = sph[..., 1]
phi = sph[..., 2]
return 2*tf.log(r) + tf.log(tf.sin(phi))
def _inverse_log_det_jacobian(self,car):
"""
The inverse jacobian of the transformation.
log |d x(y)/ d y|
:param car: float_type, Tensor, [b0, ...,bB, 3]
The y of y(x)
:return: float_type, Tensor , [b0,...,bB]
The log det Jacobian of x(y)
"""
return -self._forward_log_det_jacobian(self._inverse(car))
class ScaledPositiveBijector(tfp.bijectors.Chain):
def __init__(self, scale=1., validate_args=False):
self.scale = tf.convert_to_tensor(scale, float_type)
super(ScaledPositiveBijector, self).__init__(
[tfp.bijectors.AffineScalar(scale=self.scale),tfp.bijectors.Softplus()], validate_args=validate_args,
name='scaled_positive_bijector')
class ScaledLowerBoundedBijector(tfp.bijectors.Chain):
def __init__(self, lower_bound=0.,scale=1., validate_args=False):
self.scale = tf.convert_to_tensor(scale, float_type)
self.lower_bound = tf.convert_to_tensor(lower_bound, float_type)
super(ScaledLowerBoundedBijector, self).__init__(
[tfp.bijectors.AffineScalar(shift=self.lower_bound if self.lower_bound != 0. else None, scale=self.scale),
tfp.bijectors.Softplus()], validate_args=validate_args,
name='scaled_positive_bijector')
class ScaledBijector(tfp.bijectors.Chain):
def __init__(self, scale=1., validate_args=False):
self.scale = tf.convert_to_tensor(scale, float_type)
super(ScaledBijector, self).__init__(
[tfp.bijectors.AffineScalar(scale=self.scale)], validate_args=validate_args,
name='scaled_bijector')
class Parameter(object):
def __init__(self, unconstrained_value=None, constrained_value=None, bijector=None, distribution=None,
dtype=float_type, shape = None):
"""
Builds a parameter with a bijector and distribution. If the unconstrained value is X, then the constrained
value is Y = bijector.forward(X), and `distribution` models p(Y) then unconstrained_prior models p(X).
:param unconstrained_value: tf.Tensor, optional
If not None, then gives the unconstrained parameter value.
:param constrained_value: tf.Tensor
If not None, then gives the constrained parameter value
:param bijector: tfp.bijectors.Bijector
If None then Identity, else gives relation Y=g(X)
:param distribution: tfp.distributions.Distribution
Gives the distribution of p(Y)
:param dtype: tf.dtype
Gives the dtype of the parameter
:raise ValueError:
if both unconstrained_value and constrained_value given.
"""
if bijector is None:
bijector = tfp.bijectors.Identity()
self.bijector = bijector
if unconstrained_value is not None and constrained_value is not None:
raise ValueError("Only one of constrained_value and unconstrained_value may be given.")
# if unconstrained_value is None and constrained_value is None:
# raise ValueError("At least one of contrained_value and unconstrained_value must be given.")
self.unconstrained_value = None
self.constrained_value = None
if unconstrained_value is not None:
self.unconstrained_value = tf.convert_to_tensor(unconstrained_value, dtype)
self.constrained_value = self.bijector.forward(self.unconstrained_value)
if constrained_value is not None:
self.constrained_value = tf.convert_to_tensor(constrained_value, dtype)
self.unconstrained_value = self.bijector.inverse(self.constrained_value)
if shape is not None:
self.constrained_value = tf.reshape(self.constrained_value, shape)
self.unconstrained_value = tf.reshape(self.unconstrained_value, shape)
if distribution is not None:
# distribution = tfp.distributions.Uniform(
# low = tf.constant(0.,dtype=float_type), high = tf.constant(1.,dtype=float_type))
self.unconstrained_prior = tfp.distributions.TransformedDistribution(
distribution=distribution, bijector=tfp.bijectors.Invert(self.bijector))
self.constrained_prior = distribution
else:
self.unconstrained_prior = 0.
self.constrained_prior = 0.
def constrained_scaled_positive(a,b,scale):
return tfp.bijectors.Chain([ConstrainedBijector(a,b),ScaledPositiveBijector(scale)])
```
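As a quick numerical check of what `ConstrainedBijector` realises, note that `tfp.bijectors.Chain` applies its list right-to-left, so the Sigmoid runs first and the affine second, giving y = a + (b - a) * sigmoid(x). A plain numpy sketch of the forward and inverse maps:
```python
import numpy as np

def constrain(x, a, b):
    # Forward map: unconstrained x -> y strictly inside (a, b).
    return a + (b - a) / (1. + np.exp(-x))

def unconstrain(y, a, b):
    # Inverse map: y in (a, b) -> unconstrained x (logit of the rescaled value).
    u = (y - a) / (b - a)
    return np.log(u) - np.log1p(-u)

x = np.array([-3., 0., 3.])
y = constrain(x, a=2., b=5.)
print(y)                        # values strictly inside (2, 5)
print(unconstrain(y, 2., 5.))   # recovers x up to float error
```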
#### File: bayes_filter/bayes_filter/plotting.py
```python
import matplotlib
# matplotlib.use('Agg')
import numpy as np
import os
from concurrent import futures
from .datapack import DataPack
from . import logging
import astropy.coordinates as ac
import astropy.time as at
import astropy.units as au
from scipy.spatial import ConvexHull, cKDTree
from scipy.spatial.distance import pdist
# import psutil
import pylab as plt
plt.style.use('ggplot')
from matplotlib.patches import Polygon, Rectangle
from matplotlib.collections import PatchCollection
import matplotlib.colors as colors
from scipy.spatial import Voronoi
from . import TEC_CONV
try:
import cmocean
phase_cmap = cmocean.cm.phase
except ImportError:
phase_cmap = plt.cm.hsv
def plot_vornoi_map(points, colors, ax=None, alpha=1., radius=None, norm=None, cmap=plt.cm.jet, relim=False):
if cmap == 'phase':
cmap = phase_cmap
def voronoi_finite_polygons_2d(vor, radius=radius):
"""
Reconstruct infinite voronoi regions in a 2D diagram to finite
regions.
Parameters
----------
vor : Voronoi
Input diagram
radius : float, optional
Distance to 'points at infinity'.
Returns
-------
regions : list of tuples
Indices of vertices in each revised Voronoi regions.
vertices : list of tuples
Coordinates for revised Voronoi vertices. Same as coordinates
of input vertices, with 'points at infinity' appended to the
end.
"""
if vor.points.shape[1] != 2:
raise ValueError("Requires 2D input")
new_regions = []
new_vertices = vor.vertices.tolist()
center = vor.points.mean(axis=0)
if radius is None:
radius = vor.points.ptp().max()
# Construct a map containing all ridges for a given point
all_ridges = {}
for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
all_ridges.setdefault(p1, []).append((p2, v1, v2))
all_ridges.setdefault(p2, []).append((p1, v1, v2))
# Reconstruct infinite regions
for p1, region in enumerate(vor.point_region):
vertices = vor.regions[region]
if all(v >= 0 for v in vertices):
# finite region
new_regions.append(vertices)
continue
# reconstruct a non-finite region
ridges = all_ridges[p1]
new_region = [v for v in vertices if v >= 0]
for p2, v1, v2 in ridges:
if v2 < 0:
v1, v2 = v2, v1
if v1 >= 0:
# finite ridge: already in the region
continue
# Compute the missing endpoint of an infinite ridge
t = vor.points[p2] - vor.points[p1] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[[p1, p2]].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[v2] + direction * radius
new_region.append(len(new_vertices))
new_vertices.append(far_point.tolist())
# sort region counterclockwise
vs = np.asarray([new_vertices[v] for v in new_region])
c = vs.mean(axis=0)
angles = np.arctan2(vs[:,1] - c[1], vs[:,0] - c[0])
new_region = np.array(new_region)[np.argsort(angles)]
# finish
new_regions.append(new_region.tolist())
return new_regions, np.asarray(new_vertices)
# compute Voronoi tesselation
vor = Voronoi(points)
# plot
regions, vertices = voronoi_finite_polygons_2d(vor)
if ax is None:
fig, ax = plt.subplots(1,1)
# colorize
for color,region in zip(colors,regions):
if np.size(color) == 1:
if norm is None:
color = cmap(color)
else:
color = cmap(norm(color))
polygon = vertices[region]
ax.fill(*zip(*polygon), color=color, alpha=alpha)
#plt.plot(points[:,0], points[:,1], 'ko')
if relim:
ax.set_xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)
ax.set_ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)
return ax
class DatapackPlotter(object):
def __init__(self, datapack):
if isinstance(datapack, str):
datapack = DataPack(filename=datapack, readonly=True)
self.datapack = datapack
def _create_polygon_plot(self, points, values=None, N=None, ax=None, cmap=plt.cm.bone, overlay_points=None,
annotations=None, title=None, polygon_labels=None, reverse_x=False):
# get nearest points (without odd voronoi extra regions)
k = cKDTree(points)
dx = np.max(points[:, 0]) - np.min(points[:, 0])
dy = np.max(points[:, 1]) - np.min(points[:, 1])
delta = pdist(points)
N = N or int(min(max(100, 2 * np.max(delta) / np.min(delta)), 500))
x = np.linspace(np.min(points[:, 0]) - 0.1 * dx, np.max(points[:, 0]) + 0.1 * dx, N)
y = np.linspace(np.min(points[:, 1]) - 0.1 * dy, np.max(points[:, 1]) + 0.1 * dy, N)
X, Y = np.meshgrid(x, y, indexing='ij')
# interior points population
points_i = np.array([X.flatten(), Y.flatten()]).T
# The match per input point
dist, i = k.query(points_i, k=1)
# the polygons are now created using convex hulls
# order is by point order
patches = []
for group in range(points.shape[0]):
points_g = points_i[i == group, :]
if points_g.size == 0:
logging.debug("Facet {} has zero size".format(group))
poly = Polygon(points[group:group + 1, :], closed=False)
else:
hull = ConvexHull(points_g)
nodes = points_g[hull.vertices, :]
poly = Polygon(nodes, closed=False)
patches.append(poly)
if ax is None:
fig, ax = plt.subplots()
logging.info("Making new plot")
if values is None:
values = np.zeros(len(patches)) # random.uniform(size=len(patches))
p = PatchCollection(patches, cmap=cmap)
p.set_array(values)
ax.add_collection(p)
# plt.colorbar(p)
if overlay_points is not None:
if annotations is None:
ax.scatter(overlay_points[:, 0], overlay_points[:, 1], marker='+', c='black')
else:
for point, a in zip(overlay_points, annotations):
ax.text(point[0], point[1], a, ha='center', va='center', backgroundcolor=(1., 1., 1., 0.1))
if reverse_x:
ax.set_xlim([np.max(points_i[:, 0]), np.min(points_i[:, 0])])
else:
ax.set_xlim([np.min(points_i[:, 0]), np.max(points_i[:, 0])])
ax.set_ylim([np.min(points_i[:, 1]), np.max(points_i[:, 1])])
ax.set_facecolor('black')
ax.grid(b=True, color='black')
if title is not None:
if reverse_x:
ax.text(np.max(points_i[:, 0]) - 0.05 * dx, np.max(points_i[:, 1]) - 0.05 * dy, title, ha='left',
va='top', backgroundcolor=(1., 1., 1., 0.5))
else:
ax.text(np.min(points_i[:, 0]) + 0.05 * dx, np.max(points_i[:, 1]) - 0.05 * dy, title, ha='left',
va='top', backgroundcolor=(1., 1., 1., 0.5))
# Rectangle((x, y), 0.5, 0.5,
# alpha=0.1,facecolor='red',label='Label'))
# ax.annotate(title,xy=(0.8,0.8),xycoords='axes fraction')
return ax, p
def _create_image_plot(self, points, values=None, N=None, ax=None, cmap=plt.cm.bone, overlay_points=None,
annotations=None, title=None, reverse_x=False):
'''
Create initial plot, with image data instead of polygons.
points: (ra, dec)
values: array [n, m] or None, assumes (dec, ra) ordering, i.e. (y, x)
'''
dx = np.max(points[0]) - np.min(points[0])
dy = np.max(points[1]) - np.min(points[1])
if values is not None:
Ndec, Nra = values.shape
else:
Ndec, Nra = len(points[1]), len(points[0])
values = np.zeros([Ndec, Nra])
if ax is None:
fig, ax = plt.subplots()
logging.info("Making new plot")
x = np.linspace(np.min(points[0]), np.max(points[0]), Nra)
y = np.linspace(np.min(points[1]), np.max(points[1]), Ndec)
img = ax.imshow(values, origin='lower', cmap=cmap, aspect='auto', extent=(x[0], x[-1], y[0], y[-1]))
if overlay_points is not None:
if annotations is None:
ax.scatter(overlay_points[:, 0], overlay_points[:, 1], marker='+', c='black')
else:
for point, a in zip(overlay_points, annotations):
ax.text(point[0], point[1], a, ha='center', va='center', backgroundcolor=(1., 1., 1., 0.1))
if reverse_x:
ax.set_xlim([x[-1], x[0]])
else:
ax.set_xlim([x[0], x[-1]])
ax.set_ylim([y[0], y[-1]])
ax.set_facecolor('black')
ax.grid(b=True, color='black')
if title is not None:
if reverse_x:
ax.text(x[-1] - 0.05 * dx, y[-1] - 0.05 * dy, title, ha='left', va='top',
backgroundcolor=(1., 1., 1., 0.5))
else:
ax.text(x[0] + 0.05 * dx, y[-1] - 0.05 * dy, title, ha='left', va='top',
backgroundcolor=(1., 1., 1., 0.5))
return ax, img
def plot(self, ant_sel=None, time_sel=None, freq_sel=None, dir_sel=None, pol_sel=None, fignames=None, vmin=None,
vmax=None, mode='perantenna', observable='phase', phase_wrap=True, log_scale=False, plot_crosses=True,
plot_facet_idx=False, plot_patchnames=False, labels_in_radec=False, show=False, plot_arrays=False,
solset=None, plot_screen=False, tec_eval_freq=None, mean_residual=False, **kwargs):
"""
:param ant_sel:
:param time_sel:
:param freq_sel:
:param dir_sel:
:param pol_sel:
:param fignames:
:param vmin:
:param vmax:
:param mode:
:param observable:
:param phase_wrap:
:param log_scale:
:param plot_crosses:
:param plot_facet_idx:
:param plot_patchnames:
:param labels_in_radec:
:param show:
:param plot_arrays:
:param solset:
:param plot_screen:
:param tec_eval_freq:
:param kwargs:
:return:
"""
SUPPORTED = ['perantenna']
assert mode in SUPPORTED, "only {} supported currently".format(SUPPORTED)
if fignames is None:
save_fig = False
show = True
else:
save_fig = True
show = show and True # False
if plot_patchnames:
plot_facet_idx = False
if plot_patchnames or plot_facet_idx:
plot_crosses = False
if not show:
logging.debug('turning off display')
matplotlib.use('Agg')
###
# Set up plotting
with self.datapack:
self.datapack.current_solset = solset
logging.info(
"Applying selection: ant={},time={},freq={},dir={},pol={}".format(ant_sel, time_sel, freq_sel, dir_sel,
pol_sel))
self.datapack.select(ant=ant_sel, time=time_sel, freq=freq_sel, dir=None, pol=pol_sel)
axes = self.datapack.__getattr__("axes_"+observable if 'weights_' not in observable else observable.replace('weights_','axes_'))
full_patch_names, _ = self.datapack.get_directions(axes['dir'])
self.datapack.select(ant=ant_sel, time=time_sel, freq=freq_sel, dir=dir_sel, pol=pol_sel)
obs, axes = self.datapack.__getattr__(observable)
if observable.startswith('weights_'):
# obs = np.sqrt(np.abs(1. / obs)) # uncert from weights = 1/var
obs = np.sqrt(obs) # uncert from weights = 1/var
phase_wrap = False
if 'pol' in axes.keys():
# plot only first pol selected
obs = obs[0, ...]
# obs is dir, ant, freq, time
antenna_labels, antennas = self.datapack.get_antennas(axes['ant'])
patch_names, directions = self.datapack.get_directions(axes['dir'])
timestamps, times = self.datapack.get_times(axes['time'])
freq_dep = True
try:
freq_labels, freqs = self.datapack.get_freqs(axes['freq'])
except:
freq_dep = False
obs = obs[:, :, None, :]
freq_labels, freqs = [""], [None]
if tec_eval_freq is not None:
# phase_wrap = True
obs = obs * TEC_CONV / tec_eval_freq
if observable.startswith('weights_'):
obs = np.abs(obs)
if phase_wrap:
obs = np.angle(np.exp(1j * obs))
vmin = -np.pi
vmax = np.pi
cmap = phase_cmap
else:
vmin = vmin if vmin is not None else np.percentile(obs.flatten(), 1)
vmax = vmax if vmax is not None else np.percentile(obs.flatten(), 99)
cmap = plt.cm.bone
if log_scale:
obs = np.log10(obs)
Na = len(antennas)
Nt = len(times)
Nd = len(directions)
Nf = len(freqs)
fixfreq = Nf >> 1
logging.info("Plotting {} directions".format(Nd))
logging.info("Plotting {} antennas".format(Na))
logging.info("Plotting {} timestamps".format(Nt))
_, antennas_ = self.datapack.get_antennas([self.datapack.ref_ant])
# ants_uvw = antennas.transform_to(uvw)
ref_dist = np.sqrt(
(antennas.x - antennas_.x) ** 2 + (antennas.y - antennas_.y) ** 2 + (antennas.z - antennas_.z) ** 2).to(
au.km).value
# if labels_in_radec:
ra = directions.ra.deg
dec = directions.dec.deg
if not plot_screen:
### points are normal
points = np.array([ra, dec]).T
if plot_crosses or plot_patchnames or plot_facet_idx:
overlay_points = points
else:
overlay_points = None
else:
### get unique ra and dec and then rearrange into correct order.
_ra = np.unique(ra)
_dec = np.unique(dec)
Nra = len(_ra)
Ndec = len(_dec)
assert Ndec * Nra == Nd
### sort lexiconially
ind = np.lexsort((ra, dec))
points = (_ra, _dec)
obs = obs[ind, ...]
obs = obs.reshape((Ndec, Nra, Na, Nf, Nt))
if plot_crosses:
overlay_points = None # put the facet (ra,dec).T
else:
overlay_points = None
if plot_patchnames:
annotations = patch_names
elif plot_facet_idx:
facet_inv_map = [list(full_patch_names).index(ts) for ts in patch_names]
annotations = np.array([str(facet_inv_map[k]) for k in range(Nd)])
else:
annotations = None
if fignames is not None:
if not isinstance(fignames, (tuple, list)):
fignames = [fignames]
if fignames is not None:
if Nt < len(fignames):
fignames = fignames[:Nt]
if Nt > len(fignames):
print(Nt, fignames)
raise ValueError("Gave too few fignames.")
if mode == 'perantenna':
M = int(np.ceil(np.sqrt(Na)))
fig, axs = plt.subplots(nrows=M, ncols=M, sharex='col', sharey='row', squeeze=False, \
figsize=(4 * M, 4 * M))
fig.subplots_adjust(wspace=0., hspace=0.)
axes_patches = []
c = 0
for row in range(M):
for col in range(M):
ax = axs[row, col]
if col == 0:
ax.set_ylabel("Projected North (radians)" if not labels_in_radec else "DEC (deg)")
if row == M - 1:
ax.set_xlabel("Projected East (radians)" if not labels_in_radec else "RA (deg)")
if c >= Na:
continue
try:
title = antenna_labels[c].decode()
except:
title = antenna_labels[c]
if plot_screen:
_, p = self._create_image_plot(points, values=None, N=None,
ax=ax, cmap=cmap, overlay_points=overlay_points,
annotations=annotations,
title="{} {:.1f}km".format(title, ref_dist[c]),
reverse_x=labels_in_radec)
else:
_, p = self._create_polygon_plot(points, values=None, N=None,
ax=ax, cmap=cmap, overlay_points=overlay_points,
annotations=annotations,
title="{} {:.1f}km".format(title, ref_dist[c]),
reverse_x=labels_in_radec)
p.set_clim(vmin, vmax)
axes_patches.append(p)
c += 1
fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.875, 0.15, 0.025, 0.7])
fig.colorbar(p, cax=cbar_ax, orientation='vertical')
if show:
plt.ion()
plt.show()
for j in range(Nt):
logging.info("Plotting {}".format(timestamps[j]))
for i in range(Na):
if not plot_screen:
axes_patches[i].set_array(obs[:, i, fixfreq, j])
else:
axes_patches[i].set_array(obs[:, :, i, fixfreq, j])
axs[0, 0].set_title("{} {} : {}".format(observable, freq_labels[fixfreq], timestamps[j]))
fig.canvas.draw()
if save_fig:
plt.savefig(fignames[j])
if show:
# plt.close(fig)
plt.ioff()
def _parallel_plot(arg):
datapack, time_slice, kwargs, output_folder = arg
dp = DatapackPlotter(datapack=datapack)
with dp.datapack:
dp.datapack.current_solset = kwargs.get('solset','sol000')
# Get the time selection desired
dp.datapack.select(time=kwargs.get('time_sel', None))
axes = dp.datapack.axes_phase
# timeslice the selection
times = axes['time'] # mjs
sel_list = list(np.arange(len(times))[time_slice])#times[time_slice]
kwargs['time_sel'] = sel_list
fignames = [os.path.join(output_folder, "fig-{:04d}.png".format(j)) for j in range(len(times))[time_slice]]
dp.plot(fignames=fignames, **kwargs)
return fignames
def animate_datapack(datapack, output_folder, num_processes, **kwargs):
"""
Plot the datapack in parallel, then stitch into movie.
datapack: str the datapack filename
output_folder: str, folder to store figs in
num_processes: int number of parallel plotting processes to run
**kwargs: keywords to pass to DatapackPlotter.plot function.
"""
try:
os.makedirs(output_folder)
except:
pass
if num_processes is None:
num_processes = 1#psutil.cpu_count()
if isinstance(datapack, DataPack):
datapack = datapack.filename
# with DataPack(datapack) as datapack_fix:
# datapack_fix.add_antennas(DataPack.lofar_array)
args = []
for i in range(num_processes):
args.append((datapack, slice(i, None, num_processes), kwargs, output_folder))
with futures.ProcessPoolExecutor(max_workers=num_processes) as executor:
jobs = executor.map(_parallel_plot, args)
results = list(jobs)
plt.close('all')
make_animation(output_folder, prefix='fig', fps=4)
def make_animation(datafolder, prefix='fig', fps=4):
'''Given a datafolder with figures of format `prefix`-%04d.png create a
video at framerate `fps`.
Output is datafolder/animation.mp4'''
if os.system(
'ffmpeg -framerate {} -i {}/{}-%04d.png -vf scale="trunc(iw/2)*2:trunc(ih/2)*2" -c:v libx264 -profile:v high -pix_fmt yuv420p -g 30 -r 30 {}/animation.mp4'.format(
fps, datafolder, prefix, datafolder)):
logging.info("{}/animation.mp4 exists already".format(datafolder))
def plot_phase_vs_time(datapack, output_folder, solsets='sol000',
ant_sel=None, time_sel=None, dir_sel=None, freq_sel=None, pol_sel=None):
if isinstance(datapack, DataPack):
datapack = datapack.filename
if not isinstance(solsets, (list, tuple)):
solsets = [solsets]
output_folder = os.path.abspath(output_folder)
os.makedirs(output_folder, exist_ok=True)
with DataPack(datapack, readonly=True) as datapack:
phases = []
stds = []
for solset in solsets:
datapack.current_solset = solset
datapack.select(ant=ant_sel, time=time_sel, dir=dir_sel, freq=freq_sel, pol=pol_sel)
weights, axes = datapack.weights_phase
freq_ind = len(axes['freq']) >> 1
freq = axes['freq'][freq_ind]
ant = axes['ant'][0]
phase, _ = datapack.phase
std = np.sqrt(np.abs(weights))
timestamps, times = datapack.get_times(axes['time'])
phases.append(phase)
stds.append(std)
for phase in phases:
for s, S in zip(phase.shape, phases[0].shape):
assert s == S
Npol, Nd, Na, Nf, Nt = phases[0].shape
fig, ax = plt.subplots()
for p in range(Npol):
for d in range(Nd):
for a in range(Na):
for f in range(Nf):
ax.cla()
for i, solset in enumerate(solsets):
phase = phases[i]
std = stds[i]
label = "{} {} {:.1f}MHz {}:{}".format(solset, axes['pol'][p], axes['freq'][f] / 1e6,
axes['ant'][a], axes['dir'][d])
# ax.fill_between(times.mjd, phase[p, d, a, f, :] - 2 * std[p, d, a, f, :],
# phase[p, d, a, f, :] + 2 * std[p, d, a, f, :], alpha=0.5,
# label=r'$\pm2\hat{\sigma}_\phi$') # ,color='blue')
ax.scatter(times.mjd, phase[p, d, a, f, :], marker='+', alpha=0.3,
label=label)
ax.set_xlabel('Time [mjd]')
ax.set_ylabel('Phase deviation [rad.]')
ax.legend()
filename = "{}_{}_{}_{}MHz.png".format(axes['ant'][a], axes['dir'][d], axes['pol'][p],
axes['freq'][f] / 1e6)
plt.savefig(os.path.join(output_folder, filename))
plt.close('all')
def plot_phase_vs_time_per_datapack(datapacks, output_folder, solsets='sol000',
ant_sel=None, time_sel=None, dir_sel=None, freq_sel=None, pol_sel=None):
if not isinstance(solsets, (list, tuple)):
solsets = [solsets]
if not isinstance(datapacks, (list, tuple)):
datapacks = [datapacks]
output_folder = os.path.abspath(output_folder)
os.makedirs(output_folder, exist_ok=True)
phases = []
stds = []
for solset, datapack in zip(solsets,datapacks):
with DataPack(datapack, readonly=True) as datapack:
datapack.current_solset = solset
datapack.select(ant=ant_sel, time=time_sel, dir=dir_sel, freq=freq_sel, pol=pol_sel)
weights, axes = datapack.weights_phase
freq_ind = len(axes['freq']) >> 1
freq = axes['freq'][freq_ind]
ant = axes['ant'][0]
phase, _ = datapack.phase
std = np.sqrt(np.abs(weights))
timestamps, times = datapack.get_times(axes['time'])
phases.append(phase)
stds.append(std)
for phase in phases:
for s, S in zip(phase.shape, phases[0].shape):
assert s == S
Npol, Nd, Na, Nf, Nt = phases[0].shape
fig, ax = plt.subplots()
for p in range(Npol):
for d in range(Nd):
for a in range(Na):
for f in range(Nf):
ax.cla()
for i, solset in enumerate(solsets):
phase = phases[i]
std = stds[i]
label = "{} {} {} {:.1f}MHz {}:{}".format(os.path.basename(datapacks[i]), solset, axes['pol'][p], axes['freq'][f] / 1e6,
axes['ant'][a], axes['dir'][d])
# ax.fill_between(times.mjd, phase[p, d, a, f, :] - 2 * std[p, d, a, f, :],
# phase[p, d, a, f, :] + 2 * std[p, d, a, f, :], alpha=0.5,
# label=r'$\pm2\hat{\sigma}_\phi$') # ,color='blue')
ax.scatter(times.mjd, phase[p, d, a, f, :], marker='+', alpha=0.3,
label=label)
ax.set_xlabel('Time [mjd]')
ax.set_ylabel('Phase deviation [rad.]')
ax.legend()
filename = "{}_{}_{}_{}MHz.png".format(axes['ant'][a], axes['dir'][d], axes['pol'][p],
axes['freq'][f] / 1e6)
plt.savefig(os.path.join(output_folder, filename))
plt.close('all')
def plot_data_vs_solution(datapack, output_folder, data_solset='sol000', solution_solset='posterior_sol',
show_prior_uncert=False,
ant_sel=None, time_sel=None, dir_sel=None, freq_sel=None, pol_sel=None):
def _wrap(phi):
return np.angle(np.exp(1j * phi))
if isinstance(datapack, DataPack):
datapack = datapack.filename
output_folder = os.path.abspath(output_folder)
os.makedirs(output_folder, exist_ok=True)
solsets = [data_solset, solution_solset]
with DataPack(datapack, readonly=True) as datapack:
phases = []
stds = []
datapack.switch_solset(data_solset)
datapack.select(ant=ant_sel, time=time_sel, dir=dir_sel, freq=freq_sel, pol=pol_sel)
weights, axes = datapack.weights_phase
_, freqs = datapack.get_freqs(axes['freq'])
phase, _ = datapack.phase
std = np.sqrt(np.abs(1. / weights))
timestamps, times = datapack.get_times(axes['time'])
phases.append(_wrap(phase))
stds.append(std)
tec_conversion = -8.4480e9 / freqs[None, None, None, :, None]
datapack.switch_solset(solution_solset)
datapack.select(ant=ant_sel, time=time_sel, dir=dir_sel, freq=freq_sel, pol=pol_sel)
weights, _ = datapack.weights_tec
tec, _ = datapack.tec
std = np.sqrt(np.abs(1. / weights))[:, :, :, None, :] * np.abs(tec_conversion)
phases.append(_wrap(tec[:, :, :, None, :] * tec_conversion))
stds.append(std)
for phase in phases:
for s, S in zip(phase.shape, phases[0].shape):
assert s == S
Npol, Nd, Na, Nf, Nt = phases[0].shape
fig, ax = plt.subplots()
for p in range(Npol):
for d in range(Nd):
for a in range(Na):
for f in range(Nf):
ax.cla()
###
# Data
phase = phases[0]
std = stds[0]
label = "{} {} {:.1f}MHz {}:{}".format(data_solset, axes['pol'][p], axes['freq'][f] / 1e6,
axes['ant'][a], axes['dir'][d])
if show_prior_uncert:
ax.fill_between(times.mjd, phase[p, d, a, f, :] - std[p, d, a, f, :],
phase[p, d, a, f, :] + std[p, d, a, f, :], alpha=0.5,
label=r'$\pm\hat{\sigma}_\phi$') # ,color='blue')
ax.scatter(times.mjd, phase[p, d, a, f, :], marker='+', alpha=0.3, color='black', label=label)
###
# Solution
phase = phases[1]
std = stds[1]
label = "Solution: {}".format(solution_solset)
ax.fill_between(times.mjd, phase[p, d, a, f, :] - std[p, d, a, f, :],
phase[p, d, a, f, :] + std[p, d, a, f, :], alpha=0.5,
label=r'$\pm\hat{\sigma}_\phi$') # ,color='blue')
ax.scatter(times.mjd, phase[p, d, a, f, :], label=label, marker='.', s=5.)
ax.set_xlabel('Time [mjd]')
ax.set_ylabel('Phase deviation [rad.]')
ax.legend()
filename = "{}_v_{}_{}_{}_{}_{}MHz.png".format(data_solset, solution_solset, axes['ant'][a],
axes['dir'][d], axes['pol'][p],
axes['freq'][f] / 1e6)
ax.set_ylim(-np.pi, np.pi)
plt.savefig(os.path.join(output_folder, filename))
plt.close('all')
def plot_freq_vs_time(datapack, output_folder, solset='sol000', soltab='phase', phase_wrap=True, log_scale=False,
ant_sel=None, time_sel=None, dir_sel=None, freq_sel=None, pol_sel=None, vmin=None, vmax=None):
if isinstance(datapack, DataPack):
datapack = datapack.filename
with DataPack(datapack, readonly=True) as datapack:
datapack.switch_solset(solset)
datapack.select(ant=ant_sel, time=time_sel, dir=dir_sel, freq=freq_sel, pol=pol_sel)
obs, axes = datapack.__getattr__(soltab)
if soltab.startswith('weights_'):
obs = np.sqrt(np.abs(1. / obs)) # uncert from weights = 1/var
phase_wrap = False
if 'pol' in axes.keys():
# plot only first pol selected
obs = obs[0, ...]
# obs is dir, ant, freq, time
antenna_labels, antennas = datapack.get_antennas(axes['ant'])
patch_names, directions = datapack.get_sources(axes['dir'])
timestamps, times = datapack.get_times(axes['time'])
freq_labels, freqs = datapack.get_freqs(axes['freq'])
if phase_wrap:
obs = np.angle(np.exp(1j * obs))
vmin = -np.pi
vmax = np.pi
cmap = phase_cmap
else:
vmin = vmin if vmin is not None else np.percentile(obs.flatten(), 1)
vmax = vmax if vmax is not None else np.percentile(obs.flatten(), 99)
cmap = plt.cm.bone
if log_scale:
obs = np.log10(obs)
Na = len(antennas)
Nt = len(times)
Nd = len(directions)
Nf = len(freqs)
M = int(np.ceil(np.sqrt(Na)))
output_folder = os.path.abspath(output_folder)
os.makedirs(output_folder, exist_ok=True)
for k in range(Nd):
filename = os.path.join(os.path.abspath(output_folder), "{}_{}_dir_{}.png".format(solset, soltab, k))
logging.info("Plotting {}".format(filename))
fig, axs = plt.subplots(nrows=M, ncols=M, figsize=(4 * M, 4 * M), sharex=True, sharey=True)
for i in range(M):
for j in range(M):
l = j + M * i
if l >= Na:
continue
im = axs[i][j].imshow(obs[k, l, :, :], origin='lower', cmap=cmap, aspect='auto', vmin=vmin,
vmax=vmax,
extent=(times[0].mjd * 86400., times[-1].mjd * 86400., freqs[0], freqs[-1]))
plt.tight_layout()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
plt.savefig(filename)
plt.close('all')
def plot_solution_residuals(datapack, output_folder, data_solset='sol000', solution_solset='posterior_sol',
ant_sel=None, time_sel=None, dir_sel=None, freq_sel=None, pol_sel=None):
def _wrap(phi):
return np.angle(np.exp(1j * phi))
if not isinstance(datapack, str):
datapack = datapack.filename
output_folder = os.path.abspath(output_folder)
os.makedirs(output_folder, exist_ok=True)
solsets = [data_solset, solution_solset]
with DataPack(datapack, readonly=True) as datapack:
datapack.switch_solset(data_solset)
datapack.select(ant=ant_sel, time=time_sel, dir=dir_sel, freq=freq_sel, pol=pol_sel)
phase, axes = datapack.phase
timestamps, times = datapack.get_times(axes['time'])
antenna_labels, antennas = datapack.get_antennas(axes['ant'])
patch_names, directions = datapack.get_sources(axes['dir'])
_, freqs = datapack.get_freqs(axes['freq'])
pols, _ = datapack.get_pols(axes['pol'])
Npol, Nd, Na, Nf, Nt = phase.shape
datapack.switch_solset(solution_solset)
datapack.select(ant=ant_sel, time=time_sel, dir=dir_sel, freq=freq_sel, pol=pol_sel)
tec, _ = datapack.tec
phase_pred = -8.448e9 * tec[..., None, :] / freqs[:, None]
res = _wrap(_wrap(phase) - _wrap(phase_pred))
cbar = None
for p in range(Npol):
for a in range(Na):
M = int(np.ceil(np.sqrt(Nd)))
fig, axs = plt.subplots(nrows=2 * M, ncols=M, sharex=True, figsize=(M * 4, 1 * M * 4),
gridspec_kw={'height_ratios': [1.5, 1] * M})
fig.subplots_adjust(wspace=0., hspace=0.)
fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.875, 0.15, 0.025, 0.7])
vmin = -1.
vmax = 1.
norm = plt.Normalize(vmin, vmax)
for row in range(0, 2 * M, 2):
for col in range(M):
ax1 = axs[row][col]
ax2 = axs[row + 1][col]
d = col + row // 2 * M
if d >= Nd:
continue
img = ax1.imshow(res[p, d, a, :, :], origin='lower', aspect='auto',
extent=(times[0].mjd * 86400., times[-1].mjd * 86400., freqs[0], freqs[-1]),
cmap=plt.cm.jet, norm=norm)
ax1.text(0.05, 0.95, axes['dir'][d], horizontalalignment='left', verticalalignment='top',
transform=ax1.transAxes, backgroundcolor=(1., 1., 1., 0.5))
ax1.set_ylabel('frequency [Hz]')
ax1.legend()
mean = res[p, d, a, :, :].mean(0)
t = np.arange(len(times))
ax2.plot(times.mjd * 86400, mean, label=r'$\mathbb{E}_\nu[\delta\phi]$')
std = res[p, d, a, :, :].std(0)
ax2.fill_between(times.mjd * 86400, mean - std, mean + std, alpha=0.5,
label=r'$\pm\sigma_{\delta\phi}$')
ax2.set_xlabel('Time [mjs]')
ax2.set_xlim(times[0].mjd * 86400., times[-1].mjd * 86400.)
ax2.set_ylim(-np.pi, np.pi)
# ax2.legend()
fig.colorbar(img, cax=cbar_ax, orientation='vertical', label='phase dev. [rad]')
filename = "{}_v_{}_{}_{}.png".format(data_solset, solution_solset, axes['ant'][a], axes['pol'][p])
plt.savefig(os.path.join(output_folder, filename))
plt.close('all')
def test_vornoi():
from scipy.spatial import Voronoi, voronoi_plot_2d
import pylab as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import numpy as np
points = np.random.uniform(size=[10, 2])
v = Voronoi(points)
nodes = v.vertices
regions = v.regions
ax = plt.subplot()
patches = []
for reg in regions:
if len(reg) < 3:
continue
poly = Polygon(np.array([nodes[i] for i in reg]), closed=False)
patches.append(poly)
p = PatchCollection(patches)
p.set_array(np.random.uniform(size=len(patches)))
ax.add_collection(p)
# plt.colorbar(p)
ax.scatter(points[:, 0], points[:, 1])
ax.set_xlim([np.min(points[:, 0]), np.max(points[:, 0])])
ax.set_ylim([np.min(points[:, 1]), np.max(points[:, 1])])
plt.show()
def test_nearest():
from scipy.spatial import ConvexHull, cKDTree
import pylab as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import numpy as np
points = np.random.uniform(size=[42, 2])
k = cKDTree(points)
dx = np.max(points[:, 0]) - np.min(points[:, 0])
dy = np.max(points[:, 1]) - np.min(points[:, 1])
N = int(min(max(100, points.shape[0] * 2), 500))
x = np.linspace(np.min(points[:, 0]) - 0.1 * dx, np.max(points[:, 0]) + 0.1 * dx, N)
y = np.linspace(np.min(points[:, 1]) - 0.1 * dy, np.max(points[:, 1]) + 0.1 * dy, N)
X, Y = np.meshgrid(x, y, indexing='ij')
points_i = np.array([X.flatten(), Y.flatten()]).T
dist, i = k.query(points_i, k=1)
patches = []
for group in range(points.shape[0]):
points_g = points_i[i == group, :]
hull = ConvexHull(points_g)
nodes = points_g[hull.vertices, :]
poly = Polygon(nodes, closed=False)
patches.append(poly)
ax = plt.subplot()
p = PatchCollection(patches)
p.set_array(np.random.uniform(size=len(patches)))
ax.add_collection(p)
# plt.colorbar(p)
ax.scatter(points[:, 0], points[:, 1])
ax.set_xlim([np.min(points_i[:, 0]), np.max(points_i[:, 0])])
ax.set_ylim([np.min(points_i[:, 1]), np.max(points_i[:, 1])])
ax.set_facecolor('black')
plt.show()
```
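A hedged usage sketch of the parallel plotting entry point above; `dds.h5` and the output folder are hypothetical paths, and the keyword arguments shown are simply forwarded to `DatapackPlotter.plot`:
```python
from bayes_filter.plotting import animate_datapack

animate_datapack('dds.h5', './phase_figs', num_processes=4,
                 observable='phase', solset='sol000',
                 labels_in_radec=True, plot_crosses=True)
```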
#### File: bayes_filter/bayes_filter/targets.py
```python
import tensorflow as tf
import tensorflow_probability as tfp
from .parameters import Parameter, ScaledLowerBoundedBijector
from collections import namedtuple
from .misc import sqrt_with_finite_grads
from .settings import float_type
from .processes import Process, DTECProcess
from .misc import random_sample
from . import TEC_CONV
import numpy as np
class DTECToGainsSAEM(Process):
@property
def _Params(self):
return namedtuple('DTECToGainsParams',
['amp', 'y_sigma', 'dtec', 'dtec_prior'])
def __init__(self,
Lp:tf.Tensor,
mp:tf.Tensor,
dtec_process:DTECProcess):
"""
Creates an instance of the target distribution for complex gains modelled by DTEC.
:param Lp: tf.Tensor
Lower-triangular factor of the decorrelating (posterior) transform, x = Lp.y + mp.
:param mp: tf.Tensor
Mean of the decorrelating (posterior) transform.
:param dtec_process: DTECProcess
The DTEC process supplying the prior factor L and mean m, so that dtec = L.x + m.
"""
self._setup = False
self.dtec_process = dtec_process
self.Lp = Lp
self.logdetLp = tf.reduce_sum(tf.math.log(tf.linalg.diag_part(Lp)))
self.mp = mp
def _vec_bijector(m, *L):
"""
Make the appropriate bijector for L.x + m = y
where x is tf.Tensor of shape [S, N]
:param m: tf.Tensor
mean [N]
:param L: one or more tf.Tensor
lower triangular [N, N] factors; their product is applied to x
:return: tfp.bijectors.Bijector
The bijector realising y = (L[0]. ... .L[-1]).x + m
"""
_L = L[0]
for i in range(1,len(L)):
_L = tf.matmul(_L, L[i])
def _forward(x):
# ij,sj-> si
return tf.matmul(x,_L,transpose_b=True) + m
def _inverse(y):
# ij, sj -> si
return tf.transpose(tf.linalg.triangular_solve(_L, tf.transpose(y - m)))
def _inverse_log_jac(y):
logdetjac = [-tf.reduce_sum(tf.math.log(tf.linalg.diag_part(l))) for l in L]
return sum(logdetjac)
return tfp.bijectors.Inline(forward_fn=_forward,
inverse_fn=_inverse,
inverse_log_det_jacobian_fn=_inverse_log_jac,
forward_min_event_ndims=1)
bijectors = self.Params(
amp = tfp.bijectors.Exp(),#ScaledLowerBoundedBijector(0.1, 1.),
y_sigma = ScaledLowerBoundedBijector(1e-2, 0.1),
#dtec = L.(Lp.y + mp) + m
dtec = _vec_bijector(tf.matmul(self.dtec_process.L, mp[:,None])[:,0] + self.dtec_process.m,
self.dtec_process.L, Lp),
dtec_prior = _vec_bijector(mp, Lp)
)
super(DTECToGainsSAEM, self).__init__(bijectors=bijectors, distributions=None, unconstrained_values=None)
@staticmethod
def init_variables(num_chains, full_block_size, tf_seed=0):
amp_bijector = tfp.bijectors.Exp()#ScaledLowerBoundedBijector(0.1, 1.)
y_sigma_bijector = ScaledLowerBoundedBijector(1e-2, 0.1)
init_y_sigma = y_sigma_bijector.inverse(
tf.random.truncated_normal(mean=tf.constant(0.1, dtype=float_type),
stddev=tf.constant(0.03, dtype=float_type),
shape=[num_chains, 1], dtype=float_type, seed=tf_seed))
init_amp = amp_bijector.inverse(
tf.random.truncated_normal(mean=tf.constant(1., dtype=float_type),
stddev=tf.constant(0.5, dtype=float_type),
shape=[num_chains, 1], dtype=float_type, seed=tf_seed))
init_dtec = tf.random.truncated_normal(shape=[num_chains, full_block_size], dtype=float_type, seed=tf_seed)
return init_amp, init_y_sigma, init_dtec
def setup_target(self,Y_real, Y_imag, freqs,
full_posterior=True):
self.full_posterior = full_posterior
# N, Nf
self.Y_real = Y_real
# N, Nf
self.Y_imag = Y_imag
# Nf
self.freqs = freqs
self.N = self.dtec_process.N
self.Ns = self.dtec_process.Ns
self.Nh = self.dtec_process.Nh
# [1]
self.logdetLp = tf.reduce_sum(tf.math.log(tf.linalg.diag_part(self.Lp)), axis=-1, keepdims=True)
# dtec = L.x + m (reparameterisation of prior)
# x = Lp.y + mp (decorrelation of posterior)
# dtec = L.(Lp.y + mp) + m
self._setup = True
def forward_equation(self, dtec):
"""
Calculate real and imaginary parts of gains from dtec.
:param dtec: float_type, Tensor [b0,...,bB]
The DTECs
:returns: tuple
float_type, tf.Tensor [b0,...,bB,Nf] Real part
float_type, tf.Tensor [b0,...,bB,Nf] Imag part
"""
#Nf
invfreqs = TEC_CONV*tf.math.reciprocal(self.freqs)
#..., Nf
phase = dtec[..., None] * invfreqs
real_part = tf.cos(phase)
imag_part = tf.sin(phase)
return real_part, imag_part
def log_prob_gains(self, constrained_params):
"""
Get log Prob(gains | dtec)
:param constrained_y_sigma: float, tf.Tensor
y_sigma [num_samples, 1]
:param constrained_dtec: float_type, tf.Tensor, [S, M]
Cosntrained dtec
:return: float_type, tf.Tensor, scalar
The log probability
"""
# marginal
# S, N
dtec_marginal = constrained_params.dtec[:, :self.N]
# S, N, Nf
g_real, g_imag = self.forward_equation(dtec_marginal)
likelihood_real = tfp.distributions.Laplace(loc=g_real, scale=constrained_params.y_sigma[:, :, None])
likelihood_imag = tfp.distributions.Laplace(loc=g_imag, scale=constrained_params.y_sigma[:, :, None])
# S
logp = tf.reduce_sum(likelihood_real.log_prob(self.Y_real[None, :, :]), axis=[1, 2]) + \
tf.reduce_sum(likelihood_imag.log_prob(self.Y_imag[None, :, :]), axis=[1, 2])
return logp
def log_prob(self, amp, y_sigma, dtec):
"""
Calculate the log probability of the gains given a model.
:param amp: float_type tf.Tensor [num_chains, 1]
Unconstrained amp
:param y_sigma: float_type tf.Tensor [num_chains, 1]
Unconstrained y_sigma
:param dtec: float_type tf.Tensor [num_chains, N+Ns]
Unconstrained dtec
:return: float_type, tf.Tensor, [num_chains]
The log-probability of the data given model.
"""
if not self.setup:
raise ValueError("setup is not complete, must run setup_target")
unconstrained_params = self.Params(amp=amp, #[num_chains, 1]
y_sigma=y_sigma, #[num_chains,1]
dtec=dtec, #[num_chains,N+Ns]
dtec_prior=dtec) #[num_chains,N+Ns]
constrained_params = self.constrained_state(unconstrained_params)
constrained_params = constrained_params._replace(dtec=constrained_params.amp * constrained_params.dtec)
# dtec = L.x + m
# x = Lp.y + mp
# dtec = L.(Lp.y + mp) + m
# P(dtec) = P(y) | ddtec/dy |^(-1)
# N[m, L.L^T] |L||Lp| = P(y)
# log P(y) = -1/2 (L.(Lp.y + mp) + m - m)^T L^-T L^-1 (L.(Lp.y + mp) + m - m) - D/2log(2pi) - log(|L|) + log(|L|) + log(|Lp|)
# = -1/2 ((Lp.y + mp))^T ((Lp.y + mp)) - D/2log(2pi) + log(|Lp|)
# num_chains
log_prob_gains = self.log_prob_gains(constrained_params)
# num_chains
log_prob_dtec_prior = -0.5*tf.reduce_sum(tf.square(constrained_params.dtec_prior),axis=1) + self.logdetLp - 0.5*tf.cast(self.N+self.Ns, float_type)*np.log(2*np.pi)
#num_chains
log_prob_y_sigma_prior = tf.reduce_sum(tfp.distributions.Normal(loc=tf.constant(0.1,dtype=float_type),
scale=tf.constant(0.05,dtype=float_type)).log_prob(
constrained_params.y_sigma), axis=-1)
# num_chains
log_prob_amp_prior = tf.reduce_sum(tfp.distributions.Normal(loc=tf.constant(1.0, dtype=float_type),
scale=tf.constant(0.75,
dtype=float_type)).log_prob(
constrained_params.amp), axis=-1)
# log_prob_amp = tfp.distributions.Normal(loc=tf.constant(0.1, dtype=float_type),
# scale=tf.constant(0.05, dtype=float_type)).log_prob(
# constrained_params.amp)
if self.full_posterior:
res = log_prob_gains + log_prob_dtec_prior + log_prob_y_sigma_prior + log_prob_amp_prior
else:
res = log_prob_gains
return res
class DTECToGainsTarget(object):
@property
def Params(self):
return namedtuple('DTECToGainsParams',
['amp', 'y_sigma', 'dtec'])
def __init__(self,
dtec_process:DTECProcess):
"""
Creates an instance of the target distribution for complex gains modelled by DTEC.
:param dtec_process: DTECProcess
The DTEC process supplying the prior factor L (and its data-only slice) used to map unconstrained samples to DTEC.
"""
self._setup = False
self.dtec_process = dtec_process
@staticmethod
def init_variables(num_chains, full_block_size, tf_seed=0):
"""
Get initial variables for the target.
:param num_chains:
:param full_block_size:
:param tf_seed:
:return:
"""
init_y_sigma = tf.math.log(
tf.random.uniform(shape=[num_chains, 1],
minval=tf.constant(0.05, dtype=float_type),
maxval=tf.constant(0.15, dtype=float_type),
dtype=float_type, seed=tf_seed))
init_amp = tf.math.log(
tf.random.uniform(shape=[num_chains, 1],
minval=tf.constant(0.5, dtype=float_type),
maxval=tf.constant(3., dtype=float_type),
dtype=float_type, seed=tf_seed))
init_dtec = 0.3*tf.random.truncated_normal(shape=[num_chains, full_block_size], dtype=float_type, seed=tf_seed)
return init_amp, init_y_sigma, init_dtec
def setup_target(self,Y_real, Y_imag, freqs,
full_posterior=True):
"""
Taken out of the instantiation to allow caching.
:param Y_real:
:param Y_imag:
:param freqs:
:param full_posterior:
:return:
"""
self.full_posterior = full_posterior
# N, Nf
self.Y_real = Y_real
# N, Nf
self.Y_imag = Y_imag
# Nf
self.freqs = freqs
# Nf
self.invfreqs = TEC_CONV * tf.math.reciprocal(self.freqs)
self.N = self.dtec_process.N
self.Ns = self.dtec_process.Ns
self.Nh = self.dtec_process.Nh
self.L_data = self.dtec_process.L[:self.N, :]
self.L = self.dtec_process.L
self._setup = True
def transform_state(self,log_amp, log_y_sigma, f, data_only=False):
"""
Transform the input state into constrained variables.
:param log_amp:
[S, 1]
:param log_y_sigma:
[S, 1]
:param f:
[S, N]
:returns: tuple of
tf.Tensor [S, 1]
tf.Tensor [S, 1]
tf.Tensor [S, N]
"""
y_sigma = tf.exp(log_y_sigma)
amp = tf.exp(log_amp)
if data_only:
# L_ij f_sj -> f_sj L_ji
dtec = amp * tf.matmul(f, self.L_data, transpose_b=True)
else:
# L_ij f_sj -> f_sj L_ji
dtec = amp * tf.matmul(f, self.L, transpose_b=True)
return self.Params(amp=amp, y_sigma=y_sigma, dtec=dtec)
def forward_equation(self, dtec):
"""
Calculate real and imaginary parts of gains from dtec.
:param dtec: float_type, Tensor [b0,...,bB]
The DTECs
:returns: tuple
float_type, tf.Tensor [b0,...,bB,Nf] Real part
float_type, tf.Tensor [b0,...,bB,Nf] Imag part
"""
#..., Nf
phase = dtec[..., None] * self.invfreqs
real_part = tf.cos(phase)
imag_part = tf.sin(phase)
return real_part, imag_part
def log_prob(self, log_amp, log_y_sigma, f):
"""
Calculate the log probability of the gains given a model.
:param log_amp: float_type tf.Tensor [num_chains, 1]
Unconstrained (log) amp
:param log_y_sigma: float_type tf.Tensor [num_chains, 1]
Unconstrained (log) y_sigma
:param f: float_type tf.Tensor [num_chains, N+Ns]
Unconstrained (whitened) dtec
:return: float_type, tf.Tensor, [num_chains]
The log-probability of the data given model.
"""
# num_chains = tf.shape(f)[0]
# shuffle = tf.random.shuffle(tf.range(num_chains))
# log_y_sigma = tf.gather(log_y_sigma, shuffle, axis=0)
# log_amp = tf.gather(log_amp, shuffle, axis=0)
# f = tf.gather(f, shuffle, axis=0)
# print('f',f)
#TODO: once working try with only data prior
transformed = self.transform_state(log_amp, log_y_sigma, f, data_only=True)
# num_chains
prior = tfp.distributions.MultivariateNormalDiag(loc=tf.zeros_like(f),
scale_identity_multiplier=1.).log_prob(f)
# phase_model = transformed.dtec[:,:,None]*self.invfreqs
# Yimag_model = tf.sin(phase_model)
# Yreal_model = tf.cos(phase_model)
#TODO: do slicing on L first reduce complexity
Yreal_model, Yimag_model = self.forward_equation(transformed.dtec)
likelihood = -tf.math.reciprocal(transformed.y_sigma[:, :, None]) * sqrt_with_finite_grads(
tf.math.square(self.Y_imag[None, :, :] - Yimag_model) + tf.math.square(
self.Y_real[None, :, :] - Yreal_model)) - log_y_sigma[:, :, None]
# # num_chains, N, Nf
# likelihood = tfp.distributions.Laplace(loc=self.Y_imag[None, :, :], scale=transformed.y_sigma[:, :, None]).log_prob(
# Yimag_model) \
# + tfp.distributions.Laplace(loc=self.Y_real[None, :, :], scale=transformed.y_sigma[:, :, None]).log_prob(
# Yreal_model)
#num_chains
y_sigma_prior = tfp.distributions.Normal(
loc=tf.constant(0.1, dtype=float_type), scale=tf.constant(0.1, dtype=float_type)).log_prob(transformed.y_sigma[:, 0])
# num_chains
logp = tf.reduce_sum(likelihood, axis=[1, 2]) + prior
# + y_sigma_prior
# print('logp',logp)
return logp
```
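The likelihoods above compare the data to gains generated from DTEC through `forward_equation`: phase = TEC_CONV * dtec / freq, with real and imaginary parts cos(phase) and sin(phase). A plain numpy sketch of that mapping; the value used for `TEC_CONV` is an assumption taken from the -8.448e9 constant that appears elsewhere in this repo, and the DTEC values are illustrative:
```python
import numpy as np

TEC_CONV = -8.448e9  # assumed value of bayes_filter.TEC_CONV

def forward_gains(dtec, freqs):
    # Mirror of DTECToGainsTarget.forward_equation in plain numpy.
    phase = dtec[..., None] * (TEC_CONV / freqs)        # [..., Nf]
    return np.cos(phase), np.sin(phase)

freqs = np.linspace(110e6, 160e6, 6)                    # same band as the debug script below
dtec = np.array([0.0, 0.01, -0.02])                     # illustrative DTEC values
g_real, g_imag = forward_gains(dtec, freqs)
print(g_real.shape, g_imag.shape)                       # (3, 6) (3, 6)
print(np.allclose(g_real**2 + g_imag**2, 1.0))          # True: unit-modulus gains
```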
#### File: bayes_filter/tests/test_coord_transforms.py
```python
from .common_setup import *
import numpy as np
import tensorflow as tf
from astropy import time as at
from bayes_filter import float_type
from bayes_filter.coord_transforms import itrs_to_enu_6D, tf_coord_transform, itrs_to_enu_with_references, ITRSToENUWithReferences
from bayes_filter.feeds import IndexFeed, TimeFeed, CoordinateFeed, init_feed
from bayes_filter.misc import make_coord_array
def test_itrs_to_enu_6D(tf_session, time_feed, lofar_array):
# python test
times = np.arange(2)[:,None]
directions = np.random.normal(0,0.1, size=(10,2))
antennas = lofar_array[1]
X = make_coord_array(times, directions, antennas,flat=False)
out = np.array(list(map(itrs_to_enu_6D,X)))
assert out.shape == (2,10,antennas.shape[0],7)
#TF test
with tf_session.graph.as_default():
index_feed = IndexFeed(1)
obstime_init = at.Time("2018-01-01T00:00:00.000", format='isot')
times = tf.linspace(obstime_init.mjd * 86400., obstime_init.mjd * 86400. + 100., 9)[:, None]
time_feed = TimeFeed(index_feed, times)
ra = np.pi / 4 + 2. * np.pi / 180. * tf.random_normal(shape=(4, 1))
dec = np.pi / 4 + 2. * np.pi / 180. * tf.random_normal(shape=(4, 1))
Xd = tf.concat([ra, dec], axis=1)
Xa = tf.constant(lofar_array[1], dtype=float_type)
coord_feed = CoordinateFeed(time_feed, Xd, Xa, coord_map=tf_coord_transform(itrs_to_enu_6D))
init, next = init_feed(coord_feed)
tf_session.run(init)
out, N, slice_size = tf_session.run([next, coord_feed.N, coord_feed.slice_size])
assert out.shape[0] == slice_size * 4 * len(lofar_array[0])
assert out.shape[1] == 7
assert np.all(np.isclose(np.linalg.norm(out[:,1:4],axis=1), 1.))
assert np.all(np.isclose(np.linalg.norm(out[:, 4:7], axis=1) < 100., 1.))
def test_itrs_to_enu_with_references(tf_session, time_feed, lofar_array):
# python test
times = np.arange(2)[:,None]
directions = np.random.normal(0,0.1, size=(10,2))
antennas = lofar_array[1]
X = make_coord_array(times, directions, antennas,flat=False)
out = np.array(list(map(itrs_to_enu_6D,X)))
assert out.shape == (2,10,antennas.shape[0],7)
#TF test
with tf_session.graph.as_default():
index_feed = IndexFeed(1)
obstime_init = at.Time("2018-01-01T00:00:00.000", format='isot')
times = tf.linspace(obstime_init.mjd * 86400., obstime_init.mjd * 86400. + 100., 9)[:, None]
time_feed = TimeFeed(index_feed, times)
ra = np.pi / 4 + 2. * np.pi / 180. * tf.random_normal(shape=(4, 1))
dec = np.pi / 4 + 2. * np.pi / 180. * tf.random_normal(shape=(4, 1))
Xd = tf.concat([ra, dec], axis=1)
Xa = tf.constant(lofar_array[1], dtype=float_type)
coord_feed = CoordinateFeed(time_feed, Xd, Xa, coord_map=ITRSToENUWithReferences(lofar_array[1][0,:], [np.pi/4,np.pi/4], lofar_array[1][0,:]))
init, next = init_feed(coord_feed)
tf_session.run(init)
out, N, slice_size = tf_session.run([next, coord_feed.N, coord_feed.slice_size])
assert np.all(np.isclose(out[0, 4:7], np.zeros_like(out[0,4:7])))
assert out.shape[0] == slice_size * 4 * len(lofar_array[0])
assert out.shape[1] == 13
assert np.all(np.isclose(np.linalg.norm(out[:,1:4],axis=1), 1.))
assert np.all(np.isclose(np.linalg.norm(out[:, 4:7], axis=1) < 100., 1.))
assert np.all(np.isclose(np.linalg.norm(out[:, 10:13], axis=1), 1.))
assert np.all(np.isclose(np.linalg.norm(out[:, 7:10], axis=1) < 100., 1.))
```
#### File: bayes_filter/debug/free_transition_saem_screen.py
```python
from bayes_filter.filters import FreeTransitionSAEM
import tensorflow as tf
import tensorflow_probability as tfp
import os
from bayes_filter.misc import load_array_file
from bayes_filter import float_type
import sys
from bayes_filter.feeds import IndexFeed,TimeFeed,CoordinateFeed, DataFeed, init_feed, ContinueFeed
from bayes_filter.coord_transforms import tf_coord_transform, itrs_to_enu_with_references
from bayes_filter.kernels import DTECIsotropicTimeGeneralODE, DTECIsotropicTimeGeneral
import astropy.time as at
import astropy.coordinates as ac
import astropy.units as au
from bayes_filter.frames import ENU
import numpy as np
import pylab as plt
from scipy.spatial import cKDTree
import seaborn as sns
from timeit import default_timer
from bayes_filter.settings import angle_type, dist_type
def arrays():
return os.path.dirname(sys.modules["bayes_filter"].__file__)
def lofar_array(arrays):
lofar_array = os.path.join(arrays, 'arrays/lofar.hba.antenna.cfg')
return load_array_file(lofar_array)
def lofar_array2(arrays):
lofar_array = os.path.join(arrays, 'arrays/lofar.hba.antenna.cfg')
res = load_array_file(lofar_array)
return res[0][[0,48,49,50, 51]], res[1][[0,48,49,50,51],:]
def simulated_ddtec(tf_session, lofar_array):
class Simulated:
def __init__(self):
ref_ant = lofar_array[1][0,:]
Nt, Nd, Na, Nf = 1, 20, len(lofar_array[0])-1, 6
with tf_session.graph.as_default():
index_feed = IndexFeed(Nt)
obstime_init = at.Time("2018-01-01T00:00:00.000", format='isot')
times = obstime_init.mjd*86400. + tf.cast(tf.linspace(0., Nt*30., Nt)[:, None],float_type)
time_feed = TimeFeed(index_feed, times)
cont_feed = ContinueFeed(time_feed)
enu = ENU(location=ac.ITRS(*ref_ant * au.m), obstime=obstime_init)
up = ac.SkyCoord(east=0., north=0., up=1., frame=enu).transform_to('icrs')
M = 20
self.M = M
ra_vec = np.linspace(up.ra.rad - 2. * np.pi / 180., up.ra.rad + 0. * np.pi / 180., M)
dec_vec = np.linspace(up.dec.rad - 2. * np.pi / 180., up.dec.rad + 2. * np.pi / 180., M)
ra, dec = np.meshgrid(ra_vec, dec_vec, indexing='ij')
ra = ra.flatten()[:, None]
dec = dec.flatten()[:, None]
Nd = ra.shape[0]
Xd = tf.concat([ra, dec], axis=1)
Xa = tf.constant(lofar_array[1][1:,:], dtype=float_type)
coord_feed = CoordinateFeed(time_feed, Xd, Xa,
coord_map=tf_coord_transform(itrs_to_enu_with_references(ref_ant, [up.ra.rad, up.dec.rad], ref_ant)))
ra_vec = np.linspace(up.ra.rad - 2. * np.pi / 180., up.ra.rad + 2. * np.pi / 180., M)
dec_vec = np.linspace(up.dec.rad - 2. * np.pi / 180., up.dec.rad + 2. * np.pi / 180., M)
ra, dec = np.meshgrid(ra_vec, dec_vec, indexing='ij')
ra = ra.flatten()[:, None]
dec = dec.flatten()[:, None]
Nd_screen = ra.shape[0]
Xd_screen = tf.concat([ra, dec], axis=1)
star_coord_feed = CoordinateFeed(time_feed, Xd_screen, Xa,
coord_map=tf_coord_transform(itrs_to_enu_with_references(ref_ant, [up.ra.rad, up.dec.rad], ref_ant)))
init, next = init_feed(coord_feed)
init_star, next_star = init_feed(star_coord_feed)
init_cont, cont = init_feed(cont_feed)
Xd_screen, Xd, _,_,_ = tf_session.run([Xd_screen, Xd, init, init_cont, init_star])
kern = DTECIsotropicTimeGeneral(variance=1e-4,timescale=45.,lengthscales=5., a=500., b=60.,
fed_kernel='RBF',obs_type='DDTEC', squeeze=True, kernel_params={'resolution':3})
# kern = tfp.positive_semidefinite_kernels.ExponentiatedQuadratic(tf.convert_to_tensor(0.04,float_type), tf.convert_to_tensor(10.,float_type))
self.slice_size = Nt * Xd_screen.shape[0] * Xa.shape[0] + Nt * Xd.shape[0] * Xa.shape[0]
kd = cKDTree(Xd)
self.nearest, idx = kd.query(Xd_screen, k=1)
self.nearest *= 180./np.pi
from timeit import default_timer
t0 = default_timer()
Y_real, Y_imag = [],[]
Y_real_star, Y_imag_star = [], []
ddtec_true, ddtec_star = [],[]
while True:
K,N = tf_session.run([kern.K(tf.concat([next,next_star],axis=0)),tf.shape(next)[0]])
s = np.mean(np.diag(K))
L = np.sqrt(s)*np.linalg.cholesky(K/s+1e-6*np.eye(K.shape[-1]))
np.random.seed(0)
ddtec = np.einsum('ab,b->a',L, np.random.normal(size=L.shape[1]))
ddtec_true.append(ddtec[:N])
ddtec_star.append(ddtec[N:])
freqs = np.linspace(110.e6, 160.e6, Nf)
Y_real.append(np.cos(-8.448e9 * ddtec[:N,None]/freqs))
Y_imag.append(np.sin(-8.448e9 * ddtec[:N, None] / freqs))
Y_real_star.append(np.cos(-8.448e9 * ddtec[N:, None] / freqs))
Y_imag_star.append(np.sin(-8.448e9 * ddtec[N:, None] / freqs))
if not tf_session.run(cont):
break
self.Y_real_star = np.concatenate(Y_real_star,axis=0).reshape((Nt, Nd_screen, Na, Nf))
self.Y_imag_star = np.concatenate(Y_imag_star, axis=0).reshape((Nt, Nd_screen, Na, Nf))
Y_real_true = np.concatenate(Y_real,axis=0).reshape((Nt, Nd, Na, Nf))
Y_real = Y_real_true + 0.26*np.random.normal(size=Y_real_true.shape)
# Y_real[Nt//2:Nt//2 + 5, ...] *= 0.5
Y_imag_true = np.concatenate(Y_imag, axis=0).reshape((Nt, Nd, Na, Nf))
Y_imag = Y_imag_true + 0.26 * np.random.normal(size=Y_imag_true.shape)
# Y_imag[Nt // 2:Nt // 2 + 5, ...] *= 0.5
self.freqs = freqs
self.ddtec_true = np.concatenate(ddtec_true,axis=0).reshape((Nt, Nd, Na))
self.ddtec_star = np.concatenate(ddtec_star, axis=0).reshape((Nt, Nd_screen, Na))
self.Y_real = Y_real
self.Y_imag = Y_imag
self.Y_real_true = Y_real_true
self.Y_imag_true = Y_imag_true
# self.np_freqs = tf_session.run(freqs)
self.np_times = tf_session.run(times)
self.ddtec = ddtec
self.coord_feed = coord_feed
self.star_coord_feed = star_coord_feed
self.data_feed = DataFeed(index_feed, Y_real, Y_imag, event_size=1)
return Simulated()
if __name__ == '__main__':
from tensorflow.python import debug as tf_debug
sess = tf.Session(graph=tf.Graph())
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
with sess.graph.as_default():
simulated_ddtec = simulated_ddtec(sess, lofar_array2(arrays()))
free_transition = FreeTransitionSAEM(
simulated_ddtec.freqs,
simulated_ddtec.data_feed,
simulated_ddtec.coord_feed,
simulated_ddtec.star_coord_feed)
filtered_res, inits = free_transition.filter_step(
num_samples=2000, num_chains=2,parallel_iterations=10, num_leapfrog_steps=3,target_rate=0.6,
num_burnin_steps=1000,num_saem_samples=2000,saem_maxsteps=0,initial_stepsize=7e-3,
init_kern_params={'y_sigma':0.5,'variance':1e-4,'timescale':45.,'lengthscales':5., 'a':500., 'b':60.},
which_kernel=0, kernel_params={'resolution':3}, saem_batchsize=500,
slice_size=simulated_ddtec.slice_size)
sess.run(inits[0])
sess.run(inits[1])
sess.run(inits[2])
cont = True
while cont:
res = sess.run(filtered_res)
# print("post_logp", res.post_logp,"test_logp", res.test_logp)
print("rhat:",np.percentile(res.rhat,[10,50,90]), res.rhat)
plt.hist(res.rhat, bins = int(np.sqrt(len(res.rhat))))
plt.show()
# plt.plot(res.step_sizes)
# plt.show()
# plt.hist(res.ess.flatten(),bins=100)
# plt.show()
times = simulated_ddtec.np_times[:,0]
ddtec_true = simulated_ddtec.ddtec_true
ddtec_star = simulated_ddtec.ddtec_star
Y_real_star = simulated_ddtec.Y_real_star
Y_imag_star = simulated_ddtec.Y_imag_star
# plt.plot(times, res.Y_imag[1,:,0,1,0],c='black',lw=2.)
# plt.fill_between(times, res.Y_imag[0,:,0,1,0], res.Y_imag[2,:,0,1,0],alpha=0.5)
# plt.plot(times, res.extra.Y_imag_data[:, 0, 1, 0], c='red', lw=1.)
# plt.plot(times, simulated_ddtec.Y_imag_true[:, 0, 1, 0], c='green', lw=1.)
# plt.show()
vmin, vmax = np.percentile(res.dtec_star[1, ...], [5, 95])
plt.style.use('ggplot')
fig, axs = plt.subplots(1+(simulated_ddtec.Y_imag_true.shape[2]), 2, figsize=(8,4*(simulated_ddtec.Y_imag_true.shape[2])+4))
ax1,ax2 = axs[0]
ax1.imshow(res.dtec[1, 0, :, 1].reshape((simulated_ddtec.M,simulated_ddtec.M)),vmin=vmin,vmax=vmax)
ax1.set_title("Model space solution")
ax2.imshow(res.dtec[1, 0, :, 1].reshape((simulated_ddtec.M,simulated_ddtec.M)),vmin=vmin,vmax=vmax)
ax2.set_title("Data space solution")
ax2.legend()
for i in range(simulated_ddtec.Y_imag_true.shape[2]):
ax3,ax4 = axs[i+1]
ax3.imshow(res.dtec_star[1, 0, :, i].reshape((simulated_ddtec.M,simulated_ddtec.M)),vmin=vmin,vmax=vmax)
ax3.set_title("Model space solution*")
ax4.imshow((ddtec_star[0, :, i]).reshape((simulated_ddtec.M,simulated_ddtec.M)),vmin=vmin,vmax=vmax)
ax4.set_title("True model*")
plt.show()
error = np.sqrt(np.square(res.Y_imag_star[1, :, :, :, :]-simulated_ddtec.Y_imag_star[:, :, :, :]).mean(3).mean(2).mean(0))
plt.scatter(simulated_ddtec.nearest,error)
x = simulated_ddtec.nearest[:, None]
a, _, _, _ = np.linalg.lstsq(x, error)
plt.plot(x, a * x, 'r-')
plt.show()
error = np.sqrt(
np.square(res.Y_real_star[1, :, :, :, :] - simulated_ddtec.Y_real_star[:, :, :, :]).mean(3).mean(
2).mean(0))
plt.scatter(simulated_ddtec.nearest, error)
x = simulated_ddtec.nearest[:, None]
a, _, _, _ = np.linalg.lstsq(x, error)
plt.plot(x, a * x, 'r-')
plt.show()
# print(res)
cont = res.cont
```
#### File: bayes_filter/debug/hmc_sampler_simple.py
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from bayes_filter import float_type, TEC_CONV
from bayes_filter.misc import safe_cholesky, flatten_batch_dims, make_coord_array
def hmc_matrix_stepsizes_1D():
num_chains = 2
# config = tf.ConfigProto()
# config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
# tf_session = tf.Session(graph=tf.Graph(), config=config)
jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
tf_session = tf.Session(graph=tf.Graph())
with tf_session.graph.as_default():
X = tf.cast(tf.linspace(0., 10., 100), float_type)[:, None]
freqs = tf.cast(tf.linspace(100e6, 160e6, 2), float_type)
# with jit_scope():
kern = tfp.positive_semidefinite_kernels.ExponentiatedQuadratic(amplitude=tf.convert_to_tensor(3., float_type),
length_scale=tf.convert_to_tensor(0.5,
float_type),
feature_ndims=1)
K = kern.matrix(X, X)
L = safe_cholesky(K)
Ftrue = tf.matmul(L, tf.random.normal(shape=[tf.shape(K)[0], 1], dtype=float_type))
invfreqs = TEC_CONV * tf.math.reciprocal(freqs)
Yimag_true, Yreal_true = tf.sin(Ftrue * invfreqs), tf.cos(Ftrue * invfreqs)
Yimag = Yimag_true + 0.3 * tf.random_normal(shape=tf.shape(Yimag_true), dtype=float_type)
Yreal = Yreal_true + 0.3 * tf.random_normal(shape=tf.shape(Yreal_true), dtype=float_type)
outliers = np.zeros((100, 2))
outliers[np.random.choice(100, size=10, replace=False), :] = 3.
Yimag += tf.constant(outliers, float_type)
# Yreal += tf.constant(outliers, float_type)
###
# sampling
a = tf.Variable(0., dtype=float_type)
l = tf.Variable(0., dtype=float_type)
# with jit_scope():
kern = tfp.positive_semidefinite_kernels.ExponentiatedQuadratic(amplitude=3. * tf.exp(a),
length_scale=0.5 * tf.exp(l),
feature_ndims=1)
K = kern.matrix(X, X)
L = safe_cholesky(K)
def logp(log_y_sigma, f):
# with jit_scope():
prior = tfp.distributions.MultivariateNormalDiag(loc=tf.zeros_like(f),
scale_identity_multiplier=1.).log_prob(f)
y_sigma = tf.exp(log_y_sigma)
Yimag_model = tf.sin(tf.matmul(tf.tile(L[None, :, :], (num_chains, 1, 1)), f[:, :, None]) * invfreqs)
Yreal_model = tf.cos(tf.matmul(tf.tile(L[None, :, :], (num_chains, 1, 1)), f[:, :, None]) * invfreqs)
likelihood = tfp.distributions.Laplace(loc=Yimag[None, :, :], scale=y_sigma[:, :, None]).log_prob(
Yimag_model) \
+ tfp.distributions.Laplace(loc=Yreal[None, :, :], scale=y_sigma[:, :, None]).log_prob(
Yreal_model)
logp = tf.reduce_sum(likelihood, axis=[1, 2]) + prior
return logp
step_size = [tf.get_variable(
name='step_size',
initializer=lambda: tf.constant(0.001, dtype=float_type),
use_resource=True,
dtype=float_type,
trainable=False)]
###
hmc = tfp.mcmc.SimpleStepSizeAdaptation(tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=logp,
num_leapfrog_steps=2,
step_size=step_size,
state_gradients_are_stopped=True),
num_adaptation_steps=1000,
target_accept_prob=tf.constant(0.6, dtype=float_type),
adaptation_rate=0.05)
# Run the chain (with burn-in maybe).
# last state as initial point (mean of each chain)
# TODO: add trace once working without it
# TODO: let noise be a hmc param
def trace_fn(_, pkr):
return (pkr.inner_results.log_accept_ratio,
pkr.inner_results.accepted_results.step_size,
pkr.inner_results.accepted_results.target_log_prob)
init_state = [tf.constant(np.log(0.1 * np.ones((num_chains, 1))), float_type),
tf.zeros(tf.concat([[num_chains], tf.shape(Ftrue)[0:1]], axis=0), dtype=float_type)]
samples, (log_accept_ratio, stepsizes, target_log_prob) = tfp.mcmc.sample_chain( # ,
num_results=2000,
num_burnin_steps=1000,
trace_fn=trace_fn, # trace_step,
return_final_kernel_results=False,
current_state=init_state,
kernel=hmc,
parallel_iterations=10)
avg_acceptance_ratio = tf.reduce_mean(tf.exp(tf.minimum(log_accept_ratio, 0.)), name='avg_acc_ratio')
posterior_log_prob = tf.reduce_mean(tf.exp(target_log_prob), name='marginal_log_likelihood')
flat_samples = [flatten_batch_dims(s, -1) for s in samples]
transformed_y_sigma = tf.exp(flat_samples[0])
transformed_dtec = tf.matmul(L, flat_samples[1], transpose_b=True)
Yimag_post = tf.reduce_mean(tf.sin(invfreqs * transformed_dtec[:, :, None]), axis=1)
Yreal_post = tf.reduce_mean(tf.cos(invfreqs * transformed_dtec[:, :, None]), axis=1)
transformed_dtec = tf.reduce_mean(transformed_dtec, axis=1)
# saem_opt = tf.train.AdamOptimizer(1e-3).minimize(-posterior_log_prob,var_list=[a,l])
tf_session.run(tf.global_variables_initializer())
# for i in range(100):
times = tf_session.run(X[:, 0])
out = tf_session.run({
'dtec': transformed_dtec, 'y_sigma': transformed_y_sigma,
'avg_acceptance_ratio': avg_acceptance_ratio, 'posterior_log_prob': posterior_log_prob,
'Ftrue': Ftrue[:, 0],
'Yimag_true': Yimag_true,
'Yimag': Yimag,
'Yimag_post': Yimag_post})
print(out['y_sigma'])
import pylab as plt
import os
output_folder = os.path.abspath('hmc_debug_output_1D')
os.makedirs(output_folder, exist_ok=True)
plt.plot(times, out['dtec'], label='mean')
plt.plot(times, out['Ftrue'], label='true')
plt.legend()
plt.savefig(os.path.join(output_folder, 'dtec_vs_true.png'))
plt.close('all')
plt.plot(times, out['dtec'] - out['Ftrue'])
plt.savefig(os.path.join(output_folder, 'residuals.png'))
plt.close('all')
plt.plot(times, out['Yimag_true'], label='true')
plt.plot(times, out['Yimag_post'], label='post')
plt.plot(times, out['Yimag'], label='data')
plt.legend()
plt.savefig(os.path.join(output_folder, 'Yimag.png'))
plt.close('all')
plt.hist(out['y_sigma'].flatten(), bins=100, label='y_sigma')
plt.legend()
plt.savefig(os.path.join(output_folder, 'y_sigma.png'))
def hmc_matrix_stepsizes_2D():
num_chains = 2
# config = tf.ConfigProto()
# config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
# tf_session = tf.Session(graph=tf.Graph(), config=config)
jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
tf_session = tf.Session(graph=tf.Graph())
with tf_session.graph.as_default():
N = 15
Nf = 5
x = tf.cast(tf.linspace(0.,10.,N), float_type)[:,None]
x = tf_session.run(x)
X = tf.constant(make_coord_array(x,x,flat=True))
freqs = tf.cast(tf.linspace(100e6, 160e6, Nf),float_type)
# with jit_scope():
kern = tfp.positive_semidefinite_kernels.ExponentiatedQuadratic(amplitude=tf.convert_to_tensor(25.,float_type),
length_scale=tf.convert_to_tensor(1.,float_type),
feature_ndims=1)
K = kern.matrix(X, X)
L = safe_cholesky(K)
Ftrue = tf.matmul(L, tf.random.normal(shape=[tf.shape(K)[0], 1],dtype=float_type))
invfreqs = TEC_CONV*tf.math.reciprocal(freqs)
phase_true = Ftrue*invfreqs
Yimag_true, Yreal_true = tf.sin(phase_true), tf.cos(phase_true)
Yimag = Yimag_true + 0.07*tf.random_normal(shape=tf.shape(Yimag_true),dtype=float_type)
Yreal = Yreal_true + 0.07*tf.random_normal(shape=tf.shape(Yreal_true),dtype=float_type)
phase_data = tf.atan2(Yimag,Yreal)
mag_data = tf.sqrt(tf.square(Yimag) + tf.square(Yreal))
Yimag /= mag_data
Yreal /= mag_data
outliers = np.zeros((N*N, Nf))
outliers[np.random.choice(N*N, size=6, replace=False),:] = 1.
Yimag += tf.constant(outliers, float_type)
# Yreal += tf.constant(outliers, float_type)
###
# sampling
a = tf.Variable(0., dtype=float_type)
l = tf.Variable(0., dtype=float_type)
# with jit_scope():
kern = tfp.positive_semidefinite_kernels.ExponentiatedQuadratic(amplitude=1. * tf.exp(a),
length_scale=1. * tf.exp(l),
feature_ndims=1)
K = kern.matrix(X, X)
L = safe_cholesky(K)
L_sample = tf.tile(L[None, :, :], (num_chains, 1, 1))
def logp(log_y_sigma, log_amp, f):
# with jit_scope():
prior = tfp.distributions.MultivariateNormalDiag(loc=tf.zeros_like(f), scale_identity_multiplier=1.).log_prob(f)
y_sigma = tf.exp(log_y_sigma)
amp = tf.exp(log_amp)
L_sample_ = amp[:,:,None] * L_sample
Yimag_model = tf.sin(tf.matmul(L_sample_, f[:,:,None])*invfreqs)
Yreal_model = tf.cos(tf.matmul(L_sample_, f[:,:,None]) * invfreqs)
likelihood = tfp.distributions.Laplace(loc = Yimag[None, :, :], scale=y_sigma[:,:,None]).log_prob(Yimag_model) \
+ tfp.distributions.Laplace(loc = Yreal[None, :, :], scale=y_sigma[:,:,None]).log_prob(Yreal_model)
logp = tf.reduce_sum(likelihood, axis=[1,2]) + prior + tfp.distributions.Normal(tf.constant(0.07,dtype=float_type), tf.constant(0.1,dtype=float_type)).log_prob(y_sigma[:,0])
return logp
step_size = [tf.constant(1e-1, dtype=float_type)]
# tf.get_variable(
# name='step_size',
# initializer=lambda: tf.constant(0.001, dtype=float_type),
# use_resource=True,
# dtype=float_type,
# trainable=False)]
###
hmc = tfp.mcmc.SimpleStepSizeAdaptation(tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=logp,
num_leapfrog_steps=3,
step_size=step_size,
state_gradients_are_stopped=True),
num_adaptation_steps=2000,
target_accept_prob=tf.constant(0.6, dtype=float_type),
adaptation_rate=0.05)
# Run the chain (with burn-in maybe).
# last state as initial point (mean of each chain)
# TODO: add trace once working without it
# TODO: let noise be a hmc param
def trace_fn(_, pkr):
return (pkr.inner_results.log_accept_ratio,
pkr.inner_results.accepted_results.step_size,
pkr.inner_results.accepted_results.target_log_prob)
init_state = [tf.constant(np.log(0.1*np.ones((num_chains, 1))), float_type),
tf.constant(np.log(5. * np.ones((num_chains, 1))), float_type),
tf.zeros(tf.concat([[num_chains], tf.shape(Ftrue)[0:1]], axis=0), dtype=float_type)]
samples,(log_accept_ratio, stepsizes, target_log_prob) = tfp.mcmc.sample_chain(#,
num_results=10000,
num_burnin_steps=3000,
trace_fn=trace_fn, # trace_step,
return_final_kernel_results=False,
current_state=init_state,
kernel=hmc,
parallel_iterations=10)
avg_acceptance_ratio = tf.reduce_mean(tf.exp(tf.minimum(log_accept_ratio, 0.)), name='avg_acc_ratio')
posterior_log_prob = tf.reduce_mean((target_log_prob), name='marginal_log_likelihood')
flat_samples = [flatten_batch_dims(s,-1) for s in samples]
transformed_y_sigma = tf.exp(flat_samples[0])
transformed_amp = tf.exp(flat_samples[1])
transformed_dtec = transformed_amp[:,0]*tf.matmul(L, flat_samples[2],transpose_b=True)
phase_post = invfreqs*transformed_dtec[:,:,None]
Yimag_post = tf.reduce_mean(tf.sin(phase_post),axis=1)
Yreal_post = tf.reduce_mean(tf.cos(phase_post),axis=1)
phase_post = tf.atan2(Yimag_post, Yreal_post)
transformed_dtec = tf.reduce_mean(transformed_dtec,axis=1)
# saem_opt = tf.train.AdamOptimizer(1e-3).minimize(-posterior_log_prob,var_list=[a,l])
tf_session.run(tf.global_variables_initializer())
# for i in range(100):
times = x[:,0]
out = tf_session.run({
'dtec':tf.reshape(transformed_dtec, (N, N)),
'y_sigma':transformed_y_sigma,
'amp':transformed_amp,
'avg_acceptance_ratio':avg_acceptance_ratio,
'posterior_log_prob':posterior_log_prob,
'Ftrue':tf.reshape(Ftrue[:,0],(N,N)),
'Yimag_true':tf.reshape(Yimag_true,(N, N, Nf)),
'Yimag_data':tf.reshape(Yimag,(N,N,Nf)),
'Yimag_post':tf.reshape(Yimag_post,(N,N,Nf)),
'phase_true': tf.reshape(phase_true, (N, N, Nf)),
'phase_data': tf.reshape(phase_data, (N, N, Nf)),
'phase_post': tf.reshape(phase_post, (N, N, Nf))
})
print(out['avg_acceptance_ratio'])
print(out['posterior_log_prob'])
import pylab as plt
import os
output_folder = os.path.abspath('hmc_debug_output_2D')
os.makedirs(output_folder,exist_ok=True)
plt.imshow(out['dtec'])
plt.colorbar()
plt.savefig(os.path.join(output_folder,'dtec_post.png'))
plt.close('all')
plt.imshow(out['Ftrue'])
plt.colorbar()
plt.savefig(os.path.join(output_folder, 'dtec_true.png'))
plt.close('all')
plt.imshow(out['dtec']-out['Ftrue'])
plt.colorbar()
plt.savefig(os.path.join(output_folder, 'residuals.png'))
plt.close('all')
plt.imshow(out['Yimag_post'][:,:,0])
plt.colorbar()
plt.savefig(os.path.join(output_folder, 'Yimag_post.png'))
plt.close('all')
plt.imshow(out['Yimag_true'][:, :, 0])
plt.colorbar()
plt.savefig(os.path.join(output_folder, 'Yimag_true.png'))
plt.close('all')
plt.imshow(out['Yimag_data'][:, :, 0])
plt.colorbar()
plt.savefig(os.path.join(output_folder, 'Yimag_data.png'))
plt.close('all')
plt.imshow(out['Yimag_post'][:, :, 0] - out['Yimag_true'][:, :, 0])
plt.colorbar()
plt.savefig(os.path.join(output_folder, 'Yimag_post_true_res.png'))
plt.close('all')
plt.imshow(out['Yimag_post'][:, :, 0] - out['Yimag_data'][:, :, 0])
plt.colorbar()
plt.savefig(os.path.join(output_folder, 'Yimag_post_data_res.png'))
plt.close('all')
plt.imshow(out['phase_post'][:, :, 0])
plt.colorbar()
plt.savefig(os.path.join(output_folder, 'phase_post.png'))
plt.close('all')
plt.imshow(out['phase_true'][:, :, 0])
plt.colorbar()
plt.savefig(os.path.join(output_folder, 'phase_true.png'))
plt.close('all')
plt.imshow(out['phase_data'][:, :, 0])
plt.colorbar()
plt.savefig(os.path.join(output_folder, 'phase_data.png'))
plt.close('all')
plt.imshow(out['phase_post'][:, :, 0] - out['phase_true'][:, :, 0])
plt.colorbar()
plt.savefig(os.path.join(output_folder, 'phase_post_true_res.png'))
plt.close('all')
plt.imshow(out['phase_post'][:, :, 0] - out['phase_data'][:, :, 0])
plt.colorbar()
plt.savefig(os.path.join(output_folder, 'phase_post_data_res.png'))
plt.close('all')
plt.hist(out['y_sigma'].flatten(), bins=100, label='y_sigma')
plt.legend()
plt.savefig(os.path.join(output_folder, 'y_sigma.png'))
plt.close('all')
plt.hist(out['amp'].flatten(), bins=100, label='amp')
plt.legend()
plt.savefig(os.path.join(output_folder, 'amp.png'))
plt.close('all')
if __name__ == '__main__':
hmc_matrix_stepsizes_2D()
```
#### File: bayes_filter/debug/valuation.py
```python
import numpy as np
from scipy.integrate import odeint
import tensorflow as tf
from tensorflow.python.ops.parallel_for.gradients import jacobian
from collections import OrderedDict
class ODE(object):
def __init__(self, num_cities=1, num_clinic_sizes=1, num_clinic_specs=1):
self.shapes = OrderedDict(V=(1,),
discounted_cashflow=(1,),
discount_rate=(1,),
n_join=(num_cities, num_clinic_sizes, num_clinic_specs),
n_churn=(num_cities, num_clinic_sizes, num_clinic_specs),
gamma=(num_cities,),
s=(num_clinic_sizes,),
alpha=(num_cities, num_clinic_specs),
P_join=(num_cities, num_clinic_sizes, num_clinic_specs),
P_churn=(num_cities, num_clinic_sizes, num_clinic_specs),
N_total=(num_cities, num_clinic_sizes, num_clinic_specs),
burn_rate=(1,),
retension=(num_cities, num_clinic_sizes, num_clinic_specs),
frac_accessible=(num_cities, num_clinic_sizes, num_clinic_specs),
invested=(1,),
burned=(1,))
self._boosts = []
def boost(self, state, t_start, t_end, amount):
if state not in self.shapes.keys():
raise ValueError('{} invalid key'.format(state))
self._boosts.append((state, t_start, t_end, amount))
def _diff(self, t, V, discounted_cashflow, discount_rate, n_join, n_churn, gamma, s, alpha, P_join, P_churn,
N_total, burn_rate, retension, frac_accessible, invested, burned):
def _boost(tstart, tend, P_boost):
window = tend - tstart
mean = (tend + tstart) / 2.
return P_boost * (2 * np.pi * window ** 2) ** (-0.5) * tf.exp(-0.5 * (t - mean) ** 2 / window ** 2)
n = n_join - n_churn
clinic_revenue = tf.einsum("ijk,j,ik->ijk", n, s, alpha)
dVdt = tf.einsum("i,ijk->", gamma, clinic_revenue)
ddiscounted_cashflowdt = (dVdt - burn_rate) / (1. + discount_rate) ** t
ddiscount_ratedt = tf.zeros_like(discount_rate)
dburneddt = burn_rate
dinvesteddt = tf.zeros_like(invested)
N_untaken = N_total * frac_accessible - n_join
dn_joindt = P_join * N_untaken
n_perm = retension * n_join
n_non_perm = n - n_perm
dn_churndt = P_churn * n_non_perm
dsdt = tf.zeros_like(s)
dalphadt = tf.zeros_like(alpha)
dgammadt = tf.zeros_like(gamma)
dP_joindt = tf.zeros_like(P_join)
dP_churndt = tf.zeros_like(P_churn)
dN_totaldt = (0.024 / 12.) * N_total
dburn_ratedt = tf.zeros_like(burn_rate)
dretensiondt = tf.zeros_like(retension)
dfrac_accessibledt = tf.zeros_like(frac_accessible)
augmented_out = OrderedDict(V=dVdt,
discounted_cashflow=ddiscounted_cashflowdt,
discount_rate=ddiscount_ratedt,
n_join=dn_joindt,
n_churn=dn_churndt,
gamma=dgammadt,
s=dsdt,
alpha=dalphadt,
P_join=dP_joindt,
P_churn=dP_churndt,
N_total=dN_totaldt,
burn_rate=dburn_ratedt,
retension=dretensiondt,
frac_accessible=dfrac_accessibledt,
invested=dinvesteddt,
burned=dburneddt
)
for b in self._boosts:
augmented_out[b[0]] = augmented_out[b[0]] + _boost(b[1], b[2], b[3])
return list(augmented_out.values())
@property
def state_size(self):
return np.sum([np.prod(shape) for shape in self.shapes.values()])
@property
def state_names(self):
return list(self.shapes.keys())
def get_derivative_and_jacobian_func(self, sess):
state_pl = tf.placeholder(tf.float64, shape=self.state_size, name='state_pl')
t_pl = tf.placeholder(tf.float64, shape=(), name='t_pl')
out_derivative = self.derivative(t_pl, state_pl)
out_jacobian = jacobian(out_derivative, state_pl, use_pfor=True, parallel_iterations=10)
def diff_func(t_np, state_np):
return sess.run(out_derivative, feed_dict={t_pl: t_np, state_pl: state_np})
def jac_func(t_np, state_np):
return sess.run(out_jacobian, feed_dict={t_pl: t_np, state_pl: state_np})
return diff_func, jac_func
def derivative(self, t, state):
with tf.variable_scope("derivative") as scope:
idx = 0
split = {}
for key, shape in self.shapes.items():
m = np.prod(shape)
split[key] = tf.cast(tf.reshape(state[idx:idx + m], shape), tf.float64)
idx += m
def _merge(*D):
res = tf.concat([tf.reshape(d, (-1,)) for d in D], axis=0)
return res
return _merge(*self._diff(t=t, **split))
def odeint(self, init_state, time_array, sess):
diff_func, jac_func = self.get_derivative_and_jacobian_func(sess)
out = odeint(diff_func, init_state, time_array, Dfun=jac_func, tfirst=True)
out_dict = OrderedDict()
jdx = 0
for i, (k, v) in enumerate(self.shapes.items()):
size = np.prod(v)
out_dict[k] = np.reshape(out[:, jdx:jdx + size], (-1,) + v)
jdx += size
return out_dict
def run(time_array):
with tf.Session(graph=tf.Graph()) as sess:
ode = ODE(num_cities=1, num_clinic_sizes=1, num_clinic_specs=1)
V = 0.
discounted_cashflow = 0.
discount_rate = 0.05
n_join = 0.
n_churn = 0.
gamma = 0.01 # proportion of clinic revenue
s = 3 # physios per clinic
alpha = 6.5 # 1000$ / month
P_join = np.random.uniform(0.05, 0.15) # prob of someone new joining in a month
P_churn = np.random.uniform(0.05, 0.15) # prob of someone with churning in a month
N_total = 1.2 / 37. * 20e3 # total number of physios
burn_rate = 49.5
retension = np.random.uniform(0.25, 0.65)
frac_accessible = 0.
invested = 335.
burned = 299.
init_state = np.array(
[V, discounted_cashflow, discount_rate, n_join, n_churn, gamma, s, alpha, P_join, P_churn, N_total,
burn_rate, retension, frac_accessible, invested, burned])
ode.boost('frac_accessible', 4., 6., np.random.uniform(0.25, 0.5))
ode.boost('invested', 0., 2., 300.)
ode.boost('N_total', 6., 10., np.random.uniform(10e3, 20e3))
out = ode.odeint(init_state, time_array, sess)
return out
import pylab as plt
from IPython import display
from dask.multiprocessing import get
if __name__ == '__main__':
ode = ODE()
fig, ax = plt.subplots(1, 1)
dsk = {}
time_array = np.linspace(0, 12 * 1, 200)
N = 1000
labels = False
fig, axs = plt.subplots(18, 1, figsize=(6, 20 * 3))
for j in range(N):
        dsk[j] = (run, time_array)
results = get(dsk, list(range(N)), num_workers=64)
for j in range(N):
out = results[j]
c = 'blue'
ax = axs[0]
ax.plot(time_array, out['V'] + out['invested'] - out['burned'], label='Total Value', alpha=0.1, c=c)
if j == 0:
ax.legend()
ax = axs[1]
ax.plot(time_array, out['V'], label='Value', ls='-', alpha=0.1, c=c)
ax.plot(time_array, out['invested'], label='invested', ls='dashed', alpha=0.1, c=c)
ax.plot(time_array, out['burned'], label='burned', alpha=0.1, ls='dotted', c=c)
if j == 0:
ax.legend()
for i, name in enumerate(ode.state_names):
ax = axs[i + 2]
ax.plot(time_array, out[name].flatten(), label=name, alpha=0.1, c=c)
if j == 0:
ax.legend()
# display.clear_output(wait=True)
# display.display(plt.gcf())
plt.show()
# import pylab as plt
# from IPython import display
#
# fig, ax = plt.subplots(1, 1)
#
# results = []
#
# time_array = np.linspace(0, 12 * 1, 200)
#
# N = 100
# labels = False
# fig, axs = plt.subplots(18, 1, figsize=(6, 20 * 3))
# for j in range(N):
# with tf.Session(graph=tf.Graph()) as sess:
# ode = ODE(num_cities=1, num_clinic_sizes=1, num_clinic_specs=1)
#
# V = 0.
# discounted_cashflow = 0.
# discount_rate = 0.05
# n_join = 0.
# n_churn = 0.
# gamma = 0.01 # proportion of clinic revenue
# s = 3 # physios per clinic
# alpha = 6.5 # 1000$ / month
# P_join = np.random.uniform(0.05, 0.15) # prob of someone new joining in a month
# P_churn = np.random.uniform(0.05, 0.15) # prob of someone with churning in a month
# N_total = 1.2 / 37. * 20e3 # total number of physios
# burn_rate = 49.5
# retension = np.random.uniform(0.25, 0.65)
# frac_accessible = 0.
# invested = 335.
# burned = 299.
#
# init_state = np.array(
# [V, discounted_cashflow, discount_rate, n_join, n_churn, gamma, s, alpha, P_join, P_churn, N_total,
# burn_rate, retension, frac_accessible, invested, burned])
#
# ode.boost('frac_accessible', 4., 6., np.random.uniform(0.25, 0.5))
# ode.boost('invested', 0., 2., 300.)
# ode.boost('N_total', 6., 10., np.random.uniform(10e3, 20e3))
#
# out = ode.odeint(init_state, time_array, sess)
# results.append(out)
#
# c = 'blue'
#
# ax = axs[0]
# ax.plot(time_array, out['V'] + out['invested'] - out['burned'], label='Total Value', alpha=0.1, c=c)
# if j == 0:
# ax.legend()
#
# ax = axs[1]
# ax.plot(time_array, out['V'], label='Value', ls='-', alpha=0.1, c=c)
# ax.plot(time_array, out['invested'], label='invested', ls='dashed', alpha=0.1, c=c)
# ax.plot(time_array, out['burned'], label='burned', alpha=0.1, ls='dotted', c=c)
# if j == 0:
# ax.legend()
#
# for i, name in enumerate(ode.state_names):
# ax = axs[i + 2]
# ax.plot(time_array, out[name].flatten(), label=name, alpha=0.1, c=c)
# if j == 0:
# ax.legend()
#
# display.clear_output(wait=True)
# display.display(plt.gcf())
#
# # plt.show()
```
#### File: bayes_filter/notebooks/reentry_in_pyfunc.py
```python
import tensorflow as tf
"""
This demonstrates that tf.py_function allows re-entrant use of a Session
(calling sess.run from inside the py_function body), which is consistent with
Sessions being thread-safe.
"""
if __name__ == '__main__':
def tf_func(sess):
def func(x):
with sess.graph.as_default():
y = x.numpy()
print("numpy",y)
return sess.run(tf.constant(y))
return func
with tf.Session(graph=tf.Graph()) as sess:
x = tf.constant(0.)
p = tf.stack([tf.py_function(tf_func(sess),[x],[x.dtype]),
tf.py_function(tf_func(sess), [x+1.], [x.dtype]),
tf.py_function(tf_func(sess), [x+2.], [x.dtype])])
print(sess.run(p))
``` |
{
"source": "Joshuaalbert/bayes_gain_screens",
"score": 2
} |
#### File: bayes_gain_screens/bayes_gain_screens/nn_tools.py
```python
import tensorflow as tf
from graph_nets.graphs import GraphsTuple
from graph_nets import utils_tf, blocks
import tqdm
import sonnet as snt
from sonnet.src.base import Optimizer, Module
import numpy as np
import six
import abc
import contextlib
from typing import List
import os
@six.add_metaclass(abc.ABCMeta)
class AbstractModule(snt.Module):
"""Makes Sonnet1-style childs from this look like a Sonnet2 module."""
def __init__(self, *args, **kwargs):
super(AbstractModule, self).__init__(*args, **kwargs)
self.__call__.__func__.__doc__ = self._build.__doc__ # pytype: disable=attribute-error
# In snt2 calls to `_enter_variable_scope` are ignored.
@contextlib.contextmanager
def _enter_variable_scope(self, *args, **kwargs):
yield None
def __call__(self, *args, **kwargs):
return self._build(*args, **kwargs)
@abc.abstractmethod
def _build(self, *args, **kwargs):
"""Similar to Sonnet 1 ._build method."""
class TrainOneEpoch(Module):
_model:AbstractModule
_opt:Optimizer
def __init__(self, model:AbstractModule, loss, opt:Optimizer, strategy:tf.distribute.MirroredStrategy=None, name=None):
super(TrainOneEpoch, self).__init__(name=name)
self.epoch = tf.Variable(0, dtype=tf.int64)
self.minibatch = tf.Variable(0, dtype=tf.int64)
self._model = model
self._model.step = self.minibatch
self._opt = opt
self._loss = loss
self._strategy = strategy
self._checkpoint = tf.train.Checkpoint(module=model)
@property
def strategy(self) -> tf.distribute.MirroredStrategy:
return self._strategy
@property
def model(self):
return self._model
@property
def opt(self):
return self._opt
def loss(self, model_output, batch):
return self._loss(model_output, batch)
def train_step(self, batch):
"""
Trains on a single batch.
Args:
batch: user defined batch from a dataset.
Returns:
loss
"""
with tf.GradientTape() as tape:
model_output = self.model(batch)
loss = self.loss(model_output, batch)
params = self.model.trainable_variables
grads = tape.gradient(loss, params)
if self.strategy is not None:
replica_ctx = tf.distribute.get_replica_context()
grads = replica_ctx.all_reduce("mean", grads)
for (param, grad) in zip(params, grads):
if grad is not None:
tf.summary.histogram(param.name+"_grad",grad, step=self.minibatch)
self.opt.apply(grads, params)
return loss
def one_epoch_step(self, train_dataset):
"""
        Updates the model with one epoch of training over `train_dataset`.
        Args:
            train_dataset: dataset (or distributable dataset) of training batches.
        Returns:
            average loss over the epoch
"""
self.epoch.assign_add(1)
# metrics = None
loss = 0.
num_batches = 0.
if self.strategy is not None:
train_dataset = self.strategy.experimental_distribute_dataset(train_dataset)
for train_batch in train_dataset:
self.minibatch.assign_add(1)
if self.strategy is not None:
_loss = self.strategy.run(self.train_step, args=(train_batch,))
_loss = self.strategy.reduce("sum", _loss, axis=None)
else:
_loss = self.train_step(train_batch)
tf.summary.scalar('mini_batch_loss',_loss, step=self.minibatch)
loss += _loss
num_batches += 1.
tf.summary.scalar('epoch_loss', loss/num_batches, step=self.epoch)
return loss/num_batches
def evaluate(self, test_dataset):
loss = 0.
num_batches = 0.
if self.strategy is not None:
test_dataset = self.strategy.experimental_distribute_dataset(test_dataset)
for test_batch in test_dataset:
if self.strategy is not None:
model_output = self.strategy.run(self.model, args=(test_batch,))
_loss = self.strategy.run(self.loss, args=(model_output, test_batch))
loss += self.strategy.reduce("sum", _loss, axis=0)
else:
model_output = self.model(test_batch)
loss += self.loss(model_output, test_batch)
num_batches += 1.
tf.summary.scalar('loss', loss / num_batches, step=self.epoch)
return loss / num_batches
def get_distribution_strategy(use_cpus=True, logical_per_physical_factor=1, memory_limit=2000) -> tf.distribute.MirroredStrategy:
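    """
    Builds an snt.distribute.Replicator strategy over the available GPUs, or over CPUs
    when `use_cpus` is True or no GPU is present. Each physical device can optionally be
    split into `logical_per_physical_factor` logical devices (GPUs get `memory_limit` MB each).
    """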
# trying to set GPU distribution
physical_gpus = tf.config.experimental.list_physical_devices("GPU")
physical_cpus = tf.config.experimental.list_physical_devices("CPU")
if len(physical_gpus) > 0 and not use_cpus:
print("Physical GPUS: {}".format(physical_gpus))
if logical_per_physical_factor > 1:
for dev in physical_gpus:
tf.config.experimental.set_virtual_device_configuration(
dev,
[tf.config.experimental.VirtualDeviceConfiguration(
memory_limit=memory_limit)] * logical_per_physical_factor
)
gpus = tf.config.experimental.list_logical_devices("GPU")
print("Logical GPUs: {}".format(gpus))
strategy = snt.distribute.Replicator(
["/device:GPU:{}".format(i) for i in range(len(gpus))],
tf.distribute.ReductionToOneDevice("GPU:0"))
else:
print("Physical CPUS: {}".format(physical_cpus))
if logical_per_physical_factor > 1:
for dev in physical_cpus:
tf.config.experimental.set_virtual_device_configuration(
dev,
[tf.config.experimental.VirtualDeviceConfiguration()] * logical_per_physical_factor
)
cpus = tf.config.experimental.list_logical_devices("CPU")
print("Logical CPUs: {}".format(cpus))
strategy = snt.distribute.Replicator(
["/device:CPU:{}".format(i) for i in range(len(cpus))],
tf.distribute.ReductionToOneDevice("CPU:0"))
return strategy
def _round(v, last_v):
if last_v is None:
uncert_v = v
else:
uncert_v = abs(v - last_v)
sig_figs = -int("{:e}".format(uncert_v).split('e')[1]) + 1
return round(float(v), sig_figs)
def vanilla_training_loop(train_one_epoch:TrainOneEpoch, training_dataset, test_dataset=None, num_epochs=1,
early_stop_patience=None, checkpoint_dir=None, log_dir=None, debug=False):
"""
    Runs a simple training loop with optional per-epoch evaluation, early stopping and checkpointing.
    Args:
        train_one_epoch: TrainOneEpoch wrapping the model, loss and optimizer.
        training_dataset: Dataset for training.
        test_dataset: optional Dataset for evaluation after each epoch.
        num_epochs: how many epochs to train.
        early_stop_patience: stop after this many epochs without improvement of the test loss.
        checkpoint_dir: where to save checkpoints.
        log_dir: where to write TensorBoard summaries (must be provided).
        debug: bool, if True run eagerly (skip tf.function compilation) for easier debugging.
"""
if checkpoint_dir is not None:
os.makedirs(checkpoint_dir,exist_ok=True)
training_dataset = training_dataset.prefetch(tf.data.experimental.AUTOTUNE)#.cache()
if test_dataset is not None:
test_dataset = test_dataset.prefetch(tf.data.experimental.AUTOTUNE)#.cache()
# We'll turn the one_epoch_step function which updates our models into a tf.function using
# autograph. This makes train_one_epoch much faster. If debugging, you can turn this
# off by setting `debug = True`.
step = train_one_epoch.one_epoch_step
evaluate = train_one_epoch.evaluate
if not debug:
step = tf.function(step)
evaluate = tf.function(evaluate)
fancy_progress_bar = tqdm.tqdm(range(num_epochs),
unit='epochs',
position=0)
early_stop_min_loss = np.inf
early_stop_interval = 0
train_log_dir = os.path.join(log_dir,"train")
test_log_dir = os.path.join(log_dir,"test")
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_summary_writer = tf.summary.create_file_writer(test_log_dir)
checkpoint = tf.train.Checkpoint(module=train_one_epoch)
manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir, max_to_keep=3,
checkpoint_name=train_one_epoch.model.__class__.__name__)
if manager.latest_checkpoint is not None:
checkpoint.restore(manager.latest_checkpoint)
print(f"Restored from {manager.latest_checkpoint}")
last_loss = None
last_test_loss = None
for step_num in fancy_progress_bar:
with train_summary_writer.as_default():
loss = step(iter(training_dataset))
tqdm.tqdm.write(
'\nEpoch = {}/{} (loss = {})'.format(
train_one_epoch.epoch.numpy(), num_epochs, _round(loss,last_loss)))
last_loss = loss
if test_dataset is not None:
with test_summary_writer.as_default():
test_loss = evaluate(iter(test_dataset))
tqdm.tqdm.write(
'\n\t(Test loss = {})'.format(_round(test_loss,last_test_loss)))
last_test_loss = test_loss
if early_stop_patience is not None:
if test_loss <= early_stop_min_loss:
early_stop_min_loss = test_loss
early_stop_interval = 0
manager.save()
else:
early_stop_interval += 1
if early_stop_interval == early_stop_patience:
tqdm.tqdm.write(
'\n\tStopping Early')
break
else:
manager.save()
else:
manager.save()
train_summary_writer.close()
test_summary_writer.close()
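# Usage sketch (illustrative; the model, loss function and datasets are
# placeholders supplied by the caller, not part of the original file):
def _example_training_run(model, loss_fn, training_dataset, test_dataset):
    train_one_epoch = TrainOneEpoch(model, loss_fn, snt.optimizers.Adam(1e-3))
    vanilla_training_loop(train_one_epoch, training_dataset,
                          test_dataset=test_dataset, num_epochs=10,
                          early_stop_patience=3, checkpoint_dir='./checkpoints',
                          log_dir='./logs')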
```
#### File: bayes_gain_screens/bayes_gain_screens/screen_solvers.py
```python
from astropy import coordinates as ac, units as au
from jax import numpy as jnp, jit, random, vmap
from jax._src.scipy.linalg import solve_triangular
from timeit import default_timer
from bayes_gain_screens.frames import ENU
from bayes_gain_screens.tomographic_kernel import TomographicKernel
from bayes_gain_screens.utils import make_coord_array, axes_move
from bayes_gain_screens.plotting import plot_vornoi_map
from h5parm import DataPack
from jaxns import NestedSampler, plot_diagnostics, plot_cornerplot
from jaxns.gaussian_process import RBF, M32, M12, M52
from jaxns.prior_transforms import UniformPrior, PriorChain, DeltaPrior
from jaxns.utils import chunked_pmap, marginalise_static, summary
from jax.scipy.ndimage import map_coordinates
import pylab as plt
def log_normal_with_outliers(x, mean, cov, sigma):
"""
Computes log-Normal density with outliers removed.
Args:
x: RV value
mean: mean of Gaussian
cov: covariance of underlying, minus the obs. covariance
sigma: stddev's of obs. error, inf encodes an outlier.
    Returns: the log-Normal density over the points with finite obs. error stddev (inf entries are excluded).
"""
C = cov / (sigma[:, None] * sigma[None, :]) + jnp.eye(cov.shape[0])
L = jnp.linalg.cholesky(C)
Ls = sigma[:, None] * L
log_det = jnp.sum(jnp.where(jnp.isinf(sigma), 0., jnp.log(jnp.diag(Ls))))
dx = (x - mean)
dx = solve_triangular(L, dx / sigma, lower=True)
maha = dx @ dx
log_likelihood = -0.5 * jnp.sum(~jnp.isinf(sigma)) * jnp.log(2. * jnp.pi) \
- log_det \
- 0.5 * maha
return log_likelihood
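# Minimal usage sketch (illustrative, not part of the original module): an infinite
# obs. stddev flags a point as an outlier, removing it from both the Mahalanobis
# term and the log-determinant.
def _example_log_normal_with_outliers():
    x = jnp.array([0.1, 5.0, -0.2])
    cov = 0.5 * jnp.eye(3)
    sigma = jnp.array([0.1, jnp.inf, 0.1])  # middle point treated as an outlier
    return log_normal_with_outliers(x, jnp.zeros(3), cov, sigma)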
def build_lookup_index(*arrays):
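    """
    Builds a multilinear-interpolation lookup over a dense table `values` whose axes
    are indexed by the given 1D coordinate arrays.
    """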
def linear_lookup(values, *coords):
fractional_coordinates = jnp.asarray([jnp.interp(coord, array, jnp.arange(array.size))
for array, coord in zip(arrays, coords)])
return map_coordinates(values, fractional_coordinates, order=1)
return linear_lookup
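# Illustrative sketch (not from the original file): interpolate a 2D table defined
# on (x_grid, y_grid) at an off-grid point.
def _example_build_lookup_index():
    x_grid = jnp.linspace(0., 1., 5)
    y_grid = jnp.linspace(0., 2., 9)
    values = x_grid[:, None] * y_grid[None, :]
    lookup = build_lookup_index(x_grid, y_grid)
    return lookup(values, 0.25, 1.1)  # == 0.275 by bilinearity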
def precompute_log_prob_components_with_wind(kernel, X, dtec, dtec_uncert,
bottom_array, width_array, lengthscale_array, sigma_array, east_wind_speed_array,
north_wind_speed_array,
chunksize=2):
"""
    Precompute the data log-probability on a grid of kernel hyperparameters (with wind).
    Args:
        kernel: TomographicKernel used to build the covariance matrices.
        X: [N, ndims] coordinates passed to the kernel.
        dtec: [M, N] blocks of DDTEC data.
        dtec_uncert: [M, N] DDTEC uncertainties (inf marks an outlier).
        bottom_array, width_array, lengthscale_array, sigma_array,
        east_wind_speed_array, north_wind_speed_array: 1D grids of hyperparameter values.
    Returns:
        log_prob of shape [M, Nb, Nw, Nl, Ne, Nn, Ns].
"""
arrays = jnp.meshgrid(bottom_array, width_array, lengthscale_array,
east_wind_speed_array, north_wind_speed_array, indexing='ij')
arrays = [a.ravel() for a in arrays]
def compute_log_prob_components(bottom, width, lengthscale,
east_wind_speed, north_wind_speed):
wind_velocity = jnp.asarray([east_wind_speed, north_wind_speed, 0.])
# N, N
K = kernel(X, X, bottom, width, lengthscale, 1., wind_velocity=wind_velocity)
def _compute_with_sigma(sigma):
def _compute(dtec, dtec_uncert):
return log_normal_with_outliers(dtec, 0., sigma**2 * K, dtec_uncert)
# M
return chunked_pmap(_compute, dtec, dtec_uncert, chunksize=1)
# Ns,M
return chunked_pmap(_compute_with_sigma, sigma_array, chunksize=1)
Nb = bottom_array.shape[0]
Nw = width_array.shape[0]
Nl = lengthscale_array.shape[0]
Ne = east_wind_speed_array.shape[0]
Nn = north_wind_speed_array.shape[0]
Ns = sigma_array.shape[0]
# Nb*Nw*Nl*Ne*Nn,Ns,M
log_prob = chunked_pmap(compute_log_prob_components, *arrays, chunksize=chunksize)
# M, Nb,Nw,Nl,Ne,Nn,Ns
log_prob = log_prob.reshape((Nb * Nw * Nl * Ne * Nn * Ns, dtec.shape[0])).transpose((1, 0)).reshape(
(dtec.shape[0], Nb, Nw, Nl, Ne, Nn, Ns))
return log_prob
def precompute_log_prob_components_without_wind(kernel, X, dtec, dtec_uncert,
bottom_array, width_array, lengthscale_array, sigma_array,
chunksize=2):
"""
    Precompute the data log-probability on a grid of kernel hyperparameters (no wind).
    Args:
        kernel: TomographicKernel used to build the covariance matrices.
        X: [N, ndims] coordinates passed to the kernel.
        dtec: [M, N] blocks of DDTEC data.
        dtec_uncert: [M, N] DDTEC uncertainties (inf marks an outlier).
        bottom_array, width_array, lengthscale_array, sigma_array: 1D grids of hyperparameter values.
    Returns:
        log_prob of shape [M, Nb, Nw, Nl, Ns].
"""
arrays = jnp.meshgrid(bottom_array, width_array, lengthscale_array, indexing='ij')
arrays = [a.ravel() for a in arrays]
def compute_log_prob_components(bottom, width, lengthscale):
# N, N
K = kernel(X, X, bottom, width, lengthscale, 1., wind_velocity=None)
def _compute_with_sigma(sigma):
def _compute(dtec, dtec_uncert):
return log_normal_with_outliers(dtec, 0., sigma**2 * K, dtec_uncert)
# M
return chunked_pmap(_compute, dtec, dtec_uncert, chunksize=1)
# Ns,M
return chunked_pmap(_compute_with_sigma, sigma_array, chunksize=1)
Nb = bottom_array.shape[0]
Nw = width_array.shape[0]
Nl = lengthscale_array.shape[0]
Ns = sigma_array.shape[0]
# Nb*Nw*Nl,Ns,M
log_prob = chunked_pmap(compute_log_prob_components, *arrays, chunksize=chunksize)
# M, Nb,Nw,Nl,Ns
log_prob = log_prob.reshape((Nb * Nw * Nl * Ns, dtec.shape[0])).transpose((1, 0)).reshape(
(dtec.shape[0], Nb, Nw, Nl, Ns))
return log_prob
def solve_with_tomographic_kernel(dtec, dtec_uncert, X, x0, fed_kernel, time_block_size):
"""
Precompute look-up tables for all blocks.
Assumes that each antenna is independent and doesn't take into account time.
Args:
dtec: [Nd, Na, Nt]
dtec_uncert: [Nd, Na, Nt]
X: [Nd,6]
x0: [3]
fed_kernel: StationaryKernel
time_block_size: int
"""
scale = jnp.std(dtec) / 35.
dtec /= scale
dtec_uncert /= scale
bottom_array = jnp.linspace(200., 400., 5)
width_array = jnp.linspace(50., 50., 1)
lengthscale_array = jnp.linspace(0.1, 7.5, 7)
sigma_array = jnp.linspace(0.1, 2., 11)
kernel = TomographicKernel(x0, x0, fed_kernel, S_marg=25, compute_tec=False)
lookup_func = build_lookup_index(bottom_array, width_array, lengthscale_array, sigma_array)
Nd,Na,Nt = dtec.shape
    remainder = Nt % time_block_size
    extra = time_block_size - remainder
    dtec = jnp.concatenate([dtec, dtec[:, :, Nt - extra:]], axis=-1)
    dtec_uncert = jnp.concatenate([dtec_uncert, dtec_uncert[:, :, Nt - extra:]], axis=-1)
Nt = dtec.shape[-1]
dtec = dtec.transpose((2,1, 0)).reshape((Nt*Na, Nd))
dtec_uncert = dtec_uncert.transpose((2,1,0)).reshape((Nt*Na, Nd))
# Nt*Na, ...
log_prob = precompute_log_prob_components_without_wind(kernel, X, dtec, dtec_uncert,
bottom_array, width_array, lengthscale_array, sigma_array,
chunksize=4)
    log_prob = jnp.reshape(log_prob, (Nt // time_block_size, time_block_size, Na) + log_prob.shape[1:])
def run_block(block_idx):
def log_likelihood(bottom, width, lengthscale, sigma, **kwargs):
return jnp.sum(vmap(lambda log_prob: lookup_func(log_prob, bottom, width, lengthscale, sigma)
)(log_prob[block_idx]))
bottom = UniformPrior('bottom', bottom_array.min(), bottom_array.max())
width = DeltaPrior('width', 50., tracked=False)
lengthscale = UniformPrior('lengthscale', jnp.min(lengthscale_array), jnp.max(lengthscale_array))
sigma = UniformPrior('sigma', sigma_array.min(), sigma_array.max())
prior_chain = PriorChain(lengthscale, sigma, bottom, width)
ns = NestedSampler(loglikelihood=log_likelihood,
prior_chain=prior_chain,
sampler_name='slice',
sampler_kwargs=dict(num_slices=prior_chain.U_ndims * 5),
num_live_points=prior_chain.U_ndims * 50)
ns = jit(ns)
results = ns(random.PRNGKey(42), termination_frac=0.001)
return results
# results.efficiency.block_until_ready()
# t0 = default_timer()
# results = ns(random.PRNGKey(42), termination_frac=0.001)
# summary(results)
# print(default_timer() - t0)
# def screen(bottom, lengthscale, east_wind_speed, north_wind_speed, sigma, **kw):
# wind_velocity = jnp.asarray([east_wind_speed, north_wind_speed, 0.])
# K = kernel(X, X, bottom, 50., lengthscale, sigma, wind_velocity=wind_velocity)
# Kstar = kernel(X, Xstar, bottom, 50., lengthscale, sigma)
# L = jnp.linalg.cholesky(K + jnp.diag(jnp.maximum(1e-6, dtec_uncert) ** 2))
# dx = solve_triangular(L, dtec, lower=True)
# return solve_triangular(L, Kstar, lower=True).T @ dx
# summary(results)
# plot_diagnostics(results)
# plot_cornerplot(results)
# screen_mean = marginalise_static(random.PRNGKey(4325325), results.samples, results.log_p, int(results.ESS), screen)
# print(screen_mean)
# plot_vornoi_map(Xstar[:, 3:5], screen_mean)
# plt.show()
# plot_vornoi_map(X[:, 3:5], dtec)
# plt.show()
# return screen_mean
results = chunked_pmap(run_block, jnp.arange(Nt//time_block_size))
def solve_with_vanilla_kernel(key, dtec, dtec_uncert, X, Xstar, fed_kernel, time_block_size, chunksize):
"""
    Solves for the ionospheric screen per (antenna, time block) with a stationary kernel,
    marginalising the kernel hyperparameters with nested sampling over precomputed look-up tables.
Args:
key: PRNG key
dtec: [Nd, Na, Nt] TECU
dtec_uncert: [Nd, Na, Nt] TECU
X: [Nd,2] coordinates in deg
Xstar: [Nd_screen, 2] screen coordinates
fed_kernel: StationaryKernel
time_block_size: int
        chunksize: int number of parallel devices to use.
    Returns:
        (mean, uncert) on the screen directions with shape [Nd_screen, Na, Nt], plus per-solve
        lengthscale, sigma, ESS, logZ and likelihood evaluation counts with shape [Na, Nt].
"""
field_of_view = 4. #deg
    min_separation_arcmin = 4. # arcmin
min_separation_deg = min_separation_arcmin / 60.
lengthscale_array = jnp.linspace(min_separation_deg, field_of_view, 120)
sigma_array = jnp.linspace(0., 150., 150)
kernel = fed_kernel
lookup_func = build_lookup_index(lengthscale_array, sigma_array)
dtec_uncert = jnp.maximum(dtec_uncert, 1e-6)
Nd,Na,Nt = dtec.shape
remainder = Nt % time_block_size
extra = time_block_size - remainder
dtec = jnp.concatenate([dtec,dtec[:, :,Nt-extra:]], axis=-1)
dtec_uncert = jnp.concatenate([dtec_uncert,dtec_uncert[:, :,Nt-extra:]], axis=-1)
Nt = dtec.shape[-1]
size_dict = dict(a=Na, d=Nd, b=time_block_size)
dtec = axes_move(dtec, ['d','a','tb'], ['atb','d'],size_dict=size_dict)
dtec_uncert = axes_move(dtec_uncert, ['d','a','tb'], ['atb', 'd'],size_dict=size_dict)
def compute_log_prob_components(lengthscale):
# N, N
K = kernel(X, X, lengthscale, 1.)
def _compute_with_sigma(sigma):
def _compute(dtec, dtec_uncert):
#each [Nd]
return log_normal_with_outliers(dtec, 0., sigma ** 2 * K, dtec_uncert)
return chunked_pmap(_compute, dtec, dtec_uncert, chunksize=1)#M
# Ns,M
return chunked_pmap(_compute_with_sigma, sigma_array, chunksize=1)
# Nl,Ns,M
log_prob = chunked_pmap(compute_log_prob_components, lengthscale_array, chunksize=chunksize)
# Na * (Nt//time_block_size),block_size,Nl,Ns
log_prob = axes_move(log_prob, ['l','s','atb'],['at', 'b', 'l','s'], size_dict=size_dict)
# Na * (Nt//time_block_size),Nl,Ns
log_prob = jnp.sum(log_prob, axis=1)#independent datasets summed up.
def run_block(key, dtec, dtec_uncert, log_prob):
key1, key2 = random.split(key, 2)
def log_likelihood(lengthscale, sigma, **kwargs):
# K = kernel(X, X, lengthscale, sigma)
# def _compute(dtec, dtec_uncert):
# #each [Nd]
# return log_normal_with_outliers(dtec, 0., K, jnp.maximum(1e-6, dtec_uncert))
# return chunked_pmap(_compute, dtec, dtec_uncert, chunksize=1).sum()
return lookup_func(log_prob, lengthscale, sigma)
lengthscale = UniformPrior('lengthscale', jnp.min(lengthscale_array), jnp.max(lengthscale_array))
sigma = UniformPrior('sigma', sigma_array.min(), sigma_array.max())
prior_chain = PriorChain(lengthscale, sigma)
ns = NestedSampler(loglikelihood=log_likelihood,
prior_chain=prior_chain,
sampler_kwargs=dict(num_slices=prior_chain.U_ndims * 1),
num_live_points=prior_chain.U_ndims * 50)
ns = jit(ns)
results = ns(key1, termination_evidence_frac=0.1)
def marg_func(lengthscale, sigma, **kwargs):
def screen(dtec, dtec_uncert, **kw):
K = kernel(X, X, lengthscale, sigma)
Kstar = kernel(X, Xstar, lengthscale, sigma)
L = jnp.linalg.cholesky(K/(dtec_uncert[:,None]*dtec_uncert[None,:]) + jnp.eye(dtec.shape[0]))
# L = jnp.where(jnp.isnan(L), jnp.eye(L.shape[0])/sigma, L)
dx = solve_triangular(L, dtec/dtec_uncert, lower=True)
JT = solve_triangular(L, Kstar/dtec_uncert[:, None], lower=True)
#var_ik = JT_ji JT_jk
mean = JT.T @ dx
var = jnp.sum(JT * JT, axis=0)
return mean, var
return vmap(screen)(dtec, dtec_uncert), lengthscale, jnp.log(sigma)#[time_block_size, Nd_screen], [time_block_size, Nd_screen]
#[time_block_size, Nd_screen], [time_block_size, Nd_screen], [time_block_size]
(mean, var), mean_lengthscale, mean_logsigma = marginalise_static(key2, results.samples, results.log_p, 500, marg_func)
uncert = jnp.sqrt(var)
mean_sigma = jnp.exp(mean_logsigma)
mean_lengthscale = jnp.ones(time_block_size)*mean_lengthscale
mean_sigma = jnp.ones(time_block_size)*mean_sigma
ESS = results.ESS*jnp.ones(time_block_size)
logZ = results.logZ*jnp.ones(time_block_size)
likelihood_evals = results.num_likelihood_evaluations*jnp.ones(time_block_size)
return mean, uncert, mean_lengthscale, mean_sigma, ESS, logZ, likelihood_evals
T = Na * (Nt//time_block_size)
keys = random.split(key, T)
# [T, time_block_size, Nd_screen], [T, time_block_size, Nd_screen], [T, time_block_size], [T, time_block_size]
dtec = axes_move(dtec,['atb','d'], ['at','b','d'], size_dict=size_dict)
dtec_uncert = axes_move(dtec_uncert,['atb','d'], ['at','b','d'], size_dict=size_dict)
mean, uncert, mean_lengthscale, mean_sigma, ESS, logZ, likelihood_evals = chunked_pmap(run_block, keys, dtec, dtec_uncert, log_prob, chunksize=chunksize)
mean = axes_move(mean, ['at','b','n'],['n','a','tb'], size_dict=size_dict)
uncert = axes_move(uncert, ['at','b','n'],['n','a','tb'], size_dict=size_dict)
mean_lengthscale = axes_move(mean_lengthscale, ['at','b'],['a','tb'], size_dict=size_dict)
mean_sigma = axes_move(mean_sigma, ['at','b'],['a','tb'], size_dict=size_dict)
ESS = axes_move(ESS, ['at', 'b'],['a','tb'], size_dict=size_dict)
logZ = axes_move(logZ, ['at', 'b'],['a','tb'], size_dict=size_dict)
likelihood_evals = axes_move(likelihood_evals, ['at', 'b'],['a','tb'], size_dict=size_dict)
    return mean[..., :Nt - extra], uncert[..., :Nt - extra], mean_lengthscale[..., :Nt - extra], mean_sigma[..., :Nt - extra], ESS[..., :Nt - extra], logZ[..., :Nt - extra], likelihood_evals[..., :Nt - extra]
if __name__ == '__main__':
from jax.config import config
config.update("jax_enable_x64", True)
dp = DataPack('/home/albert/data/gains_screen/data/L342938_DDS5_full_merged.h5', readonly=True)
with dp:
select = dict(pol=slice(0, 1, 1), ant=[50], time=slice(0, 9, 1))
dp.current_solset = 'sol000'
dp.select(**select)
tec_mean, axes = dp.tec
dtec = jnp.asarray(tec_mean[0, :, :, :])
tec_std, axes = dp.weights_tec
dtec_uncert = jnp.asarray(tec_std[0, :, :, :])
patch_names, directions = dp.get_directions(axes['dir'])
antenna_labels, antennas = dp.get_antennas(axes['ant'])
timestamps, times = dp.get_times(axes['time'])
# antennas = ac.ITRS(*antennas.cartesian.xyz, obstime=times[0])
# ref_ant = antennas[0]
# frame = ENU(obstime=times[0], location=ref_ant.earth_location)
# antennas = antennas.transform_to(frame)
# ref_ant = antennas[0]
# directions = directions.transform_to(frame)
# x = antennas.cartesian.xyz.to(au.km).value.T[1:2, :]
# k = directions.cartesian.xyz.value.T
times = times.mjd
times -= times[0]
times *= 86400.
directions = jnp.stack([directions.ra.deg, directions.dec.deg], axis=1)
X = make_coord_array(directions, flat=True)
n_screen = 250
directions_star = random.uniform(random.PRNGKey(29428942), (n_screen, 2), minval=jnp.min(X, axis=0),
maxval=jnp.max(X, axis=0))
Xstar = make_coord_array(directions_star, flat=True)
#
# kstar = random.uniform(random.PRNGKey(29428942), (n_screen, 3), minval=jnp.min(k, axis=0),
# maxval=jnp.max(k, axis=0))
# X = jnp.asarray(k)
# Xstar = jnp.asarray(kstar)
# print(dtec_uncert)
mean, uncert, lengthscale, sigma, ESS, logZ, likelihood_evals = solve_with_vanilla_kernel(random.PRNGKey(42), dtec,
dtec_uncert, X, Xstar,
M32(), time_block_size=9,
chunksize=2)
print(lengthscale, sigma, ESS, logZ, likelihood_evals)
for a in range(len(antennas)):
for t in range(len(times)):
plot_vornoi_map(X, dtec[:,a,t])
plt.title(f"{antenna_labels[a]}, {t}")
plt.show()
plot_vornoi_map(Xstar, mean[:, a, t])
plt.title(f"{antenna_labels[a]}, {t}")
plt.show()
#
# plot_vornoi_map(X, dtec_uncert[:, a, t])
# plt.title(f"{antenna_labels[a]}, {t}")
# plt.show()
#
# plot_vornoi_map(Xstar, uncert[:, a, t])
# plt.title(f"{antenna_labels[a]}, {t}")
# plt.show()
```
#### File: bayes_gain_screens/steps/download_archive.py
```python
import subprocess
import os
import numpy as np
import argparse
import glob
def cmd_call(cmd):
print("{}".format(cmd))
exit_status = subprocess.call(cmd, shell=True)
if exit_status:
raise ValueError("Failed to run: {}".format(cmd))
def get_solutions_timerange(sols):
t = np.load(sols)['BeamTimes']
return np.min(t), np.max(t)
def fixsymlinks(archive_dir, working_dir, obs_num):
# Code from Tim for fixing symbolic links for DDS3_
# dds3smoothed = glob.glob('SOLSDIR/*/*killMS.DDS3_full_smoothed*npz')
print("Fixing symbolic links")
dds3 = glob.glob(os.path.join(archive_dir, 'SOLSDIR/L{obs_num}*.ms/killMS.DDS3_full.sols.npz'.format(obs_num=obs_num)))
for f in dds3:
ms = os.path.basename(os.path.dirname(f))
to_folder = os.path.join(working_dir, 'SOLSDIR', ms)
try:
os.makedirs(to_folder)
except:
pass
for g in glob.glob(os.path.join(os.path.dirname(f), '*')):
src = os.path.abspath(g)
dst = os.path.join(to_folder, os.path.basename(g))
if os.path.islink(dst):
os.unlink(dst)
print("Linking {} -> {}".format(src,dst))
os.symlink(src,dst)
start_time, _ = get_solutions_timerange(f)
start_time = os.path.basename(
glob.glob(os.path.join(archive_dir, 'DDS3_full_{}*_smoothed.npz'.format(int(start_time))))[0]).split('_')[2]
src = os.path.join(archive_dir, 'DDS3_full_{}_smoothed.npz'.format(start_time))
dst = os.path.join(to_folder, 'killMS.DDS3_full_smoothed.sols.npz')
if os.path.islink(dst):
os.unlink(dst)
print("Linking {} -> {}".format(src, dst))
os.symlink(src, dst)
src = os.path.join(archive_dir, 'image_full_ampphase_di_m.NS.app.restored.fits')
dst = os.path.join(working_dir, 'image_full_ampphase_di_m.NS.app.restored.fits')
if os.path.islink(dst):
os.unlink(dst)
print("Linking {} -> {}".format(src, dst))
os.symlink(src, dst)
def copy_archives(archive_dir, working_dir, obs_num, no_download):
print("Copying archives.")
archive_fullmask = os.path.join(archive_dir, 'image_full_ampphase_di_m.NS.mask01.fits')
archive_indico = os.path.join(archive_dir, 'image_full_ampphase_di_m.NS.DicoModel')
archive_clustercat = os.path.join(archive_dir, 'image_dirin_SSD_m.npy.ClusterCat.npy')
fullmask = os.path.join(working_dir, os.path.basename(archive_fullmask))
indico = os.path.join(working_dir, os.path.basename(archive_indico))
clustercat = os.path.join(working_dir, 'image_dirin_SSD_m.npy.ClusterCat.npy')
if no_download:
cmd_call('mv {} {}'.format(archive_fullmask, fullmask))
cmd_call('mv {} {}'.format(archive_indico, indico))
cmd_call('mv {} {}'.format(archive_clustercat, clustercat))
else:
cmd_call('rsync -auvP {} {}'.format(archive_fullmask, fullmask))
cmd_call('rsync -auvP {} {}'.format(archive_indico, indico))
cmd_call('rsync -auvP {} {}'.format(archive_clustercat, clustercat))
mslist = sorted(glob.glob(os.path.join(archive_dir, 'L{obs_num}*_SB*.ms.archive'.format(obs_num=obs_num))))
print('Found archives files:\n{}'.format(mslist))
outms = []
for ms in mslist:
outname = os.path.join(working_dir, os.path.basename(ms.rstrip('.archive')))
if no_download:
cmd_call('mv {}/ {}/'.format(ms, outname))
else:
cmd_call('rsync -auvP --delete {}/ {}/'.format(ms, outname))
outms.append(outname)
mslist_file = os.path.join(working_dir, 'mslist.txt')
with open(mslist_file, 'w') as f:
for ms in outms:
f.write('{}\n'.format(ms))
return mslist_file, outms, fullmask, indico, clustercat
def add_args(parser):
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument('--obs_num', help='Obs number L*',
default=None, type=int, required=True)
parser.add_argument('--archive_dir', help='Where are the archives stored, may also be networked, e.g. <user>@<host>:<path>.',
default=None, type=str, required=True)
parser.add_argument('--working_dir', help='Where to perform the subtract.',
default=None, type=str, required=True)
parser.add_argument('--no_download', help='Whether to move instead of copy.',
default=False, type="bool", required=False)
def main(archive_dir, working_dir, obs_num, no_download):
if no_download:
if "SP_AUTH" in os.environ.keys():
if os.environ['SP_AUTH'] != '1':
raise ValueError("Trying to mv archive directory without authentication.")
else:
raise ValueError("Trying to mv archive directory without authentication.")
print("Will use 'mv' instead of 'rsync'. Archive dir must be local then.")
archive_dir = os.path.abspath(archive_dir)
working_dir = os.path.abspath(working_dir)
    os.makedirs(working_dir, exist_ok=True)
    os.makedirs(os.path.join(working_dir, 'SOLSDIR'), exist_ok=True)
os.chdir(working_dir)
mslist_file, mslist, fullmask, indico, clustercat = copy_archives(archive_dir, working_dir, obs_num,no_download)
if not os.path.isfile(fullmask):
raise IOError("Missing mask {}".format(fullmask))
if not os.path.isfile(indico):
raise IOError("Missing dico model {}".format(indico))
if not os.path.isfile(clustercat):
raise IOError("Missing clustercat {}".format(clustercat))
fixsymlinks(archive_dir, working_dir, obs_num)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download the archive to root.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_args(parser)
flags, unparsed = parser.parse_known_args()
print("Running with:")
for option, value in vars(flags).items():
print(" {} -> {}".format(option, value))
main(**vars(flags))
```
#### File: bayes_gain_screens/steps/slow_solve_on_subtracted.py
```python
import os
import glob
import numpy as np
import tables
import argparse
import subprocess
import logging
logger = logging.getLogger(__name__)
def link_overwrite(src, dst):
if os.path.islink(dst):
logger.info("Unlinking pre-existing sym link {}".format(dst))
os.unlink(dst)
logger.info("Linking {} -> {}".format(src, dst))
os.symlink(src, dst)
def cmd_call(cmd):
logger.info("{}".format(cmd))
exit_status = subprocess.call(cmd, shell=True)
if exit_status:
raise ValueError("Failed to run: {}".format(cmd))
def prepare_kms_sols(working_dir, data_dir, obs_num):
smoothed_h5parm = os.path.join(data_dir, 'L{}_DDS5_full_merged.h5'.format(obs_num))
original_sols = os.path.join(data_dir, 'L{}_DDS4_full_merged.sols.npz'.format(obs_num))
smooth_merged_sol = os.path.join(working_dir, 'L{}_DDS5_full_smoothed_merged.sols.npz'.format(obs_num))
linked_smooth_merged_sol = os.path.join(data_dir, os.path.basename(smooth_merged_sol))
link_overwrite(smooth_merged_sol, linked_smooth_merged_sol)
with tables.open_file(smoothed_h5parm) as t:
# Nt, Nf, Na, Nd, Npol
phase = t.root.sol000.phase000.val[...].T
amp = t.root.sol000.amplitude000.val[...].T
kms = np.load(original_sols)
if phase[:, :, :, :, 0].shape != kms['Sols']['G'][:, :, :, :, 0, 0].shape:
raise ValueError("Shapes are not correct in kms solutions {} {}".format(kms['Sols']['G'].shape, phase.shape))
Sols = np.copy(kms['Sols'])
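    # Overwrite the diagonal Jones terms with the smoothed solutions as complex gains,
    # i.e. G = amp * exp(1j * phase), written identically to XX and YY.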
Sols['G'][:, :, :, :, 0, 0] = amp[:, :, :, :, 0] * np.cos(phase[:, :, :, :, 0]) + \
1j * amp[:, :, :, :, 0] * np.sin(phase[:, :, :, :, 0]) # XX
Sols['G'][:, :, :, :, 1, 1] = amp[:, :, :, :, 0] * np.cos(phase[:, :, :, :, 0]) + \
1j * amp[:, :, :, :, 0] * np.sin(phase[:, :, :, :, 0]) # YY
np.savez(smooth_merged_sol, ModelName=kms['ModelName'], MaskedSols=kms['MaskedSols'],
FreqDomains=kms['FreqDomains'], StationNames=kms['StationNames'], BeamTimes=kms['BeamTimes'],
SourceCatSub=kms['SourceCatSub'], ClusterCat=kms['ClusterCat'], MSName=kms['MSName'], Sols=Sols,
SkyModel=kms['SkyModel'])
d = np.load(smooth_merged_sol)
assert np.all(np.isclose(d['Sols']['G'], Sols['G']))
def make_symlinks(data_dir, obs_num):
logger.info("Creating symbolic links")
smooth_merged_sol = os.path.join(data_dir, 'L{}_DDS5_full_smoothed_merged.sols.npz'.format(obs_num))
solsdir = os.path.join(data_dir, 'SOLSDIR')
sol_folders = glob.glob(os.path.join(solsdir, 'L{obs_num}*.ms'.format(obs_num=obs_num)))
for f in sol_folders:
src = smooth_merged_sol
dst = os.path.join(f, 'killMS.DDS5_full_smoothed.sols.npz')
link_overwrite(src, dst)
def solve(masked_dico_model, obs_num, clustercat, working_dir, data_dir, ncpu):
pre_apply_sol_name = 'DDS5_full_smoothed'
out_sol_name = 'DDS7_full_slow'
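    # One kMS run per measurement set: pre-apply the smoothed DDS5 solutions and solve
    # slow amplitude/phase corrections against the subtracted DATA_SUB column.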
mslist = sorted(glob.glob(os.path.join(data_dir, 'L{}*.ms'.format(obs_num))))
if len(mslist) == 0:
raise IOError("MS list empty")
if not os.path.isfile(clustercat):
raise IOError("Clustercat doesn't exist {}".format(clustercat))
solsdir = os.path.join(data_dir, 'SOLSDIR')
for ms in mslist:
cmd = ['kMS.py',
'--MSName={ms}'.format(ms=ms),
'--SolverType=KAFCA',
'--PolMode=Scalar',
'--BaseImageName=image_full_ampphase_di_m.NS',
'--dt=40.000000',
'--NIterKF=6',
'--CovQ=0.1',
'--LambdaKF=0.5',
'--NCPU={ncpu}'.format(ncpu=ncpu),
'--OutSolsName={out_sol_name}'.format(out_sol_name=out_sol_name),
'--NChanSols=1',
'--PowerSmooth=0.0',
'--PreApplySols=[{pre_apply_sol_name}]'.format(pre_apply_sol_name=pre_apply_sol_name),
'--InCol=DATA_SUB',
'--Weighting=Natural',
'--UVMinMax=0.500000,5000.000000',
'--SolsDir={solsdir}'.format(solsdir=solsdir),
'--BeamMode=LOFAR',
'--LOFARBeamMode=A',
'--DDFCacheDir=.',
'--NodesFile={clustercat}'.format(clustercat=clustercat),
'--DicoModel={masked_dico_model}'.format(masked_dico_model=masked_dico_model)]
cmd = ' \\\n\t'.join(cmd)
with open(os.path.join(working_dir, 'instruct.sh'), 'w') as f:
f.write(cmd)
logger.info(cmd)
cmd_call(cmd)
def make_merged_h5parm(obs_num, data_dir, working_dir):
merged_sol = os.path.join(working_dir, 'L{}_DDS7_full_slow_merged.sols.npz'.format(obs_num))
linked_merged_sol = os.path.join(data_dir, os.path.basename(merged_sol))
merged_h5parm = os.path.join(working_dir, 'L{}_DDS7_full_slow_merged.h5'.format(obs_num))
linked_merged_h5parm = os.path.join(data_dir, os.path.basename(merged_h5parm))
solsdir = os.path.join(data_dir, 'SOLSDIR')
sol_folders = sorted(glob.glob(os.path.join(solsdir, "L{}*.ms".format(obs_num))))
if len(sol_folders) == 0:
raise ValueError("Invalid obs num {}".format(obs_num))
sols = []
for f in sol_folders:
sol = glob.glob(os.path.join(f, '*DDS7_full_slow.sols.npz'))
if len(sol) == 0:
logger.info("Can't find DDS7_full_slow in {}".format(f))
continue
sols.append(os.path.abspath(sol[0]))
solsfile = os.path.join(working_dir, 'solslist_dds7_slow.txt')
with open(solsfile, 'w') as f:
for s in sols:
f.write("{}\n".format(s))
cmd_call('MergeSols.py --SolsFilesIn={} --SolFileOut={}'.format(solsfile, merged_sol))
if os.path.isfile(merged_h5parm):
logger.info("Deleting old {}".format(merged_h5parm))
os.unlink(merged_h5parm)
cmd_call('killMS2H5parm.py --nofulljones {h5_file} {npz_file} '.format(npz_file=merged_sol,
h5_file=merged_h5parm))
link_overwrite(merged_sol, linked_merged_sol)
link_overwrite(merged_h5parm, linked_merged_h5parm)
def cleanup_working_dir(working_dir):
logger.info("Deleting cache since we're done.")
for f in glob.glob(os.path.join(working_dir, "*.ddfcache")):
cmd_call("rm -r {}".format(f))
def main(obs_num, data_dir, working_dir, ncpu):
clustercat = os.path.join(data_dir, 'subtract.ClusterCat.npy')
    if not os.path.isfile(clustercat):
        raise IOError("Clustercat doesn't exist: {}".format(clustercat))
    filtered_dico_model = os.path.join(data_dir, 'image_full_ampphase_di_m.NS.DATA_SUB.DicoModel')
    if not os.path.isfile(filtered_dico_model):
        raise IOError("Dico model doesn't exist: {}".format(filtered_dico_model))
os.chdir(working_dir)
prepare_kms_sols(working_dir, data_dir, obs_num)
make_symlinks(data_dir, obs_num)
solve(filtered_dico_model, obs_num, clustercat, working_dir, data_dir, ncpu)
make_merged_h5parm(obs_num, data_dir, working_dir)
cleanup_working_dir(working_dir)
def add_args(parser):
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument('--ncpu', help='Number of cpu to use', default=32, type=int, required=True)
parser.add_argument('--obs_num', help='Obs number L*',
default=None, type=int, required=True)
parser.add_argument('--data_dir', help='Where are the ms files are stored.',
default=None, type=str, required=True)
parser.add_argument('--working_dir', help='Where to perform the imaging.',
default=None, type=str, required=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Solve slow AP solutions on 43 minutes timescale to solve holes.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_args(parser)
flags, unparsed = parser.parse_known_args()
logger.info("Running with:")
for option, value in vars(flags).items():
logger.info("\t{} -> {}".format(option, value))
main(**vars(flags))
```
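A side note on the gain packing in `prepare_kms_sols`: writing the smoothed solutions into the kMS structure reduces to placing `amp * exp(1j * phase)` on the Jones diagonal. A minimal sketch of just that step (toy arrays, not the pipeline's shapes):
```python
import numpy as np

# Toy amplitudes and phases standing in for the smoothed DDS5 solutions.
amp = np.array([1.0, 0.8, 1.2])
phase = np.array([0.3, -1.2, 2.0])

# Diagonal 2x2 Jones matrices: the same scalar gain goes to XX and YY.
G = np.zeros(amp.shape + (2, 2), dtype=complex)
G[..., 0, 0] = amp * np.exp(1j * phase)  # XX
G[..., 1, 1] = amp * np.exp(1j * phase)  # YY
print(G[0])
```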
#### File: bayes_gain_screens/steps/solve_on_subtracted.py
```python
import os
import glob
import pyregion
import numpy as np
import argparse
import subprocess
import logging
logger = logging.getLogger(__name__)
def link_overwrite(src, dst):
if os.path.islink(dst):
logger.info("Unlinking pre-existing sym link {}".format(dst))
os.unlink(dst)
logger.info("Linking {} -> {}".format(src, dst))
os.symlink(src, dst)
def cmd_call(cmd):
logger.info("{}".format(cmd))
exit_status = subprocess.call(cmd, shell=True)
if exit_status:
raise ValueError("Failed to run: {}".format(cmd))
def make_clustercat(reg_file, clustercat):
regions = pyregion.open(reg_file)
centers = np.zeros(len(regions[:]),
dtype=([('Name', 'S200'), ('ra', '<f8'), ('dec', '<f8'), ('SumI', '<f8'), ('Cluster', '<i8')]))
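    # One row per ds9 region: (Name, ra [rad], dec [rad], SumI, cluster index).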
    logger.info('Number of directions: {}'.format(len(regions)))
    for region_id, region in enumerate(regions):
        ra = np.pi * region.coord_list[0] / 180.
        dec = np.pi * region.coord_list[1] / 180.
centers[region_id][0] = ''
centers[region_id][1] = ra
centers[region_id][2] = dec
centers[region_id][3] = 0.
centers[region_id][4] = region_id
logger.info("ClusterCat centers:\n{}".format(centers))
np.save(clustercat, centers)
def solve(masked_dico_model, obs_num, clustercat, working_dir, data_dir, ncpu, sol_name):
mslist = sorted(glob.glob(os.path.join(data_dir, 'L{}*.ms'.format(obs_num))))
if len(mslist) == 0:
raise IOError("MS list empty")
if not os.path.isfile(clustercat):
raise IOError("Clustercat doesn't exist {}".format(clustercat))
solsdir = os.path.join(data_dir, 'SOLSDIR')
for i, ms in enumerate(mslist):
cmd = ['kMS.py',
'--MSName={ms}'.format(ms=ms),
'--SolverType=KAFCA',
'--PolMode=Scalar',
'--BaseImageName=image_full_ampphase_di_m.NS',
'--dt=0.5',
'--NIterKF=6',
'--CovQ=0.1',
'--LambdaKF=0.5',
'--NCPU={ncpu}'.format(ncpu=ncpu),
'--OutSolsName={out_sols}'.format(out_sols=sol_name),
'--NChanSols=1',
'--PowerSmooth=0.0',
'--InCol=DATA_SUB',
'--Weighting=Natural',
'--UVMinMax=0.100000,5000.000000',
'--SolsDir={solsdir}'.format(solsdir=solsdir),
'--BeamMode=LOFAR',
'--LOFARBeamMode=A',
'--DDFCacheDir=.',
'--NodesFile={clustercat}'.format(clustercat=clustercat),
'--DicoModel={masked_dico_model}'.format(masked_dico_model=masked_dico_model)]
cmd = ' \\\n\t'.join(cmd)
with open(os.path.join(working_dir, 'instruct_{:02d}.sh'.format(i)), 'w') as f:
f.write(cmd)
logger.info(cmd)
cmd_call(cmd)
def make_merged_h5parm(obs_num, sol_name, data_dir, working_dir):
merged_sol = os.path.join(data_dir, 'L{}_{}_merged.sols.npz'.format(obs_num, sol_name))
merged_h5parm = os.path.join(working_dir, 'L{}_{}_merged.h5'.format(obs_num, sol_name))
linked_merged_h5parm = os.path.join(data_dir, 'L{}_{}_merged.h5'.format(obs_num, sol_name))
solsdir = os.path.join(data_dir, 'SOLSDIR')
sol_folders = sorted(glob.glob(os.path.join(solsdir, "L{}*.ms".format(obs_num))))
if len(sol_folders) == 0:
raise ValueError("Invalid obs num {}".format(obs_num))
sols = []
for f in sol_folders:
sol = glob.glob(os.path.join(f, '*{}.sols.npz'.format(sol_name)))
if len(sol) == 0:
logger.info("Can't find {} in {}".format(sol_name, f))
continue
sols.append(os.path.abspath(sol[0]))
solsfile = os.path.join(working_dir, 'solslist_dds4.txt')
with open(solsfile, 'w') as f:
for s in sols:
f.write("{}\n".format(s))
cmd_call('MergeSols.py --SolsFilesIn={} --SolFileOut={}'.format(solsfile, merged_sol))
if os.path.isfile(merged_h5parm):
logger.info("Deleting old {}".format(merged_h5parm))
os.unlink(merged_h5parm)
cmd_call('killMS2H5parm.py --nofulljones {h5_file} {npz_file} '.format(npz_file=merged_sol,
h5_file=merged_h5parm))
link_overwrite(merged_h5parm, linked_merged_h5parm)
def cleanup_working_dir(working_dir):
logger.info("Deleting cache since we're done.")
for f in glob.glob(os.path.join(working_dir, "*.ddfcache")):
cmd_call("rm -r {}".format(f))
def add_args(parser):
parser.register("type", "bool", lambda v: v.lower() == "true")
    parser.add_argument('--ncpu', help='Number of cpu to use', default=32, type=int)
parser.add_argument('--obs_num', help='Obs number L*',
default=None, type=int, required=True)
parser.add_argument('--data_dir', help='Where are the ms files are stored.',
default=None, type=str, required=True)
parser.add_argument('--working_dir', help='Where to perform the imaging.',
default=None, type=str, required=True)
parser.add_argument('--region_file', help='Region file to use for new directions.',
default=None, type=str, required=True)
def main(region_file, obs_num, data_dir, working_dir, ncpu):
filtered_dico_model = os.path.join(data_dir, 'image_full_ampphase_di_m.NS.DATA_SUB.DicoModel')
    if not os.path.isfile(filtered_dico_model):
        raise IOError("Dico model doesn't exist: {}".format(filtered_dico_model))
clustercat = os.path.join(data_dir, 'subtract.ClusterCat.npy')
os.chdir(working_dir)
make_clustercat(region_file, clustercat)
solve(filtered_dico_model, obs_num, clustercat, working_dir, data_dir, ncpu, 'DDS4_full')
make_merged_h5parm(obs_num, 'DDS4_full', data_dir, working_dir)
cleanup_working_dir(working_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Solves for DDS4_full on subtracted DATA_SUB.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_args(parser)
flags, unparsed = parser.parse_known_args()
logger.info("Running with:")
for option, value in vars(flags).items():
logger.info(" {} -> {}".format(option, value))
main(**vars(flags))
```
#### File: bayes_gain_screens/tomographic_kernel/neural_approximation.py
```python
import haiku as hk
from jax import random, numpy as jnp, nn, vmap, value_and_grad, tree_multimap, tree_map, jit
from jax.lax import scan
from bayes_gain_screens.tomographic_kernel.tomographic_kernel import TomographicKernel, NeuralTomographicKernel
from jaxns.gaussian_process.kernels import RBF
from h5parm.utils import create_empty_datapack
from h5parm import DataPack
import astropy.coordinates as ac
import astropy.time as at
import astropy.units as au
from bayes_gain_screens.frames import ENU
from bayes_gain_screens.utils import make_coord_array
import pylab as plt
def train_neural_network(datapack: DataPack, batch_size, learning_rate, num_batches):
with datapack:
select = dict(pol=slice(0, 1, 1), ant=None, time=slice(0,1,1))
datapack.current_solset = 'sol000'
datapack.select(**select)
axes = datapack.axes_tec
patch_names, directions = datapack.get_directions(axes['dir'])
antenna_labels, antennas = datapack.get_antennas(axes['ant'])
timestamps, times = datapack.get_times(axes['time'])
antennas = ac.ITRS(*antennas.cartesian.xyz, obstime=times[0])
ref_ant = antennas[0]
frame = ENU(obstime=times[0], location=ref_ant.earth_location)
antennas = antennas.transform_to(frame)
ref_ant = antennas[0]
directions = directions.transform_to(frame)
x = antennas.cartesian.xyz.to(au.km).value.T
k = directions.cartesian.xyz.value.T
t = times.mjd
t -= t[len(t)//2]
t *= 86400.
n_screen = 250
kstar = random.uniform(random.PRNGKey(29428942),(n_screen,3), minval=jnp.min(k, axis=0), maxval=jnp.max(k, axis=0))
kstar /= jnp.linalg.norm(kstar, axis=-1, keepdims=True)
X = jnp.asarray(make_coord_array(x,
jnp.concatenate([k,kstar], axis=0),
t[:,None]))
x0 = jnp.asarray(antennas.cartesian.xyz.to(au.km).value.T[0, :])
ref_ant = x0
kernel = TomographicKernel(x0, ref_ant, RBF(), S_marg=100)
neural_kernel = NeuralTomographicKernel(x0, ref_ant)
def loss(params, key):
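        # Each batch draws a random subset of coordinates and random ionosphere hyperparameters
        # (wind velocity, layer bottom/width, lengthscale) so the network learns the kernel
        # across the whole prior range rather than at a single setting.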
keys = random.split(key,5)
indices = random.permutation(keys[0], jnp.arange(X.shape[0]))[:batch_size]
X_batch = X[indices, :]
wind_velocity = random.uniform(keys[1], shape=(3,), minval=jnp.asarray([-200., -200., 0.]), maxval=jnp.asarray([200., 200., 0.]))/1000.
bottom = random.uniform(keys[2], minval=50., maxval=500.)
width = random.uniform(keys[3], minval=40., maxval=300.)
l = random.uniform(keys[4], minval=1., maxval=30.)
sigma = 1.
K = kernel(X_batch, X_batch, bottom, width, l, sigma, wind_velocity=wind_velocity)
neural_kernel.set_params(params)
neural_K = neural_kernel(X_batch, X_batch, bottom, width, l, sigma, wind_velocity=wind_velocity)
return jnp.mean((K-neural_K)**2)/width**2
init_params = neural_kernel.init_params(random.PRNGKey(42))
def train_one_batch(params, key):
l, g = value_and_grad(lambda params: loss(params, key))(params)
params = tree_multimap(lambda p, g: p - learning_rate*g, params, g)
return params, l
final_params, losses = jit(lambda key: scan(train_one_batch, init_params, random.split(key, num_batches)))(random.PRNGKey(42))
plt.plot(losses)
plt.yscale('log')
plt.show()
if __name__ == '__main__':
datapack = create_empty_datapack(250, 2, 100, pols=None,
field_of_view_diameter=8.,
start_time=None,
time_resolution=30.,
min_freq=122.,
max_freq=166.,
array_file=None,
phase_tracking=None,
save_name='test_datapack.h5',
clobber=True)
train_neural_network(datapack, batch_size=64, learning_rate=0.001, num_batches=100)
```
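The optimizer in `train_one_batch` is plain gradient descent applied leaf-wise to a parameter pytree. A minimal, self-contained sketch of that pattern on a toy quadratic loss (illustrative only; in recent JAX versions `tree_util.tree_map` is the spelling of the deprecated `tree_multimap`):
```python
from jax import grad
from jax import tree_util
import jax.numpy as jnp

def toy_loss(params):
    # Simple quadratic so the minimizer is obvious: w -> 3.
    return jnp.sum((params['w'] - 3.0) ** 2)

params = {'w': jnp.zeros(2)}
learning_rate = 0.1
for _ in range(100):
    grads = grad(toy_loss)(params)
    # Same leaf-wise SGD step as train_one_batch: p <- p - lr * g.
    params = tree_util.tree_map(lambda p, g: p - learning_rate * g, params, grads)
print(params['w'])  # approaches [3. 3.]
```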
#### File: bayes_gain_screens/bin/simulate_ionosphere_phase_screen.py
```python
import argparse
import sys
import logging
logger = logging.getLogger(__name__)
from bayes_gain_screens.tomographic_kernel import TomographicKernel
from bayes_gain_screens.utils import make_coord_array
from bayes_gain_screens.plotting import plot_vornoi_map
from bayes_gain_screens.frames import ENU
from h5parm import DataPack
from jaxns.gaussian_process.kernels import RBF
import jax.numpy as jnp
from jax.scipy.linalg import solve_triangular
from jax import jit, random, vmap
from h5parm.utils import create_empty_datapack
import astropy.units as au
import astropy.coordinates as ac
import pylab as plt
import numpy as np
ARRAYS = {'lofar': DataPack.lofar_array_hba}
def get_num_directions(avg_spacing, field_of_view_diameter):
V = 2.*np.pi*(field_of_view_diameter/2.)**2
pp = 0.5
n = -V * np.log(1. - pp) / (avg_spacing/60.)**2 / np.pi / 2.
n = max(int(n), 50)
return n
def compute_conditional_moments(kernel:TomographicKernel, X_new, wind_velocity):
f_K = lambda X1, X2: kernel(X1, X2, bottom=300., width=50., l=4., sigma=1., wind_velocity=wind_velocity)
K_new_new = f_K(X_new, X_new)
L_new = jnp.linalg.cholesky(K_new_new + 1e-6*jnp.eye(K_new_new.shape[0]))
return L_new
# K_old_old = f_K(X_old, X_old)
# K_old_new = f_K(X_old, X_new)
# L = jnp.linalg.cholesky(K_old_old + 1e-6 * jnp.eye(K_old_old.shape[0]))
# JT = solve_triangular(L, K_old_new, lower=True)
# C = K_new_new - JT.T @ JT
# LC = jnp.linalg.cholesky(C + 1e-6*jnp.eye(C.shape[0]))
# # K_new_old @ (K_old_old)^-1 m(old)
# # K = L @ L^T
# # K^-1 = L^-T @ L^-1
# # (L^-T @ J.T)^T
# M = solve_triangular(L.T, JT, lower=False)
# return L_new, LC, M
def main(output_h5parm, ncpu, ra, dec,
array_name, start_time, time_resolution, duration,
field_of_view_diameter, avg_direction_spacing, east_wind, north_wind, time_block_size):
Nd = get_num_directions(avg_direction_spacing, field_of_view_diameter)
logger.info(f"Number of directions to simulate: {Nd}")
Nf = 1
Nt = int(duration / time_resolution) + 1
time_block_size = min(time_block_size, Nt)
logger.info(f"Number of times to simulate: {Nt}")
dp = create_empty_datapack(Nd, Nf, Nt, pols=None,
field_of_view_diameter=field_of_view_diameter,
start_time=start_time,
time_resolution=time_resolution,
min_freq=122.,
max_freq=166.,
array_file=ARRAYS[array_name],
phase_tracking=(ra, dec),
save_name=output_h5parm,
clobber=True)
with dp:
dp.current_solset = 'sol000'
dp.select(pol=slice(0, 1, 1), ant=[0,10,20], time=slice(0,time_block_size))
axes = dp.axes_tec
patch_names, directions = dp.get_directions(axes['dir'])
antenna_labels, antennas = dp.get_antennas(axes['ant'])
timestamps, times = dp.get_times(axes['time'])
avg_time = times[len(times) // 2]
antennas = ac.ITRS(*antennas.cartesian.xyz, obstime=avg_time)
ref_ant = antennas[0]
frame = ENU(obstime=avg_time, location=ref_ant.earth_location)
antennas = antennas.transform_to(frame)
ref_ant = antennas[0]
x0 = ref_ant.cartesian.xyz.to(au.km).value
directions = directions.transform_to(frame)
t = times.mjd*86400.
t -= t[0]
dt = time_resolution
x = antennas.cartesian.xyz.to(au.km).value.T[1:,:]
        logger.info(f"Baseline between simulated antennas (km): {x[1] - x[0]}")
# x[1,:] = x[0,:]
# x[1,0] += 0.3
k = directions.cartesian.xyz.value.T
logger.info(f"Directions: {directions}")
logger.info(f"Antennas: {x} {antenna_labels}")
logger.info(f"Reference Ant: {x0}")
logger.info(f"Times: {t}")
Na = x.shape[0]
logger.info(f"Number of antenna to simulate: {Na}")
Nd = k.shape[0]
Nt = t.shape[0]
#m(X_new) = K(X_new, X_old) @ (K(X_old, X_old))^{-1} m(X_old)
#K(X_new, X_new) = K(X_new, X_new) - K(X_new, X_old) @ (K(X_old, X_old))^{-1} K(X_old, X_new)
wind_vector = jnp.asarray([east_wind, north_wind, 0.])/1000.#km/s
X = make_coord_array(x, k, t[:,None], flat=True)#N,7
logger.info(f"Sampling {X.shape[0]} new points.")
kernel = TomographicKernel(x0, x0, RBF(), S_marg=25, compute_tec=True)
L = jit(compute_conditional_moments, static_argnums=[0])(kernel, X, wind_vector)
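    # Draw one realization of the correlated dTEC field: with K = L L^T, L @ z (z ~ N(0, I))
    # is a sample from N(0, K).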
dtec = L @ random.normal(random.PRNGKey(24532), shape=(L.shape[0],1))
dtec = dtec.reshape((Na, Nd, time_block_size)).transpose((1,0,2))
with dp:
        dp.select(pol=slice(0, 1, 1), ant=[10, 20], time=slice(0, time_block_size))
dp.tec = np.asarray(dtec[None, ...])
fig, axs = plt.subplots(Na,time_block_size, sharex=True, sharey=True, figsize=(2*time_block_size,2*Na))
for a in range(Na):
for i in range(time_block_size):
ax = axs[a][i]
ax = plot_vornoi_map(k[:, 0:2], dtec[:, a, i], ax=ax, colorbar=False)
if a == (Na-1):
ax.set_xlabel(r"$k_{\rm east}$")
if i == 0:
ax.set_ylabel(r"$k_{\rm north}$")
# ax.set_title(f"{} {times[i]}")
# plt.show()
# for d in range(Nd):
# plt.plot(dtec[d, a,:],alpha=0.3)
# plt.title(f"{antenna_labels[a]}")
plt.show()
def debug_main():
main(output_h5parm='test_datapack.h5',
ncpu=1,
ra=120.,
dec=30.,
array_name='lofar',
start_time=None,
time_resolution=10.,
duration=600.,
field_of_view_diameter=4.,
avg_direction_spacing=8.,
east_wind=150.,
north_wind=0.,
time_block_size=5)
def add_args(parser):
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument('--output_h5parm', help='H5Parm file to file to place the simulated differential TEC',
default=None, type=str, required=True)
parser.add_argument('--ra', help='RA in degrees in ICRS frame.',
default=None, type=float, required=True)
parser.add_argument('--dec', help='DEC in degrees in ICRS frame.',
default=None, type=float, required=True)
    parser.add_argument('--array_name', help=f'Name of array, options are {sorted(list(ARRAYS.keys()))}.',
                        default=None, type=str, required=True)
parser.add_argument('--start_time', help=f'Start time in modified Julian days (mjs/86400).',
default=None, type=float, required=True)
parser.add_argument('--time_resolution', help=f'Temporal resolution in seconds.',
default=30., type=float, required=False)
    parser.add_argument('--duration', help='Duration of the simulated observation in seconds.',
                        default=600., type=float, required=False)
parser.add_argument('--field_of_view_diameter', help=f'Diameter of field of view in degrees.',
default=4., type=float, required=False)
parser.add_argument('--avg_direction_spacing', help=f'Average spacing between directions in arcmin.',
default=6., type=float, required=False)
parser.add_argument('--east_wind', help=f'Velocity of wind to the east at 100km in m/s.',
default=-200., type=float, required=False)
parser.add_argument('--north_wind', help=f'Velocity of wind to the north at 100km in m/s.',
default=0., type=float, required=False)
parser.add_argument('--ncpu', help='Number of CPUs.',
default=1, type=int, required=True)
parser.add_argument('--time_block_size', help='Number of time steps to simulate at once (must be >= 2).',
default=2, type=int, required=True)
if __name__ == '__main__':
if len(sys.argv) == 1:
debug_main()
exit(0)
parser = argparse.ArgumentParser(
        description='Simulates ionospheric DTEC over a screen of directions using the tomographic kernel.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_args(parser)
flags, unparsed = parser.parse_known_args()
logger.info("Running with:")
for option, value in vars(flags).items():
logger.info("\t{} -> {}".format(option, value))
main(**vars(flags))
``` |
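The sampling step in `main` (`dtec = L @ z`) is the standard recipe for drawing from a zero-mean Gaussian with covariance K: factor K = L L^T and multiply L by standard-normal noise. A small numpy sketch with a stand-in covariance (the script builds K from the tomographic kernel instead):
```python
import numpy as np

rng = np.random.default_rng(0)
# Stand-in covariance matrix; the pipeline evaluates TomographicKernel on (x, k, t) coordinates.
K = np.eye(5) + 0.5 * np.ones((5, 5))
# Jitter on the diagonal keeps the Cholesky factorization numerically stable.
L = np.linalg.cholesky(K + 1e-6 * np.eye(5))
sample = L @ rng.standard_normal((5, 1))
print(sample.ravel())
```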
{
"source": "Joshuaalbert/bayes_tec",
"score": 2
} |
#### File: notebooks/pre_production_runs/run_solver_hpopt.py
```python
from bayes_tec.bayes_opt.bayes_hp_opt import BayesHPOpt
from bayes_tec.solvers.phase_only_solver import PhaseOnlySolver
from concurrent import futures
import numpy as np
opt = {'initial_learning_rate': 0.03,
'learning_rate_steps': 2.39,
'learning_rate_decay': 2.66,
'minibatch_size': 128,
'dof_ratio': 20.,
'gamma_start': 5e-05,
'gamma_add': 1e-4,
'gamma_mul': 1.04,
'gamma_max': 0.14,
'gamma_fallback': 0.1}
def _run(kwargs):
datapack = '../../scripts/data/killms_datapack_2.hdf5'
ant_sel='RS*'
time_sel=slice(0,100,1)
freq_sel=slice(0,48,1)
pol_sel=slice(0,1,1)
iterations=100
run_dir='run_dir_hp_opt_SM_itoh'
output_solset='posterior_sol_hp_opt_SM'
opt['priors'] = {
'time_period_uncert':[kwargs['t_uncert{}'.format(i)] for i in range(3)],
'dir_period_uncert':[kwargs['d_uncert{}'.format(i)] for i in range(3)],
'time_periods':[kwargs['t_period{}'.format(i)] for i in range(3)],
'dir_periods':[kwargs['d_period{}'.format(i)] for i in range(3)],
'w_init':[kwargs['w{}'.format(i)] for i in range(3)]
}
solver = PhaseOnlySolver(run_dir, datapack)
m,s = solver.solve(output_solset=output_solset, solset='sol000',
                       jitter=1e-6, tec_scale=0.005, screen_res=30,
iterations=iterations,
remake_posterior_solsets=False, inter_op_threads=0,
intra_op_threads=0,ant_sel=ant_sel, time_sel=time_sel,
pol_sel=pol_sel, freq_sel=freq_sel,debug=False,
W_diag=False,return_likelihood=True,num_likelihood_samples=100,
plot_level=-3, compute_posterior=False, **opt)
return -m/1e6
def objective(**kwargs):
    return _run(kwargs)
    # Unreachable alternative kept for reference: average the objective over threaded repeats.
    # with futures.ThreadPoolExecutor(max_workers=2) as exe:
    #     jobs = exe.map(_run, [kwargs])
    #     res = list(jobs)
    # return np.mean(res)
#1.326511211345239 -> {'initial_learning_rate': 0.08672814094012078, 'learning_rate_steps': 3.845691869451716, 'learning_rate_decay': 2.2338225170518045, 'minibatch_size': 343, 'dof_ratio': 17.483912391131362, 'gamma_start': 1.8893702113878085e-05, 'gamma_add': 0.00025304970643971796, 'gamma_mul': 1.1673530952703717, 'gamma_max': 0.21196916296812654, 'gamma_fallback': 0.15811131579133963}
#WARNING:root:1 (43) : 1.4169010382169323 -> {'initial_learning_rate': 0.04693469657453876, 'learning_rate_steps': 2.3379450095649053, 'learning_rate_decay': 2.309697760459837, 'minibatch_size': 257, 'dof_ratio': 15.324853129981337, 'gamma_start': 1.7497951372018477e-05, 'gamma_add': 0.00024740343452076625, 'gamma_mul': 1.1955893705407017, 'gamma_max': 0.34639589024185186, 'gamma_fallback': 0.15444066000616663}
#WARNING:root:2 (27) : 1.5081948998999013 -> {'initial_learning_rate': 0.009944268827823587, 'learning_rate_steps': 2.7228499570724916, 'learning_rate_decay': 1.268929681705544, 'minibatch_size': 484, 'dof_ratio': 15.793002501207107, 'gamma_start': 1.3162914446789919e-05, 'gamma_add': 0.0014083695974122102, 'gamma_mul': 1.1920515053318887, 'gamma_max': 0.08734702837532575, 'gamma_fallback': 0.21598310688240693}
#WARNING:root:3 (51) : 1.5183867590113769 -> {'initial_learning_rate': 0.09929641010035925, 'learning_rate_steps': 3.760297282474147, 'learning_rate_decay': 1.9596598257348894, 'minibatch_size': 381, 'dof_ratio': 19.712394557961836, 'gamma_start': 5.7113644372202535e-05, 'gamma_add': 0.00039745743579932673, 'gamma_mul': 1.0104099384398493, 'gamma_max': 0.49512123114366735, 'gamma_fallback': 0.2273128821926654}
#WARNING:root:4 (41) : 1.5421102537039924 -> {'initial_learning_rate': 0.03999651253015149, 'learning_rate_steps': 2.7655606636091004, 'learning_rate_decay': 2.252062714633563, 'minibatch_size': 257, 'dof_ratio': 19.897864384533356, 'gamma_start': 2.2467224826890863e-05, 'gamma_add': 0.00048298906787098023, 'gamma_mul': 1.0293807927120147, 'gamma_max': 0.45511367853454426, 'gamma_fallback': 0.22026128808845857}
bo = BayesHPOpt(objective,init='hp_opt_results_SM_itoh_td_yvar.hdf5',t=20.)
# initial_learning_rate=0.1,learning_rate_steps=2,
# learning_rate_decay=1.5,
# minibatch_size=128, dof_ratio=30,
# gamma_start=1e-5,gamma_add=1e-3,gamma_mul=1.1,
# gamma_max=0.15,gamma_fallback=1e-1):
#bo.add_continuous_param('initial_learning_rate',1e-3,1e-1,log=True)
#bo.add_continuous_param('learning_rate_steps',1,4)
#bo.add_continuous_param('learning_rate_decay',1.,3.)
#bo.add_integer_param('minibatch_size',16,512)
#bo.add_continuous_param('dof_ratio',14,21)
#bo.add_continuous_param('gamma_start',1e-7,1e-4,log=True)
#bo.add_continuous_param('gamma_add',1e-5,1e-2,log=True)
#bo.add_continuous_param('gamma_mul',1.01,1.3,log=True)
#bo.add_continuous_param('gamma_max',0.01,0.5,log=True)
#bo.add_continuous_param('gamma_fallback',1e-3,5e-1,log=True)
#bo.add_continuous_param('kern_time_ls',25,100,log=True)
#bo.add_continuous_param('kern_dir_ls',0.1,1.2,log=True)
bo.add_continuous_param('t_period0',15,200.,log=False)
bo.add_continuous_param('t_period1',15,200.,log=False)
bo.add_continuous_param('t_period2',15,200.,log=False)
bo.add_continuous_param('d_period0',0.3, 2.,log=False)
bo.add_continuous_param('d_period1',0.3, 2.,log=False)
bo.add_continuous_param('d_period2',0.3, 2.,log=False)
bo.add_continuous_param('t_uncert0',1.,100.,log=True)
bo.add_continuous_param('t_uncert1',1.,100.,log=True)
bo.add_continuous_param('t_uncert2',1.,100.,log=True)
bo.add_continuous_param('d_uncert0',0.05,2.,log=True)
bo.add_continuous_param('d_uncert1',0.05,2.,log=True)
bo.add_continuous_param('d_uncert2',0.05,2.,log=True)
bo.add_continuous_param('w0',0.1, 2.,log=False)
bo.add_continuous_param('w1',0.1, 2.,log=False)
bo.add_continuous_param('w2',0.1, 2.,log=False)
bo.run('hp_opt_results_SM_itoh_td_yvar.hdf5',init_design_size=0,n_iter=0,plot=True,likelihood_uncert=0.1)
```
#### File: notebooks/pre_production_runs/run_solver_kern_opt.py
```python
from bayes_tec.solvers.phase_only_solver import PhaseOnlySolver
from bayes_tec.utils.data_utils import define_equal_subsets
from bayes_tec.logging import logging
import numpy as np
from timeit import default_timer
import gpflow as gp
from bayes_tec.utils.stat_utils import log_normal_solve, log_normal_solve_fwhm
from gpflow.priors import LogNormal
def create_kern(name):
kerns = {'rbf':gp.kernels.RBF,'m12':gp.kernels.Matern12, 'm32':gp.kernels.Matern32, 'm52':gp.kernels.Matern52}
s = name.split("_")
k_time = kerns[s[1].lower()]
k_dir = kerns[s[2].lower()]
if s[0].lower() == 'sum':
return _sum(k_time,k_dir)
elif s[0].lower() == 'product':
return _product(k_time,k_dir)
def _product(kern_time_, kern_dir_):
def _kern(kern_ls_lower=0.75, kern_ls_upper=1.25, kern_dir_ls=0.5, kern_time_ls=50., kern_var=1., include_time=True, include_dir=True, **priors):
kern_dir = kern_dir_(2,active_dims=slice(0,2,1))
kern_time = kern_time_(1,active_dims=slice(2,3,1))
kern = kern_dir*kern_time
kern_var = 1. if kern_var == 0. else kern_var
kern_dir.variance.trainable = False
kern_dir.lengthscales = kern_dir_ls
kern_dir_ls = log_normal_solve_fwhm(kern_dir_ls*kern_ls_lower, kern_dir_ls*kern_ls_upper, D=0.1)
kern_dir.lengthscales.prior = LogNormal(kern_dir_ls[0], kern_dir_ls[1]**2)
kern_dir.lengthscales.trainable = True
kern_time.variance = kern_var
kern_var = log_normal_solve_fwhm(kern_var*kern_ls_lower, kern_var*kern_ls_upper, D=0.1)
kern_time.variance.prior = LogNormal(kern_var[0], kern_var[1]**2)
kern_time.variance.trainable = True
kern_time.lengthscales = kern_time_ls
kern_time_ls = log_normal_solve_fwhm(kern_time_ls*kern_ls_lower, kern_time_ls*kern_ls_upper, D=0.1)
kern_time.lengthscales.prior = LogNormal(kern_time_ls[0], kern_time_ls[1]**2)
kern_time.lengthscales.trainable = True
return kern
return _kern
def _sum(kern_time_, kern_dir_):
def _kern(kern_ls_lower=0.75, kern_ls_upper=1.25, kern_dir_ls=0.5, kern_time_ls=50., kern_var=1., include_time=True, include_dir=True, **priors):
kern_dir = kern_dir_(2,active_dims=slice(0,2,1))
kern_time = kern_time_(1,active_dims=slice(2,3,1))
kern = kern_dir + kern_time
kern_var = 1. if kern_var == 0. else kern_var
kern_var = log_normal_solve_fwhm(kern_var*kern_ls_lower, kern_var*kern_ls_upper, D=0.1)
kern_dir.variance.prior = LogNormal(kern_var[0], kern_var[1]**2)
kern_dir.variance.trainable = True
kern_dir.variance = np.exp(kern_var[0])
kern_dir.lengthscales = kern_dir_ls
kern_dir_ls = log_normal_solve_fwhm(kern_dir_ls*kern_ls_lower, kern_dir_ls*kern_ls_upper, D=0.1)
kern_dir.lengthscales.prior = LogNormal(kern_dir_ls[0], kern_dir_ls[1]**2)
kern_dir.lengthscales.trainable = True
kern_time.variance.prior = LogNormal(kern_var[0], kern_var[1]**2)
kern_time.variance = np.exp(kern_var[0])
kern_time.variance.trainable = True
kern_time.lengthscales = kern_time_ls
kern_time_ls = log_normal_solve_fwhm(kern_time_ls*kern_ls_lower, kern_time_ls*kern_ls_upper, D=0.1)
kern_time.lengthscales.prior = LogNormal(kern_time_ls[0], kern_time_ls[1]**2)
kern_time.lengthscales.trainable = True
return kern
return _kern
def test_new_solver():
# opt = {'initial_learning_rate': 0.0469346965745387, 'learning_rate_steps': 2.3379450095649053, 'learning_rate_decay': 2.3096977604598385, 'minibatch_size': 257, 'dof_ratio': 15.32485312998133, 'gamma_start': 1.749795137201838e-05, 'gamma_add': 0.00014740343452076625, 'gamma_mul': 1.0555893705407017, 'gamma_max': 0.1063958902418518, 'gamma_fallback': 0.15444066000616663}
opt = {'initial_learning_rate': 0.030035792298837113, 'learning_rate_steps': 2.3915384159241064, 'learning_rate_decay': 2.6685242978751798, 'minibatch_size': 128, 'dof_ratio': 10., 'gamma_start': 6.876944103773131e-05, 'gamma_add': 1e-4, 'gamma_mul': 1.04, 'gamma_max': 0.14, 'gamma_fallback': 0.1, 'priors' : {'kern_time_ls': 50., 'kern_dir_ls': 0.80}}
datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/killms_datapack_2.hdf5'
run_dir='run_dir_killms_kern_opt'
output_solset = "posterior_sol_kern_opt"
time_sel = slice(50,150,1)
ant_sel = "RS210HBA"
import itertools
res = []
for s in itertools.product(['product','sum'],['rbf','m32','m52'],['rbf','m32','m52']):
name = "_".join(s)
logging.info("Running {}".format(name))
solver = PhaseOnlySolver(run_dir, datapack)
solver._build_kernel = create_kern(name)
lik = solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
iterations=500,intra_op_threads=0, inter_op_threads=0, ant_sel=ant_sel, time_sel=time_sel,pol_sel=slice(0,1,1),debug=False,
W_diag=True, freq_sel=slice(0,48,1), plot_level=-1, return_likelihood=True, num_likelihood_samples=100, **opt)
res.append([name,-lik[0]/1e6,lik[1]/1e6])
logging.info("{} results {}".format(name,res))
with open("kern_opt_res.csv", 'a') as f:
f.write("{}\n".format(str(res[-1]).replace('[','').replace(']','') ))
# solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
# iterations=500, intra_op_threads=0, inter_op_threads=0, ant_sel="CS*", time_sel=time_sel,pol_sel=slice(0,1,1),debug=False,
# W_diag=False, freq_sel=slice(0,48,1), **opt)
# W_diag = False
# dof_ratio = 20.
#
# run_dir = "run_dir_killms_notime_{}_{}".format(int(dof_ratio),'diag' if W_diag else 'chol')
# output_solset = "posterior_sol_notime_{}_{}".format(int(dof_ratio),'diag' if W_diag else 'chol')
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
# initial_learning_rate=1e-2, final_learning_rate=1e-3, iterations=2000, minibatch_size=128, dof_ratio=dof_ratio,
# intra_op_threads=0, inter_op_threads=0, ant_sel=ant_sel, time_sel=time_sel,pol_sel=slice(0,1,1),debug=False, W_diag=W_diag, freq_sel=slice(0,48,1))
# ###
# # RS
# for i in range(18):
# time_sel = slice(i*200,min(3600,(i+1)*200),1)
# solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=20.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=time_sel,pol_sel=slice(0,1,1),debug=False, W_diag=True, freq_sel=slice(0,48,1))
#
# ###
# # CS
# for i in range(18):
# time_sel = slice(i*200,min(3600,(i+1)*200),1)
# solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=20.,intra_op_threads=0, inter_op_threads=0, ant_sel="CS*",
# time_sel=time_sel,pol_sel=slice(0,1,1),debug=False, W_diag=True, freq_sel=slice(0,48,1))
# run_dir = "run_dir_killms_10_Wdiag"
# datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/killms_datapack_3.hdf5'
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(solset='sol000', recalculate_coords=False, jitter=1e-6, tec_scale=0.005, screen_res=30, weight_smooth_len=40, reweight_obs=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=10.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=slice(100,200,1),pol_sel=slice(0,1,1),debug=False, W_diag=True)
# run_dir = "run_dir_killms_10_chol"
# datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/killms_datapack_3.hdf5'
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(solset='sol000', recalculate_coords=False, jitter=1e-6, tec_scale=0.005, screen_res=30, weight_smooth_len=40, reweight_obs=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=10.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=slice(100,200,1),pol_sel=slice(0,1,1),debug=False, W_diag=False)
#
# run_dir = "run_dir_ndppp_10_Wdiag"
# datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/ndppp_datapack.hdf5'
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(solset='sol000', recalculate_coords=False, jitter=1e-6, tec_scale=0.005, screen_res=30, weight_smooth_len=40, reweight_obs=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=10.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=slice(100,200,1),pol_sel=slice(0,1,1),debug=False, W_diag=True)
#
# run_dir = "run_dir_ndppp_10_chol"
# datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/killms_datapack.hdf5'
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(solset='sol000', recalculate_coords=False, jitter=1e-6, tec_scale=0.005, screen_res=30, weight_smooth_len=40, reweight_obs=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=10.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=slice(100,200,1),pol_sel=slice(0,1,1),debug=False, W_diag=False)
if __name__ == '__main__':
test_new_solver()
```
#### File: notebooks/pre_production_runs/run_solver.py
```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import tensorflow
from bayes_tec.solvers.phase_only_solver import PhaseOnlySolver
from bayes_tec.utils.data_utils import define_equal_subsets
from bayes_tec.logging import logging
import numpy as np
from timeit import default_timer
def test_new_solver():
# opt = {'initial_learning_rate': 0.0469346965745387, 'learning_rate_steps': 2.3379450095649053, 'learning_rate_decay': 2.3096977604598385, 'minibatch_size': 257, 'dof_ratio': 15.32485312998133, 'gamma_start': 1.749795137201838e-05, 'gamma_add': 0.00014740343452076625, 'gamma_mul': 1.0555893705407017, 'gamma_max': 0.1063958902418518, 'gamma_fallback': 0.15444066000616663}
opt = {'initial_learning_rate': 0.03, 'learning_rate_steps': 2.39, 'learning_rate_decay': 2.66, 'minibatch_size': 1024, 'dof_ratio': 2., 'gamma_start': 5e-05, 'gamma_add': 1e-4, 'gamma_mul': 1.04, 'gamma_max': 0.14, 'gamma_fallback': 0.1}
# opt['priors'] = {'kern_time_ls': 42.20929516497659, 'kern_dir_ls': 0.36789336277387313}
datapack = '../../scripts/data/killms_datapack_4.hdf5'
run_dir='run_dir_killms'
output_solset = "posterior_sol"
solver = PhaseOnlySolver(run_dir, datapack)
solve_slices, set_slices, subset_slices = define_equal_subsets(3600,200,20)
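    # Presumably: solve_slice is the (overlapping) window actually solved, set_slice is the
    # portion of the timeline written back, and subset_slice indexes that portion within the window.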
for solve_slice, set_slice, subset_slice in zip(solve_slices, set_slices, subset_slices):
time_sel = slice(*solve_slice,1)
opt['posterior_save_settings'] = {'save_time_sel':slice(*set_slice,1), 'subset_slice':slice(*subset_slice,1)}
logging.debug(time_sel)
logging.debug(opt['posterior_save_settings'])
# for start in range(1,3600, 100):
# stop = min(3600, start + 100)
#
# time_sel = slice(start,stop,1)
solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
iterations=400,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*", dir_sel=slice(None,None,1), time_sel=time_sel,pol_sel=slice(0,1,1),debug=False,
W_trainable=False, freq_sel=slice(0,48,1), plot_level=-1, **opt)
# solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
# iterations=500, intra_op_threads=0, inter_op_threads=0, ant_sel="CS*", time_sel=time_sel,pol_sel=slice(0,1,1),debug=False,
# W_diag=False, freq_sel=slice(0,48,1), **opt)
# W_diag = False
# dof_ratio = 20.
#
# run_dir = "run_dir_killms_notime_{}_{}".format(int(dof_ratio),'diag' if W_diag else 'chol')
# output_solset = "posterior_sol_notime_{}_{}".format(int(dof_ratio),'diag' if W_diag else 'chol')
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
# initial_learning_rate=1e-2, final_learning_rate=1e-3, iterations=2000, minibatch_size=128, dof_ratio=dof_ratio,
# intra_op_threads=0, inter_op_threads=0, ant_sel=ant_sel, time_sel=time_sel,pol_sel=slice(0,1,1),debug=False, W_diag=W_diag, freq_sel=slice(0,48,1))
# ###
# # RS
# for i in range(18):
# time_sel = slice(i*200,min(3600,(i+1)*200),1)
# solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=20.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=time_sel,pol_sel=slice(0,1,1),debug=False, W_diag=True, freq_sel=slice(0,48,1))
#
# ###
# # CS
# for i in range(18):
# time_sel = slice(i*200,min(3600,(i+1)*200),1)
# solver.solve(output_solset=output_solset, solset='sol000', jitter=1e-6, tec_scale=0.005, screen_res=30, remake_posterior_solsets=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=20.,intra_op_threads=0, inter_op_threads=0, ant_sel="CS*",
# time_sel=time_sel,pol_sel=slice(0,1,1),debug=False, W_diag=True, freq_sel=slice(0,48,1))
# run_dir = "run_dir_killms_10_Wdiag"
# datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/killms_datapack_3.hdf5'
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(solset='sol000', recalculate_coords=False, jitter=1e-6, tec_scale=0.005, screen_res=30, weight_smooth_len=40, reweight_obs=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=10.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=slice(100,200,1),pol_sel=slice(0,1,1),debug=False, W_diag=True)
# run_dir = "run_dir_killms_10_chol"
# datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/killms_datapack_3.hdf5'
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(solset='sol000', recalculate_coords=False, jitter=1e-6, tec_scale=0.005, screen_res=30, weight_smooth_len=40, reweight_obs=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=10.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=slice(100,200,1),pol_sel=slice(0,1,1),debug=False, W_diag=False)
#
# run_dir = "run_dir_ndppp_10_Wdiag"
# datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/ndppp_datapack.hdf5'
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(solset='sol000', recalculate_coords=False, jitter=1e-6, tec_scale=0.005, screen_res=30, weight_smooth_len=40, reweight_obs=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=10.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=slice(100,200,1),pol_sel=slice(0,1,1),debug=False, W_diag=True)
#
# run_dir = "run_dir_ndppp_10_chol"
# datapack = '/net/lofar1/data1/albert/git/bayes_tec/scripts/data/killms_datapack.hdf5'
# solver = PhaseOnlySolver(run_dir, datapack)
# solver.solve(solset='sol000', recalculate_coords=False, jitter=1e-6, tec_scale=0.005, screen_res=30, weight_smooth_len=40, reweight_obs=False,
# learning_rate=1e-2,iterations=2000, minibatch_size=128, dof_ratio=10.,intra_op_threads=0, inter_op_threads=0, ant_sel="RS*",
# time_sel=slice(100,200,1),pol_sel=slice(0,1,1),debug=False, W_diag=False)
if __name__ == '__main__':
test_new_solver()
```
#### File: scripts/import_data/import_ndppp.py
```python
import argparse
from bayes_tec.datapack import DataPack
import h5py
import numpy as np
import os
import sys
from bayes_tec.logging import logging
#TECU = 1e16
tec_conversion = -8.4480e9# rad Hz/tecu
def _wrap(x):
return np.angle(np.exp(1j*x))
def import_data(ndppp_dd_sols, out_datapack, clobber,ant_sel, time_sel, freq_sel, pol_sel, dir_sel):
"""Create a datapack from the direction dependent NDPPP solutions.
"""
if os.path.exists(out_datapack):
logging.info("{} exists".format(out_datapack))
if clobber:
logging.info("Deleting old datapack")
os.unlink(out_datapack)
else:
raise ValueError("{} already exists and non clobber".format(out_datapack))
with DataPack(ndppp_dd_sols,readonly=True) as f_dd:
f_dd.select(ant=ant_sel,time=time_sel,freq=freq_sel,dir=dir_sel,pol=pol_sel)
freqs = np.array([125.,135.,145.,155.,165.])*1e6
with DataPack(out_datapack) as out:
patch_names, directions = f_dd.sources
antenna_labels, antennas = f_dd.antennas
out.add_antennas()#default is lofar
out.add_sources(directions, patch_names=patch_names)
tec,axes = f_dd.tec#(npol), nt, na, nd,1
scalarphase,axes = f_dd.scalarphase#(npol), nt, na, nd,1
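            # Rebuild per-frequency phases from the DD solutions:
            # phase(nu) = tec_conversion * TEC / nu + scalarphase, broadcast over the frequency axis.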
if 'pol' in axes.keys():#(1,3595,62,1,42,1)
tec = tec[...,0].transpose((0,3,2,1))#npol,nd,na,nt
scalarphase = scalarphase[...,0].transpose((0,3,2,1))#npol,nd,na,nt
phase = tec_conversion*tec[:,:,:,None,:]/freqs[None,None,None,:,None] + scalarphase[:,:,:,None,:]
else:
tec = tec[...,0].transpose((2,1,0))#nd,na,nt
scalarphase = scalarphase[...,0].transpose((2,1,0))#nd,na,nt
phase = tec_conversion*tec[None,:,:,None,:]/freqs[None,None,None,:,None] + scalarphase[None,:,:,None,:]
axes['pol'] = ['XX']
out.add_freq_dep_tab('phase', axes['time'], freqs, pols=axes['pol'], ants = axes['ant'],
dirs = axes['dir'], vals=_wrap(phase))
logging.info("Done importing data")
def add_args(parser):
def _time_sel(s):
if s.lower() == 'none':
return None
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
def _ant_sel(s):
if s.lower() == 'none':
return None
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
def _dir_sel(s):
if s.lower() == 'none':
return None
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
def _pol_sel(s):
if s.lower() == 'none':
return None
elif ',' in s:
s = s.split(',')
return list(s)
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
def _freq_sel(s):
if s.lower() == 'none':
return None
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.register("type", "time_sel", _time_sel)
parser.register("type", "ant_sel", _ant_sel)
parser.register("type", "dir_sel", _dir_sel)
parser.register("type", "pol_sel", _pol_sel)
parser.register("type", "freq_sel", _freq_sel)
optional = parser._action_groups.pop() # Edited this line
parser._action_groups.append(optional) # added this line
required = parser.add_argument_group('required arguments')
# remove this line: optional = parser...
required.add_argument("--ndppp_dd_sols", type=str,
help="""NDPPP direction-dep. sols in a losoto h5parm.""", required=True)
required.add_argument("--out_datapack", type=str,
help="""The name of output datapack.""", required=True)
# network
optional.add_argument("--ant_sel", type="ant_sel", default=None,
help="""The antennas selection: None, regex RS*, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--time_sel", type="time_sel", default=None,
help="""The antennas selection: None, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--dir_sel", type="dir_sel", default=None,
help="""The direction selection: None, regex patch_???, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--pol_sel", type="pol_sel", default=None,
help="""The polarization selection: None, list XX,XY,YX,YY, regex X?, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--freq_sel", type="freq_sel", default=None,
help="""The channel selection: None, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--clobber", type="bool", default=False,
help="""Whether to overwrite output datapack.\n""")
if __name__=='__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_args(parser)
flags, unparsed = parser.parse_known_args()
import_data(**vars(flags))
```
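The phase reconstruction in `import_data` follows phase(nu) = tec_conversion * TEC / nu + scalarphase, with tec_conversion = -8.4480e9 rad Hz/TECU, wrapped to (-pi, pi]. A stand-alone sketch of just that conversion, reusing the hard-coded frequencies from the script:
```python
import numpy as np

tec_conversion = -8.4480e9  # rad Hz / TECU

def tec_to_phase(tec, freqs, scalarphase=0.0):
    """Expand a dTEC value (TECU) into wrapped phases at the given frequencies (Hz)."""
    phase = tec_conversion * tec / freqs + scalarphase
    return np.angle(np.exp(1j * phase))

freqs = np.array([125., 135., 145., 155., 165.]) * 1e6
print(tec_to_phase(0.01, freqs))
```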
#### File: scripts/plot_solutions/plot_sol.py
```python
from bayes_tec.datapack import DataPack
from bayes_tec.plotting.plot_datapack import animate_datapack
from bayes_tec.logging import logging
import argparse
def run_plot(datapack, output_folder, num_processes, **kwargs):
animate_datapack(datapack,output_folder, num_processes, **kwargs)
def add_args(parser):
def _time_sel(s):
logging.info("Parsing {}".format(s))
if s.lower() == 'none':
return None
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
def _ant_sel(s):
logging.info("Parsing {}".format(s))
if s.lower() == 'none':
return None
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
def _dir_sel(s):
logging.info("Parsing {}".format(s))
if s.lower() == 'none':
return None
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
def _pol_sel(s):
logging.info("Parsing {}".format(s))
if s.lower() == 'none':
return None
elif ',' in s:
s = s.split(',')
return list(s)
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
def _freq_sel(s):
logging.info("Parsing {}".format(s))
if s.lower() == 'none':
return None
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.register("type", "time_sel", _time_sel)
parser.register("type", "ant_sel", _ant_sel)
parser.register("type", "dir_sel", _dir_sel)
parser.register("type", "pol_sel", _pol_sel)
parser.register("type", "freq_sel", _freq_sel)
optional = parser._action_groups.pop() # Edited this line
parser._action_groups.append(optional) # added this line
required = parser.add_argument_group('required arguments')
# remove this line: optional = parser...
required.add_argument("--datapack", type=str,
help="""Datapack input, a losoto h5parm.""", required=True)
# network
optional.add_argument("--ant_sel", type="ant_sel", default=None,
help="""The antennas selection: None, regex RS*, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--time_sel", type="time_sel", default=None,
help="""The antennas selection: None, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--dir_sel", type="dir_sel", default=None,
help="""The direction selection: None, regex patch_???, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--pol_sel", type="pol_sel", default=None,
help="""The polarization selection: None, list XX,XY,YX,YY, regex X?, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--freq_sel", type="freq_sel", default=None,
help="""The channel selection: None, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--plot_crosses", type="bool", default=True,
help="Plot crosses in facets")
optional.add_argument("--labels_in_radec", type="bool", default=True,
help="Labels in RA/DEC")
optional.add_argument("--plot_screen", type="bool", default=False,
help="Whether to plot screen. Expects properly shaped array.")
optional.add_argument("--num_processes", type=int, default=1,
help="Number of parallel plots")
optional.add_argument("--tec_eval_freq", type=float, default=None,
help="Freq to eval tec at.")
optional.add_argument("--output_folder", type=str, default="./figs",
help="""The output folder.""")
optional.add_argument("--observable", type=str, default="phase",
help="""The soltab to plot""")
optional.add_argument("--phase_wrap", type="bool", default=True,
help="""Whether to wrap the observable""")
optional.add_argument("--solset", type=str, default="sol000",
help="""The solset to plot""")
optional.add_argument("--vmin", type=float, default=None,
help="""The min value if phase_wrap is False""")
optional.add_argument("--vmax", type=float, default=None,
help="""The max value if phase_wrap is False""")
if __name__=='__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_args(parser)
flags, unparsed = parser.parse_known_args()
logging.info(vars(flags))
run_plot(**vars(flags))
```
#### File: scripts/solver/solve_datapack_lmc_phase_only.py
```python
from bayes_tec.solvers.phase_only_lmc_solver import LMCPhaseOnlySolver
import argparse
import os
def run_solve(flags):
solver = LMCPhaseOnlySolver(flags.run_dir, flags.datapack)
solver.run(**vars(flags))
def add_args(parser):
def _time_sel(s):
if s.lower() == 'none':
return None
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
def _ant_sel(s):
if s.lower() == 'none':
return None
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
def _dir_sel(s):
if s.lower() == 'none':
return None
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
def _pol_sel(s):
if s.lower() == 'none':
return None
elif ',' in s:
s = s.split(',')
return list(s)
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
def _freq_sel(s):
if s.lower() == 'none':
return None
elif '/' in s:#slice
s = s.split("/")
assert len(s) == 3, "Proper slice notations is 'start/stop/step'"
return slice(int(s[0]) if s[0].lower() != 'none' else None,
int(s[1]) if s[1].lower() != 'none' else None,
int(s[2])if s[2].lower() != 'none' else None)
else:
return s
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.register("type", "time_sel", _time_sel)
parser.register("type", "ant_sel", _ant_sel)
parser.register("type", "dir_sel", _dir_sel)
parser.register("type", "pol_sel", _pol_sel)
parser.register("type", "freq_sel", _freq_sel)
optional = parser._action_groups.pop() # Edited this line
parser._action_groups.append(optional) # added this line
required = parser.add_argument_group('required arguments')
# remove this line: optional = parser...
required.add_argument("--datapack", type=str,
help="""Datapack input, a losoto h5parm.""", required=True)
# network
optional.add_argument("--ant_sel", type="ant_sel", default=None,
help="""The antennas selection: None, regex RS*, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--time_sel", type="time_sel", default=None,
help="""The antennas selection: None, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--dir_sel", type="dir_sel", default=None,
help="""The direction selection: None, regex patch_???, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--pol_sel", type="pol_sel", default=None,
help="""The polarization selection: None, list XX,XY,YX,YY, regex X?, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--freq_sel", type="freq_sel", default=None,
help="""The channel selection: None, or slice format <start>/<stop>/<step>.\n""")
optional.add_argument("--dof_ratio", type=float, default=40.,
help="""The ratio of temporal-spatial coordinates to degrees of freedom.""")
optional.add_argument("--minibatch_size", type=int, default=256,
help="Size of minibatch")
optional.add_argument("--learning_rate", type=float, default=1e-3,
help="learning rate")
optional.add_argument("--plot", type="bool", default=True, const=True,nargs='?',
help="Whether to plot results")
optional.add_argument("--run_dir", type=str, default='./run_dir',
help="Where to run the solve")
optional.add_argument("--iterations", type=int, default=10000,
help="How many iterations to run")
optional.add_argument("--jitter", type=float, default=1e-6,
help="Jitter for stability")
optional.add_argument("--eval_freq", type=float, default=144e6,
help="Eval frequency")
optional.add_argument("--reweight_obs", type="bool", default=True,
help="Whether to re-calculate the weights down the frequency axis. Otherwise use /weight table.")
optional.add_argument("--inter_op_threads", type=int, default=0,
help="""The max number of concurrent threads""")
optional.add_argument("--intra_op_threads", type=int, default=0,
help="""The number threads allowed for multi-threaded ops.""")
optional.add_argument("--tec_scale", type=float, default=0.01,
help="""The relative tec scale used for scaling the GP model for computational stability.""")
optional.add_argument("--max_block_size", type=int, default=500,
help="""Maximum number of timestamps per block solve.""")
required.add_argument("--overlap", type=float, default=160.,
help="""Temporal overlap in seconds.""",required=True)
optional.add_argument("--time_skip", type=int, default=2,
help="Time skip")
if __name__=='__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_args(parser)
flags, unparsed = parser.parse_known_args()
run_solve(flags)
```
#### File: bayes_tec/plotting/plot_datapack.py
```python
import matplotlib
matplotlib.use('Agg')
import numpy as np
import os
from concurrent import futures
from ..datapack import DataPack
from ..frames import UVW
from ..logging import logging
import astropy.coordinates as ac
import astropy.time as at
import astropy.units as au
from scipy.spatial import ConvexHull, cKDTree
import time
from scipy.spatial.distance import pdist
import psutil
import pylab as plt
plt.style.use('ggplot')
from matplotlib.patches import Polygon, Rectangle
from matplotlib.collections import PatchCollection
import matplotlib.colors as colors
try:
import cmocean
phase_cmap = cmocean.cm.phase
except ImportError:
phase_cmap = plt.cm.hsv
class DatapackPlotter(object):
def __init__(self,datapack):
if isinstance(datapack,str):
datapack = DataPack(filename=datapack,readonly=True)
self.datapack = datapack
def _create_polygon_plot(self,points, values=None, N = None,ax=None,cmap=plt.cm.bone,overlay_points=None,annotations=None,title=None,polygon_labels=None,reverse_x=False):
# get nearest points (without odd voronoi extra regions)
k = cKDTree(points)
dx = np.max(points[:,0]) - np.min(points[:,0])
dy = np.max(points[:,1]) - np.min(points[:,1])
delta = pdist(points)
N = N or int(min(max(100,2*np.max(delta)/np.min(delta)),500))
x = np.linspace(np.min(points[:,0])-0.1*dx,np.max(points[:,0])+0.1*dx,N)
y = np.linspace(np.min(points[:,1])-0.1*dy,np.max(points[:,1])+0.1*dy,N)
X,Y = np.meshgrid(x,y,indexing='ij')
# interior points population
points_i = np.array([X.flatten(),Y.flatten()]).T
# The match per input point
dist,i = k.query(points_i,k=1)
# the polygons are now created using convex hulls
# order is by point order
patches = []
for group in range(points.shape[0]):
points_g = points_i[i==group,:]
if points_g.size == 0:
logging.debug("Facet {} has zero size".format(group))
poly = Polygon(points[group:group+1,:],closed=False)
else:
hull = ConvexHull(points_g)
nodes = points_g[hull.vertices,:]
poly = Polygon(nodes,closed=False)
patches.append(poly)
if ax is None:
fig,ax = plt.subplots()
logging.info("Making new plot")
if values is None:
values = np.zeros(len(patches))#random.uniform(size=len(patches))
p = PatchCollection(patches,cmap=cmap)
p.set_array(values)
ax.add_collection(p)
#plt.colorbar(p)
if overlay_points is not None:
if annotations is None:
ax.scatter(overlay_points[:,0],overlay_points[:,1],marker='+',c='black')
else:
for point, a in zip(overlay_points, annotations):
ax.text(point[0],point[1],a,ha='center',va='center',backgroundcolor=(1.,1.,1., 0.1))
if reverse_x:
ax.set_xlim([np.max(points_i[:,0]),np.min(points_i[:,0])])
else:
ax.set_xlim([np.min(points_i[:,0]),np.max(points_i[:,0])])
ax.set_ylim([np.min(points_i[:,1]),np.max(points_i[:,1])])
ax.set_facecolor('black')
ax.grid(b=True,color='black')
if title is not None:
if reverse_x:
ax.text(np.max(points_i[:,0])-0.05*dx,np.max(points_i[:,1])-0.05*dy,title,ha='left',va='top',backgroundcolor=(1.,1.,1., 0.5))
else:
ax.text(np.min(points_i[:,0])+0.05*dx,np.max(points_i[:,1])-0.05*dy,title,ha='left',va='top',backgroundcolor=(1.,1.,1., 0.5))
# Rectangle((x, y), 0.5, 0.5,
# alpha=0.1,facecolor='red',label='Label'))
# ax.annotate(title,xy=(0.8,0.8),xycoords='axes fraction')
return ax, p
def _create_image_plot(self,points, values=None, N = None,ax=None,cmap=plt.cm.bone,overlay_points=None,annotations=None,title=None,reverse_x=False):
'''
Create initial plot, with image data instead of polygons.
points: (ra, dec)
values: array [n, m] or None, assumes (dec, ra) ordering ie (y,x)
'''
dx = np.max(points[0]) - np.min(points[0])
dy = np.max(points[1]) - np.min(points[1])
if values is not None:
Ndec,Nra = values.shape
else:
Ndec,Nra = len(points[1]),len(points[0])
values = np.zeros([Ndec,Nra])
if ax is None:
fig,ax = plt.subplots()
logging.info("Making new plot")
x = np.linspace(np.min(points[0]),np.max(points[0]),Nra)
y = np.linspace(np.min(points[1]),np.max(points[1]),Ndec)
img = ax.imshow(values,origin='lower',cmap=cmap, aspect='auto', extent=(x[0],x[-1],y[0],y[-1]))
if overlay_points is not None:
if annotations is None:
ax.scatter(overlay_points[:,0],overlay_points[:,1],marker='+',c='black')
else:
for point, a in zip(overlay_points, annotations):
ax.text(point[0],point[1],a,ha='center',va='center',backgroundcolor=(1.,1.,1., 0.1))
if reverse_x:
ax.set_xlim([x[-1],x[0]])
else:
ax.set_xlim([x[0],x[-1]])
ax.set_ylim([y[0],y[-1]])
ax.set_facecolor('black')
ax.grid(b=True,color='black')
if title is not None:
if reverse_x:
ax.text(x[-1]-0.05*dx,y[-1]-0.05*dy,title,ha='left',va='top',backgroundcolor=(1.,1.,1., 0.5))
else:
ax.text(x[0]+0.05*dx,y[-1]-0.05*dy,title,ha='left',va='top',backgroundcolor=(1.,1.,1., 0.5))
return ax, img
def plot(self, ant_sel=None,time_sel=None,freq_sel=None,dir_sel=None,pol_sel=None, fignames=None, vmin=None,vmax=None,mode='perantenna',observable='phase',phase_wrap=True, log_scale=False, plot_crosses=True,plot_facet_idx=False,plot_patchnames=False,labels_in_radec=False,show=False, plot_arrays=False, solset=None, plot_screen=False, tec_eval_freq=None, **kwargs):
"""
Plot datapack with given parameters.
"""
SUPPORTED = ['perantenna']
        assert mode in SUPPORTED, "only {} supported currently".format(SUPPORTED)
if fignames is None:
save_fig = False
show = True
else:
save_fig = True
show = show and True #False
if plot_patchnames:
plot_facet_idx = False
if plot_patchnames or plot_facet_idx:
plot_crosses = False
if not show:
logging.debug('turning off display')
matplotlib.use('Agg')
###
# Set up plotting
with self.datapack:
self.datapack.switch_solset(solset)
logging.info("Applying selection: ant={},time={},freq={},dir={},pol={}".format(ant_sel,time_sel,freq_sel,dir_sel,pol_sel))
self.datapack.select(ant=ant_sel,time=time_sel,freq=freq_sel,dir=dir_sel,pol=pol_sel)
obs,axes = self.datapack.__getattr__(observable)
if observable.startswith('weights_'):
obs = np.sqrt(np.abs(1./obs)) #uncert from weights = 1/var
phase_wrap=False
if 'pol' in axes.keys():
# plot only first pol selected
obs = obs[0,...]
#obs is dir, ant, freq, time
antenna_labels, antennas = self.datapack.get_antennas(axes['ant'])
patch_names, directions = self.datapack.get_sources(axes['dir'])
timestamps, times = self.datapack.get_times(axes['time'])
freq_dep = True
try:
freq_labels, freqs = self.datapack.get_freqs(axes['freq'])
except:
freq_dep = False
obs = obs[:,:,None,:]
freq_labels, freqs = [""],[None]
if tec_eval_freq is not None:
obs = obs*-8.4480e9/tec_eval_freq
if phase_wrap:
obs = np.angle(np.exp(1j*obs))
vmin = -np.pi
vmax = np.pi
cmap = phase_cmap
else:
vmin = vmin or np.percentile(obs.flatten(),1)
vmax = vmax or np.percentile(obs.flatten(),99)
cmap = plt.cm.bone
if log_scale:
obs = np.log10(obs)
Na = len(antennas)
Nt = len(times)
Nd = len(directions)
Nf = len(freqs)
fixfreq = Nf >> 1
logging.info("Plotting {} directions".format(Nd))
logging.info("Plotting {} antennas".format(Na))
logging.info("Plotting {} timestamps".format(Nt))
_, antennas_ = self.datapack.get_antennas([self.datapack.ref_ant])
#ants_uvw = antennas.transform_to(uvw)
ref_dist = np.sqrt((antennas.x - antennas_.x)**2 + (antennas.y - antennas_.y)**2 + (antennas.z - antennas_.z)**2).to(au.km).value
# if labels_in_radec:
ra = directions.ra.deg
dec = directions.dec.deg
if not plot_screen:
### points are normal
points = np.array([ra,dec]).T
if plot_crosses or plot_patchnames or plot_facet_idx:
overlay_points = points
else:
overlay_points = None
else:
### get unique ra and dec and then rearrange into correct order.
_ra = np.unique(ra)
_dec = np.unique(dec)
Nra = len(_ra)
Ndec = len(_dec)
assert Ndec * Nra == Nd
### sort lexiconially
ind = np.lexsort((ra,dec))
points = (_ra, _dec)
obs = obs[ind, ...]
obs = obs.reshape((Ndec,Nra,Na,Nf,Nt))
if plot_crosses:
overlay_points = None # put the facet (ra,dec).T
else:
overlay_points = None
if plot_patchnames:
annotations = patch_names
elif plot_facet_idx:
annotations = np.array([str(k) for k in range(Nd)])
else:
annotations = None
if fignames is not None:
if not isinstance(fignames,(tuple,list)):
fignames = [fignames]
if fignames is not None:
assert Nt == len(fignames)
if mode == 'perantenna':
M = int(np.ceil(np.sqrt(Na)))
fig,axs = plt.subplots(nrows=M,ncols=M,sharex='col',sharey='row',squeeze=False, \
figsize=(4*M,4*M))
fig.subplots_adjust(wspace=0., hspace=0.)
axes_patches = []
c = 0
for row in range(M):
for col in range(M):
ax = axs[row,col]
if col == 0:
ax.set_ylabel("Projected North (radians)" if not labels_in_radec else "DEC (deg)")
if row == M - 1:
ax.set_xlabel("Projected East (radians)" if not labels_in_radec else "RA (deg)")
if c >= Na:
continue
try:
title = antenna_labels[c].decode()
except:
title = antenna_labels[c]
if plot_screen:
_, p = self._create_image_plot(points, values=None, N = None,
ax=ax,cmap=cmap,overlay_points=overlay_points,
annotations=annotations,
title="{} {:.1f}km".format(title, ref_dist[c]),
reverse_x=labels_in_radec)
else:
_, p = self._create_polygon_plot(points, values=None, N = None,
ax=ax,cmap=cmap,overlay_points=overlay_points,
annotations=annotations,
title="{} {:.1f}km".format(title, ref_dist[c]),
reverse_x=labels_in_radec)
p.set_clim(vmin,vmax)
axes_patches.append(p)
c += 1
fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.875, 0.15, 0.025, 0.7])
fig.colorbar(p, cax=cbar_ax, orientation='vertical')
if show:
plt.ion()
plt.show()
for j in range(Nt):
logging.info("Plotting {}".format(timestamps[j]))
for i in range(Na):
if not plot_screen:
axes_patches[i].set_array(obs[:,i,fixfreq,j])
else:
axes_patches[i].set_array(obs[:,:,i,fixfreq,j])
axs[0,0].set_title("{} {} : {}".format(observable, freq_labels[fixfreq], timestamps[j]))
fig.canvas.draw()
if save_fig:
plt.savefig(fignames[j])
if show:
# plt.close(fig)
plt.ioff()
def _parallel_plot(arg):
datapack,time_slice,kwargs,output_folder=arg
dp = DatapackPlotter(datapack=datapack)
with dp.datapack:
# Get the time selection desired
dp.datapack.select(time=kwargs.get('time_sel',None))
axes = dp.datapack.axes_phase
# timeslice the selection
times = axes['time']#mjs
sel_list = times[time_slice]
kwargs['time_sel'] = sel_list
fignames = [os.path.join(output_folder,"fig-{:04d}.png".format(j)) for j in range(len(times))[time_slice]]
dp.plot(fignames=fignames,**kwargs)
return fignames
def animate_datapack(datapack,output_folder,num_processes,**kwargs):
"""
Plot the datapack in parallel, then stitch into movie.
datapack: str the datapack filename
output_folder: str, folder to store figs in
num_processes: int number of parallel plotting processes to run
**kwargs: keywords to pass to DatapackPlotter.plot function.
"""
try:
os.makedirs(output_folder)
except:
pass
if num_processes is None:
num_processes = psutil.cpu_count()
if isinstance(datapack,DataPack):
datapack = datapack.filename
# with DataPack(datapack) as datapack_fix:
# datapack_fix.add_antennas(DataPack.lofar_array)
args = []
for i in range(num_processes):
args.append((datapack,slice(i,None,num_processes),kwargs,output_folder))
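    # Each worker receives an interleaved time slice (i, i+N, i+2N, ...); figure indices are
    # generated from the global time axis, so frames from different processes never collide.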
with futures.ProcessPoolExecutor(max_workers=num_processes) as executor:
jobs = executor.map(_parallel_plot,args)
results = list(jobs)
plt.close('all')
make_animation(output_folder,prefix='fig',fps=4)
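# Usage sketch (illustrative values, not from the original source):
# animate_datapack('datapack.h5', './figs', num_processes=4,
#                  observable='phase', labels_in_radec=True, ant_sel='RS*')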
def make_animation(datafolder,prefix='fig',fps=4):
'''Given a datafolder with figures of format `prefix`-%04d.png create a
video at framerate `fps`.
Output is datafolder/animation.mp4'''
if os.system('ffmpeg -framerate {} -i {}/{}-%04d.png -vf scale="trunc(iw/2)*2:trunc(ih/2)*2" -c:v libx264 -profile:v high -pix_fmt yuv420p -g 30 -r 30 {}/animation.mp4'.format(fps,datafolder,prefix,datafolder)):
logging.info("{}/animation.mp4 exists already".format(datafolder))
def plot_phase_vs_time(datapack,output_folder, solsets='sol000',
ant_sel=None,time_sel=None,dir_sel=None,freq_sel=None,pol_sel=None):
if isinstance(datapack,DataPack):
datapack = datapack.filename
if not isinstance(solsets , (list,tuple)):
solsets = [solsets]
output_folder = os.path.abspath(output_folder)
os.makedirs(output_folder,exist_ok=True)
with DataPack(datapack,readonly=True) as datapack:
phases = []
stds = []
for solset in solsets:
datapack.switch_solset(solset)
datapack.select(ant=ant_sel,time=time_sel,dir=dir_sel,freq=freq_sel,pol=pol_sel)
weights,axes = datapack.weights_phase
freq_ind = len(axes['freq']) >> 1
freq = axes['freq'][freq_ind]
ant = axes['ant'][0]
phase,_ = datapack.phase
            std = np.sqrt(np.abs(1./weights))  # weights store 1/variance, so uncertainty = sqrt(1/weights)
timestamps,times = datapack.get_times(axes['time'])
phases.append(phase)
stds.append(std)
for phase in phases:
for s,S in zip(phase.shape,phases[0].shape):
assert s==S
Npol,Nd,Na,Nf,Nt = phases[0].shape
fig,ax = plt.subplots()
for p in range(Npol):
for d in range(Nd):
for a in range(Na):
for f in range(Nf):
ax.cla()
for i,solset in enumerate(solsets):
phase = phases[i]
std = stds[i]
label = "{} {} {:.1f}MHz {}:{}".format(solset, axes['pol'][p], axes['freq'][f]/1e6, axes['ant'][a], axes['dir'][d])
ax.fill_between(times.mjd,phase[p,d,a,f,:]-2*std[p,d,a,f,:],phase[p,d,a,f,:]+2*std[p,d,a,f,:],alpha=0.5,label=r'$\pm2\hat{\sigma}_\phi$')#,color='blue')
ax.scatter(times.mjd,phase[p,d,a,f,:],marker='+',alpha=0.3,color='black',label=label)
ax.set_xlabel('Time [mjd]')
ax.set_ylabel('Phase deviation [rad.]')
ax.legend()
filename = "{}_{}_{}_{}MHz.png".format(axes['ant'][a], axes['dir'][d], axes['pol'][p], axes['freq'][f]/1e6 )
plt.savefig(os.path.join(output_folder,filename))
plt.close('all')
def plot_data_vs_solution(datapack,output_folder, data_solset='sol000', solution_solset='posterior_sol', show_prior_uncert=False,
ant_sel=None,time_sel=None,dir_sel=None,freq_sel=None,pol_sel=None):
def _wrap(phi):
return np.angle(np.exp(1j*phi))
if isinstance(datapack,DataPack):
datapack = datapack.filename
output_folder = os.path.abspath(output_folder)
os.makedirs(output_folder,exist_ok=True)
solsets = [data_solset, solution_solset]
with DataPack(datapack,readonly=True) as datapack:
phases = []
stds = []
datapack.switch_solset(data_solset)
datapack.select(ant=ant_sel,time=time_sel,dir=dir_sel,freq=freq_sel,pol=pol_sel)
weights,axes = datapack.weights_phase
_,freqs = datapack.get_freqs(axes['freq'])
phase,_ = datapack.phase
std = np.sqrt(np.abs(1./weights))
timestamps,times = datapack.get_times(axes['time'])
phases.append(_wrap(phase))
stds.append(std)
tec_conversion = -8.4480e9/freqs[None,None,None,:,None]
datapack.switch_solset(solution_solset)
datapack.select(ant=ant_sel,time=time_sel,dir=dir_sel,freq=freq_sel,pol=pol_sel)
weights,_ = datapack.weights_tec
tec,_ = datapack.tec
std = np.sqrt(np.abs(1./weights))[:,:,:,None,:]*np.abs(tec_conversion)
phases.append(_wrap(tec[:,:,:,None,:]*tec_conversion))
stds.append(std)
for phase in phases:
for s,S in zip(phase.shape,phases[0].shape):
assert s==S
Npol,Nd,Na,Nf,Nt = phases[0].shape
fig,ax = plt.subplots()
for p in range(Npol):
for d in range(Nd):
for a in range(Na):
for f in range(Nf):
ax.cla()
###
# Data
phase = phases[0]
std = stds[0]
label = "{} {} {:.1f}MHz {}:{}".format(data_solset,axes['pol'][p], axes['freq'][f]/1e6, axes['ant'][a], axes['dir'][d])
if show_prior_uncert:
                            ax.fill_between(times.mjd,phase[p,d,a,f,:]-std[p,d,a,f,:],phase[p,d,a,f,:]+std[p,d,a,f,:],alpha=0.5,label=r'$\pm\hat{\sigma}_\phi$')#,color='blue')
ax.scatter(times.mjd,phase[p,d,a,f,:],marker='+',alpha=0.3,color='black',label=label)
###
# Solution
phase = phases[1]
std = stds[1]
label = "Solution: {}".format(solution_solset)
ax.fill_between(times.mjd,phase[p,d,a,f,:]-std[p,d,a,f,:],phase[p,d,a,f,:]+std[p,d,a,f,:],alpha=0.5,label=r'$\pm\hat{\sigma}_\phi$')#,color='blue')
ax.scatter(times.mjd,phase[p,d,a,f,:],label=label,marker='.',s=5.)
ax.set_xlabel('Time [mjd]')
ax.set_ylabel('Phase deviation [rad.]')
ax.legend()
filename = "{}_v_{}_{}_{}_{}_{}MHz.png".format(data_solset,solution_solset, axes['ant'][a], axes['dir'][d], axes['pol'][p], axes['freq'][f]/1e6 )
ax.set_ylim(-np.pi, np.pi)
plt.savefig(os.path.join(output_folder,filename))
plt.close('all')
def plot_freq_vs_time(datapack, output_folder, solset='sol000', soltab='phase', phase_wrap=True, log_scale=False,
                      vmin=None, vmax=None,
                      ant_sel=None, time_sel=None, dir_sel=None, freq_sel=None, pol_sel=None):
if isinstance(datapack,DataPack):
datapack = datapack.filename
with DataPack(datapack, readonly=True) as datapack:
datapack.switch_solset(solset)
datapack.select(ant=ant_sel,time=time_sel,dir=dir_sel,freq=freq_sel,pol=pol_sel)
obs, axes = datapack.__getattr__(soltab)
if soltab.startswith('weights_'):
obs = np.sqrt(np.abs(1./obs)) #uncert from weights = 1/var
phase_wrap=False
if 'pol' in axes.keys():
# plot only first pol selected
obs = obs[0,...]
#obs is dir, ant, freq, time
antenna_labels, antennas = datapack.get_antennas(axes['ant'])
patch_names, directions = datapack.get_sources(axes['dir'])
timestamps, times = datapack.get_times(axes['time'])
freq_labels, freqs = datapack.get_freqs(axes['freq'])
if phase_wrap:
obs = np.angle(np.exp(1j*obs))
vmin = -np.pi
vmax = np.pi
cmap = phase_cmap
else:
vmin = vmin or np.percentile(obs.flatten(),1)
vmax = vmax or np.percentile(obs.flatten(),99)
cmap = plt.cm.bone
if log_scale:
obs = np.log10(obs)
Na = len(antennas)
Nt = len(times)
Nd = len(directions)
Nf = len(freqs)
M = int(np.ceil(np.sqrt(Na)))
output_folder = os.path.abspath(output_folder)
os.makedirs(output_folder, exist_ok=True)
for k in range(Nd):
filename = os.path.join(os.path.abspath(output_folder),"{}_{}_dir_{}.png".format(solset,soltab,k))
logging.info("Plotting {}".format(filename))
fig, axs = plt.subplots(nrows=M, ncols=M, figsize=(4*M,4*M),sharex=True,sharey=True)
for i in range(M):
for j in range(M):
l = j + M*i
if l >= Na:
continue
                    im = axs[i][j].imshow(obs[k,l,:,:],origin='lower',cmap=cmap, aspect='auto',vmin=vmin,vmax=vmax,extent=(times[0].mjd*86400.,times[-1].mjd*86400.,freqs[0],freqs[-1]))
plt.tight_layout()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85,0.15,0.05, 0.7])
fig.colorbar(im,cax=cbar_ax)
plt.savefig(filename)
plt.close('all')
def plot_solution_residuals(datapack, output_folder, data_solset='sol000', solution_solset='posterior_sol',
ant_sel=None,time_sel=None,dir_sel=None,freq_sel=None,pol_sel=None):
def _wrap(phi):
return np.angle(np.exp(1j*phi))
if not isinstance(datapack,str):
datapack = datapack.filename
output_folder = os.path.abspath(output_folder)
os.makedirs(output_folder,exist_ok=True)
solsets = [data_solset, solution_solset]
with DataPack(datapack,readonly=True) as datapack:
datapack.switch_solset(data_solset)
datapack.select(ant=ant_sel,time=time_sel,dir=dir_sel,freq=freq_sel,pol=pol_sel)
phase,axes = datapack.phase
timestamps,times = datapack.get_times(axes['time'])
antenna_labels, antennas = datapack.get_antennas(axes['ant'])
patch_names, directions = datapack.get_sources(axes['dir'])
_,freqs = datapack.get_freqs(axes['freq'])
pols, _ = datapack.get_pols(axes['pol'])
Npol,Nd,Na,Nf,Nt = phase.shape
datapack.switch_solset(solution_solset)
datapack.select(ant=ant_sel,time=time_sel,dir=dir_sel,freq=freq_sel,pol=pol_sel)
tec,_ = datapack.tec
phase_pred = -8.448e9*tec[...,None,:]/freqs[:,None]
res = _wrap(_wrap(phase) - _wrap(phase_pred))
cbar = None
for p in range(Npol):
for a in range(Na):
M = int(np.ceil(np.sqrt(Nd)))
fig,axs = plt.subplots(nrows=2*M,ncols=M,sharex=True,figsize=(M*4,1*M*4),gridspec_kw = {'height_ratios':[1.5,1]*M})
fig.subplots_adjust(wspace=0., hspace=0.)
fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.875, 0.15, 0.025, 0.7])
vmin = -1.
vmax = 1.
norm = plt.Normalize(vmin, vmax)
for row in range(0,2*M,2):
for col in range(M):
ax1 = axs[row][col]
ax2 = axs[row+1][col]
d = col + row//2*M
if d >= Nd:
continue
img = ax1.imshow(res[p,d,a,:,:],origin='lower',aspect='auto',
extent=(times[0].mjd*86400.,times[-1].mjd*86400.,freqs[0],freqs[-1]),
cmap=plt.cm.jet, norm = norm)
ax1.text(0.05, 0.95, axes['dir'][d], horizontalalignment='left',verticalalignment='top', transform=ax1.transAxes,backgroundcolor=(1.,1.,1., 0.5))
ax1.set_ylabel('frequency [Hz]')
ax1.legend()
mean = res[p,d,a,:,:].mean(0)
t = np.arange(len(times))
ax2.plot(times.mjd*86400, mean,label=r'$\mathbb{E}_\nu[\delta\phi]$')
std = res[p,d,a,:,:].std(0)
ax2.fill_between(times.mjd*86400, mean - std, mean + std,alpha=0.5,label=r'$\pm\sigma_{\delta\phi}$')
ax2.set_xlabel('Time [mjs]')
ax2.set_xlim(times[0].mjd*86400.,times[-1].mjd*86400.)
ax2.set_ylim(-np.pi,np.pi)
# ax2.legend()
fig.colorbar(img, cax=cbar_ax, orientation='vertical', label='phase dev. [rad]')
filename = "{}_v_{}_{}_{}.png".format(data_solset,solution_solset, axes['ant'][a], axes['pol'][p])
plt.savefig(os.path.join(output_folder,filename))
plt.close('all')
def test_voronoi():
from scipy.spatial import Voronoi, voronoi_plot_2d
import pylab as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import numpy as np
points = np.random.uniform(size=[10,2])
v = Voronoi(points)
nodes = v.vertices
regions = v.regions
ax = plt.subplot()
patches = []
for reg in regions:
if len(reg) < 3:
continue
poly = Polygon(np.array([nodes[i] for i in reg]),closed=False)
patches.append(poly)
p = PatchCollection(patches)
p.set_array(np.random.uniform(size=len(patches)))
ax.add_collection(p)
#plt.colorbar(p)
ax.scatter(points[:,0],points[:,1])
ax.set_xlim([np.min(points[:,0]),np.max(points[:,0])])
ax.set_ylim([np.min(points[:,1]),np.max(points[:,1])])
plt.show()
def test_nearest():
from scipy.spatial import ConvexHull, cKDTree
import pylab as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import numpy as np
points = np.random.uniform(size=[42,2])
k = cKDTree(points)
dx = np.max(points[:,0]) - np.min(points[:,0])
dy = np.max(points[:,1]) - np.min(points[:,1])
N = int(min(max(100,points.shape[0]*2),500))
x = np.linspace(np.min(points[:,0])-0.1*dx,np.max(points[:,0])+0.1*dx,N)
y = np.linspace(np.min(points[:,1])-0.1*dy,np.max(points[:,1])+0.1*dy,N)
X,Y = np.meshgrid(x,y,indexing='ij')
points_i = np.array([X.flatten(),Y.flatten()]).T
dist,i = k.query(points_i,k=1)
patches = []
for group in range(points.shape[0]):
points_g = points_i[i==group,:]
hull = ConvexHull(points_g)
nodes = points_g[hull.vertices,:]
poly = Polygon(nodes,closed=False)
patches.append(poly)
ax = plt.subplot()
p = PatchCollection(patches)
p.set_array(np.random.uniform(size=len(patches)))
ax.add_collection(p)
#plt.colorbar(p)
ax.scatter(points[:,0],points[:,1])
ax.set_xlim([np.min(points_i[:,0]),np.max(points_i[:,0])])
ax.set_ylim([np.min(points_i[:,1]),np.max(points_i[:,1])])
ax.set_facecolor('black')
plt.show()
def test():
from ionotomo.astro.real_data import generate_example_datapack
datapack = generate_example_datapack(Ndir=10,Nant=10,Ntime=20)
datapack.phase = np.random.uniform(size=datapack.phase.shape)
dp = DatapackPlotter(datapack='../data/rvw_datapack_full_phase_dec27_smooth.hdf5')
    dp.plot(ant_sel=slice(50, 51), dir_sel=None, time_sel=slice(0, 1), labels_in_radec=True, show=True)
# animate_datapack('../data/rvw_datapack_full_phase_dec27_smooth.hdf5',
# 'test_output',num_processes=1,observable='phase',labels_in_radec=True,show=True)
if __name__=='__main__':
test()
```
#### File: bayes_tec/tests/test_solvers.py
```python
from bayes_tec.solvers.phase_only_solver import PhaseOnlySolver
from bayes_tec.datapack import DataPack
import numpy as np
import pylab as plt
def test_get_coords():
datapack = DataPack('../../scripts/data/killms_datapack_3.hdf5', readonly=True)
with datapack:
datapack.select(time=slice(0,1000,1),
ant='RS210HBA',
pol=slice(0,1,1))
phase, axes = datapack.phase
_,times = datapack.get_times(axes['time'])
_, directions = datapack.get_sources(axes['dir'])
_, freqs = datapack.get_freqs(axes['freq'])
Nt, Nd, Nf = len(times), len(directions), len(freqs)
indices = np.array([np.random.randint(Nd,size=1000),
np.random.randint(Nf,size=1000),
np.random.randint(Nt,size=1000)]).T
ra = directions.ra.deg[indices[:,0]]
dec = directions.dec.deg[indices[:,0]]
time = times.mjd[indices[:,2]]*86400. - times[0].mjd*86400.
freq = freqs[indices[:,1]]
phase = phase[0,indices[:,0],0,indices[:,1], indices[:,2]][...,None]
solver = PhaseOnlySolver('run_dir_diagnostic', datapack)
kwargs = {'ant_sel':"RS210HBA",
'time_sel':slice(0,1000,1),
'pol_sel':slice(0,1,1),
'reweight_obs':False,
'coord_file':"coords.hdf5",
'minibatch_size':32,
'tec_scale':0.005}
solver.output_solset = 'posterior_sol'
solver.output_screen_solset = 'screen_sol'
data_shape, build_params = solver._prepare_data(datapack,**kwargs)
yv, f, x, y = solver._get_data(indices, [Nd, Nf, Nt])
assert np.isclose(ra, x[:,0]).all()
assert np.isclose(dec, x[:,1]).all()
assert np.isclose(time, x[:,2]).all()
assert np.isclose(phase, y).all()
assert np.isclose(freq, f[:,0]).all()
assert np.all(yv < 2*np.pi)
if __name__ == '__main__':
    test_get_coords()
```
#### File: bayes_tec/utils/stat_utils.py
```python
import numpy as np
from scipy.optimize import fmin, minimize
import gpflow as gp  # assumed: gamma_prior below relies on gpflow 1.x's gp.priors.Gamma
def log_normal_solve(mean,std):
mu = np.log(mean) - 0.5*np.log((std/mean)**2 + 1)
sigma = np.sqrt(np.log((std/mean)**2 + 1))
return mu, sigma
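# Quick check of log_normal_solve: with mu, sigma = log_normal_solve(m, s) the log-normal
# moments are recovered, i.e. exp(mu + sigma**2 / 2) == m and
# (exp(sigma**2) - 1) * exp(2*mu + sigma**2) == s**2.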
def log_normal_solve_fwhm(a,b,D=0.5):
assert b > a
lower = np.log(a)
upper = np.log(b)
d = upper - lower #2 sqrt(2 sigma**2 ln(1/D))
sigma2 = 0.5*(0.5*d)**2/np.log(1./D)
s = upper + lower #2 (mu - sigma**2)
mu = 0.5*s + sigma2
return mu, np.sqrt(sigma2)
def gamma_prior(mode,std):
"""
In general you should prefer the log_normal prior.
"""
a = std/mode#sqrt(k)/(k-1)
shape = (2* a**2 + np.sqrt((4 * a**2 + 1)/a**4) * a**2 + 1)/(2 *a**2)
scale = std/np.sqrt(shape)
return gp.priors.Gamma(shape,scale)
```
#### File: bayes_tec/utils/testing_utils.py
```python
from ..datapack import DataPack
from ..logging import logging
from .data_utils import make_coord_array
import numpy as np
import os
import astropy.time as at
def make_example_datapack(Nd,Nf,Nt,pols=None, time_corr=50.,dir_corr=0.5*np.pi/180.,tec_scale=0.02,tec_noise=1e-3,name='test.hdf5',clobber=False):
logging.info("=== Creating example datapack ===")
name = os.path.abspath(name)
if os.path.isfile(name) and clobber:
os.unlink(name)
datapack = DataPack(name,readonly=False)
with datapack:
datapack.add_antennas()
datapack.add_sources(np.random.normal(np.pi/4.,np.pi/180.*2.5,size=[Nd,2]))
_, directions = datapack.sources
_, antennas = datapack.antennas
ref_dist = np.linalg.norm(antennas - antennas[0:1,:],axis=1)[None,None,:,None]#1,1,Na,1
times = at.Time(np.linspace(0,Nt*8,Nt)[:,None],format='gps').mjd*86400.#mjs
freqs = np.linspace(120,160,Nf)*1e6
if pols is not None:
use_pols = True
assert isinstance(pols,(tuple,list))
else:
use_pols = False
pols = ['XX']
        tec_conversion = -8.4480e9/freqs  # Nf; TEC -> phase constant, consistent with the rest of the package
X = make_coord_array(directions/dir_corr, times/time_corr)# Nd*Nt, 3
X2 = np.sum((X[:,:,None] - X.T[None,:,:])**2, axis=1)#N,N
K = tec_scale**2 * np.exp(-0.5*X2)
L = np.linalg.cholesky(K + 1e-6*np.eye(K.shape[0]))#N,N
Z = np.random.normal(size=(K.shape[0],len(pols)))#N,npols
tec = np.einsum("ab,bc->ac",L,Z)#N,npols
tec = tec.reshape((Nd,Nt,len(pols))).transpose((2,0,1))#Npols,Nd,Nt
tec = tec[:,:,None,:]*(0.2+ref_dist/np.max(ref_dist))#Npols,Nd,Na,Nt
# print(tec)
tec += tec_noise*np.random.normal(size=tec.shape)
phase = tec[:,:,:,None,:]*tec_conversion[None,None,None,:,None]##Npols,Nd,Na,Nf,Nt
# print(phase)
phase = np.angle(np.exp(1j*phase))
if not use_pols:
phase = phase[0,...]
pols = None
datapack.add_freq_dep_tab('phase',times=times[:,0],freqs=freqs,pols=pols,vals=phase)
datapack.phase = phase
return datapack
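# Usage sketch (assumed defaults): writes ./test.hdf5 with a freq-dependent 'phase' soltab
# whose TEC screen is drawn from a squared-exponential kernel over (direction, time).
# dp = make_example_datapack(Nd=10, Nf=5, Nt=20, pols=['XX'], clobber=True)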
``` |
{
"source": "Joshuaalbert/born_rime",
"score": 2
} |
#### File: examples/comparison/main.py
```python
import numpy as np
import astropy.units as au
from astropy.units import Quantity
import pylab as plt
from born_rime.fourier import fourier, inv_fourier, fft_freqs
from born_rime.greens import two_dim_g, two_dim_G
from born_rime.potentials import partial_blockage, pad_with_absorbing_boundary_conditions
def main():
"""
First we compare convergence of Born series to exact solution on a partially blocked bar:
| * source
|
| _________________
| | n = 1 - dn
| |________________
|
|
| x receiver
|(0,0)
"""
nu = 50e6 / au.s
x, z, k2, k02 = partial_blockage(1000, nu, True)
k2, m, (x, z) = pad_with_absorbing_boundary_conditions(k2, k02, 1, x, z, dn_max=0.001)
# corner_indices = [0,0]
scatter_potential = (k2 - k02)/k02
plt.imshow(np.abs(scatter_potential.T.value), interpolation='nearest', origin='lower',
extent=(x.min().value, x.max().value, z.min().value, z.max().value),
cmap='bone')
plt.plot(Quantity([x[m[0]], x[-m[0]], x[-m[0]], x[m[0]], x[m[0]]]).value,
Quantity([z[m[1]], z[m[1]], z[-m[1]], z[-m[1]], z[m[1]]]).value, c='red')
plt.title(r'Sinusoidal partial blockage potential ($k^2(\mathbf{{x}}) - k_0^2$) at {} with boundary'.format(
nu.to(au.MHz)))
plt.colorbar(label='potential [{}]'.format(scatter_potential.unit))
plt.show()
X,Z = np.meshgrid(x,z,indexing='ij')
R = np.sqrt((X-(-300.*au.m))**2 + (Z-(0*au.m))**2)
    # Point-source alternative kept for reference: E_i = np.exp(1j*np.sqrt(k02)*R)/(1*au.m**2 + R**2)
    E_i = np.exp(1j*np.sqrt(k02)*Z)  # incident plane wave propagating along z
g = two_dim_g(np.sqrt(k02), X, Z)
plt.imshow((np.abs(g).value), interpolation='nearest', origin='lower',
extent=(x.min().value, x.max().value, z.min().value, z.max().value),
cmap='bone')
plt.colorbar()
plt.plot(Quantity([x[m[0]], x[-m[0]], x[-m[0]], x[m[0]], x[m[0]]]).value,
Quantity([z[m[1]], z[m[1]], z[-m[1]], z[-m[1]], z[m[1]]]).value, c='red')
plt.title('g')
plt.show()
G_numeric = fourier(g, x, z)
# sx, sy = fft_freqs(x,z)
# Sx, Sy = np.meshgrid(sx,sy, indexing='ij')
# G_numeric = two_dim_G(np.sqrt(k02),Sx, Sy)
n = G_numeric.shape[0]
pad_size = 1#int((n*0.6)/2.)
plt.imshow((np.abs(G_numeric).value), interpolation='nearest', origin='lower',
extent=(x.min().value, x.max().value, z.min().value, z.max().value),
cmap='bone')
plt.colorbar()
plt.plot(Quantity([x[m[0]], x[-m[0]], x[-m[0]], x[m[0]], x[m[0]]]).value,
Quantity([z[m[1]], z[m[1]], z[-m[1]], z[-m[1]], z[m[1]]]).value, c='red')
plt.title('G')
plt.show()
G_padded = np.pad(G_numeric,pad_size, mode='constant')
x_padded = np.pad(x, pad_size, mode='linear_ramp')
z_padded = np.pad(z, pad_size, mode='linear_ramp')
E_born = E_i
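    # The loop below applies successive orders of the Born series,
    #   E^(n+1) = E_i + k0^2 * IFT[ G(s) * FT[ V(x) * E^(n) ] ],  with V = (k^2 - k0^2) / k0^2,
    # padding the convolution to reduce wrap-around from the FFT.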
for i in range(1, 4):
source = scatter_potential * E_born
source_padded = np.pad(source, pad_size)
f_source_padded = fourier(source_padded, x_padded, z_padded)
E_born = E_i + k02*inv_fourier(G_padded * f_source_padded, x_padded, z_padded)[pad_size:-pad_size,pad_size:-pad_size]
E_s = E_born - E_i
plt.imshow((np.abs(E_s.T).value), interpolation='nearest', origin='lower',
extent=(x.min().value, x.max().value, z.min().value, z.max().value),
cmap='bone')
plt.colorbar()
plt.plot(Quantity([x[m[0]], x[-m[0]], x[-m[0]], x[m[0]], x[m[0]]]).value,
Quantity([z[m[1]], z[m[1]], z[-m[1]], z[-m[1]], z[m[1]]]).value, c='red')
plt.title('Born-{}'.format(i))
plt.show()
# plt.plot(x, np.abs(E_born.T[0,:]))
# # plt.xscale('log')
# plt.show()
#
# plt.plot(x, np.angle(E_born.T[0, :]))
# plt.show()
_vis = E_born.T[200, :, None] * E_born.T[200, None, :].conj()
vis = [np.mean(np.diagonal(_vis, i)) for i in range(x.size)]
b = x[:, None] - x[None, :]
plt.plot(b[0, :], np.abs(vis))
plt.title('|vis|')
plt.show()
plt.plot(b[0, :], np.angle(vis))
plt.title('Arg(vis)')
plt.show()
pass
if __name__ == '__main__':
main()
```
#### File: examples/scattered_planewave/main.py
```python
import numpy as np
import astropy.units as au
from astropy.units import Quantity
import pylab as plt
from timeit import default_timer
from matplotlib.animation import FuncAnimation
from born_rime.fourier import fourier, inv_fourier, fft_freqs
from born_rime.greens import two_dim_g, two_dim_G
from born_rime.potentials import partial_blockage, pad_with_vacuum_conditions, single_blob
from born_rime.series import born_series
from born_rime.plotting import plot_2d_image, add_colorbar
au.set_enabled_equivalencies(au.dimensionless_angles())
def plot_E_s(arrays, x, y, x0, y0, corner_indices, save_name=None):
figsize = 6
fig, ax = plt.subplots(1, 1, figsize=(figsize, figsize))
arrays = [np.abs(E_s)/np.abs(E_s).mean() for E_s in arrays]
vmin = min([E_s.min().value for E_s in arrays])
vmax = max([E_s.max().value for E_s in arrays])
norm = plt.Normalize(vmin, vmax)
to_colour = lambda w: plt.cm.jet(norm(w))
def _get_artists(artists, start):
_, img = plot_2d_image(arrays[start], x, y, title="Scattered electric field", corner_indices=corner_indices,
colorizer=to_colour,ax=ax)
sc = ax.scatter(x0[start].value, y0[start].value,c='green', label='source')
ax.set_xlim(x.min().value, x.max().value)
ax.set_ylim(y.min().value, y.max().value)
ax.legend()
artists.append(img)
artists.append(sc)
return artists
def init():
start = 0
ax.clear()
artists = []
artists = _get_artists(artists, start)
mappable = plt.cm.ScalarMappable(norm=norm, cmap=plt.cm.jet)
add_colorbar(mappable, label='rel. electric field amplitude [{}]'.format(arrays[0].unit), ax=ax)
return artists
def update(start):
ax.clear()
artists = []
artists = _get_artists(artists, start)
return artists
ani = FuncAnimation(fig, update,
frames=range(1, len(arrays)),
init_func=init, blit=True)
ani.save(save_name, fps=5.)#len(arrays) / 6.)
def _test_plot():
arrays = [np.random.uniform(size=(100,100)) for i in range(50)]
x = np.random.uniform(size=100)
y = np.random.uniform(size=100)
plot_E_s(Quantity(arrays), Quantity(x), Quantity(y), Quantity(np.random.uniform(size=50)),
Quantity(np.random.uniform(size=50)), save_name='example.mp4')
def main():
"""
First we compare convergence of Born series to exact solution on a partially blocked bar:
| * source
|
| _________________
| | n = 1 - dn
| |________________
|
|
| x receiver
|(0,0)
"""
# x = np.linspace(-10., 10., 1000) * au.km
# f = np.cos(2 * np.pi * x / (5. * au.km)) / au.m ** 3
# plt.plot(x, f)
# plt.xlabel('x [{}]'.format(x.unit))
# plt.ylabel('y [{}]'.format(f.unit))
# plt.show()
#
# F = fourier(f, x.value)
# (s,) = fft_freqs(x)
# plt.plot(s, np.abs(F))
# plt.xlim(-2., 2.)
# plt.xlabel('s [{}]'.format(s.unit))
# plt.show()
nu = 150e6 / au.s
x, z, k2, k02 = partial_blockage(1000, nu, True)
# x, z, k2, k02 = single_blob(1000, nu, 10.*
x_medium, z_medium = x, z
pad_size = int(x.size*0.6/2)
k2, m, (x, z) = pad_with_vacuum_conditions(k2, k02, pad_size, x, z)
X, Z = np.meshgrid(x, z, indexing='ij')
g = two_dim_g(np.sqrt(k02), X, Z)
E_s = []
x0 = []
z0 = []
for i in range(100):
_x0 = x_medium.min() + (x_medium.max() - x_medium.min())*np.sin(i/100.*np.pi/1.2)
_z0 = z_medium.min() + (z_medium.max() - z_medium.min())*i/100.
E_s.append(simulate_E_s(_x0, _z0, X, Z, k02, k2, x, z, g))
x0.append(_x0)
z0.append(_z0)
plot_E_s(E_s, x, z, x0, z0, m, save_name='moving_point_source.mp4')
###
# Plot things
plot_2d_image(np.abs(g),x, z, title='g', corner_indices=m)
# plot_2d_image(np.abs(results['G']),x, z, title='G',corner_indices=m)
# plot_2d_image(np.abs(results['scatter_potential']), x, z, colorbar_name='potential',
# title=r'Sinusoidal partial blockage potential ($k^2(\mathbf{{x}}) - k_0^2$) at {} with boundary'.format(
# nu.to(au.MHz)),
# corner_indices=m)
# for i, E_s in enumerate(results['E_s']):
# plot_2d_image(np.log(np.abs(E_s)), x, z, colorbar_name='log abs(electric field)',
# title="E_s {}".format(i),
# corner_indices=m)
# E_b = E_s.T[-m[1], m[0]:-m[0]]
# plt.plot(np.abs(E_b))
# plt.show()
# vis = E_b[:, None] * E_b[None,:].conj()
# plot_2d_image(np.abs(vis), x[m[0]:-m[0]], z[m[1]:-m[1]], colorbar_name='vis amplitude')
def simulate_E_s(x0, z0, X, Z, k02, k2, x, z, g):
R = np.sqrt((X - x0) ** 2 + (Z - z0) ** 2)
E_i = np.exp(1j * np.sqrt(k02) * R) / (1 * au.m ** 2 + R ** 2)
t0 = default_timer()
E_born, results = born_series(E_i, g, k2, k02, x, z, N=4, pad=300)
print(x0, z0, default_timer() - t0)
return results['E_s'][-1]
if __name__ == '__main__':
# test_plot()
main()
```
#### File: born_rime/born_rime/greens.py
```python
import numpy as np
import pylab as plt
from scipy.special import hankel1
from born_rime.fourier import fourier, ifft_freqs, inv_fourier
def _get_dx(X, i):
i0 = [0] * len(X.shape)
i0[i] = 0
i1 = [0] * len(X.shape)
i1[i] = 1
return X[tuple(i1)] - X[tuple(i0)]
def two_dim_g(k0, X, Y):
r = np.sqrt(X**2 + Y**2)
g = 0.25j * hankel1(0., k0 * r)
max_pix = 0j
dx = _get_dx(X,0)
dy = _get_dx(Y,1)
for _ in range(100):
r = np.sqrt((dx*np.random.uniform(-0.5, 0.5)) ** 2 + (dy*np.random.uniform(-0.5, +0.5)) ** 2)
max_pix += 0.25j * hankel1(0., k0 * r)
max_pix = max_pix/100.
g = np.where(np.isnan(g) | np.isinf(g), max_pix, g)
return g
def n_dim_G(k0, *S):
k2 = 4.*np.pi**2 * sum([s**2 for s in S])
diff = (k2 - k0**2)
eps = 4.*np.pi**2 * sum([(5.*_get_dx(s, i))**2 for i, s in enumerate(S)])#empirical fudge
return diff/(diff**2 + eps**2)
def two_dim_G(k0, Sx, Sy):
return n_dim_G(k0,Sx, Sy)
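# Editor's note: two_dim_g evaluates the outgoing 2-D Helmholtz Green's function
# g(r) = (i/4) * H0^(1)(k0 * r), with the r -> 0 singularity patched by averaging
# random sub-pixel samples; n_dim_G returns a regularised 1 / (|2*pi*s|^2 - k0^2)
# in the Fourier domain, with an empirical eps set by the grid spacing.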
def test_two_dim_greens():
wavelength = 1.
km = 2.*np.pi/wavelength
x = np.linspace(-100.,100., 1001) * wavelength
X,Y = np.meshgrid(x,x,indexing='ij')
g = two_dim_g(km, X,Y)
plt.imshow(np.abs(g))
plt.colorbar()
plt.title("g_true abs")
plt.show()
G_numeric = fourier(g, x, x)
plt.imshow(np.abs(G_numeric))
plt.colorbar()
plt.title("G_num abs")
plt.show()
sx,sy = ifft_freqs(x, x)
Sx, Sy = np.meshgrid(sx,sy,indexing='ij')
G_analytic = two_dim_G(km,Sx,Sy)
plt.imshow(np.abs(G_analytic))
plt.colorbar()
plt.title("G_true abs")
plt.show()
g_num = inv_fourier(G_analytic, x, x)
plt.imshow(np.abs(g_num))
plt.colorbar()
plt.title('g_num abs')
plt.show()
```
#### File: born_rime/born_rime/plotting.py
```python
import pylab as plt
from astropy.units import Quantity
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_2d_image(a, x, y, colorbar_name=None, title=None, xlabel='x', ylabel='y', cmap='bone', colorizer=None, corner_indices=None, ax=None, save_name=None):
if ax is None:
fig, ax = plt.subplots(1,1, figsize=(5,5))
    if colorizer is None:
        norm = plt.Normalize(a.min().value, a.max().value)
        colorizer = lambda v: plt.cm.get_cmap(cmap)(norm(v))
img = ax.imshow(colorizer(a.T.value), interpolation='nearest', origin='lower',
extent=(x.min().value, x.max().value, y.min().value, y.max().value))
if corner_indices is not None:
ax.plot(Quantity([x[corner_indices[0]], x[-corner_indices[0]], x[-corner_indices[0]], x[corner_indices[0]], x[corner_indices[0]]]).value,
Quantity([y[corner_indices[1]], y[corner_indices[1]], y[-corner_indices[1]], y[-corner_indices[1]], y[corner_indices[1]]]).value, c='red')
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel('{} [{}]'.format(xlabel, x.unit))
if ylabel is not None:
ax.set_ylabel('{} [{}]'.format(ylabel, y.unit))
    if colorbar_name is not None:
        plt.colorbar(img, ax=ax, label='{} [{}]'.format(colorbar_name, a.unit))
if save_name is not None:
plt.savefig(save_name)
return ax, img
def add_colorbar(mappable, label, ax=None):
if ax is None:
last_axes = plt.gca()
ax = mappable.axes
else:
last_axes = ax
fig = ax.figure
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = fig.colorbar(mappable, cax=cax, label=label)
fig.sca(last_axes)
return cbar
```
#### File: born_rime/born_rime/potentials.py
```python
import numpy as np
from scipy.special import logsumexp, gammaln
from astropy import constants, units as au
from astropy.units import Quantity
Gauss = 1e-4 * au.T
au.set_enabled_equivalencies(au.dimensionless_angles())
def pad_with_absorbing_boundary_conditions(k2, k02, N, *coords, dn_max=0.05):
if dn_max is None:
dn_max = np.max(np.abs(np.sqrt(k2 / k02) - 1.))
print("Using the dn_max={}".format(dn_max))
alpha = np.abs(dn_max)*np.sqrt(k02)#/(np.pi*2.)
l = N / alpha
print("Extinction alpha={}".format(alpha))
print("Extinction l={}".format(l))
def log_Pn(alpha, x, N):
log_res = -np.inf
for n in range(N + 1):
log_res = np.logaddexp(n * (np.log(alpha * x)) - gammaln(n + 1.), log_res)
return np.where(x > 0, log_res, 0.)
    def evaluate_k2(alpha, x):
        # NOTE: the complex absorbing profile k02 + alpha**2 - 2j*alpha*sqrt(k02) is currently
        # disabled; the padded region is filled with the vacuum wavenumber instead
        # (the polynomial absorbing profile lives in _evaluate_k2 below).
        return k02*np.ones(x.shape)
def _evaluate_k2(alpha, x, N):
return alpha**2 * np.exp(np.log(N - alpha * x + 2j * np.sqrt(k02) * x) + (N - 1) * (np.log(alpha * x))
- log_Pn(alpha, x, N) - gammaln(N + 1.)) + k02
def _add_other_dims(v, shape, i):
"""
[
Args:
v: [D]
shape: (s0,s1,s2,...)
i: int
Returns: same shape as `shape` except ith dim which is D.
"""
dims = list(range(len(shape)))
del dims[i]
v = np.expand_dims(v, dims)
grow = list(shape)
grow[i] = 1
return np.tile(v,grow)
m = []
out_coords = []
for i,x in enumerate(coords):
dx = x[1] - x[0]
M = int(l / dx) + 1
m.append(M)
print("Dimension {} padded by {}".format(i, M))
x_pad = np.arange(1,M+1)*dx
k2_pad = evaluate_k2(alpha, x_pad)
k2_before = _add_other_dims(k2_pad[::-1], k2.shape, i)
k2_after = _add_other_dims(k2_pad, k2.shape, i)
k2 = np.concatenate([k2_before, k2, k2_after], axis=i)
x_out = np.concatenate([x[0] - np.arange(1,M+1)[::-1]*dx, x, x[-1]+np.arange(1,M+1)*dx])
out_coords.append(x_out)
return k2, m, tuple(out_coords)
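# The padding helpers return (k2_padded, per-axis pad widths m, extended coordinate arrays),
# so callers can outline the original domain via x[m[0]], x[-m[0]], z[m[1]], z[-m[1]], as the
# example scripts do when drawing the red boundary rectangle.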
def pad_with_vacuum_conditions(k2, k02, pad_size, *coords):
def evaluate_k2(x):
return k02*np.ones(x.shape)
def _add_other_dims(v, shape, i):
"""
[
Args:
v: [D]
shape: (s0,s1,s2,...)
i: int
Returns: same shape as `shape` except ith dim which is D.
"""
dims = list(range(len(shape)))
del dims[i]
v = np.expand_dims(v, dims)
grow = list(shape)
grow[i] = 1
return np.tile(v,grow)
m = []
out_coords = []
for i,x in enumerate(coords):
print("Dimension {} padded by {}".format(i, pad_size))
dx = x[1] - x[0]
x_pad = np.arange(1,pad_size+1)*dx
k2_pad = evaluate_k2(x_pad)
m.append(pad_size)
k2_before = _add_other_dims(k2_pad[::-1], k2.shape, i)
k2_after = _add_other_dims(k2_pad, k2.shape, i)
k2 = np.concatenate([k2_before, k2, k2_after], axis=i)
x_out = np.concatenate([x[0] - np.arange(1,pad_size+1)[::-1]*dx, x, x[-1]+np.arange(1, pad_size+1)*dx])
out_coords.append(x_out)
return k2, m, tuple(out_coords)
def appleton_hartree(ne, nu):
def _plasma_freqency_squared(fed):
omega_p_squared = fed * (constants.e.si ** 2 / constants.eps0 / constants.m_e)
return omega_p_squared
omega_0_squared = _plasma_freqency_squared(ne)
dn = omega_0_squared / (2 * np.pi * nu) ** 2
return 1. - dn
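# Editor's note: in the unmagnetised, collisionless limit Appleton-Hartree gives
# n^2 = 1 - (omega_p / omega)^2; appleton_hartree returns that 1 - X expression directly,
# which matches n to first order when X << 1. For example, ne = 2e12 m^-3 gives a plasma
# frequency of roughly 12.7 MHz, so at 150 MHz X ~ 7.2e-3 and the returned value is ~ 0.993.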
def partial_blockage(N, nu, sinusoidal_blockage=False):
"""
| * source
|
| _________________
| | n = 1 - dn
| |________________
|
|
| x receiver
|(0,0)
Args:
x:
z:
nu:
Returns:
"""
ne = 2e12 / au.m ** 3
wavelength = constants.c.si / nu
x = np.arange(-N//2, N-N//2,1) * 0.25 * wavelength
z = np.arange(-N//2, N-N//2,1) * 0.25 * wavelength
n_ionosphere = appleton_hartree(ne, nu)
k0 = 2. * np.pi / wavelength
X, Z = np.meshgrid(x, z, indexing='ij')
z_bar_bottom = z.min() + 0.5 * (z.max() - z.min())
z_bar_top = z_bar_bottom + 10. * wavelength
x_bar_left = x.min() + 0. * (x.max() - x.min())
where_bar = (X > x_bar_left) & (Z > z_bar_bottom) & (Z < z_bar_top)
if sinusoidal_blockage:
refractive_index = np.where(where_bar, 1. - (1. - n_ionosphere) * np.cos(2 * np.pi * X / (10. * wavelength)),
1.)
else:
refractive_index = np.where(where_bar, n_ionosphere, 1.)
k2 = 4. * np.pi ** 2 * refractive_index ** 2 / wavelength ** 2
return x, z, k2, k0 ** 2
def single_blob(N, nu, l):
"""
| * source
|
| _________________
| | n = 1 - dn
| |________________
|
|
| x receiver
|(0,0)
Args:
x:
z:
nu:
Returns:
"""
ne = 2e12 / au.m ** 3
wavelength = constants.c.si / nu
x = np.arange(-N//2, N-N//2,1) * 0.25 * wavelength
z = np.arange(-N//2, N-N//2,1) * 0.25 * wavelength
n_ionosphere = appleton_hartree(ne, nu)
k0 = 2. * np.pi / wavelength
X, Z = np.meshgrid(x, z, indexing='ij')
z_blob = z.min() + 0.5 * (z.max() - z.min())
x_blob = x.min() + 0.5 * (x.max() - x.min())
refractive_index = (n_ionosphere - 1) * np.exp(-0.5*((X-x_blob)**2 + (Z-z_blob)**2)/l**2) + 1.
k2 = 4. * np.pi ** 2 * refractive_index ** 2 / wavelength ** 2
return x, z, k2, k0 ** 2
def test_partial_blockage():
import pylab as plt
nu = 100e6 / au.s
N = 1000
x, z, k2, k02 = partial_blockage(N, nu)
scattering_potential = k2 - k02
plt.imshow(scattering_potential.T.value, interpolation='nearest', origin='lower',
extent=(x.min().value, x.max().value, z.min().value, z.max().value),
cmap='bone')
plt.title(r'Partial blockage potential ($k^2(\mathbf{{x}}) - k_0^2$) at {}'.format(nu.to(au.MHz)))
plt.colorbar(label='potential [{}]'.format(scattering_potential.unit))
plt.show()
x, z, k2, k02 = partial_blockage(N, nu, sinusoidal_blockage=True)
scattering_potential = k2 - k02
plt.imshow(scattering_potential.T.value, interpolation='nearest', origin='lower',
extent=(x.min().value, x.max().value, z.min().value, z.max().value),
cmap='bone')
plt.title(r'Sinusoidal partial blockage potential ($k^2(\mathbf{{x}}) - k_0^2$) at {}'.format(nu.to(au.MHz)))
plt.colorbar(label='potential [{}]'.format(scattering_potential.unit))
plt.show()
k2, m, (x,z) = pad_with_absorbing_boundary_conditions(k2, k02, 4, x, z, dn_max=0.01)
scattering_potential = k2 - k02
plt.imshow(np.abs(scattering_potential.T.value), interpolation='nearest', origin='lower',
extent=(x.min().value, x.max().value, z.min().value, z.max().value),
cmap='bone')
print(x)
plt.plot(Quantity([x[m[0]], x[-m[0]], x[-m[0]], x[m[0]], x[m[0]]]).value, Quantity([z[m[1]], z[m[1]],z[-m[1]],z[-m[1]],z[m[1]]]).value, c='red')
plt.title(r'Sinusoidal partial blockage potential ($k^2(\mathbf{{x}}) - k_0^2$) at {} with boundary'.format(nu.to(au.MHz)))
plt.colorbar(label='potential [{}]'.format(scattering_potential.unit))
plt.show()
``` |
{
"source": "Joshuaalbert/h5parm",
"score": 2
} |
#### File: h5parm/h5parm/datapack.py
```python
import tables as tb
import os
import numpy as np
import astropy.units as au
import astropy.time as at
import astropy.coordinates as ac
import sys
import time
import itertools
import re
import logging
logger = logging.getLogger(__name__)
from h5parm.maintenance import deprecated
def _load_array_file(array_file):
'''Loads a csv where each row is x,y,z in geocentric ITRS coords of the antennas'''
    types = np.dtype({'names': ['station', 'X_ITRS', 'Y_ITRS', 'Z_ITRS'],
                      'formats': ['S16', np.double, np.double, np.double]})
d = np.genfromtxt(array_file, comments='#', delimiter=',', dtype=types)
labels = np.array(d['station'].astype(str))
locs = ac.SkyCoord(x=d['X_ITRS'] * au.m, y=d['Y_ITRS'] * au.m, z=d['Z_ITRS'] * au.m, frame='itrs')
Nantenna = int(np.size(d['X_ITRS']))
diameters = None
return np.array(labels).astype(np.str_), locs.cartesian.xyz.to(au.m).value.transpose()
def update_h5parm(old_h5parm, new_h5parm):
"""
Clones an old H5parm typically created with LoSoTO, that only has readonly access.
:param old_h5parm:
:param new_h5parm:
:return:
"""
logger.info("Updating {}".format(old_h5parm))
select = dict(ant=None, time=None, dir=None, freq=None, pol=None)
old = DataPack(old_h5parm, readonly=True)
new = DataPack(new_h5parm, readonly=False)
logger.info("Created {}".format(new_h5parm))
solsets = old.solsets
for solset in solsets:
old.current_solset = solset
old.select(**select)
antenna_labels, antennas = old.antennas
patch_names, directions = old.directions
# Sometimes the antennas are not set properly in the original datapack
if solset in new.solsets:
new.delete_solset(solset)
if np.sum(antennas) == 0.:
new.add_solset(solset,
array_file=DataPack.lofar_array,
directions=directions,
patch_names=patch_names)
else:
new.add_solset(solset,
antenna_labels=antenna_labels,
antennas=antennas,
directions=directions,
patch_names=patch_names)
new.current_solset = solset
soltabs = old.soltabs
for soltab in soltabs:
if soltab in new.soltabs:
new.delete_soltab(soltab)
axes = {k: v for (v, k) in zip(*old.soltab_axes(soltab))}
antenna_labels, antennas = old.get_antennas(axes['ant'])
patch_names, directions = old.get_directions(axes['dir'])
timestamps, times = old.get_times(axes['time'])
pol_labels, pols = old.get_pols(axes['pol'])
vals, _ = old.get_soltab(soltab, weight=False)
weight_vals, _ = old.get_soltab(soltab, weight=True)
if 'freq' in axes.keys():
freq_labels, freqs = old.get_freqs(axes['freq'])
new.add_soltab(soltab, values=vals, weights=weight_vals, weightDtype='f16', time=times.mjd * 86400.,
pol=pol_labels,
ant=antenna_labels,
dir=patch_names, freq=freqs)
else:
new.add_soltab(soltab, values=vals, weights=weight_vals, weightDtype='f16', time=times.mjd * 86400.,
pol=pol_labels,
ant=antenna_labels,
dir=patch_names)
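# Usage sketch: update_h5parm('losoto_readonly.h5', 'writable_copy.h5') clones every
# solset/soltab (values, weights, antennas, directions) into a freshly created file
# that this DataPack class can open with write access.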
class DataPack(object):
# _H: tb.File
_arrays = os.path.dirname(sys.modules["h5parm"].__file__)
lofar_array = os.path.join(_arrays, 'arrays/lofar.antenna.cfg')
lofar_array_hba = os.path.join(_arrays, 'arrays/lofar.hba.antenna.cfg')
gmrt_array = os.path.join(_arrays, 'arrays/gmrtPos.cfg')
def __init__(self, filename, readonly=False):
if isinstance(filename, DataPack):
filename = filename.filename
self.filename = os.path.abspath(filename)
if not os.path.isfile(self.filename) and readonly:
raise IOError("File {} doesn't exist, and in readonly mode".format(self.filename))
self.readonly = readonly
self._H = None
self._contexts_open = 0
self._selection = None
self._current_solset = None
self.axes_order = ['pol', 'dir', 'ant', 'freq', 'time']
self.axes_atoms = {'pol': (np.str_, tb.StringAtom(16)),
'dir': (np.str_, tb.StringAtom(128)),
'ant': (np.str_, tb.StringAtom(16)),
'freq': (np.float64, tb.Float64Atom()),
'time': (np.float64, tb.Float64Atom())}
if len(self.solsets) > 0:
self.current_solset = self.solsets[0]
@property
def axes_order(self):
return self._axes_order
@axes_order.setter
def axes_order(self, axes):
if not isinstance(axes, (tuple, list)):
raise ValueError("axes should be a list or tuple. {}".format(type(axes)))
order = []
for axis in axes:
if axis not in ['ant', 'dir', 'freq', 'pol', 'time']:
raise ValueError("Axis {} not a valid axis.".format(axis))
if axis in order:
raise ValueError("Found duplicate in ordering. {}".format(axes))
order.append(axis)
self._axes_order = order
@property
def readonly(self):
return self._readonly
@readonly.setter
def readonly(self, value):
if not isinstance(value, bool):
raise ValueError("Readonly must be a bool.")
self._readonly = value
def __enter__(self):
if self._contexts_open == 0:
self._H = tb.open_file(self.filename, mode='r' if self.readonly else 'a')
self._contexts_open += 1
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._contexts_open == 1:
self._H.close()
self._H = None
self._contexts_open -= 1
@property
def current_solset(self):
return self._current_solset
@current_solset.setter
def current_solset(self, solset):
if solset not in self.solsets:
raise ValueError("Solset {} does not exist.".format(solset))
self._current_solset = solset
logger.info("Set current solset to: {}".format(self._current_solset))
def set_current_solset(self, solset):
if solset not in self.solsets:
raise ValueError("Solset {} does not exist.".format(solset))
self._current_solset = solset
logger.info("Set current solset to: {}".format(self._current_solset))
@property
def solsets(self):
with self:
return [k for k, v in self._H.root._v_groups.items()]
@property
def soltabs(self):
with self:
if self.current_solset is None:
raise ValueError("Current solset is None.")
solset_group = self._H.root._v_groups[self.current_solset]
return [k for k, v in solset_group._v_groups.items()]
@deprecated("Use current_solset and add_solset")
def switch_solset(self, solset, antenna_labels=None, antennas=None, array_file=None, directions=None,
patch_names=None):
self.add_solset(solset, antenna_labels, antennas, array_file, directions,
patch_names)
def add_solset(self, solset, antenna_labels=None, antennas=None, array_file=None, patch_names=None,
directions=None):
"""
Create a solset.
:param solset: str
Name of solset
:param antenna_labels, antennas: see set_antennas
:param array_file: str
array file to load, lofar array if None
:params patch_names, directions: see set_directions
:param directions:
:return:
"""
if solset in self.solsets:
logger.warning("Solset {} already exists.".format(solset))
self.current_solset = solset
return
with self:
self._H.create_group(self._H.root, solset, title='Solset: {}'.format(solset))
self.current_solset = solset
self.add_antenna_table()
if antennas is None:
antenna_labels, antennas = _load_array_file(self.lofar_array_hba if array_file is None else array_file)
self.set_antennas(antenna_labels, antennas)
self.add_directions_table()
if directions is not None:
self.set_directions(patch_names, directions)
logger.info("Created solset {}.".format(solset))
def add_soltab(self, soltab, values=None, weights=None, weightDtype='f16', **axes):
if soltab in self.soltabs:
logger.warning('Soltab {} already exists.'.format(soltab))
return
with self:
if self.current_solset is None:
raise ValueError("Current solset is None.")
solset_group = self._H.root._v_groups[self.current_solset]
self._H.create_group(solset_group, soltab, "Soltab: {}".format(soltab))
soltab_group = solset_group._v_groups[soltab]
soltab_group._v_attrs['parmdb_type'] = ""
shape = []
ordered_axes = []
for axis_name in self.axes_order:
if axis_name not in axes.keys():
logger.info("Soltab missing axis {}".format(axis_name))
continue
shape.append(len(axes[axis_name]))
ordered_axes.append(axis_name)
self._H.create_array(soltab_group, axis_name, obj=np.array(axes[axis_name]),
title='Axis: {}'.format(axis_name)) # ,atom=self.axes_atoms[axis_name][1])
if values is None:
values = np.zeros(shape)
self._H.create_array(soltab_group, 'val', obj=values.astype(np.float64), atom=tb.Float64Atom())
val_leaf = soltab_group._v_leaves['val']
val_leaf.attrs['AXES'] = ','.join(ordered_axes)
if weightDtype not in ['f16', 'f32', 'f64']:
raise ValueError("Allowed weight dtypes are 'f16','f32', 'f64'")
if weights is None:
weights = np.ones(shape)
if weightDtype == 'f16':
self._H.create_array(soltab_group, 'weight', obj=weights.astype(np.float16), title='Weights',
atom=tb.Float16Atom())
elif weightDtype == 'f32':
self._H.create_array(soltab_group, 'weight', obj=weights.astype(np.float32), title='Weights',
atom=tb.Float32Atom())
elif weightDtype == 'f64':
self._H.create_array(soltab_group, 'weight', obj=weights.astype(np.float64), title='Weights',
atom=tb.Float64Atom())
weight_leaf = soltab_group._v_leaves['weight']
weight_leaf.attrs['AXES'] = ','.join(ordered_axes)
logger.info("Created soltab {}/{}".format(self.current_solset, soltab))
def delete_soltab(self, soltab):
if soltab not in self.soltabs:
raise ValueError("Soltab {} not in solset {}.".format(soltab, self.current_solset))
with self:
solset_group = self._H.root._v_groups[self.current_solset]
soltab_group = solset_group._v_groups[soltab]
soltab_group._f_remove(recursive=True)
def delete_solset(self, solset):
if solset not in self.solsets:
raise ValueError("Solset {} appears not to exist.".format(solset))
with self:
solset_group = self._H.root._v_groups[solset]
solset_group._f_remove(recursive=True)
if solset == self.current_solset:
logger.warning("Setting current solset to None because you deleted it.")
self._current_solset = None
logger.info("Deleted solset {}.".format(solset))
def add_antenna_table(self):
with self:
if self.current_solset is None:
raise ValueError("Current solset is None.")
solset_group = self._H.root._v_groups[self.current_solset]
class Antenna(tb.IsDescription):
name = tb.StringCol(16, pos=1)
position = tb.Float64Col(shape=3, dflt=0.0, pos=2)
# tb.Col(np.float64,3, np.zeros(3, dtype=np.float64),pos=2)
# descriptor = np.dtype([('name', np.str_, 16), ('position', np.float64, 3)])
self._H.create_table(solset_group, 'antenna', Antenna,
title='Antenna names and positions', expectedrows=62)
logger.info("Created antenna table.")
def add_directions_table(self):
with self:
if self.current_solset is None:
raise ValueError("Current solset is None.")
solset_group = self._H.root._v_groups[self.current_solset]
class Direction(tb.IsDescription):
name = tb.StringCol(128, pos=1)
dir = tb.Float64Col(shape=2, dflt=0.0, pos=2)
# tb.Col(np.float64, 2, np.zeros(2, dtype=np.float64), pos=2)
# descriptor = np.dtype([('name', np.str_, 16), ('position', np.float64, 3)])
self._H.create_table(solset_group, 'source', Direction,
title='Direction names and directions', expectedrows=35)
logger.info("Created direction table.")
def set_antennas(self, antenna_labels, antennas):
with self:
if self.current_solset is None:
raise ValueError("Current solset is None.")
solset_group = self._H.root._v_groups[self.current_solset]
if 'antenna' not in solset_group._v_leaves:
logger.info("antenna not in leaves. Adding.")
self.add_antenna_table()
antenna_table = solset_group._v_leaves['antenna']
antenna_table.remove_rows(0)
antenna_table.append(list(zip(antenna_labels, antennas)))
logger.info("Set the antenna table.")
def set_directions(self, patch_names, directions):
if patch_names is None:
patch_names = ["patch_{:03d}".format(i) for i in range(len(directions))]
with self:
if self.current_solset is None:
raise ValueError("Current solset is None.")
solset_group = self._H.root._v_groups[self.current_solset]
if 'source' not in solset_group._v_leaves:
self.add_directions_table()
direction_table = solset_group._v_leaves['source']
direction_table.remove_rows(0)
direction_table.append(list(zip(patch_names, directions)))
logger.info("Set the direction table.")
def save_array_file(self, array_file):
with self:
ants = self._solset.getAnt()
labels = []
locs = []
for label, pos in ants.items():
labels.append(label)
locs.append(pos)
Na = len(labels)
with open(array_file, 'w') as f:
f.write('# Created on {0} by <NAME>\n'.format(time.strftime("%a %c", time.localtime())))
f.write('# ITRS(m)\n')
f.write('# X\tY\tZ\tlabels\n')
i = 0
while i < Na:
f.write(
                        '{0:1.9e}\t{1:1.9e}\t{2:1.9e}\t{3}'.format(locs[i][0], locs[i][1], locs[i][2], labels[i]))
if i < Na - 1:
f.write('\n')
i += 1
@property
def antennas(self):
with self:
if self.current_solset is None:
raise ValueError("Current solset is None.")
solset_group = self._H.root._v_groups[self.current_solset]
if 'antenna' not in solset_group._v_leaves:
self.add_antenna_table()
antenna_table = solset_group._v_leaves['antenna']
return antenna_table.col('name'), antenna_table.col('position')
@property
def directions(self):
with self:
if self.current_solset is None:
raise ValueError("Current solset is None.")
solset_group = self._H.root._v_groups[self.current_solset]
if 'source' not in solset_group._v_leaves:
self.add_directions_table()
direction_table = solset_group._v_leaves['source']
return direction_table.col('name'), direction_table.col('dir')
def __repr__(self):
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
_temp_solset = self.current_solset
info = "==== DataPack: {} ====\n".format(os.path.abspath(self.filename))
for solset in self.solsets:
self.current_solset = solset
info += "=== solset: {} ===\n".format(solset)
info += "Directions: \n"
for i, src_name1 in enumerate(zip(*self.directions)):
info += "{} -> {}\t{}\n".format(i, src_name1[0], list(src_name1[1]))
info += "\nStations: \n"
for i, ant1 in enumerate(zip(*self.antennas)):
info += "{} -> {}\t{}\n".format(i, ant1[0], list(ant1[1]))
for soltab in self.soltabs:
info += "== soltab: {} ==\n".format(soltab)
shape = [len(axis_vals) for axis_vals, axis in zip(*self.soltab_axes(soltab))]
axes = [axis for axis_vals, axis in zip(*self.soltab_axes(soltab))]
info += "shape: {}\n".format(shape)
info += "axes: {}\n".format(axes)
# for axis_vals, axis, size, dtype in zip(*self.soltab_axes(soltab)):
# info += "Axis: {} {} {}\n{}\n".format(axis,size, dtype,list(axis_vals))
self.current_solset = _temp_solset
return info
def select(self, **axes):
self._selection = {}
for axis_name in self.axes_order:
if axis_name not in axes.keys():
continue
if isinstance(axes[axis_name], int):
self._selection[axis_name] = [axes[axis_name]]
else:
self._selection[axis_name] = axes[axis_name]
def select_all(self):
self._selection = None
@property
def allowed_soltab_prefixes(self):
# return [soltab.replace("000","") for soltab in self.soltabs]
return ['phase', 'amplitude', 'tec', 'clock', 'const']
def soltab_axes(self, soltab):
with self:
if soltab not in self.soltabs:
logger.warning('Soltab {} does not exist.'.format(soltab))
return
with self:
if self.current_solset is None:
raise ValueError("Current solset is None.")
solset_group = self._H.root._v_groups[self.current_solset]
soltab_group = solset_group._v_groups[soltab]
val_leaf = soltab_group._v_leaves['val']
try:
axes = val_leaf.attrs['AXES'].split(',')
except TypeError:
axes = [s.decode().lower() for s in val_leaf.attrs['AXES'].split(b',')]
shape = []
type = []
vals = []
for axis in axes:
axis_vals = soltab_group._v_leaves[axis].read()
vals.append(axis_vals)
shape.append(len(axis_vals))
type.append(np.array(axis_vals).dtype)
return vals, axes
def get_selection(self, soltab):
if self._selection is None:
self._selection = {}
if soltab not in self.soltabs:
raise ValueError('Soltab {} does not exist.'.format(soltab))
if self.current_solset is None:
raise ValueError("Current solset is None.")
selection = []
# goal is to reduce everything to slices if possible for efficient usage of pytables
for axis_val, axis in zip(*self.soltab_axes(soltab)):
axis_selection = self._selection.get(axis, None)
if axis_selection is None:
selection.append(slice(None, None, None))
elif isinstance(axis_selection, slice):
selection.append(axis_selection)
elif isinstance(axis_selection, (tuple, list)):
list_select = []
for element in axis_selection:
if isinstance(element, int):
if element >= len(axis_val):
raise ValueError(
"Selecting index greater than length of axis {} {}".format(element, axis_val))
list_select.append(element)
else:
idx = np.where(axis_val.astype(type(element)) == element)[0]
if len(idx) == 0:
raise ValueError("Element not in axis {} {}".format(element, axis_val))
list_select.append(idx[0])
selection.append(list_select)
elif isinstance(axis_selection, str):
axis_val = np.asarray(axis_val)
is_pattern = []
for idx, element in enumerate(axis_val.astype(type(axis_selection))):
if re.search(axis_selection, element) is not None:
is_pattern.append(idx)
selection.append(is_pattern)
else:
raise ValueError("Unable to parse {}".format(axis_selection))
# replace all lists with slices if possible: limitation of only one list per indexing
corrected_selection = []
for sel in selection:
_sel = sel
if isinstance(sel, list):
                if sel[0] != np.min(sel) or sel[-1] != np.max(sel):
                    # a non-monotonic fancy index cannot be expressed as a slice; keep it as a list
                    corrected_selection.append(_sel)
                    continue
if len(sel) == 1:
_sel = slice(sel[0], sel[0] + 1, 1)
else:
try_slice = slice(sel[0], sel[-1] + 1, (sel[-1] - sel[0]) // (len(sel) - 1))
comp_list = list(range(sel[0], sel[-1] + 1, (sel[-1] - sel[0]) // (len(sel) - 1)))
if comp_list == sel:
_sel = try_slice
corrected_selection.append(_sel)
num_lists = sum([1 if isinstance(sel, list) else 0 for sel in corrected_selection])
if num_lists > 1:
raise IndexError("Due to a limitation, only one fancy indexing can be applied per pytables getattr.")
return tuple(corrected_selection)
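    # Illustrative note on the slice reduction above (not part of the original code): a selection
    # such as time=[0, 2, 4] on a 10-element axis is equivalent to slice(0, 5, 2) and is converted,
    # whereas time=[0, 1, 5] cannot be expressed as a slice and is kept as the single allowed
    # fancy index per pytables access.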
def get_axes_perm(self, actual_axes, want_axes):
"""
Get the permutation that changes the actual stored axes to prefered axes order stored
in self.axes_order.
:param actual_axes: list of str
The order of axes in an array
:return: tuple of int
"""
if not isinstance(actual_axes, (list, tuple)):
raise TypeError("actual axes must be a list or tuple of str")
actual_axes = list(actual_axes)
if not isinstance(want_axes, (list, tuple)):
raise TypeError("want axes must be a list or tuple of str")
want_axes = list(want_axes)
for a in want_axes:
if a not in actual_axes:
raise ValueError("Missing {} in {}.".format(a, actual_axes))
return tuple([actual_axes.index(a) for a in want_axes])
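    # Example (illustrative): get_axes_perm(['ant', 'time', 'dir'], ['time', 'ant', 'dir'])
    # returns (1, 0, 2), i.e. the transpose that maps the stored axis order onto the wanted order.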
def get_soltab(self, soltab, weight=False):
with self:
selection = self.get_selection(soltab)
soltab_axes_vals, soltab_axes = self.soltab_axes(soltab)
###
# assumes the desired axes are in self.ordered_axes
want_axes = [a for a in self.axes_order if a in soltab_axes]
actual_axes = soltab_axes
perm = self.get_axes_perm(actual_axes, want_axes)
solset_group = self._H.root._v_groups[self.current_solset]
soltab_group = solset_group._v_groups[soltab]
if weight:
leaf = soltab_group._v_leaves['weight']
else:
leaf = soltab_group._v_leaves['val']
out_axes = {name: np.array(vals)[selection[i]] for i, (vals, name) in
enumerate(zip(soltab_axes_vals, soltab_axes))}
out_vals = leaf.__getitem__(selection).transpose(perm)
return out_vals, out_axes
def set_soltab(self, soltab, value, weight=False):
"""
Sets the values of soltab, according to the current selection.
:param soltab: str
:param value: np.array or array like
Should have shape of self.axes_order
:param weight: bool
Whether you are setting weights or not
:return:
"""
with self:
selection = self.get_selection(soltab)
solset_group = self._H.root._v_groups[self.current_solset]
soltab_group = solset_group._v_groups[soltab]
_, soltab_axes = self.soltab_axes(soltab)
###
#
want_axes = soltab_axes
actual_axes = [a for a in self.axes_order if a in soltab_axes]
perm = self.get_axes_perm(actual_axes, want_axes)
if weight:
leaf = soltab_group._v_leaves['weight']
else:
leaf = soltab_group._v_leaves['val']
leaf.__setitem__(selection, value.transpose(perm))
def __getattr__(self, tab):
"""
Get a value in allowed soltabs or pass on to underlying.
:param tab:
:return: np.array
Shape as determined by self.axes_order
"""
# with self:
# tabs = self._solset.getSoltabNames()
tabs = self.allowed_soltab_prefixes
tabs = ["weights_{}".format(t) for t in tabs] + ["axes_{}".format(t) for t in tabs] + tabs
weight = False
axes = False
if any([tab.startswith(t) for t in tabs]):
if tab.startswith("weights_"):
tab = "".join(tab.split('weights_')[1:])
weight = True
if tab.startswith("axes_"):
tab = "".join(tab.split('axes_')[1:])
axes = True
with self:
soltab = "{}000".format(tab)
selection = self.get_selection(soltab)
soltab_axes_vals, soltab_axes = self.soltab_axes(soltab)
###
# assumes the desired axes are in self.ordered_axes
want_axes = [a for a in self.axes_order if a in soltab_axes]
actual_axes = soltab_axes
perm = self.get_axes_perm(actual_axes, want_axes)
if not axes:
solset_group = self._H.root._v_groups[self.current_solset]
soltab_group = solset_group._v_groups[soltab]
if weight:
leaf = soltab_group._v_leaves['weight']
else:
leaf = soltab_group._v_leaves['val']
out_axes = {name: np.array(vals)[selection[i]] for i, (vals, name) in
enumerate(zip(soltab_axes_vals, soltab_axes))}
out_vals = leaf.__getitem__(selection).transpose(perm)
return out_vals, out_axes
else:
out_axes = {name: np.array(vals)[selection[i]] for i, (vals, name) in
enumerate(zip(soltab_axes_vals, soltab_axes))}
return out_axes
else:
return object.__getattribute__(self, tab)
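    # Attribute access pattern (illustrative summary, assuming a soltab named 'phase000'):
    #   vals, axes = dp.phase            -> values plus a dict of the selected axis values
    #   weights, axes = dp.weights_phase -> the corresponding weights
    #   axes = dp.axes_phase             -> axis values only
    #   dp.phase = new_vals              -> writes values back under the current selection (see __setattr__)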
def __setattr__(self, tab, value):
"""
Links any attribute with an "axis name" to getValuesAxis("axis name")
also links val and weight to the relative arrays.
Parameter
----------
axis : str
The axis name.
value : array
The array of the right shape for selection.
Assumes the values to set are in self.axes_order
"""
# with self:
# tabs = self._solset.getSoltabNames()
tabs = self.allowed_soltab_prefixes
tabs = ["weights_{}".format(t) for t in tabs] + ["axes_{}".format(t) for t in tabs] + tabs
weight = False
axes = False
if any([tab.startswith(t) for t in tabs]):
if tab.startswith("weights_"):
tab = "".join(tab.split('weights_')[1:])
weight = True
if tab.startswith("axes_"):
tab = "".join(tab.split('axes_')[1:])
axes = True
with self:
soltab = "{}000".format(tab)
selection = self.get_selection(soltab)
solset_group = self._H.root._v_groups[self.current_solset]
soltab_group = solset_group._v_groups[soltab]
_, soltab_axes = self.soltab_axes(soltab)
###
#
want_axes = soltab_axes
actual_axes = [a for a in self.axes_order if a in soltab_axes]
perm = self.get_axes_perm(actual_axes, want_axes)
if not axes:
if weight:
leaf = soltab_group._v_leaves['weight']
else:
leaf = soltab_group._v_leaves['val']
leaf.__setitem__(selection, value.transpose(perm))
else:
if not isinstance(value, dict):
                        raise TypeError("Axes must come in a dict of 'name': vals")
for i, (k, v) in enumerate(value.items()):
axis_vals = soltab_group._v_leaves[k]
axis_vals[selection[i]] = v
else:
object.__setattr__(self, tab, value)
@property
def ref_ant(self):
with self:
antenna_labels, antennas = self.antennas
return antenna_labels[0]
@property
def array_center(self):
with self:
_, antennas = self.get_antennas(None)
center = antennas.cartesian.xyz[:, 0]
center = ac.SkyCoord(x=center[0], y=center[1], z=center[2], frame='itrs')
return center
@property
def mean_array_loc(self):
with self:
_, antennas = self.get_antennas(None)
center = np.mean(antennas.cartesian.xyz, axis=1)
center = ac.SkyCoord(x=center[0], y=center[1], z=center[2], frame='itrs')
return center
def get_antennas(self, ants):
with self:
antenna_labels, antennas = self.antennas
if ants is None:
lookup = slice(None, None, None)
else:
ants = np.array(ants).astype(antenna_labels.dtype)
lookup = []
for a in ants:
if a not in antenna_labels:
raise ValueError("Antenna not found in solset {} {}".format(a, antenna_labels))
lookup.append(np.where(a == antenna_labels)[0][0])
antennas = antennas[lookup, :]
return antenna_labels[lookup], ac.SkyCoord(antennas[:, 0] * au.m, antennas[:, 1] * au.m,
antennas[:, 2] * au.m, frame='itrs')
@property
def pointing_center(self):
with self:
_, directions = self.get_directions(None)
ra_mean = np.mean(directions.transform_to('icrs').ra)
dec_mean = np.mean(directions.transform_to('icrs').dec)
dir = ac.SkyCoord(ra_mean, dec_mean, frame='icrs')
return dir
def get_directions(self, dirs):
with self:
patch_names, directions = self.directions
if dirs is None:
lookup = slice(None, None, None)
else:
dirs = np.array(dirs).astype(patch_names.dtype)
lookup = []
for a in dirs:
if a not in patch_names:
raise ValueError("Direction not found in solset {} {}".format(a, patch_names))
lookup.append(np.where(a == patch_names)[0][0])
directions = directions[lookup, :]
return patch_names[lookup], ac.SkyCoord(directions[:, 0] * au.rad, directions[:, 1] * au.rad, frame='icrs')
def get_times(self, times):
"""
        Times are stored as seconds of Modified Julian Date; convert to astropy Time
        and return ISO strings alongside the Time objects.
"""
times = at.Time(times / 86400., format='mjd')
return times.isot, times
def get_freqs(self, freqs):
labs = ['{:.1f}MHz'.format(f / 1e6) for f in freqs]
return np.array(labs), freqs*au.Hz
def get_pols(self, pols):
with self:
return pols, np.arange(len(pols), dtype=np.int32)
```
#### File: h5parm/tests/test_datapack.py
```python
from h5parm.utils import make_example_datapack, make_soltab
import numpy as np
import pytest
def test_datapack():
datapack = make_example_datapack(4,5,6,["X"],clobber=True)
phase,axes = datapack.phase
datapack.phase = phase+1.
phasep1, axes = datapack.phase
assert np.all(np.isclose(phasep1, phase+1.))
datapack.select(ant='RS509', time=slice(0,1,1))
phase,axes = datapack.phase
assert phase.shape == (1, 4, 1, 5, 1)
datapack.select(ant='CS')
phase, axes = datapack.phase
assert phase.shape == (1, 4, 48, 5, 6)
datapack.select(ant='RS*', time=slice(0, 1, 1))
phase, axes = datapack.phase
for a in axes['ant']:
assert b'RS' in a
assert len(axes['ant']) == 14
datapack.select(time=[1,3], dir=[0,1,3])
phase, axes = datapack.phase
with pytest.raises(IndexError):
datapack.select(time=[0, 1, 3], dir=[0, 1, 3])
phase, axes = datapack.phase
assert 'sol001' not in datapack.solsets
make_soltab(datapack, to_solset='sol001')
assert 'sol001' in datapack.solsets
``` |
{
"source": "Joshuaalbert/IonoTomo",
"score": 3
} |
#### File: astro/frames/enu_frame.py
```python
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import numpy as np
import astropy.units as u
import astropy.time as at
from astropy.coordinates.baseframe import (BaseCoordinateFrame, RepresentationMapping, frame_transform_graph)
from astropy.coordinates.attributes import (TimeAttribute, EarthLocationAttribute)
from astropy.coordinates.transformations import FunctionTransform
from astropy.coordinates.representation import (SphericalRepresentation,
UnitSphericalRepresentation,CartesianRepresentation)
from astropy.coordinates import AltAz
class ENU(BaseCoordinateFrame):
"""
Written by <NAME> - <EMAIL>
A coordinate or frame in the East-North-Up (ENU) system.
This frame has the following frame attributes, which are necessary for
transforming from ENU to some other system:
* ``obstime``
The time at which the observation is taken. Used for determining the
position and orientation of the Earth.
* ``location``
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame.
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
east : :class:`~astropy.units.Quantity`, optional, must be keyword
The east coordinate for this object (``north`` and ``up`` must also be given and
``representation`` must be None).
north : :class:`~astropy.units.Quantity`, optional, must be keyword
The north coordinate for this object (``east`` and ``up`` must also be given and
``representation`` must be None).
up : :class:`~astropy.units.Quantity`, optional, must be keyword
The up coordinate for this object (``north`` and ``east`` must also be given and
``representation`` must be None).
Notes
-----
This is useful as an intermediate frame between ITRS and UVW for radio astronomy
"""
frame_specific_representation_info = {
'cartesian': [RepresentationMapping('x', 'east'),
RepresentationMapping('y', 'north'),
RepresentationMapping('z','up')],
}
default_representation = CartesianRepresentation
obstime = TimeAttribute(default=None)#at.Time("2000-01-01T00:00:00.000",format="isot",scale="tai"))
location = EarthLocationAttribute(default=None)
def __init__(self, *args, **kwargs):
super(ENU, self).__init__(*args, **kwargs)
@property
def elevation(self):
"""
Elevation above the horizon of the direction, in degrees
"""
return np.arctan2(self.up,np.sqrt(self.north**2 + self.east**2))*180./np.pi
@frame_transform_graph.transform(FunctionTransform, AltAz, ENU)
def altaz_to_enu(altaz_coo, enu_frame):
'''Defines the transformation between AltAz and the ENU frame.
AltAz usually has units attached but ENU does not require units
if it specifies a direction.'''
rep = CartesianRepresentation(x = altaz_coo.cartesian.y,
y = altaz_coo.cartesian.x,
z = altaz_coo.cartesian.z,
copy=False)
return enu_frame.realize_frame(rep)
@frame_transform_graph.transform(FunctionTransform, ENU, AltAz)
def enu_to_altaz(enu_coo, altaz_frame):
    '''Defines the inverse transformation from ENU back to AltAz.'''
rep = CartesianRepresentation(x = enu_coo.north,
y = enu_coo.east,
z = enu_coo.up,
copy=False)
    return altaz_frame.realize_frame(rep)
@frame_transform_graph.transform(FunctionTransform, ENU, ENU)
def enu_to_enu(from_coo, to_frame):
# for now we just implement this through AltAz to make sure we get everything
# covered
return from_coo.transform_to(AltAz(obstime=from_coo.obstime)).transform_to(to_frame)
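# Minimal usage sketch (illustrative; not part of the original module). The site and direction
# below are arbitrary example values chosen only to demonstrate the registered AltAz -> ENU path.
def _example_altaz_to_enu():
    import astropy.coordinates as ac
    location = ac.EarthLocation(lat=52.9*u.deg, lon=6.9*u.deg, height=0.*u.m)
    obstime = at.Time("2019-01-01T00:00:00", format='isot')
    direction = ac.SkyCoord(alt=45*u.deg, az=90*u.deg, frame='altaz',
                            location=location, obstime=obstime)
    enu = direction.transform_to(ENU(location=location, obstime=obstime))
    # east/north/up are unit-vector components since the input is a pure direction
    return enu.east, enu.north, enu.up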
#class ENU(BaseCoordinateFrame):
# """
# Written by <NAME> - <EMAIL>
# A coordinate or frame in the East-North-Up (ENU) system.
#
# This frame has the following frame attributes, which are necessary for
# transforming from ENU to some other system:
#
# * ``obstime``
# The time at which the observation is taken. Used for determining the
# position and orientation of the Earth.
# * ``location``
# The location on the Earth. This can be specified either as an
# `~astropy.coordinates.EarthLocation` object or as anything that can be
# transformed to an `~astropy.coordinates.ITRS` frame.
#
# Parameters
# ----------
# representation : `BaseRepresentation` or None
# A representation object or None to have no data (or use the other keywords)
# east : :class:`~astropy.units.Quantity`, optional, must be keyword
# The east coordinate for this object (``north`` and ``up`` must also be given and
# ``representation`` must be None).
# north : :class:`~astropy.units.Quantity`, optional, must be keyword
# The north coordinate for this object (``east`` and ``up`` must also be given and
# ``representation`` must be None).
# up : :class:`~astropy.units.Quantity`, optional, must be keyword
# The up coordinate for this object (``north`` and ``east`` must also be given and
# ``representation`` must be None).
#
# Notes
# -----
# This is useful as an intermediate frame between ITRS and UVW for radio astronomy
#
# """
#
# frame_specific_representation_info = {
# 'cartesian': [RepresentationMapping('x', 'east'),
# RepresentationMapping('y', 'north'),
# RepresentationMapping('z','up')],
# }
#
# default_representation = CartesianRepresentation
#
# obstime = TimeAttribute(default=None)#at.Time("2000-01-01T00:00:00.000",format="isot",scale="tai"))
# location = EarthLocationAttribute(default=None)
#
# def __init__(self, *args, **kwargs):
# super(ENU, self).__init__(*args, **kwargs)
#
# @property
# def elevation(self):
# """
# Elevation above the horizon of the direction, in degrees
# """
# return np.arctan2(self.up,np.sqrt(self.north**2 + self.east**2))*180./np.pi
#
#@frame_transform_graph.transform(FunctionTransform, ITRS, ENU)
#def itrs_to_enu(itrs_coo, enu_frame):
# '''Defines the transformation between ITRS and the ENU frame.
# ITRS usually has units attached but ENU does not require units
# if it specifies a direction.'''
#
# #if np.any(itrs_coo.obstime != enu_frame.obstime):
# # itrs_coo = itrs_coo.transform_to(ITRS(obstime=enu_frame.obstime))
#
# # if the data are UnitSphericalRepresentation, we can skip the distance calculations
# is_unitspherical = (isinstance(itrs_coo.data, UnitSphericalRepresentation) or
# itrs_coo.cartesian.x.unit == u.one)
#
# lon, lat, height = enu_frame.location.to_geodetic('WGS84')
# lonrad = lon.to(u.radian).value
# latrad = lat.to(u.radian).value
# sinlat = np.sin(latrad)
# coslat = np.cos(latrad)
# sinlon = np.sin(lonrad)
# coslon = np.cos(lonrad)
# north = [-sinlat*coslon,
# -sinlat*sinlon,
# coslat]
# east = [-sinlon,coslon,0]
# up = [coslat*coslon,coslat*sinlon,sinlat]
# R = np.array([east,north,up])
#
# if is_unitspherical:
# #don't need to do distance calculation
# p = itrs_coo.cartesian.xyz.value
# diff = p
# penu = R.dot(diff)
#
# rep = CartesianRepresentation(x = u.Quantity(penu[0],u.one,copy=False),
# y = u.Quantity(penu[1],u.one,copy=False),
# z = u.Quantity(penu[2],u.one,copy=False),
# copy=False)
# else:
# p = itrs_coo.cartesian.xyz
# p0 = ITRS(*enu_frame.location.geocentric,obstime=enu_frame.obstime).cartesian.xyz
# diff = (p.T-p0).T
# penu = R.dot(diff)
#
# rep = CartesianRepresentation(x = penu[0],#u.Quantity(penu[0],u.m,copy=False),
# y = penu[1],#u.Quantity(penu[1],u.m,copy=False),
# z = penu[2],#u.Quantity(penu[2],u.m,copy=False),
# copy=False)
#
# return enu_frame.realize_frame(rep)
#
#
#@frame_transform_graph.transform(FunctionTransform, ENU, ITRS)
#def enu_to_itrs(enu_coo, itrs_frame):
# #p = itrs_frame.cartesian.xyz.to(u.m).value
# #p0 = np.array(enu_coo.location.to(u.m).value)
# #p = np.array(itrs_frame.location.to(u.m).value)
#
#
# lon, lat, height = enu_coo.location.to_geodetic('WGS84')
# sinlat = np.sin(lat.to(u.radian).value)
# coslat = np.cos(lat.to(u.radian).value)
# sinlon = np.sin(lon.to(u.radian).value)
# coslon = np.cos(lon.to(u.radian).value)
# north = [-sinlat*coslon,
# -sinlat*sinlon,
# coslat]
# east = [-sinlon,coslon,0]
# up = [coslat*coslon,coslat*sinlon,sinlat]
# R = np.array([east,north,up])
#
# if isinstance(enu_coo.data, UnitSphericalRepresentation) or enu_coo.cartesian.x.unit == u.one:
# diff = R.T.dot(enu_coo.cartesian.xyz)
# p = diff
# rep = CartesianRepresentation(x = u.Quantity(p[0],u.one,copy=False),
# y = u.Quantity(p[1],u.one,copy=False),
# z = u.Quantity(p[2],u.one,copy=False),
# copy=False)
# else:
# diff = R.T.dot(enu_coo.cartesian.xyz)
# p0 = ITRS(*enu_coo.location.geocentric,obstime=enu_coo.obstime).cartesian.xyz
# #print (R,diff)
# p = (diff.T + p0).T
# #print (p)
# rep = CartesianRepresentation(x = p[0],#u.Quantity(p[0],u.m,copy=False),
# y = p[1],#u.Quantity(p[1],u.m,copy=False),
# z = p[2],#u.Quantity(p[2],u.m,copy=False),
# copy=False)
#
# return itrs_frame.realize_frame(rep)
#
# #return ITRS(*p*u.m,obstime=enu_coo.obstime).transform_to(itrs_frame)
#
#@frame_transform_graph.transform(FunctionTransform, ENU, ENU)
#def enu_to_enu(from_coo, to_frame):
# # for now we just implement this through ITRS to make sure we get everything
# # covered
# return from_coo.transform_to(ITRS(obstime=from_coo.obstime)).transform_to(to_frame)
#
```
#### File: ionotomo/bayes/phase_screen_interp.py
```python
import tensorflow as tf
import numpy as np
import sys
import logging as log
class KernelND(object):
    '''Base class for kernels on N-dimensional inputs.
    Hyperparameters are supplied via the `_hyperparams` dict (optionally with bounds in
    `_hyperparams_bounds`) and are turned into TensorFlow variables by build().
    '''
def __init__(self,_hyperparams={},_hyperparams_bounds={},use_initializer=True,**kwargs):
self.use_initializer = use_initializer
assert isinstance(_hyperparams,dict)
assert isinstance(_hyperparams_bounds,dict)
_hyperparams.update(kwargs.get("hyperparams",{}))
_hyperparams_bounds.update(kwargs.get("hyperparams_bounds",{}))
self.hyperparams = {}
self.fixed = {}
self.hyperparams_bounds = {}
self.built = False
for name in _hyperparams:
self._add_hyperparam(name,_hyperparams[name],bounds = _hyperparams_bounds.get(name,None))
def _add_hyperparam(self,name,value,bounds=None):
self.hyperparams[name] = value
self.hyperparams[name] = np.atleast_1d(self.hyperparams[name]).astype(float)
self.fixed[name] = False
if bounds is None:
self.hyperparams_bounds[name] = [1e-5,1e5]
else:
self.hyperparams_bounds[name] = bounds
def _log_normal_initializer(self,lower,upper,seed=None):
def _initializer(shape, dtype, partition_info=None,seed=seed):
return tf.exp(tf.random_uniform(shape,lower,upper,dtype,seed=seed))
return _initializer
def build(self,batch_size,multi_dataset=False,seed=None):
"""Set up the variables (hyperparams)"""
if self.built:
return
self.batch_size = int(batch_size)
self.variables = {}
self.sync_ops = []
self.sync_placeholders = {}
with tf.variable_scope("{}_{}_hyperparams".format(type(self).__name__, self.__hash__())):
for name in self.hyperparams.keys():
if multi_dataset:
shape=(1,1,1)
else:
shape=(batch_size,1,1)
bounds = self.hyperparams_bounds[name]
if len(self.hyperparams[name].shape) == 1:
if self.hyperparams[name].shape[0] != shape[0]:
self.hyperparams[name] = np.array([self.hyperparams[name][0]]*shape[0]).reshape((-1,1,1))
value = self.hyperparams[name]
assert value.shape == shape
if self.use_initializer and not self.fixed[name]:
if bounds[0] > 0 and bounds[1] > 0:
self.variables[name] = tf.get_variable(\
name,
shape,
dtype=tf.float64,
initializer=self._log_normal_initializer(np.log(bounds[0]),np.log(bounds[1]),seed=seed),
trainable=True)
else:
self.variables[name] = tf.get_variable(\
name,
shape,
dtype=tf.float64,
initializer=tf.random_uniform_initializer(bounds[0],bounds[1],seed=seed),
trainable=True)
else:
self.variables[name] = tf.get_variable(\
name,
initializer=tf.constant(value,dtype=tf.float64),
trainable=not self.fixed[name])
self.sync_placeholders[name] = tf.placeholder(tf.float64,shape=shape,name='sync_{}'.format(name))
#self.sync_ops.append(tf.assign(self.variables[name],tf.expand_dims(tf.expand_dims(self.sync_placeholders[name],axis=-1),axis=-1)))
#self.sync_ops.append(tf.assign(self.variables[name],self.sync_placeholders[name]))
self.sync_ops.append(self.variables[name].assign(self.sync_placeholders[name]))
self.variables[name] = tf.clip_by_value(self.variables[name],bounds[0],bounds[1])
self.built = True
def _sync_variables(self,sess):
"""assign self.hyperparams to self.variables"""
ops = getattr(self,'sync_ops',None)
assert ops is not None,"Must build kernel first"
feed_dict = {}
for name in self.hyperparams.keys():
feed_dict[self.sync_placeholders[name]] = self.hyperparams[name]
sess.run(ops,feed_dict=feed_dict)
def _sync_hyperparams(self,sess):
'''Assign variables to hyperparams'''
hp = sess.run(self.get_variables())
self.set_hyperparams(hp)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
"""Construct the sub graph defining this kernel.
Return an output tensor"""
raise NotImplementedError("Setup in subclass")
def fix(self,name):
'''Sets the given hyperparam to be fixed.
# Example
        K = SquaredExponential()
        K.fix("l")
        # results in hyperparam "l" being held fixed (zero derivative)
'''
assert name in self.hyperparams.keys()
self.fixed[name] = True
def unfix(self,name):
'''Sets the given hyperparam to be trainable.
# Example
        K = SquaredExponential()
        K.unfix("l")
        # results in hyperparam "l" being trainable again
'''
assert name in self.hyperparams.keys()
self.fixed[name] = False
def set_hyperparams_bounds(self,name,bounds):
assert name in self.hyperparams.keys(),"{} is not a valid name".format(name)
self.hyperparams_bounds[name] = bounds
def set_hyperparams(self,hp):
assert len(hp) == len(self.hyperparams)
assert isinstance(hp,dict)
self.hyperparams.update(hp)
def get_hyperparams(self,idx=None):
if idx is None:
idx = slice(self.batch_size)
hp = {}
for name in self.hyperparams:
hp[name] = self.hyperparams[name][idx,:,:]
return hp
def get_variables(self):
return self.variables
def __add__(self,K):
'''Add another Kernel or SumKernel. Creates a SumKernel object'''
assert isinstance(K,KernelND), "Can only add kernels to kernels"
return SumKernel([self,K])
def __mul__(self,K):
"""Multiply the input kernel by this kernel and return a ProdKernel"""
assert isinstance(K,KernelND), "Can only multiply kernels by kernels"
return ProductKernel([self,K])
def __pow__(self,b):
"""Exponentiate the input kernel by this kernel and return a ExpKernel"""
return PowKernel([self],b)
def __repr__(self):
"""Get the string repr of this kernel, for pretty print"""
s = "** Kernel {} **\n".format(type(self).__name__)
for i,name in enumerate(self.hyperparams.keys()):
if self.fixed[name]:
s += "{} : {} in [{} , {}] (fixed)\n".format(name,np.ravel(self.hyperparams[name]),*self.hyperparams_bounds[name])
else:
s += "{} : {} in [{} , {}]\n".format(name,np.ravel(self.hyperparams[name]),*self.hyperparams_bounds[name])
return s
class MultiKernel(KernelND):
def __init__(self,kernels,**kwargs):
kernels = list(kernels)
#
super(MultiKernel,self).__init__(**kwargs)
self.kernels = kernels
def build(self,batch_size=1,multi_dataset=False,**kwargs):
"""Set up the variables (hyperparams)"""
for K in self.kernels:
K.build(batch_size=batch_size,
multi_dataset=multi_dataset,
**kwargs)
def _sync_variables(self,sess):
"""assign self.hyperparams to self.variables"""
for K in self.kernels:
K._sync_variables(sess)
def _sync_hyperparams(self,sess):
'''Assign variables to hyperparams'''
for K in self.kernels:
K._sync_hyperparams(sess)
@property
def kernels(self):
return self._kernels
@kernels.setter
def kernels(self,kernels):
self._kernels = kernels
# for K in kernels:
# if not isinstance(K,KernelND):
# raise TypeError("Only add KernelND, {}".format(type(K)))
# assert K.ndims == kernels[0].ndims, "Only add like dim kernels"
# self._kernels = kernels
#
def set_hyperparams(self,hp):
assert isinstance(hp,(list,tuple))
assert len(hp) == len(self.kernels)
for i in range(len(self.kernels)):
self.kernels[i].set_hyperparams(hp[i])
def get_hyperparams(self):
hp = []
for K in self.kernels:
hp.append(K.get_hyperparams())
return hp
def get_variables(self):
var = []
for K in self.kernels:
var.append(K.get_variables())
return var
class SumKernel(MultiKernel):
def __init__(self,kernels,**kwargs):
super(SumKernel,self).__init__(kernels,**kwargs)
assert len(self.kernels) > 1
def call(self,X,Y=None,share_x=False,eval_derivative=False):
"""Construct the sub graph defining this kernel.
Return an output tensor"""
output = self.kernels[0].call(X,Y,share_x,eval_derivative=eval_derivative)
for i in range(1,len(self.kernels)):
output += self.kernels[i].call(X,Y,share_x)
return output
def __repr__(self):
s = "**************\n"
s += self.kernels[0].__repr__()
for i in range(1,len(self.kernels)):
s += "** + **\n"
s += self.kernels[i].__repr__()
s += "**************\n"
return s
class ProductKernel(MultiKernel):
def __init__(self,kernels,**kwargs):
super(ProductKernel,self).__init__(kernels,**kwargs)
assert len(self.kernels) > 1
def call(self,X,Y=None,share_x=False,eval_derivative=False):
output = self.kernels[0].call(X,Y,share_x,eval_derivative=eval_derivative)
for i in range(1,len(self.kernels)):
output *= self.kernels[i].call(X,Y,share_x)
return output
def __repr__(self):
s = "**************\n"
s += self.kernels[0].__repr__()
for i in range(1,len(self.kernels)):
s += "** x **\n"
s += self.kernels[i].__repr__()
s += "**************\n"
return s
class PowKernel(MultiKernel):
def __init__(self,kernels,b,**kwargs):
super(PowKernel,self).__init__(kernels,**kwargs)
assert int(b) == b, "only integer powers are valid kernels"
self.b = int(b)
assert len(self.kernels) == 1
def call(self,X,Y=None,share_x=False,eval_derivative=False):
output = self.kernels[0].call(X,Y,share_x,eval_derivative=eval_derivative)**self.b
return output
def __repr__(self):
s = "*****POW({})******\n".format(self.b)
s += self.kernels[0].__repr__()
s += "**************\n"
return s
def cdist(x,y):
"""do pdist
x : Tensor (batch_size,num_points,ndims)"""
#D[:,i,j] = (a[:,i] - b[:,j]) (a[:,i] - b[:,j])'
#= a[:,i,p] a[:,i,p]' - b[:,j,p] a[:,i,p]' - a[:,i,p] b[:,j,p]' + b[:,j,p] b[:,j,p]'
# batch_size,num_points,1
r1 = tf.reduce_sum(x*x,axis=-1,keep_dims=True)
r2 = tf.reduce_sum(y*y,axis=-1,keep_dims=True)
out = r1 - 2*tf.matmul(x,y,transpose_b=True) + tf.transpose(r2,perm=[0,2,1])
return out
def pdist(x):
"""do pdist
x : Tensor (batch_size,num_points,ndims)"""
#D[:,i,j] = a[:,i] a[:,i]' - a[:,i] a[:,j]' -a[:,j] a[:,i]' + a[:,j] a[:,j]'
# = a[:,i,p] a[:,i,p]' - a[:,i,p] a[:,j,p]' - a[:,j,p] a[:,i,p]' + a[:,j,p] a[:,j,p]'
# batch_size,num_points,1
r = tf.reduce_sum(x*x,axis=-1,keep_dims=True)
#batch_size,num_points,num_points
A = tf.matmul(x,x,transpose_b=True)
B = r - 2*A
out = B + tf.transpose(r,perm=[0,2,1])
return out
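# Illustrative numpy check (not part of the original module) of the expansion used above:
# ||x_i - y_j||^2 = ||x_i||^2 - 2 x_i . y_j + ||y_j||^2, evaluated batch-wise.
def _example_cdist_identity():
    x = np.random.randn(2, 5, 3)
    y = np.random.randn(2, 4, 3)
    direct = ((x[:, :, None, :] - y[:, None, :, :]) ** 2).sum(-1)
    expanded = ((x * x).sum(-1)[:, :, None]
                - 2 * np.einsum('bnd,bmd->bnm', x, y)
                + (y * y).sum(-1)[:, None, :])
    assert np.allclose(direct, expanded)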
class SquaredExponential(KernelND):
def __init__(self,_hyperparams={'l':1.,'sigma':1.},**kwargs):
super(SquaredExponential,self).__init__(_hyperparams=_hyperparams,**kwargs)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
"""return SE kernel.
inputs : Tenor (batch_size,ndims)
Input coordinates in ndims
returns kernel evaluated at all pair computations
"""
# batch_size,N,M
if share_x:
X = tf.expand_dims(X,0)
if Y is None:
x2 = pdist(X)
else:
x2 = cdist(X,Y)
out = self.variables['sigma']**2 * tf.exp(-x2/(2*self.variables['l']**2))
if eval_derivative:
grad = {'sigma': 2. * out / self.variables['sigma'],
'l': out * x2/(self.variables['l']**3) }
return out, grad
return out
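    # The closed form implemented above is k(x, x') = sigma^2 * exp(-||x - x'||^2 / (2 l^2)),
    # with dk/dsigma = 2 k / sigma and dk/dl = k * ||x - x'||^2 / l^3, matching the grad dict.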
class SquaredExponentialSep(KernelND):
def __init__(self,dim,_hyperparams={'l':1.,'sigma':1.},**kwargs):
super(SquaredExponentialSep,self).__init__(_hyperparams=_hyperparams,**kwargs)
self.dim = int(dim)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
"""return SE kernel.
inputs : Tenor (batch_size,ndims)
Input coordinates in ndims
returns kernel evaluated at all pair computations
"""
# batch_size,ndims, 1
if share_x:
X = tf.expand_dims(X,0)
if Y is None:
x2 = pdist(X[:,:,self.dim:self.dim+1])
else:
x2 = cdist(X[:,:,self.dim:self.dim+1],Y[:,:,self.dim:self.dim+1])
out = self.variables['sigma']**2 * tf.exp(-x2/(2*self.variables['l']**2))
if eval_derivative:
grad = {'sigma' : 2. * out / self.variables['sigma'],
'l' : out * x2/(self.variables['l']**3) }
return out, grad
return out
class GammaExponential(KernelND):
def __init__(self,_hyperparams={'l':1.,'gamma':1.,'sigma':1.},**kwargs):
super(GammaExponential,self).__init__(_hyperparams=_hyperparams,_hyperparams_bounds={'gamma':[1e-5,2.]},**kwargs)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
# batch_size,ndims, 1
if share_x:
X = tf.expand_dims(X,0)
if Y is None:
x2 = pdist(X)
else:
x2 = cdist(X,Y)
r = (tf.abs(x2) / self.variables['l'])**(self.variables['gamma'] / 2.)
out = self.variables['sigma']**2 * tf.exp(- r / 2.)
if eval_derivative:
l_ = r * out
grad = {'sigma' : 2. * out / self.variables['sigma'],
'l' : self.variables['gamma'] /(2 * self.variables['l']) * l_,
'gamma' : - l_ * tf.log(r) / 4.
}
return out, grad
return out
class GammaExponentialSep(KernelND):
def __init__(self,dim,_hyperparams={'l':1.,'gamma':1.,'sigma':1.},**kwargs):
super(GammaExponentialSep,self).__init__(_hyperparams=_hyperparams,_hyperparams_bounds={'gamma':[1e-5,2.]},**kwargs)
self.dim = int(dim)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
# batch_size,ndims, 1
if share_x:
X = tf.expand_dims(X,0)
if Y is None:
x2 = pdist(X[:,:,self.dim:self.dim+1])
else:
x2 = cdist(X[:,:,self.dim:self.dim+1],Y[:,:,self.dim:self.dim+1])
r = (x2 / self.variables['l'])**(self.variables['gamma'] / 2.)
out = self.variables['sigma']**2 * tf.exp(- r / 2.)
if eval_derivative:
l_ = r * out
grad = {'sigma' : 2. * out / self.variables['sigma'],
'l' : self.variables['gamma'] /(2 * self.variables['l']) * l_,
'gamma' : - l_ * tf.log(r) / 4.
}
return out, grad
return out
class MaternP(KernelND):
def __init__(self,p=1,_hyperparams={'l':1.,'sigma':1.},**kwargs):
super(MaternP,self).__init__(_hyperparams=_hyperparams,**kwargs)
self.p=int(p)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
        from scipy.special import factorial  # scipy.misc.factorial was removed in SciPy 1.0
# batch_size,ndims, 1
if share_x:
X = tf.expand_dims(X,0)
if Y is None:
x2 = pdist(X)
else:
x2 = cdist(X,Y)
r = tf.sqrt(x2)/self.variables['l']
nu = self.p + 1./2.
out = [factorial(self.p)/(factorial(self.p)) * \
(np.sqrt(8.*nu) * r )**self.p]
for i in range(1,self.p+1):
out.append(factorial(self.p+i)/(factorial(i)*factorial(self.p-i)) * \
(np.sqrt(8.*nu) * r )**(self.p-i))
out = tf.stack(out,axis=0)
out = tf.reduce_sum(out,axis=0)
out *= self.variables['sigma']**2 * tf.exp(-np.sqrt(2 * nu) * r) * factorial(self.p) / factorial(2*self.p)
return out
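    # The expression above is the half-integer Matern kernel with nu = p + 1/2
    # (e.g. Rasmussen & Williams, Eq. 4.16), with r already scaled by the length scale l:
    #   k(r) = sigma^2 exp(-sqrt(2 nu) r) * (p!/(2p)!) * sum_{i=0}^{p} (p+i)!/(i!(p-i)!) * (sqrt(8 nu) r)^(p-i)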
class MaternPSep(KernelND):
def __init__(self,dim,p=1,_hyperparams={'l':1.,'sigma':1.},**kwargs):
super(MaternPSep,self).__init__(_hyperparams=_hyperparams,**kwargs)
self.dim=int(dim)
self.p=int(p)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
        from scipy.special import factorial  # scipy.misc.factorial was removed in SciPy 1.0
# batch_size,ndims, 1
if share_x:
X = tf.expand_dims(X,0)
if Y is None:
x2 = pdist(X[:,:,self.dim:self.dim+1])
else:
x2 = cdist(X[:,:,self.dim:self.dim+1],Y[:,:,self.dim:self.dim+1])
r = tf.sqrt(x2)/self.variables['l']
nu = self.p + 1./2.
out = [factorial(self.p)/(factorial(self.p)) * \
(np.sqrt(8.*nu) * r )**self.p]
for i in range(1,self.p+1):
out.append(factorial(self.p+i)/(factorial(i)*factorial(self.p-i)) * \
(np.sqrt(8.*nu) * r )**(self.p-i))
out = tf.stack(out,axis=0)
out = tf.reduce_sum(out,axis=0)
out *= self.variables['sigma']**2 * tf.exp(-np.sqrt(2 * nu) * r) * factorial(self.p) / factorial(2*self.p)
return out
class Periodic(KernelND):
def __init__(self,_hyperparams={'l':1.,'p':1.,'sigma':1.},**kwargs):
super(Periodic,self).__init__(_hyperparams=_hyperparams,**kwargs)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
# batch_size,ndims, 1
# if share_x:
# X = tf.expand_dims(X,0)
if Y is None:
x2 = pdist(X)
else:
x2 = cdist(X,Y)
r = tf.sqrt(tf.abs(x2))/(self.variables['p'] + 1e-15)
out = self.variables['sigma']**2 * tf.exp(-2*tf.sin(np.pi * r)**2 / (1e-15 + self.variables['l'])**2)
return out
class PeriodicSep(KernelND):
def __init__(self,dim,_hyperparams={'l':1.,'p':1.,'sigma':1.},**kwargs):
super(PeriodicSep,self).__init__(_hyperparams=_hyperparams,**kwargs)
self.dim = int(dim)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
# batch_size,ndims, 1
# if share_x:
# X = tf.expand_dims(X,0)
if Y is None:
x2 = pdist(X[:,:,self.dim:self.dim+1])
else:
x2 = cdist(X[:,:,self.dim:self.dim+1],Y[:,:,self.dim:self.dim+1])
r = tf.sqrt(tf.abs(x2))/self.variables['p']
out = self.variables['sigma']**2 * tf.exp(-2*(tf.sin(np.pi * r) / self.variables['l'])**2)
return out
class Diagonal(KernelND):
def __init__(self,_hyperparams={'sigma':1.},**kwargs):
super(Diagonal,self).__init__(_hyperparams=_hyperparams,**kwargs)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
# batch_size,ndims, 1
# if share_x:
# X = tf.expand_dims(X,0)
if Y is None:
xshape = tf.shape(X)
I = tf.eye(xshape[1],batch_shape=[xshape[0]],dtype=tf.float64)
else:
xshape = tf.shape(X)
yshape = tf.shape(Y)
I = tf.eye(num_rows=xshape[1],num_columns=yshape[1],batch_shape=[xshape[0]],dtype=tf.float64)
out = self.variables['sigma']**2 * I
if eval_derivative:
grad = {'sigma': 2. * self.variables['sigma'] * I}
return out, grad
return out
class RationalQuadratic(KernelND):
def __init__(self,_hyperparams={'l':1.,'alpha':1.,'sigma':1.},**kwargs):
super(RationalQuadratic,self).__init__(_hyperparams=_hyperparams,**kwargs)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
# batch_size,ndims, 1
# if share_x:
# X = tf.expand_dims(X,0)
if Y is None:
x2 = pdist(X)
else:
x2 = cdist(X,Y)
r = x2/(2 * self.variables['l']**2 * self.variables['alpha'])
out = self.variables['sigma']**2 * (1 + r)**self.variables['alpha']
return out
class RationalQuadraticSep(KernelND):
def __init__(self,dim,_hyperparams={'l':1.,'alpha':1.,'sigma':1.},**kwargs):
super(RationalQuadraticSep,self).__init__(_hyperparams=_hyperparams,**kwargs)
self.dim = int(dim)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
# batch_size,ndims, 1
# if share_x:
# X = tf.expand_dims(X,0)
if Y is None:
x2 = pdist(X[:,:,self.dim:self.dim+1])
else:
x2 = cdist(X[:,:,self.dim:self.dim+1],Y[:,:,self.dim:self.dim+1])
r = x2/(2 * self.variables['l']**2 * self.variables['alpha'])
out = self.variables['sigma']**2 * (1 + r)**self.variables['alpha']
return out
class DotProduct(KernelND):
def __init__(self,_hyperparams={'c':0,'sigma_b':1.,'sigma_v':1.},**kwargs):
super(DotProduct,self).__init__(_hyperparams=_hyperparams,
_hyperparams_bounds={'c':[-1e5,1e5]},**kwargs)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
# batch_size,ndims, 1
# if share_x:
# X = tf.expand_dims(X,0)
X -= self.variables['c']
if Y is None:
#batch_size, num_points, ndim
x2 = tf.matmul(X,X,transpose_b=True)
else:
Y -= self.variables['c']
x2 = tf.matmul(X,Y,transpose_b=True)
out = x2 * self.variables['sigma_v']**2 + self.variables['sigma_b']**2
return out
class DotProductSep(KernelND):
def __init__(self,dim,_hyperparams={'c':0,'sigma_b':1.,'sigma_v':1.},**kwargs):
super(DotProductSep,self).__init__(_hyperparams=_hyperparams,
_hyperparams_bounds={'c':[-1e5,1e5]},**kwargs)
self.dim = int(dim)
def call(self,X,Y=None,share_x=False,eval_derivative=False):
# batch_size,ndims, 1
# if share_x:
# X = tf.expand_dims(X,0)
X = X[:,:,self.dim:self.dim+1] - self.variables['c']
if Y is None:
#batch_size, num_points, ndim
x2 = tf.matmul(X,X,transpose_b=True)
else:
Y = Y[:,:,self.dim:self.dim+1] - self.variables['c']
x2 = tf.matmul(X,Y,transpose_b=True)
out = x2 * self.variables['sigma_v']**2 + self.variables['sigma_b']**2
return out
class PhaseScreen(KernelND):
"""
    Composite phase-screen kernel (terms marked 'disabled' are commented out in call()):
    Diag(sigma_D)
    + SE(tau_slow)*SE(tau_quick)            # temporal term, currently disabled
    + RQ(alpha, l_inertial)*SE(L_outer)     # spatial term at the pierce points (RQ part disabled)
    + SE(l_freq)                            # frequency term, currently disabled
"""
def __init__(self,_hyperparams={'sigma_D':1.,
'sigma_temporal':1.,'tau_slow': 1., 'tau_quick':1.,
'sigma_spatial':1.,'l_inertial':1.,'L_outer':1., 'alpha': 1./6.,
'sigma_freq':1., 'l_freq':1.},
_hyperparams_bounds={'sigma_D':[1e-5,np.pi],
'sigma_temporal':[1e-5,np.pi],'tau_slow': [100,1000], 'tau_quick':[16,100],
'sigma_spatial':[1e-5,3*np.pi],'l_inertial':[1,20],'L_outer':[4,10], 'alpha' : [1./100.,1e2],
'sigma_freq':[1e-5,np.pi], 'l_freq':[0.2e6,50e5]},**kwargs):
super(PhaseScreen,self).__init__(_hyperparams=_hyperparams,
_hyperparams_bounds=_hyperparams_bounds,**kwargs)
self.fixed['alpha'] = False
def call(self,X,Y=None,eval_derivative=False):
# batch_size,npoints, (ant_u, ant_v, ant_w, time, dir_u, dir_v, dir_w, freq)
if Y is None:
xshape = tf.shape(X)
I = tf.eye(xshape[1],batch_shape=[xshape[0]],dtype=tf.float64)
ray_tips = X[:,:,0:3] + X[:,:,4:7]*300./X[:,:,6:7]
x2 = pdist(ray_tips)
#t2 = pdist(X[:,:,3:4])
#x2 = pdist(X[:,:,0:2])
#batch_size, num_points, 3
#f2 = pdist(X[:,:,7:8])
else:
xshape = tf.shape(X)
yshape = tf.shape(Y)
I = tf.eye(num_rows=xshape[1],num_columns=yshape[1],batch_shape=[xshape[0]],dtype=tf.float64)
#t2 = cdist(X[:,:,3:4],Y[:,:,3:4])
#x2 = cdist(X[:,:,0:2],Y[:,:,0:2])
#batch_size, num_points, 3
ray_tips_X = X[:,:,0:3] + X[:,:,4:7]*300./X[:,:,6:7]
ray_tips_Y = Y[:,:,0:3] + Y[:,:,4:7]*300./Y[:,:,6:7]
x2 = cdist(ray_tips_X, ray_tips_Y)
#f2 = cdist(X[:,:,7:8],Y[:,:,7:8])
uncorrelated = self.variables['sigma_D']**2 * I
#temporal = self.variables['sigma_temporal']**2 * tf.exp(-t2*(1./self.variables['tau_slow']**2 + 1./self.variables['tau_quick']**2)/2.)
#alpha = self.variables['alpha']
#r = x2/(2 * self.variables['l_inertial']**2 * alpha)
s_ = tf.exp(-x2*(1./self.variables['L_outer']**2)/2.)
#spatial = self.variables['sigma_spatial']**2 * (1 +r)**alpha * s_
#freq = self.variables['sigma_freq']**2 * tf.exp(-f2/(2.*self.variables['l_freq']**2))
spatial = self.variables['sigma_spatial']**2 * s_
out = uncorrelated + spatial # temporal + freq
if eval_derivative:
grad = {'sigma_D' : 2. * uncorrelated/self.variables['sigma_D'],
#'sigma_temporal': temporal/self.variables['sigma_temporal'],
#'tau_slow':temporal * x2/(self.variables['tau_slow']**3),
#'tau_quick':temporal * x2/(self.variables['tau_quick']**3),
'sigma_spatial' : 2. * spatial/self.variables['sigma_spatial'],
#'l_inertial':self.variables['sigma_spatial']**2 * (1.+r)**(alpha-1) * s_ * ( -2.*r / self.variables['l_inertial']),
'L_outer' : spatial*x2/self.variables['L_outer']**3
#'alpha':spatial*(tf.log(1+r) - r/(r+1))
#'sigma_freq': freq/self.variables['sigma_freq'],
#'l_freq':freq*f2/self.variables['l_freq']**3
}
return out, grad
return out
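# Note on the ray-tip construction in the kernel above: X[:, :, 0:3] are antenna positions and
# X[:, :, 4:7] is the pointing direction; scaling the direction by 300./X[:, :, 6:7] stretches it
# until its vertical (w) component equals 300, which appears to place the tips at an assumed
# ~300 km ionospheric pierce-point height. The spatial SE term then acts on distances between
# those pierce points, while the diagonal term models per-sample uncorrelated phase noise.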
def is_singular(A):
return np.linalg.cond(A) > 1/sys.float_info.epsilon
def _level1_solve(x,y,sigma_y,xstar,K,use_cholesky,batch_idx_from,batch_idx_to):
with tf.variable_scope("level1_solve"):
#batch_size
n = tf.to_double(tf.shape(y)[1])
Knn = K.call(x)[batch_idx_from:batch_idx_to,:,:]
Knm = K.call(x,xstar)[batch_idx_from:batch_idx_to,:,:]
Kmm = K.call(xstar)[batch_idx_from:batch_idx_to,:,:]
y = y[batch_idx_from:batch_idx_to,:]
sigma_y = sigma_y[batch_idx_from:batch_idx_to,:]
# batch_size, n,n
Kf = Knn + tf.matrix_diag(sigma_y**2,name='sigma_y2_diag')
def _cho(Kf=Kf,y=y):
# batch_size, n, n
L = tf.cholesky(Kf,name='L')
# batch_size, n
alpha = tf.squeeze(tf.cholesky_solve(L, tf.expand_dims(y,-1), name='alpha'),axis=-1)
# batch_size, n, m
# batch_size, n
# batch_size, m
fstar = tf.matmul(Knm,tf.expand_dims(alpha,-1),transpose_a=True)
#tf.einsum("bnm,bn->bm",Knm,alpha)
cov = Kmm
cov -= tf.matmul(Knm,tf.cholesky_solve(L,Knm),transpose_a=True)
#tf.einsum("bnm,bnl->bml",Knm,tf.cholesky_solve(L,Knm))
log_mar_like = -tf.reduce_sum(y*alpha,1)/2. - tf.reduce_sum(tf.log(tf.matrix_diag_part(L)),axis=1) - n*(np.log(2.*np.pi)/2.)
return fstar,cov,log_mar_like
def _no_cho(Kf=Kf,y=y):
Kf = (Kf + tf.transpose(Kf,perm=[0,2,1]))/2.
e,v = tf.self_adjoint_eig(Kf)
e = tf.where(e > 1e-14, e, 1e-14*tf.ones_like(e))
Kf = tf.matmul(tf.matmul(v,tf.matrix_diag(e),transpose_a=True),v)
logdet = tf.reduce_sum(tf.where(e > 1e-14, tf.log(e), tf.zeros_like(e)),axis=-1,name='logdet')
#batch_size, n, 1
alpha = tf.squeeze(tf.matrix_solve(Kf,tf.expand_dims(y,-1),name='solve_alpha'),axis=2)
fstar = tf.matmul(Knm,tf.expand_dims(alpha,-1),transpose_a=True)
cov = Kmm
cov -= tf.matmul(Knm,tf.matrix_solve(Kf,Knm),transpose_a=True)
log_mar_like = (-tf.reduce_sum(y*alpha,axis=1) - logdet - n*np.log(2.*np.pi))/2.
return fstar,cov,log_mar_like
return tf.cond(use_cholesky,_cho,_no_cho)
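# The two branches above implement the standard GP regression equations
# (Rasmussen & Williams, ch. 2), with Kf = K(X, X) + diag(sigma_y^2):
#   f*        = K(X*, X)^T alpha,           alpha = Kf^{-1} y
#   cov(f*)   = K(X*, X*) - K(X*, X)^T Kf^{-1} K(X, X*)
#   log p(y)  = -1/2 y^T alpha - sum_i log L_ii - (n/2) log(2 pi)
# The eigen-decomposition branch is a fallback for when the Cholesky factorization fails.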
def _neg_log_mar_like(x,y,sigma_y,K,use_cholesky):
with tf.variable_scope("neg_log_mar_like"):
#batch_size
n = tf.to_double(tf.shape(y)[1])
Knn = K.call(x,x,eval_derivative=False)
# batch_size, n,n
Kf = Knn + tf.matrix_diag(sigma_y**2,name='sigma_y2_diag')
Kf = tf.Print(Kf,[Kf])
# batch_size, num_hp, n, n
def _cho(Kf=Kf,y=y):
# batch_size, n, n
L = tf.cholesky(Kf,name='L')
# batch_size, n,1
alpha = tf.cholesky_solve(L, tf.expand_dims(y,-1), name='alpha')
neg_log_mar_like = tf.reduce_sum(y*tf.squeeze(alpha,axis=2),1)/2. + tf.reduce_sum(tf.log(tf.matrix_diag_part(L)),axis=1) + n*(np.log(2.*np.pi)/2.)
return neg_log_mar_like
def _no_cho(Kf=Kf,y=y):
Kf = (Kf + tf.transpose(Kf,perm=[0,2,1]))/2.
e,v = tf.self_adjoint_eig(Kf)
e = tf.where(e > 1e-14, e, 1e-14*tf.ones_like(e))
Kf = tf.matmul(tf.matmul(v,tf.matrix_diag(e),transpose_a=True),v)
logdet = tf.reduce_sum(tf.where(e > 1e-14, tf.log(e), tf.zeros_like(e)),axis=-1,name='logdet')
#batch_size, n, 1
alpha = tf.matrix_solve(Kf,tf.expand_dims(y,-1),name='solve_alpha')
neg_log_mar_like = (tf.reduce_sum(y*tf.squeeze(alpha,axis=2),axis=1) + logdet + n*np.log(2.*np.pi))/2.
return neg_log_mar_like
# result = tf.stack([_no_cho(tf.expand_dims(Kf_,0),tf.expand_dims(y_,0)) for Kf_, y_ in zip(tf.unstack(Kf),tf.unstack(y))])
# return result
return tf.cond(use_cholesky,_cho,_no_cho)
def _neg_log_mar_like_and_grad(x,y,sigma_y,K,use_cholesky):
with tf.variable_scope("neg_log_mar_like"):
#batch_size
n = tf.to_double(tf.shape(y)[1])
Knn,grad = K.call(x,x,eval_derivative=True)
# batch_size, n,n
Kf = Knn + tf.matrix_diag(sigma_y**2,name='sigma_y2_diag')
# batch_size, num_hp, n, n
Kf_diff = grad#tf.stack([grad[name] for name in K.get_variables()],axis=1)
hp = [K.get_variables()[name] for name in K.get_variables()]
def _cho(Kf=Kf,y=y,hp=hp,Kf_diff=Kf_diff):
# batch_size, n, n
L = tf.cholesky(Kf,name='L')
# batch_size, n,1
alpha = tf.cholesky_solve(L, tf.expand_dims(y,-1), name='alpha')
neg_log_mar_like = tf.reduce_sum(y*tf.squeeze(alpha,axis=2),1)/2. + tf.reduce_sum(tf.log(tf.matrix_diag_part(L)),axis=1) + n*(np.log(2.*np.pi)/2.)
aa = tf.matmul(alpha,alpha,transpose_b=True)
grad = {}
for name in Kf_diff:#tf.unstack(Kf_diff,axis=1):
k_diff = Kf_diff[name]
aaK = tf.matmul(aa,k_diff,name='aaK')
KK = tf.cholesky_solve(L, k_diff, name='KK')
grad_ = (tf.trace(aaK) - tf.trace(KK))/2.
grad_ = tf.where(tf.is_finite(grad_),grad_,tf.zeros_like(grad_))
grad[name] = -grad_
return neg_log_mar_like, grad
def _no_cho(Kf=Kf,y=y,hp=hp,Kf_diff=Kf_diff):
Kf = (Kf + tf.transpose(Kf,perm=[0,2,1]))/2.
e,v = tf.self_adjoint_eig(Kf)
e = tf.where(e > 1e-14, e, 1e-14*tf.ones_like(e))
Kf = tf.matmul(tf.matmul(v,tf.matrix_diag(e),transpose_a=True),v)
logdet = tf.reduce_sum(tf.where(e > 1e-14, tf.log(e), tf.zeros_like(e)),axis=-1,name='logdet')
#batch_size, n, 1
alpha = tf.matrix_solve(Kf,tf.expand_dims(y,-1),name='solve_alpha')
neg_log_mar_like = (tf.reduce_sum(y*tf.squeeze(alpha,axis=2),axis=1) + logdet + n*np.log(2.*np.pi))/2.
aa = tf.matmul(alpha,alpha,transpose_b=True)
grad = {}
for name in Kf_diff:#tf.unstack(Kf_diff,axis=1):
k_diff = Kf_diff[name]
aaK = tf.matmul(aa,k_diff,name='aaK')
KK = tf.matrix_solve(Kf, k_diff, name='KK')
grad_ = (tf.trace(aaK) - tf.trace(KK))/2.
grad_ = tf.where(tf.is_finite(grad_),grad_,tf.zeros_like(grad_))
grad[name] = -grad_
return neg_log_mar_like, grad
# result = tf.stack([_no_cho(tf.expand_dims(Kf_,0),tf.expand_dims(y_,0)) for Kf_, y_ in zip(tf.unstack(Kf),tf.unstack(y))])
# return result
return tf.cond(use_cholesky,_cho,_no_cho)
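# The hyperparameter gradients above follow the standard identity
#   d/dtheta log p(y|X) = 1/2 tr( (alpha alpha^T - Kf^{-1}) dKf/dtheta ),  alpha = Kf^{-1} y,
# negated here because the objective is the negative log marginal likelihood.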
def _level2_optimize(x,y,sigma_y,K,use_cholesky,learning_rate):
with tf.variable_scope("level2_solve"):
optimizer = tf.train.AdamOptimizer(learning_rate)
# neg_log_mar_like, grad =_neg_log_mar_like_and_grad(x,y,sigma_y,K,use_cholesky)
# grad = [(tf.expand_dims(tf.expand_dims(grad[name],-1),-1),K.get_variables_()[name]) for name in grad]
# print(grad)
neg_log_mar_like =_neg_log_mar_like(x,y,sigma_y,K,use_cholesky)
out = optimizer.minimize(tf.reduce_sum(neg_log_mar_like))
#out = optimizer.apply_gradients(grad)
return out, neg_log_mar_like
class Pipeline(object):
"""This class defines the problems that are to be solved using Gaussian processes.
In general many problems can be solved at once using batching so long
as the dimensions are the same."""
def __init__(self,batch_size,K,multi_dataset=False,share_x=False):
assert isinstance(K,KernelND)
self.K = K
self.batch_size = int(batch_size)
self.K.build(batch_size=self.batch_size,multi_dataset=multi_dataset)
self.multi_dataset = multi_dataset
self.share_x = share_x
self.sess = tf.Session()
self._build()
self.sess.run(tf.global_variables_initializer())
def _build(self):
with tf.variable_scope("pipeline"):
self.X = tf.placeholder(tf.float64,shape=None, name='X')
self.y = tf.placeholder(tf.float64,shape=None, name='y')
self.sigma_y = tf.placeholder(tf.float64,shape=None, name='sigma_y')
self.Xstar = tf.placeholder(tf.float64,shape=[None,None,None], name='Xstar')
self.use_cholesky = tf.placeholder(tf.bool,shape=(),name='use_cholesky')
self.batch_idx_from = tf.placeholder(tf.int32,shape=(),name='batch_idx_from')
self.batch_idx_to = tf.placeholder(tf.int32,shape=(),name='batch_idx_to')
self.ystar, self.cov, self.lml = _level1_solve(self.X,self.y,self.sigma_y,self.Xstar,self.K,self.use_cholesky,self.batch_idx_from,self.batch_idx_to)
self.learning_rate = tf.placeholder(tf.float64,shape=None, name='learning_rate')
self.level2_op, self.neg_log_mar_like = _level2_optimize(self.X,self.y,self.sigma_y,self.K,self.use_cholesky,self.learning_rate)
def level1_predict(self,X,y,sigma_y, Xstar=None, smooth=False,batch_idx=None):
'''
Predictive distribution.
X : array (batch_size,num_points, ndims)
training input coords
y : array (batch_size, num_points)
training outputs
sigma_y : array (batch_size, num_points)
uncertainty for each y point
Xstar : array (batch_size,num_points_test, ndims)
test input coords.
smooth : bool
if True smooth using Xstar = X
'''
if Xstar is None and smooth:
Xstar = X
assert Xstar is not None
if self.share_x:
if len(X.shape) == 2:
X = np.expand_dims(X,0)
if len(Xstar.shape) == 2:
Xstar = np.expand_dims(Xstar,0)
if len(y.shape) == 1:
y = np.expand_dims(y,0)
y = np.tile(y,(self.batch_size,1))
if len(sigma_y.shape) == 1:
sigma_y = np.expand_dims(sigma_y,0)
sigma_y = np.tile(sigma_y,(self.batch_size,1))
else:
assert len(X.shape) == 3
assert len(y.shape) == 2
assert y.shape[0] == X.shape[0]
assert y.shape[1] == X.shape[1]
assert y.shape == sigma_y.shape
assert Xstar.shape[0] == X.shape[0]
feed_dict = {self.X : X.astype(float),
self.y : y.astype(float),
self.sigma_y : sigma_y.astype(float),
self.Xstar : Xstar.astype(float)}
if batch_idx is None:
feed_dict[self.batch_idx_from] = 0
feed_dict[self.batch_idx_to] = self.batch_size
else:
feed_dict[self.batch_idx_from] = int(batch_idx)
feed_dict[self.batch_idx_to] = int(batch_idx + 1)
try:
feed_dict[self.use_cholesky] = True
ystar, cov, lml = self.sess.run([self.ystar, self.cov, self.lml],feed_dict=feed_dict)
        except Exception:  # Cholesky failed; fall back to the eigen-decomposition branch
feed_dict[self.use_cholesky] = False
ystar, cov, lml = self.sess.run([self.ystar, self.cov, self.lml],feed_dict=feed_dict)
return ystar,cov,lml
def level2_optimize(self,X,y,sigma_y,delta=0.001,patience=20,epochs=1000):
if self.share_x:
if len(X.shape) == 2:
X = np.expand_dims(X,0)
if len(y.shape) == 1:
y = np.expand_dims(y,0)
y = np.tile(y,(self.batch_size,1))
if len(sigma_y.shape) == 1:
sigma_y = np.expand_dims(sigma_y,0)
sigma_y = np.tile(sigma_y,(self.batch_size,1))
else:
assert len(X.shape) == 3
assert len(y.shape) == 2
assert y.shape[0] == X.shape[0]
assert y.shape[1] == X.shape[1]
assert y.shape == sigma_y.shape
feed_dict = {self.X : X.astype(float),
self.y : y.astype(float),
self.sigma_y : sigma_y.astype(float),
self.learning_rate : 0.001}
neg_log_mar_lik_last = np.inf
patience_count = 0
epoch_count = 0
while epoch_count < epochs:
epoch_count += 1
try:
feed_dict[self.use_cholesky] = True
_, neg_log_mar_lik = self.sess.run([self.level2_op,self.neg_log_mar_like],feed_dict=feed_dict)
            except Exception:  # Cholesky failed; fall back to the eigen-decomposition branch
feed_dict[self.use_cholesky] = False
_, neg_log_mar_lik = self.sess.run([self.level2_op,self.neg_log_mar_like],feed_dict=feed_dict)
print('Hamiltonian: {}'.format(neg_log_mar_lik))
if np.all((neg_log_mar_lik/neg_log_mar_lik_last - 1) > -delta):
patience_count += 1
#feed_dict[self.learning_rate] /= 3.
feed_dict[self.learning_rate] = max(0.00001,feed_dict[self.learning_rate])
if patience_count > patience:
break
else:
neg_log_mar_lik_last = neg_log_mar_lik
patience_count = 0
#feed_dict[self.learning_rate] *= 3.
feed_dict[self.learning_rate] = min(0.1,feed_dict[self.learning_rate])
hp = self.sess.run(self.K.get_variables())
self.K.set_hyperparams(hp)
print(self.K)
return neg_log_mar_lik
def test_kernels():
K1 = Periodic(use_initializer=False,hyperparams={'l':5,'sigma':10})
K1.set_hyperparams_bounds('l',[1e-2, 6])
K1.set_hyperparams_bounds('sigma',[1e-5,10])
K1.set_hyperparams_bounds('p',[1e-1,10])
K2 = Diagonal() + Diagonal()
K3 = SquaredExponential()
K = K1 + K2 + K3
K.build(4,seed=1234)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
K._sync_hyperparams(sess)
hp2 = K1.get_hyperparams()
assert np.all(hp2['l'] == 5)
hp2['l'] += 1
K1.set_hyperparams(hp2)
hp2_ = K1.get_hyperparams()
assert np.all(hp2_['l'] == 6.)
K._sync_variables(sess)
K._sync_hyperparams(sess)
hp3 = K1.get_hyperparams()
assert np.all(hp3['l']==6.)
def test_level2():
N = 250
X = np.random.uniform(size=[N,2])
xstar = np.linspace(0,1,50)
Xstar,Ystar = np.meshgrid(xstar,xstar)
Xstar = np.expand_dims(np.array([Xstar.flatten(),Ystar.flatten()]).T,0)
y = np.sin(X[:,0]*2*np.pi/0.5) *np.cos( X[:,1]*np.pi/0.5*2.) + np.random.normal(size=X.shape[0])*0.1
mean_y = np.mean(y)
y -= mean_y
sigma_y = np.ones_like(y)*0.1
#K1 = SquaredExponential(use_initializer=False,hyperparams={'l':0.5})
K1 = Periodic(use_initializer=True,hyperparams={'l':0.5,'sigma':8})
K1.set_hyperparams_bounds('l',[1e-2, 4])
K1.set_hyperparams_bounds('sigma',[1e-5,9])
K1.set_hyperparams_bounds('p',[1e-1,100])
K = SquaredExponential(use_initializer=False,hyperparams={'l':0.05}) + Diagonal(use_initializer=False,hyperparams={'sigma':1})
p = Pipeline(2,K,multi_dataset=False,share_x = True)
print(p.level1_predict(X,y,sigma_y,smooth=True))
print(K)
win_arg = np.argmin(p.level2_optimize(X,y,sigma_y,patience=20))
print(K)
ystar,cov,lml = p.level1_predict(X,y,sigma_y,Xstar,smooth=False,batch_idx=win_arg)
var = np.diag(cov[0,:,:])
import pylab as plt
# from mpl_toolkits.mplot3d import Axes3D
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(X[:,0],X[:,1],y[:],cmap='bone')
# ax.scatter(Xstar[0,:,0],Xstar[0,:,1],ystar[0,:],cmap='bone')
# plt.show()
#print(var)
plt.imshow(ystar.reshape([50,50]),extent=(0,1,0,1),origin='lower')
plt.scatter(X[:,0],X[:,1],c=y)
#plt.scatter(xstar[:,0],xstar[:,1],c=fstar,marker='+')
plt.show()
if __name__=='__main__':
test_kernels()
test_level2()
```
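For reference, the level-1 solve and log marginal likelihood used above are the standard GP regression identities. The snippet below is a minimal NumPy sketch of those equations only — an illustration with an assumed isotropic squared-exponential kernel and hypothetical helper names, not the package's TensorFlow pipeline or its composable kernel classes.
```python
# Minimal NumPy sketch of a level-1 GP solve (illustrative only).
import numpy as np

def rbf(X, Y, l=0.5, sigma=1.0):
    # squared-exponential kernel between two sets of points
    d2 = np.sum((X[:, None, :] - Y[None, :, :])**2, axis=-1)
    return sigma**2 * np.exp(-0.5 * d2 / l**2)

def level1_solve_np(X, y, sigma_y, Xstar, l=0.5, sigma=1.0):
    """Posterior mean/cov and log marginal likelihood with Gaussian noise sigma_y."""
    K = rbf(X, X, l, sigma) + np.diag(sigma_y**2)
    L = np.linalg.cholesky(K)
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))
    Ks = rbf(Xstar, X, l, sigma)
    ystar = Ks @ alpha
    v = np.linalg.solve(L, Ks.T)
    cov = rbf(Xstar, Xstar, l, sigma) - v.T @ v
    lml = -0.5 * y @ alpha - np.sum(np.log(np.diag(L))) - 0.5 * len(y) * np.log(2 * np.pi)
    return ystar, cov, lml

if __name__ == '__main__':
    X = np.random.uniform(size=(50, 2))
    y = np.sin(2 * np.pi * X[:, 0])
    ystar, cov, lml = level1_solve_np(X, y, 0.1 * np.ones(50), X)
```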
#### File: ionotomo/inversion/gradient_and_adjoint.py
```python
from ionotomo.geometry.tri_cubic import bisection
import numpy as np
from scipy.integrate import simps
import dask.array as da
from dask import delayed
from dask.multiprocessing import get
from ionotomo.ionosphere.covariance import Covariance
def do_adjoint(rays, dd, K_ne, m_tci, sigma_m, Nkernel, size_cell, i0):
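# Sketch of what do_adjoint computes (per direction slice; the callers
# parallelize over directions): it applies an approximation of Cm . G^T to the
# weighted residuals dd, where
#   * G is the linearized TEC operator whose kernel along each ray is
#     d(TEC)/dm = K_ne * exp(m) / 1e13 (stored in nevec below),
#   * Cm is a squared-exponential model covariance of width L_m = Nkernel*size_cell
#     and amplitude sigma_m**2, evaluated only within a Nkernel-cell
#     neighbourhood of each ray (the mask / idx_min / idx_max bookkeeping),
#   * the integral along each ray segment is done with simps.
# The i0 index marks the reference antenna whose contribution is meant to be
# removed (see the commented lines and the final subtraction).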
#print("Doing gradient")
L_m = Nkernel*size_cell
#if antennas parallelization Nt,Nd
#if directions parallelization Na,Nd
N1,N2,_,Ns = rays.shape
m_shape = [N1,N2,m_tci.nx,m_tci.ny,m_tci.nz]
grad = np.zeros([m_tci.nx,m_tci.ny,m_tci.nz],dtype=np.double)
mask = np.zeros(m_shape, dtype=np.bool)
idx_min = np.ones(m_shape,dtype=np.int64)*Ns
idx_max = np.ones(m_shape,dtype=np.int64)*-1
nevec = np.zeros([N1,N2,Ns],dtype=np.double)
#go through the mask
# X,Y,Z = np.meshgrid(np.arange(m_tci.xvec.size),
# np.arange(m_tci.yvec.size),
# np.arange(m_tci.zvec.size),indexing='ij')
j = 0
while j < N1:
k = 0
while k < N2:
x_ray = rays[j,k,0,:]
y_ray = rays[j,k,1,:]
z_ray = rays[j,k,2,:]
s_ray = rays[j,k,3,:]
nevec[j,k,:] = K_ne*np.exp(m_tci.interp(x_ray,y_ray,z_ray))/1e13
idx = 0
while idx < Ns:
#nevec[j,k,idx] = K_ne*np.exp(m_tci.interp(x_ray[idx],y_ray[idx],z_ray[idx]))/1e13
xi,yi,zi = bisection(m_tci.xvec,x_ray[idx]),bisection(m_tci.yvec,y_ray[idx]),bisection(m_tci.zvec,z_ray[idx])
local_mask = (j,k,slice(max(0,xi - Nkernel), min(m_tci.nx - 1, xi + Nkernel + 1)),
slice(max(0,yi - Nkernel) , min(m_tci.ny - 1,yi + Nkernel + 1)),
slice(max(0, zi - Nkernel), min(m_tci.nz - 1, zi + Nkernel + 1)))
mask[local_mask] = True
shape = mask[local_mask].shape
idx_max[local_mask] = np.max(np.stack([idx_max[local_mask],
np.ones(shape,dtype=np.int64)*idx],axis=-1),axis=-1)
#print(idx_max[local_mask])
idx_min[local_mask] = np.min(np.stack([idx_min[local_mask],
np.ones(shape,dtype=np.int64)*idx],axis=-1),axis=-1)
idx += 1
k += 1
j += 1
sum_mask = np.sum(np.sum(mask,axis=0),axis=0)
xi = 0
while xi < m_tci.nx:
yi = 0
while yi < m_tci.ny:
zi = 0
while zi < m_tci.nz:
if not sum_mask[xi,yi,zi]:
zi += 1
continue
x,y,z = m_tci.xvec[xi],m_tci.yvec[yi],m_tci.zvec[zi]
j = 0
while j < N2:
i = 0
while i < N1:
x_ray = rays[i,j,0,:]
y_ray = rays[i,j,1,:]
z_ray = rays[i,j,2,:]
s_ray = rays[i,j,3,:]
ne = nevec[i,j,:]
if mask[i,j,xi,yi,zi]:
segment_mask = (slice(idx_min[i,j,xi,yi,zi],idx_max[i,j,xi,yi,zi]+1),)
dx = x - x_ray[segment_mask]
dy = y - y_ray[segment_mask]
dz = z - z_ray[segment_mask]
Cm = dx**2
dy *= dy
dz *= dz
Cm += dy
Cm += dz
#np.sqrt(Cm,out=Cm)
Cm /= -2.*L_m**2
np.exp(Cm,out=Cm)
Cm *= sigma_m**2
Cm *= ne[segment_mask]
comp = simps(Cm*dd[i,j],s_ray[segment_mask])
grad[xi,yi,zi] += comp
# if i == i0:
# grad[xi,yi,zi] -= N1*comp
i += 1
j += 1
zi += 1
yi += 1
xi += 1
grad[:,:,:] -= grad[i0,:,:]
return grad
def compute_adjoint_dask(rays, g, dobs, i0, K_ne, m_tci, m_prior, CdCt, sigma_m, Nkernel, size_cell):
L_m = Nkernel*size_cell
# #i not eq i0 mask
# mask = np.ones(rays.shape[0],dtype=np.bool)
# mask[i0] = False
# rays = rays[mask,:,:,:,:]
# g = g[mask,:,:]
# dobs = dobs[mask,:,:]
# CdCt = CdCt[mask,:,:]
#residuals
#g.shape, dobs.shape [Na,Nt,Nd]
dd = g - dobs
#weighted residuals
#Cd.shape [Na,Nt,Nd] i.e. diagonal
#CdCt^-1 = 1./CdCt
dd /= (CdCt + 1e-15)
#get ray info
Na, Nt, Nd, _ ,Ns = rays.shape
#parallelize over directions
gradient = da.sum(da.stack([da.from_delayed(delayed(do_adjoint)(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
sigma_m, Nkernel, size_cell, i0),(m_tci.nx,m_tci.ny,m_tci.nz),dtype=np.double) for d in range(Nd)],axis=-1),axis=-1)
gradient = gradient.compute(get=get)
gradient += m_tci.M
gradient -= m_prior
return gradient
def compute_adjoint(rays, g, dobs, i0, K_ne, m_tci, m_prior, CdCt, sigma_m, Nkernel, size_cell):
L_m = Nkernel*size_cell
# #i not eq i0 mask
# mask = np.ones(rays.shape[0],dtype=np.bool)
# mask[i0] = False
# rays = rays[mask,:,:,:,:]
# g = g[mask,:,:]
# dobs = dobs[mask,:,:]
# CdCt = CdCt[mask,:,:]
#residuals
#g.shape, dobs.shape [Na,Nt,Nd]
dd = g - dobs
#weighted residuals
#Cd.shape [Na,Nt,Nd] i.e. diagonal
#CdCt^-1 = 1./CdCt
dd /= (CdCt + 1e-15)
#get ray info
Na, Nt, Nd, _ ,Ns = rays.shape
# if Na < Nd:
# #parallelize over antennas
# gradient = np.sum(np.stack([do_gradient(rays[i,:,:,:,:], dd[i,:,:], K_ne, m_tci,
# sigma_m, Nkernel, size_cell) for i in range(Na)],axis=-1),axis=-1)
# else:
# #parallelize over directions
# gradient = np.sum(np.stack([do_gradient(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
# sigma_m, Nkernel, size_cell) for d in range(Nd)],axis=-1),axis=-1)
#parallelize over directions
gradient = np.sum(np.stack([do_adjoint(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
sigma_m, Nkernel, size_cell,i0) for d in range(Nd)],axis=-1),axis=-1)
gradient += m_tci.M
gradient -= m_prior
return gradient
def do_gradient(rays, dd, K_ne, m_tci, sigma_m, Nkernel, size_cell, i0):
'''Gradient of S is G^t.CdCt^-1.(g-dobs) + Cm^-1.(m - mprior)'''
adjoint = do_adjoint(rays, dd, K_ne, m_tci, sigma_m, Nkernel, size_cell, i0)
return adjoint
# Nkernel=0
# #print("Doing gradient")
# L_m = Nkernel*size_cell
# #if antennas parallelization Nt,Nd
# #if directions parallelization Na,Nd
# N1,N2,_,Ns = rays.shape
# m_shape = [N1,N2,m_tci.nx,m_tci.ny,m_tci.nz]
# grad = np.zeros([m_tci.nx,m_tci.ny,m_tci.nz],dtype=np.double)
#
# mask = np.zeros(m_shape, dtype=np.bool)
# #idx_min = np.ones(m_shape,dtype=np.int64)*Ns
# #idx_max = np.ones(m_shape,dtype=np.int64)*-1
# #nevec = np.zeros([N1,N2,Ns],dtype=np.double)
# #go through the mask
# j = 0
# while j < N1:
# k = 0
# while k < N2:
# x_ray = rays[j,k,0,:]
# y_ray = rays[j,k,1,:]
# z_ray = rays[j,k,2,:]
# s_ray = rays[j,k,3,:]
# idx = 0
# while idx < Ns:
# #nevec[j,k,idx] = K_ne*np.exp(m_tci.interp(x_ray[idx],y_ray[idx],z_ray[idx]))/1e16
# xi,yi,zi = bisection(m_tci.xvec,x_ray[idx]),bisection(m_tci.yvec,y_ray[idx]),bisection(m_tci.zvec,z_ray[idx])
# local_mask = (j,k,slice(max(0,xi - Nkernel), min(m_tci.nx - 1, xi + Nkernel + 1)),
# slice(max(0,yi - Nkernel) , min(m_tci.ny - 1,yi + Nkernel + 1)),
# slice(max(0, zi - Nkernel), min(m_tci.nz - 1, zi + Nkernel + 1)))
# mask[local_mask] = True
# shape = mask[local_mask].shape
## idx_max[local_mask] = np.max(np.stack([idx_max[local_mask],
## np.ones(shape,dtype=np.int64)*idx],axis=-1),axis=-1)
## #print(idx_max[local_mask])
## idx_min[local_mask] = np.min(np.stack([idx_min[local_mask],
## np.ones(shape,dtype=np.int64)*idx],axis=-1),axis=-1)
# idx += 1
# k += 1
# j += 1
#
# #Cm^-1 (m-mprior)
# dmpart = np.zeros([m_tci.nx,m_tci.ny,m_tci.nz],dtype=np.double)
# sum_mask = np.sum(np.sum(mask,axis=0),axis=0)#is there any ray in the cell at all?
# xi = 0
# while xi < m_tci.nx:
# yi = 0
# while yi < m_tci.ny:
# zi = 0
# while zi < m_tci.nz:
# if not sum_mask[xi,yi,zi]:
# zi += 1
# continue
# x,y,z = m_tci.xvec[xi],m_tci.yvec[yi],m_tci.zvec[zi]
# j = 0
# while j < N2:
# i = 0
# while i < N1:
# paircomp = 0.
# if mask[i,j,xi,yi,zi]:
# paircomp = 1.
# if mask[i0,j,xi,yi,zi]:
# paircomp -= 1.
# grad[xi,yi,zi] += dd[i,j]*paircomp*K_ne*np.exp(m_tci.interp(m_tci.xvec[xi],
# m_tci.yvec[yi],
# m_tci.zvec[zi]))/1e12
#
#
# i += 1
# j += 1
# zi += 1
# yi += 1
# xi += 1
# return grad
def compute_gradient_dask(rays, g, dobs, i0, K_ne, m_tci, m_prior, CdCt, sigma_m, Nkernel, size_cell, cov_obj=None):
L_m = Nkernel*size_cell
# #i not eq i0 mask
# mask = np.ones(rays.shape[0],dtype=np.bool)
# mask[i0] = False
# rays = rays[mask,:,:,:,:]
# g = g[mask,:,:]
# dobs = dobs[mask,:,:]
# CdCt = CdCt[mask,:,:]
#residuals
#g.shape, dobs.shape [Na,Nt,Nd]
dd = g - dobs
#weighted residuals
#Cd.shape [Na,Nt,Nd] i.e. diagonal
#CdCt^-1 = 1./CdCt
dd /= (CdCt + 1e-15)
#get ray info
Na, Nt, Nd, _ ,Ns = rays.shape
# if Na < Nd:
# #parallelize over antennas
# gradient = da.sum(da.stack([da.from_delayed(delayed(do_gradient)(rays[i,:,:,:,:], dd[i,:,:], K_ne, m_tci,
# sigma_m, Nkernel, size_cell),(m_tci.nx,m_tci.ny,m_tci.nz),dtype=np.double) for i in range(Na)],axis=-1),axis=-1)
# else:
# #parallelize over directions
# gradient = da.sum(da.stack([da.from_delayed(delayed(do_gradient)(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
# sigma_m, Nkernel, size_cell),(m_tci.nx,m_tci.ny,m_tci.nz),dtype=np.double) for d in range(Nd)],axis=-1),axis=-1)
#parallelize over directions
gradient = da.sum(da.stack([da.from_delayed(delayed(do_gradient)(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
sigma_m, Nkernel, size_cell, i0),(m_tci.nx,m_tci.ny,m_tci.nz),dtype=np.double) for d in range(Nd)],axis=-1),axis=-1)
gradient = gradient.compute(get=get)
if cov_obj is not None:
dm = m_tci.M - m_prior
gradient += cov_obj.contract(dm)
#gradient += m_tci.M
#gradient -= m_prior
return gradient
def compute_gradient(rays, g, dobs, i0, K_ne, m_tci, m_prior, CdCt, sigma_m, Nkernel, size_cell, cov_obj=None):
L_m = Nkernel*size_cell
# #i not eq i0 mask
# mask = np.ones(rays.shape[0],dtype=np.bool)
# mask[i0] = False
# rays = rays[mask,:,:,:,:]
# g = g[mask,:,:]
# dobs = dobs[mask,:,:]
# CdCt = CdCt[mask,:,:]
#residuals
#g.shape, dobs.shape [Na,Nt,Nd]
dd = g - dobs
#weighted residuals
#Cd.shape [Na,Nt,Nd] i.e. diagonal
#CdCt^-1 = 1./CdCt
dd /= (CdCt + 1e-15)
#get ray info
Na, Nt, Nd, _ ,Ns = rays.shape
# if Na < Nd:
# #parallelize over antennas
# gradient = np.sum(np.stack([do_gradient(rays[i,:,:,:,:], dd[i,:,:], K_ne, m_tci,
# sigma_m, Nkernel, size_cell) for i in range(Na)],axis=-1),axis=-1)
# else:
# #parallelize over directions
# gradient = np.sum(np.stack([do_gradient(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
# sigma_m, Nkernel, size_cell) for d in range(Nd)],axis=-1),axis=-1)
#parallelize over directions
gradient = np.sum(np.stack([do_gradient(rays[:,:,d,:,:], dd[:,:,d], K_ne, m_tci,
sigma_m, Nkernel, size_cell,i0) for d in range(Nd)],axis=-1),axis=-1)
if cov_obj is not None:
dm = m_tci.M - m_prior
gradient += cov_obj.contract(dm)
#gradient += m_tci.M
#gradient -= m_prior
return gradient
```
#### File: ionotomo/inversion/initial_model.py
```python
import astropy.units as au
import astropy.coordinates as ac
import numpy as np
from ionotomo.astro.frames.uvw_frame import UVW
from ionotomo.astro.frames.pointing_frame import Pointing
from ionotomo.ionosphere.iri import a_priori_model
from ionotomo.geometry.tri_cubic import TriCubic
from ionotomo.ionosphere.simulation import IonosphereSimulation
from ionotomo.inversion.solution import Solution
import logging as log
def determine_inversion_domain(spacing,antennas, directions, pointing, zmax, padding = 20):
'''Determine the domain of the inversion'''
ants = antennas.transform_to(pointing).cartesian.xyz.to(au.km).value.transpose()
dirs = directions.transform_to(pointing).cartesian.xyz.value.transpose()
#old
uend = np.add.outer(ants[:,0],dirs[:,0]*zmax/dirs[:,2])
vend = np.add.outer(ants[:,1],dirs[:,1]*zmax/dirs[:,2])
wend = np.add.outer(ants[:,2],dirs[:,2]*zmax/dirs[:,2])
umin = min(np.min(ants[:,0]),np.min(uend.flatten()))-spacing*padding
umax = max(np.max(ants[:,0]),np.max(uend.flatten()))+spacing*padding
vmin = min(np.min(ants[:,1]),np.min(vend.flatten()))-spacing*padding
vmax = max(np.max(ants[:,1]),np.max(vend.flatten()))+spacing*padding
wmin = min(np.min(ants[:,2]),np.min(wend.flatten()))-spacing*padding
wmax = max(np.max(ants[:,2]),np.max(wend.flatten()))+spacing*padding
Nu = np.ceil((umax-umin)/spacing)
Nv = np.ceil((vmax-vmin)/spacing)
Nw = np.ceil((wmax-wmin)/spacing)
uvec = np.linspace(umin,umax,int(Nu))
vvec = np.linspace(vmin,vmax,int(Nv))
wvec = np.linspace(wmin,wmax,int(Nw))
log.info("Found domain u in {}..{}, v in {}..{}, w in {}..{}".format(umin,umax,vmin,vmax,wmin,wmax))
return uvec,vvec,wvec
def turbulent_perturbation(tci,sigma = 3.,corr = 20.,seed=None):
cov_obj = IonosphereSimulation(tci.xvec, tci.yvec,tci.zvec, sigma,corr,type='m52')
B = cov_obj.realization(seed=seed)
return B
def create_initial_model(datapack,ant_idx = -1, time_idx = -1, dir_idx = -1, zmax = 1000.,spacing=5.,padding=20):
antennas,antenna_labels = datapack.get_antennas(ant_idx = ant_idx)
patches, patch_names = datapack.get_directions(dir_idx=dir_idx)
times,timestamps = datapack.get_times(time_idx=time_idx)
Na = len(antennas)
Nt = len(times)
Nd = len(patches)
#Setting up ionosphere to use
log.info("Using radio array {}".format(datapack.radio_array))
phase = datapack.get_center_direction()
log.info("Using phase center {} {}".format(phase.ra,phase.dec))
fixtime = times[Nt>>1]
log.info("Fixing frame at {}".format(fixtime.isot))
uvw = UVW(location = datapack.radio_array.get_center().earth_location,obstime = fixtime,phase = phase)
log.info("Elevation is {}".format(uvw.elevation))
zenith = datapack.radio_array.get_sun_zenith_angle(fixtime)
log.info("Sun at zenith angle {}".format(zenith))
log.info("Creating ionosphere model...")
xvec,yvec,zvec = determine_inversion_domain(spacing,antennas, patches,uvw, zmax, padding = padding)
X,Y,Z = np.meshgrid(xvec,yvec,zvec,indexing='ij')
log.info("Nx={} Ny={} Nz={} number of cells: {}".format(len(xvec),len(yvec),len(zvec),np.size(X)))
coords = ac.SkyCoord(X.flatten()*au.km,Y.flatten()*au.km,Z.flatten()*au.km,frame=uvw).transform_to('itrs').earth_location.to_geodetic('WGS84')
heights = coords[2].to(au.km).value#height in geodetic
hmax=zmax
lat=datapack.radio_array.get_center().earth_location.to_geodetic().lat.to(au.deg).value
lon=datapack.radio_array.get_center().earth_location.to_geodetic().lon.to(au.deg).value
ne_model = a_priori_model(heights,hmax,lat,lon,fixtime).reshape(X.shape)
# ne_model = a_priori_model(heights,zenith,thin_f=thin_f).reshape(X.shape)
# ne_model[ne_model<4e7] = 4e7
return TriCubic(xvec,yvec,zvec,ne_model)
def create_turbulent_model(datapack,factor=2.,corr=20.,seed=None, **initial_model_kwargs):
log.info("Generating turbulent ionospheric model, correlation scale : {}".format(corr))
if seed is not None:
np.random.seed(seed)
log.info("Seeding random seed to : {}".format(seed))
ne_tci = create_initial_model(datapack,**initial_model_kwargs)
#exp(-dm) = 0.5 -> dm = -log(1/2)= log(2)
#exp(dm) = 2 -> dm = log(2)
dm = turbulent_perturbation(ne_tci,sigma=np.log(factor),corr=corr,seed=seed)
ne_tci.M = ne_tci.M*np.exp(dm)
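# The refractive index below uses the simplified (unmagnetized) Appleton-Hartree
# relation n = sqrt(1 - f_p^2 / f^2) with plasma frequency
# f_p [Hz] ~= 8.98 * sqrt(n_e [m^-3]), which is where the 8.98**2 factor comes from.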
n = np.sqrt(1 - 8.98**2 * ne_tci.M/datapack.radio_array.frequency**2)
log.info("Refractive index stats:\n\
max(n) : {}\n\
min(n) : {}\n\
median(n) : {} \n\
mean(n) : {}\n\
std(n) : {}".format(np.max(n), np.min(n),np.median(n), np.mean(n), np.std(n)))
return ne_tci
def create_initial_solution(datapack,ant_idx = -1, time_idx = -1, dir_idx = -1, zmax = 1000.,spacing=5.,padding=20):
tci = create_initial_model(datapack,ant_idx = ant_idx, time_idx = time_idx, dir_idx = dir_idx, zmax = zmax,spacing=spacing,padding=padding)
antennas,antenna_labels = datapack.get_antennas(ant_idx = ant_idx)
patches, patch_names = datapack.get_directions(dir_idx=dir_idx)
times,timestamps = datapack.get_times(time_idx=time_idx)
Na = len(antennas)
Nt = len(times)
Nd = len(patches)
phase = datapack.get_center_direction()
fixtime = times[Nt>>1]
pointing = Pointing(location = datapack.radio_array.get_center().earth_location,obstime = times[0], fixtime=fixtime,phase = phase)
return Solution(tci=tci,pointing_frame=pointing)
```
#### File: ionotomo/inversion/inversion_pipeline.py
```python
import logging as log
from dask.distributed import Client
import dask
from dask.multiprocessing import get
from ionotomo.geometry.calc_rays import calc_rays
from ionotomo.inversion.initial_model import *
from ionotomo.inversion.solution import transfer_solutions
from ionotomo.inversion.iterative_newton import iterative_newton_solve
from ionotomo.astro.real_data import DataPack
from ionotomo.ionosphere.covariance import Covariance
from ionotomo.plotting.plot_tools import plot_datapack
import os
import numpy as np
from functools import partial
import astropy.time as at
import astropy.units as au
class InversionPipeline(object):
def __init__(self, datapack, output_folder = 'output', diagnostic_folder = 'diagnostics', **kwargs):
self.output_folder = os.path.join(os.getcwd(),output_folder)
self.diagnostic_folder = os.path.join(self.output_folder,diagnostic_folder)
try:
os.makedirs(self.diagnostic_folder)
except:
pass
log.basicConfig(filename=os.path.join(self.output_folder,"log"),format='%(asctime)s %(levelname)s:%(message)s', level=log.DEBUG)
log.info("Initializing inversion for {}".format(datapack))
self.datapack = datapack
self.default_params()
for key in kwargs.keys():
try:
cur_val = getattr(self,key)
log.info("Setting {} from {} to {}".format(key,cur_val,kwargs[key]))
setattr(self,key,kwargs[key])
except:
log.debug("denied: trying to set invalid param {} {}".format(key,kwargs[key]))
def default_params(self):
'''Set the default params for the pipeline.
Each can be changed by passing as kwargs to __init__'''
self.tmax = 1000. #length of rays
self.coherence_time = 32. #seconds where we consider the ionosphere to be the same
self.num_threads_per_solve = None #all (dangerous if using num_parallel_solves more than 1)
self.num_parallel_solves = 1 #serial solve
self.stateful = False #if True then use result of previous timestep as initial point for next
self.diagnostic_period = 1 #how often to save intermediate results and report
self.spacing = 10.#km spacing in model
def preprocess(self):
"""Prepare the model"""
#split into time chunks, assumes they are contiguous
antennas,antenna_labels = self.datapack.get_antennas(ant_idx=-1)
patches, patch_names = self.datapack.get_directions(dir_idx=-1)
times,timestamps = self.datapack.get_times(time_idx=-1)
freqs = self.datapack.get_freqs(freq_idx=-1)
clock = self.datapack.get_clock(ant_idx=-1,time_idx=-1)
pointing = Pointing(location = self.datapack.radio_array.get_center().earth_location,
obstime = times[0], fixtime = times[0], phase = self.datapack.get_center_direction())
#print(antennas.transform_to(pointing).cartesian.xyz.to(au.km).value)
Na = len(antennas)
Nt = len(times)
Nd = len(patches)
Nf = len(freqs)
dobs = self.datapack.get_phase(ant_idx = -1, dir_idx = -1, time_idx = -1, freq_idx = -1)
if len(times) == 1:
self.time_window = 1
else:
dt = times[1].gps - times[0].gps
if dt <= self.coherence_time:
log.debug("Time sampling is larger than coherence time")
self.time_window = int(np.ceil(self.coherence_time/dt))
#average chunks
Navg = int(np.ceil(float(Nt)/self.time_window))
times_new = np.zeros(Navg,dtype=float)
dobs_new = np.zeros([Na,Navg,Nd,Nf],dtype=float)
prop_new = np.zeros([Na,Navg,Nd,Nf],dtype=float)
clock_new = np.zeros([Na,Navg],dtype=float)
m = np.zeros(Navg,dtype=float)
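#average the data into windows of length time_window: each slice x[i::time_window]
#picks out the i-th sample of every window, so accumulating those slices and
#dividing by the per-window counts m gives the window means (the last, possibly
#shorter, window is handled by the else branch).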
for i in range(self.time_window):
t_tmp = times[i::self.time_window]
d_tmp = dobs[:,i::self.time_window,:,:]
p_tmp = dobs[:,i::self.time_window,:,:]
c_tmp = clock[:,i::self.time_window]
if len(t_tmp) == Navg:
times_new += t_tmp.gps
dobs_new += d_tmp
prop_new += d_tmp
clock_new += c_tmp
m += 1.
else:
times_new[:-1] += t_tmp.gps
dobs_new[:,:-1,:,:] += d_tmp
prop_new[:,:-1,:,:] += d_tmp
clock_new[:,:-1] += c_tmp
m[:-1] += 1
times_new /= m
minv = 1./m
dobs_new = np.einsum('ijkl,j->ijkl',dobs_new,minv)
prop_new = np.einsum('ijkl,j->ijkl',prop_new,minv)
clock_new = np.einsum('ij,j->ij',clock_new,minv)
times_new = at.Time(times_new,format='gps',scale='tai')
data_dict = self.datapack.get_data_dict()
data_dict.update({'times':times_new, 'timestamps':times_new.isot, 'prop':prop_new,'phase':dobs_new, 'clock':clock_new})
datapack = DataPack(data_dict)
datapack.set_reference_antenna(self.datapack.ref_ant)
self.datapack = datapack
def run(self):
antennas,antenna_labels = self.datapack.get_antennas(ant_idx = -1)
patches, patch_names = self.datapack.get_directions(dir_idx=-1)
times,timestamps = self.datapack.get_times(time_idx=-1)
freqs = self.datapack.get_freqs(freq_idx=-1)
dobs = self.datapack.get_phase(ant_idx = -1,time_idx=-1,dir_idx=-1,freq_idx=-1)
clock_prior = self.datapack.get_clock(ant_idx = -1, time_idx = -1)
const_prior = self.datapack.get_const(ant_idx = -1)
Cd = np.ones(dobs.shape)*(5*np.pi/180.)**2
Ct = 0#(np.abs(dobs)*0.01)**2
CdCt = Cd + Ct
Na = len(antennas)
Nt = len(times)
Nd = len(patches)
Nf = len(freqs)
chunk_size = int(np.ceil(float(Nt)/self.num_parallel_solves))
dsk = {}
dsk["datapack"] = self.datapack
dsk["antennas"] = antennas
dsk["patches"] = patches
dsk["freqs"] = freqs
dsk["array_center"] = self.datapack.radio_array.get_center()
dsk["phase"] = self.datapack.get_center_direction()
print("Setting covariance")
dsk["covariance"] = (lambda *x: x,Covariance(dx=self.spacing,dy=self.spacing,dz=self.spacing), (5e-9)**2)
objective = []
print("running")
indices = np.arange(Nt,dtype=int)
for i in range(chunk_size):
for thread_num, time_idx in enumerate(indices[i::chunk_size]):
save_folder = os.path.join(self.diagnostic_folder,"thread_{}_time_{}".format(thread_num,time_idx))
try:
os.makedirs(save_folder)
except:
pass
#observables of this step
dsk["dobs_{}_{}".format(thread_num,time_idx)] = dobs[:,time_idx:time_idx+1,:]
dsk["CdCt_{}_{}".format(thread_num,time_idx)] = CdCt[:,time_idx:time_idx+1,:]
#time of this step
dsk["time_{}_{}".format(thread_num,time_idx)] = times[time_idx:time_idx+1]
dsk["fixtime_{}_{}".format(thread_num,time_idx)] = times[time_idx]
#initial model for time step
dsk["clock_prior_{}_{}".format(thread_num, time_idx)] = clock_prior[:,time_idx:time_idx+1]
dsk["const_prior_{}_{}".format(thread_num, time_idx)] = const_prior[:]
dsk["ne_prior_{}_{}".format(thread_num, time_idx)] = (partial(create_initial_solution,
ant_idx=-1,time_idx=[time_idx],dir_idx=-1,zmax=self.tmax,spacing=self.spacing,padding=20),"datapack")
dsk["model_prior_{}_{}".format(thread_num, time_idx)] = (lambda ne, clock, const: (ne, clock, const), "ne_prior_{}_{}".format(thread_num, time_idx),
"clock_prior_{}_{}".format(thread_num, time_idx),
"const_prior_{}_{}".format(thread_num, time_idx))
#if stateful then use solution from last time step (transfer solutions)
if self.stateful and i > 0:
dsk["ne_0_{}_{}".format(thread_num,time_idx)] = (lambda sol, ne_prior: transfer_solutions(sol[0],ne_prior),
"sol_{}_{}".format(thread_num,time_idx-1),"ne_prior_{}_{}".format(thread_num,time_idx))
dsk["clock_0_{}_{}".format(thread_num,time_idx)] = (lambda sol: sol[1],
"sol_{}_{}".format(thread_num,time_idx-1))
dsk["const_0_{}_{}".format(thread_num,time_idx)] = (lambda sol: sol[2],
"sol_{}_{}".format(thread_num,time_idx-1))
#else take the a priori as starting point
elif i == 0 or not self.stateful:
dsk["ne_0_{}_{}".format(thread_num,time_idx)] = "ne_prior_{}_{}".format(thread_num,time_idx)
dsk["clock_0_{}_{}".format(thread_num,time_idx)] = "clock_prior_{}_{}".format(thread_num,time_idx)
dsk["const_0_{}_{}".format(thread_num,time_idx)] = "const_prior_{}_{}".format(thread_num,time_idx)
dsk["model_0_{}_{}".format(thread_num, time_idx)] = (lambda ne, clock, const: (ne, clock, const),
"ne_0_{}_{}".format(thread_num, time_idx),
"clock_0_{}_{}".format(thread_num, time_idx),
"const_0_{}_{}".format(thread_num, time_idx))
#calculate the rays
dsk["rays_{}_{}".format(thread_num, time_idx)] = (calc_rays,
"antennas","patches","time_{}_{}".format(thread_num, time_idx), "array_center", "fixtime_{}_{}".format(thread_num,time_idx),
"phase", "ne_0_{}_{}".format(thread_num,time_idx), self.datapack.radio_array.frequency, True, self.tmax, None)
#irls solve
# dsk["sol_{}_{}".format(thread_num,time_idx)] = (lambda *x: x[0],"ne_0_{}_{}".format(thread_num,time_idx),"ne_prior_{}_{}".format(thread_num,time_idx),"rays_{}_{}".format(thread_num,time_idx))
#
dsk["plot_datapack_{}_{}".format(thread_num,time_idx)] = (partial(plot_datapack,ant_idx=-1,time_idx=[time_idx], dir_idx=-1,freq_idx=-1,figname=os.path.join(save_folder,"dobs"),vmin=None,vmax=None), "datapack")
dsk["sol_{}_{}".format(thread_num,time_idx)] = (iterative_newton_solve,
"model_0_{}_{}".format(thread_num,time_idx),
"model_prior_{}_{}".format(thread_num,time_idx),
"rays_{}_{}".format(thread_num,time_idx),
"freqs",
"covariance",
"CdCt_{}_{}".format(thread_num,time_idx),
"dobs_{}_{}".format(thread_num,time_idx),
self.num_threads_per_solve,
save_folder)
#Add result of list of computations
objective.append("plot_datapack_{}_{}".format(thread_num,time_idx))
objective.append("sol_{}_{}".format(thread_num,time_idx))
from dask.callbacks import Callback
class PrintKeys(Callback):
def _pretask(self, key, dask, state):
"""Print the key of every task as it's started"""
print("Computing: {0}!".format(repr(key)))
with PrintKeys():
vals = dask.get(dsk,objective,num_workers = None)
print(vals)
@property
def num_threads_per_solve(self):
return self._num_threads_per_solve
@num_threads_per_solve.setter
def num_threads_per_solve(self,num):
if num is not None:
assert num > 0
self._num_threads_per_solve = int(num)
else:
self._num_threads_per_solve = None
@property
def coherence_time(self):
return self._coherence_time
@coherence_time.setter
def coherence_time(self,num):
assert num > 0
self._coherence_time = float(num)
@property
def num_parallel_solves(self):
return self._num_parallel_solves
@num_parallel_solves.setter
def num_parallel_solves(self,num):
assert num > 0
self._num_parallel_solves = int(num)
@property
def stateful(self):
return self._stateful
@stateful.setter
def stateful(self,num):
self._stateful = bool(num)
```
#### File: ionotomo/ionosphere/simulation.py
```python
import numpy as np
from scipy.special import gamma, kv
class IonosphereSimulation_OLD(object):
"""Simulate a realisation with a Matern kernel and Chapmen layers.
tci : TriCubic
defines the volume
sigma : the overall magnitude of the kernel
corr : the correlation length
nu : smoothness"""
def __init__(self,tci,sigma,corr, nu):
self.sigma = sigma
self.corr = corr
self.nu = nu
self.nx = tci.nx
self.ny = tci.ny
self.nz = tci.nz
self.dx = tci.xvec[1] - tci.xvec[0]
lvec = np.fft.fftfreq(tci.nx,d=self.dx)
self.dy = tci.yvec[1] - tci.yvec[0]
mvec = np.fft.fftfreq(tci.ny,d=self.dy)
self.dz = tci.zvec[1] - tci.zvec[0]
self.dV = self.dx*self.dy*self.dz
nvec = np.fft.fftfreq(tci.nz,d=self.dz)
L,M,N = np.meshgrid(lvec,mvec,nvec,indexing='ij')
self.r = L**2
self.r += M**2
self.r += N**2
np.sqrt(self.r,out=self.r)
n = 3.
self.f = sigma**2*2**(n) * np.pi**(n/2.) * gamma(nu+n/2.) * (2*nu)**(nu) / gamma(nu) / corr**(2*nu) * (2*nu/corr**2 + 4*np.pi**2*self.r**2)**(-nu - n/2.)
def realization(self):
'''Generate a Gaussian Random field with given covariance'''
B = np.random.normal(size=[self.nx,self.ny, self.nz])
A = np.fft.fftn(B)
A *= np.sqrt(self.f/self.dV)/4.
B = (np.fft.ifftn(A)).real
return B
class IonosphereSimulation(object):
"""Simulate a realisation with a Matern kernel and Chapmen layers.
The grid.
sigma : the overall magnatude of kernel
corr : the correlation length
nu : smoothness"""
def __init__(self,xvec,yvec,zvec,sigma,corr, type='m52'):
assert type in ['m52']
self.nx = np.size(xvec)
self.ny = np.size(yvec)
self.nz = np.size(zvec)
self.dx = xvec[1] - xvec[0]
self.dy = yvec[1] - yvec[0]
self.dz = zvec[1] - zvec[0]
self.sigma = sigma
self.corr = corr
self.sx = 1./(self.dx*self.nx)
self.sy = 1./(self.dy*self.ny)
self.sz = 1./(self.dz*self.nz)
lvec = np.linspace(0,self.sx*self.nx/2.,self.nx)
mvec = np.linspace(0,self.sy*self.ny/2.,self.ny)
nvec = np.linspace(0,self.sz*self.nz/2.,self.nz)
# lvec = np.fft.ifftshift(np.fft.fftfreq(self.nx,d=1))
# mvec = np.fft.ifftshift(np.fft.fftfreq(self.ny,d=1))
# nvec = np.fft.ifftshift(np.fft.fftfreq(self.nz,d=1))[:self.nz>>1]
L,M,N = np.meshgrid(lvec,mvec,nvec,indexing='ij')
s2 = L**2
s2 += M**2
s2 += N**2
s2 = np.fft.ifftshift(s2)
s = np.sqrt(s2)
self.type=type
if self.type == 'm52':
#self.S = self.sigma**2 * 8*np.sqrt(5)**5*np.sqrt(np.pi)**3*gamma(4.)/gamma(5./2.)/self.corr**5 / np.sqrt(np.sqrt(5./self.corr**2 + (4.* np.pi**2) * s2))
n = 3.
nu = 5/2.
self.S = self.sigma**2*2**(n) * np.pi**(n/2.) * gamma(nu+n/2.) * (2*nu)**(nu) / gamma(nu) / self.corr**(2*nu) * (2*nu/self.corr**2 + 4*np.pi**2*s2)**(-nu - n/2.)
if self.type == 'rq16':
self.S = self.sigma**2 * (2**(5/2.) * self.corr**2/3./s) * kv(1./3., s * self.corr/np.sqrt(3))/gamma(1./6.)
self.S[s==0] = 0.
self.S = np.sqrt(self.S)
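# S holds the square root of the 3D Matern-5/2 spectral density sampled on the
# FFT grid; realization() below draws complex white noise Z, scales it by S,
# and inverse-FFTs, which (up to the normalization hack noted there) yields a
# Gaussian random field whose covariance is approximately the Matern-5/2
# kernel with length scale `corr` and standard deviation `sigma`.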
def realization(self,seed=None):
'''Generate a Gaussian Random field with given covariance'''
if seed is not None:
np.random.seed(seed)
Z = np.random.normal(size=self.S.shape)+1j*np.random.normal(size=self.S.shape)
#Z -= np.mean(Z)
#Z /= np.std(Z)
#print(np.mean(Z))
Y = self.S * Z# * (self.nx*self.ny*self.nz)
B = (np.fft.ifftn(Y,(self.nx,self.ny,self.nz))).real*(self.sx*self.nx)*(self.sy*self.ny)*(self.sz*self.nz)
B[::2,:,:] *= -1
B[:,::2,:] *= -1
B[:,:,::2] *= -1
#B -= np.mean(B)
###
# Hack to get scale right
###
B *= self.sigma/np.std(B)
return B
if __name__=='__main__':
import pylab as plt
xvec = np.linspace(0,1,100)
yvec = np.linspace(0,1,100)
zvec = np.linspace(0,1,100)
sim = IonosphereSimulation(xvec,yvec,zvec,1.,0.4,type='m52')
dn = sim.realization(seed=1234)
print(dn.shape)
fig=plt.figure(figsize=(12,12))
fig.add_subplot(2,2,1)
plt.imshow(dn[50,:,:])
plt.colorbar()
fig.add_subplot(2,2,2)
plt.imshow(dn[:,50,:])
plt.colorbar()
fig.add_subplot(2,2,3)
plt.imshow(dn[:,:,50])
plt.colorbar()
plt.figure()
plt.hist(dn.flatten(),bins=25)
plt.show()
```
#### File: ionotomo/notebooks/ionosphere_characteristics_script.py
```python
import astropy.coordinates as ac
import astropy.time as at
import astropy.units as au
from ionotomo import *
from ionotomo.tomography.simulate import SimulateTec
import tensorflow as tf
import numpy as np
import gpflow as gpf
import pymc3 as pm
import os
import pylab as plt
###
# Create radio array
load_preexisting = True
datapack_to_load = "../scripts/rvw_data_analysis/rvw_datapack_full_phase_dec27.hdf5"
if load_preexisting:
datapack_facets = DataPack(filename=datapack_to_load)
_,timestamps_flag = datapack_facets.get_times(-1)
timestamps_flag = timestamps_flag[1:]
freqs_flag = datapack_facets.get_freqs(-1)
keep_freqs = freqs_flag[200:220]
freqs_flag = freqs_flag[np.bitwise_not(np.isin(freqs_flag,keep_freqs))]
datapack_facets.flag_times(timestamps_flag)
#datapack_facets.flag_freqs(freqs_flag)
#Flagged all but first time, channels 200-219, etc
else:
ra = 126
dec = 64
timestamp = "2016-12-08T23:25:01.384"
radio_array = generate_example_radio_array(config='lofar')
p0 = ac.SkyCoord(ra=ra*au.deg,dec=dec*au.deg, frame='icrs')
obstime = at.Time(timestamp,format='isot')
location = radio_array.get_center()
altaz = ac.AltAz(location = location, obstime = obstime)
p = p0.transform_to(altaz)
print(p)
datapack_facets = generate_example_datapack(alt=p.alt.deg,az=p.az.deg,Ndir=42,Nfreqs=20,Ntime=1,radio_array=radio_array)
datapack_screen = phase_screen_datapack(15,datapack=datapack_facets)
times, timestamps = datapack_facets.get_times(-1)
antennas,antenna_labels = datapack_facets.get_antennas(-1)
freqs = datapack_facets.get_freqs(-1)
phase_track = datapack_facets.get_center_direction()
obstime = times[0]
location = datapack_facets.radio_array.get_center()
directions_facets,_ = datapack_facets.get_directions(-1)
Nd1 = directions_facets.shape[0]
directions_screen,_ = datapack_screen.get_directions(-1)
Nd2 = directions_screen.shape[0]
uvw = UVW(location = location,obstime=obstime,phase = phase_track)
X0 = directions_facets.transform_to(uvw)
X0 = np.array([np.arctan2(X0.u.value,X0.w.value),np.arctan2(X0.v.value,X0.w.value)]).T
X1 = directions_screen.transform_to(uvw)
X1 = np.array([np.arctan2(X1.u.value,X1.w.value),np.arctan2(X1.v.value,X1.w.value)]).T
x_scale = np.mean(np.std(X1,axis=0))
X1 /= x_scale
X0 /= x_scale
#Simulator
sim = SimulateTec(datapack_screen,spacing=1.,res_n=401)
###
# Generate ionospheres following I(sigma, l)
def sample_ionosphere(sim,sigma,l):
"""Generate an ionosphere, I(sigma,l).
sim : SimulateTec object (non reentrant)
sigma : float log_electron variance
l : float length scale
Returns the model as an ndarray
"""
sim.generate_model(sigma, l)
model = sim.model
return model
###
# simulate and place in datapack_screen
def simulate_screen(sim,datapack,s=1.01,ls=10.,draw_new=False):
if draw_new:
sim.generate_model(s,ls)
tec = sim.simulate_tec()
phase = tec[...,None]*-8.4479e9/freqs
datapack.set_phase(phase,ant_idx=-1,time_idx=[0],dir_idx=-1,freq_idx=-1)
return tec
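# The TEC-to-phase conversion above is the standard dispersive (cold plasma)
# relation: phase [rad] ~ -8.448e9 * TEC [TECU] / freq [Hz]; e.g. 1 TECU at
# 150 MHz corresponds to roughly -56 rad of phase.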
def log_posterior_true(tec,X1, tec_obs, X0):
"""
Calculate the logp of the true underlying.
tec : array (Nd1,)
X1 : array (Nd1,2)
tec_obs : array (Nd2,)
X0 : array (Nd2, 2)
"""
with pm.Model() as model:
l = pm.Exponential('l',1.)
sigma = pm.Exponential('sigma',1.)
#c = pm.Normal('c',mu=0,sd=1)
cov_func = pm.math.sqr(sigma)*pm.gp.cov.ExpQuad(1, ls=l)
#mean_func = pm.gp.mean.Constant(c=c)
gp = pm.gp.Marginal(cov_func=cov_func)
eps = pm.HalfNormal('eps',sd=0.1)
y0_ = gp.marginal_likelihood('y0',X0,tec_obs,eps)
mp = pm.find_MAP()
print(mp)
c = pm.approx_hessian(model.test_point)
step = pm.NUTS(scaling=c)
trace = pm.sample(100,step=step,start=mp)
print(trace.get_sampler_stats('depth'),trace.get_sampler_stats('tree_size'))
pm.traceplot(trace,combined=True)
plt.show()
print(pm.summary(trace))
with model:
y1_ = gp.conditional('y1',X1)#,given={'X':X0,'y':y0,'noise':0.1})
logp = y1_.logp
logp_val = np.zeros(len(trace))
for i,point in enumerate(trace):
point['y1'] = tec
logp_val[i] = logp(point)
return logp_val
logp = {}
d_mask = np.random.choice(Nd2,size=Nd1,replace=False)
for i in range(10):
tec = simulate_screen(sim,datapack_screen,draw_new=True)
logp[i] = []
for ai in range(1,62):
print(antenna_labels[ai])
tec_mean = np.mean(tec[ai,0,:])
tec_std = np.std(tec[ai,0,:])
tec_ = (tec[ai,0,:] - tec_mean) / tec_std
logp[i].append(np.mean(log_posterior_true(tec_,X1,tec_[d_mask],X1[d_mask,:])))
```
#### File: notebooks/itoh_gp/itoh_gp.py
```python
import matplotlib
matplotlib.use('Agg')
import numpy as np
from scipy.cluster.vq import kmeans2
import pylab as plt
plt.style.use('ggplot')
import astropy.units as au
import os
import gpflow as gp
from heterogp.latent import Latent
from gpflow import settings
from gpflow.decors import params_as_tensors,autoflow
from gpflow.quadrature import hermgauss
from gpflow import settings
from gpflow import transforms
from gpflow import logdensities as densities
from gpflow.decors import params_as_tensors
from gpflow.decors import params_as_tensors_for
from gpflow.decors import autoflow
from gpflow.params import Parameter
from gpflow.params import Parameterized
from gpflow.params import ParamList
from gpflow.quadrature import hermgauss
from gpflow.likelihoods import Likelihood
import logging
logging.basicConfig(format='%(asctime)s %(message)s')
import tensorflow as tf
import h5py
# In[2]:
from gpflow.actions import Loop, Action
from gpflow.training import AdamOptimizer
class PrintAction(Action):
def __init__(self, model, text):
self.model = model
self.text = text
def run(self, ctx):
likelihood = ctx.session.run(self.model.likelihood_tensor)
logging.warning('{}: iteration {} likelihood {:.4f}'.format(self.text, ctx.iteration, likelihood))
# logging.warning(self.model)
class SendSummary(Action):
def __init__(self, model, writer):
self.model = model
self.writer = writer
self.summary = tf.summary.merge_all()
def run(self, ctx):
summary = ctx.session.run(self.summary)
self.writer.add_summary(summary,global_step=ctx.iteration)
from gpflow.training import NatGradOptimizer, AdamOptimizer, XiSqrtMeanVar
def run_with_adam_and_nat(model, lr,iterations, callback=None, gamma = 0.001):
if gamma == 0:
adam = AdamOptimizer(lr).make_optimize_action(model)
actions = [adam]
actions = actions if callback is None else actions + [callback]
Loop(actions, stop=iterations)()
model.anchor(model.enquire_session())
return
var_list = [(model.f_latent.q_mu, model.f_latent.q_sqrt)]
# we don't want adam optimizing these
model.f_latent.q_mu.set_trainable(False)
model.f_latent.q_sqrt.set_trainable(False)
adam = AdamOptimizer(lr).make_optimize_action(model)
natgrad = NatGradOptimizer(gamma).make_optimize_action(model, var_list=var_list)
actions = [adam, natgrad]
actions = actions if callback is None else actions + [callback]
Loop(actions, stop=iterations)()
model.anchor(model.enquire_session())
# # Create input data
# In[3]:
from ionotomo import *
from ionotomo.astro.real_data import phase_screen_datapack
def make_coord_array(*X):
"""
Return the design matrix from coordinates.
"""
def add_dims(x,where,sizes):
shape = []
tiles = []
for i in range(len(sizes)):
if i not in where:
shape.append(1)
tiles.append(sizes[i])
else:
shape.append(-1)
tiles.append(1)
return np.tile(np.reshape(x,shape),tiles)
N = [x.shape[0] for x in X]
X_ = []
for i,x in enumerate(X):
for dim in range(x.shape[1]):
X_.append(add_dims(x[:,dim],[i], N))
X = np.stack(X_,axis=-1)
return np.reshape(X,(-1,X.shape[-1]))
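# Example (illustrative): with X_t of shape (Nt,1) and X_d of shape (Nd,2),
# make_coord_array(X_t, X_d) returns an (Nt*Nd, 3) design matrix whose rows
# enumerate every (time, direction) pair; a trailing frequency column can be
# appended the same way, as done below with freqs[:,None].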
def make_data_vec(Y,freqs):
"""
Takes Y of shape [..., Nf, N]
returns [...,N+1] where last is freq of observation"""
shape = Y.shape
for _ in range(len(shape)-2):
freqs = freqs[None,...]
freqs = freqs[...,None]
# freqs is [1,1,...,Nf,1]
tiles = list(shape)
tiles[-1] = 1
tiles[-2] = 1
freqs = np.tile(freqs,tiles)
# ..., N+1
return np.concatenate([Y, freqs],axis=-1)
# # Decide on some priors
# In[4]:
from scipy.optimize import fmin,minimize
def log_normal_solve(mode,uncert):
def func(x):
mu,sigma2 = x
mode_ = np.exp(mu-sigma2)
var_ = (np.exp(sigma2 ) - 1) * np.exp(2*mu + sigma2)
return (mode_ - mode)**2 + (var_ - uncert**2)**2
res = minimize(func,(mode,uncert**2))
# res = fmin(func, (mode,uncert**2))
return res.x[0],np.sqrt(res.x[1])
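# log_normal_solve numerically finds (mu, sigma) of a log-normal whose mode and
# standard deviation match the requested values, using
#   mode = exp(mu - sigma^2),  var = (exp(sigma^2) - 1) * exp(2*mu + sigma^2).
# Illustrative use (as below): mu, sig = log_normal_solve(50./t_std, 20./t_std),
# then gp.priors.LogNormal(mu, sig**2) is the corresponding gpflow prior.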
# In[ ]:
# # Direction
# In[11]:
try:
@tf.RegisterGradient('WrapGrad')
def _wrap_grad(op,grad):
phi = op.inputs[0]
return tf.ones_like(phi)*grad
except:
pass#already defined
def wrap(phi):
out = tf.atan2(tf.sin(phi),tf.cos(phi))
with tf.get_default_graph().gradient_override_map({'Identity': 'WrapGrad'}):
return tf.identity(out)
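# wrap(phi) maps phase onto (-pi, pi] via atan2(sin, cos). The registered
# 'WrapGrad' gradient, mapped onto the Identity op wrapping the output, passes
# the upstream gradient through unchanged; the apparent intent is a
# straight-through gradient so the wrapping is transparent to back-propagation
# (atan2(sin, cos) already has unit slope away from the branch cut).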
from heterogp.likelihoods import HeteroscedasticLikelihood
float_type = settings.float_type
class HeteroWrappedPhaseGaussian(HeteroscedasticLikelihood):
def __init__(self, log_noise_latent, tec_scale=0.01, freq=140e6, name=None):
super().__init__(log_noise_latent, name=name)
self.variance = gp.params.Parameter(
1.0, transform=gp.transforms.positive, dtype=gp.settings.float_type)
self.tec_scale = tec_scale
self.num_gauss_hermite_points = 20
self.freq = tf.convert_to_tensor(freq,dtype=settings.float_type,name='test_freq') # frequency the phase is calculated at for the predictive distribution
self.tec_conversion = tf.convert_to_tensor(tec_scale * -8.4480e9,dtype=settings.float_type,name='tec_conversion') # rad Hz/ tecu
self.tec2phase = tf.convert_to_tensor(self.tec_conversion / self.freq,dtype=settings.float_type,name='tec2phase')
@params_as_tensors
def logp(self, F, Y, freqs=None,hetero_variance=None,**unused_kwargs):
"""The log-likelihood function."""
tec2phase = self.tec_conversion/freqs
phase = wrap(F*tec2phase)
dphase = wrap(phase) - wrap(Y) # Ito theorem
arg = tf.stack([-0.5*tf.square(dphase + 2*np.pi*k)/hetero_variance - 0.5 * tf.log((2*np.pi) * hetero_variance) for k in range(-2,3,1)],axis=-1)
return tf.reduce_logsumexp(arg,axis=-1)
# dphase = wrap(wrap(phase) - wrap(Y)) # Ito theorem
# return densities.gaussian(dphase, tf.fill(tf.shape(F),tf.cast(0.,settings.float_type)), hetero_variance)
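# The logp above is a wrapped-Gaussian likelihood: the phase residual dphase is
# treated as Gaussian modulo 2*pi, approximated by summing the Gaussian density
# over the shifts 2*pi*k for k in -2..2 and combining them with logsumexp.
# F is in (scaled) dTEC units and is converted to phase via tec_conversion/freq.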
@params_as_tensors
def conditional_mean(self, F, eval_freq=None,hetero_variance=None, **unused_kwargs): # pylint: disable=R0201
"""The mean of the likelihood conditioned on latent."""
eval_freq = self.freq if eval_freq is None else eval_freq
tec2phase = self.tec_conversion/eval_freq
phase = F*tec2phase
return phase
@params_as_tensors
def conditional_variance(self, F,hetero_variance=None, **unused_kwargs):
return hetero_variance
@params_as_tensors
def hetero_noise(self,X):
"""
Calculates the heterscedastic variance at points X.
X must be of shape [S, N, D]
Returns [S,N,num_latent]
"""
return tf.fill(tf.shape(X[:,:,:1]),tf.cast(self.variance,settings.float_type))
log_noise,_,_ = self.log_noise_latent.sample_from_conditional(X,full_cov=True)
hetero_noise = tf.exp(log_noise)
hetero_noise = tf.where(hetero_noise < self.min_noise,
tf.fill(tf.shape(hetero_noise), self.min_noise),
hetero_noise)
return hetero_noise
def weights_and_mean_uncert(phase,N=200):
def w(x):
return np.arctan2(np.sin(x),np.cos(x))
weights = []
for k in range(phase.shape[1]):
dphase = phase[:,k]
dphase = w(w(dphase[:-1]) - w(dphase[1:]))
dphase = np.pad(dphase,(0,N),mode='symmetric')
uncert = np.sqrt(np.convolve(dphase**2, np.ones((N,))/N, mode='valid',))
weights.append(uncert)
weights = np.stack(weights,axis=-1)#uncert
mean_uncert = max(1e-3,np.mean(weights))
weights = 1./weights**2
weights /= np.mean(weights)
weights[np.isnan(weights)] = 1.
return weights, mean_uncert
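# weights_and_mean_uncert estimates a per-direction phase noise level from the
# RMS of wrapped first differences over a sliding window of N samples, then
# returns inverse-variance weights (normalized to unit mean) together with the
# mean uncertainty; the weights are later appended to the data vector and used
# to scale the variational expectations.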
from heterogp.hgp import HGP
class WrappedPhaseHGP(HGP):
def __init__(self,X, Y, Z, kern, likelihood,
mean_function=gp.mean_functions.Zero,
minibatch_size=None,
num_latent = None,
num_samples=1,
num_data=None,
whiten=True):
super(WrappedPhaseHGP,self).__init__(X, Y, Z, kern, likelihood,
mean_function=mean_function,
minibatch_size=minibatch_size,
num_latent = num_latent,
num_samples=num_samples,
num_data=num_data,
whiten=whiten)
def E_log_p_Y(self, X, Y):
"""
Calculate the expectation of the data log likelihood under the variational distribution
with MC samples
"""
X = tf.tile(X[None,:,:],[self.num_samples,1,1])
Fmean, Fvar = self._build_predict(X, full_cov=False, S=None)
# f = self.f_latent.sample_from_conditional(X, z=None, full_cov=False)
hetero_variance = tf.square(self.likelihood.hetero_noise(X))
lik_freqs = Y[:,-1:]
weights = Y[:,-2:-1]
# ###
# # could do the reparametrization trick f ~ f_latent.predict_sample(
# logp = self.likelihood.logp(f,self.Y[:,:-1],lik_freqs,hetero_variance)
# var_exp = tf.reduce_mean(logp,axis=0)
# return var_exp
var_exp = self.likelihood.variational_expectations(Fmean, Fvar, Y[:,:-2], freqs = lik_freqs, hetero_variance=hetero_variance) # S, N, D
return tf.reduce_mean(var_exp, 0)*weights # N, D
@params_as_tensors
def KL_tensors(self):
KL = [self.f_latent.KL()]
if hasattr(self.likelihood,'log_noise_latent'):
KL.append(self.likelihood.log_noise_latent.KL())
if hasattr(self.f_latent.kern,'log_ls_latent'):
KL.append(self.f_latent.kern.log_ls_latent.KL())
if hasattr(self.f_latent.kern,'log_sigma_latent'):
KL.append(self.f_latent.kern.log_sigma_latent.KL())
return KL
@autoflow((float_type, [None, None]), (tf.int32, []))
def predict_y(self, Xnew, num_samples):
"""
Draws the predictive mean and variance at the points `X`
num_samples times.
X should be [N,D] and this returns [S,N,num_latent], [S,N,num_latent]
"""
Xnew = tf.tile(Xnew[None,:,:],[num_samples,1,1])
Fmean, Fvar = self._build_predict(Xnew, full_cov=False, S=None)
hetero_variance = tf.square(self.likelihood.hetero_noise(Xnew))
return self.likelihood.predict_mean_and_var(Fmean, Fvar, hetero_variance=hetero_variance)
@autoflow((float_type, [None, None]), (tf.int32, []))
def predict_dtec(self, Xnew, num_samples):
"""
Draws the predictive mean and variance at the points `X`
num_samples times.
X should be [N,D] and this returns [S,N,num_latent], [S,N,num_latent]
"""
Xnew = tf.tile(Xnew[None,:,:],[num_samples,1,1])
mean, var = self._build_predict(Xnew, full_cov=False, S=None)
return mean*self.likelihood.tec_scale, var*self.likelihood.tec_scale**2
@autoflow((float_type, [None, None]), (float_type, [None, None]), (tf.int32, []))
def predict_density(self, Xnew, Ynew, num_samples):
Xnew = tf.tile(Xnew[None,:,:],[num_samples,1,1])
Fmean, Fvar = self._build_predict(Xnew, full_cov=False, S=None)
hetero_variance = tf.square(self.likelihood.hetero_noise(Xnew))
lik_freqs = Ynew[:,-1:]
l = self.likelihood.predict_density(Fmean, Fvar, Ynew[:,:-2], freqs=lik_freqs, hetero_variance=hetero_variance)
log_num_samples = tf.log(tf.cast(num_samples, float_type))
return tf.reduce_logsumexp(l - log_num_samples, axis=0)
def gamma_prior(mode,std):
a = std/mode#sqrt(k)/(k-1)
shape = (2* a**2 + np.sqrt((4 * a**2 + 1)/a**4) * a**2 + 1)/(2 *a**2)
scale = std/np.sqrt(shape)
return gp.priors.Gamma(shape,scale)
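# gamma_prior moment-matches a Gamma(shape, scale): with mode = (shape-1)*scale
# and std = sqrt(shape)*scale, the ratio a = std/mode = sqrt(shape)/(shape-1)
# gives shape = (2*a^2 + 1 + sqrt(4*a^2 + 1)) / (2*a^2) and scale = std/sqrt(shape),
# which is the closed form used above.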
from heterogp.latent import Latent
def w(x):
return np.arctan2(np.sin(x),np.cos(x))
from ionotomo.plotting.plot_datapack import DatapackPlotter,animate_datapack
def run_solve(flags):
os.environ["KMP_BLOCKTIME"] = "1"
os.environ["KMP_SETTINGS"] = "1"
os.environ["KMP_AFFINITY"]= "granularity=fine,verbose,compact,1,0"
if flags.intra_op_threads > 0:
os.environ["OMP_NUM_THREADS"]= str(flags.intra_op_threads)
dp = DataPack(filename='../../data/rvw_datapack_full_phase_dec27_wideband.hdf5')
ant_idx = -1
times_,_ = dp.get_times(-1)
Nt_ = len(times_)
end_time = min(flags.end_time, len(times_))
time_idx = range(flags.start_time,end_time)
dir_idx = -1
freq_idx = -1#range(4,20)
phase = dp.get_phase(ant_idx,time_idx,dir_idx,freq_idx)
times,_ = dp.get_times(time_idx)
antennas,antenna_labels = dp.get_antennas(ant_idx)
freqs = dp.get_freqs(freq_idx)
directions,patch_names = dp.get_directions(dir_idx)
Na,Nt,Nd,Nf = phase.shape
X_d = np.array([directions.ra.deg,directions.dec.deg]).T
X_t = times.mjd[:,None]*86400.#mjs
enu = ENU(obstime=times[0],location=dp.radio_array.get_center())
ant_enu = antennas.transform_to(enu)
X_a = np.array([ant_enu.east.to(au.km).value, ant_enu.north.to(au.km).value]).T
d_std = X_d.std(0).mean() + 1e-6
t_std = X_t.std() + 1e-6
a_std = X_a.std(0).mean() + 1e-6
X_a = (X_a - X_a.mean(0)) / a_std
X_t = (X_t - X_t.mean()) / t_std
d_mean = X_d.mean(0)
X_d = (X_d - X_d.mean(0)) / d_std
#~8 arcmin resolution
phase_screen_dp = phase_screen_datapack(30,ant_idx=ant_idx,time_idx=time_idx,freq_idx=freq_idx,datapack=dp)
directions_,patch_names_ = phase_screen_dp.get_directions(-1)
Nd_ = len(directions_)
X_d_ = np.array([directions_.ra.deg,directions_.dec.deg]).T
X_d_ = (X_d_ - d_mean) / d_std
if not os.path.exists(flags.solution_file):
with h5py.File(flags.solution_file,'a') as f:
f['dtec'] = np.zeros([Na,Nt_,Nd_])
f['dtec_facets'] = np.zeros([Na,Nt_,Nd])
f['ra'] = directions_.ra.deg
f['dec'] = directions_.dec.deg
f['dtec_variance'] = np.zeros([Na,Nt_,Nd_])
f['dtec_facets_variance'] = np.zeros([Na,Nt_,Nd])
f['count_facets'] = np.zeros([Na,Nt_,Nd])
f['count'] = np.zeros([Na,Nt_,Nd_])
f['ra_facets'] = directions.ra.deg
f['dec_facets'] = directions.dec.deg
f['time'] = times_.mjd*86400.
def make_hetero_model(X,Y,freqs,M=None,minibatch_size=None,Z = None, eval_freq=140e6):
N, num_latent = Y.shape
_, D = X.shape
M = M or N
if Z is None:
Z = kmeans2(X, M, minit='points')[0] if N < 10000 else X[np.random.choice(N,size=M,replace=False),:]
with gp.defer_build():
log_noise_mean_func = gp.mean_functions.Constant(log_noise_mean[0])
log_noise_mean_func.c.set_trainable(False)
# log_noise_mean_func.c.prior = gp.priors.Gaussian(log_noise_mean[0],log_noise_mean[1]**2)
log_noise_kern = gp.kernels.RBF(3,variance=0.01**2)#log_noise_kern_var[0])
log_noise_kern.variance.set_trainable(False)# = gamma_prior(0.05,0.05)
# log_noise_kern.variance.prior = gp.priors.LogNormal(log_noise_kern_var[0],log_noise_kern_var[1]**2)
log_noise_Z = Z#X[np.random.choice(N,size=42*9,replace=False),:]
log_noise_latent = Latent(log_noise_Z,
log_noise_mean_func, log_noise_kern, num_latent=1, whiten=False, name=None)
# log_noise_latent.feature.set_trainable(False)
# Define the likelihood
likelihood = HeteroWrappedPhaseGaussian(log_noise_latent,freq=eval_freq,tec_scale = flags.tec_scale)
likelihood.variance = np.exp(lik_var[0])
likelihood.variance.prior = gp.priors.LogNormal(lik_var[0],lik_var[1]**2)
likelihood.variance.set_trainable(True)
kern_time = gp.kernels.Matern52(1,active_dims=[0])
kern_time.lengthscales = np.exp(tec_kern_time_ls[0])
kern_time.lengthscales.set_trainable(True)
kern_time.lengthscales.prior = gp.priors.LogNormal(tec_kern_time_ls[0],tec_kern_time_ls[1]**2)#gamma_prior(70./t_std, 50./t_std)
kern_time.variance = np.exp(tec_kern_var[0])
kern_time.variance.set_trainable(True)
kern_time.variance.prior = gp.priors.LogNormal(tec_kern_var[0],tec_kern_var[1]**2)#gamma_prior(0.001, 0.005)
kern_space = gp.kernels.Matern52(2,active_dims=[1,2],variance=1.)
kern_space.variance.set_trainable(False)
kern_space.lengthscales = np.exp(tec_kern_dir_ls[0])
kern_space.lengthscales.set_trainable(True)
kern_space.lengthscales.prior = gp.priors.LogNormal(tec_kern_dir_ls[0],tec_kern_dir_ls[1]**2)#gamma_prior(0.3/d_std,0.2/d_std)
white = gp.kernels.White(3)
white.variance = 0.0005**2/flags.tec_scale**2
white.variance.set_trainable(False)
kern = kern_time*kern_space + white
mean = gp.mean_functions.Constant(0.)#tec_mean_mu)
mean.c.set_trainable(False)
mean.c.prior = gp.priors.Gaussian(tec_mean_mu,tec_mean_var)
model = WrappedPhaseHGP(X, Y, Z, kern, likelihood,
mean_function=mean,
minibatch_size=minibatch_size,
num_latent = num_latent-2,
num_samples=1,
num_data=N,
whiten=False)
model.f_latent.feature.set_trainable(True)
model.compile()
tf.summary.scalar('likelihood',-model.likelihood_tensor)
return model
gp.settings.numerics.jitter = flags.jitter
iterations = flags.iterations
learning_rate = flags.learning_rate
minibatch_size = flags.minibatch_size
i = flags.antenna
freq_l = np.argmin((freqs - flags.eval_freq)**2)
X = make_coord_array(X_t,X_d,freqs[:,None])[:,:-1]# N, 3
M = flags.inducing
if M is None:
Z = make_coord_array(X_t[::flags.time_skip,:],X_d[::1,:])
else:
Z = None
if M > 1.0:
M = int(M)
else:
M = int(M*X_t.shape[0]*X_d.shape[0])
assert M > 0, "Need at least one inducing point"
###
# get stat weights of data-points
weights, uncert_mean = weights_and_mean_uncert(phase[i,:,:,0],N=200)
# Nt, Nd, Nf
weights = np.tile(weights[:,:,None],(1,1,Nf))
# Nt, Nd, Nf, 2
data_vec = np.stack([w(phase[i,:,:,:]), weights], axis=-1)
# Nt, Nd, Nf, 3
Y = make_data_vec(data_vec,freqs)#N2
Y = Y.reshape((-1, Y.shape[-1]))
y_mean = Y[:,:-2].mean()*0.
Y[:,:-2] -= y_mean
###
# Using half-normal priors for positive params these should represent limits of distriubtion
log_noise_mean = log_normal_solve(0.35, 0.65)
log_noise_kern_var = log_normal_solve(log_normal_solve(0.25, 0.25)[1]**2, np.abs(log_normal_solve(0.25, 0.1)[1]**2 - log_normal_solve(0.25, 0.25)[1]**2))
lik_var = log_normal_solve(uncert_mean, uncert_mean*0.25)
tec_mean_mu, tec_mean_var = 0./flags.tec_scale, (0.005)**2/flags.tec_scale**2
tec_kern_time_ls = log_normal_solve(50./t_std, 20./t_std)
tec_kern_dir_ls = log_normal_solve(0.5/d_std, 0.3/d_std)
tec_kern_sigma = 0.005/flags.tec_scale
tec_kern_var = log_normal_solve(tec_kern_sigma**2,0.1*tec_kern_sigma**2)
# print("Log_noise mean Gaussian",log_noise_mean,'median (rad)',np.exp(log_noise_mean[0]))
# print("Log_noise kern var logGaussian",log_noise_kern_var,'median (log-rad)',np.sqrt(np.exp(log_noise_kern_var[0])))
print("likelihood var logGaussian",lik_var,'median (rad)',np.exp(lik_var[0]))
print("tec mean Gaussian",tec_mean_mu*flags.tec_scale, tec_mean_var*flags.tec_scale**2)
print("tec kern var logGaussian",tec_kern_var,'median (tec)',np.sqrt(np.exp(tec_kern_var[0]))*flags.tec_scale)
print("tec kern time ls logGaussian",tec_kern_time_ls,'median (sec)',np.exp(tec_kern_time_ls[0])*t_std)
print("tec kern dir ls logGaussian",tec_kern_dir_ls,'median (deg)',np.exp(tec_kern_dir_ls[0])*d_std)
tf.reset_default_graph()
graph=tf.Graph()
config = tf.ConfigProto()
config.intra_op_parallelism_threads = flags.intra_op_threads
config.inter_op_parallelism_threads = flags.inter_op_threads
sess = tf.Session(graph=graph,config=config)
try:
os.makedirs('summaries')
except:
pass
import glob
run_id = len(glob.glob('summaries/summary_{}_*'.format(antenna_labels[i])))
with graph.as_default(), sess.as_default(), tf.summary.FileWriter('summaries/summary_{}_{}'.format(antenna_labels[i],run_id), graph) as writer:
model = make_hetero_model(X,Y,freqs,M=M,minibatch_size=minibatch_size, Z=Z, eval_freq=freqs[freq_l])
run_with_adam_and_nat(model,learning_rate,iterations,SendSummary(model,writer), gamma = 0.000)
# run_with_adam_and_nat(model,1e-3,iterations,SendSummary(model,writer), gamma = 0.0001)
if False:
Xstar = make_coord_array(X_t,X_d_,freqs[:1,None])[:,:-1]
ystar,varstar = model.predict_y(Xstar,10)#at 140MHz
y_star = ystar.mean(0).reshape([Nt,Nd_])
y_star += y_mean
varstar = varstar.mean(0).reshape([Nt,Nd_])
dtec_ystar,dtec_varstar = model.predict_dtec(Xstar,10)
dtec_ystar = dtec_ystar.mean(0).reshape([Nt,Nd_])
dtec_varstar = dtec_varstar.mean(0).reshape([Nt,Nd_])
hetero_noise = model.likelihood.compute_hetero_noise(Xstar,10)
hetero_noise = hetero_noise.mean(0).reshape([Nt,Nd_])
else:
y_star,varstar,dtec_ystar, dtec_varstar, hetero_noise = [],[],[],[],[]
for k in range(Nd_):
Xstar = make_coord_array(X_t,X_d_[k:k+1,:],freqs[:1,None])[:,:-1]
ystar_,varstar_ = model.predict_y(Xstar,10)#at 140MHz
y_star_ = ystar_.mean(0).reshape([Nt,1])
y_star_ += y_mean
varstar_ = varstar_.mean(0).reshape([Nt,1])
dtec_ystar_,dtec_varstar_ = model.predict_dtec(Xstar,10)
dtec_ystar_ = dtec_ystar_.mean(0).reshape([Nt,1])
dtec_varstar_ = dtec_varstar_.mean(0).reshape([Nt,1])
hetero_noise_ = model.likelihood.compute_hetero_noise(Xstar,10)
hetero_noise_ = hetero_noise_.mean(0).reshape([Nt,1])
y_star.append(y_star_)
varstar.append(varstar_)
dtec_ystar.append(dtec_ystar_)
dtec_varstar.append(dtec_varstar_)
hetero_noise.append(hetero_noise_)
y_star = np.concatenate(y_star,axis=-1)
varstar = np.concatenate(varstar,axis=-1)
dtec_ystar = np.concatenate(dtec_ystar,axis=-1)
dtec_varstar = np.concatenate(dtec_varstar,axis=-1)
hetero_noise = np.concatenate(hetero_noise,axis=-1)
if flags.plot:
try:
os.makedirs('{}/{}/time_diagnostics'.format(flags.plot_dir,antenna_labels[i]))
os.makedirs('{}/{}/inferred_phase_diff'.format(flags.plot_dir,antenna_labels[i]))
os.makedirs('{}/{}/inferred_phase'.format(flags.plot_dir,antenna_labels[i]))
os.makedirs('{}/{}/inferred_phase_faceted'.format(flags.plot_dir,antenna_labels[i]))
os.makedirs('{}/{}/observed'.format(flags.plot_dir,antenna_labels[i]))
os.makedirs('{}/{}/hetero_noise'.format(flags.plot_dir,antenna_labels[i]))
os.makedirs('{}/{}/inferred_phase_error'.format(flags.plot_dir,antenna_labels[i]))
os.makedirs('{}/{}/inferred_dtec'.format(flags.plot_dir,antenna_labels[i]))
os.makedirs('{}/{}/inferred_dtec_error'.format(flags.plot_dir,antenna_labels[i]))
except:
pass
# with h5py.File('{}/{}/solution.hdf5'.format(flags.plot_dir,antenna_labels[i]),'a') as f:
# if 'tec' not in f.keys():
# f['tec'] = np.zeros((Na,Nt,Nd_),dtype=np.float64)
# if 'tec_var' not in f.keys():
# f['tec_var'] = np.zeros((Na,Nt,Nd_),dtype=np.float64)
# f['tec'][i,time_idx,:] = dtec_ystar
# f['tec_var'][i,time_idx,:] = dtec_varstar
dataplotter = DatapackPlotter(dp)
fignames = []
for t in time_idx:
fignames.append('{}/{}/observed/fig{:04d}.png'.format(flags.plot_dir,antenna_labels[i],t))
dataplotter.plot(ant_idx=[i], time_idx = time_idx, dir_idx=-1,
freq_idx=[freq_l], vmin=None,vmax=None,mode='perantenna',observable='phase',fignames=fignames,
phase_wrap=True, plot_crosses=True,plot_facet_idx=False,plot_patchnames=False,
labels_in_radec=True,show=False)
plt.close('all')
fignames = []
for t in time_idx:
fignames.append('{}/{}/inferred_phase_diff/fig{:04d}.png'.format(flags.plot_dir,antenna_labels[i],t))
with graph.as_default(), sess.as_default():
Xstar = make_coord_array(X_t,X_d,freqs[:1,None])[:,:-1]
ystar,_ = model.predict_y(Xstar,10)#at 140MHz
ystar = ystar.mean(0).reshape([Nt,Nd])
ystar += y_mean
dtec_facet_ystar, dtec_facet_varstar = model.predict_dtec(Xstar,10)#at 140MHz
dtec_facet_ystar = dtec_facet_ystar.mean(0).reshape([Nt,Nd])
dtec_facet_varstar = dtec_facet_varstar.mean(0).reshape([Nt,Nd])
with h5py.File(flags.solution_file,'a') as f:
f['dtec'][i,time_idx,:] += dtec_ystar/dtec_varstar
f['dtec_variance'][i,time_idx,:] += dtec_varstar/dtec_varstar
f['count'][i,time_idx,:] += 1./dtec_varstar
f['dtec_facets'][i,time_idx,:] += dtec_facet_ystar/dtec_facet_varstar
f['dtec_facets_variance'][i,time_idx,:] += dtec_facet_varstar/dtec_facet_varstar
f['count_facets'][i,time_idx,:] += 1./dtec_facet_varstar
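# The datasets above accumulate inverse-variance weighted sums over
# overlapping time chunks. A post-processing sketch (assuming the same
# dataset names) would recover the weighted estimates by normalising with
# the accumulated weights, e.g.
#   with h5py.File(flags.solution_file, 'r') as f:
#       dtec_mean = f['dtec'][...] / f['count'][...]
#       dtec_var = f['dtec_variance'][...] / f['count'][...]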
for k in range(Nd):
plt.plot(X_t[:,0], w(dp.phase[i,time_idx,k,freq_l]),label='data')
plt.plot(X_t[:,0], ystar[:,k],label='inferred')
plt.legend()
plt.savefig('{}/{}/time_diagnostics/direction_{:02d}_{:04d}_{:04d}.png'.format(flags.plot_dir,antenna_labels[i],k,flags.start_time,flags.end_time))
plt.close('all')
dp.phase[i,time_idx,:,freq_l] = w(w(dp.phase[i,time_idx,:,freq_l]) - w(ystar))
dataplotter.plot(ant_idx=[i], time_idx = time_idx, dir_idx=-1,
freq_idx=[freq_l], vmin=None,vmax=None,mode='perantenna',observable='phase',fignames=fignames,
phase_wrap=True, plot_crosses=False,plot_facet_idx=False,plot_patchnames=False,
labels_in_radec=True,show=False)
plt.close('all')
fignames = []
for t in time_idx:
fignames.append('{}/{}/inferred_phase_faceted/fig{:04d}.png'.format(flags.plot_dir,antenna_labels[i],t))
dp.phase[i,time_idx,:,freq_l] = ystar
dataplotter.plot(ant_idx=[i], time_idx = time_idx, dir_idx=-1,
freq_idx=[freq_l], vmin=None,vmax=None,mode='perantenna',observable='phase',fignames=fignames,
phase_wrap=True, plot_crosses=False,plot_facet_idx=False,plot_patchnames=False,
labels_in_radec=True,show=False)
plt.close('all')
phase_screen_dp.set_reference_antenna(dp.ref_ant)
dataplotter = DatapackPlotter(phase_screen_dp)
fignames = []
for t in time_idx:
fignames.append('{}/{}/inferred_phase/fig{:04d}.png'.format(flags.plot_dir,antenna_labels[i],t))
phase_screen_dp.phase[i,:,:,freq_l] = y_star
dataplotter.plot(ant_idx=[i], time_idx = -1, dir_idx=-1,
freq_idx=[freq_l], vmin=None,vmax=None,mode='perantenna',observable='phase',fignames=fignames,
phase_wrap=True, plot_crosses=False,plot_facet_idx=False,plot_patchnames=False,
labels_in_radec=True,show=False)
plt.close('all')
fignames = []
for t in time_idx:
fignames.append('{}/{}/hetero_noise/fig{:04d}.png'.format(flags.plot_dir,antenna_labels[i],t))
phase_screen_dp.variance[i,:,:,freq_l] = hetero_noise**2
dataplotter.plot(ant_idx=[i], time_idx = -1, dir_idx=-1,
freq_idx=[freq_l], vmin=None,vmax=None,mode='perantenna',observable='std',fignames=fignames,
phase_wrap=False, plot_crosses=False,plot_facet_idx=False,plot_patchnames=False,
labels_in_radec=True,show=False)
plt.close('all')
fignames = []
for t in time_idx:
fignames.append('{}/{}/inferred_phase_error/fig{:04d}.png'.format(flags.plot_dir,antenna_labels[i],t))
phase_screen_dp.variance[i,:,:,freq_l] = varstar
dataplotter.plot(ant_idx=[i], time_idx = -1, dir_idx=-1,
freq_idx=[freq_l], vmin=None,vmax=None,mode='perantenna',observable='std',fignames=fignames,
phase_wrap=False, plot_crosses=False,plot_facet_idx=False,plot_patchnames=False,
labels_in_radec=True,show=False)
plt.close('all')
fignames = []
for t in time_idx:
fignames.append('{}/{}/inferred_dtec/fig{:04d}.png'.format(flags.plot_dir,antenna_labels[i],t))
phase_screen_dp.phase[i,:,:,freq_l] = dtec_ystar
dataplotter.plot(ant_idx=[i], time_idx = -1, dir_idx=-1,
freq_idx=[freq_l], vmin=None,vmax=None,mode='perantenna',observable='phase',fignames=fignames,
phase_wrap=False, plot_crosses=False,plot_facet_idx=False,plot_patchnames=False,
labels_in_radec=True,show=False)
plt.close('all')
fignames = []
for t in time_idx:
fignames.append('{}/{}/inferred_dtec_error/fig{:04d}.png'.format(flags.plot_dir,antenna_labels[i],t))
phase_screen_dp.variance[i,:,:,freq_l] = dtec_varstar
dataplotter.plot(ant_idx=[i], time_idx = -1, dir_idx=-1,
freq_idx=[freq_l], vmin=None,vmax=None,mode='perantenna',observable='std',fignames=fignames,
phase_wrap=False, plot_crosses=False,plot_facet_idx=False,plot_patchnames=False,
labels_in_radec=True,show=False)
plt.close('all')
# In[14]:
import argparse
def add_args(parser):
antenna_labels = ['CS001HBA0', 'CS001HBA1', 'CS002HBA0', 'CS002HBA1', 'CS003HBA0',
'CS003HBA1', 'CS004HBA0', 'CS004HBA1', 'CS005HBA0', 'CS005HBA1',
'CS006HBA0', 'CS006HBA1', 'CS007HBA0', 'CS007HBA1', 'CS011HBA0',
'CS011HBA1', 'CS013HBA0', 'CS013HBA1', 'CS017HBA0', 'CS017HBA1',
'CS021HBA0', 'CS021HBA1', 'CS024HBA0', 'CS024HBA1', 'CS026HBA0',
'CS026HBA1', 'CS028HBA0', 'CS028HBA1', 'CS030HBA0', 'CS030HBA1',
'CS031HBA0', 'CS031HBA1', 'CS032HBA0', 'CS032HBA1', 'CS101HBA0',
'CS101HBA1', 'CS103HBA0', 'CS103HBA1', 'CS201HBA0', 'CS201HBA1',
'CS301HBA0', 'CS301HBA1', 'CS302HBA0', 'CS302HBA1', 'CS401HBA0',
'CS401HBA1', 'CS501HBA0', 'CS501HBA1', 'RS106HBA', 'RS205HBA',
'RS208HBA', 'RS210HBA', 'RS305HBA', 'RS306HBA', 'RS307HBA',
'RS310HBA', 'RS406HBA', 'RS407HBA', 'RS409HBA', 'RS503HBA',
'RS508HBA', 'RS509HBA']
def _antenna_type(s):
try:
idx = int(s)
assert idx < len(antenna_labels)
return idx
except:
idx = None
for idx in range(len(antenna_labels)):
if antenna_labels[idx].lower() == s.lower():
return idx
raise ValueError("{} invalid antenna".format(s))
def _inducing(s):
if s.lower().strip() == 'none':
return None
else:
return float(s)
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.register("type", "antenna", _antenna_type)
parser.register('type','inducing',_inducing)
# network
parser.add_argument("--antenna", type="antenna", default=51,
help="""The index or name of antenna.\n{}""".format(list(zip(range(len(antenna_labels)),antenna_labels))))
parser.add_argument("--start_time", type=int, default=0,
help="Start time index")
parser.add_argument("--end_time", type=int, default=20,
help="End time index")
parser.add_argument("--time_skip", type=int, default=2,
help="Time skip")
parser.add_argument("--inducing", type='inducing', default='None',
help="""The number of inducing point if > 1.0 else the fraction of total number of points. If None then use time_skip instead.""")
parser.add_argument("--minibatch_size", type=int, default=256,
help="Size of minibatch")
parser.add_argument("--learning_rate", type=float, default=1e-3,
help="learning rate")
parser.add_argument("--plot", type="bool", default=True, const=True,nargs='?',
help="Whether to plot results")
parser.add_argument("--plot_dir", type=str, default='./figs',
help="Where to plot results")
parser.add_argument("--iterations", type=int, default=10000,
help="How many iterations to run")
parser.add_argument("--jitter", type=float, default=1e-6,
help="Jitter for stability")
parser.add_argument("--eval_freq", type=float, default=144e6,
help="Eval frequency")
parser.add_argument("--inter_op_threads", type=int, default=0,
help="""The max number of concurrent threads""")
parser.add_argument("--intra_op_threads", type=int, default=0,
help="""The number threads allowed for multi-threaded ops.""")
parser.add_argument("--tec_scale", type=float, default=0.01,
help="""The relative tec scale used for scaling the GP model for computational stability.""")
parser.add_argument("--solution_file", type=str, default='solution.hdf5',
help="""solution file path ending in .hdf5""")
# In[ ]:
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_args(parser)
flags, unparsed = parser.parse_known_args()
print(flags)
run_solve(flags)
# tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
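# Illustrative invocation (script name and values are placeholders only;
# the flags correspond to add_args above):
#   python solve_hetero.py --antenna RS509HBA --start_time 0 --end_time 40 \
#       --time_skip 2 --minibatch_size 256 --learning_rate 1e-3 \
#       --iterations 10000 --solution_file solution.hdf5 --plot_dir ./figs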
```
#### File: notebooks/itoh_gp/wrap_op_gradients.py
```python
import tensorflow as tf
import numpy as np
import pylab as plt
@tf.RegisterGradient('WrapGrad')
def _wrap_grad(op,grad):
phi = op.inputs[0]
return tf.ones_like(phi)*grad
def wrap(phi):
out = tf.atan2(tf.sin(phi),tf.cos(phi))
with tf.get_default_graph().gradient_override_map({'Identity': 'WrapGrad'}):
return tf.identity(out)
```
#### File: ionotomo/plotting/plot_tools.py
```python
import os
#__MAYAVI__ = False
#try:
# os.environ["QT_API"] = "pyqt"
# from mayavi import mlab
# __MAYAVI__ = True
#except:
# try:
# os.environ["QT_API"] = "pyside"
# from mayavi import mlab
# __MAYAVI__ = True
# except:
# print("Unable to import mayavi")
from ionotomo.geometry.tri_cubic import TriCubic
from ionotomo.astro.frames.uvw_frame import UVW
import numpy as np
import pylab as plt
import astropy.coordinates as ac
import astropy.time as at
import astropy.units as au
## utility functions
try:
import cmocean
phase_cmap = cmocean.cm.phase
except:
phase_cmap = plt.cm.hsv
def interp_nearest(x,y,z,x_,y_):
dx = np.subtract.outer(x_,x)
dy = np.subtract.outer(y_,y)
r = dx**2
dy *= dy
r += dy
np.sqrt(r,out=r)
arg = np.argmin(r,axis=1)
z_ = z[arg]
return z_
def plot_tci(tci,rays=None,filename=None,show=False):
'''Plot the given tci using mayavi if possible.
tci : TriCubic object to plot
rays : array of shape (num_antennas, num_times, num_dirs, 4, num_steps)
filename : name of figure file to save to without extension e.g. "figure1"
show : boolean, whether to show the resulting figure.'''
xmin = tci.xvec[0]
xmax = tci.xvec[-1]
ymin = tci.yvec[0]
ymax = tci.yvec[-1]
zmin = tci.zvec[0]
zmax = tci.zvec[-1]
X,Y,Z = np.mgrid[xmin:xmax:len(tci.xvec)*1j,
ymin:ymax:len(tci.yvec)*1j,
zmin:zmax:len(tci.zvec)*1j]
#reshape array
data = tci.get_shaped_array()
xy = np.mean(data,axis=2)
yz = np.mean(data,axis=0)
zx = np.mean(data,axis=1)
fig,(ax1,ax2,ax3) = plt.subplots(1,3)
ax1.imshow(xy,origin='lower',aspect='auto')
ax1.set_title("X-Y projection")
ax2.imshow(yz,origin='lower',aspect='auto')
ax2.set_title("Y-Z projection")
ax3.imshow(zx,origin='lower',aspect='auto')
ax3.set_title("Z-X projection")
if filename is not None:
plt.savefig("{}.png".format(filename),format='png')
if show:
plt.show()
else:
plt.close()
def make_animation(datafolder,prefix='fig',fps=3):
'''Given a datafolder with figures of format `prefix`-%04d.png create a
video at framerate `fps`.
Output is datafolder/animation.mp4'''
if os.system('ffmpeg -framerate {} -i {}/{}-%04d.png -vf scale="trunc(iw/2)*2:trunc(ih/2)*2" -c:v libx264 -profile:v high -pix_fmt yuv420p -g 30 -r 30 {}/animation.mp4'.format(fps,datafolder,prefix,datafolder)):
print("{}/animation.mp4 exists already".format(datafolder))
def animate_tci_slices(TCI,output_folder,num_seconds=10.):
'''Animate the slicing of a tci by showing the xz, yz, zy planes as they
sweep across the volume (possibly depreciated)'''
try:
os.makedirs(output_folder)
except:
pass
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(12,12))
ax1 = fig.add_subplot(221, projection='3d')
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
M = TCI.get_shaped_array()
if np.sum(M<0) > 0:
print("Using linear scaling")
log_spacing = False
else:
print("Using log scaling")
log_spacing = True
M[M==0] = np.min(M[M>0])
levels = [np.min(M),np.max(M)]
for q in np.linspace(1,99,15*5+2):
if log_spacing:
l = 10**np.percentile(np.log10(M),q)
if l not in levels and len(levels) < 15:
levels.append(l)
else:
l = np.percentile(M,q)
if l not in levels and len(levels) < 15:
levels.append(l)
levels = np.sort(levels)
#N = max(1,int((len(levels)-2)/13))
#levels = [levels[0]] + levels[1:-1][::N] + [levels[-1]]
print("plotting levels : {}".format(levels))
#M[M<levels[0]] = np.nan
#M[M>levels[-1]] = np.nan
vmin = np.min(M)
vmax = np.max(M)
Y_1,X_1 = np.meshgrid(TCI.yvec,TCI.xvec,indexing='ij')
Z_2,Y_2 = np.meshgrid(TCI.zvec,TCI.yvec,indexing='ij')
Z_3,X_3 = np.meshgrid(TCI.zvec,TCI.xvec,indexing='ij')
i = 0
while i < TCI.nz:
xy = M[:,:,i].transpose()#x by y
j1 = int(i/float(TCI.nz)*TCI.nx)
#j1 = TCI.nx >> 1
yz = M[j1,:,:].transpose()#y by z
j2 = (TCI.ny - 1) - int(i/float(TCI.nz)*TCI.ny)
#j2 = TCI.ny >> 1
xz = M[:,j2,:].transpose()#x by z
im = ax2.imshow(xy,origin='lower',vmin=vmin,vmax=vmax,aspect = 'auto',
extent=[TCI.xvec[0],TCI.xvec[-1],TCI.yvec[0],TCI.yvec[-1]],cmap=plt.cm.bone)
CS = ax2.contour(xy, levels,
origin='lower',
linewidths=2,
extent=[TCI.xvec[0],TCI.xvec[-1],TCI.yvec[0],TCI.yvec[-1]],cmap=plt.cm.hot_r)
zc = CS.collections[-1]
plt.setp(zc, linewidth=4)
plt.clabel(CS, levels[1::2], # label every second level
inline=1,
fmt='%.2g',
fontsize=14)
ax2.set_title("Height: {:.2g} km".format(TCI.zvec[i]))
ax2.set_xlabel('X km')
ax2.set_ylabel('Y km')
im = ax3.imshow(yz,origin='lower',vmin=vmin,vmax=vmax,aspect = 'auto',
extent=[TCI.yvec[0],TCI.yvec[-1],TCI.zvec[0],TCI.zvec[-1]],cmap=plt.cm.bone)
CS = ax3.contour(yz, levels,
origin='lower',
linewidths=2,
extent=[TCI.yvec[0],TCI.yvec[-1],TCI.zvec[0],TCI.zvec[-1]],cmap=plt.cm.hot_r)
zc = CS.collections[-1]
plt.setp(zc, linewidth=4)
plt.clabel(CS, levels[1::2], # label every second level
inline=1,
fmt='%.2g',
fontsize=14)
#ax3.set_title("Solution")
ax3.set_title("X_slice: {:.2g} km".format(TCI.xvec[j1]))
ax3.set_ylabel('Z km')
ax3.set_xlabel('Y km')
im = ax4.imshow(xz,origin='lower',vmin=vmin,vmax=vmax,aspect = 'auto',
extent=[TCI.xvec[0],TCI.xvec[-1],TCI.zvec[0],TCI.zvec[-1]],cmap=plt.cm.bone)
CS = ax4.contour(xz, levels,
origin='lower',
linewidths=2,
extent=[TCI.xvec[0],TCI.xvec[-1],TCI.zvec[0],TCI.zvec[-1]],cmap=plt.cm.hot_r)
zc = CS.collections[-1]
plt.setp(zc, linewidth=4)
plt.clabel(CS, levels[1::2], # label every second level
inline=1,
fmt='%.2g',
fontsize=14)
ax4.set_title("Y_slice: {:.2g} km".format(TCI.yvec[j2]))
ax4.set_xlabel('X km')
ax4.set_ylabel('Z km')
plt.savefig("{}/fig-{:04d}.png".format(output_folder,i))
ax1.cla()
ax2.cla()
ax3.cla()
ax4.cla()
i += 1
make_animation(output_folder,prefix='fig',fps=int(TCI.nz/float(num_seconds)))
def plot_datapack(datapack,ant_idx=-1,time_idx=[0], dir_idx=-1,freq_idx=-1,figname=None,vmin=None,vmax=None,mode='perantenna',observable='phase',phase_wrap=True,res_N = 25, plot_crosses=True,plot_facet_idx=False):
'''Plot phase at central frequency'''
assert datapack.ref_ant is not None, "set DataPack ref_ant first"
if len(time_idx) == 1 and figname is not None:
figname = [figname]
if len(time_idx) > 1 and figname is not None:
assert len(time_idx) == len(figname)
directions, patch_names = datapack.get_directions(dir_idx=dir_idx)
antennas, antLabels = datapack.get_antennas(ant_idx=ant_idx)
times,timestamps = datapack.get_times(time_idx=time_idx)
freqs = datapack.get_freqs(freq_idx=freq_idx)
if observable == 'phase':
obs = datapack.get_phase(ant_idx = ant_idx,dir_idx=dir_idx,time_idx=time_idx, freq_idx = freq_idx)
elif observable == 'prop':
obs = datapack.get_prop(ant_idx = ant_idx,dir_idx=dir_idx,time_idx=time_idx, freq_idx = freq_idx)
elif observable == 'variance':
phase_wrap=False
obs = datapack.get_variance(ant_idx = ant_idx,dir_idx=dir_idx,time_idx=time_idx, freq_idx = freq_idx)
elif observable == 'std':
phase_wrap = False
obs = np.sqrt(datapack.get_variance(ant_idx = ant_idx,dir_idx=dir_idx,time_idx=time_idx, freq_idx = freq_idx))
elif observable == 'snr':
obs = np.abs(datapack.get_phase(ant_idx = ant_idx,dir_idx=dir_idx,time_idx=time_idx, freq_idx = freq_idx) \
/ (np.sqrt(datapack.get_variance(ant_idx = ant_idx,dir_idx=dir_idx,time_idx=time_idx, freq_idx = freq_idx)) + 1e-10))
phase_wrap = False
#print("Plotting observable: {}".format(observable))
if phase_wrap:
obs = np.angle(np.exp(1j*obs))
vmin = -np.pi
vmax = np.pi
cmap = phase_cmap
else:
vmin = vmin or np.percentile(obs.flatten(), 2.5)
vmax = vmax or np.percentile(obs.flatten(), 97.5)
cmap = plt.cm.bone
print(vmin,vmax)
Na = len(antennas)
Nt = len(times)
Nd = len(directions)
Nf = len(freqs)
ref_ant_idx = None
for i in range(Na):
if antLabels[i] == datapack.ref_ant:
ref_ant_idx = i
for idx,j in enumerate(time_idx):
print("Plotting {}".format(j))
fixtime = times[idx]
fixfreq = freqs[Nf>>1]
phase = datapack.get_center_direction()
array_center = datapack.radio_array.get_center()
uvw = UVW(location = array_center.earth_location,obstime = fixtime,phase = phase)
ants_uvw = antennas.transform_to(uvw)
dirs_uvw = directions.transform_to(uvw)
factor300 = 300./dirs_uvw.w.value
if mode == 'perantenna':
#make plots, M by M
M = int(np.ceil(np.sqrt(Na)))
fig = plt.figure(figsize=(4*M,4*M))
#use direction average as phase tracking direction
N = res_N
U,V = np.meshgrid(np.linspace(np.min(dirs_uvw.u.value*factor300),
np.max(dirs_uvw.u.value*factor300),N),
np.linspace(np.min(dirs_uvw.v.value*factor300),
np.max(dirs_uvw.v.value*factor300),N),indexing='ij')
i = 0
while i < Na:
ax = fig.add_subplot(M,M,i+1)
dx = np.sqrt((ants_uvw.u[i] - ants_uvw.u[ref_ant_idx])**2 + (ants_uvw.v[i] - ants_uvw.v[ref_ant_idx])**2).to(au.km).value
ax.annotate(s="{} : {:.2g} km".format(antLabels[i],dx),xy=(.2,.8),xycoords='axes fraction')
if i == 0:
#ax.annotate(s="{} : {:.2g} km\n{}".format(antLabels[i],dx,fixtime.isot),xy=(.2,.8),xycoords='axes fraction')
#ax.annotate(s=fixtime.isot,xy=(.2,0.05),xycoords='axes fraction')
ax.set_title("Phase {} MHz : {}".format(fixfreq/1e6,fixtime.isot))
#ax.set_title("Ref. Proj. Dist.: {:.2g} km".format(dx))
ax.set_xlabel("Projected East km")
ax.set_ylabel("Projected West km")
D = interp_nearest(dirs_uvw.u.value*factor300,dirs_uvw.v.value*factor300,obs[i,idx,:,Nf>>1],U.flatten(),V.flatten()).reshape(U.shape)
im = ax.imshow(D,origin='lower',extent=(np.min(U),np.max(U),np.min(V),np.max(V)),aspect='auto',
vmin = vmin, vmax= vmax,cmap=cmap,alpha=1.)
if plot_crosses:
sc1 = ax.scatter(dirs_uvw.u.value*factor300,dirs_uvw.v.value*factor300, c='black',
marker='+')
else:
if plot_facet_idx:
[ax.annotate(str(k),xy=((dirs_uvw.u.value*factor300)[k],(dirs_uvw.v.value*factor300)[k]),xycoords='data') for k in range(Nd)]
i += 1
#plt.tight_layout()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax,orientation='vertical')
elif mode == 'perdirection':
M = int(np.ceil(np.sqrt(Na)))
fig = plt.figure(figsize=(4*M,4*M))
#use direction average as phase tracking direction
vmax = np.pi
vmin = -np.pi
N = res_N
U,V = np.meshgrid(np.linspace(np.min(ants_uvw.u.to(au.km).value),
np.max(ants_uvw.u.to(au.km).value),N),
np.linspace(np.min(ants_uvw.v.to(au.km).value),
np.max(ants_uvw.v.to(au.km).value),N),indexing='ij')
k = 0
while k < Nd:
ax = fig.add_subplot(M,M,k+1)
#dx = np.sqrt((ants_uvw.u[i] - ants_uvw.u[ref_ant_idx])**2 + (ants_uvw.v[i] - ants_uvw.v[ref_ant_idx])**2).to(au.km).value
ax.annotate(s="{} : {} ".format(patch_names[k],directions[k]),xy=(.2,.8),xycoords='axes fraction')
if k == 0:
#ax.annotate(s="{} : {:.2g} km\n{}".format(antLabels[i],dx,fixtime.isot),xy=(.2,.8),xycoords='axes fraction')
#ax.annotate(s=fixtime.isot,xy=(.2,0.05),xycoords='axes fraction')
ax.set_title("Phase {} MHz : {}".format(fixfreq/1e6,fixtime.isot))
#ax.set_title("Ref. Proj. Dist.: {:.2g} km".format(dx))
ax.set_xlabel("Projected East km")
ax.set_ylabel("Projected North km")
D = interp_nearest(ants_uvw.u.to(au.km).value,ants_uvw.v.to(au.km).value,np.angle(np.exp(1j*obs[:,idx,k,Nf>>1])),U.flatten(),V.flatten()).reshape(U.shape)
im = ax.imshow(D,origin='lower',extent=(np.min(U),np.max(U),np.min(V),np.max(V)),aspect='auto',
vmin = vmin, vmax= vmax,cmap=phase_cmap,alpha=1.)
sc1 = ax.scatter(ants_uvw.u.to(au.km).value,ants_uvw.v.to(au.km).value, c='black',
marker='+')
k += 1
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax,orientation='vertical')
#plt.tight_layout()
if figname is not None:
plt.savefig("{}.png".format(figname[idx]),format='png')
else:
plt.show()
plt.close()
def animate_datapack(datapack,output_folder, ant_idx=-1,time_idx=-1,dir_idx=-1,num_threads=1,mode='perantenna',observable='phase',**kwargs):
from dask.threaded import get
from functools import partial
try:
os.makedirs(output_folder)
except:
pass
times,timestamps = datapack.get_times(time_idx=time_idx)
Nt = len(times)
# j = 0
# idx = 0
# dsk = {}
# objective = []
# for thread in range(num_threads):
# figs = []
# time_idx = []
# for j in range(thread,Nt,num_threads):
# figs.append(os.path.join(output_folder,"fig-{:04d}".format(j)))
# time_idx.append(j)
# dsk[thread] = (partial(plot_datapack,ant_idx=ant_idx,time_idx=time_idx, dir_idx=dir_idx,figname=figs,mode=mode),datapack)
# objective.append(thread)
for j in range(Nt):
fig = os.path.join(output_folder,"fig-{:04d}".format(j))
plot_datapack(datapack,ant_idx=ant_idx,time_idx=[j], dir_idx=dir_idx,figname=fig,mode=mode,observable=observable,**kwargs)
#get(dsk,objective,num_workers=num_threads)
make_animation(output_folder,prefix="fig",fps=int(10))
```
#### File: scripts/2d_vs_3d/temporal_powerspectrum.py
```python
from ionotomo import *
from ionotomo.utils.gaussian_process import *
from rathings.phase_unwrap import *
import pylab as plt
import numpy as np
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10,10)
# In[2]:
# data intake ~ 28GB
datapack = DataPack(filename="../rvw_data_analysis/rvw_datapack.hdf5")
print("Loaded : {}".format(datapack))
antennas,antenna_labels = datapack.get_antennas(ant_idx=-1)
times,timestamps = datapack.get_times(time_idx=-1)
directions, patch_names = datapack.get_directions(dir_idx=-1)
phase = datapack.get_phase(ant_idx=-1,time_idx=-1,dir_idx=-1,freq_idx=[0])
# In[7]:
## functions
def prepare_phase(phase):
phase = phase_unwrapp1d(phase,axis=0)
phase -= np.mean(phase)
return phase
def opt_kernel(times, phase, K, sigma_y=0):
"""Bayes Optimization"""
assert len(times) < np.sqrt(1e6), "Don't want to do too many ops"
X = np.array([times.gps]).T
y = prepare_phase(phase)
K.hyperparams = level2_solve(X,y,sigma_y,K,n_random_start=0)
return K
def multi_opt_kernel(times, phase, K, sigma_y=0):
"""Bayes Optimization over multiple directions
times : array (num_times,)
time array
phase : array (num_times, num_directions)
phases in several directions
"""
assert len(times) < np.sqrt(1e6), "Don't want to do too many ops"
num_directions = phase.shape[1]
X = [np.array([times.gps]).T]*num_directions
y = prepare_phase(phase).T
K.hyperparams = level2_multidataset_solve(X,y,[sigma_y]*num_directions,K,n_random_start=10)
return K
def plot_prediction(times_predict, times, phase, K, sigma_y = 0,phase_true=None,ant_label=None):
"""Level1 predictive"""
assert len(times) < np.sqrt(1e6), "Don't want to do too many ops"
X = np.array([times.gps]).T
Xstar = X
#y = prepare_phase(phase)
y = phase
ystar, cov, lml = level1_solve(X,y,sigma_y,Xstar,K)
plt.plot(X[:,0],y,label='data')
plt.plot(Xstar[:,0],ystar,c='red',ls='--')
plt.plot(Xstar[:,0],ystar+np.sqrt(np.diag(cov)),c='green',ls='--')
plt.plot(Xstar[:,0],ystar-np.sqrt(np.diag(cov)),c='blue',ls='--')
if ant_label is not None:
plt.title(ant_label)
plt.xlabel('time (s)')
plt.ylabel('phase (rad)')
if phase_true is not None:
#y_true = prepare_phase(phase_true)
Xstar = np.array([times_predict.gps]).T
ystar, cov, lml = level1_solve(X,y,sigma_y,Xstar,K)
y_true = phase_true
plt.plot(Xstar[:,0],y_true,label="true")
plt.plot(Xstar[:,0],ystar,c='red',ls='-',label='pred')
plt.plot(Xstar[:,0],ystar+np.sqrt(np.diag(cov)),c='green',ls='-',label=r'$+\sigma$')
plt.plot(Xstar[:,0],ystar-np.sqrt(np.diag(cov)),c='blue',ls='-',label=r'$-\sigma$')
plt.legend(frameon=False)
plt.tight_layout()
plt.show()
# In[15]:
ant_id = 52
sigma_y = 2*np.pi/180.
for ant_id in range(62):
print("Using : {}".format(antenna_labels[ant_id]))
phases = prepare_phase(phase[ant_id,:,:,0])
# plt.imshow(phases,aspect='auto')
# plt.colorbar()
# plt.show()
K1 = Diagonal(1)
K1.set_hyperparams_bounds([1e-5,10*np.pi/180.],name='sigma')
K2 = SquaredExponential(1,l=20)
K2.set_hyperparams_bounds([8,50],name='l')
K2.set_hyperparams_bounds([1e-5,5],name='sigma')
K3 = SquaredExponential(1,l=220)
K3.set_hyperparams_bounds([50,1000],name='l')
K3.set_hyperparams_bounds([1e-5,5],name='sigma')
K = K1 + K2 + K3
K = multi_opt_kernel(times[:200],phases[:200],K,sigma_y=sigma_y)
print(K)
#plot_prediction(times[200:300],times[:200:2],phases[:200:2], K,sigma_y=0.03,phase_true=phases[200:300],ant_label=antenna_labels[ant_id])
# In[8]:
#for ant_id in range(62):
ant_id=60
print("Using : {}".format(antenna_labels[ant_id]))
print(phase.shape)
phases = prepare_phase(phase[ant_id,:,0,0])
K1 = Diagonal(1)
# K2 = SquaredExponential(1)
# K2.set_hyperparams_bounds([50,1000],name='l')
K3 = RationalQuadratic(1)
K3.set_hyperparams_bounds([50,500],name='l')
# K4 = DotProduct(1,c=times[0].gps)
K = K1 + K3
K = opt_kernel(times[:200:2],phases[:200:2],K,sigma_y=0.03)
print(K)
plot_prediction(times[200:300],times[:200:2],phases[:200:2], K,sigma_y=0.03,phase_true=phases[200:300],ant_label=antenna_labels[ant_id])
# In[ ]:
```
#### File: scripts/rvw_data_analysis/phase_unwrap.py
```python
import numpy as np
import tensorflow as tf
import pylab as plt
import cmocean
from scipy.spatial import cKDTree
# In[2]:
def generate_data_aliased(noise=0.,sample=100):
"""Generate Gaussian bump in phase.
noise : float
amount of gaussian noise to add as fraction of peak height
sample : int
number to sample
"""
#max gradient at b
a = 50
b = 1
max_slope = np.abs(a/np.sqrt(np.exp(1))/b)
#in dx want max_slope*dx > np.pi
dx = 1.1*np.pi/max_slope
N = 10
xvec = np.linspace(-dx*N, dx*N, N*2 + 1)
X,Y = np.meshgrid(xvec,xvec,indexing='ij')
phi = a * np.exp(-(X**2 + Y**2)/2./b**2)
X = np.array([X.flatten(),Y.flatten()]).T
phi += a*noise*np.random.normal(size=phi.shape)
phi = phi.flatten()
if sample != 0:
mask = np.random.choice(phi.size,size=min(sample,phi.size),replace=False)
return X[mask,:],phi[mask]
return X,phi
def generate_data_nonaliased(noise=0.,sample=100):
"""Generate Gaussian bump in phase.
noise : float
amount of gaussian noise to add as fraction of peak height
sample : int
number to sample
"""
#max gradient at b
a = 15
b = 1
max_slope = np.abs(a/np.sqrt(np.exp(1))/b)
#in dx want max_slope*dx < np.pi
dx = 0.5*np.pi/max_slope
N = 10
xvec = np.linspace(-dx*N, dx*N, N*2 + 1)
X,Y = np.meshgrid(xvec,xvec,indexing='ij')
phi = a * np.exp(-(X**2 + Y**2)/2./b**2)
X = np.array([X.flatten(),Y.flatten()]).T
phi += a*noise*np.random.normal(size=phi.shape)
phi = phi.flatten()
if sample != 0:
mask = np.random.choice(phi.size,size=min(sample,phi.size),replace=False)
return X[mask,:],phi[mask]
return X,phi
def generate_data_nonaliased_nonsquare(noise=0.,sample=100):
"""Generate Gaussian bump in phase.
noise : float
amount of gaussian noise to add as fraction of peak height
sample : int
number to sample
"""
#max gradient at b
a = 20
b = 1
max_slope = np.abs(a/np.sqrt(np.exp(1))/b)
#in dx want max_slope*dx <= np.pi (Nyquist limit); sample at half that spacing
dx = np.pi/max_slope/2.
#dx = sqrt(D^2/samples)
assert sample > 0
D = np.sqrt(dx**2*sample)
X = np.random.uniform(low=-D/2.,high=D/2.,size=(sample,2))
phi = a * np.exp(-(X[:,0]**2 + X[:,1]**2)/2./b**2)
phi += a*noise*np.random.normal(size=phi.shape)
return X,phi
def plot_phase(X,phi,label=None,figname=None):
"""Plot the phase.
X : array (num_points, 2)
The coords
phi : array (num_points,)
The phases
"""
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
sc = ax.scatter(X[:,0],X[:,1],phi,c=np.angle(np.exp(1j*phi)),cmap=cmocean.cm.phase,s=10,vmin=-np.pi,vmax=np.pi,label=label or "")
plt.colorbar(sc)
if label is not None:
plt.legend(frameon=False)
if figname is not None:
plt.savefig(figname)
plt.show()
# In[3]:
X,phi = generate_data_aliased(0.01,sample=0)
plot_phase(X,np.angle(np.exp(1j*phi)))
plot_phase(X,phi)
X,phi = generate_data_nonaliased_nonsquare(0.0,sample=500)
plot_phase(X,np.angle(np.exp(1j*phi)))
plot_phase(X,phi)
# In[4]:
def create_triplets(X,redundancy=2):
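    """Build a nearest-neighbour spanning path over the points in X and a set
    of residue triplets around it (added description; behaviour inferred from
    the code below).
    X : array (num_points, 2)
        point coordinates
    redundancy : int
        number of extra triplets attached to each path edge
    Returns (path, triplets): path is a list of index pairs covering every
    point, triplets is a unique array of sorted index triples.
    """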
kt = cKDTree(X)
#get center of map
C = np.mean(X,axis=0)
_,idx0 = kt.query(C,k=1)
idx0 = np.random.randint(X.shape[0])
#define unique path
dist, idx = kt.query(X[idx0,:],k=2)
path = [(idx0, idx[1])]
included = [idx0, idx[1]]
while len(included) < X.shape[0]:
dist,idx = kt.query(X[included,:],k = len(included)+1)
mask = np.where(np.isin(idx,included,invert=True))
argmin = np.argmin(dist[mask])
idx_from = included[mask[0][argmin]]
idx_to = idx[mask[0][argmin]][mask[1][argmin]]
path.append((idx_from,idx_to))
included.append(idx_to)
M = np.mean(X[path,:],axis=1)
_,idx = kt.query(M,k=2 + redundancy)
triplets = []
for i,p in enumerate(path):
count = 0
for c in range(2 + redundancy):
if idx[i][c] not in p:
triplets.append(p + (idx[i][c],))
count += 1
if count == redundancy:
break
triplets = np.sort(triplets,axis=1)
triplets = np.unique(triplets,axis=0)
return path,triplets
fig = plt.figure(figsize=(8,8))
for i,j,k in create_triplets(X,redundancy=4)[1]:
plt.plot([X[i,0],X[j,0],X[k,0],X[i,0]],[X[i,1],X[j,1],X[k,1],X[i,1]])
#plt.savefig('residue_triplets_3_redundant.png')
plt.show()
# dist,idx = kt.query(X,k=4)
# triplets = np.sort(idx[:,[0,2,3]],axis=1)
# ra = np.ravel_multi_index((triplets[:,0],triplets[:,1],triplets[:,2]),[X.shape[0]]*3)
# unique_triplets = np.unique(ra)
# triplets = np.stack(np.unravel_index(unique_triplets,[X.shape[0]]*3),axis=1)
# print(triplets)
# M = (X[triplets[:,0],:] + X[triplets[:,1],:] + X[triplets[:,2],:])/3.
# kt = cKDTree(M)
# _,idx = kt.query(M[0,:],k = M.shape[0])
# print(idx)
# # trips_ = []
# # for n in range(len(idx)-1):
# # trips_.append((triplets[idx[n],0],triplets[idx[n],1], triplets[idx[n+1],2]))
# # triplets = np.concatenate([triplets,trips_],axis=0)
# # triplets = np.unique(triplets,axis=0)
# print(triplets)
# for i,j,k in triplets:
# plt.plot([X[i,0],X[j,0],X[k,0],X[i,0]],[X[i,1],X[j,1],X[k,1],X[i,1]])
# #plt.plot(M[idx,0],M[idx,1],c='red')
# plt.show()
# In[ ]:
# In[5]:
def phase_unwrap(X,phi_wrap,phi_wrap_var=None,redundancy=2,dropout=0.5):
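    """Unwrap wrapped phases defined on 2D coordinates by optimising integer
    2*pi jumps with a TensorFlow (1.x graph mode) model (added description;
    behaviour inferred from the code below).
    X : array (num_points, 2)
        coordinates of the phase samples
    phi_wrap : array (num_points,) or (Na, Nt, num_points, Nf)
        wrapped phases
    phi_wrap_var : array, optional
        per-sample phase variance, same shape as phi_wrap
    redundancy : int
        extra residue triplets per path edge (see create_triplets)
    dropout : float
        unused; the dropout keep-probability is scheduled internally
    Returns the unwrapped phases with shape (Na, Nt, num_points, Nf).
    """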
if len(phi_wrap.shape) == 1:
phi_wrap = phi_wrap[None,None,:,None]
Na,Nt,Nd,Nf = phi_wrap.shape
path_, triplets_ = create_triplets(X,redundancy=redundancy)
pairs = np.unique(np.sort(np.concatenate([triplets_[:,[0,1]],triplets_[:,[1,2]],triplets_[:,[2,0]]],axis=0),axis=1),axis=0)
N = pairs.shape[0]
g = tf.Graph()
sess = tf.InteractiveSession(graph=g)
with g.as_default():
with tf.name_scope("unwrapper") as scope:
g = tf.placeholder(tf.float32,shape=(Na,Nt,Nd,Nf),name='g')
triplets = tf.placeholder(tf.int32,shape=(len(triplets_),3),name='triplets')
path = tf.placeholder(tf.int32,shape=(len(path_),2),name='path')
def _init(shape,dtype=tf.float64,partition_info=None):
init = np.zeros(shape)
#init[:,shape[1]>>1] = np.log(2)
#init = tf.zeros(shape,dtype=dtype)
#init[:,shape[1]>>1] = 1.
return init
K_ = tf.placeholder(tf.float32, shape=(Na,Nt,Nd,Nf,9))
K = tf.get_variable("K",initializer=K_)
K_softmax = tf.nn.softmax(K,dim=-1)
indices = tf.constant((np.arange(9)-4.).reshape((1,1,1,1,-1)),dtype=tf.float32)
# print(indices)
K_int = tf.reduce_sum(K_softmax*indices,axis=-1)*2*np.pi
# print(K_int,triplets)
#entropy
entropy = - tf.reduce_mean(tf.reduce_sum(K_softmax*tf.log(K_softmax),axis=-1))
def _wrap(a):
return tf.cast(tf.angle(tf.exp(1j*tf.cast(a,tf.complex64))),tf.float32)
f_noise = tf.get_variable("f_noise",shape=(Na,Nt,Nd,Nf),dtype=tf.float32,initializer=_init)
#f ~ N(f_obs,sigma_f^2)
#f(K) = g_i + K 2pi
# f(K) = int_p dg(x) + 2pi K(x)
# K ~ N(0,C_K(x,x'))
# K = K(theta) ~ P(K, theta) = softmax(theta)
# log P(K,theta) = sum softmax(theta)_i log(softmax(theta)_i)
# Hamiltonian:
# H(K) =
K_int_cum = tf.cumsum(K_int,axis=1)
f = g + K_int_cum + f_noise
#sigma_f = tf.get_variable("sigma_f",shape=(),dtype=tf.float32,initializer=tf.zeros_initializer)
#prior for noise gaussian N(0,sigma_f^2)
#df2 = tf.gather(f,path[:,1]) - tf.gather(f,path[:,0])
#loss_path = tf.square(f[0] - g[0]) + tf.reduce_mean()
dropout_ = tf.placeholder(tf.float32,shape=())
phi_wrap_var_ = tf.placeholder(tf.float32,shape=phi_wrap.shape)
df = tf.gather(f,pairs[:,1],axis=2) - tf.gather(f,pairs[:,0],axis=2)#tf.get_variable("df",shape=(N,),dtype=tf.float32,initializer=tf.zeros_initializer)
consistency = tf.sqrt(1.+tf.square(_wrap(tf.gather(g,pairs[:,1],axis=2) - tf.gather(g,pairs[:,0],axis=2)) - df)) - 1.
consistency = tf.nn.dropout(consistency,dropout_)
loss_lse = tf.reduce_mean(consistency)
#cov = tf.expand_dims(f_noise,-1)
#loss_tv = tf.reduce_mean(tf.reduce_mean(tf.abs(cov*tf.transpose(cov,perm=[1,0])),axis=1),axis=0)
loss_tv = tf.reduce_mean(tf.square(f_noise)/phi_wrap_var_)
# smooth_residuals = tf.sqrt(1.+tf.square(tf.gather(f_noise,pairs[:,1],axis=1) - tf.gather(f_noise,pairs[:,0],axis=1))) - 1.
# #smooth_residuals = tf.nn.dropout(smooth_residuals,dropout_)
# loss_smooth = tf.reduce_mean(smooth_residuals)
# #loss_tv += tf.reduce_mean(tf.square(tf.gather(K_int,pairs[:,1]) - tf.gather(K_int,pairs[:,0])))
#loss_tv = tf.reduce_mean(tf.square(f_noise))
#length_scale = np.mean(np.abs(X[pairs[:,1],:] - X[pairs[:,0],:]))
#kernel = (0.1**2)*tf.cast(tf.exp(-pdist(tf.constant(X[None,:,:]))/2./(length_scale)**2),tf.float32)
#loss_reg = tf.reduce_mean(tf.matmul(tf.expand_dims(K_int,0),tf.linalg.triangular_solve(kernel[0,:,:],tf.expand_dims(K_int,-1)))/2.)
#tf.reduce_mean(tf.square(tf.gather(K_int,pairs[:,1]) - tf.gather(K_int,pairs[:,0])))
# mean,var = tf.nn.moments(df,axes=[0])
# loss_lse += var
Wf = _wrap(f)
df01 = tf.gather(Wf,triplets[:,1],axis=2) - tf.gather(Wf,triplets[:,0],axis=2)
df01 = _wrap(df01)
df12 = tf.gather(Wf,triplets[:,2],axis=2) - tf.gather(Wf,triplets[:,1],axis=2)
df12 = _wrap(df12)
df20 = tf.gather(Wf,triplets[:,0],axis=2) - tf.gather(Wf,triplets[:,2],axis=2)
df20 = _wrap(df20)
residue = tf.sqrt(1. + tf.square(df01 + df12 + df20))-1.
residue = tf.nn.dropout(residue,dropout_)
loss_residue = tf.reduce_mean(residue)
#K_int_mean = (tf.gather(K_int,triplets[:,0]) + tf.gather(K_int,triplets[:,1]) + tf.gather(K_int,triplets[:,2]))/3.
#loss_reg = tf.reduce_mean(1./(1+0)*tf.abs(tf.gather(K_int,triplets[:,0]) - K_int_mean) + tf.abs(tf.gather(K_int,triplets[:,1]) - K_int_mean) + tf.abs(tf.gather(K_int,triplets[:,2]) - K_int_mean))
#loss_reg = tf.reduce_mean(tf.sqrt(1.+tf.square(tf.gather(K_int,pairs[:,1]) - tf.gather(K_int,pairs[:,0]))))
learning_rate = tf.placeholder(tf.float32,shape=())
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
entropy_weight = tf.placeholder(tf.float32,shape=())
train_op = opt.minimize(loss_lse+entropy_weight*entropy+loss_residue+loss_tv)
losses = [loss_lse ,loss_residue,entropy,loss_tv]
sess.run(tf.global_variables_initializer(), feed_dict = {K_: np.zeros([Na,Nt,Nd,Nf,9])})
import time
time_ = time.mktime(time.gmtime())
loss_per_step_ = []
for epoch in range(25000):
ew = 0.0000001
lr = 0.1
dp = 0.2
if epoch > 1000:
ew = 0.000001
lr = 0.1
dp = 0.3
if epoch > 5000:
ew = 0.00001
lr = 0.05
dp = 0.3
if epoch > 10000:
ew = 0.001
lr = 0.03
dp = 0.5
if epoch > 15000:
ew = 0.01
lr = 0.01
dp = 0.5
if epoch > 20000:
ew = 0.01
lr = 0.001
dp = 0.8
if phi_wrap_var is None:
phi_wrap_var = np.ones_like(phi_wrap)
_,losses_,df_,K_int_,K_softmax_,f_noise_ = sess.run([train_op,losses,df,K_int,K_softmax,f_noise],
feed_dict={dropout_:dp,
learning_rate:lr,
entropy_weight: ew,
g : phi_wrap,
triplets: triplets_,
path:path_,
phi_wrap_var_ : phi_wrap_var})
loss_per_step_.append(np.sum(losses_))
if np.sum(losses_) < 0.1:
print("Epoch : {} loss={:.4f} | LSE: {:.4f} | Residue: {:.4f} | Entropy: {:.4f} | TV: {:.4f} ".format(epoch,np.sum(losses_),*losses_))
break
if time.mktime(time.gmtime()) - time_ > 5. or epoch==0:
time_ = time.mktime(time.gmtime())
print("Epoch : {} loss={:.4f} | LSE: {:.4f} | Residue: {:.4f} | Entropy: {:.4f} | TV: {:.4f} ".format(epoch,np.sum(losses_),*losses_))
if np.sum(losses_) < 0.1:
break
print("Epoch : {} loss={:.4f} | LSE: {:.4f} | Residue: {:.4f} | Entropy: {:.4f} | TV: {:.4f} ".format(epoch,np.sum(losses_),*losses_))
f_rec = np.zeros_like(phi_wrap)
f_rec[:,:,path_[0][0],:] = phi_wrap[:,:,path_[0][0],:]
K_int_sum_ = np.cumsum((np.argmax(K_softmax_,axis=4)-4)*2*np.pi,axis=1)
#print(df_)
for i,p in enumerate(path_):
df_ = phi_wrap[:,:,p[1],:] - phi_wrap[:,:,p[0],:] + K_int_sum_[:,:,p[1],:] - K_int_sum_[:,:,p[0],:]
f_rec[:,:,p[1],:] = f_rec[:,:,p[0],:] + df_
plt.plot(loss_per_step_)
plt.yscale('log')
plt.show()
return f_rec
#df = (np.argmax(K_softmax_,axis=1)-5)*2*np.pi
#f_rec = phi_wrap + df
#f_rec = phi_wrap + K_int_
X,phi = generate_data_nonaliased_nonsquare(0.03,sample=100)
#X,phi = generate_data_aliased(0.0,sample=0)
path_, triplets_ = create_triplets(X,redundancy=2)
dist = np.concatenate([phi[triplets_[:,1]] - phi[triplets_[:,0]],
phi[triplets_[:,2]] - phi[triplets_[:,1]],
phi[triplets_[:,0]] - phi[triplets_[:,2]]],axis=0)/np.pi
plt.hist(dist,bins=20)
plt.xlabel("pair difference (pi jumps)")
plt.show()
for i,j,k in triplets_:
plt.plot([X[i,0],X[j,0],X[k,0],X[i,0]],[X[i,1],X[j,1],X[k,1],X[i,1]])
#plt.savefig('residue_triplets_3_redundant.png')
plt.show()
phi_wrap = np.angle(np.exp(1j*phi))
f_rec = phase_unwrap(X,phi_wrap,redundancy=2)[0,0,:,0]
plot_phase(X,phi_wrap,label='phi_wrap',figname='phi_wrap.png')
plot_phase(X,f_rec,label='f_rec',figname='phi_rec.png')
plot_phase(X,phi,label='true',figname='phi_true.png')
plot_phase(X,f_rec-phi,label='f_rec - true',figname='rec_true_diff.png')
plot_phase(X,(f_rec-np.angle(np.exp(1j*f_rec)))/(2*np.pi),label='jumps',figname='jumps_rec.png')
plot_phase(X,(phi-phi_wrap)/(2*np.pi),label='true jumps',figname='jumps_true.png')
# In[11]:
from ionotomo import *
datapack = DataPack(filename='rvw_datapack.hdf5')
# In[6]:
ant_idx=-1
time_idx=-1
dir_idx=-1
freq_idx=range(0,125,5)
directions, patch_names = datapack.get_directions(dir_idx=dir_idx)
antennas, antenna_labels = datapack.get_antennas(ant_idx=ant_idx)
times,timestamps = datapack.get_times(time_idx=time_idx)
freqs = datapack.get_freqs(freq_idx=freq_idx)
Na = len(antennas)
Nt = len(times)
Nd = len(directions)
Nf = len(freqs)
fixtime = times[0]
fixfreq = freqs[Nf>>1]
phase_center = datapack.get_center_direction()
array_center = datapack.radio_array.get_center()
uvw = [Pointing(location = array_center.earth_location,obstime = times[j],fixtime=fixtime, phase = phase_center) for j in range(1)]
ants_uvw = [antennas.transform_to(uvw[j]) for j in range(1)]
dirs_uvw = [directions.transform_to(uvw[j]) for j in range(1)]
# In[13]:
phase=np.angle(np.exp(1j*datapack.get_phase(ant_idx=ant_idx,time_idx=time_idx,dir_idx=dir_idx,freq_idx=freq_idx)))
#plt.plot(phase[0,:,:,0])
from rathings.phase_unwrap import phase_unwrapp1d
phase = np.transpose(phase_unwrapp1d(np.transpose(phase,axes=[1,0,2,3]),axis=0),axes=[1,0,2,3])
phase = np.transpose(phase_unwrapp1d(np.transpose(phase,axes=[3,1,2,0]),axis=0),axes=[3,1,2,0])
plt.plot(phase[51,:,:,0])
plt.show()
phi = phase[:,:,:,:]
phi_wrap=phi
X = np.array([dirs_uvw[0].u.value,dirs_uvw[0].v.value]).T
# In[ ]:
# In[21]:
phase_rec = []
print(X.shape)
for i in range(62):
f_rec = phase_unwrap(X,phase[i:i+1,:,:,:],redundancy=2)
phase_rec.append(f_rec)
phase_rec = np.concatenate(phase_rec,axis=0)
# plot_phase(X,phi_wrap,label='phi_wrap',figname='phi_wrap.png')
# plot_phase(X,f_rec,label='f_rec',figname='phi_rec.png')
# plot_phase(X,phi,label='true',figname='phi_true.png')
# plot_phase(X,f_rec-phi,label='f_rec - true',figname='rec_true_diff.png')
# plot_phase(X,(f_rec-np.angle(np.exp(1j*f_rec)))/(2*np.pi),label='jumps',figname='jumps_rec.png')
# plot_phase(X,(phi-phi_wrap)/(2*np.pi),label='true jumps',figname='jumps_true.png')
# In[147]:
#f_rec = phase_unwrapp1d(f_rec,axis=0)
jumps = (f_rec-phi_wrap)/(2*np.pi)
#phase[0,:,:,0] += jumps[None,:]
[plt.plot(range(3595),jumps[:,i]) for i in range(42)]
plt.show()
# In[27]:
phase = np.transpose(phase_unwrapp1d(np.transpose(phase,axes=[1,0,2,3]),axis=0),axes=[1,0,2,3])
plt.plot(phase[0,:,:,0])
plt.show()
# In[63]:
import time
# In[65]:
# In[7]:
import gpflow as gp
# In[11]:
K = gp.kernels.RBF(2,lengthscales=[1.],variance=1.)
m = gp.models.GPR(X,phi[:,None],kern=K)
# In[22]:
plt.scatter(X[:,0],X[:,1],c=m.predict_f_samples(X,1).flatten())
plt.show()
# In[13]:
import numpy as np
import pylab as plt
x = 0.1*np.random.normal(size=100)+1
plt.polar(np.linspace(0,np.pi*2,100),x)
plt.show()
# In[ ]:
```
#### File: scripts/rvw_data_analysis/temporal_powerspectrum.py
```python
from ionotomo import *
from ionotomo.utils.gaussian_process import *
if __name__ == '__main__':
time_idx = range(200)
datapack = DataPack(filename="rvw_datapack.hdf5")
antennas,antenna_labels = datapack.get_antennas(ant_idx=-1)
times,timestamps = datapack.get_times(time_idx=time_idx)
directions, patch_names = datapack.get_directions(dir_idx=-1)
phase = datapack.get_phase(ant_idx=-1,time_idx=time_idx,dir_idx=-1)
def get_kernel(times, phase, K):
X = np.array([times.gps]).T
y = phase
K.hyperparams = level2_solve(X,y,0,K)
return K
K1 = Diagonal(1)
K2 = SquaredExponential(1)
K2.set_hyperparams_bounds([2,40],name='l')
K3 = SquaredExponential(1)
K3.set_hyperparams_bounds([50,1000],name='l')
K = K1 + K2 + K3
K = get_kernel(times,phase[1,:,0,0],K)
print(K)
```
#### File: scripts/solution_smoothing/smooth_tec_dd.py
```python
from ionotomo import *
from ionotomo.utils.gaussian_process import *
from rathings.phase_unwrap import *
import pylab as plt
import numpy as np
import logging as log
import os
import h5py
import sys
import astropy.time as at
import astropy.coordinates as ac
import astropy.units as au
if sys.hexversion >= 0x3000000:
def str_(s):
return str(s,'utf-8')
else:
def str_(s):
return str(s)
tec_conversion = -8.4480e9# rad Hz/tecu
def prepare_phase(phase,axis=0,center=True):
"""unwrap phase and mean center
phase : array
phase to be unwrapped.
axis : int
the axis to unwrap down (default 0)
center : bool
whether to mean center (default True)
"""
phase = phase_unwrapp1d(phase,axis=axis)
if center:
phase -= np.mean(phase)
return phase
def opt_kernel(times, phase, K, sigma_y=0, n_random_start=0):
"""Bayes Optimization of kernel wrt hyper params
times : array
array of times in seconds most likely
phase : array
1D array of phases already prepared.
K : NDKernel
the kernel for the level 2 optimization.
sigma_y : float or array
if float then measurement uncertainty for all phase.
if array then measurement uncertainty for each phase array element
n_random_start : int
number of random initializations to use in optimization (default 0)
"""
assert len(times) < np.sqrt(1e6), "Don't want to do too many ops"
X = times.reshape((-1,1))
y = phase
K.hyperparams = level2_solve(X,y,sigma_y,K,n_random_start=n_random_start)
return K
def multi_opt_kernel(times, phase, K, sigma_y=0, n_random_start=0):
"""Bayes Optimization of kernel wrt hyper params over multiple directions
times : array (num_times,)
time array
phase : array (num_times, num_directions)
phases in several directions
K : NDKernel
the kernel for the level 2 optimization.
sigma_y : float or array
if float then measurement uncertainty for all phase.
if array then measurement uncertainty for each phase array element
n_random_start : int
number of random initializations to use in optimization (default 0)
"""
assert len(times) < np.sqrt(1e6), "Don't want to do too many ops"
num_directions = phase.shape[1]
X = [times.reshape((-1,1))]*num_directions
y = phase
K.hyperparams = level2_multidataset_solve(X,y,[sigma_y]*num_directions,K,n_random_start=10)
return K
def plot_prediction(times_predict, times, phase, K, sigma_y = 0,phase_true=None,figname=None,ant_label=None,patch_name=None):
"""Level1 predictive and plot
times_predict : array
the times to predict at
times : array
times for training set
phase : array
phase for training set
K : NDKernel
optimized kernel
sigma_y : float or array
if float then measurement uncertainty for all phase.
if array then measurement uncertainty for each phase array element
phase_true : array (optional)
if given then the phases for `times_predict`
ant_label : str (optional)
if given plots the label
"""
assert len(times) < np.sqrt(1e6), "Don't want to do too many ops"
X = times.reshape((-1,1))
#smooth
Xstar = X
y = phase
ystar, cov, lml = level1_solve(X,y,sigma_y,Xstar,K)
plt.plot(X[:,0],y,c='teal',label='data')
plt.plot(Xstar[:,0],ystar,c='red',ls='--')
plt.plot(Xstar[:,0],ystar+np.sqrt(np.diag(cov)),c='green',ls='--')
plt.plot(Xstar[:,0],ystar-np.sqrt(np.diag(cov)),c='blue',ls='--')
if ant_label is not None:
plt.title(ant_label)
plt.xlabel('time (s)')
plt.ylabel('phase (rad)')
#y_true = prepare_phase(phase_true)
Xstar = times_predict.reshape((-1,1))
ystar, cov, lml = level1_solve(X,y,sigma_y,Xstar,K)
std = np.sqrt(np.diag(cov))
plt.plot(Xstar[:,0],ystar,c='red',ls='-',label='pred')
plt.plot(Xstar[:,0],ystar+std,c='green',ls='-',label=r'$+\sigma$')
plt.plot(Xstar[:,0],ystar-std,c='blue',ls='-',label=r'$-\sigma$')
if phase_true is not None:
y_true = phase_true
plt.plot(Xstar[:,0],y_true,c='orange',label="true")
plt.legend(frameon=False)
plt.tight_layout()
if figname is not None:
plt.savefig(figname)
else:
plt.show()
plt.close()
def plot_bayes_smoothed(times, data, smoothed, std, figname,ant_label,patch_name,type):
"""Plot the smoothed
times : array
times for training set
data : array
tec_dd for training set
smoothed : array
smoothed version of tec
figname : str
figure name to save to
ant_label : str
antenna label
patch_name : str
patch name
"""
plt.plot(times,data,c='orange',label='data')
plt.plot(times,smoothed,c='red',ls='--',label='mean')
plt.plot(times,smoothed + std,c='green',ls='--',label=r'$+\sigma$')
plt.plot(times,smoothed - std,c='blue',ls='--',label=r'$-\sigma$')
plt.title("{} | {}".format(ant_label,patch_name))
plt.xlabel('time (s)')
if type == 'tec':
plt.ylabel('TEC (TEC units)')
if type == 'cs':
plt.ylabel('Scalar Phase (radians)')
plt.legend(frameon=False)
plt.tight_layout()
plt.savefig(figname,format='png')
plt.close()
#plt.show()
def smooth_data(times, phase, K, sigma_y = 0):
"""Level1 predictive of data
times : array
times for training set
phase : array
phase for training set
K : NDKernel
optimized kernel
sigma_y : float or array
if float then measurement uncertainty for all phase.
if array then measurement uncertainty for each phase array element
"""
assert len(times) < np.sqrt(1e6), "Don't want to do too many ops"
X = times.reshape((-1,1))
#smooth
Xstar = X
y = phase
ystar, cov, lml = level1_solve(X,y,sigma_y,Xstar,K)
std = np.sqrt(np.diag(cov))
return ystar, std
def smooth_dd_tec(dd_file,output_folder):
"""Use optima bayesian filtering.
dd_file : str
the hdf5 file containing direction dependent solutions
"""
output_folder = os.path.join(os.getcwd(),output_folder)
diagnostic_folder = os.path.join(output_folder,'diagnostics')
try:
os.makedirs(diagnostic_folder)
except:
pass
log.basicConfig(filename=os.path.join(output_folder,"log"),format='%(asctime)s %(levelname)s:%(message)s', level=log.DEBUG)
f_dd = h5py.File(dd_file,"r",libver="earliest")
antenna_labels = []
antenna_positions = []
for row in f_dd['/sol000/antenna']:
antenna_labels.append(str_(row[0]))
antenna_positions.append(row[1])
antenna_labels = np.array(antenna_labels)
antenna_positions = np.array(antenna_positions)
Na = len(antenna_labels)
times = at.Time(f_dd['/sol000/tec000/time'][...]/86400., format='mjd',scale='tai')
timestamps = times.isot
times = times.gps
Nt = len(times)
patch_names = []
directions = []
for row in f_dd['/sol000/source']:
patch_names.append(str_(row[0]).replace('[','').replace(']',''))
directions.append(row[1])
patch_names = np.array(patch_names).flatten()
directions = np.array(directions)
directions = ac.SkyCoord(directions[:,0]*au.rad, directions[:,1]*au.rad,frame='icrs')
Nd = len(patch_names)
#times, antennas, direction -> ijk
tec_dd = np.einsum("jik->ijk",
f_dd['/sol000/tec000/val'][:,:,:,0])
scalarphase_dd = np.einsum("jik->ijk",
f_dd['/sol000/scalarphase000/val'][:,:,:,0])
coherence_time = 200.#seconds
dt = times[1]-times[0]
num_opt = int(coherence_time / dt * 4)
sigma_y = 0#0.14/8.4480e9*120e6 #0.14 rad in TEC at 120MHz for approximation
K1 = Diagonal(1, sigma = sigma_y / 2.)
K1.set_hyperparams_bounds([1e-5,0.2],name='sigma')
K2 = SquaredExponential(1,l=20)
K2.set_hyperparams_bounds([dt*2,70],name='l')
K2.set_hyperparams_bounds([1e-5,1],name='sigma')
K3 = SquaredExponential(1,l=220)
K3.set_hyperparams_bounds([70,300],name='l')
K3.set_hyperparams_bounds([1e-5,1],name='sigma')
K = K2 * K3 + K1
tec_dd_smoothed = np.zeros([Na,Nt,Nd],dtype=float)
tec_dd_std = np.zeros([Na,Nt,Nd],dtype=float)
sc_dd_smoothed = np.zeros([Na,Nt,Nd],dtype=float)
sc_dd_std = np.zeros([Na,Nt,Nd],dtype=float)
for i in range(Na):
for k in range(Nd):
log.info("Working on {} | {}".format(antenna_labels[i],patch_names[k]))
slices = range(0,Nt,num_opt>>1)
count = np.zeros(Nt)
tec_m = np.mean(tec_dd[i,:,k])
scalarphase_dd[i,:,k] = phase_unwrapp1d(scalarphase_dd[i,:,k])
for s in slices:
start = s
stop = min(Nt,start+num_opt)
X = times[start:stop]
count[start:stop] += 1
y = tec_dd[i,start:stop,k]-tec_m
K = opt_kernel(X,y, K, sigma_y=sigma_y, n_random_start=1)
log.info(K)
ystar,std = smooth_data(X, y, K, sigma_y = 0)
ystar += tec_m
tec_dd_smoothed[i,start:stop,k] += ystar
tec_dd_std[i,start:stop,k] += std**2
y = scalarphase_dd[i,start:stop,k]
K = opt_kernel(X,y, K, sigma_y=sigma_y, n_random_start=1)
log.info(K)
ystar,std = smooth_data(X, y, K, sigma_y = 0)
sc_dd_smoothed[i,start:stop,k] += ystar
sc_dd_std[i,start:stop,k] += std**2
tec_dd_smoothed[i,:,k] /= count
tec_dd_std[i,:,k] /= count
sc_dd_smoothed[i,:,k] /= count
sc_dd_std[i,:,k] /= count
np.sqrt(tec_dd_std[i,:,k],out=tec_dd_std[i,:,k])
np.sqrt(sc_dd_std[i,:,k],out=sc_dd_std[i,:,k])
figname=os.path.join(diagnostic_folder,"tec_bayes_smoothed_{}_{}.png".format(antenna_labels[i],patch_names[k]))
plot_bayes_smoothed(times, tec_dd[i,:,k], tec_dd_smoothed[i,:,k], tec_dd_std[i,:,k],
figname,antenna_labels[i],patch_names[k],type='tec')
figname=os.path.join(diagnostic_folder,"scalarphase_bayes_smoothed_{}_{}.png".format(antenna_labels[i],patch_names[k]))
plot_bayes_smoothed(times, scalarphase_dd[i,:,k], sc_dd_smoothed[i,:,k], sc_dd_std[i,:,k],
figname,antenna_labels[i],patch_names[k],type='cs')
f_dd.close()
os.system("cp {} {}".format(dd_file,os.path.join(output_folder,dd_file.split('/')[-1].replace('.hdf5','_bayes_smoothed.hdf5'))))
f_dd = h5py.File(os.path.join(output_folder,dd_file.split('/')[-1].replace('.hdf5','_bayes_smoothed.hdf5')),"r",libver="earliest")
# transpose back to the file's (time, antenna, direction) axis order
f_dd['/sol000/tec000/val'][:,:,:,0] = np.einsum("ijk->jik",tec_dd_smoothed)
f_dd['/sol000/scalarphase000/val'][:,:,:,0] = np.einsum("ijk->jik",sc_dd_smoothed)
f_dd.close()
if __name__=='__main__':
dd_file = "../../data/NsolutionsDDE_2.5Jy_tecandphasePF_correctedlosoto.hdf5"
smooth_dd_tec(dd_file,'output_bayes_smoothing')
```
#### File: ionotomo/tests/test_cho_solver.py
```python
import numpy as np
from ionotomo.utils.cho_solver import *
def test_cho_solver():
from scipy.linalg.lapack import dpotrs
N = 5
y = np.random.uniform(size=N)
Y = np.random.uniform(size=[N,2])
a = np.random.uniform(size=[N,N])
a = a.T.dot(a)
L = np.linalg.cholesky(a)
X = cho_solve(L,Y,False)
xa = cho_solve(L,Y[:,0],False)
xb = cho_solve(L,Y[:,1],False)
assert np.alltrue(np.isclose(X[:,0],xa)),"a fails"
assert np.alltrue(np.isclose(X[:,1],xb)),"b fails"
#with y vec mod (no copy)
#built in
#x1 = cho_solve((L,True),y)
x1 = dpotrs(L,y,1,0)
x2 = cho_solve(L,y,False)
#x1 = dpotrs(L,y,1,1)
assert np.all(np.isclose(x1[0],x2))
# times1 = []
# times2 = []
# Ns = 10**np.linspace(1,4,10)
# from time import clock
# for N in Ns:
# N = int(N)
# y = np.random.uniform(size=N)
# a = np.random.uniform(size=[N,N])
# a = a.T.dot(a)
# L = np.linalg.cholesky(a)
# t1 = clock()
# #x1 = cho_solve((L,True),y)
# x1 = dpotrs(L,y,1,0)
# times1.append(clock()-t1)
# t1 = clock()
# x2 = cho_solve(L,y,False)
# times2.append(clock()-t1)
# import pylab as plt
# plt.plot(Ns,times1,label='scipy.linalg.cho_solve')
# plt.plot(Ns,times2,label='my choSolve')
# plt.yscale('log')
# plt.xscale('log')
# plt.legend()
# plt.show()
```
#### File: ionotomo/tests/test_forward_equation.py
```python
import numpy as np
import pylab as plt
from ionotomo import *
from time import clock
def test_forward_equation():
datapack = generate_example_datapack()
antennas,antenna_labels = datapack.get_antennas(ant_idx = -1)
patches, patch_names = datapack.get_directions(dir_idx = -1)
times,timestamps = datapack.get_times(time_idx=-1)
Na = len(antennas)
Nt = len(times)
Nd = len(patches)
fixtime = times[Nt>>1]
phase = datapack.get_center_direction()
array_center = datapack.radio_array.get_center()
ne_tci = create_initial_model(datapack)
rays = calc_rays(antennas,patches,times, array_center, fixtime, phase, ne_tci, datapack.radio_array.frequency, True, 1000, ne_tci.nz)
m_tci = ne_tci.copy()
K_ne = np.median(m_tci.M)
m_tci.M = np.log(m_tci.M/K_ne)
#print(m_tci.M)
i0 = 0
d = forward_equation(rays,K_ne,m_tci,i0)
assert d.shape[0] == Na and d.shape[1] == Nt and d.shape[2] == Nd
assert not np.any(np.isnan(d))
d_dask = forward_equation_dask(rays,K_ne, m_tci,i0)
assert np.all(d==d_dask)
t1 = clock()
res = [forward_equation(rays,K_ne,m_tci,i0) for i in range(10)]
print("Average time (serial) {}s".format((clock() - t1)/10.))
t1 = clock()
res = [forward_equation_dask(rays,K_ne,m_tci,i0) for i in range(10)]
print("Average time (dask) {}s".format((clock() - t1)/10.))
```
#### File: ionotomo/tests/test_ionosphere.py
```python
from ionotomo import *
import numpy as np
import pylab as plt
def test_chapman_layers(plot=True):
zenith = 45.
heights = np.linspace(-10,2000,1000)
ne = iri.a_priori_model(heights,zenith)
if plot:
import pylab as plt
print("Plotting iri with zenith angles 0,20,45,65,90")
for zenith in [0,20,45,65,90]:
ne = iri.a_priori_model(heights,zenith)
plt.plot(heights,ne)
plt.xlabel('height (km)')
plt.ylabel('ne [m^-3]')
plt.yscale('log')
plt.show()
```
#### File: ionotomo/tomography/interpolation.py
```python
import tensorflow as tf
import numpy as np
from ionotomo.settings import TFSettings
import itertools
def _bisection(array,value):
'''Find the index of value in sorted array.
array : Tensor (N,)
absissa to sort into
value : Tensor (M,)
The values to sort into array
Note: inserts on left side.
'''
array = tf.cast(array,TFSettings.tf_float)
value = tf.cast(value,TFSettings.tf_float)
M = tf.shape(value)[0]
N = tf.shape(array)[0]
def _cond(jl,ju,value,array):
"""Loop for bin search
ju_l : Tensor (M,)
ju - jl
"""
cond_vec = (ju - jl) > 1
return tf.reduce_any(cond_vec)
def _body(jl,ju,value,array):
jm=tf.truncatediv(ju+jl, 2)# compute a midpoint,
#jm = tf.Print(jm,[jl,jm,ju])
value_ = tf.gather(array,jm)
#array[jl] <= value < array[ju]
#value_ = tf.Print(value_,[value_,value])
jl = tf.where(value >= value_, jm, jl)
ju = tf.where(value < value_, jm, ju)
return (jl, ju, value, array)
jl = tf.zeros((M,),M.dtype)
ju = (N-1)*tf.ones((M,),N.dtype)
jl, ju, _, _ = tf.while_loop(_cond, _body, (jl,ju,value,array), back_prop = False)
jl = tf.where(value < array[0],-tf.ones_like(jl), jl)
jl = tf.where(value >= array[-1], (N-1)*tf.ones_like(jl), jl)
return jl
#def _ndim_coords_from_arrays(points):
# """
# Convert a tuple of coordinate arrays to a (..., ndim)-shaped array.
# """
# if isinstance(points, tuple) and len(points) == 1:
# # handle argument tuple
# points = points[0]
# if isinstance(points, tuple):
# p = tf.meshgrid(points,indexing='ij')
## points = tf.zeros_like(p[0])
## points = tf.expand_dims(points,-1)
## points = tf.tile(points,(1,)*len(p) + (len(p),))
# points = tf.stack(p,-1)
# else:
# raise ValueError("coords should be tuple")
# return points
class RegularGridInterpolator(object):
"""
Batched interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of Tensors of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : Tensor, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
Values outside the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
"""
def __init__(self, points, values, method="linear"):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
#assume type already tf_float for both
self.ndim = len(points)
self.grid = tuple([tf.cast(p,TFSettings.tf_float) for p in points])
self.values = tf.cast(values,TFSettings.tf_float)
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : batched Tensor of shape (B1,...,Bb, ndim)
or tuple of ndim coords ( (B1,..., Bb)_1, ..., (B1,...,Bb)_ndim )
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
if isinstance(xi,(list,tuple)):
xi = tf.stack([tf.cast(x,TFSettings.tf_float) for x in xi],axis=-1)
else:
xi = tf.cast(xi,TFSettings.tf_float)
#xi = _ndim_coords_from_arrays(xi)
xi_shape = tf.shape(xi)
xi = tf.reshape(xi,(-1, self.ndim))
indices, norm_distances, out_of_bounds = self._find_indices(tf.transpose(xi))
#indices[0] = tf.Print(indices[0], [indices,norm_distances,out_of_bounds])
if method == "linear":
result = self._evaluate_linear(indices, norm_distances, out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices, norm_distances, out_of_bounds)
return tf.reshape(result,xi_shape[:-1])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
edges = itertools.product(*[[i, i + 1] for i in indices])
values = tf.zeros(tf.shape(indices[0]),TFSettings.tf_float)
for edge_indices in edges:
with tf.control_dependencies([values]):
weight = tf.ones(tf.shape(indices[0]),TFSettings.tf_float)
for k in range(self.ndim):
ei = edge_indices[k]
i = indices[k]
yi = norm_distances[k]
with tf.control_dependencies([weight]):
weight *= tf.where(ei == i, 1 - yi, yi)
values += tf.gather_nd(self.values,tf.transpose(edge_indices)) * weight
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(tf.where(yi <= .5, i, i + 1))
return tf.gather_nd(self.values,tf.stack(idx_res,axis=-1))
def _find_indices(self, xi):
"""Find the index of abcissa for each coord in xi
xi : Tensor shape (ndim, M)
"""
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
M = tf.shape(xi)[1]
out_of_bounds = tf.zeros((M,), dtype=TFSettings.tf_int)
xi = tf.unstack(xi,axis=0)
control = None
# iterate through dimensions
for d in range(self.ndim):
with tf.control_dependencies(control):
x = xi[d]
grid = self.grid[d]
i = _bisection(grid, x)
#i = tf.Print(i,[i])
i = tf.where(i < 0, tf.zeros_like(i),i)
ub = tf.shape(grid)[0] - 2
i = tf.where(i > ub, tf.ones_like(i) * ub, i)
indices.append(i)
norm_distances.append((x - tf.cast(tf.gather(grid,i),TFSettings.tf_float)) / tf.cast(tf.gather(grid,i+1) - tf.gather(grid,i), TFSettings.tf_float))
out_of_bounds += tf.cast(x < grid[0],TFSettings.tf_int)
out_of_bounds += tf.cast(x > grid[-1],TFSettings.tf_int)
control = [indices[-1],norm_distances[-1],out_of_bounds]
return indices, norm_distances, tf.cast(out_of_bounds,TFSettings.tf_bool)
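def _regular_grid_interpolator_usage_sketch():
    # A minimal, hedged usage sketch (illustrative only; it mirrors the unit test
    # below and assumes the tf/np imports and TF1-style sessions used in this module).
    points = tuple(tf.constant(np.linspace(0., 1., 50)) for _ in range(2))
    values = tf.constant(np.random.normal(size=(50, 50)))
    query = tuple(np.random.uniform(size=10) for _ in range(2))
    interp = RegularGridInterpolator(points, values, method='linear')
    with tf.Session() as sess:
        return sess.run(interp(query))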
def test_regular_grid_interpolator():
points = [np.linspace(0,1,100) for i in range(3)]
M = np.random.normal(size=[100]*3)
y = tuple([ np.random.uniform(size=100) for i in range(3)])
r = RegularGridInterpolator(tuple([tf.constant(p) for p in points]),tf.constant(M),method='linear')
from scipy.interpolate import RegularGridInterpolator as rgi
r_ = rgi(points,M, method='linear',fill_value=None,bounds_error=False)
sess = tf.Session()
u = sess.run(r(y))
sess.close()
u_ = r_(y)
# import pylab as plt
# print(u-u_)
# plt.hist(u-u_,bins=100)
# plt.show()
#print(u-u_)
assert np.all(np.isclose(u,u_,atol=1e-4))
def test_bisection():
array = np.linspace(0,1,100)
values = np.linspace(-1,2,100)
#print(values[34])
i_np = np.searchsorted(array,values)-1
#print(array[i_np]<values)
sess = tf.Session()
i_tf = sess.run(_bisection(array,values))
# print(i_tf,i_np)
# print(values[np.where(i_np!=i_tf)])
# print(array[i_tf[np.where(i_np!=i_tf)]])
#print(array[i_np],array[i_tf],values)
# assert np.all(i_np == i_tf)
sess.close()
if __name__ == '__main__':
test_bisection()
test_regular_grid_interpolator()
```
#### File: ionotomo/utils/extract_antenna_config.py
```python
import pyrap.tables as pt
import numpy as np
import time
def generate(msfile,outfile):
t = pt.table("{0}/ANTENNA".format(msfile))
names = t.getcol('NAME')
position = t.getcol('POSITION')
diam = t.getcol('DISH_DIAMETER')
    f = open(outfile,'w')
if f is None:
print("Failed to create outfile")
return
f.write("# observatory=LOFAR\n# coordsys=XYZ\n# datum=WGS84\n\n")
f.write("# created from {0}\n".format(msfile))
f.write("# created on {0} by <NAME>\n\n".format(time.strftime("%d-%m-%Y",time.localtime())))
f.write("#X Y Z Diam Station\n")
i = 0
while i < len(names):
f.write("{0:0.3f} {1:0.3f} {2:0.3f} {3:0.3f} {4}\n".format(position[i,0],position[i,1],position[i,2],diam[i],names[i]))
i += 1
f.close()
if __name__=='__main__':
ms = "/net/para11/data1/mandal/lockman294287/products/L294287_SBgr016-10_uv.dppp.pre-cal.ms"
outfile = "arrays/lofar.hba.antenna.cfg"
generate(ms,outfile)
```
#### File: ionotomo/utils/gaussian_process_expected_improvement.py
```python
import numpy as np
import pylab as plt
from scipy.special import erf
from scipy.integrate import simps
from scipy.linalg import cho_solve, solve_triangular
#from ChoSolver import choSolve, choBackSubstitution
def styblinsky(x):
return (x[0]**4 - 16*x[0]**2 + 5*x[0] + x[1]**4 - 16*x[1]**2 + 5*x[1])/2.
def rosenbrock(x):
a = 1
b = 100
return (a-x[0])**2 + b*(x[1] - x[0]**2)**2
def complexInjunction(x):
Nm = len(x)
a = np.arange(Nm)
A = np.outer(np.cos(np.arange(Nm)),np.sin(1j*np.arange(Nm))-Nm)
y = np.exp(1j*A.dot(x))
return -np.abs((np.min(y)/np.max(y)).real)
def mean(x):
#return styblinsky(x)
return np.log10(1+rosenbrock(x))# + rosenbrock((x-1))
return np.sqrt((x[0]-0.5)**2 + (x[1])**2)
def M52(XX,theta):
theta0 = theta[0]
nu = theta[1]
lengthScales = theta[2:]
N = XX.shape[0]
r2 = np.zeros([N,N],dtype=np.double)
K = np.zeros([N,N],dtype=np.double)
i = 0
while i < len(lengthScales):
r2 += (XX[:,i,:,i]/lengthScales[i])**2
i += 1
K += r2*(5./3.)
np.sqrt(5*r2,out=r2)
K += 1+r2
np.exp(-r2,out=r2)
K *= r2
K *= theta0
return K
def expK(XX,theta):
theta0 = theta[0]
nu = theta[1]
lengthScales = theta[2:]
N = XX.shape[0]
K = np.zeros([N,N],dtype=np.double)
i = 0
while i < len(lengthScales):
K -= (XX[:,i,:,i]/lengthScales[i])**2
i += 1
K /= 2.
np.exp(K,out=K)
K *= theta0
#K += nu**2*np.eye(N)
return K
def expK_derivative(XX,theta):
theta0 = theta[0]
nu = theta[1]
lengthScales = theta[2:]
N = XX.shape[0]
Kdiff = np.zeros([N,N,len(theta)],dtype=np.double)
K = np.zeros([N,N],dtype=np.double)
#0 -> exp(-r^2)
#1 -> 2*eye(N)*nu
#2: ->-2r*eye(-r^2)*-2*(x1[i]-x2[i])^2/(lengthScale[i])^3
i = 0
while i < len(lengthScales):
Kdiff[:,:,0] -= (XX[:,i,:,i]/lengthScales[i])**2
Kdiff[:,:,2+i] += 4*XX[:,i,:,i]**2/lengthScales[i]**3
i += 1
#*r
#np.rollaxis(K[:,:,2:],2,0) *= np.sqrt(-Kdiff[:,:,0])
K /= 2.
np.exp(K,out=K)
K *= theta0
K += nu**2*np.eye(N)
return K
class Prior(object):
def __init__(self, **kwargs):
for key in kwargs.keys():
setattr(self,key,kwargs[key])
def domain(self):
'''Get domain of prior'''
return None
def sample(self,N=1):
'''get a sample from the distribution'''
return None
def pdf(self,x):
'''get the pdf at x'''
return None
class UniformPrior(Prior):
def __init__(self,xmin,xmax):
d = {"xmin":float(min(xmin,xmax)),"xmax":float(max(xmin,xmax)),"width":float(max(xmin,xmax) - min(xmin,xmax))}
super(UniformPrior,self).__init__(**d)
def sample(self,N=1):
return np.random.uniform(low=self.xmin,high=self.xmax,size=N)
def pdf(self,x):
out = np.ones_like(x)
out /= self.width
out[x>self.xmax] *= 0.
out[x<self.xmin] *= 0.
return out
class NormalPrior(Prior):
def __init__(self,mean,std):
d = {"mean":float(mean),"std":float(std)}
super(NormalPrior,self).__init__(**d)
def sample(self,N=1):
return self.mean + self.std*np.random.normal(size=N)
def pdf(self,x):
return np.exp(-(x - self.mean)**2/self.std**2/2.)/np.sqrt(2*np.pi)/self.std
class LogNormalPrior(Prior):
def __init__(self,mean,std):
d = {"mean":float(mean),"std":float(std)}
super(LogNormalPrior,self).__init__(**d)
def sample(self,N=1):
return np.random.lognormal(mean=self.mean, sigma=self.std, size=N)
def pdf(self,x):
return np.exp(-(np.log(x) - self.mean)**2/self.std**2/2.)/np.sqrt(2*np.pi)/self.std/x
class ClassPrior(Prior):
def __init__(self,numClasses,weights=None):
if weights is None:
weights = np.ones(numClasses,dtype=np.double)/numClasses
d = {"numClasses":float(numClasses),"weights":float(weights)}
super(ClassPrior,self).__init__(**d)
def sample(self,N=1):
samples = np.zeros(N,dtype=np.int64)
i = 0
while i < N:
c = -1
while c == -1:
c_ = np.random.randint(self.numClasses)
if np.random.uniform() < self.weights[c_]:
c = c_
samples[i] = c
i += 1
return samples
def pdf(self,x):
return self.weights[np.int64(x)]
class DiscretePrior(Prior):
def __init__(self,values,prior=None):
if prior is None:
prior = UniformPrior(np.min(values),np.max(values))
d = {"values":values,"prior":prior}
super(DiscretePrior,self).__init__(**d)
def sample(self,N=1):
samples = np.zeros(N,dtype=np.int64)
i = 0
while i < N:
c = -1
while c == -1:
c_ = np.random.randint(len(self.values))
if np.random.uniform() < self.prior.pdf(self.values[c_]):
c = c_
samples[i] = self.values[c]
i += 1
return samples
def pdf(self,x):
return self.prior.pdf(x)
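def _prior_usage_sketch():
    # A minimal, hedged sketch (not part of the original experiment): draw samples
    # from the priors defined above and evaluate their densities at those samples.
    uniform = UniformPrior(-1., 1.)
    normal = NormalPrior(0., 1.)
    x = uniform.sample(5)
    return uniform.pdf(x), normal.pdf(x)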
if __name__ == '__main__':
def sampleX(xPriors,N):
X = np.zeros([N,len(xPriors)],dtype=np.double)
for i in range(len(xPriors)):
X[:,i] = xPriors[i].sample(N)
return X
def computeAquisition(Xstar,X,y,thetaPriors,iteration=1):
Xstar = np.atleast_2d(Xstar)
shape = []
indices = []
for thetaPrior in thetaPriors:
ar = thetaPrior.values
shape.append(len(ar))
indices.append(np.arange(len(ar)))
n = len(thetaPriors)
postTheta = np.zeros(shape,dtype=np.double)
COMP = np.zeros(shape,dtype=np.double)
DF = np.zeros(shape,dtype=np.double)
LML = np.zeros(shape,dtype=np.double)
Xboth = np.vstack([X,Xstar])
XXboth = np.subtract.outer(Xboth,Xboth)
arg = np.argsort(y)
xbest = X[arg[0],:]
fbest = y[arg[0]]
aq_full = np.zeros([Xstar.shape[0]]+shape,dtype=np.double)
for idx in product(*indices):
theta = np.zeros(len(indices),dtype=np.double)
for i in range(len(idx)):
theta[i] = thetaPriors[i].values[idx[i]]
nu = theta[1]
#Kboth = expK(XXboth,theta)
Kboth = M52(XXboth,theta)
K00 = Kboth[0:X.shape[0],0:X.shape[0]]
K00 += nu**2*np.eye(X.shape[0])
K01 = Kboth[0:X.shape[0],X.shape[0]:]
K10 = K01.T
K11 = Kboth[X.shape[0]:,X.shape[0]:]
L = np.linalg.cholesky(K00)
            alpha = cho_solve((L, True), y)  # L from np.linalg.cholesky is lower triangular
#mu[j] = sum_i alpha[i]K01[i,j]
mu = K10.dot(alpha)
#cov = K11 - K10.(K00+sigma)(^-1).K01
            V = solve_triangular(L, K01, lower=True)  # forward substitution: V = L^{-1} K01
std = np.sqrt(np.diag(K11 - V.T.dot(V)))
gamma = (fbest - mu)/std
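            # Expected Improvement in closed form: EI(x) = std * (gamma * Phi(gamma) + phi(gamma)),
            # with gamma = (fbest - mu)/std; Phi is the standard normal CDF (the `cum` term
            # below, computed from erf) and phi its density.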
#POI
cum = (1 + erf(gamma/np.sqrt(2)))/2.
#return
#EI
aq = std*(gamma*cum + np.exp(-gamma**2/2)/np.sqrt(2*np.pi))
#aq = (1./(iteration+1))*std - mu
datafit = -y.dot(alpha)/2.
complexity = np.sum(np.log(np.diag(L)))
marLik = np.exp(datafit - complexity - np.log(2*np.pi)*n/2.)
COMP[idx] = complexity
DF[idx] = datafit
LML[idx] = np.log(marLik)
prior = 1.
for t,tp in zip(theta,thetaPriors):
prior *= tp.pdf(t)
postTheta[idx] = marLik * prior
            aq_full[(slice(0, Xstar.shape[0]),) + tuple(idx)] = aq*postTheta[idx]
prob = np.copy(postTheta)
for axis in range(len(thetaPriors)):
aq_full = simps(aq_full,thetaPriors[len(thetaPriors)-axis-1].values,axis=len(thetaPriors)-axis)
prob = simps(prob,thetaPriors[len(thetaPriors)-axis-1].values,axis=len(thetaPriors)-axis-1)
aq_full /= prob
postTheta /= prob
return aq_full,postTheta
def maximizeAquisition(xPriors,X,y,thetaPriors=None,iteration=0):
        '''Maximize the Expected Improvement acquisition (here via a Nelder-Mead style
        simplex search) while integrating over the acquisition hyperparameters.
        '''
if thetaPriors is None:
#Set up thetaPriors
res = 10
#theta0 ~ max(y) - min(y), uniform, log spacing 4 mag
m2 = np.max(y) - np.min(y)
m1 = m2/1e4
theta0Prior = DiscretePrior(10**np.linspace(np.log10(m1),np.log10(m2),res),
prior=UniformPrior(m1,m2))
# nu ~ obs noise. similarly but scaled down by 10%
m2 = (np.max(y) - np.min(y))/10.
m1 = (m2/1e4)/10.
nuPrior = DiscretePrior(10**np.linspace(np.log10(m1),np.log10(m2),res),
prior=UniformPrior(m1,m2))
thetaPriors = [theta0Prior,nuPrior]
for i in range(len(xPriors)):
#handles uniform x priors right now
m2 = (xPriors[i].xmax - xPriors[i].xmin)*10.
m1 = (xPriors[i].xmax - xPriors[i].xmin)/10.
lsPrior = DiscretePrior(10**np.linspace(np.log10(m1),np.log10(m2),res),
prior=UniformPrior(m1,m2))
thetaPriors.append(lsPrior)
for thetaPrior in thetaPriors:
assert isinstance(thetaPrior,DiscretePrior), "one theta prior is not discrete"
from itertools import product
#First sample points to initialize maximization
#create aquisition at x
Xstar = sampleX(xPriors,max(2,len(thetaPriors))**max(2,len(xPriors)))
Xstar = sampleX(xPriors,10**max(2,len(xPriors)))
arg = np.argsort(y)
xbest = X[arg[0],:]
fbest = y[arg[0]]
aq_all = []
Xstar_all = []
N = len(y)
aq_init,postTheta = computeAquisition(Xstar,X,y,thetaPriors,iteration)
aq_all.append(aq_init)
Xstar_all.append(Xstar)
arg = np.argsort(aq_init)
Xsimp = Xstar[arg[-len(xPriors)-1:],:]
aq_simp = aq_init[arg[-len(xPriors)-1:]]
#min to max
alpha,gamma,rho,sigma = 1.,2.,0.5,0.5
iter = 0
NonCovergent = True
while NonCovergent:
if iter >= 5:
break
iter += 1
#order for min (flip aq sign)
arg = np.argsort(-aq_simp)
aq_simp = aq_simp[arg]
Xsimp = Xsimp[arg,:]
#print(Xsimp,aq_simp)
#centorid except last
x0 = np.mean(Xsimp[:-1,:],axis=0)
#reflection
xr = x0 + alpha*(x0 - Xsimp[-1,:])
aq_r,postTheta = computeAquisition(xr,X,y,thetaPriors,iteration)
#print(xr,aq_r)
aq_all.append(aq_r)
Xstar_all.append(xr)
if -aq_simp[0] <= -aq_r and -aq_r < -aq_simp[-2]:
Xsimp[-1,:] = xr
aq_simp[-1] = aq_r
continue
#expansion
if -aq_r < -aq_simp[0]:
xe = x0 + gamma*(xr - x0)
aq_e,postTheta = computeAquisition(xe,X,y,thetaPriors,iteration)
aq_all.append(aq_e)
Xstar_all.append(xe)
if -aq_e < -aq_r:
Xsimp[-1,:] = xe
aq_simp[-1] = aq_e
continue
else:
Xsimp[-1,:] = xr
aq_simp[-1] = aq_r
continue
#contractions
xc = x0 + rho*(Xsimp[-1,:] - x0)
aq_c,postTheta = computeAquisition(xc,X,y,thetaPriors,iteration)
aq_all.append(aq_c)
Xstar_all.append(xc)
if -aq_c < -aq_simp[-1]:
Xsimp[-1,:] = xc
aq_simp[-1] = aq_c
continue
#shrink
for i in range(Xsimp.shape[0]):
Xsimp[i,:] = Xsimp[0,:] + sigma*(Xsimp[i,:] - Xsimp[0,:])
xbest_nm = Xsimp[0,:]
#print(xbest_nm)
aq_all = np.hstack(aq_all)
Xstar = np.vstack(Xstar_all)
arg = np.argsort(aq_all)
xbest = Xstar[arg[-1],:]
if True:
vmin = np.min(aq_all)
vmax = np.max(aq_all)
plt.figure()
sc=plt.scatter(Xstar[:,0],Xstar[:,1],c=aq_all,
vmin=vmin,vmax=vmax,alpha=0.6)
plt.scatter(xbest[0],xbest[1],c='red',alpha=0.6)
plt.scatter(xbest_nm[0],xbest_nm[1],c='red',marker='*',alpha=0.6)
plt.colorbar(sc)
plt.show()
fig,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)
ax1.plot(thetaPriors[0].values,
simps(simps(simps(postTheta,thetaPriors[3].values,axis=3),
thetaPriors[2].values,axis=2),
thetaPriors[1].values,axis=1))
ax1.set_xlabel("theta0")
ax2.plot(thetaPriors[1].values,
simps(simps(simps(postTheta,thetaPriors[3].values,axis=3),
thetaPriors[2].values,axis=2),
thetaPriors[0].values,axis=0))
ax2.set_xlabel("nu")
ax3.plot(thetaPriors[2].values,
simps(simps(simps(postTheta,thetaPriors[3].values,axis=3),
thetaPriors[1].values,axis=1),
thetaPriors[0].values,axis=0))
ax3.set_xlabel("ls0")
ax4.plot(thetaPriors[3].values,
simps(simps(simps(postTheta,thetaPriors[2].values,axis=2),
thetaPriors[1].values,axis=1),
thetaPriors[0].values,axis=0))
ax4.set_xlabel("ls1")
plt.show()
return xbest
#Set up data
np.random.seed(12344)
nu = 0.01
xPriors = [UniformPrior(-1,1.5),
UniformPrior(-1,1.5)]
thetaPriors = [DiscretePrior(10**np.linspace(np.log10(0.1),np.log10(5),10),prior=UniformPrior(0,5)),
DiscretePrior(10**np.linspace(np.log10(0.001),np.log10(0.5),10),prior=LogNormalPrior(np.log(0.1),np.log(0.5/0.01))),
DiscretePrior(np.linspace(0.5,6,10),prior=LogNormalPrior(np.log(1),np.log(6/0.5))),
DiscretePrior(np.linspace(0.5,6,10),prior=LogNormalPrior(np.log(1),np.log(6/0.5)))]
X,Y = np.meshgrid(np.linspace(xPriors[0].xmin,xPriors[0].xmax,100),
np.linspace(xPriors[1].xmin,xPriors[1].xmax,100),
indexing='ij')
A = []
for x,y in zip(X.flatten(),Y.flatten()):
A.append(mean(np.array([x,y])))
Niter = 10
minidx = np.zeros([4,Niter],dtype=np.double)
for r in range(4):
score = []
#plt.figure()
c1 = plt.contour(X,Y,np.array(A).reshape(X.shape),20)
plt.clabel(c1,inline=1,fontsize=10)
plt.title("True")
plt.xlabel("x")
plt.ylabel("y")
arg = np.argsort(A)
plt.scatter(X.flatten()[arg[0]],Y.flatten()[arg[0]],zorder=20,c='red',marker='*',alpha=1)
#sample corners and center
xCorners = []
for xPrior in xPriors:
xCorners.append([xPrior.xmin,xPrior.xmax])
from itertools import product
Xdata = []
y = []
for x in product(*xCorners):
Xdata.append(np.array(x))
y.append(mean(Xdata[-1]) + nu*np.random.normal())
Xdata.append(np.mean(np.array(xCorners),axis=1))
y.append(mean(Xdata[-1]) + nu*np.random.normal())
Xdata = np.array(Xdata)
y = np.array(y)
sc=plt.scatter(Xdata[:,0],Xdata[:,1],c=y,vmin=np.min(y),vmax=np.max(y),alpha=0.6)
arg = np.argsort(y)
plt.scatter(Xdata[arg[0],0],Xdata[arg[0],1],c='red',vmin=np.min(y),vmax=np.max(y),alpha=1)
plt.colorbar(sc)
plt.show()
#do iterations to find min
arg = np.argsort(y)
fbest = y[arg[0]]
xprev = Xdata[arg[0]]
i = 0
while i < Niter:
            #maximize the full acquisition surface (Nelder-Mead style simplex search)
xnext = maximizeAquisition(xPriors,Xdata,y,thetaPriors=None,iteration=i)
xprev = xnext
#print(y)
f = mean(xnext) + nu*np.random.normal()
Xdata = np.vstack([Xdata,xnext])
y = np.hstack([y,f])
fbest = np.min(y)
score.append(f)
print(xnext,f,fbest)
i += 1
c1 = plt.contour(X,Y,np.array(A).reshape(X.shape),20)
plt.clabel(c1,inline=1,fontsize=10)
plt.title("True")
plt.xlabel("x")
plt.ylabel("y")
arg = np.argsort(A)
plt.scatter(X.flatten()[arg[0]],Y.flatten()[arg[0]],zorder=20,c='red',marker='*',alpha=1)
sc=plt.scatter(Xdata[:,0],Xdata[:,1],c=y,vmin=np.min(y),vmax=np.max(y),alpha=0.6)
arg = np.argsort(y)
plt.scatter(Xdata[arg[0],0],Xdata[arg[0],1],c='red',vmin=np.min(y),vmax=np.max(y),alpha=1)
plt.colorbar(sc)
plt.show()
plt.plot(score)
plt.ylabel('score (lower better)')
plt.xlabel("iteration")
plt.show()
minidx[r,:] = score
plt.plot(np.mean(minidx,axis=0))
plt.plot(np.mean(minidx,axis=0)+np.std(minidx,axis=0),ls='--')
plt.plot(np.mean(minidx,axis=0)-np.std(minidx,axis=0),ls='--')
plt.show()
``` |
{
"source": "Joshuaalbert/jaxnlds",
"score": 2
} |
#### File: jaxnlds/examples/jones_scalars_data_dtec_hmm.py
```python
from jaxnlds.nlds_smoother import NonLinearDynamicsSmoother
from jaxnlds.forward_updates import TecLinearPhaseNestedSampling
import jax.numpy as jnp
from jax import random, jit, disable_jit
from functools import partial
import pylab as plt
def main():
Gamma0, Omega, Sigma, T, Y_obs, amp, mu0, tec, freqs = generate_data()
hmm = NonLinearDynamicsSmoother(TecLinearPhaseNestedSampling(freqs))
hmm = jit(partial(hmm, tol=1., maxiter=2, omega_window=None, sigma_window=None, momentum=0.,
omega_diag_range=(0, jnp.inf), sigma_diag_range=(0, jnp.inf)))
#
# with disable_jit():
keys = random.split(random.PRNGKey(0), T)
# with disable_jit():
res = hmm(Y_obs, Sigma, mu0, Gamma0, Omega, amp, keys)
print(res.converged, res.niter)
plt.plot(tec, label='true tec')
plt.plot(res.post_mu[:, 0], label='infer tec')
plt.fill_between(jnp.arange(T),
res.post_mu[:, 0] - jnp.sqrt(res.post_Gamma[:, 0, 0]),
res.post_mu[:, 0] + jnp.sqrt(res.post_Gamma[:, 0, 0]),
alpha=0.5)
plt.legend()
plt.show()
plt.plot(jnp.sqrt(res.post_Gamma[:, 0, 0]))
plt.title("Uncertainty tec")
plt.show()
plt.plot(tec - res.post_mu[:, 0], label='infer')
plt.fill_between(jnp.arange(T),
(tec - res.post_mu[:, 0]) - jnp.sqrt(res.post_Gamma[:, 0, 0]),
(tec - res.post_mu[:, 0]) + jnp.sqrt(res.post_Gamma[:, 0, 0]),
alpha=0.5)
plt.title("Residual tec")
plt.legend()
plt.show()
plt.plot(jnp.sqrt(res.Omega[:, 0, 0]))
plt.title("omega")
plt.show()
plt.plot(jnp.mean(jnp.sqrt(jnp.diagonal(res.Sigma, axis2=-2, axis1=-1)), axis=-1))
plt.title("mean sigma")
plt.show()
def generate_data():
T = 1000
tec = jnp.cumsum(15. * random.normal(random.PRNGKey(0),shape=(T,)))
TEC_CONV = -8.4479745e6 # mTECU/Hz
freqs = jnp.linspace(121e6, 168e6, 24)
phase = tec[:, None] / freqs * TEC_CONV
Y = jnp.concatenate([jnp.cos(phase), jnp.sin(phase)], axis=1)
Y_obs = Y + 0.75 * random.normal(random.PRNGKey(1), shape=Y.shape)
# Y_obs[500:550:2, :] += 3. * random.normal(random.PRNGKey(1),shape=Y[500:550:2, :].shape)
Sigma = 0.5 ** 2 * jnp.eye(48)
Omega = jnp.diag(jnp.array([30.]))**2
mu0 = jnp.zeros(1)
Gamma0 = jnp.diag(jnp.array([200.]))**2
amp = jnp.ones_like(phase)
return Gamma0, Omega, Sigma, T, Y_obs, amp, mu0, tec, freqs
if __name__ == '__main__':
main()
```
#### File: jaxnlds/forward_updates/forward_update.py
```python
class ForwardUpdateEquation(object):
@property
def num_control_params(self):
"""
Number of control parameters expected.
Returns: int
"""
raise NotImplementedError()
def neg_elbo(self, *args):
"""
Return the negative ELBO.
Args:
*args:
Returns:
"""
raise NotImplementedError()
def forward_model(self, mu, *control_params):
"""
Return the model data.
Args:
mu: [K]
*control_params: list of any other arrays
Returns:
Model data [N]
"""
raise NotImplementedError()
def E_update(self, prior_mu, prior_Gamma, Y, Sigma, *control_params):
"""
Given the current data and control params as well as a Gaussian prior, return the conditional mean and covariance
of a Gaussian variational posterior.
Args:
prior_mu: [K] prior mean
prior_Gamma: [K,K] prior covariance
Y: [N] observed data
Sigma: [N,N] Observed data covariance
*control_params: list of arrays of arbitrary shape.
Returns:
posterior mean [K]
posterior covariance [K,K]
"""
return prior_mu, prior_Gamma
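import jax.numpy as jnp
class LinearGaussianUpdate(ForwardUpdateEquation):
    """
    A hedged, illustrative subclass (not part of this package): the exact conjugate
    E-step for a linear-Gaussian observation model Y = H @ x + noise, where `H` is
    an assumed [N, K] design matrix supplied by the user.
    """
    def __init__(self, H):
        self.H = H
    @property
    def num_control_params(self):
        return 0
    def forward_model(self, mu, *control_params):
        return self.H @ mu
    def E_update(self, prior_mu, prior_Gamma, Y, Sigma, *control_params):
        # Standard Gaussian conditioning (Kalman-style update).
        S = self.H @ prior_Gamma @ self.H.T + Sigma
        K = prior_Gamma @ self.H.T @ jnp.linalg.inv(S)
        post_mu = prior_mu + K @ (Y - self.H @ prior_mu)
        post_Gamma = prior_Gamma - K @ self.H @ prior_Gamma
        return post_mu, post_Gamma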
```
#### File: jaxnlds/optimize/b_bfgs_minimize.py
```python
import jax
import jax.numpy as jnp
from jax.lax import while_loop
from .line_search import line_search
from .bfgs_minimize import BFGSResults
from typing import NamedTuple, Optional, Tuple
def fmin_b_bfgs(func, x0, args=(), options=None):
"""
The BFGS algorithm from
Algorithm 6.1 from Wright and Nocedal, 'Numerical Optimization', 1999, pg. 136-143
with bounded parameters, using the active set approach from,
<NAME>., <NAME>., <NAME>., & <NAME>. (1995).
'A Limited Memory Algorithm for Bound Constrained Optimization.'
SIAM Journal on Scientific Computing, 16(5), 1190–1208.
doi:10.1137/0916069
Notes:
We utilise boolean arithmetic to avoid jax.cond calls which don't work on accelerators.
A side effect is that we perform more gradient evaluations than scipy's BFGS
func: callable
Function of the form f(x) where x is a flat ndarray and returns a real scalar. The function should be
composed of operations with vjp defined. If func is jittable then fmin_bfgs is jittable. If func is
not jittable, then _nojit should be set to True.
x0: ndarray
initial variable
args: tuple, optional
Extra arguments to pass to func as func(x,*args)
options: Optional dict of parameters
maxiter: int
Maximum number of evaluations
norm: float
Order of norm for convergence check. Default inf.
        gtol: float
            Terminates minimization when |grad|_norm < gtol
ls_maxiter: int
Maximum number of linesearch iterations
bounds: 2-tuple of two vectors specifying the lower and upper bounds.
e.g. (l, u) where l and u have the same size as x0. For parameters x_i without constraints the
corresponding l_i=-jnp.inf and u_i=jnp.inf. Specifying l=None or u=None means no constraints on that
side.
Returns: BFGSResults
"""
if options is None:
options = dict()
maxiter: Optional[int] = options.get('maxiter', None)
norm: float = options.get('norm', jnp.inf)
gtol: float = options.get('gtol', 1e-5)
ls_maxiter: int = options.get('ls_maxiter', 10)
bounds: Tuple[jnp.ndarray, jnp.ndarray] = tuple(options.get('bounds', (None, None)))
state = BFGSResults(converged=False,
failed=False,
k=0,
nfev=0,
ngev=0,
nhev=0,
x_k=x0,
f_k=None,
g_k=None,
H_k=None,
status=None,
ls_status=jnp.array(0))
if maxiter is None:
maxiter = jnp.size(x0) * 200
d = x0.shape[0]
l = bounds[0]
u = bounds[1]
if l is None:
l = -jnp.inf * jnp.ones_like(x0)
if u is None:
u = jnp.inf * jnp.ones_like(x0)
l,u = jnp.where(l<u, l, u), jnp.where(l<u,u, l)
def project(x,l,u):
return jnp.clip(x,l, u)
def get_active_set(x, l, u):
return jnp.where((x==l) | (x==u))
def func_with_args(x):
return func(x, *args)
def get_generalised_Cauchy_point(xk, gk, l, u):
def func(t):
return func_with_args(project(xk - t* gk, l, u))
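    # NOTE: the bound-handling helpers above (project, get_active_set,
    # get_generalised_Cauchy_point) are defined here but are not yet wired into the
    # update loop below, which performs the standard unconstrained BFGS step.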
initial_H = jnp.eye(d)
initial_H = options.get('hess_inv', initial_H)
value_and_grad = jax.value_and_grad(func_with_args)
f_0, g_0 = value_and_grad(x0)
state = state._replace(f_k=f_0, g_k=g_0, H_k=initial_H, nfev=state.nfev + 1, ngev=state.ngev + 1,
converged=jnp.linalg.norm(g_0, ord=norm) < gtol)
def body(state):
p_k = -(state.H_k @ state.g_k)
line_search_results = line_search(value_and_grad, state.x_k, p_k, old_fval=state.f_k, gfk=state.g_k,
maxiter=ls_maxiter)
state = state._replace(nfev=state.nfev + line_search_results.nfev,
ngev=state.ngev + line_search_results.ngev,
failed=line_search_results.failed,
ls_status=line_search_results.status)
s_k = line_search_results.a_k * p_k
x_kp1 = state.x_k + s_k
f_kp1 = line_search_results.f_k
g_kp1 = line_search_results.g_k
# print(g_kp1)
y_k = g_kp1 - state.g_k
rho_k = jnp.reciprocal(y_k @ s_k)
sy_k = s_k[:, None] * y_k[None, :]
w = jnp.eye(d) - rho_k * sy_k
H_kp1 = jnp.where(jnp.isfinite(rho_k),
jnp.linalg.multi_dot([w, state.H_k, w.T]) + rho_k * s_k[:, None] * s_k[None, :], state.H_k)
converged = jnp.linalg.norm(g_kp1, ord=norm) < gtol
state = state._replace(converged=converged,
k=state.k + 1,
x_k=x_kp1,
f_k=f_kp1,
g_k=g_kp1,
H_k=H_kp1
)
return state
state = while_loop(
lambda state: (~ state.converged) & (~state.failed) & (state.k < maxiter),
body,
state)
state = state._replace(status=jnp.where(state.converged, jnp.array(0), # converged
jnp.where(state.k == maxiter, jnp.array(1), # max iters reached
jnp.where(state.failed, jnp.array(2) + state.ls_status,
# ls failed (+ reason)
jnp.array(-1))))) # undefined
return state
```
#### File: jaxnlds/optimize/bfgs_minimize.py
```python
import jax
import jax.numpy as jnp
from jax.lax import while_loop
from .line_search import line_search
from typing import NamedTuple, Optional
class BFGSResults(NamedTuple):
converged: bool # bool, True if minimization converges
failed: bool # bool, True if line search fails
k: int # The number of iterations of the BFGS update.
nfev: int # The total number of objective evaluations performed.
ngev: int # total number of jacobian evaluations
nhev: int # total number of hessian evaluations
x_k: jnp.ndarray # A tensor containing the last argument value found during the search. If the search converged,
# then this value is the argmin of the objective function.
f_k: jnp.ndarray # A tensor containing the value of the objective
# function at the `position`. If the search
# converged, then this is the (local) minimum of
# the objective function.
g_k: jnp.ndarray # A tensor containing the gradient of the objective function at the `final_position`.
# If the search converged the l2-norm of this tensor should be below the tolerance.
H_k: jnp.ndarray # A tensor containing the inverse of the estimated Hessian.
status: int #int describing end state
ls_status: int #int describing ls end state (only means something if ls fails)
def fmin_bfgs(func, x0, args=(), options=None):
"""
The BFGS algorithm from
Algorithm 6.1 from Wright and Nocedal, 'Numerical Optimization', 1999, pg. 136-143
Notes:
We utilise boolean arithmetic to avoid jax.cond calls which don't work on accelerators.
A side effect is that we perform more gradient evaluations than scipy's BFGS
func: callable
Function of the form f(x) where x is a flat ndarray and returns a real scalar. The function should be
composed of operations with vjp defined. If func is jittable then fmin_bfgs is jittable. If func is
not jittable, then _nojit should be set to True.
x0: ndarray
initial variable
args: tuple, optional
Extra arguments to pass to func as func(x,*args)
options: Optional dict of parameters
maxiter: int
Maximum number of evaluations
norm: float
Order of norm for convergence check. Default inf.
        gtol: float
            Terminates minimization when |grad|_norm < gtol
ls_maxiter: int
Maximum number of linesearch iterations
Returns: BFGSResults
"""
if options is None:
options = dict()
maxiter: Optional[int] = options.get('maxiter', None)
norm: float = options.get('norm', jnp.inf)
gtol: float = options.get('gtol', 1e-5)
ls_maxiter: int = options.get('ls_maxiter', 10)
state = BFGSResults(converged=False,
failed=False,
k=0,
nfev=0,
ngev=0,
nhev=0,
x_k=x0,
f_k=None,
g_k=None,
H_k=None,
status=None,
ls_status=jnp.array(0))
if maxiter is None:
maxiter = jnp.size(x0) * 200
d = x0.shape[0]
initial_H = jnp.eye(d)
initial_H = options.get('hess_inv', initial_H)
def func_with_args(x):
return func(x, *args)
value_and_grad = jax.value_and_grad(func_with_args)
f_0, g_0 = value_and_grad(x0)
state = state._replace(f_k=f_0, g_k=g_0, H_k=initial_H, nfev=state.nfev + 1, ngev=state.ngev + 1,
converged=jnp.linalg.norm(g_0, ord=norm) < gtol)
def body(state):
p_k = -(state.H_k @ state.g_k)
line_search_results = line_search(value_and_grad, state.x_k, p_k, old_fval=state.f_k, gfk=state.g_k,
maxiter=ls_maxiter)
state = state._replace(nfev=state.nfev + line_search_results.nfev,
ngev=state.ngev + line_search_results.ngev,
failed=line_search_results.failed,
ls_status=line_search_results.status)
s_k = line_search_results.a_k * p_k
x_kp1 = state.x_k + s_k
f_kp1 = line_search_results.f_k
g_kp1 = line_search_results.g_k
# print(g_kp1)
y_k = g_kp1 - state.g_k
rho_k = jnp.reciprocal(y_k @ s_k)
sy_k = s_k[:, None] * y_k[None, :]
w = jnp.eye(d) - rho_k * sy_k
H_kp1 = jnp.where(jnp.isfinite(rho_k),
jnp.linalg.multi_dot([w, state.H_k, w.T]) + rho_k * s_k[:, None] * s_k[None, :], state.H_k)
converged = jnp.linalg.norm(g_kp1, ord=norm) < gtol
state = state._replace(converged=converged,
k=state.k + 1,
x_k=x_kp1,
f_k=f_kp1,
g_k=g_kp1,
H_k=H_kp1
)
return state
state = while_loop(
lambda state: (~ state.converged) & (~state.failed) & (state.k < maxiter),
body,
state)
state = state._replace(status=jnp.where(state.converged, jnp.array(0),#converged
jnp.where(state.k == maxiter, jnp.array(1),#max iters reached
jnp.where(state.failed, jnp.array(2)+state.ls_status,#ls failed (+ reason)
jnp.array(-1)))))#undefined
return state
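def _fmin_bfgs_usage_sketch():
    # A minimal, hedged usage sketch (illustrative only): minimise a simple quadratic
    # using the options keys documented in the docstring above.
    def quadratic(x):
        return jnp.sum((x - 2.0) ** 2)
    results = fmin_bfgs(quadratic, jnp.zeros(3), options=dict(gtol=1e-6))
    return results.x_k, results.f_k, results.converged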
``` |
{
"source": "Joshuaalbert/jaxns_paper",
"score": 2
} |
#### File: jaxns_paper/second_model/gaussian_mixture_speed_test.py
```python
import argparse
import shutil
from timeit import default_timer
import jax.scipy.linalg as jax_linalg
from jax.scipy.special import logsumexp as jax_logsumexp
from jax import numpy as jnp, vmap, random
import pylab as plt
import numpy as np
from scipy.special import logsumexp as np_logsumexp, ndtri
import scipy.linalg as scipy_linalg
from jaxns.plotting import plot_cornerplot, plot_diagnostics, add_colorbar_to_axes
from jaxns.likelihood_samplers.ellipsoid_utils import ellipsoid_clustering, bounding_ellipsoid, ellipsoid_params
from jax.config import config
config.update("jax_enable_x64", True)
def np_log_normal(x, mean, cov):
L = np.linalg.cholesky(cov)
dx = x - mean
dx = scipy_linalg.solve_triangular(L, dx, lower=True)
return -0.5 * x.size * np.log(2. * np.pi) - np.sum(np.log(np.diag(L))) \
- 0.5 * dx @ dx
def jax_log_normal(x, mean, cov):
L = jnp.linalg.cholesky(cov)
dx = x - mean
dx = jax_linalg.solve_triangular(L, dx, lower=True)
return -0.5 * x.size * jnp.log(2. * jnp.pi) - jnp.sum(jnp.log(jnp.diag(L))) \
- 0.5 * dx @ dx
def plot_log_likelihood(log_likelihood, plot_ellipsoids = True):
theta1 = jnp.linspace(-5, 15, 500)
T1, T2 = jnp.meshgrid(theta1, theta1, indexing='ij')
theta1 = jnp.stack([T1.flatten(), T2.flatten()], axis=1)
lik = vmap(log_likelihood)(theta1).reshape((500, 500))
fig, ax = plt.subplots(1,1,figsize=(5,4))
ax.imshow(jnp.exp(lik).T, cmap='bone_r', origin='lower', extent=(-5,15,-5,15))
ax.set_xlabel(r'$\theta_1$')
ax.set_ylabel(r'$\theta_2$')
ax.grid()
add_colorbar_to_axes(ax, 'bone_r', vmin=0., vmax=jnp.max(jnp.exp(lik)),
label=r"$\mathcal{L}(\theta_1, \theta_2)$")
plt.tight_layout()
fig.savefig('gaussian_mixture_log_likelihood.pdf')
if plot_ellipsoids:
select = jnp.where(lik.flatten() > jnp.percentile(lik.flatten(), 95))[0]
keep = random.shuffle(random.PRNGKey(345326),select)[:1000]
ax.scatter(theta1[keep,0], theta1[keep,1], c='red',marker='.', s=1, label='samples', alpha=0.5)
ax.legend()
log_VS = jnp.log(select.size) - jnp.log(lik.size)
points = theta1[keep, :]
depth=7
K = 2**(depth-1)
cluster_id, ellipsoid_parameters = ellipsoid_clustering(random.PRNGKey(324532), points, depth, log_VS=log_VS)
mu, C = vmap(lambda k: bounding_ellipsoid(points, cluster_id == k))(jnp.arange(K))
radii, rotation = vmap(ellipsoid_params)(C)
theta = jnp.linspace(0., jnp.pi * 2, 100)
x = jnp.stack([jnp.cos(theta), jnp.sin(theta)], axis=0)
for i, (mu, radii, rotation) in enumerate(zip(mu, radii, rotation)):
y = mu[:, None] + rotation @ jnp.diag(radii) @ x
ax.plot(y[0, :], y[1, :], c=plt.cm.jet(i / K), lw=0.5)
mask = cluster_id == i
# plt.scatter(points[mask, 0], points[mask, 1], c=jnp.atleast_2d(plt.cm.jet(i / K)))
plt.tight_layout()
fig.savefig('gaussian_mixture_log_likelihood_with_ellipses.pdf')
plt.show()
def main(ndims, num_live_points, K, do_dynesty, do_polychord, do_multinest, do_jaxns):
prior_mu = np.zeros(ndims)
prior_cov = K**2 * np.diag(np.ones(ndims)) ** 2
data_mu = [np.zeros(ndims) + k for k in range(K)]
jax_data_mu = jnp.stack(data_mu, axis=0)
data_cov1 = np.diag(np.where(np.arange(ndims)==0, 0.5+0.49, 0.5-0.49))
data_cov2 = np.diag(np.where(np.arange(ndims)==0, 0.5-0.49, 0.5+0.49))
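    # Analytic evidence via the Gaussian marginal-likelihood identity:
    #   int N(theta | mu_k, C_j) N(theta | 0, P) dtheta = N(mu_k | 0, C_j + P),
    # summed over the K means and both covariances, then divided by 2K.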
true_logZ = -np.inf
for muk in data_mu:
true_logZ = np.logaddexp(true_logZ,
np.logaddexp(
np_log_normal(muk, 0., data_cov1+prior_cov),
np_log_normal(muk, 0., data_cov2 + prior_cov)))
true_logZ = true_logZ - np.log(2*K)
print("True logZ={}".format(true_logZ))
def np_log_likelihood(theta, **kwargs):
log_prob = -np.inf
# log(a (e^x + e^y)) = log(a) + logaddexp(x, y)
for muk in data_mu:
log_prob = np.logaddexp(log_prob,
np.logaddexp(
np_log_normal(theta, muk, data_cov1),
np_log_normal(theta, muk, data_cov2)))
return log_prob - np.log(2*K)
def jax_log_likelihood(theta, **kwargs):
log_prob1 = vmap(lambda muk: jax_log_normal(theta, muk, data_cov1))(jax_data_mu)
log_prob2 = vmap(lambda muk: jax_log_normal(theta, muk, data_cov2))(jax_data_mu)
log_prob = jnp.logaddexp(log_prob1, log_prob2)
return jax_logsumexp(log_prob, axis=0) - jnp.log(2.*K)
if ndims == 2:
print("Plotting the 2D likelihood of the second model.")
plot_log_likelihood(jax_log_likelihood)
def run_dynesty():
try:
import dynesty
except:
raise ImportError("Dynesty not installed. Run `pip install dynesty`.")
# prior transform (iid standard normal prior)
def prior_transform(u):
"""Transforms our unit cube samples `u` to a standard normal prior."""
return ndtri(u) * np.sqrt(np.diag(prior_cov)) + prior_mu
def counting_function(params):
counting_function.calls += 1
return np_log_likelihood(params)
counting_function.calls = 0
sampler = dynesty.NestedSampler(counting_function,
prior_transform,
ndims,
nlive=num_live_points,
bound='multi',
sample='slice',
slices=5)
t0 = default_timer()
sampler.run_nested(dlogz=0.01)
res = sampler.results
run_time = default_timer() - t0
print("Dynesty result keys: {}".format(list(res.keys())))
logZ = res['logz'][-1]
logZerr = res['logzerr'][-1]
ess = np_logsumexp(res['logwt']) * 2 - np_logsumexp(res['logwt'] * 2)
num_likelihood_evaluations = np.sum(res['ncall'])
score = num_likelihood_evaluations / ess
print(f"Dynesty run time: {run_time}")
print(f"Dynesty log(Z): {logZ} +- {logZerr}")
print(f"Dynesty num_likelihood/ESS: {score}")
return run_time, logZ, logZerr,counting_function.calls
def run_polychord():
try:
import pypolychord
from pypolychord.settings import PolyChordSettings
from pypolychord.priors import UniformPrior, GaussianPrior
except:
raise ImportError("Polychord not installed.\n"
"Run `git clone https://github.com/PolyChord/PolyChordLite.git\n"
"cd PolyChordLite\npython setup.py install`.")
def log_likelihood(theta):
""" Simple Gaussian Likelihood"""
logL = np_log_likelihood(theta)
return logL, [np.sum(theta)]
def counting_function(params):
counting_function.calls += 1
return log_likelihood(params)
counting_function.calls = 0
def prior(hypercube):
""" Uniform prior from [-1,1]^D. """
return ndtri(hypercube) * np.sqrt(np.diag(prior_cov)) + prior_mu
def dumper(live, dead, logweights, logZ, logZerr):
return
settings = PolyChordSettings(ndims, 1)
settings.file_root = 'polychord'
settings.nlive = num_live_points
settings.do_clustering = True
settings.read_resume = False
settings.num_repeats = 5*ndims
t0 = default_timer()
output = pypolychord.run_polychord(counting_function, ndims, 1, settings, prior, dumper)
run_time = default_timer() - t0
logZ, logZerr = output.logZ, output.logZerr
score = 0.
print("PolyChord result keys: {}".format(output))
print(f"PolyChord run time: {run_time}")
print(f"PolyChord log(Z): {logZ} +- {logZerr}")
print(f"PolyChord num_likelihood/ESS: {score}")
return run_time, logZ, logZerr, counting_function.calls
def run_multinest():
### multinest
try:
from pymultinest.solve import solve
from pymultinest.analyse import Analyzer
except:
raise ImportError(
"Multinest is not installed.\nFollow directions on http://johannesbuchner.github.io/PyMultiNest/install.html.")
import os
os.makedirs('chains', exist_ok=True)
prefix = "chains/multinest-"
# prior transform (iid standard normal prior)
def prior_transform(u):
"""Transforms our unit cube samples `u` to a standard normal prior."""
return ndtri(u) * np.sqrt(np.diag(prior_cov)) + prior_mu
def counting_function(params):
counting_function.calls += 1
return np_log_likelihood(params)
counting_function.calls = 0
# run MultiNest
t0 = default_timer()
result = solve(LogLikelihood=counting_function,
Prior=prior_transform,
n_dims=ndims,
outputfiles_basename=prefix,
verbose=False,
n_live_points=num_live_points,
max_modes=100,
evidence_tolerance=0.5,
sampling_efficiency=0.3)
run_time = default_timer() - t0
# analyser = Analyzer(ndims, outputfiles_basename = prefix)
# stats = analyser.get_stats()
logZ, logZerr = result['logZ'], result['logZerr']
score = 0.
print("Multinest results:", result)
print(f"MultiNEST run time: {run_time}")
print(f"MultiNEST log(Z): {logZ} +- {logZerr}")
print(f"MultiNEST num_likelihood/ESS: {score}")
return run_time, logZ, logZerr, counting_function.calls
def run_jaxns():
try:
from jaxns.nested_sampling import NestedSampler
from jaxns.prior_transforms import PriorChain, UniformPrior, NormalPrior
except:
raise ImportError("Install JaxNS!")
from timeit import default_timer
from jax import random, jit
import jax.numpy as jnp
if ndims < 5:
depth = 7
elif ndims == 5:
depth = 8
elif ndims == 6:
depth = 9
else:
depth = 9
prior_transform = PriorChain().push(NormalPrior('theta', prior_mu, jnp.sqrt(jnp.diag(prior_cov))))
ns = NestedSampler(jax_log_likelihood, prior_transform, sampler_name='slice')
def run_with_n(n):
@jit
def run(key):
return ns(key=key,
num_live_points=n,
max_samples=1e8,
collect_samples=False,
termination_frac=0.001,
sampler_kwargs=dict(depth=depth, num_slices=5))
results = run(random.PRNGKey(0))
results.logZ.block_until_ready()
t0 = default_timer()
results = run(random.PRNGKey(132624))
results.logZ.block_until_ready()
run_time = (default_timer() - t0)
logZ, logZerr = results.logZ, results.logZerr
score = results.num_likelihood_evaluations/results.ESS
print('Number of samples taken: {}'.format(results.num_samples))
print(f"JAXNS run time: {run_time}")
print(f"JAXNS log(Z): {logZ} +- {logZerr}")
print(f"JAXNS num_likelihood/ESS: {score}")
return run_time, logZ, logZerr
return run_with_n(num_live_points)
try:
shutil.rmtree('chains')
except FileNotFoundError:
pass
file_name = f"{ndims}D_n{num_live_points}.npz"
names = []
run_data = []
names.append("Dynesty")
if do_dynesty:
run_data.append(run_dynesty())
else:
try:
run_data.append((np.load(file_name)['run_data'][0, 0],
np.load(file_name)['run_data'][1, 0],
np.load(file_name)['run_data'][2, 0],
np.load(file_name)['run_data'][3, 0]
))
except:
run_data.append((np.nan, np.nan, np.nan, np.nan))
names.append("PolyChord")
if do_polychord:
run_data.append(run_polychord())
else:
try:
run_data.append((np.load(file_name)['run_data'][0, 1],
np.load(file_name)['run_data'][1, 1],
np.load(file_name)['run_data'][2, 1],
np.load(file_name)['run_data'][3, 1]))
except:
run_data.append((np.nan, np.nan, np.nan, np.nan))
names.append("MultiNest")
if do_multinest:
run_data.append(run_multinest())
else:
try:
run_data.append((np.load(file_name)['run_data'][0, 2],
np.load(file_name)['run_data'][1, 2],
np.load(file_name)['run_data'][2, 2],
np.load(file_name)['run_data'][3, 2],
))
except:
run_data.append((np.nan, np.nan, np.nan, np.nan))
names.append('JaxNS')
if do_jaxns:
run_data.append(run_jaxns())
else:
try:
run_data.append((np.load(file_name)['run_data'][0, 3],
np.load(file_name)['run_data'][1, 3],
np.load(file_name)['run_data'][2, 3],
np.load(file_name)['run_data'][3, 3],
))
except:
run_data.append((np.nan, np.nan, np.nan, np.nan))
run_data = np.array(run_data)
run_time, logZ, logZerr, nlik_calls = run_data.T
np.savez(file_name, run_data=run_data.T, true_logZ=true_logZ)
plt.bar(names, run_time, fc="none", ec='black', lw=3.)
plt.xlabel("Nested sampling package")
plt.ylabel("Execution time (s)")
plt.yscale('log')
plt.savefig(f"{ndims}D_n{num_live_points}_speed_test.png")
plt.savefig(f"{ndims}D_n{num_live_points}_speed_test.pdf")
    plt.figure()  # start a new figure for the likelihood-evaluation plot
plt.bar(names, nlik_calls, fc="none", ec='black', lw=3.)
plt.xlabel("Nested sampling package")
plt.ylabel("Number of likelihood evaluations [1]")
plt.yscale('log')
plt.savefig(f"{ndims}D_n{num_live_points}_efficiency_test.png")
plt.savefig(f"{ndims}D_n{num_live_points}_efficiency_test.pdf")
def add_args(parser):
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument('--ndims', help='number of problem dimensions.',
default=2, type=int, required=False)
parser.add_argument('--num_live_points', help='Number of live points.',
default=1000, type=int, required=False)
parser.add_argument('--K', help='Number of mixture components.',
default=10, type=int, required=False)
parser.add_argument('--do_dynesty', help='Whether to do dynesty run.',
default=False, type="bool", required=False)
parser.add_argument('--do_polychord', help='Whether to do polychord run.',
default=False, type="bool", required=False)
parser.add_argument('--do_multinest', help='Whether to do multinest run.',
default=False, type="bool", required=False)
parser.add_argument('--do_jaxns', help='Whether to do JAXNS run.',
default=True, type="bool", required=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Runs a single experiment of the second problem in JAXNS paper.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_args(parser)
flags, unparsed = parser.parse_known_args()
print("Running with:")
for option, value in vars(flags).items():
print(" {} -> {}".format(option, value))
main(**vars(flags))
``` |
{
"source": "Joshuaalbert/neural_deprojection",
"score": 2
} |
#### File: neural_deprojection/neural_deprojection/graph_net_utils.py
```python
import tensorflow as tf
from graph_nets.graphs import GraphsTuple
from graph_nets.modules import SelfAttention
import tensorflow_probability as tfp
from graph_nets import utils_tf, blocks
import tqdm
import sonnet as snt
from sonnet.src.base import Optimizer, Module
from sonnet.src import utils, once
import numpy as np
import six
import abc
import contextlib
import os
from collections import namedtuple
from functools import partial, reduce
import itertools
def sort_graph(graphs:GraphsTuple, node_ids, edge_ids)->GraphsTuple:
"""
Sorts the nodes and edges of a batch of graphs such that they are ordered in blocks.
Args:
graphs: GraphsTuple
node_ids: int32 1D array giving graph index of each node
edge_ids: int32 1D array giving graph index of each edge
Returns:
GraphsTuple
"""
#sort the nodes into blocks
node_sort = tf.argsort(node_ids)
nodes = graphs.nodes[node_sort]
senders = node_sort[graphs.senders]
receivers = node_sort[graphs.receivers]
#sort edges into blocks
edge_sort = tf.argsort(edge_ids)
senders = senders[edge_sort]
receivers = receivers[edge_sort]
edges = graphs.edges[edge_sort]
return graphs.replace(nodes=nodes,
edges=edges,
senders=senders,
receivers=receivers)
def replicate_graph(graph, num_repeats):
if isinstance(num_repeats, int):
return utils_tf.concat([graph]*num_repeats, axis=0)
def _repeat(tensor):
if tensor is None:
return None
shape = get_shape(tensor)
return tf.tile(tensor,[num_repeats]+[1]*(len(shape) - 1))
graph = graph.map(_repeat, ('nodes', 'edges', 'senders', 'receivers', 'globals', 'n_node', 'n_edge'))
offsets = utils_tf._compute_stacked_offsets(graph.n_node, graph.n_edge)
if graph.senders is not None:
graph = graph.replace(senders = graph.senders + offsets)
if graph.receivers is not None:
graph = graph.replace(receivers = graph.receivers + offsets)
return graph
def get_shape(tensor):
"""Returns the tensor's shape.
Each shape element is either:
- an `int`, when static shape values are available, or
- a `tf.Tensor`, when the shape is dynamic.
Args:
tensor: A `tf.Tensor` to get the shape of.
Returns:
The `list` which contains the tensor's shape.
"""
shape_list = tensor.shape.as_list()
if all(s is not None for s in shape_list):
return shape_list
shape_tensor = tf.shape(tensor)
return [shape_tensor[i] if s is None else s for i, s in enumerate(shape_list)]
def reconstruct_fields_from_gaussians(tokens, positions):
"""
Computes the reconstruction of fields as a sum of spatial Gaussian basis functions.
rho(x) = sum_i w_i * e^{-0.5 * (x - mu_i)^T @ R_i^T @ W_i^{-1} @ R_i (x - mu_i)}
M = (R^T @ W^{-1} @ R)
= L^T @ L
(x - mu_i)^T @ M (x - mu_i)
((x - mu_i)^T @ L^T) @ (L @ (x - mu_i))
dx = (L @ (x - mu))
dx^T @ dx
L = [a, 0, 0],
[b, c, 0],
[d, e, f]
Args:
tokens: [batch, num_gaussian_components, num_properties * 10]
positions: [batch, n_nodes_per_graph, 3]
Returns:
[batch, n_nodes_per_graph, num_properties]
"""
def _single_gaussian_property(arg):
"""
Args:
positions: [N, 3]
weight: [C]
mu: [C, 3]
L: [C, 3, 3]
Returns: [N]
"""
positions, weight, mu, L = arg
dx = (positions - mu[:, None, :])#C, N, 3
dx = tf.einsum("cij,cnj->cni", L, dx )#C, N, 3
maha = tf.einsum("cni,cni->cn",dx,dx)#C,N
return tf.reduce_sum(weight[:, None] * tf.math.exp(-0.5 * maha), axis=0)#N
def _single_batch_evaluation(arg):
"""
Evaluation Gaussian for single graph
Args:
positions: [N, 3]
tokens: [C, P*10]
Returns:
[N,P]
"""
# vectorized_map only takes single inputs
positions, tokens = arg
P = tokens.shape[1]//10
weights = tf.transpose(tokens[:, 0:P], (1, 0))#P, C
mu = tf.transpose(tf.reshape(tokens[:, P:P*3+P], (-1, P, 3)), (1, 0, 2))#P,C,3
L_flat = tf.transpose(tf.reshape(tokens[:, P*3+P:], (-1, P, 6)), (1,0,2))#P,C,6
L = tfp.math.fill_triangular(L_flat)#P,C,3,3
# tf.stack(P*[positions]) = P, N, 3
properties = tf.vectorized_map(_single_gaussian_property, (tf.stack(P*[positions]), weights, mu, L))#P,N
properties = tf.transpose(properties, (1,0)) #N, P
return properties
return tf.vectorized_map(_single_batch_evaluation, (positions, tokens)) # [batch, N, P]
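def _gaussian_reconstruction_sketch():
    # A hedged worked example (illustrative only): one batch, one Gaussian component,
    # one property. With L = I/sigma the token encodes the isotropic Gaussian
    # w * exp(-0.5 * |x - mu|^2 / sigma^2), so querying at mu returns w.
    w, mu, sigma = 2.0, tf.zeros(3), 0.5
    L_flat = tfp.math.fill_triangular_inverse(tf.eye(3) / sigma)  # 6 lower-triangle entries
    token = tf.concat([[w], mu, L_flat], axis=0)[None, None, :]  # [batch=1, C=1, P*10=10]
    positions = tf.zeros((1, 4, 3))  # 4 query points, all at mu
    return reconstruct_fields_from_gaussians(token, positions)  # -> w at each point, shape [1, 4, 1]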
def graph_batch_reshape(graphs:GraphsTuple)->GraphsTuple:
"""
If each graph is exactly the same size, i.e. has the same number of nodes and edges,
then you can reshape into batch form.
Args:
graph: GraphsTuple
Returns:
GraphsTuple with
nodes: [n_graphs, n_node[0]//n_graphs,...]
edges: [n_graphs, n_edge[0]//n_graphs,...]
senders: [n_graphs, n_edge[0]//n_graphs]
receivers: [n_graphs, n_edge[0]//n_graphs]
"""
n_graphs = utils_tf.get_num_graphs(graphs)
def _to_batched(tensor):
if tensor is None:
return tensor
in_shape = get_shape(tensor)
to_shape = [n_graphs, in_shape[0]//n_graphs] + in_shape[1:]
new_tensor = tf.reshape(tensor, to_shape)
return new_tensor
return graphs.map(_to_batched,fields=('nodes','edges','senders','receivers'))
def graph_unbatch_reshape(graphs: GraphsTuple)->GraphsTuple:
"""
Undoes `graph_batch_reshape`.
Args:
graph: GraphsTuple with
nodes: [n_graphs, n_node[0]//n_graphs,...]
edges: [n_graphs, n_edge[0]//n_graphs,...]
senders: [n_graphs, n_edge[0]//n_graphs]
receivers: [n_graphs, n_edge[0]//n_graphs]
Returns:
GraphsTuple with normal shaping of elements.
"""
n_graphs = utils_tf.get_num_graphs(graphs)
def _to_unbatched(tensor):
if tensor is None:
return tensor
from_shape = get_shape(tensor)
to_shape = [from_shape[1] * n_graphs] + from_shape[2:]
new_tensor = tf.reshape(tensor, to_shape)
return new_tensor
return graphs.map(_to_unbatched, fields=('nodes', 'edges', 'senders', 'receivers'))
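def _graph_batch_reshape_sketch():
    # A hedged, illustrative sketch: two graphs with identical numbers of nodes and
    # edges round-trip through graph_batch_reshape / graph_unbatch_reshape.
    d = dict(nodes=tf.ones((3, 2)), edges=tf.ones((4, 1)),
             senders=tf.constant([0, 1, 2, 0]), receivers=tf.constant([1, 2, 0, 2]),
             globals=tf.ones((1,)))
    graphs = utils_tf.data_dicts_to_graphs_tuple([d, d])
    batched = graph_batch_reshape(graphs)       # nodes: [2, 3, 2], edges: [2, 4, 1]
    recovered = graph_unbatch_reshape(batched)  # nodes: [6, 2], edges: [8, 1]
    return batched, recovered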
def gaussian_loss_function(gaussian_tokens, graphs:GraphsTuple):
"""
Args:
gaussian tokens: [batch, n_tokens, num_properties*10]
graph: GraphsTuple
graph.nodes: [n_node, num_positions + num_properties]
Returns:
scalar
"""
graphs = graph_batch_reshape(graphs)
positions = graphs.nodes[:,:,:3]#batch,n_node, 3
input_properties = graphs.nodes[:,:,3:]#batch, n_node, n_prop
with tf.GradientTape() as tape:
field_properties = reconstruct_fields_from_gaussians(gaussian_tokens, positions)#N,P
diff_properties = (input_properties - field_properties)
return field_properties, tf.reduce_mean(tf.reduce_sum(tf.math.square(diff_properties),axis=1),axis=0)
def efficient_nn_index(query_positions, positions):
"""
For each point in query_positions, find the index of the closest point in positions.
:param query_positions: [N, D]
:param positions: [M, D]
:return: int64, indices of shape [N]
"""
def _nearest_neighbour_index(state, point):
return tf.argmin(tf.reduce_sum(tf.math.square(point - positions),axis=1))
results = tf.scan(_nearest_neighbour_index,query_positions,initializer=tf.zeros((),dtype=tf.int64))
return results
@six.add_metaclass(abc.ABCMeta)
class AbstractModule(snt.Module):
"""Makes Sonnet1-style childs from this look like a Sonnet2 module."""
def __init__(self, *args, **kwargs):
super(AbstractModule, self).__init__(*args, **kwargs)
self.__call__.__func__.__doc__ = self._build.__doc__ # pytype: disable=attribute-error
# In snt2 calls to `_enter_variable_scope` are ignored.
@contextlib.contextmanager
def _enter_variable_scope(self, *args, **kwargs):
yield None
def __call__(self, *args, **kwargs):
return self._build(*args, **kwargs)
@abc.abstractmethod
def _build(self, *args, **kwargs):
"""Similar to Sonnet 1 ._build method."""
class TrainOneEpoch(Module):
_model: AbstractModule
_opt: Optimizer
def __init__(self, model: AbstractModule, loss, opt: Optimizer, strategy: tf.distribute.MirroredStrategy = None,
name=None):
super(TrainOneEpoch, self).__init__(name=name)
self.epoch = tf.Variable(0, dtype=tf.int64, trainable=False)
self.minibatch = tf.Variable(0, dtype=tf.int64, trainable=False)
self.log_counter = tf.Variable(0, dtype=tf.int64, trainable=False)
self._model = model
self._learn_variables = None
self._model.epoch = self.epoch
self._model.step = self.minibatch
self._model.log_counter = self.log_counter
self._opt = opt
self._loss = loss
self._strategy = strategy
self._checkpoint = tf.train.Checkpoint(module=model)
@property
def strategy(self) -> tf.distribute.MirroredStrategy:
return self._strategy
@property
def model(self):
return self._model
@property
def opt(self):
return self._opt
def loss(self, model_output, batch):
return self._loss(model_output, batch)
# def summarize(self, model_output, batch):
# summaries = model_output['summaries']
# for key in summaries:
# summary_func = summaries[key][0]
# summary_input = summaries[key][1]
def train_step(self, batch):
"""
Trains on a single batch.
Args:
batch: user defined batch from a dataset.
Returns:
loss
"""
with tf.GradientTape() as tape:
if not isinstance(batch, (list, tuple)):
batch = (batch,)
model_output = self.model(*batch)
loss = self.loss(model_output, batch)
# summaries = self.summarize(model_output, batch)
if self._learn_variables is None:
params = self.model.trainable_variables
else:
params = self._learn_variables
grads = tape.gradient(loss, params)
if self.strategy is not None:
replica_ctx = tf.distribute.get_replica_context()
grads = replica_ctx.all_reduce("mean", grads)
# for (param, grad) in zip(params, grads):
# if grad is not None:
# tf.summary.histogram(param.name + "_grad", grad, step=self.minibatch)
self.opt.apply(grads, params)
return loss
def one_epoch_step(self, train_dataset):
"""
        Updates the model with one epoch of training and returns the average loss over the epoch.
Returns:
average loss
"""
self.epoch.assign_add(1)
# metrics = None
loss = 0.
num_batches = 0.
for train_batch in train_dataset:
self.minibatch.assign_add(1)
self.log_counter.assign_add(1)
if self.strategy is not None:
_loss = self.strategy.run(self.train_step, args=(train_batch,))
_loss = self.strategy.reduce("sum", _loss, axis=None)
else:
_loss = self.train_step(train_batch)
tf.summary.scalar('mini_batch_loss', _loss, step=self.minibatch)
loss += _loss
num_batches += 1.
tf.summary.scalar('epoch_loss', loss / num_batches, step=self.epoch)
return loss / num_batches
def evaluate(self, test_dataset):
loss = 0.
num_batches = 0.
for test_batch in test_dataset:
self.log_counter.assign_add(1)
if not isinstance(test_batch, (list, tuple)):
test_batch = (test_batch,)
if self.strategy is not None:
model_output = self.strategy.run(self.model, args=(test_batch,))
_loss = self.strategy.run(self.loss, args=(model_output, test_batch))
loss += self.strategy.reduce("sum", _loss, axis=0)
else:
model_output = self.model(*test_batch)
loss += self.loss(model_output, test_batch)
num_batches += 1.
tf.summary.scalar('loss', loss / num_batches, step=self.epoch)
return loss / num_batches
def get_distribution_strategy(use_cpus=True, logical_per_physical_factor=1,
memory_limit=2000) -> tf.distribute.MirroredStrategy:
# trying to set GPU distribution
physical_gpus = tf.config.experimental.list_physical_devices("GPU")
physical_cpus = tf.config.experimental.list_physical_devices("CPU")
if len(physical_gpus) > 0 and not use_cpus:
print("Physical GPUS: {}".format(physical_gpus))
if logical_per_physical_factor > 1:
for dev in physical_gpus:
tf.config.experimental.set_virtual_device_configuration(
dev,
[tf.config.experimental.VirtualDeviceConfiguration(
memory_limit=memory_limit)] * logical_per_physical_factor
)
gpus = tf.config.experimental.list_logical_devices("GPU")
print("Logical GPUs: {}".format(gpus))
strategy = snt.distribute.Replicator(
["/device:GPU:{}".format(i) for i in range(len(gpus))],
tf.distribute.ReductionToOneDevice("GPU:0"))
else:
print("Physical CPUS: {}".format(physical_cpus))
if logical_per_physical_factor > 1:
for dev in physical_cpus:
tf.config.experimental.set_virtual_device_configuration(
dev,
[tf.config.experimental.VirtualDeviceConfiguration()] * logical_per_physical_factor
)
cpus = tf.config.experimental.list_logical_devices("CPU")
print("Logical CPUs: {}".format(cpus))
strategy = snt.distribute.Replicator(
["/device:CPU:{}".format(i) for i in range(len(cpus))],
tf.distribute.ReductionToOneDevice("CPU:0"))
return strategy
def test_checkpoint_restore():
class TestClassA(AbstractModule):
def __init__(self, name=None):
super(TestClassA, self).__init__(name=name)
self.mlp = snt.nets.MLP([1], name='mlp_a')
def _build(self, input):
return self.mlp(input)
class TestClassB(AbstractModule):
def __init__(self, name=None):
super(TestClassB, self).__init__(name=name)
self.a = TestClassA()
def _build(self, input):
return self.a(input) + input
b = TestClassB()
input = tf.ones((5,1))
output = b(input)
print(b.trainable_variables)
checkpoint_dir = 'test_ckpt_2'
### save b
checkpoint = tf.train.Checkpoint(module=b.a)#saving b.a means that we can't load b but only a later
manager = tf.train.CheckpointManager(checkpoint,
checkpoint_dir,
max_to_keep=3,
checkpoint_name=b.__class__.__name__)
manager.save()
### restore into b
b = TestClassB()
input = tf.ones((5, 1))
output = b(input)
print("Before restore", b.trainable_variables)
checkpoint = tf.train.Checkpoint(module=b)#won't work because we saved from a
checkpoint.restore(manager.latest_checkpoint).expect_partial()
print(f"Restored from {manager.latest_checkpoint}")
print("After restore", b.trainable_variables)
### restore into a
a = TestClassA()
input = tf.ones((5, 1))
output = a(input)
print("Before restore", a.trainable_variables)
checkpoint = tf.train.Checkpoint(module=a)#will work because we saved from a
checkpoint.restore(manager.latest_checkpoint).expect_partial()
# print(restore_checkpoint_from_other_model(manager.latest_checkpoint, a.trainable_variables))
print(f"Restored from {manager.latest_checkpoint}")
print("After restore", a.trainable_variables)
# Note, no re.match happens.
def vanilla_training_loop(train_one_epoch: TrainOneEpoch, training_dataset, test_dataset=None, num_epochs=1,
early_stop_patience=None, checkpoint_dir=None, log_dir=None, save_model_dir=None, variables=None, debug=False):
"""
A simple training loop.
Args:
train_one_epoch: TrainOneEpoch object
training_dataset: training dataset, elements are expected to be tuples of model input
test_dataset: test dataset, elements are expected to be tuples of model input
num_epochs: int
early_stop_patience: int, how many epochs with non-decreasing loss before stopping.
checkpoint_dir: where to save checkpoints
log_dir: where to log to tensorboard
save_model_dir: where to save the model
variables: optional, if not None then which variables to train on, defaults to model.trainable_variables.
debug: bool, whether to not compile to faster code but allow debugging.
Returns:
"""
if checkpoint_dir is not None:
os.makedirs(checkpoint_dir, exist_ok=True)
if save_model_dir is not None:
os.makedirs(save_model_dir, exist_ok=True)
if train_one_epoch.strategy is not None:
training_dataset = training_dataset.prefetch(tf.data.experimental.AUTOTUNE).cache()
if test_dataset is not None:
test_dataset = test_dataset.prefetch(tf.data.experimental.AUTOTUNE).cache()
if variables is not None:
train_one_epoch._learn_variables = variables
else:
train_one_epoch._learn_variables = None
# We'll turn the one_epoch_step function which updates our models into a tf.function using
# autograph. This makes train_one_epoch much faster. If debugging, you can turn this
# off by setting `debug = True`.
step = train_one_epoch.one_epoch_step
evaluate = train_one_epoch.evaluate
if not debug:
step = tf.function(step)
evaluate = tf.function(evaluate)
fancy_progress_bar = tqdm.tqdm(range(num_epochs),
unit='epochs',
position=0)
early_stop_min_loss = np.inf
early_stop_interval = 0
train_log_dir = os.path.join(log_dir, "train")
test_log_dir = os.path.join(log_dir, "test")
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_summary_writer = tf.summary.create_file_writer(test_log_dir)
checkpoint = tf.train.Checkpoint(module=train_one_epoch.model)
manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir, max_to_keep=3,
checkpoint_name=train_one_epoch.model.__class__.__name__)
if manager.latest_checkpoint is not None:
checkpoint.restore(manager.latest_checkpoint).expect_partial()
print(f"Restored from {manager.latest_checkpoint}")
for step_num in fancy_progress_bar:
with train_summary_writer.as_default():
loss = step(iter(training_dataset))
if save_model_dir is not None:
tf.saved_model.save(train_one_epoch.model, save_model_dir)
tqdm.tqdm.write(
'\nEpoch = {}/{} (loss = {:.02f})'.format(
train_one_epoch.epoch.numpy(), num_epochs, loss))
if test_dataset is not None:
with test_summary_writer.as_default():
test_loss = evaluate(iter(test_dataset))
tqdm.tqdm.write(
'\n\tTest loss = {:.02f})'.format(test_loss))
if early_stop_patience is not None:
if test_loss <= early_stop_min_loss:
early_stop_min_loss = test_loss
early_stop_interval = 0
manager.save()
else:
early_stop_interval += 1
if early_stop_interval == early_stop_patience:
tqdm.tqdm.write(
'\n\tStopping Early')
break
else:
manager.save()
else:
manager.save()
train_summary_writer.close()
test_summary_writer.close()
def batch_dataset_set_graph_tuples(*, all_graphs_same_size=False, dataset: tf.data.Dataset,
batch_size) -> tf.data.Dataset:
"""
    Args:
        dataset: dataset of GraphTuple containing only a single graph per element.
        batch_size: int, number of graphs to concatenate into one batched GraphsTuple.
        all_graphs_same_size: bool, must be True; all graphs are required to have the same number of nodes and edges.
    Returns:
        Dataset whose elements contain a single batched GraphsTuple (plus any other unchanged elements).
"""
if not all_graphs_same_size:
raise ValueError("Only able to batch graphs with the same number of nodes and edges.")
TempGraphTuple = namedtuple('TempGraphTuple',
['nodes', 'edges', 'senders', 'receivers', 'globals', 'n_node', 'n_edge'])
def _concat_graph_from_batched(*args):
_output_args = []
for arg in args:
if isinstance(arg, TempGraphTuple):
graph: TempGraphTuple = arg
# # nodes: [batch_size,nnodes_max,Fnodes]
# graph.nodes.set_shape([batch_size, None, None])
# # edges: [batch_size,nedges_max,Fedges]
# graph.edges.set_shape([batch_size, None, None])
# # senders: [batch_size, nedges_max]
# graph.senders.set_shape([batch_size, None])
# # receivers: [batch_size, nedges_max]
# graph.receivers.set_shape([batch_size, None])
# # globals: [batch_size, 1, Fglobals]
# graph.globals.set_shape([batch_size, None, None])
# # nnodes: [batch_size, 1]
# graph.n_node.set_shape([batch_size, None])
# # nedges: [batch_size, 1]
# graph.n_edge.set_shape([batch_size, None])
nodes = tf.unstack(graph.nodes, num=batch_size, name='nodes')
edges = tf.unstack(graph.edges, num=batch_size, name='edges')
senders = tf.unstack(graph.senders, num=batch_size, name='senders')
receivers = tf.unstack(graph.receivers, num=batch_size, name='receivers')
_globals = tf.unstack(graph.globals, num=batch_size, name='globals')
n_node = tf.unstack(graph.n_node, num=batch_size, name='n_node')
n_edge = tf.unstack(graph.n_edge, num=batch_size, name='n_edge')
graphs = []
for _nodes, _edges, _senders, _receivers, _n_node, _n_edge, __globals in zip(nodes, edges, senders,
receivers, n_node, n_edge,
_globals):
graphs.append(GraphsTuple(nodes=_nodes,
edges=_edges,
globals=__globals,
receivers=_receivers,
senders=_senders,
n_node=_n_node,
n_edge=_n_edge))
# print(graphs[-1])
graphs = utils_tf.concat(graphs, axis=0)
_output_args.append(graphs)
else:
_output_args.append(arg)
if len(_output_args) == 1:
return _output_args[0]
return tuple(_output_args)
def _to_temp_graph_tuple(*args):
_output_args = []
for arg in args:
if isinstance(arg, GraphsTuple):
_output_args.append(TempGraphTuple(**arg._asdict()))
else:
_output_args.append(arg)
return tuple(_output_args)
return dataset.map(_to_temp_graph_tuple).padded_batch(batch_size=batch_size, drop_remainder=True).map(
_concat_graph_from_batched)
def test_batch_dataset_set_graph_tuples():
graphs = []
images = []
n_node = 5
n_edge = n_node * 2
for i in range(5, 11):
graph = GraphsTuple(nodes=np.random.normal(size=(n_node, 2)).astype(np.float32),
edges=np.random.normal(size=(n_edge, 3)).astype(np.float32),
senders=np.random.randint(n_node, size=(n_edge,)).astype(np.int32),
receivers=np.random.randint(n_node, size=(n_edge,)).astype(np.int32),
globals=np.random.normal(size=(1, 4)).astype(np.float32),
n_node=[n_node],
n_edge=[n_edge]
)
graphs.append(graph)
images.append(np.random.normal(size=(24, 24, 1)))
dataset = tf.data.Dataset.from_generator(lambda: iter(zip(graphs, images)),
output_types=
(GraphsTuple(nodes=tf.float32,
edges=tf.float32,
senders=tf.int32,
receivers=tf.int32,
globals=tf.float32,
n_node=tf.int32,
n_edge=tf.int32
),
tf.float32),
output_shapes=(GraphsTuple(nodes=tf.TensorShape([None, None]),
edges=tf.TensorShape([None, None]),
senders=tf.TensorShape([None]),
receivers=tf.TensorShape([None]),
globals=tf.TensorShape([None, None]),
n_node=tf.TensorShape([None]),
n_edge=tf.TensorShape([None])
),
tf.TensorShape([None, None, None])))
# for graph in iter(dataset):
# print(graph.receivers.dtype)
dataset = batch_dataset_set_graph_tuples(all_graphs_same_size=True, dataset=dataset, batch_size=2)
for graph, image in iter(dataset):
assert graph.nodes.shape == (n_node * 2, 2)
assert graph.edges.shape == (n_edge * 2, 3)
assert graph.globals.shape == (2, 4)
assert image.shape == (2, 24, 24, 1)
def batched_tensor_to_fully_connected_graph_tuple_dynamic(nodes_tensor, pos=None, globals=None):
"""
Convert tensor with batch dim to batch of GraphTuples.
:param nodes_tensor: [B, num_nodes, F] Tensor to turn into nodes. F must be statically known.
:param pos: [B, num_nodes, D] Tensor to calculate edge distance using difference. D must be statically known.
:param globals: [B, G] Tensor to use as global. G must be statically known.
:return: GraphTuple with batch of fully connected graphs
"""
shape = tf.shape(nodes_tensor)
batch_size, num_nodes = shape[0], shape[1]
F = nodes_tensor.shape.as_list()[-1]
graphs_with_nodes = GraphsTuple(n_node=tf.fill([batch_size], num_nodes),
n_edge=tf.fill([batch_size], 0),
nodes=tf.reshape(nodes_tensor, [batch_size * num_nodes, F]),
edges=None, globals=None, receivers=None, senders=None)
graphs_tuple_with_nodes_connectivity = utils_tf.fully_connect_graph_dynamic(
graphs_with_nodes, exclude_self_edges=False)
if pos is not None:
D = pos.shape.as_list()[-1]
graphs_with_position = graphs_tuple_with_nodes_connectivity.replace(
nodes=tf.reshape(pos, [batch_size * num_nodes, D]))
edge_distances = (
blocks.broadcast_receiver_nodes_to_edges(graphs_with_position) -
blocks.broadcast_sender_nodes_to_edges(graphs_with_position))
graphs_with_nodes_edges = graphs_tuple_with_nodes_connectivity.replace(edges=edge_distances)
else:
graphs_with_nodes_edges = utils_tf.set_zero_edge_features(graphs_tuple_with_nodes_connectivity, 1,
dtype=nodes_tensor.dtype)
if globals is not None:
graphs_with_nodes_edges_globals = graphs_with_nodes_edges.replace(globals=globals)
else:
graphs_with_nodes_edges_globals = utils_tf.set_zero_global_features(
graphs_with_nodes_edges, global_size=1)
return graphs_with_nodes_edges_globals
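def test_batched_tensor_to_fully_connected_graph_tuple_dynamic():
    # Minimal usage sketch (not from the original repo): turn a [B, num_nodes, F] tensor into a batch of
    # fully connected graphs. Self edges are included, so each graph gets num_nodes**2 edges.
    nodes = tf.random.normal((2, 4, 3))
    graphs = batched_tensor_to_fully_connected_graph_tuple_dynamic(nodes)
    assert graphs.nodes.shape == (2 * 4, 3)
    assert int(tf.reduce_sum(graphs.n_edge)) == 2 * 4 * 4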
def build_log_dir(base_log_dir, config):
"""
Builds log dir.
Args:
base_log_dir: where all logs should be based from.
config: dict with following structure
Example config:
config = dict(model_type='model1',
model_parameters=dict(num_layers=3),
optimizer_parameters=dict(learning_rate=1e-5, opt_type='adam'),
loss_parameters=dict(loss_type='cross_entropy'))
Returns:
log_dir representing this config
"""
log_dir_subdir = stringify_config(config)
log_dir = os.path.join(base_log_dir, log_dir_subdir)
return log_dir
def build_checkpoint_dir(base_checkpoint_dir, config):
"""
Builds log dir.
Args:
base_checkpoint_dir: where all logs should be based from.
config: dict with following structure
Example config:
config = dict(model_type='model1',
model_parameters=dict(num_layers=3),
optimizer_parameters=dict(learning_rate=1e-5, opt_type='adam'),
loss_parameters=dict(loss_type='cross_entropy'))
Returns:
checkpoint_dir representing this config
"""
checkpoint_dir_subdir = stringify_config(config)
checkpoint_dir = os.path.join(base_checkpoint_dir, checkpoint_dir_subdir)
return checkpoint_dir
def stringify_config(config):
def transform_key(key: str):
# use every other letter of key as name
keys = key.split("_")
parts = []
for key in keys:
vowels = 'aeiou'
for v in vowels:
key = key[0] + key[1:].replace(v, '')
parts.append(key)
return "".join(parts)
def transform_value(value):
if isinstance(value, int):
return str(value)
if isinstance(value, (float)):
return "{:.1e}".format(value)
else:
return value
def stringify_dict(d):
return "|{}|".format(",".join(["{}={}".format(transform_key(k), transform_value(d[k]))
for k in sorted(d.keys())]))
model_type = f"|{config['model_type']}|"
model_parameters = stringify_dict(config['model_parameters'])
optimizer_parameters = stringify_dict(config['optimizer_parameters'])
loss_parameters = stringify_dict(config['loss_parameters'])
subdir = "".join([model_type, model_parameters, optimizer_parameters, loss_parameters])
return subdir
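def test_stringify_config():
    # Minimal usage sketch (not from the original repo), using the example config from the docstrings above.
    # Keys keep their first letter and drop later vowels, floats are rendered with '{:.1e}', and each
    # sub-dict is wrapped in '|...|', giving a compact, deterministic directory name.
    config = dict(model_type='model1',
                  model_parameters=dict(num_layers=3),
                  optimizer_parameters=dict(learning_rate=1e-5, opt_type='adam'),
                  loss_parameters=dict(loss_type='cross_entropy'))
    subdir = stringify_config(config)
    assert subdir.startswith('|model1|')
    print(build_log_dir('test_log_dir', config))
    print(build_checkpoint_dir('test_checkpoint_dir', config))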
def tf_ravel_multi_index(multi_index, dims):
"""
Equivalent of np.ravel_multi_index.
Args:
        multi_index: [D, N], one row of bin indices per dimension (as in np.ravel_multi_index)
dims: [D]
Returns: [N]
"""
strides = tf.math.cumprod(dims, exclusive=True, reverse=True) # D
return tf.reduce_sum(multi_index * strides[:, None], axis=0) # D,N -> N
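def test_tf_ravel_multi_index():
    # Minimal usage sketch (not from the original repo): tf_ravel_multi_index mirrors np.ravel_multi_index,
    # taking one row of indices per dimension, so with dims=[4, 5] the multi-index (2, 3) maps to 2 * 5 + 3 = 13.
    flat = tf_ravel_multi_index(tf.constant([[2], [3]]), tf.constant([4, 5]))
    assert flat.numpy().tolist() == [13]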
def histogramdd(sample, bins=10, weights=None, density=None):
"""
Compute histogram over D-dimensional samples, potentially summing weights.
Args:
sample: [N, D] or tuple of array[N]
bins: int
weights: [N, P], optionally [N]
density: bool
    Returns:
        histogram of shape [bins]*D + [P] (the trailing [P] axis is absent if weights is 1-D),
        and the list of D per-dimension bin-edge tensors
"""
if isinstance(sample, (tuple, list)):
sample = tf.stack(sample, axis=-1)
N, D = get_shape(sample)
if weights is None:
weights = tf.ones((N,))
if not isinstance(bins, int):
raise ValueError("Only support integer bins")
bin_idx_by_dim = D * [None]
nbins = [None]*D
bin_edges_by_dim = D * [None]
dedges = D * [None]
vmin = tf.reduce_min(sample, axis=0)
vmax = tf.reduce_max(sample, axis=0)
bin_edges = tf.cast(tf.linspace(0., 1., bins + 1), sample.dtype)[:, None] * (vmax - vmin) + vmin
for i in range(D):
dim_bin_edges = bin_edges[:, i]
bin_idx = tf.searchsorted(dim_bin_edges, sample[:, i], side='right')
bin_idx = tf.where(sample[:, i] == dim_bin_edges[-1], bin_idx - 1, bin_idx)
bin_idx_by_dim[i] = bin_idx
nbins[i] = dim_bin_edges.shape[0] + 1
bin_edges_by_dim[i] = dim_bin_edges
dedges[i] = bin_edges_by_dim[i][1:] - bin_edges_by_dim[i][:-1]
minlength = maxlength = tf.constant(np.prod(nbins), dtype=tf.int32)
nbins = tf.constant(nbins, dtype=tf.int32)
xy = tf_ravel_multi_index(bin_idx_by_dim, nbins)
def _sum_weights(weights):
hist = tf.math.bincount(tf.cast(xy, tf.int32), weights,
minlength=minlength, maxlength=maxlength)
hist = tf.reshape(hist, nbins)
core = D * (slice(1, -1),)
hist = hist[core]
return hist
if len(get_shape(weights)) == 2:
        hist = tf.vectorized_map(_sum_weights, tf.transpose(weights, (1, 0)))  # [P] + [bins]*D
perm = list(range(len(hist.shape)))
perm.append(perm[0])
del perm[0]
hist = tf.transpose(hist, perm)#[bins]*D + [P]
else:
hist = _sum_weights(weights)
if density:
raise ValueError('density=True not supported.')
# s = sum(hist)
# for i in range(D):
# _shape = np.ones(D, int)
# _shape[i] = nbins[i] - 2
# hist = hist / tf.maximum(1, tf.cast(tf.reshape(dedges[i], _shape), hist.dtype))
# hist /= tf.cast(s, hist.dtype)
return hist, bin_edges_by_dim
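def test_histogramdd():
    # Minimal usage sketch (not from the original repo): histogram 2-D samples into an 8x8 grid.
    # With 1-D weights the result has shape [bins, bins]; passing [N, P] weights appends a trailing [P] axis.
    sample = tf.random.uniform((1000, 2))
    hist, bin_edges = histogramdd(sample, bins=8)
    assert hist.shape == (8, 8)
    assert len(bin_edges) == 2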
class GraphDecoder(AbstractModule):
def __init__(self, output_property_size, name=None):
super(GraphDecoder, self).__init__(name=name)
self.output_property_size = output_property_size
    def _build(self, encoded_graph, positions, **kwargs):
        mean_position = tf.reduce_mean(positions, axis=0)
        centroid_index = tf.argmin(tf.reduce_sum(tf.math.square(positions - mean_position), axis=1))
        # NOTE: the remainder of this decoder was never implemented; returning the centroid index here is an
        # assumption (not from the original code) so that the module is at least callable.
        return centroid_index
class MultiHeadLinear(AbstractModule):
"""Linear module, optionally including bias."""
def __init__(self,
output_size: int,
num_heads: int = 1,
with_bias: bool = True,
w_init=None,
b_init=None,
name=None):
"""Constructs a `Linear` module.
Args:
output_size: Output dimensionality.
with_bias: Whether to include bias parameters. Default `True`.
w_init: Optional initializer for the weights. By default the weights are
initialized truncated random normal values with a standard deviation of
`1 / sqrt(input_feature_size)`, which is commonly used when the inputs
are zero centered (see https://arxiv.org/abs/1502.03167v3).
b_init: Optional initializer for the bias. By default the bias is
initialized to zero.
name: Name of the module.
"""
super(MultiHeadLinear, self).__init__(name=name)
self.output_size = output_size
self.with_bias = with_bias
self.w_init = w_init
self.num_heads = num_heads
if with_bias:
self.b_init = b_init if b_init is not None else snt.initializers.Zeros()
elif b_init is not None:
raise ValueError("When not using a bias the b_init must be None.")
@once.once
def _initialize(self, inputs: tf.Tensor):
"""Constructs parameters used by this module."""
utils.assert_minimum_rank(inputs, 2)
input_size = inputs.shape[-1]
if input_size is None: # Can happen inside an @tf.function.
raise ValueError("Input size must be specified at module build time.")
self.input_size = input_size
if self.w_init is None:
# See https://arxiv.org/abs/1502.03167v3.
stddev = 1 / tf.math.sqrt(self.input_size * 1.0)
self.w_init = snt.initializers.TruncatedNormal(stddev=stddev)
self.w = tf.Variable(
self.w_init([self.num_heads, self.input_size, self.output_size], inputs.dtype),
name="w")
if self.with_bias:
self.b = tf.Variable(
self.b_init([self.num_heads, self.output_size], inputs.dtype), name="b")
def _build(self, inputs: tf.Tensor) -> tf.Tensor:
"""
Parallel computation of multi-head linear.
Args:
inputs: [n_nodes, node_size]
Returns:
[n_nodes, num_heads, output_size]
"""
self._initialize(inputs)
# [num_nodes, node_size].[num_heads, node_size, output_size] -> [num_nodes, num_heads, output_size]
outputs = tf.einsum('ns,hso->nho', inputs, self.w, optimize='optimal')
if self.with_bias:
outputs = tf.add(outputs, self.b)
return outputs
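def test_multi_head_linear():
    # Minimal usage sketch (not from the original repo): project [n_nodes, node_size] features into
    # num_heads parallel output spaces, giving [n_nodes, num_heads, output_size].
    mhl = MultiHeadLinear(output_size=8, num_heads=4)
    outputs = mhl(tf.random.normal((10, 16)))
    assert outputs.shape == (10, 4, 8)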
class CoreNetwork(AbstractModule):
"""
Core network which can be used in the EncodeProcessDecode network. Consists of a (full) graph network block
and a self attention block.
"""
def __init__(self,
num_heads,
multi_head_output_size,
input_node_size,
name=None):
super(CoreNetwork, self).__init__(name=name)
self.num_heads = num_heads
self.multi_head_output_size = multi_head_output_size
self.output_linear = snt.Linear(output_size=input_node_size)
self.FFN = snt.nets.MLP([32, input_node_size], activate_final=False) # Feed forward network
self.normalization = lambda x: (x - tf.reduce_mean(x)) / tf.math.reduce_std(x)
self.ln1 = snt.LayerNorm(axis=1, eps=1e-6, create_scale=True, create_offset=True)
self.ln2 = snt.LayerNorm(axis=1, eps=1e-6, create_scale=True, create_offset=True)
self.v_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # values
self.k_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # keys
self.q_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # queries
self.self_attention = SelfAttention()
def _build(self, latent, positions=None):
node_values = self.v_linear(latent.nodes)
node_keys = self.k_linear(latent.nodes)
node_queries = self.q_linear(latent.nodes)
attended_latent = self.self_attention(node_values=node_values,
node_keys=node_keys,
node_queries=node_queries,
attention_graph=latent)
output_nodes = tf.reshape(attended_latent.nodes, (-1, self.num_heads * self.multi_head_output_size))
output_nodes = self.ln1(self.output_linear(output_nodes) + latent.nodes)
output_nodes = self.ln2(self.FFN(output_nodes))
output_graph = latent.replace(nodes=output_nodes)
if positions is not None:
prepend_nodes = tf.concat([positions, output_graph.nodes[:, 3:]], axis=1)
output_graph = output_graph.replace(nodes=prepend_nodes)
return output_graph
# interpolate on nd-array
_nonempty_prod = partial(reduce, tf.multiply)
_nonempty_sum = partial(reduce, tf.add)
_INDEX_FIXERS = {
'constant': lambda index, size: index,
'nearest': lambda index, size: tf.clip_by_value(index, 0, size - 1),
'wrap': lambda index, size: index % size,
}
def _round_half_away_from_zero(a):
    # NOTE: tf.round rounds halves to the nearest even value (banker's rounding) rather than away from zero,
    # which is close enough for nearest-neighbour index selection here.
    return tf.round(a)
def _nearest_indices_and_weights(coordinate):
index = tf.cast(_round_half_away_from_zero(coordinate), tf.int32)
    weight = tf.constant(1, dtype=coordinate.dtype)  # tf.DType has no .type attribute; build the unit weight explicitly
return [(index, weight)]
def _linear_indices_and_weights(coordinate):
lower = tf.math.floor(coordinate)
upper_weight = coordinate - lower
lower_weight = 1 - upper_weight
index = tf.cast(lower, tf.int32)
return [(index, lower_weight), (index + 1, upper_weight)]
def _map_coordinates(input, coordinates, order, mode, cval):
input = tf.convert_to_tensor(input)
coordinates = [tf.convert_to_tensor(c) for c in coordinates]
cval = tf.constant(cval, input.dtype)
if len(coordinates) != len(get_shape(input)):
raise ValueError('coordinates must be a sequence of length input.ndim, but '
'{} != {}'.format(len(coordinates), len(get_shape(input))))
index_fixer = _INDEX_FIXERS.get(mode)
if mode == 'constant':
is_valid = lambda index, size: (0 <= index) & (index < size)
else:
is_valid = lambda index, size: True
if order == 0:
interp_fun = _nearest_indices_and_weights
elif order == 1:
interp_fun = _linear_indices_and_weights
else:
raise NotImplementedError(
'map_coordinates currently requires order<=1')
valid_1d_interpolations = []
for coordinate, size in zip(coordinates, get_shape(input)):
interp_nodes = interp_fun(coordinate)
valid_interp = []
for index, weight in interp_nodes:
fixed_index = index_fixer(index, size)
valid = is_valid(index, size)
valid_interp.append((fixed_index, valid, weight))
valid_1d_interpolations.append(valid_interp)
outputs = []
for items in itertools.product(*valid_1d_interpolations):
indices, validities, weights = zip(*items)
indices = tf.stack(indices, axis=-1)
if all(valid is True for valid in validities):
# fast path
contribution = tf.gather_nd(input, indices)
else:
all_valid = reduce(tf.logical_and, validities)
contribution = tf.where(all_valid, tf.gather_nd(input, indices), cval)
outputs.append(_nonempty_prod(weights) * contribution)
result = _nonempty_sum(outputs)
return tf.cast(result, input.dtype)
def map_coordinates(input, coordinates, order, mode='constant', cval=0.0):
return _map_coordinates(input, coordinates, order, mode, cval)
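def test_map_coordinates():
    # Minimal usage sketch (not from the original repo): bilinear interpolation (order=1) on a 2-D grid,
    # mirroring scipy.ndimage.map_coordinates. Coordinates are given per dimension.
    image = tf.reshape(tf.range(16, dtype=tf.float32), (4, 4))
    coords = [tf.constant([0.5, 1.0]), tf.constant([0.5, 2.0])]
    values = map_coordinates(image, coords, order=1)
    # (0.5, 0.5) averages the 2x2 corner block -> 2.5; (1, 2) hits a grid point exactly -> 6.
    assert np.allclose(values.numpy(), [2.5, 6.0])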
### grid graph onto voxel grid
def grid_properties(positions, properties, voxels_per_dimension):
"""
We construct a meshgrid over the min/max range of positions, which act as the bin boundaries.
Args:
positions: [n_node_per_graph, 3]
properties: [n_node_per_graph, num_properties]
voxels_per_dimension: int
Returns:
[voxels_per_dimension, voxels_per_dimension, voxels_per_dimension, num_properties]
"""
binned_properties, bin_edges = histogramdd(positions,
bins=voxels_per_dimension,
weights=properties) # n_node_per_graph, num_properties
bin_count, _ = histogramdd(positions,
bins=voxels_per_dimension) # n_node_per_graph
# binned_properties /= bin_count[:, None]# n_node_per_graph, num_properties
binned_properties = tf.where(bin_count[..., None] > 0, binned_properties/bin_count[..., None], 0.)
return binned_properties
@tf.function
def grid_graph_smoothing(gridded_graphs):
filter = tf.ones((3, 3, 3, 1, 1)) / (3.*3.*3.)
return tf.nn.conv3d(gridded_graphs[..., None],
filters=filter,
strides=[1, 1, 1, 1, 1],
padding='SAME')[..., 0]
def grid_graphs(graphs, voxels_per_dimension):
"""
Grid the nodes onto a voxel 3D meshgrid.
Args:
        graphs: GraphsTuple, a batch of graphs
        voxels_per_dimension: int, number of voxels per spatial dimension
Returns:
[batch, voxels_per_dimension, voxels_per_dimension, voxels_per_dimension, num_properties]
"""
batched_graphs = graphs
positions = batched_graphs.nodes[..., :3]#num_graphs, n_node_per_graph, 3
properties = batched_graphs.nodes[..., 3:]#num_graphs, n_node_per_graph, num_properties
#[batch, voxels_per_dimension, voxels_per_dimension, voxels_per_dimension, num_properties]
gridded_graphs = tf.vectorized_map(lambda args: grid_properties(*args, voxels_per_dimension), (positions, properties))#[batch, voxels_per_dimension, voxels_per_dimension, voxels_per_dimension, num_properties]
# #smooth out
smooth_gridded_graphs = tf.vectorized_map(lambda graph: grid_graph_smoothing(graph), tf.transpose(gridded_graphs, (4, 0, 1, 2, 3)))
smooth_gridded_graphs = tf.transpose(smooth_gridded_graphs, (1, 2, 3, 4, 0))
return smooth_gridded_graphs
def build_example_dataset(num_examples, batch_size, num_blobs=3, num_nodes=64**3, image_dim=256):
"""
Creates an example dataset
Args:
num_examples: int, number of examples in an epoch
batch_size: int, ideally should divide num_examples
num_blobs: number of components in the 3D medium
        num_nodes: int, number of nodes (particles) per example graph
        image_dim: int, number of pixels per image dimension
    Returns:
        Dataset of (GraphsTuple,
            image [batch, image_dim, image_dim, 1])
"""
def _single_blob(positions):
# all same weight
weight = tf.random.uniform(shape=(), minval=1., maxval=1.)
shift = tf.random.uniform(shape=(3,))
min_blob_size = 0.1*1./num_blobs
max_blob_size = 0.5*1./num_blobs
lengthscale = tf.random.uniform(shape=(), minval=min_blob_size, maxval=max_blob_size)
density = weight * tf.math.exp(-0.5 * tf.linalg.norm(positions - shift, axis=-1) ** 2 / lengthscale ** 2)
return density
def _map(i):
positions = tf.random.uniform(shape=(num_nodes, 3))
density = _single_blob(positions)
for _ in range(num_blobs-1):
density += _single_blob(positions)
image = grid_properties(positions[:,:2], density[:, None], image_dim)#[image_dim, image_dim, 1]
image += tfp.stats.percentile(image, 5) * tf.random.normal(shape=image.shape)
image = tf.math.log(tf.math.maximum(image, 1e-5))
log_properties = tf.math.log(tf.math.maximum(density, 1e-10))
nodes = tf.concat([positions, log_properties[:, None]], axis=-1)
n_node = tf.shape(nodes)[:1]
data_dict = dict(nodes=nodes,n_node=n_node, n_edge=tf.zeros_like(n_node))
return (data_dict, image)
dataset = tf.data.Dataset.range(num_examples)
dataset = dataset.map(_map).batch(batch_size)
#batch fixing mechanism
dataset = dataset.map(lambda data_dict, image: (batch_graph_data_dict(data_dict), image))
dataset = dataset.map(lambda data_dict, image: (GraphsTuple(**data_dict,
edges=None, receivers=None, senders=None, globals=None), image))
dataset = dataset.map(lambda batched_graphs, image: (graph_unbatch_reshape(batched_graphs), image))
# dataset = dataset.cache()
return dataset
def batch_graph_data_dict(batched_data_dict):
"""
After running dataset.batch() on data_dict representation of GraphTuple, correct the batch dimensions.
Args:
batched_data_dict: dict(
nodes[num_graphs, n_node_per_graph, F_nodes],
edges[num_graphs, n_edge_per_graph, F_edges],
senders[num_graphs, n_edge_per_graph],
receivers[num_graphs, n_edge_per_graph],
globals[num_graphs, 1, F_globals],
n_node[num_graphs, 1],
n_edge[num_graphs, 1])
Returns:
batched_data_dict representing a batched GraphTuple:
dict(
nodes[num_graphs, n_node_per_graph, F_nodes],
edges[num_graphs, n_edge_per_graph, F_edges],
senders[num_graphs, n_edge_per_graph],
receivers[num_graphs, n_edge_per_graph],
globals[num_graphs, F_globals],
n_node[num_graphs],
n_edge[num_graphs])
"""
if "globals" in batched_data_dict.keys():
batched_data_dict["globals"] = batched_data_dict["globals"][:,0,:]
if "n_node" in batched_data_dict.keys():
batched_data_dict['n_node'] = batched_data_dict['n_node'][:,0]
if "n_edge" in batched_data_dict.keys():
batched_data_dict['n_edge'] = batched_data_dict['n_edge'][:,0]
return batched_data_dict
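def test_batch_graph_data_dict():
    # Minimal usage sketch (not from the original repo): dataset.batch() leaves a spurious middle axis on
    # globals/n_node/n_edge, which batch_graph_data_dict squeezes away.
    batched = dict(nodes=tf.zeros((2, 5, 3)),
                   globals=tf.zeros((2, 1, 4)),
                   n_node=tf.fill((2, 1), 5),
                   n_edge=tf.fill((2, 1), 0))
    fixed = batch_graph_data_dict(batched)
    assert fixed['globals'].shape == (2, 4)
    assert fixed['n_node'].shape == (2,)
    assert fixed['n_edge'].shape == (2,)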
def test_temperature_schedule():
# log(t_final) = -0.375 * log(n)
import pylab as plt
for log_n in np.linspace(0.5, 7, 20):
n = np.exp(log_n)
get_temp = temperature_schedule(int(n), 15)
tfinal = get_temp(15)
plt.scatter(np.log(n), tf.math.log(tfinal))
plt.show()
def temperature_schedule(num_embedding, num_epochs, S=100, t0=1., thresh=0.95):
"""
Returns callable for temperature schedule.
Assumes logits will be normalised, such that std(logits) = 1
then the schedule will be,
temp = max( final_temp, t0 * exp(alpha * i))
where
alpha = log(final_temp / t0) / num_epochs
and
final_temp is determined through a quick MC search.
Args:
        num_embedding: int, number of embedding vectors (size of the categorical distribution).
num_epochs: int number of epochs after which to be at final temp.
S: int number of samples to use in search
t0: float, initial temperature
thresh: float, mean maximum value when to consider one-hot
Returns:
callable(epoch: tf.int32) -> temperature:tf.float32
        Callable that maps the epoch (tf.int32) to the temperature (tf.float32).
"""
def softmax(r, temp):
return np.exp(r/temp)/np.sum(np.exp(r/temp), axis=-1, keepdims=True)
final_temp = t0
while True:
r = np.random.normal(size=(S, num_embedding))
x = softmax(r, final_temp)
_max = np.max(x, axis=-1)
if np.mean(_max) > thresh:
break
final_temp *= 0.95
t0 = tf.constant(t0, dtype=tf.float32)
final_temp = tf.constant(final_temp, dtype=tf.float32)
alpha = tf.constant(np.log(final_temp / t0)/num_epochs, dtype=tf.float32)
def _get_temperature(step):
step = tf.cast(tf.convert_to_tensor(step), dtype=tf.float32)
return tf.math.maximum(final_temp, t0 * tf.math.exp(alpha * step))
return _get_temperature
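def test_temperature_schedule_usage():
    # Minimal usage sketch (not from the original repo): the returned callable anneals from t0 towards the
    # searched final temperature as the epoch increases.
    get_temp = temperature_schedule(num_embedding=128, num_epochs=10, t0=1.)
    assert float(get_temp(0)) == 1.
    assert float(get_temp(10)) <= float(get_temp(0))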
```
#### File: models/graph_vae_GCD/main.py
```python
import sys
sys.path.insert(1, '/data/s2675544/git/neural_deprojection/')
from functools import partial
from graph_nets.graphs import GraphsTuple
from neural_deprojection.models.identify_medium_GCD.model_utils import decode_examples
from neural_deprojection.models.graph_vae_GCD.graph_VAE_utils import Model, DiscreteGraphVAE, EncoderNetwork3D, DecoderNetwork3D
from neural_deprojection.graph_net_utils import vanilla_training_loop, TrainOneEpoch, build_log_dir, \
build_checkpoint_dir, batch_dataset_set_graph_tuples, get_distribution_strategy
import glob, os
import tensorflow as tf
import json
import sonnet as snt
MODEL_MAP = {'model': Model}
def build_training(model_type, model_parameters, optimizer_parameters, loss_parameters, strategy=None, **kwargs) -> TrainOneEpoch:
model_cls = MODEL_MAP[model_type]
model = model_cls(**model_parameters, **kwargs)
def build_opt(**kwargs):
opt_type = kwargs.get('opt_type')
if opt_type == 'adam':
learning_rate = kwargs.get('learning_rate', 1e-4)
opt = snt.optimizers.Adam(learning_rate, beta1=1 - 1 / 100, beta2=1 - 1 / 500)
else:
raise ValueError('Opt {} invalid'.format(opt_type))
return opt
def build_loss(**loss_parameters):
def loss(model_outputs, batch):
graph = batch
decoded_graph, nn_index = model_outputs
print('shape', decoded_graph.nodes.shape)
return tf.reduce_mean((tf.gather(graph.nodes[:, 3:], nn_index) - decoded_graph.nodes) ** 2 * tf.constant([0,0,0,1,0,0,0],dtype=graph.nodes.dtype))
return loss
loss = build_loss(**loss_parameters)
opt = build_opt(**optimizer_parameters)
training = TrainOneEpoch(model, loss, opt, strategy=strategy)
return training
def build_dataset(tfrecords, temperature=None, beta=None, batch_size=None):
    """
    Build dataset from a list of tfrecord files (graph batching is currently disabled).
    Args:
        tfrecords: list of str, paths to *.tfrecords files
        temperature, beta, batch_size: currently unused; kept for call-site compatibility
    Returns: Dataset obj.
    """
dataset = tf.data.TFRecordDataset(tfrecords).map(partial(decode_examples,
node_shape=(10,),
edge_shape=(2,),
image_shape=(1024, 1024, 1))) # (graph, image, idx)
dataset = dataset.map(lambda graph_data_dict, img, cluster_idx, projection_idx, vprime:
graph_data_dict).shuffle(buffer_size=50)
dataset = dataset.map(lambda graph_data_dict: GraphsTuple(**graph_data_dict))
# dataset = batch_dataset_set_graph_tuples(all_graphs_same_size=True, dataset=dataset, batch_size=batch_size)
return dataset
def train_ae_3d(data_dir, config):
train_tfrecords = glob.glob(os.path.join(data_dir, 'train', '*.tfrecords'))
test_tfrecords = glob.glob(os.path.join(data_dir, 'test', '*.tfrecords'))
print(f'Number of training tfrecord files : {len(train_tfrecords)}')
print(f'Number of test tfrecord files : {len(test_tfrecords)}')
print(f'Total : {len(train_tfrecords) + len(test_tfrecords)}')
train_dataset = build_dataset(train_tfrecords)
test_dataset = build_dataset(test_tfrecords)
train_one_epoch = build_training(**config)
log_dir = build_log_dir('test_log_dir', config)
checkpoint_dir = build_checkpoint_dir('test_checkpointing', config)
os.makedirs(checkpoint_dir, exist_ok=True)
with open(os.path.join(checkpoint_dir, 'config.json'), 'w') as f:
json.dump(config, f)
# checkpoint = tf.train.Checkpoint(module=train_one_epoch)
# manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir, max_to_keep=3,
# checkpoint_name=train_one_epoch.model.__class__.__name__)
#
# if manager.latest_checkpoint is not None:
# checkpoint.restore(manager.latest_checkpoint)
# print(f"Restored from {manager.latest_checkpoint}")
# output_dir = './output_evaluations'
# os.makedirs(output_dir, exist_ok=True)
#
# property_names = ['vx','vy','vz','rho','U','mass','smoothing_length']
# for i, test_graph in enumerate(iter(test_dataset)):
# input_properties = test_graph.nodes[:,3:].numpy()
# reconstructed_graph = train_one_epoch.model(test_graph)
# decoded_properties = reconstructed_graph.nodes.numpy()
# positions = test_graph.nodes[:,:3].numpy()
# save_dict = dict(positions=positions)
# for j in range(len(property_names)):
# save_dict[f"prop_{property_names[j]}_input"] = input_properties[:, j]
# save_dict[f"prop_{property_names[j]}_decoded"] = decoded_properties[:, j]
# np.savez(os.path.join(output_dir,'test_example_{:04d}.npz'.format(i)), **save_dict)
# if i == 20:
# break
vanilla_training_loop(train_one_epoch=train_one_epoch,
training_dataset=train_dataset,
test_dataset=test_dataset,
num_epochs=100,
early_stop_patience=10,
checkpoint_dir=checkpoint_dir,
log_dir=log_dir,
debug=False)
def main(data_dir):
    # NOTE: 'model_type' and 'model_parameters' are required by build_training and by the log/checkpoint
    # dir builders; default model parameters are assumed here.
    config = dict(model_type='model',
                  model_parameters=dict(),
                  optimizer_parameters=dict(learning_rate=1e-5,
                                            opt_type='adam'),
                  loss_parameters=dict())
train_ae_3d(data_dir, config)
if __name__ == '__main__':
# tfrec_base_dir = '/home/s2675544/data/tf_records'
tfrec_base_dir = '/home/matthijs/Documents/Studie/Master_Astronomy/1st_Research_Project/Data/tf_records'
tfrec_dir = os.path.join(tfrec_base_dir, 'snap_128_tf_records')
main(tfrec_dir)
```
#### File: models/graph_VAE_SCD/graph_VAE_utils.py
```python
import sys
sys.path.insert(1, '/data/s1825216/git/neural_deprojection/')
from neural_deprojection.graph_net_utils import AbstractModule, \
    histogramdd, efficient_nn_index, gaussian_loss_function, \
    reconstruct_fields_from_gaussians
import tensorflow as tf
# import tensorflow_addons as tfa
from graph_nets import blocks
import sonnet as snt
from graph_nets.modules import SelfAttention
from sonnet.src import utils, once
from tensorflow_probability.python.math.psd_kernels.internal import util
from graph_nets.utils_tf import fully_connect_graph_static, fully_connect_graph_dynamic, concat
from graph_nets.utils_np import graphs_tuple_to_networkxs, networkxs_to_graphs_tuple, get_graph
import numpy as np
import networkx as nx
from scipy.spatial import cKDTree
import time
from graph_nets.graphs import GraphsTuple
import tensorflow_probability as tfp
class MultiHeadLinear(AbstractModule):
"""Linear module, optionally including bias."""
def __init__(self,
output_size: int,
num_heads: int = 1,
with_bias: bool = True,
w_init=None,
b_init=None,
name=None):
"""Constructs a `Linear` module.
Args:
output_size: Output dimensionality.
with_bias: Whether to include bias parameters. Default `True`.
w_init: Optional initializer for the weights. By default the weights are
initialized truncated random normal values with a standard deviation of
`1 / sqrt(input_feature_size)`, which is commonly used when the inputs
are zero centered (see https://arxiv.org/abs/1502.03167v3).
b_init: Optional initializer for the bias. By default the bias is
initialized to zero.
name: Name of the module.
"""
super(MultiHeadLinear, self).__init__(name=name)
self.output_size = output_size
self.with_bias = with_bias
self.w_init = w_init
self.num_heads = num_heads
if with_bias:
self.b_init = b_init if b_init is not None else snt.initializers.Zeros()
elif b_init is not None:
raise ValueError("When not using a bias the b_init must be None.")
@once.once
def _initialize(self, inputs: tf.Tensor):
"""Constructs parameters used by this module."""
utils.assert_minimum_rank(inputs, 2)
input_size = inputs.shape[-1]
if input_size is None: # Can happen inside an @tf.function.
raise ValueError("Input size must be specified at module build time.")
self.input_size = input_size
if self.w_init is None:
# See https://arxiv.org/abs/1502.03167v3.
stddev = 1 / tf.math.sqrt(self.input_size * 1.0)
self.w_init = snt.initializers.TruncatedNormal(stddev=stddev)
self.w = tf.Variable(
self.w_init([self.num_heads, self.input_size, self.output_size], inputs.dtype),
name="w")
if self.with_bias:
self.b = tf.Variable(
self.b_init([self.num_heads, self.output_size], inputs.dtype), name="b")
def _build(self, inputs: tf.Tensor) -> tf.Tensor:
self._initialize(inputs)
# [num_nodes, node_size].[num_heads, node_size, output_size] -> [num_nodes, num_heads, output_size]
outputs = tf.einsum('ns,hso->nho', inputs, self.w, optimize='optimal')
# outputs = tf.matmul(inputs, self.w)
if self.with_bias:
outputs = tf.add(outputs, self.b)
return outputs
class RelationNetwork(AbstractModule):
"""Implementation of a Relation Network.
See https://arxiv.org/abs/1706.01427 for more details.
The global and edges features of the input graph are not used, and are
allowed to be `None` (the receivers and senders properties must be present).
The output graph has updated, non-`None`, globals.
"""
    def __init__(self,
edge_model_fn,
global_model_fn,
reducer=tf.math.unsorted_segment_mean,
use_globals=False,
name="relation_network"):
"""Initializes the RelationNetwork module.
Args:
edge_model_fn: A callable that will be passed to EdgeBlock to perform
per-edge computations. The callable must return a Sonnet module (or
equivalent; see EdgeBlock for details).
global_model_fn: A callable that will be passed to GlobalBlock to perform
per-global computations. The callable must return a Sonnet module (or
equivalent; see GlobalBlock for details).
      reducer: Reducer to be used by GlobalBlock to aggregate edges. Defaults to
        tf.math.unsorted_segment_mean.
name: The module name.
"""
super(RelationNetwork, self).__init__(name=name)
self._edge_block = blocks.EdgeBlock(
edge_model_fn=edge_model_fn,
use_edges=False,
use_receiver_nodes=True,
use_sender_nodes=True,
use_globals=use_globals)
self._global_block = blocks.GlobalBlock(
global_model_fn=global_model_fn,
use_edges=True,
use_nodes=False,
use_globals=use_globals,
edges_reducer=reducer)
def _build(self, graph):
"""Connects the RelationNetwork.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, except for the edges
and global properties which may be `None`.
Returns:
A `graphs.GraphsTuple` with updated globals.
Raises:
ValueError: If any of `graph.nodes`, `graph.receivers` or `graph.senders`
is `None`.
"""
edge_block = self._edge_block(graph)
output_graph = self._global_block(edge_block)
return output_graph
# TODO: give option to feed position in the core network
class EncodeProcessDecode_E(AbstractModule):
"""Full encode-process-decode model.
The model we explore includes three components:
- An "Encoder" graph net, which independently encodes the edge, node, and
global attributes (does not compute relations etc.).
- A "Core" graph net, which performs N rounds of processing (message-passing)
steps. The input to the Core is the concatenation of the Encoder's output
and the previous output of the Core (labeled "Hidden(t)" below, where "t" is
the processing step).
- A "Decoder" graph net, which independently decodes the edge, node, and
global attributes (does not compute relations etc.), on each message-passing
step.
Hidden(t) Hidden(t+1)
| ^
*---------* | *------* | *---------*
| | | | | | | |
Input --->| Encoder | *->| Core |--*->| Decoder |---> Output(t)
| |---->| | | |
*---------* *------* *---------*
"""
def __init__(self,
encoder,
core,
decoder,
name="EncodeProcessDecode_E"):
super(EncodeProcessDecode_E, self).__init__(name=name)
self._encoder = encoder
self._core = core
self._decoder = decoder
def _build(self, input_graph, num_processing_steps, positions):
latent_graph = self._encoder(input_graph, positions)
# for _ in range(num_processing_steps):
# latent_graph = self._core(latent_graph)
# state = (counter, latent_graph)
_, latent_graph = tf.while_loop(cond=lambda const, state: const < num_processing_steps,
body=lambda const, state: (const+1, self._core(state, positions)),
loop_vars=(tf.constant(0), latent_graph))
return self._decoder(latent_graph, positions)
class EncodeProcessDecode_D(AbstractModule):
"""Full encode-process-decode model.
The model we explore includes three components:
- An "Encoder" graph net, which independently encodes the edge, node, and
global attributes (does not compute relations etc.).
- A "Core" graph net, which performs N rounds of processing (message-passing)
steps. The input to the Core is the concatenation of the Encoder's output
and the previous output of the Core (labeled "Hidden(t)" below, where "t" is
the processing step).
- A "Decoder" graph net, which independently decodes the edge, node, and
global attributes (does not compute relations etc.), on each message-passing
step.
Hidden(t) Hidden(t+1)
| ^
*---------* | *------* | *---------*
| | | | | | | |
Input --->| Encoder | *->| Core |--*->| Decoder |---> Output(t)
| |---->| | | |
*---------* *------* *---------*
"""
def __init__(self,
encoder,
core,
decoder,
name="EncodeProcessDecode_D"):
super(EncodeProcessDecode_D, self).__init__(name=name)
self._encoder = encoder
self._core = core
self._decoder = decoder
def _build(self, input_graph, num_processing_steps, positions):
latent_graph = self._encoder(input_graph, positions)
_, latent_graph = tf.while_loop(cond=lambda const, state: const < num_processing_steps,
body=lambda const, state: (const+1, self._core(state, positions)),
loop_vars=(tf.constant(0), latent_graph))
return self._decoder(latent_graph)
class CoreNetwork(AbstractModule):
"""
Core network which can be used in the EncodeProcessDecode network. Consists of a (full) graph network block
and a self attention block.
"""
def __init__(self,
num_heads,
multi_head_output_size,
input_node_size,
name=None):
super(CoreNetwork, self).__init__(name=name)
self.num_heads = num_heads
self.multi_head_output_size = multi_head_output_size
self.output_linear = snt.Linear(output_size=input_node_size)
self.FFN = snt.nets.MLP([32, input_node_size], activate_final=False) # Feed forward network
self.normalization = lambda x: (x - tf.reduce_mean(x)) / tf.math.reduce_std(x)
self.ln1 = snt.LayerNorm(axis=1, eps=1e-6, create_scale=True, create_offset=True)
self.ln2 = snt.LayerNorm(axis=1, eps=1e-6, create_scale=True, create_offset=True)
self.v_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # values
self.k_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # keys
self.q_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # queries
self.self_attention = SelfAttention()
def _build(self, latent, positions=None):
node_values = self.v_linear(latent.nodes)
node_keys = self.k_linear(latent.nodes)
node_queries = self.q_linear(latent.nodes)
attended_latent = self.self_attention(node_values=node_values,
node_keys=node_keys,
node_queries=node_queries,
attention_graph=latent)
output_nodes = tf.reshape(attended_latent.nodes, (-1, self.num_heads * self.multi_head_output_size))
output_nodes = self.ln1(self.output_linear(output_nodes) + latent.nodes)
output_nodes = self.ln2(self.FFN(output_nodes))
output_graph = latent.replace(nodes=output_nodes)
if positions is not None:
prepend_nodes = tf.concat([positions, output_graph.nodes[:, 3:]], axis=1)
output_graph = output_graph.replace(nodes=prepend_nodes)
return output_graph
class EncoderNetwork(AbstractModule):
"""
Encoder network that updates the graph to viable input for the Core network.
    Contains a node block to update the nodes and a relation network to generate edges and globals.
"""
def __init__(self,
edge_model_fn,
node_model_fn,
global_model_fn,
name=None):
super(EncoderNetwork, self).__init__(name=name)
self.node_block = blocks.NodeBlock(node_model_fn,
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=False)
self.relation_network = RelationNetwork(edge_model_fn=edge_model_fn,
global_model_fn=global_model_fn)
def _build(self, input_graph, positions):
latent = self.node_block(input_graph)
if positions is not None:
prepend_nodes = tf.concat([positions, latent.nodes[:, 3:]], axis=1)
latent = latent.replace(nodes=prepend_nodes)
output = self.relation_network(latent)
return output
class DecoderNetwork(AbstractModule):
"""
    Decoder network that maps the encoded (global-only) graph back to node predictions for the Core network.
    Contains a node block that computes new nodes from the graph globals.
"""
def __init__(self,
node_model_fn,
name=None):
super(DecoderNetwork, self).__init__(name=name)
self.node_block = blocks.NodeBlock(node_model_fn,
use_received_edges=False,
use_sent_edges=False,
use_nodes=False,
use_globals=True)
def _build(self, input_graph, positions):
output = self.node_block(input_graph.replace(n_node=tf.constant([positions.shape[0]], dtype=tf.int32)))
output = output._replace(edges=tf.constant(1.))
if positions is not None:
prepend_nodes = tf.concat([positions, output.nodes[:, 3:]], axis=1)
output = output.replace(nodes=prepend_nodes)
return output
def nearest_neighbours_connected_graph(virtual_positions, k):
kdtree = cKDTree(virtual_positions)
dist, idx = kdtree.query(virtual_positions, k=k + 1)
receivers = idx[:, 1:] # N,k
senders = np.arange(virtual_positions.shape[0]) # N
senders = np.tile(senders[:, None], [1, k]) # N,k
receivers = receivers.flatten()
senders = senders.flatten()
graph_nodes = tf.convert_to_tensor(virtual_positions, tf.float32)
graph_nodes.set_shape([None, 3])
receivers = tf.convert_to_tensor(receivers, tf.int32)
receivers.set_shape([None])
senders = tf.convert_to_tensor(senders, tf.int32)
senders.set_shape([None])
n_node = tf.shape(graph_nodes)[0:1]
n_edge = tf.shape(senders)[0:1]
graph_data_dict = dict(nodes=graph_nodes,
edges=tf.zeros((n_edge[0], 1)),
globals=tf.zeros([1]),
receivers=receivers,
senders=senders,
n_node=n_node,
n_edge=n_edge)
return GraphsTuple(**graph_data_dict)
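def test_nearest_neighbours_connected_graph():
    # Minimal usage sketch (not from the original repo): connect each of 100 random 3-D points to its
    # k=6 nearest neighbours, giving 100 * 6 directed edges.
    positions = np.random.uniform(size=(100, 3)).astype(np.float32)
    graph = nearest_neighbours_connected_graph(positions, 6)
    assert graph.nodes.shape == (100, 3)
    assert graph.senders.shape[0] == 100 * 6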
class Model(AbstractModule):
"""Model inherits from AbstractModule, which contains a __call__ function which executes a _build function
that is to be specified in the child class. So for example:
model = Model(), then model() returns the output of _build()
AbstractModule inherits from snt.Module, which has useful functions that can return the (trainable) variables,
so the Model class has this functionality as well
An instance of the RelationNetwork class also inherits from AbstractModule,
so it also executes its _build() function when called and it can return its (trainable) variables
A RelationNetwork contains an edge block and a global block:
The edge block generally uses the edge, receiver, sender and global attributes of the input graph
to calculate the new edges.
In our case we currently only use the receiver and sender attributes to calculate the edges.
The global block generally uses the aggregated edge, aggregated node and the global attributes of the input graph
to calculate the new globals.
In our case we currently only use the aggregated edge attributes to calculate the new globals.
As input the RelationNetwork needs two (neural network) functions:
one to calculate the new edges from receiver and sender nodes
and one to calculate the globals from the aggregated edges.
The new edges will be a vector with size 16 (i.e. the output of the first function in the RelationNetwork)
The new globals will also be a vector with size 16 (i.e. the output of the second function in the RelationNetwork)
The image_cnn downscales the image (currently from 4880x4880 to 35x35) and encodes the image in 16 channels.
So we (currently) go from (4880,4880,1) to (35,35,16)
"""
def __init__(self,
activation='leaky_relu',
mlp_size=16,
cluster_encoded_size=11,
num_heads=10,
core_steps=10, name=None):
super(Model, self).__init__(name=name)
if activation == 'leaky_relu':
self.activation = tf.nn.leaky_relu
elif activation == 'relu':
self.activation = tf.nn.relu
else:
self.activation = tf.nn.relu
self.epd_encoder = EncodeProcessDecode_E(encoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True, activation=self.activation),
node_model_fn=lambda: snt.Linear(cluster_encoded_size),
global_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True, activation=self.activation)),
core=CoreNetwork(num_heads=num_heads,
multi_head_output_size=cluster_encoded_size,
input_node_size=cluster_encoded_size),
decoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True, activation=self.activation),
node_model_fn=lambda: snt.Linear(cluster_encoded_size),
global_model_fn=lambda: snt.nets.MLP([32, 32, 64], activate_final=True, activation=self.activation)))
self.epd_decoder = EncodeProcessDecode_D(encoder=DecoderNetwork(node_model_fn=lambda: snt.nets.MLP([32, 32, cluster_encoded_size], activate_final=True, activation=self.activation)),
core=CoreNetwork(num_heads=num_heads,
multi_head_output_size=cluster_encoded_size,
input_node_size=cluster_encoded_size),
decoder=snt.Sequential([RelationNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True, activation=self.activation),
global_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True, activation=self.activation)),
blocks.NodeBlock(
node_model_fn=lambda: snt.nets.MLP(
[cluster_encoded_size-3], activate_final=True, activation=self.activation),
use_received_edges=True,
use_sent_edges=True,
use_nodes=True,
use_globals=True)
])
)
        self._core_steps = core_steps
        self._step = None  # must be set externally (model.step = epoch) before calling the model
@property
def step(self):
if self._step is None:
raise ValueError("Need to set step idx variable. model.step = epoch")
return self._step
@step.setter
def step(self, value):
self._step = value
def _build(self, batch, *args, **kwargs):
graph = batch
# del img
# del c
positions = graph.nodes[:, :3]
for i in range(3, 11):
image_before, _ = histogramdd(positions[:, :2], bins=50, weights=graph.nodes[:, i])
image_before -= tf.reduce_min(image_before)
image_before /= tf.reduce_max(image_before)
tf.summary.image(f"{i}_xy_image_before", image_before[None, :, :, None], step=self.step)
tf.summary.scalar(f"properties{i}_std_before", tf.math.reduce_std(graph.nodes[:,i]), step=self.step)
t0 = time.time()
encoded_graph = self.epd_encoder(graph, self._core_steps, positions)
encoded_graph = encoded_graph._replace(nodes=None, edges=None, receivers=None, senders=None) # only pass through globals for sure
# decoded_graph = self.epd_decoder(encoded_graph, self._core_steps, positions)
t1 = time.time()
print(f'encoder time {t1-t0} s')
number_of_nodes = positions.shape[0]
decode_positions = tf.random.uniform(shape=(number_of_nodes, 3),
minval=tf.reduce_min(positions, axis=0),
maxval=tf.reduce_max(positions, axis=0))
t2 = time.time()
print(f'decode pos time {t2 - t1} s')
# encoded_graph = encoded_graph._replace(nodes=decode_positions)
random_pos_graph = nearest_neighbours_connected_graph(decode_positions, 6)
t3 = time.time()
print(f'random pos time {t3 - t2} s')
random_pos_graph = random_pos_graph._replace(nodes=None, edges=None, globals=encoded_graph.globals.numpy())
t4 = time.time()
print(f'replace pos time {t4 - t3} s')
# encoded_graph = fully_connect_graph_static(encoded_graph) # TODO: only works if batch_size=1, might need to use dynamic
t4 = time.time()
print(f'random pos time {t4 - t3} s')
decoded_graph = self.epd_decoder(random_pos_graph, self._core_steps, decode_positions)
t5 = time.time()
print(f'decoder time {t5 - t4} s')
nn_index = efficient_nn_index(decode_positions, positions)
t6 = time.time()
print(f'nn time {t6 - t5} s')
for i in range(8):
image_after, _ = histogramdd(decode_positions[:, :2], bins=50, weights=decoded_graph.nodes[:, i])
image_after -= tf.reduce_min(image_after)
image_after /= tf.reduce_max(image_after)
tf.summary.image(f"{i+3}_xy_image_after", image_after[None, :, :, None], step=self.step)
tf.summary.scalar(f"properties{i+3}_std_after", tf.math.reduce_std(decoded_graph.nodes[:,i]), step=self.step)
return decoded_graph, nn_index
class DiscreteGraphVAE(AbstractModule):
def __init__(self, encoder_fn: AbstractModule,
decode_fn: AbstractModule,
embedding_dim: int = 64,
num_embedding: int = 1024,
num_gaussian_components: int=128,
num_token_samples: int = 1,
num_properties: int = 10,
temperature: float = 50.,
beta: float = 1.,
encoder_kwargs: dict = None,
decode_kwargs: dict = None,
name=None):
super(DiscreteGraphVAE, self).__init__(name=name)
# (num_embedding, embedding_dim)
self.temperature = temperature
self.beta = beta
self.embeddings = tf.Variable(initial_value=tf.random.truncated_normal((num_embedding, embedding_dim)),
name='embeddings')
self.encoder = encoder_fn(num_output=num_embedding, output_size=embedding_dim,
**encoder_kwargs)
self.decoder = decode_fn(num_output=num_gaussian_components, output_size=num_properties*10,
**decode_kwargs)
self.num_token_samples = num_token_samples
self.num_properties = num_properties
        self.num_embedding = num_embedding
        self._step = None  # must be set externally (model.step = epoch) before calling the model
# @tf.function(input_signature=tf.TensorSpec(shape=[None], dtype=tf.float32)) # what is the shape ???
# def sample_encoder(self, graph):
# return self.encoder(graph)
@tf.function(input_signature=[tf.TensorSpec([None,3], dtype=tf.float32),
tf.TensorSpec([None,None], dtype=tf.float32),
tf.TensorSpec([], dtype=tf.float32)])
def sample_decoder(self, positions, logits, temperature):
token_distribution = tfp.distributions.RelaxedOneHotCategorical(temperature, logits=logits)
token_samples_onehot = token_distribution.sample((1,),
name='token_samples')
token_sample_onehot = token_samples_onehot[0]#[n_node, num_embedding]
token_sample = tf.matmul(token_sample_onehot, self.embeddings) # [n_node, embedding_dim]
n_node = tf.shape(token_sample)[0]
latent_graph = GraphsTuple(nodes=token_sample,
edges=None,
globals=tf.constant([0.], dtype=tf.float32),
senders=None,
receivers=None,
n_node=[n_node],
n_edge=tf.constant([0], dtype=tf.int32)) # [n_node, embedding_dim]
latent_graph = fully_connect_graph_dynamic(latent_graph)
gaussian_tokens = self.decoder(latent_graph) # nodes=[num_gaussian_components, component_dim]
reconstructed_fields = reconstruct_fields_from_gaussians(gaussian_tokens, positions)
return reconstructed_fields
@property
def step(self):
if self._step is None:
raise ValueError("Need to set step idx variable. model.step = epoch")
return self._step
@step.setter
def step(self, value):
self._step = value
def _build(self, batch, **kwargs) -> dict:
# graph, temperature, beta = batch
graph = batch
encoded_graph = self.encoder(graph)
print('encoded_graph', encoded_graph)
print(dir(encoded_graph.nodes))
        # NOTE: GraphsTuple.replace returns a new tuple, so as written this call has no effect.
        encoded_graph.replace(nodes=encoded_graph.nodes[10000:])
n_node = encoded_graph.n_node
# nodes = [n_node, num_embeddings]
# node = [num_embeddings] -> log(p_i) = logits
# -> [S, n_node, embedding_dim]
logits = encoded_graph.nodes # [n_node, num_embeddings]
log_norm = tf.math.reduce_logsumexp(logits, axis=1) # [n_node]
token_distribution = tfp.distributions.RelaxedOneHotCategorical(self.temperature, logits=logits)
token_samples_onehot = token_distribution.sample((self.num_token_samples,),
name='token_samples') # [S, n_node, num_embeddings]
def _single_decode(token_sample_onehot):
"""
Args:
token_sample: [n_node, embedding_dim]
Returns:
log_likelihood: scalar
kl_term: scalar
"""
token_sample = tf.matmul(token_sample_onehot, self.embeddings) # [n_node, embedding_dim] # = z ~ q(z|x)
latent_graph = GraphsTuple(nodes=token_sample,
edges=None,
globals=tf.constant([0.], dtype=tf.float32),
senders=None,
receivers=None,
n_node=n_node,
n_edge=tf.constant([0], dtype=tf.int32)) # [n_node, embedding_dim]
print('latent_graph', latent_graph)
latent_graph = fully_connect_graph_dynamic(latent_graph)
gaussian_tokens = self.decoder(latent_graph) # nodes=[num_gaussian_components, component_dim]
_, log_likelihood = gaussian_loss_function(gaussian_tokens.nodes, graph)
# [n_node, num_embeddings].[n_node, num_embeddings]
sum_selected_logits = tf.math.reduce_sum(token_sample_onehot * logits, axis=1) # [n_node]
kl_term = sum_selected_logits - tf.cast(self.num_embedding, tf.float32) * tf.cast(log_norm, tf.float32) + \
tf.cast(self.num_embedding, tf.float32) * tf.math.log(tf.cast(self.num_embedding, tf.float32)) # [n_node]
kl_term = self.beta * tf.reduce_mean(kl_term)
return log_likelihood, kl_term
print('token_samples_onehot',token_samples_onehot)
log_likelihood_samples, kl_term_samples = _single_decode(token_samples_onehot[0]) # tf.vectorized_map(_single_decode, token_samples_onehot) # [S],[S]
# good metric = average entropy of embedding usage! The more precisely embeddings are selected the lower the entropy.
log_prob_tokens = logits - log_norm[:, None]#num_tokens, num_embeddings
entropy = -tf.reduce_sum(log_prob_tokens * tf.math.exp(log_prob_tokens), axis=1)#num_tokens
perplexity = 2.**(-entropy/tf.math.log(2.))
mean_perplexity = tf.reduce_mean(perplexity)
var_exp = tf.reduce_mean(log_likelihood_samples)
tf.summary.scalar('var_exp', var_exp, step=self._step)
kl_term=tf.reduce_mean(kl_term_samples)
tf.summary.scalar('kl_term', kl_term, step=self._step)
tf.summary.scalar('mean_perplexity', mean_perplexity, step=self._step)
return dict(loss=tf.reduce_mean(log_likelihood_samples - kl_term_samples),
var_exp=var_exp,
kl_term=tf.reduce_mean(kl_term_samples),
mean_perplexity=mean_perplexity)
class GraphMappingNetwork(AbstractModule):
"""
Encoder network that updates the graph to viable input for the DiscreteGraphVAE network.
"""
def __init__(self,
num_output: int,
output_size: int,
node_size: int = 4,
edge_size: int = 4,
starting_global_size: int = 10,
inter_graph_connect_prob: float = 0.01,
crossing_steps: int = 4,
reducer=tf.math.unsorted_segment_mean,
properties_size=10,
name=None):
super(GraphMappingNetwork, self).__init__(name=name)
self.num_output = num_output
self.output_size = output_size
self.crossing_steps=crossing_steps
self.empty_node_variable = tf.Variable(initial_value=tf.random.truncated_normal((node_size,)),
name='empty_token_node')
# values for different kinds of edges in the graph, which will be learned
self.intra_graph_edge_variable = tf.Variable(initial_value=tf.random.truncated_normal((edge_size,)),
name='intra_graph_edge_var')
self.intra_token_graph_edge_variable = tf.Variable(initial_value=tf.random.truncated_normal((edge_size,)),
name='intra_token_graph_edge_var')
self.inter_graph_edge_variable = tf.Variable(initial_value=tf.random.truncated_normal((edge_size,)),
name='inter_graph_edge_var')
self.starting_global_variable = tf.Variable(initial_value=tf.random.truncated_normal((starting_global_size,)),
name='starting_global_var')
self.inter_graph_connect_prob = inter_graph_connect_prob
self.projection_node_block = blocks.NodeBlock(lambda: snt.Linear(node_size, name='project'),
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=False)
node_model_fn = lambda: snt.nets.MLP([node_size, node_size], activate_final=True, activation=tf.nn.leaky_relu)
edge_model_fn = lambda: snt.nets.MLP([edge_size, edge_size], activate_final=True, activation=tf.nn.leaky_relu)
global_model_fn = lambda: snt.nets.MLP([starting_global_size, starting_global_size], activate_final=True,
activation=tf.nn.leaky_relu)
self.edge_block = blocks.EdgeBlock(edge_model_fn,
use_edges=True,
use_receiver_nodes=True,
use_sender_nodes=True,
use_globals=True)
self.node_block = blocks.NodeBlock(node_model_fn,
use_received_edges=True,
use_sent_edges=True,
use_nodes=True,
use_globals=True)
self.global_block = blocks.GlobalBlock(global_model_fn,
use_edges=True,
use_nodes=True,
use_globals=True,
edges_reducer=reducer)
self.output_projection_node_block = blocks.NodeBlock(lambda: snt.Linear(self.output_size, name='project'),
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=False)
def _build(self, graph):
n_edge = graph.n_edge[0]
graph = graph.replace(edges=tf.tile(self.intra_graph_edge_variable[None, :], [n_edge, 1]))
graph = self.projection_node_block(graph) # [n_nodes, node_size]
n_node = tf.shape(graph.nodes)[0]
# create fully connected output token nodes
token_start_nodes = tf.tile(self.empty_node_variable[None, :], [self.num_output, 1])
        graph = graph.replace(n_node=tf.reshape(n_node, [1]))  # replace() returns a new GraphsTuple, so reassign
token_graph = GraphsTuple(nodes=token_start_nodes,
edges=None,
globals=tf.constant([0.], dtype=tf.float32),
senders=None,
receivers=None,
n_node=tf.constant([self.num_output], dtype=tf.int32),
n_edge=tf.constant([0], dtype=tf.int32))
token_graph = fully_connect_graph_static(token_graph)
n_edge = token_graph.n_edge[0]
token_graph = token_graph.replace(edges=tf.tile(self.intra_token_graph_edge_variable[None, :], [n_edge, 1]))
concat_graph = concat([graph, token_graph], axis=0) # n_node = [n_nodes, n_tokes]
concat_graph = concat_graph.replace(n_node=tf.reduce_sum(concat_graph.n_node, keepdims=True),
n_edge=tf.reduce_sum(concat_graph.n_edge, keepdims=True)) # n_node=[n_nodes+n_tokens]
# add random edges between
# choose random unique set of nodes in graph, choose random set of nodes in token_graph
gumbel = -tf.math.log(-tf.math.log(tf.random.uniform((n_node,))))
n_connect_edges = tf.cast(tf.multiply(tf.constant([self.inter_graph_connect_prob]), tf.cast(n_node, tf.float32)), tf.int32)
_, graph_senders = tf.nn.top_k(gumbel, n_connect_edges[0])
token_graph_receivers = n_node + tf.random.uniform(shape=n_connect_edges, minval=0, maxval=self.num_output,
dtype=tf.int32)
senders = tf.concat([concat_graph.senders, graph_senders, token_graph_receivers],
axis=0) # add bi-directional senders + receivers
receivers = tf.concat([concat_graph.receivers, token_graph_receivers, graph_senders], axis=0)
inter_edges = tf.tile(self.inter_graph_edge_variable[None, :], tf.concat([2 * n_connect_edges, tf.constant([1], dtype=tf.int32)], axis=0)) # 200 = 10000(n_nodes) * 0.01 * 2
edges = tf.concat([concat_graph.edges, inter_edges], axis=0)
concat_graph = concat_graph.replace(senders=senders, receivers=receivers, edges=edges,
n_edge=concat_graph.n_edge[0] + 2 * n_connect_edges[0], # concat_graph.n_edge[0] + 2 * n_connect_edges
globals=self.starting_global_variable[None, :])
latent_graph = concat_graph
print('concat_graph', concat_graph)
for _ in range(
self.crossing_steps): # this would be that theoretical crossing time for information through the graph
input_nodes = latent_graph.nodes
latent_graph = self.edge_block(latent_graph)
latent_graph = self.node_block(latent_graph)
latent_graph = self.global_block(latent_graph)
latent_graph = latent_graph.replace(nodes=latent_graph.nodes + input_nodes) # residual connections
latent_graph = latent_graph.replace(nodes=latent_graph.nodes[n_node:])
output_graph = self.output_projection_node_block(latent_graph)
return output_graph
class EncoderNetwork3D(GraphMappingNetwork):
def __init__(self, num_output: int,
output_size: int,
inter_graph_connect_prob: float = 0.01,
reducer=tf.math.unsorted_segment_mean,
starting_global_size=4,
node_size=64,
edge_size=4,
crossing_steps=4,
name=None):
super(EncoderNetwork3D, self).__init__(num_output=num_output,
output_size=output_size,
inter_graph_connect_prob=inter_graph_connect_prob,
reducer=reducer,
starting_global_size=starting_global_size,
node_size=node_size,
edge_size=edge_size,
crossing_steps=crossing_steps,
name=name)
class DecoderNetwork3D(GraphMappingNetwork):
def __init__(self, num_output: int,
output_size: int,
inter_graph_connect_prob: float = 0.01,
reducer=tf.math.unsorted_segment_mean,
starting_global_size=4,
node_size=64,
edge_size=4,
crossing_steps=4,
name=None):
super(DecoderNetwork3D, self).__init__(num_output=num_output,
output_size=output_size,
inter_graph_connect_prob=inter_graph_connect_prob,
reducer=reducer,
starting_global_size=starting_global_size,
node_size=node_size,
edge_size=edge_size,
crossing_steps=crossing_steps,
name=name)
```
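The random inter-graph edges created in `GraphMappingNetwork._build` above rely on the Gumbel-top-k trick: adding independent Gumbel noise to equal scores and taking the top k indices draws k distinct node indices uniformly without replacement. A minimal standalone sketch (sizes are illustrative, not taken from the model):
```python
import tensorflow as tf

n_node = 10     # illustrative number of graph nodes
n_connect = 3   # illustrative number of inter-graph edges to draw

# Gumbel noise: -log(-log(U)) with U ~ Uniform(0, 1)
gumbel = -tf.math.log(-tf.math.log(tf.random.uniform((n_node,))))

# top_k over pure noise returns k distinct indices, uniformly and without replacement
_, sampled_senders = tf.nn.top_k(gumbel, n_connect)
print(sampled_senders)  # e.g. [7 2 5], all distinct
```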
#### File: models/identify_medium_GCD/generate_data_with_voxel_data.py
```python
import sys
sys.path.insert(1, '/data/s2675544/git/neural_deprojection/')
sys.path.insert(1, '/home/matthijs/git/neural_deprojection/')
import os
import glob
import yt
import h5py
import soxs
import pyxsim
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import neural_deprojection.models.identify_medium_GCD.gadget as g
from tqdm import tqdm
from astropy.io import fits
from multiprocessing import Pool, Lock
mp_lock = Lock()
def grid_properties(positions, properties, n=128):
# positions.shape = (N,D)
# properties.shape = (N,P)
voxels = []
# bins are n+1 equally spaced edges
bins = [np.linspace(positions[:, d].min(), positions[:, d].max(), n+1) for d in range(positions.shape[1])]
for p in range(properties.shape[1]):
sum_properties, _ = np.histogramdd(positions, bins=bins, weights=properties[:, p])
count, _ = np.histogramdd(positions, bins=bins)
mean_properties = np.where(count == 0, 0, sum_properties/count)
voxels.append(mean_properties)
# central point of bins is grid point center
center_points = [(b[:-1] + b[1:]) / 2. for b in bins]
return np.stack(voxels, axis=-1), center_points
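# Illustrative (hypothetical) usage of grid_properties: bin 1000 random particles carrying two
# properties onto an 8^3 grid. 'voxels' gets shape (8, 8, 8, 2) with the per-voxel mean of each
# property, and 'centers' is a list of three arrays holding the voxel centre coordinates per axis.
#
#   example_positions = np.random.uniform(size=(1000, 3))
#   example_properties = np.random.uniform(size=(1000, 2))
#   voxels, centers = grid_properties(example_positions, example_properties, n=8)
#   assert voxels.shape == (8, 8, 8, 2) and len(centers) == 3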
def _random_ortho_matrix(n):
"""
    Samples a random orthogonal [n, n] matrix (from the Stiefel manifold / O(n) group).
From https://stackoverflow.com/a/38430739
Args:
n: Size of matrix, draws from O(n) group.
Returns: random [n,n] matrix with determinant = +-1
"""
H = np.random.normal(size=(n, n))
Q, R = np.linalg.qr(H)
Q = Q @ np.diag(np.sign(np.diag(R)))
return Q
def _random_special_ortho_matrix(n):
"""
    Samples a random special orthogonal [n, n] matrix (determinant +1) by rejection
    sampling from O(n). From https://stackoverflow.com/a/38430739
    Args:
        n: Size of matrix, draws from SO(n) group.
    Returns: random [n, n] matrix with determinant = +1
"""
det = -1.
while det < 0:
Q = _random_ortho_matrix(n)
det = np.linalg.det(Q)
return Q
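# Quick sanity check (illustrative): a matrix drawn this way is orthogonal with determinant +1,
# i.e. a pure rotation, so using it to rotate positions and velocities preserves lengths and angles.
#
#   R = _random_special_ortho_matrix(3)
#   assert np.allclose(R @ R.T, np.eye(3), atol=1e-10)
#   assert np.isclose(np.linalg.det(R), 1.0)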
def get_box_size(positions):
max_pos = np.max(positions.T, axis=1)
min_pos = np.min(positions.T, axis=1)
box_size = max_pos - min_pos
return box_size
# Check whether a cluster is split by a periodic boundary of the simulation box
def check_split(positions, simulation_box):
box_size = get_box_size(positions)
split_cluster = False
for coord, side in enumerate(box_size):
if side > 0.5 * simulation_box[coord]:
split_cluster = True
return split_cluster
def unsplit_positions(positions, simulation_box):
"""
Move the positions to the center of the simulation box, so they are no longer
split by a periodic boundary.
"""
new_positions = positions
box_size = get_box_size(new_positions)
for coord, side in enumerate(box_size):
half_sim_box = 0.5 * simulation_box[coord]
if side > half_sim_box:
new_positions[:, coord] = np.where(positions[:, coord] > half_sim_box,
positions[:, coord] - half_sim_box,
positions[:, coord] + half_sim_box)
return new_positions
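# Worked example (illustrative): in a simulation box of side 100 along some axis, a cluster split
# across the periodic boundary with particles at x = 2 and x = 98 spans 96 > 50, so check_split
# flags it, and unsplit_positions maps x = 98 -> 48 and x = 2 -> 52, making the cluster contiguous.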
def get_index(cluster_dirname):
if 'AGN' in cluster_dirname:
index = int(cluster_dirname.split('/')[-1][-3:])
else:
index = int(cluster_dirname.split('/')[-3])
return index
def get_simulation_name(cluster):
if cluster.split('/')[-3] == 'Bahamas':
return 'Bahamas'
else:
return 'Magneticum'
def existing_clusters(record_bytes):
"""
Determines which clusters are already made into tfrecords
Args:
record_bytes: raw bytes
Returns: (cluster_idx, projection_idx)
"""
parsed_example = tf.io.parse_single_example(
# Data
record_bytes,
# Schema
dict(
cluster_idx=tf.io.FixedLenFeature([], dtype=tf.string),
projection_idx=tf.io.FixedLenFeature([], dtype=tf.string),
image=tf.io.FixedLenFeature([], dtype=tf.string)
)
)
cluster_idx = tf.io.parse_tensor(parsed_example['cluster_idx'], tf.int32)
projection_idx = tf.io.parse_tensor(parsed_example['projection_idx'], tf.int32)
image = tf.io.parse_tensor(parsed_example['image'], tf.float32)
return cluster_idx, projection_idx, image
@tf.function
def downsample(image):
filter_2d = tf.ones((2, 2, 1, 1)) * 0.25
return tf.nn.conv2d(image[None, :, :, None],
filters=filter_2d, strides=2,
padding='SAME')[0, :, :, 0]
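# downsample is 2x2 mean pooling written as a stride-2 convolution with a constant 0.25 filter;
# each application halves both image dimensions, so three applications take the 2048x2048
# Chandra mock image down to 256x256 (as done in generate_data below).
#
#   img = tf.random.uniform((2048, 2048))
#   assert downsample(img).shape == (1024, 1024)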
def get_clusters(snap_path, defect_clusters):
snap_dir = os.path.basename(snap_path)
# Make a list of clusters in the snapshot
if snap_dir[0:3] == 'AGN':
cluster_dirs = glob.glob(os.path.join(snap_path, '*'))
else:
cluster_dirs = glob.glob(os.path.join(snap_path, '*/*/*'))
# List the indices of bad clusters
print(f'\nNumber of clusters : {len(cluster_dirs)}')
bad_cluster_idx = defect_clusters[snap_dir]['too_small'] + \
defect_clusters[snap_dir]['photon_max']
print(f'Number of viable clusters : {len(cluster_dirs) - len(bad_cluster_idx)}')
# Remove bad clusters from cluster list
bad_cluster_dirs = []
for cluster_dir in cluster_dirs:
if get_index(cluster_dir) in bad_cluster_idx:
bad_cluster_dirs.append(cluster_dir)
for bad_cluster_dir in bad_cluster_dirs:
cluster_dirs.remove(bad_cluster_dir)
return cluster_dirs
def get_dirs_and_filename(cluster):
tail, head = os.path.split(cluster)
while os.path.basename(tail) != 'data':
tail, head = os.path.split(tail)
data_path = tail
if get_simulation_name(cluster) == 'Bahamas':
snap_dir = cluster.split('/')[-2]
cluster_file = os.path.join(cluster, os.path.basename(cluster) + '.hdf5')
else:
snap_dir = cluster.split('/')[-4]
cluster_file = os.path.join(cluster, snap_dir)
return data_path, snap_dir, cluster_file
def load_data_magneticum(cluster_dir):
_, _, cluster_file = get_dirs_and_filename(cluster_dir)
ds = yt.load(cluster_file, long_ids=True)
ad = ds.all_data()
positions = ad['Gas', 'Coordinates'].in_cgs().d
velocities = ad['Gas', 'Velocities'].in_cgs().d
rho = ad['Gas', 'Density'].in_cgs().d
u = ad['Gas', 'InternalEnergy'].in_cgs().d
mass = ad['Gas', 'Mass'].in_cgs().d
smooth = ad['Gas', 'SmoothingLength'].in_cgs().d
codelength_per_cm = ad['Gas', 'Coordinates'].d[0][0] / positions[0][0]
# Dimension of the box in which the particles exist
cluster_box = get_box_size(positions)
    # Dimensions of the whole simulation box
simulation_box = ds.domain_width.in_cgs().d
# Adjust positions for clusters on periodic boundaries
on_periodic_boundary = check_split(positions, simulation_box)
if on_periodic_boundary:
print('Cluster is located on a periodic boundary')
positions = unsplit_positions(positions, simulation_box)
# Center of the positions
positions_center = np.mean(positions.T, axis=1)
# For making the xray image, we still need the center as it's defined in the original data
# Since the mean of positions split by a periodic boundary is not located at the
# center of the cluster, we calculate the mean on the offset positions and undo the offset
# on the calculated center.
cluster_center = np.mean(positions.T, axis=1)
if on_periodic_boundary:
for coord, cluster_box_side in enumerate(cluster_box):
half_sim_box = 0.5 * simulation_box[coord]
if cluster_box_side > half_sim_box:
if cluster_center[coord] > half_sim_box:
cluster_center[coord] -= half_sim_box
else:
cluster_center[coord] += half_sim_box
# Convert to codelength units for pyxsim
cluster_center = cluster_center * codelength_per_cm
properties = np.stack((positions.T[0],
positions.T[1],
positions.T[2],
velocities.T[0],
velocities.T[1],
velocities.T[2],
rho,
u,
mass,
smooth), axis=1)
return properties, cluster_center, positions_center, ds
def load_data_bahamas(cluster_dir, centers):
data_path, snap_dir, cluster_file = get_dirs_and_filename(cluster_dir)
with h5py.File(cluster_file, 'r') as ds:
positions = np.array(ds['PartType0']['Coordinates'])
velocities = np.array(ds['PartType0']['Velocity'])
rho = np.array(ds['PartType0']['Density'])
u = np.array(ds['PartType0']['InternalEnergy'])
mass = np.array(ds['PartType0']['Mass'])
smooth = np.array(ds['PartType0']['SmoothingLength'])
    # Loading any single part of a Bahamas snapshot loads the entire simulation box,
    # so which element of 'filenames' is used below is arbitrary; we take the first.
filenames = glob.glob(os.path.join(data_path, snap_dir, 'data/snapshot_032/*.hdf5'))
snap_file = filenames[0]
ds = yt.load(snap_file)
    # Dimensions of the whole simulation box
simulation_box = ds.domain_width.in_cgs().d
if check_split(positions, simulation_box):
print('Cluster is located on a periodic boundary')
positions = unsplit_positions(positions, simulation_box)
    # Center of the (unsplit) gas particle positions
positions_center = np.mean(positions.T, axis=1)
# We can get the Bahamas cluster centers from the data itself
cluster_center = centers[get_index(cluster_dir)]
properties = np.stack((positions.T[0],
positions.T[1],
positions.T[2],
velocities.T[0],
velocities.T[1],
velocities.T[2],
rho,
u,
mass,
smooth), axis=1)
return properties, cluster_center, positions_center, ds
def load_data(cluster):
data_path, snap_dir, _ = get_dirs_and_filename(cluster)
# Load in particle data and prepare for making an xray image.
if get_simulation_name(cluster) == 'Bahamas':
gdata = g.Gadget(os.path.join(data_path, snap_dir), 'subh', snapnum=32, sim='BAHAMAS')
subhalo_ids = [int(idx) for idx in gdata.read_var('FOF/FirstSubhaloID', verbose=False)]
centers = gdata.read_var('Subhalo/CentreOfPotential', verbose=False)
centers = centers[subhalo_ids[:-1]]
# Convert to codelength by going from cm to Mpc and from Mpc to codelength
centers /= gdata.cm_per_mpc / 0.7
properties, cluster_center, unsplit_positions_center, dataset = load_data_bahamas(cluster_dir=cluster,
centers=centers)
else:
properties, cluster_center, unsplit_positions_center, dataset = load_data_magneticum(cluster_dir=cluster)
sim_box = dataset.domain_width.in_cgs()
return properties, cluster_center, unsplit_positions_center, dataset, sim_box
def save_examples(generator,
save_dir=None,
examples_per_file=32,
num_examples=1,
exp_time=None,
prefix='train'):
"""
Saves a list of GraphTuples to tfrecords.
Args:
generator: generator (or list) of (GraphTuples, image).
Generator is more efficient.
save_dir: dir to save tfrecords in
examples_per_file: int, max number examples per file
num_examples: number of examples
exp_time: exposure time (used in filename)
prefix: string, prefix for the tf record file name
Returns: list of tfrecord files.
"""
print("Saving data in tfrecords.")
# If the directory where to save the tfrecords is not specified, save them in the current working directory
if save_dir is None:
save_dir = os.getcwd()
# If the save directory does not yet exist, create it
os.makedirs(save_dir, exist_ok=True)
# Files will be returned
files = []
# next(data_iterable) gives the next dataset in the iterable
data_iterable = iter(generator)
data_left = True
# Status bar
pbar = tqdm(total=num_examples)
while data_left:
# For every 'examples_per_file' (=32) example directories, create a tf_records file
if exp_time is not None:
exp_time_str = f'_{int(exp_time)}ks_'
else:
exp_time_str = ''
mp_lock.acquire() # make sure no duplicate files are made / replaced
tf_files = glob.glob(os.path.join(save_dir, 'train_*'))
file_idx = len(tf_files)
indices = sorted([int(tf_file.split('.')[0][-4:]) for tf_file in tf_files])
for idx, ind in enumerate(indices):
if idx != ind:
file_idx = idx
break
file = os.path.join(save_dir, prefix + exp_time_str + '{:04d}.tfrecords'.format(file_idx))
files.append(file)
mp_lock.release()
# 'writer' can write to 'file'
with tf.io.TFRecordWriter(file) as writer:
for i in range(examples_per_file + 1):
# Yield a dataset extracted by the generator
try:
(voxels, image, cluster_idx, projection_idx, vprime) = next(data_iterable)
except StopIteration:
data_left = False
break
# Write the graph, image and example_idx to the tfrecord file
# graph = get_graph(graph, 0)
features = dict(
voxels=tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tf.io.serialize_tensor(tf.cast(voxels, tf.float32)).numpy()])),
image=tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tf.io.serialize_tensor(tf.cast(image, tf.float32)).numpy()])),
vprime=tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tf.io.serialize_tensor(tf.cast(vprime, tf.float32)).numpy()])),
cluster_idx=tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tf.io.serialize_tensor(tf.cast(cluster_idx, tf.int32)).numpy()])),
projection_idx=tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tf.io.serialize_tensor(tf.cast(projection_idx, tf.int32)).numpy()]))
)
# Set the features up so they can be written to the tfrecord file
features = tf.train.Features(feature=features)
example = tf.train.Example(features=features)
writer.write(example.SerializeToString())
# Status bar update
pbar.update(1)
print("Saved in tfrecords: {}".format(files))
return files
def decode_examples(record_bytes, voxels_shape=None, image_shape=None):
"""
Decodes raw bytes as returned from tf.data.TFRecordDataset([example_path]) into a GraphTuple and image
Args:
record_bytes: raw bytes
voxels_shape: shape of voxels if known.
image_shape: shape of image if known.
Returns: (GraphTuple, image)
"""
parsed_example = tf.io.parse_single_example(
# Data
record_bytes,
# Schema
dict(
voxels=tf.io.FixedLenFeature([], dtype=tf.string),
image=tf.io.FixedLenFeature([], dtype=tf.string),
cluster_idx=tf.io.FixedLenFeature([], dtype=tf.string),
projection_idx=tf.io.FixedLenFeature([], dtype=tf.string),
vprime=tf.io.FixedLenFeature([], dtype=tf.string)
)
)
voxels = tf.io.parse_tensor(parsed_example['voxels'], tf.float32)
voxels.set_shape(voxels_shape)
image = tf.io.parse_tensor(parsed_example['image'], tf.float32)
image.set_shape(image_shape)
vprime = tf.io.parse_tensor(parsed_example['vprime'], tf.float32)
vprime.set_shape((3, 3))
cluster_idx = tf.io.parse_tensor(parsed_example['cluster_idx'], tf.int32)
cluster_idx.set_shape(())
projection_idx = tf.io.parse_tensor(parsed_example['projection_idx'], tf.int32)
projection_idx.set_shape(())
return voxels, image, cluster_idx, projection_idx, vprime
def plot_data(voxels, xray_image, mayavi=False, mayavi_prop_idx=0, histograms=False, xray=False):
if mayavi:
from mayavi import mlab
mlab.figure(1, bgcolor=(0, 0, 0), size=(800, 800))
mlab.clf()
prop = np.where(voxels[..., mayavi_prop_idx] == 0, -6, np.log10(voxels[..., mayavi_prop_idx]))
mayavi_voxels = mlab.pipeline.scalar_field(prop)
mlab.pipeline.volume(mayavi_voxels)
# mlab.pipeline.iso_surface(mayavi_voxels, contours=24, opacity=0.05)
# mlab.pipeline.scalar_cut_plane(mayavi_voxels, line_width=2.0, plane_orientation='z_axes')
mlab.show()
if histograms:
plot_titles = ['rho', 'U',
'rho gradient x', 'rho gradient y', 'rho gradient z',
'U gradient x', 'U gradient y', 'U gradient z',
'rho laplacian', 'U laplacian']
for i in range(voxels.shape[-1]):
# project the voxels along the z-axis (we take the mean value along the z-axis)
projected_img = np.sum(voxels, axis=-2) / voxels.shape[2]
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
prop_plot = ax.imshow(projected_img[..., i])
fig.colorbar(prop_plot, ax=ax)
ax.set_title(f'{plot_titles[i]}')
plt.savefig(f'{plot_titles[i]}.png')
plt.show()
if xray:
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
ax.imshow(xray_image)
plt.show()
def generate_data(cluster,
tfrecord_dir,
plt_kwargs,
number_of_clusters,
number_of_projections=26,
exp_time=1000.,
redshift=0.20,
number_of_voxels_per_dimension=64):
data_path, snap_dir, _ = get_dirs_and_filename(cluster)
cluster_idx = get_index(cluster)
good_cluster = True
print(f'\nStarting new cluster : {cluster_idx}')
yr = 3.15576e7 # in seconds
pc = 3.085678e18 # in cm
Mpc = 1e6 * pc
M_sun = 1.989e33 # in gram
# Parameters for making the xray images
exp_t = (exp_time, "ks") # exposure time
area = (1000.0, "cm**2") # collecting area
emin = 0.05 # Minimum energy of photons in keV
emax = 11.0 # Maximum energy of photons in keV
metallicty = 0.3 # Metallicity in units of solar metallicity
kt_min = 0.05 # Minimum temperature to solve emission for
n_chan = 1000 # Number of channels in the spectrum
hydrogen_dens = 0.04 # The foreground column density in units of 10^22 cm^{-2}. Only used if absorption is applied.
radius = (4.0, "Mpc") # Radius of the sphere which captures photons
sky_center = [0., 0.] # Ra and dec coordinates of the cluster (which are currently dummy values)
units = np.array([Mpc,
Mpc,
Mpc,
1e-4 * pc / yr,
1e-4 * pc / yr,
1e-4 * pc / yr,
36.689 * 1e-7 * M_sun / pc ** 3, # 36.6891392703865
4.2057 * 1e-7 * (pc / yr) ** 2, # 4.205786556116279
1e8 * M_sun,
1e5 * pc])
properties, cluster_center, positions_center, dataset, simulation_box = load_data(cluster)
photon_source = dataset.sphere(cluster_center, radius)
if properties.shape[0] < 10000:
print(f'\nThe cluster contains {properties.shape[0]} particles '
f'which is less than the threshold of 10000.')
good_cluster = False
# Set a minimum temperature to ignore particles that shouldn't be X-ray emitting,
# set metallicity to 0.3 * Zsolar (should maybe fix later)
# The source model determines the photon energy distribution and which photon energies to look at
source_model = pyxsim.ThermalSourceModel(spectral_model="apec",
emin=emin,
emax=emax,
nchan=n_chan,
Zmet=metallicty,
kT_min=kt_min)
# Simulate the photons from the source located at a certain redshift,
# that pass through a certain area, during a certain exposure time
photons = pyxsim.PhotonList.from_data_source(data_source=photon_source,
redshift=redshift,
area=area,
exp_time=exp_t,
source_model=source_model)
# Calculate the physical diameter of the image with : distance * fov = diameter
chandra_acis_fov = 0.0049160 # in radians
cutout_box_size = photons.parameters["fid_d_a"].d * chandra_acis_fov * Mpc
number_of_photons = int(np.sum(photons["num_photons"]))
if number_of_photons > 5e8:
print(f'\nThe number of photons {number_of_photons} is too large and will take too long to process '
f'so cluster {cluster_idx} is skipped.')
good_cluster = False
def data_generator():
for projection_idx in tqdm(np.arange(number_of_projections)):
print(f'\n\nCluster file: {cluster}')
print(f'Cluster index: {cluster_idx}')
print(f'Clusters done (or in the making): {len(glob.glob(os.path.join(tfrecord_dir, "*")))}')
print(f'Projection: {projection_idx + 1} / {number_of_projections}')
_properties = properties.copy()
print(f'Particles in cluster: {_properties.shape[0]}\n')
# Rotate variables
rot_mat = _random_special_ortho_matrix(3)
_properties[:, :3] = (rot_mat @ _properties[:, :3].T).T
_properties[:, 3:6] = (rot_mat @ _properties[:, 3:6].T).T
center = (rot_mat @ np.array(positions_center).T).T
# Cut out box in 3D space based on diameter of the xray image
lower_lim = center - 0.5 * cutout_box_size * np.array([1, 1, 1])
upper_lim = center + 0.5 * cutout_box_size * np.array([1, 1, 1])
indices = np.where((_properties[:, 0:3] < lower_lim) | (_properties[:, 0:3] > upper_lim))[0]
_properties = np.delete(_properties, indices, axis=0)
# Use neural network friendly units
_properties[:, 0:3] = (_properties[:, 0:3] - center) / units[0:3]
_properties[:, 3:6] = _properties[:, 3:6] / units[3:6]
_properties[:, 6:] = _properties[:, 6:] / units[6:]
# Create voxels from the sph particle properties
voxels, center_points = grid_properties(positions=_properties[:, 0:3],
properties=_properties[:, 6:8],
n=number_of_voxels_per_dimension)
# Calculate gradients and laplacians for the voxel properties
voxels_grads = []
voxels_laplacians = []
for p in range(voxels.shape[-1]):
_voxels_grads = np.gradient(voxels[:, :, :, p],
*center_points) # tuple of three arrays of shape (n,n,n)
voxels_grads.append(np.stack(_voxels_grads, axis=-1)) # stack into shape (n,n,n,3)
_voxels_laplacian = [np.gradient(_voxels_grads[i], center_points[i], axis=i) for i in range(3)]
voxels_laplacians.append(sum(_voxels_laplacian))
# Add the gradients and laplacians as channels to the voxels
voxels_grads = np.concatenate(voxels_grads, axis=-1) # (n,n,n,3*P)
voxels_laplacians = np.stack(voxels_laplacians, axis=-1) # (n,n,n,P)
# Recalculate the voxels with the scaled log of the properties
# 0.7036346061741174, 0.6853849479729205
_properties[:, 6:8] = np.log10(_properties[:, 6:8]) / np.array([0.7036, 0.6853])
voxels, _ = grid_properties(positions=_properties[:, 0:3],
properties=_properties[:, 6:8],
n=number_of_voxels_per_dimension)
voxels_all = np.concatenate([voxels, voxels_grads, voxels_laplacians], axis=-1) # (n,n,n,P+3*P+P)
voxel_center = int(number_of_voxels_per_dimension / 2)
print(f'Center voxel data :', voxels_all[voxel_center, voxel_center, voxel_center, :])
print('Voxels shape: ', voxels_all.shape)
v = np.eye(3)
vprime = rot_mat.T @ v
north_vector = vprime[:, 1]
viewing_vec = vprime[:, 2]
# Finds the events along a certain line of sight
events_z = photons.project_photons(viewing_vec, sky_center, absorb_model="tbabs", nH=hydrogen_dens,
north_vector=north_vector)
# Write the events to a simput file
cluster_projection_identity = number_of_projections * cluster_idx + projection_idx
events_z.write_simput_file(f'snap_{cluster_projection_identity}', overwrite=True)
# Determine which events get detected by the AcisI instrument of Chandra
soxs.instrument_simulator(f'snap_{cluster_projection_identity}_simput.fits',
f'snap_{cluster_projection_identity}_evt.fits',
exp_t,
"chandra_acisi_cy0",
sky_center,
overwrite=True,
ptsrc_bkgnd=False,
foreground=False,
instr_bkgnd=False)
# Soxs creates fits files to store the Chandra mock xray image
soxs.write_image(f'snap_{cluster_projection_identity}_evt.fits',
f'snap_{cluster_projection_identity}_img.fits',
emin=emin,
emax=emax,
overwrite=True)
# Crop the xray image to 2048x2048 and store it in a numpy array
with fits.open(f'snap_{cluster_projection_identity}_img.fits') as hdu:
xray_image = np.array(hdu[0].data, dtype='float32')[1358:3406, 1329:3377] # [2048,2048]
# Remove the fits files created by soxs (now that we have the array, the fits files are no longer needed)
temp_fits_files = glob.glob(os.path.join(os.getcwd(), f'snap_{cluster_projection_identity}_*.fits'))
for file in temp_fits_files:
print(f'Removing : {os.path.basename(file)}')
os.remove(file)
# Downsample xray image from 2048x2048 to 256x256
xray_image = downsample(xray_image)
xray_image = downsample(xray_image)
xray_image = downsample(xray_image)[:, :, None]
# Take the log (base 10) and enforce a minimum value of 1e-5
xray_image = np.log10(np.where(xray_image < 1e-5, 1e-5, xray_image))
# Plot voxel properties
plot_data(voxels, xray_image, **plt_kwargs)
voxels_all = tf.convert_to_tensor(voxels_all, tf.float32)
# This function is a generator, which has the advantage of not keeping used and upcoming data in memory.
yield voxels_all, xray_image, cluster_idx, projection_idx, vprime
if good_cluster:
# Save the data as tfrecords and return the filenames of the tfrecords
save_examples(data_generator(),
save_dir=tfrecord_dir,
examples_per_file=number_of_projections,
num_examples=number_of_projections * number_of_clusters,
exp_time=exp_t[0],
prefix='train')
def main(data_dir,
magneticum_snap_directories,
bahamas_snap_directories,
plt_kwargs,
multi_processing=False,
number_of_voxels_per_dimension=64,
number_of_projections=26,
exposure_time=5000.,
redshift=0.20,
cores=16,
move_to_front=None):
yt.funcs.mylog.setLevel(40) # Suppresses yt status output.
soxs.utils.soxsLogger.setLevel(40) # Suppresses soxs status output.
pyxsim.utils.pyxsimLogger.setLevel(40) # Suppresses pyxsim status output.
# Define the data directories of each simulation
magneticum_data_dir = os.path.join(data_dir, 'Magneticum/Box2_hr')
bahamas_data_dir = os.path.join(data_dir, 'Bahamas')
# Directory where tf records will be saved
my_tf_records_dir = os.path.join(data_dir, 'tf_records')
# Define the full paths of the snapshots in each simulation
magneticum_snap_paths = [os.path.join(magneticum_data_dir, snap_dir) for snap_dir in magneticum_snap_directories]
bahamas_snap_paths = [os.path.join(bahamas_data_dir, snap_dir) for snap_dir in bahamas_snap_directories]
# Per snapshot, define the indices of cluster that are excluded from generating the tf records
defect_clusters = {'snap_128': {'photon_max': [53, 78],
'too_small': []},
'snap_132': {'photon_max': [8, 52, 55, 93, 139, 289],
'too_small': []},
'snap_136': {'photon_max': [96, 137, 51, 315, 216, 55, 102, 101, 20, 3],
'too_small': []},
'AGN_TUNED_nu0_L100N256_WMAP9': {'photon_max': [3],
'too_small': [4, 10] + list(
set(np.arange(20, 200)) - {20, 21, 22, 28})},
'AGN_TUNED_nu0_L400N1024_WMAP9': {'photon_max': [],
'too_small': []}}
# Iterate over the cosmological simulation snapshots
for snap_path in magneticum_snap_paths + bahamas_snap_paths:
print(f'\nSnapshot path : {snap_path}')
snap_dir = os.path.basename(snap_path)
# Directory where the tf records for the specific snapshot will be saved
tfrecord_dir = os.path.join(my_tf_records_dir, snap_dir + '_tf_records')
print(f'Tensorflow records will be saved in : {tfrecord_dir}')
clusters = get_clusters(snap_path, defect_clusters)
n_clusters = len(clusters)
if move_to_front is not None:
clusters.insert(0, clusters.pop([get_index(cluster)
for cluster in clusters].index(move_to_front)))
# Generate tf records from the cluster files
if multi_processing:
params = [(cluster,
tfrecord_dir,
plt_kwargs,
n_clusters,
number_of_projections,
exposure_time,
redshift,
number_of_voxels_per_dimension) for cluster in clusters]
pool = Pool(cores)
pool.starmap(generate_data, params)
else:
for cluster in clusters:
generate_data(cluster=cluster,
tfrecord_dir=tfrecord_dir,
plt_kwargs=plt_kwargs,
number_of_clusters=n_clusters,
number_of_projections=number_of_projections,
exp_time=exposure_time,
redshift=redshift,
number_of_voxels_per_dimension=number_of_voxels_per_dimension)
if __name__ == '__main__':
# Define the directories containing the data
if os.getcwd().split('/')[2] == 's2675544':
print('Running on ALICE')
main_data_dir = '/home/s2675544/data'
# Determine which snapshots to use on ALICE
magneticum_snap_dirs = ['snap_132']
bahamas_snap_dirs = []
# Possible Magneticum dirs ['snap_128', 'snap_132', 'snap_136']
# Possible Bahamas dirs : ['AGN_TUNED_nu0_L100N256_WMAP9', 'AGN_TUNED_nu0_L400N1024_WMAP9']
multi_proc = True
plotting_kwargs = {'mayavi': False,
'mayavi_prop_idx': 0,
'histograms': False,
'xray': False}
mv_to_front = None
else:
main_data_dir = '/home/matthijs/Documents/Studie/Master_Astronomy/1st_Research_Project/data'
print('Running at home')
# Determine which snapshots to use at home
magneticum_snap_dirs = ['snap_132']
bahamas_snap_dirs = []
multi_proc = False
plotting_kwargs = {'mayavi': False,
'mayavi_prop_idx': 0,
'histograms': False,
'xray': False}
mv_to_front = 18
main(data_dir=main_data_dir,
magneticum_snap_directories=magneticum_snap_dirs,
bahamas_snap_directories=bahamas_snap_dirs,
plt_kwargs=plotting_kwargs,
multi_processing=multi_proc,
number_of_voxels_per_dimension=128,
number_of_projections=26,
exposure_time=1000.,
redshift=0.20,
cores=8,
move_to_front=mv_to_front)
```
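The tfrecords written by `save_examples` can be read back with the `decode_examples` function defined in the same file. A minimal sketch, assuming a directory of previously generated records; the import path, record location and shapes (a 64-voxel grid with the 10 channels built above, 256x256 images) are illustrative assumptions:
```python
import glob
import os
import tensorflow as tf

# assumed import path for the module above
from neural_deprojection.models.identify_medium_GCD.generate_data_with_voxel_data import decode_examples

# hypothetical location of previously generated records
tfrecord_dir = '/path/to/tf_records/snap_132_tf_records'
tfrecords = glob.glob(os.path.join(tfrecord_dir, 'train_*.tfrecords'))

dataset = tf.data.TFRecordDataset(tfrecords).map(
    lambda record: decode_examples(record,
                                   voxels_shape=(64, 64, 64, 10),
                                   image_shape=(256, 256, 1)))

for voxels, image, cluster_idx, projection_idx, vprime in dataset.take(1):
    print(voxels.shape, image.shape, int(cluster_idx), int(projection_idx))
```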
#### File: models/identify_medium_GCD/model_utils.py
```python
import tensorflow as tf
import sonnet as snt
import numpy as np
from graph_nets import blocks
from tensorflow_addons.image import gaussian_filter2d
from graph_nets.graphs import GraphsTuple
from graph_nets.modules import _unsorted_segment_softmax, _received_edges_normalizer, GraphIndependent, SelfAttention, GraphNetwork
from graph_nets.utils_tf import fully_connect_graph_dynamic, concat
from sonnet.src import utils, once
from neural_deprojection.graph_net_utils import AbstractModule
class MultiHeadLinear(AbstractModule):
"""Linear module, optionally including bias."""
def __init__(self,
output_size: int,
num_heads: int = 1,
with_bias: bool = True,
w_init=None,
b_init=None,
name=None):
"""Constructs a `Linear` module.
Args:
output_size: Output dimensionality.
with_bias: Whether to include bias parameters. Default `True`.
w_init: Optional initializer for the weights. By default the weights are
initialized truncated random normal values with a standard deviation of
`1 / sqrt(input_feature_size)`, which is commonly used when the inputs
are zero centered (see https://arxiv.org/abs/1502.03167v3).
b_init: Optional initializer for the bias. By default the bias is
initialized to zero.
name: Name of the module.
"""
super(MultiHeadLinear, self).__init__(name=name)
self.output_size = output_size
self.with_bias = with_bias
self.w_init = w_init
self.num_heads = num_heads
if with_bias:
self.b_init = b_init if b_init is not None else snt.initializers.Zeros()
elif b_init is not None:
raise ValueError("When not using a bias the b_init must be None.")
@once.once
def _initialize(self, inputs: tf.Tensor):
"""Constructs parameters used by this module."""
utils.assert_minimum_rank(inputs, 2)
input_size = inputs.shape[-1]
if input_size is None: # Can happen inside an @tf.function.
raise ValueError("Input size must be specified at module build time.")
self.input_size = input_size
if self.w_init is None:
# See https://arxiv.org/abs/1502.03167v3.
stddev = 1 / tf.math.sqrt(self.input_size * 1.0)
self.w_init = snt.initializers.TruncatedNormal(stddev=stddev)
self.w = tf.Variable(
self.w_init([self.num_heads, self.input_size, self.output_size], inputs.dtype),
name="w")
if self.with_bias:
self.b = tf.Variable(
self.b_init([self.num_heads, self.output_size], inputs.dtype), name="b")
def _build(self, inputs: tf.Tensor) -> tf.Tensor:
self._initialize(inputs)
# [num_nodes, node_size].[num_heads, node_size, output_size] -> [num_nodes, num_heads, output_size]
outputs = tf.einsum('ns,hso->nho', inputs, self.w, optimize='optimal')
# outputs = tf.matmul(inputs, self.w)
if self.with_bias:
outputs = tf.add(outputs, self.b)
return outputs
class RelationNetwork(AbstractModule):
"""Implementation of a Relation Network.
See https://arxiv.org/abs/1706.01427 for more details.
The global and edges features of the input graph are not used, and are
allowed to be `None` (the receivers and senders properties must be present).
The output graph has updated, non-`None`, globals.
"""
    def __init__(self,
edge_model_fn,
global_model_fn,
reducer=tf.math.unsorted_segment_mean,
use_globals=False,
name="relation_network"):
"""Initializes the RelationNetwork module.
Args:
edge_model_fn: A callable that will be passed to EdgeBlock to perform
per-edge computations. The callable must return a Sonnet module (or
equivalent; see EdgeBlock for details).
global_model_fn: A callable that will be passed to GlobalBlock to perform
per-global computations. The callable must return a Sonnet module (or
equivalent; see GlobalBlock for details).
          reducer: Reducer to be used by GlobalBlock to aggregate edges. Defaults to
            tf.math.unsorted_segment_mean.
name: The module name.
"""
super(RelationNetwork, self).__init__(name=name)
self._edge_block = blocks.EdgeBlock(
edge_model_fn=edge_model_fn,
use_edges=False,
use_receiver_nodes=True,
use_sender_nodes=True,
use_globals=use_globals)
self._global_block = blocks.GlobalBlock(
global_model_fn=global_model_fn,
use_edges=True,
use_nodes=False,
use_globals=use_globals,
edges_reducer=reducer)
def _build(self, graph):
"""Connects the RelationNetwork.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, except for the edges
and global properties which may be `None`.
Returns:
A `graphs.GraphsTuple` with updated globals.
Raises:
ValueError: If any of `graph.nodes`, `graph.receivers` or `graph.senders`
is `None`.
"""
edge_block = self._edge_block(graph)
output_graph = self._global_block(edge_block)
return output_graph
class EncodeProcessDecode(AbstractModule):
"""Full encode-process-decode model.
The model we explore includes three components:
- An "Encoder" graph net, which independently encodes the edge, node, and
global attributes (does not compute relations etc.).
- A "Core" graph net, which performs N rounds of processing (message-passing)
steps. The input to the Core is the concatenation of the Encoder's output
and the previous output of the Core (labeled "Hidden(t)" below, where "t" is
the processing step).
- A "Decoder" graph net, which independently decodes the edge, node, and
global attributes (does not compute relations etc.), on each message-passing
step.
                      Hidden(t)   Hidden(t+1)
                         |            ^
            *---------*  |  *------*  |  *---------*
            |         |  |  |      |  |  |         |
  Input --->| Encoder |  *->| Core |--*->| Decoder |---> Output(t)
            |         |---->|      |     |         |
            *---------*     *------*     *---------*
"""
def __init__(self,
encoder,
core,
decoder,
name="EncodeProcessDecode"):
super(EncodeProcessDecode, self).__init__(name=name)
self._encoder = encoder
self._core = core
self._decoder = decoder
def _build(self, input_graph, num_processing_steps):
latent_graph = self._encoder(input_graph)
# for _ in range(num_processing_steps):
# latent_graph = self._core(latent_graph)
# state = (counter, latent_graph)
_, latent_graph = tf.while_loop(cond=lambda const, state: const < num_processing_steps,
body=lambda const, state: (const+1, self._core(state)),
loop_vars=(tf.constant(0), latent_graph))
return self._decoder(latent_graph)
class CoreNetwork(AbstractModule):
"""
    Core network which can be used in the EncodeProcessDecode network. Applies multi-head
    self-attention over the graph nodes followed by a feed-forward block with residual
    connections and layer normalisation (transformer-style).
"""
def __init__(self,
num_heads,
multi_head_output_size,
input_node_size,
name=None):
super(CoreNetwork, self).__init__(name=name)
self.num_heads = num_heads
self.multi_head_output_size = multi_head_output_size
self.output_linear = snt.Linear(output_size=input_node_size)
self.FFN = snt.nets.MLP([32, input_node_size], activate_final=False) # Feed forward network
self.normalization = lambda x: (x - tf.reduce_mean(x)) / tf.math.reduce_std(x)
self.ln1 = snt.LayerNorm(axis=1, eps=1e-6, create_scale=True, create_offset=True)
self.ln2 = snt.LayerNorm(axis=1, eps=1e-6, create_scale=True, create_offset=True)
self.v_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # values
self.k_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # keys
self.q_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # queries
self.self_attention = SelfAttention()
def _build(self, latent):
node_values = self.v_linear(latent.nodes)
node_keys = self.k_linear(latent.nodes)
node_queries = self.q_linear(latent.nodes)
attended_latent = self.self_attention(node_values=node_values,
node_keys=node_keys,
node_queries=node_queries,
attention_graph=latent)
output_nodes = tf.reshape(attended_latent.nodes, (-1, self.num_heads * self.multi_head_output_size))
output_nodes = self.ln1(self.output_linear(output_nodes) + latent.nodes)
output_nodes = self.ln2(self.FFN(output_nodes))
output_graph = latent.replace(nodes=output_nodes)
return output_graph
class EncoderNetwork(AbstractModule):
"""
Encoder network that updates the graph to viable input for the Core network.
    Contains a node block to update the nodes and a relation network to generate edges and globals.
"""
def __init__(self,
edge_model_fn,
node_model_fn,
global_model_fn,
name=None):
super(EncoderNetwork, self).__init__(name=name)
self.node_block = blocks.NodeBlock(node_model_fn,
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=False)
self.relation_network = RelationNetwork(edge_model_fn=edge_model_fn,
global_model_fn=global_model_fn)
def _build(self, input_graph):
latent = self.node_block(input_graph)
output = self.relation_network(latent)
return output
class AutoEncoder(AbstractModule):
def __init__(self, kernel_size=4, name=None):
super(AutoEncoder, self).__init__(name=name)
self.encoder = snt.Sequential([snt.Conv2D(4, kernel_size, stride=4, padding='SAME'), tf.nn.relu,
snt.Conv2D(16, kernel_size, stride=4, padding='SAME'), tf.nn.relu,
snt.Conv2D(64, kernel_size, stride=4, padding='SAME'), tf.nn.relu])
self.decoder = snt.Sequential([snt.Conv2DTranspose(64, kernel_size, stride=4, padding='SAME'), tf.nn.relu,
snt.Conv2DTranspose(16, kernel_size, stride=4, padding='SAME'), tf.nn.relu,
snt.Conv2DTranspose(4, kernel_size, stride=4, padding='SAME'), tf.nn.relu,
snt.Conv2D(1, kernel_size, padding='SAME')])
@property
def step(self):
if self._step is None:
raise ValueError("Need to set step idx variable. model.step = epoch")
return self._step
@step.setter
def step(self, value):
self._step = value
def _build(self, batch):
(img, ) = batch
img = gaussian_filter2d(img, filter_shape=[6, 6])
img_before_autoencoder = (img - tf.reduce_min(img)) / (
tf.reduce_max(img) - tf.reduce_min(img))
tf.summary.image(f'img_before_autoencoder', img_before_autoencoder, step=self.step)
encoded_img = self.encoder(img)
print(encoded_img.shape)
decoded_img = self.decoder(encoded_img)
img_after_autoencoder = (decoded_img - tf.reduce_min(decoded_img)) / (
tf.reduce_max(decoded_img) - tf.reduce_min(decoded_img))
tf.summary.image(f'img_after_autoencoder', img_after_autoencoder, step=self.step)
return decoded_img
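# Illustrative usage of the auto-encoder above (a sketch; assumes AbstractModule forwards
# __call__ to _build, as described in the Model docstring below, and that model.step is set
# the way the training loop does before calling the module):
#
#   ae = AutoEncoder(kernel_size=4)
#   ae.step = 0
#   imgs = tf.random.uniform((2, 256, 256, 1))
#   recon = ae((imgs,))   # encoder downsamples 256 -> 4 spatially, decoder maps back to 256
#   assert recon.shape == (2, 256, 256, 1)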
class Model(AbstractModule):
"""Model inherits from AbstractModule, which contains a __call__ function which executes a _build function
that is to be specified in the child class. So for example:
model = Model(), then model() returns the output of _build()
AbstractModule inherits from snt.Module, which has useful functions that can return the (trainable) variables,
so the Model class has this functionality as well
An instance of the RelationNetwork class also inherits from AbstractModule,
so it also executes its _build() function when called and it can return its (trainable) variables
A RelationNetwork contains an edge block and a global block:
The edge block generally uses the edge, receiver, sender and global attributes of the input graph
to calculate the new edges.
In our case we currently only use the receiver and sender attributes to calculate the edges.
The global block generally uses the aggregated edge, aggregated node and the global attributes of the input graph
to calculate the new globals.
In our case we currently only use the aggregated edge attributes to calculate the new globals.
As input the RelationNetwork needs two (neural network) functions:
one to calculate the new edges from receiver and sender nodes
and one to calculate the globals from the aggregated edges.
The new edges will be a vector with size 16 (i.e. the output of the first function in the RelationNetwork)
The new globals will also be a vector with size 16 (i.e. the output of the second function in the RelationNetwork)
The image_cnn downscales the image (currently from 4880x4880 to 35x35) and encodes the image in 16 channels.
So we (currently) go from (4880,4880,1) to (35,35,16)
"""
def __init__(self,
mlp_size=16,
cluster_encoded_size=10,
image_encoded_size=64,
num_heads=10,
kernel_size=4,
image_feature_size=16,
core_steps=10, name=None):
super(Model, self).__init__(name=name)
self.epd_graph = EncodeProcessDecode(encoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True),
node_model_fn=lambda: snt.Linear(cluster_encoded_size),
global_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True)),
core=CoreNetwork(num_heads=num_heads,
multi_head_output_size=cluster_encoded_size,
input_node_size=cluster_encoded_size),
decoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True),
node_model_fn=lambda: snt.Linear(cluster_encoded_size),
global_model_fn=lambda: snt.nets.MLP([32, 32, 256], activate_final=True)))
self.epd_image = EncodeProcessDecode(encoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True),
node_model_fn=lambda: snt.Linear(image_encoded_size),
global_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True)),
core=CoreNetwork(num_heads=num_heads,
multi_head_output_size=image_encoded_size,
input_node_size=image_encoded_size),
decoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True),
node_model_fn=lambda: snt.Linear(image_encoded_size),
global_model_fn=lambda: snt.nets.MLP([32, 32, 256], activate_final=True)))
# Load the autoencoder model from checkpoint
pretrained_auto_encoder = AutoEncoder(kernel_size=kernel_size)
checkpoint_dir = '/home/s2675544/git/neural_deprojection/neural_deprojection/models/identify_medium_GCD/autoencoder_checkpointing'
encoder_decoder_cp = tf.train.Checkpoint(encoder=pretrained_auto_encoder.encoder, decoder=pretrained_auto_encoder.decoder)
model_cp = tf.train.Checkpoint(_model=encoder_decoder_cp)
checkpoint = tf.train.Checkpoint(module=model_cp)
status = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint.restore(status).expect_partial()
self.auto_encoder = pretrained_auto_encoder
self.compare = snt.nets.MLP([32, 1])
self.image_feature_size = image_feature_size
self._core_steps = core_steps
@property
def step(self):
if self._step is None:
raise ValueError("Need to set step idx variable. model.step = epoch")
return self._step
@step.setter
def step(self, value):
self._step = value
def _build(self, batch, *args, **kwargs):
(graph, img, c) = batch
del c
print(graph.nodes.shape)
# The encoded cluster graph has globals which can be compared against the encoded image graph
encoded_graph = self.epd_graph(graph, self._core_steps)
# # Add an extra dimension to the image (tf.summary expects a Tensor of rank 4)
# img = img[None, ...]
print("IMG SHAPE:", img.shape)
print("IMG MIN MAX:", tf.math.reduce_min(img), tf.math.reduce_max(img))
img_before_cnn = (img - tf.reduce_min(img)) / \
(tf.reduce_max(img) - tf.reduce_min(img))
tf.summary.image(f'img_before_cnn', img_before_cnn, step=self.step)
# Smooth the image and use the encoder from the autoencoder to reduce the dimensionality of the image
# The autoencoder was trained on images that were smoothed in the same way
img = gaussian_filter2d(img, filter_shape=[6, 6])
img = self.auto_encoder.encoder(img)
# Prevent the autoencoder from learning
try:
for variable in self.auto_encoder.encoder.trainable_variables:
variable._trainable = False
for variable in self.auto_encoder.decoder.trainable_variables:
variable._trainable = False
except:
pass
print("IMG SHAPE AFTER CNN:", img.shape)
print("IMG MIN MAX AFTER CNN:", tf.math.reduce_min(img), tf.math.reduce_max(img))
img_after_cnn = (img - tf.reduce_min(img)) / \
(tf.reduce_max(img) - tf.reduce_min(img))
tf.summary.image(f'quantized_img', img_after_cnn[:, :, :, np.random.randint(low=0, high=64)][:, :, :, None],
step=self.step)
decoded_img = self.auto_encoder.decoder(img)
decoded_img = (decoded_img - tf.reduce_min(decoded_img)) / \
(tf.reduce_max(decoded_img) - tf.reduce_min(decoded_img))
tf.summary.image(f'decoded_img', decoded_img, step=self.step)
# Reshape the encoded image so it can be used for the nodes
img_nodes = tf.reshape(img, (-1, self.image_feature_size))
print(img_nodes.shape)
# Create a graph that has a node for every encoded pixel. The features of each node
# are the channels of the corresponding pixel. Then connect each node with every other
# node.
img_graph = GraphsTuple(nodes=img_nodes,
edges=None,
globals=None,
receivers=None,
senders=None,
n_node=tf.shape(img_nodes)[0:1],
n_edge=tf.constant([0]))
connected_graph = fully_connect_graph_dynamic(img_graph)
# The encoded image graph has globals which can be compared against the encoded cluster graph
encoded_img = self.epd_image(connected_graph, 1)
# Compare the globals from the encoded cluster graph and encoded image graph
# to estimate the similarity between the input graph and input image
print(encoded_img.globals.shape)
print(encoded_graph.globals.shape)
distance = self.compare(tf.concat([encoded_graph.globals, encoded_img.globals], axis=1)) + self.compare(tf.concat([encoded_img.globals, encoded_graph.globals], axis=1))
return distance
def graph_tuple_to_feature(graph: GraphsTuple, name=''):
return {
f'{name}_nodes': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.io.serialize_tensor(tf.cast(graph.nodes, tf.float32)).numpy()])),
f'{name}_edges': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.io.serialize_tensor(tf.cast(graph.edges, tf.float32)).numpy()])),
f'{name}_senders': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.io.serialize_tensor(tf.cast(graph.senders, tf.int64)).numpy()])),
f'{name}_receivers': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.io.serialize_tensor(tf.cast(graph.receivers, tf.int64)).numpy()]))}
def feature_to_graph_tuple(name=''):
return {f'{name}_nodes': tf.io.FixedLenFeature([], dtype=tf.string),
f'{name}_edges': tf.io.FixedLenFeature([], dtype=tf.string),
f'{name}_senders': tf.io.FixedLenFeature([], dtype=tf.string),
f'{name}_receivers': tf.io.FixedLenFeature([], dtype=tf.string)}
def decode_examples(record_bytes, node_shape=None, edge_shape=None, image_shape=None):
"""
Decodes raw bytes as returned from tf.data.TFRecordDataset([example_path]) into a GraphTuple and image
Args:
record_bytes: raw bytes
node_shape: shape of nodes if known.
edge_shape: shape of edges if known.
image_shape: shape of image if known.
Returns: (GraphTuple, image)
"""
parsed_example = tf.io.parse_single_example(
# Data
record_bytes,
# Schema
dict(
image=tf.io.FixedLenFeature([], dtype=tf.string),
cluster_idx=tf.io.FixedLenFeature([], dtype=tf.string),
projection_idx=tf.io.FixedLenFeature([], dtype=tf.string),
vprime=tf.io.FixedLenFeature([], dtype=tf.string),
**feature_to_graph_tuple('graph')
)
)
image = tf.io.parse_tensor(parsed_example['image'], tf.float32)
# image = tf.math.log(image / 43.) + tf.math.log(0.5)
image.set_shape(image_shape)
vprime = tf.io.parse_tensor(parsed_example['vprime'], tf.float32)
vprime.set_shape((3, 3))
cluster_idx = tf.io.parse_tensor(parsed_example['cluster_idx'], tf.int32)
cluster_idx.set_shape(())
projection_idx = tf.io.parse_tensor(parsed_example['projection_idx'], tf.int32)
projection_idx.set_shape(())
graph_nodes = tf.io.parse_tensor(parsed_example['graph_nodes'], tf.float32)
if node_shape is not None:
graph_nodes.set_shape([None] + list(node_shape))
graph_edges = tf.io.parse_tensor(parsed_example['graph_edges'], tf.float32)
if edge_shape is not None:
graph_edges.set_shape([None] + list(edge_shape))
receivers = tf.io.parse_tensor(parsed_example['graph_receivers'], tf.int64)
receivers = tf.cast(receivers, tf.int32)
receivers.set_shape([None])
senders = tf.io.parse_tensor(parsed_example['graph_senders'], tf.int64)
senders = tf.cast(senders, tf.int32)
senders.set_shape([None])
n_node = tf.shape(graph_nodes)[0:1]
n_edge = tf.shape(graph_edges)[0:1]
# graph = GraphsTuple(nodes=graph_nodes,
# edges=graph_edges,
# globals=tf.zeros([1]),
# receivers=receivers,
# senders=senders,
# n_node=n_node,
# n_edge=n_edge)
graph_data_dict = dict(nodes=graph_nodes,
edges=tf.zeros((n_edge[0], 1)),
globals=tf.zeros([1]),
receivers=receivers,
senders=senders,
n_node=n_node,
n_edge=n_edge)
return (graph_data_dict, image, cluster_idx, projection_idx, vprime)
```
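As a quick illustration of the shape convention used by `MultiHeadLinear` above: the module projects every node feature vector once per head, so an input of shape [num_nodes, node_size] becomes [num_nodes, num_heads, output_size], which is what `SelfAttention` expects for its values, keys and queries. A sketch with arbitrary sizes (the import path is an assumption):
```python
import tensorflow as tf

# assumed import path for the module above
from neural_deprojection.models.identify_medium_GCD.model_utils import MultiHeadLinear

nodes = tf.random.normal((5, 16))                  # 5 nodes with 16 features each
mhl = MultiHeadLinear(output_size=8, num_heads=4)  # 4 heads, 8 outputs per head
out = mhl(nodes)
print(out.shape)                                   # (5, 4, 8)
```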
#### File: models/identify_medium_SCD/generate_data.py
```python
import os
import glob
import tensorflow as tf
from timeit import default_timer
from itertools import product
from graph_nets.graphs import GraphsTuple
from graph_nets.utils_np import graphs_tuple_to_networkxs, networkxs_to_graphs_tuple, get_graph
import numpy as np
import networkx as nx
from networkx.drawing import draw
from tqdm import tqdm
from scipy.optimize import bisect
from scipy.spatial.ckdtree import cKDTree
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from multiprocessing import Pool, Lock
mp_lock = Lock()
def std(tensor, axis):
return tf.math.sqrt(tf.reduce_mean(tensor ** 2, axis=axis))
def find_screen_length(distance_matrix, k_mean):
"""
Get optimal screening length.
Args:
distance_matrix: [num_points, num_points]
k_mean: float
Returns: float the optimal screen length
"""
dist_max = distance_matrix.max()
distance_matrix_no_loops = np.where(distance_matrix == 0., np.inf, distance_matrix)
def get_k_mean(length):
paired = distance_matrix_no_loops < length
degree = np.sum(paired, axis=-1)
return degree.mean()
def loss(length):
return get_k_mean(length) - k_mean
if loss(0.) * loss(dist_max) >= 0.:
# When there are fewer than k_mean+1 nodes in the list,
# it's impossible for the average degree to be equal to k_mean.
# So choose max screening length. Happens when f(low) and f(high) have same sign.
return dist_max
return bisect(loss, 0., dist_max, xtol=0.001)
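# Illustrative usage: for a random point cloud, connecting every pair closer than the returned
# length gives a graph whose mean node degree is approximately k_mean (bisection resolves the
# step-shaped degree curve to xtol=0.001).
#
#   pts = np.random.uniform(size=(100, 3))
#   dist = np.linalg.norm(pts[:, None, :] - pts[None, :, :], axis=-1)
#   length = find_screen_length(dist, k_mean=26.)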
def generate_example_random_choice(positions, properties, k=26, plot=False):
print('choice nn')
idx_list = np.arange(len(positions))
virtual_node_positions = positions[np.random.choice(idx_list, 1000, replace=False)]
kdtree = cKDTree(virtual_node_positions)
dist, indices = kdtree.query(positions)
virtual_properties = np.zeros((len(np.bincount(indices)), len(properties[0])))
mean_sum = [lambda x: np.bincount(indices, weights=x) / np.maximum(1., np.bincount(indices)), # mean
lambda x: np.bincount(indices, weights=x)] # sum
mean_sum_enc = [0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1]
for p, enc in zip(np.arange(len(properties[0])), mean_sum_enc):
virtual_properties[:, p] = mean_sum[enc](properties[:, p])
virtual_positions = virtual_properties[:, :3]
graph = nx.DiGraph()
kdtree = cKDTree(virtual_positions)
dist, idx = kdtree.query(virtual_positions, k=k + 1)
receivers = idx[:, 1:] # N,k
senders = np.arange(virtual_positions.shape[0]) # N
senders = np.tile(senders[:, None], [1, k]) # N,k
receivers = receivers.flatten()
senders = senders.flatten()
n_nodes = virtual_positions.shape[0]
pos = dict() # for plotting node positions.
edgelist = []
for node, feature, position in zip(np.arange(n_nodes), virtual_properties, virtual_positions):
graph.add_node(node, features=feature)
pos[node] = position[:2]
# edges = np.stack([senders, receivers], axis=-1) + sibling_node_offset
for u, v in zip(senders, receivers):
graph.add_edge(u, v, features=np.array([1., 0.]))
graph.add_edge(v, u, features=np.array([1., 0.]))
edgelist.append((u, v))
edgelist.append((v, u))
graph.graph["features"] = np.array([0.])
# plotting
print('len(pos) = {}\nlen(edgelist) = {}'.format(len(pos), len(edgelist)))
if plot:
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
draw(graph, ax=ax, pos=pos, node_color='blue', edge_color='red', node_size=10, width=0.1)
image_dir = '/data2/hendrix/images/'
graph_image_idx = len(glob.glob(os.path.join(image_dir, 'graph_image_*')))
plt.savefig(os.path.join(image_dir, 'graph_image_{}'.format(graph_image_idx)))
return networkxs_to_graphs_tuple([graph],
node_shape_hint=[virtual_positions.shape[1] + virtual_properties.shape[1]],
edge_shape_hint=[2])
def generate_example_nn(positions, properties, k=26, resolution=2, plot=False):
print('example nn')
resolution = 3.086e18 * resolution # pc to cm
node_features = []
node_positions = []
box_size = (np.max(positions), np.min(positions)) # box that encompasses all of the nodes
axis = np.arange(box_size[1] + resolution, box_size[0], resolution)
lists = [axis] * 3
virtual_node_pos = [p for p in product(*lists)]
virtual_kdtree = cKDTree(virtual_node_pos)
particle_kdtree = cKDTree(positions)
indices = virtual_kdtree.query_ball_tree(particle_kdtree, np.sqrt(3) / 2. * resolution)
for i, p in enumerate(indices):
if len(p) == 0:
continue
virt_pos, virt_prop = make_virtual_node(properties[p])
node_positions.append(virt_pos)
node_features.append(virt_prop)
node_features = np.array(node_features)
node_positions = np.array(node_positions)
graph = nx.DiGraph()
kdtree = cKDTree(node_positions)
dist, idx = kdtree.query(node_positions, k=k + 1)
receivers = idx[:, 1:] # N,k
senders = np.arange(node_positions.shape[0]) # N
senders = np.tile(senders[:, None], [1, k]) # N,k
receivers = receivers.flatten()
senders = senders.flatten()
n_nodes = node_positions.shape[0]
pos = dict() # for plotting node positions.
edgelist = []
for node, feature, position in zip(np.arange(n_nodes), node_features, node_positions):
graph.add_node(node, features=feature)
pos[node] = (position[:2] - box_size[1]) / (box_size[0] - box_size[1])
# edges = np.stack([senders, receivers], axis=-1) + sibling_node_offset
for u, v in zip(senders, receivers):
graph.add_edge(u, v, features=np.array([1., 0.]))
graph.add_edge(v, u, features=np.array([1., 0.]))
edgelist.append((u, v))
edgelist.append((v, u))
graph.graph["features"] = np.array([0.])
# plotting
print('len(pos) = {}\nlen(edgelist) = {}'.format(len(pos), len(edgelist)))
if plot:
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
draw(graph, ax=ax, pos=pos, node_color='green', edge_color='red')
image_dir = '/data2/hendrix/images/'
graph_image_idx = len(glob.glob(os.path.join(image_dir, 'graph_image_*')))
plt.savefig(os.path.join(image_dir, 'graph_image_{}'.format(graph_image_idx)))
return networkxs_to_graphs_tuple([graph],
node_shape_hint=[node_positions.shape[1] + node_features.shape[1]],
edge_shape_hint=[2])
def generate_example(positions, properties, k_mean=26, plot=False):
"""
Generate a geometric graph from positions.
Args:
positions: [num_points, 3] positions used for graph construction.
properties: [num_points, F0,...,Fd] each node will have these properties of shape [F0,...,Fd]
k_mean: float
plot: whether to plot graph.
Returns: GraphTuple
"""
graph = nx.DiGraph()
sibling_edgelist = []
parent_edgelist = []
pos = dict() # for plotting node positions.
real_nodes = list(np.arange(positions.shape[0]))
while positions.shape[0] > 1:
# n_nodes, n_nodes
dist = np.linalg.norm(positions[:, None, :] - positions[None, :, :], axis=-1)
opt_screen_length = find_screen_length(dist, k_mean)
print("Found optimal screening length {}".format(opt_screen_length))
distance_matrix_no_loops = np.where(dist == 0., np.inf, dist)
A = distance_matrix_no_loops < opt_screen_length
senders, receivers = np.where(A)
n_edge = senders.size
# num_points, F0,...Fd
# if positions is to be part of features then this should already be set in properties.
        # We don't concatenate here. Mainly because properties could be an image, etc.
sibling_nodes = properties
n_nodes = sibling_nodes.shape[0]
sibling_node_offset = len(graph.nodes)
for node, feature, position in zip(np.arange(sibling_node_offset, sibling_node_offset + n_nodes), sibling_nodes,
positions):
graph.add_node(node, features=feature)
pos[node] = position[:2]
# edges = np.stack([senders, receivers], axis=-1) + sibling_node_offset
for u, v in zip(senders + sibling_node_offset, receivers + sibling_node_offset):
graph.add_edge(u, v, features=np.array([1., 0.]))
graph.add_edge(v, u, features=np.array([1., 0.]))
sibling_edgelist.append((u, v))
sibling_edgelist.append((v, u))
# for virtual nodes
sibling_graph = GraphsTuple(nodes=None, # sibling_nodes,
edges=None,
senders=senders,
receivers=receivers,
globals=None,
n_node=np.array([n_nodes]),
n_edge=np.array([n_edge]))
sibling_graph = graphs_tuple_to_networkxs(sibling_graph)[0]
# completely connect
connected_components = sorted(nx.connected_components(nx.Graph(sibling_graph)), key=len)
_positions = []
_properties = []
for connected_component in connected_components:
print("Found connected component {}".format(connected_component))
indices = list(sorted(list(connected_component)))
virtual_position, virtual_property = make_virtual_node(positions[indices, :], properties[indices, ...])
_positions.append(virtual_position)
_properties.append(virtual_property)
virtual_positions = np.stack(_positions, axis=0)
virtual_properties = np.stack(_properties, axis=0)
###
        # add virtual nodes
# num_parents, 3+F
parent_nodes = virtual_properties
n_nodes = parent_nodes.shape[0]
parent_node_offset = len(graph.nodes)
parent_indices = np.arange(parent_node_offset, parent_node_offset + n_nodes)
# adding the nodes to global graph
for node, feature, virtual_position in zip(parent_indices, parent_nodes, virtual_positions):
graph.add_node(node, features=feature)
print("new virtual {}".format(node))
pos[node] = virtual_position[:2]
for parent_idx, connected_component in zip(parent_indices, connected_components):
child_node_indices = [idx + sibling_node_offset for idx in list(sorted(list(connected_component)))]
for child_node_idx in child_node_indices:
graph.add_edge(parent_idx, child_node_idx, features=np.array([0., 1.]))
graph.add_edge(child_node_idx, parent_idx, features=np.array([0., 1.]))
parent_edgelist.append((parent_idx, child_node_idx))
parent_edgelist.append((child_node_idx, parent_idx))
print("connecting {}<->{}".format(parent_idx, child_node_idx))
positions = virtual_positions
properties = virtual_properties
# plotting
virutal_nodes = list(set(graph.nodes) - set(real_nodes))
if plot:
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
draw(graph, ax=ax, pos=pos, node_color='green', edgelist=[], nodelist=real_nodes)
draw(graph, ax=ax, pos=pos, node_color='purple', edgelist=[], nodelist=virutal_nodes)
draw(graph, ax=ax, pos=pos, edge_color='blue', edgelist=sibling_edgelist, nodelist=[])
draw(graph, ax=ax, pos=pos, edge_color='red', edgelist=parent_edgelist, nodelist=[])
plt.show()
return networkxs_to_graphs_tuple([graph],
node_shape_hint=[positions.shape[1] + properties.shape[1]],
edge_shape_hint=[2])
def graph_tuple_to_feature(graph: GraphsTuple, name=''):
return {
f'{name}_nodes': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.io.serialize_tensor(tf.cast(graph.nodes, tf.float32)).numpy()])),
f'{name}_edges': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.io.serialize_tensor(tf.cast(graph.edges, tf.float32)).numpy()])),
f'{name}_senders': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.io.serialize_tensor(tf.cast(graph.senders, tf.int64)).numpy()])),
f'{name}_receivers': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.io.serialize_tensor(tf.cast(graph.receivers, tf.int64)).numpy()]))}
def save_examples(generator, save_dir=None,
examples_per_file=26, num_examples=1, prefix='train'):
"""
Saves a list of GraphTuples to tfrecords.
Args:
generator: generator (or list) of (GraphTuples, image).
Generator is more efficient.
save_dir: dir to save tfrecords in
        examples_per_file: int, max number of examples per file
        num_examples: int, total number of examples (used for the progress bar)
        prefix: str, prefix for the tfrecord file names (note: the current implementation always writes 'train_*' files)
Returns: list of tfrecord files.
"""
print("Saving data in tfrecords.")
if save_dir is None:
save_dir = os.getcwd()
os.makedirs(save_dir, exist_ok=True)
files = []
data_iterable = iter(generator)
data_left = True
pbar = tqdm(total=num_examples)
while data_left:
mp_lock.acquire() # make sure no duplicate files are made / replaced
tf_files = glob.glob(os.path.join(save_dir, 'train_*'))
file_idx = len(tf_files)
mp_lock.release()
file = os.path.join(save_dir, 'train_{:04d}.tfrecords'.format(file_idx))
files.append(file)
with tf.io.TFRecordWriter(file) as writer:
for i in range(examples_per_file):
try:
(graph, image, example_idx) = next(data_iterable)
except StopIteration:
data_left = False
break
graph = get_graph(graph, 0)
features = dict(
image=tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tf.io.serialize_tensor(tf.cast(image, tf.float32)).numpy()])),
example_idx=tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tf.io.serialize_tensor(tf.cast(example_idx, tf.int32)).numpy()])),
**graph_tuple_to_feature(graph, name='graph')
)
features = tf.train.Features(feature=features)
example = tf.train.Example(features=features)
writer.write(example.SerializeToString())
pbar.update(1)
print("Saved in tfrecords: {}".format(files))
return files
def feature_to_graph_tuple(name=''):
schema = {}
schema[f'{name}_nodes'] = tf.io.FixedLenFeature([], dtype=tf.string)
schema[f'{name}_senders'] = tf.io.FixedLenFeature([], dtype=tf.string)
schema[f'{name}_receivers'] = tf.io.FixedLenFeature([], dtype=tf.string)
return schema
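# Hedged round-trip sketch (not called anywhere): serialize a GraphsTuple with graph_tuple_to_feature
# and parse the record back using the schema from feature_to_graph_tuple. It assumes `graph` has
# non-None nodes, edges, senders and receivers; shapes and names are illustrative only.
def _example_graph_feature_roundtrip(graph: GraphsTuple):
    features = tf.train.Features(feature=graph_tuple_to_feature(graph, name='graph'))
    serialized = tf.train.Example(features=features).SerializeToString()
    parsed = tf.io.parse_single_example(serialized, feature_to_graph_tuple('graph'))
    nodes = tf.io.parse_tensor(parsed['graph_nodes'], tf.float32)  # [n_node, node_size]
    senders = tf.io.parse_tensor(parsed['graph_senders'], tf.int64)  # [n_edge]
    receivers = tf.io.parse_tensor(parsed['graph_receivers'], tf.int64)  # [n_edge]
    return nodes, senders, receivers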
def decode_examples_old(record_bytes, node_shape=None, image_shape=None):
"""
Decodes raw bytes as returned from tf.data.TFRecordDataset([example_path]) into a GraphTuple and image
Args:
record_bytes: raw bytes
node_shape: shape of nodes if known.
        image_shape: shape of image if known.
    Returns: (graph_data_dict, image, snapshot, projection)
"""
parsed_example = tf.io.parse_single_example(
# Data
record_bytes,
# Schema
dict(
image=tf.io.FixedLenFeature([], dtype=tf.string),
snapshot=tf.io.FixedLenFeature([], dtype=tf.string),
projection=tf.io.FixedLenFeature([], dtype=tf.string),
**feature_to_graph_tuple('graph')
)
)
image = tf.io.parse_tensor(parsed_example['image'], tf.float32)
image.set_shape(image_shape)
snapshot = tf.io.parse_tensor(parsed_example['snapshot'], tf.int32)
snapshot.set_shape(())
projection = tf.io.parse_tensor(parsed_example['projection'], tf.int32)
projection.set_shape(())
graph_nodes = tf.io.parse_tensor(parsed_example['graph_nodes'], tf.float32)
graph_nodes.set_shape([None] + list(node_shape))
receivers = tf.io.parse_tensor(parsed_example['graph_receivers'], tf.int64)
receivers = tf.cast(receivers, tf.int32)
receivers.set_shape([None])
senders = tf.io.parse_tensor(parsed_example['graph_senders'], tf.int64)
senders = tf.cast(senders, tf.int32)
senders.set_shape([None])
n_node = tf.shape(graph_nodes)[0:1]
n_edge = tf.shape(senders)[0:1]
# graph = GraphsTuple(nodes=graph_nodes,
# edges=graph_edges,
# globals=tf.zeros([1]),
# receivers=receivers,
# senders=senders,
# n_node=tf.shape(graph_nodes)[0:1],
# n_edge=tf.shape(graph_edges)[0:1])
graph_data_dict = dict(nodes=graph_nodes,
edges=tf.zeros((n_edge[0], 1)),
globals=tf.zeros([1]),
receivers=receivers,
senders=senders,
n_node=n_node,
n_edge=n_edge)
return (graph_data_dict, image, snapshot, projection)
def decode_examples(record_bytes, node_shape=None, image_shape=None, k=None):
"""
Decodes raw bytes as returned from tf.data.TFRecordDataset([example_path]) into a GraphTuple and image
Args:
k: number of nearest neighbours
record_bytes: raw bytes
node_shape: shape of nodes if known.
        image_shape: shape of image if known.
    Returns: (graph_data_dict, image, snapshot, projection, extra_info)
"""
parsed_example = tf.io.parse_single_example(
# Data
record_bytes,
# Schema
dict(
idx=tf.io.FixedLenFeature([], dtype=tf.string),
image=tf.io.FixedLenFeature([], dtype=tf.string),
virtual_properties=tf.io.FixedLenFeature([], dtype=tf.string),
snapshot=tf.io.FixedLenFeature([], dtype=tf.string),
projection=tf.io.FixedLenFeature([], dtype=tf.string),
extra_info=tf.io.FixedLenFeature([], dtype=tf.string)
# **feature_to_graph_tuple('graph')
)
)
idx = tf.io.parse_tensor(parsed_example['idx'], tf.int32)
idx.set_shape([None] + [k + 1])
graph_nodes = tf.io.parse_tensor(parsed_example['virtual_properties'], tf.float32)
graph_nodes.set_shape([None] + list(node_shape))
image = tf.io.parse_tensor(parsed_example['image'], tf.float32)
image.set_shape(image_shape)
snapshot = tf.io.parse_tensor(parsed_example['snapshot'], tf.int32)
snapshot.set_shape(())
projection = tf.io.parse_tensor(parsed_example['projection'], tf.int32)
projection.set_shape(())
extra_info = tf.io.parse_tensor(parsed_example['extra_info'], tf.float32)
extra_info.set_shape([None])
receivers = idx[:, 1:] # N,k
senders = tf.cast(tf.range(tf.shape(graph_nodes)[0:1][0]), idx.dtype) # N
senders = tf.tile(senders[:, None], tf.constant([1, k], tf.int32)) # N, k
receivers = tf.reshape(receivers, shape=[-1])
senders = tf.reshape(senders, shape=[-1])
receivers_both_directions = tf.concat([receivers, senders], axis=0)
senders_both_directions = tf.concat([senders, receivers], axis=0)
n_node = tf.shape(graph_nodes)[0:1]
n_edge = tf.shape(senders_both_directions)[0:1]
# property_names = ['x', 'y', 'z', 'velocity_x', 'velocity_y', 'velocity_z', 'gravitational_potential',
# 'density', 'temperature', 'cell_mass', 'cell_volume']
print('before', graph_nodes.shape)
    mask = tf.constant([True, True, True,  # keep x, y, z
                        False, False, False,  # drop velocities
                        False,  # drop gravitational_potential
                        True,  # keep density
                        False, False, False], dtype=tf.bool)  # drop temperature, cell_mass, cell_volume
graph_nodes = tf.boolean_mask(graph_nodes, mask, axis=1)
graph_nodes.set_shape([None, 4])
print('after', graph_nodes.shape)
# graph_data_dict = dict(nodes=graph_nodes,
# edges=tf.zeros((n_edge[0], 1)),
# globals=tf.zeros([1, 1]),
# receivers=receivers_both_directions,
# senders=senders_both_directions,
# n_node=n_node,
# n_edge=n_edge)
graph_data_dict = dict(nodes=graph_nodes,
# edges=tf.zeros((n_edge[0], 1)),
# globals=tf.zeros([1, 1]),
# receivers=receivers_both_directions,
# senders=senders_both_directions,
n_node=n_node,
n_edge=tf.zeros_like(n_node))
return (graph_data_dict, image, snapshot, projection, extra_info)
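# Hedged usage sketch (not called anywhere): how a tf.data pipeline could apply decode_examples to
# tfrecords whose features match the schema parsed above. node_shape, image_shape and k are
# illustrative assumptions, not values taken from a training config.
def _example_decode_pipeline(tfrecord_files, k=6):
    dataset = tf.data.TFRecordDataset(tfrecord_files)
    # each element becomes (graph_data_dict, image, snapshot, projection, extra_info)
    dataset = dataset.map(lambda record_bytes: decode_examples(record_bytes,
                                                               node_shape=(11,),
                                                               image_shape=(256, 256, 1),
                                                               k=k))
    return dataset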
def get_data_info(data_dirs):
"""
Get information of saved data.
Args:
data_dirs: data directories
Returns:
"""
def data_generator():
for idx, dir in tqdm(enumerate(data_dirs)):
print("Generating data from {}".format(dir))
positions, properties, image = _get_data(dir)
yield (properties, image, dir)
data_iterable = iter(data_generator())
open('data_info.txt', 'w').close()
while True:
try:
(properties, image, dir) = next(data_iterable)
except StopIteration:
break
with open("data_info.txt", "a") as text_file:
print(f"dir: {dir}\n"
f" image_min: {np.min(image)}\n"
f" image_max: {np.max(image)}\n"
f" properties_min: {np.around(np.min(properties, axis=0), 2)}\n"
f" properties_max: {np.around(np.max(properties, axis=0), 2)}\n", file=text_file)
def get_data_image(data_dirs):
"""
    Save the projection image of each example to disk as a png.
Args:
data_dirs: data directories
Returns:
"""
image_dir = '/data2/hendrix/projection_images/'
def data_generator():
for idx, dir in tqdm(enumerate(data_dirs)):
print("Generating data from {}".format(dir))
positions, properties, image = _get_data(dir)
yield (properties, image, dir)
data_iterable = iter(data_generator())
while True:
try:
(properties, image, dir) = next(data_iterable)
except StopIteration:
break
print('save image...')
proj_image_idx = len(glob.glob(os.path.join(image_dir, 'proj_image_*')))
plt.imsave(os.path.join(image_dir, 'proj_image_{}.png'.format(proj_image_idx)),
image[:, :, 0])
print('saved.')
def generate_data(data_dir, save_dir='/data2/hendrix/train_data_2/'):
"""
Routine for generating train data in tfrecords
Args:
        data_dir: directory containing the simulation npz files.
save_dir: where tfrecords will go.
Returns: list of tfrecords.
"""
npz_files = glob.glob(os.path.join(data_dir, '*'))
def data_generator():
print("Making graphs.")
for idx, dir in tqdm(enumerate(npz_files)):
print("Generating data from {}/{}".format(data_dir, dir))
positions, properties, image = _get_data(dir)
graph = generate_example_random_choice(positions, properties)
yield (graph, image, idx)
train_tfrecords = save_examples(data_generator(),
save_dir=save_dir,
examples_per_file=len(npz_files),
                                    num_examples=len(npz_files),
prefix='train')
return train_tfrecords
###
# specific to project
def make_virtual_node(properties):
"""
Aggregate positions and properties of nodes into one virtual node.
Args:
        properties: [N, 11] node properties (the first three columns are assumed to be the positions)
Returns: [3], [F0,...,Fd]
"""
    # Assumes the 11-column property layout noted in decode_examples:
    # x, y, z, velocity_x, velocity_y, velocity_z, gravitational_potential,
    # density, temperature, cell_mass, cell_volume.
    virtual_properties = np.zeros(11)
    virtual_properties[:6] = np.mean(properties[:, :6], axis=0)  # mean position and velocity
    virtual_properties[6] = np.sum(properties[:, 6])  # summed gravitational potential
    virtual_properties[7:9] = np.mean(properties[:, 7:9], axis=0)  # mean density and temperature
    virtual_properties[9:11] = np.sum(properties[:, 9:11], axis=0)  # summed cell mass and volume
    return np.mean(properties[:, :3], axis=0), virtual_properties
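# Hedged sanity-check sketch (not executed on import), assuming the 11-column property layout
# described above: aggregating N rows should give a [3] virtual position and an [11] property vector.
def _example_make_virtual_node():
    fake_properties = np.random.uniform(size=(50, 11))
    virtual_position, virtual_properties = make_virtual_node(fake_properties)
    assert virtual_position.shape == (3,)
    assert virtual_properties.shape == (11,)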
def aggregate_lowest_level_cells(positions, properties):
'''
aggregate the lowest level particles.
Args:
positions: node positions [n, 3]
properties: node properties [n, f]
Returns:
agg_positions: aggregated node positions [m, 3]
agg_properties: aggregated node properties [m, f]
'''
lowest_level = np.max(properties[:, 11])
lowest_level_positions = positions[properties[:, 11] == lowest_level] # [j, 3]
lowest_level_properties = properties[properties[:, 11] == lowest_level] # [j, f]
cell_inds = list(set(lowest_level_properties[:, 12])) # [m-(n-j)]
grouped_ll_positions = [lowest_level_positions[lowest_level_properties[:, 12] == ind] for ind in
cell_inds] # [m-(n-j), 4096, 3]
grouped_ll_properties = [lowest_level_properties[lowest_level_properties[:, 12] == ind] for ind in
cell_inds] # [m-(n-j), 4096, f]
agg_positions = positions[properties[:, 11] < lowest_level] # [n-j, 3]
agg_properties = properties[properties[:, 11] < lowest_level] # [n-j, f]
    agg_positions = np.concatenate((agg_positions, np.mean(grouped_ll_positions, axis=1)))  # [m, 3]
    agg_properties = np.concatenate((agg_properties, np.mean(grouped_ll_properties, axis=1)))  # [m, f]
return agg_positions, agg_properties
def _get_data(dir):
"""
Should return the information for a single simulation.
Args:
dir: directory with sim data.
Returns:
positions for building graph
properties for putting in nodes and aggregating upwards
image corresponding to the graph
        extra info corresponding to the example (currently commented out and not returned)
"""
f = np.load(dir)
positions = f['positions']
properties = f['properties']
image = f['proj_image']
image = image.reshape((256, 256, 1))
# properties = properties / np.std(properties, axis=0) # normalize values
# extra_info = f['extra_info']
return positions, properties, image # , extra_info
def make_tutorial_data(examples_dir):
for i in range(10):
example_idx = len(glob.glob(os.path.join(examples_dir, 'example_*')))
data_dir = os.path.join(examples_dir, 'example_{:04d}'.format(example_idx))
os.makedirs(data_dir, exist_ok=True)
positions = np.random.uniform(0., 1., size=(50, 3))
properties = np.random.uniform(0., 1., size=(50, 5))
image = np.random.uniform(size=(24, 24, 1))
np.savez(os.path.join(data_dir, 'data.npz'), positions=positions, properties=properties, image=image)
if __name__ == '__main__':
examples_dir = '/data2/hendrix/examples/'
train_data_dir = '/data2/hendrix/train_data_2/'
example_dirs = glob.glob(os.path.join(examples_dir, 'example_*'))
print(example_dirs)
# get_data_info(example_dirs)
# get_data_image(example_dirs)
# list_of_example_dirs = []
# temp_lst = []
# for example_dir in example_dirs:
# if len(temp_lst) == 32:
# list_of_example_dirs.append(temp_lst)
# temp_lst = []
# else:
# temp_lst.append(example_dir)
# list_of_example_dirs.append(temp_lst)
# print(f'number of tfrecfiles: {len(list_of_example_dirs)}')
pool = Pool(1)
pool.map(generate_data, example_dirs)
```
#### File: models/identify_medium_SCD/model_utils.py
```python
import sys
sys.path.insert(1, '/data/s1825216/git/neural_deprojection/')
from neural_deprojection.models.identify_medium_SCD.generate_data import generate_data, decode_examples_old
from neural_deprojection.graph_net_utils import vanilla_training_loop, TrainOneEpoch, AbstractModule, \
get_distribution_strategy, build_log_dir, build_checkpoint_dir, batch_dataset_set_graph_tuples
import glob, os
import tensorflow as tf
from tensorflow_addons.image import gaussian_filter2d
import json
# import tensorflow_addons as tfa
import numpy as np
from functools import partial
from graph_nets.utils_tf import set_zero_global_features
from graph_nets import blocks
from graph_nets.modules import GraphNetwork
from graph_nets._base import WrappedModelFnModule
import sonnet as snt
from graph_nets.graphs import GraphsTuple
from graph_nets.modules import _unsorted_segment_softmax, _received_edges_normalizer, GraphIndependent, SelfAttention, GraphNetwork
from graph_nets.utils_np import graphs_tuple_to_networkxs
from graph_nets.utils_tf import fully_connect_graph_dynamic
from networkx.drawing import draw
from networkx.linalg.spectrum import normalized_laplacian_spectrum
from networkx import Graph
import pylab as plt
from typing import Callable, Iterable, Optional, Text
from sonnet.src import base
from sonnet.src import initializers
from sonnet.src import linear
from sonnet.src import utils, once
class RelationNetwork(AbstractModule):
"""Implementation of a Relation Network.
See https://arxiv.org/abs/1706.01427 for more details.
The global and edges features of the input graph are not used, and are
allowed to be `None` (the receivers and senders properties must be present).
The output graph has updated, non-`None`, globals.
"""
    def __init__(self,
edge_model_fn,
global_model_fn,
reducer=tf.math.unsorted_segment_mean, # try with mean instead of sum
use_globals=False,
name="relation_network"):
"""Initializes the RelationNetwork module.
Args:
edge_model_fn: A callable that will be passed to EdgeBlock to perform
per-edge computations. The callable must return a Sonnet module (or
equivalent; see EdgeBlock for details).
global_model_fn: A callable that will be passed to GlobalBlock to perform
per-global computations. The callable must return a Sonnet module (or
equivalent; see GlobalBlock for details).
reducer: Reducer to be used by GlobalBlock to aggregate edges. Defaults to
        tf.math.unsorted_segment_mean.
name: The module name.
"""
super(RelationNetwork, self).__init__(name=name)
self._edge_block = blocks.EdgeBlock(
edge_model_fn=edge_model_fn,
use_edges=False,
use_receiver_nodes=True,
use_sender_nodes=True,
use_globals=use_globals)
self._global_block = blocks.GlobalBlock(
global_model_fn=global_model_fn,
use_edges=True,
use_nodes=False,
use_globals=use_globals,
edges_reducer=reducer)
def _build(self, graph):
"""Connects the RelationNetwork.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, except for the edges
and global properties which may be `None`.
Returns:
A `graphs.GraphsTuple` with updated globals.
Raises:
ValueError: If any of `graph.nodes`, `graph.receivers` or `graph.senders`
is `None`.
"""
edge_block = self._edge_block(graph)
# print(edge_block)
output_graph = self._global_block(edge_block)
# print(output_graph.globals)
return output_graph # graph.replace(globals=output_graph.globals)
class MultiHeadLinear(AbstractModule):
"""Linear module, optionally including bias."""
def __init__(self,
output_size: int,
num_heads: int = 1,
with_bias: bool = True,
w_init=None,
b_init=None,
name=None):
"""Constructs a `Linear` module.
Args:
      output_size: Output dimensionality.
      num_heads: Number of heads; outputs have shape [..., num_heads, output_size].
with_bias: Whether to include bias parameters. Default `True`.
w_init: Optional initializer for the weights. By default the weights are
initialized truncated random normal values with a standard deviation of
`1 / sqrt(input_feature_size)`, which is commonly used when the inputs
are zero centered (see https://arxiv.org/abs/1502.03167v3).
b_init: Optional initializer for the bias. By default the bias is
initialized to zero.
name: Name of the module.
"""
super(MultiHeadLinear, self).__init__(name=name)
self.output_size = output_size
self.with_bias = with_bias
self.w_init = w_init
self.num_heads = num_heads
if with_bias:
self.b_init = b_init if b_init is not None else snt.initializers.Zeros()
elif b_init is not None:
raise ValueError("When not using a bias the b_init must be None.")
@once.once
def _initialize(self, inputs: tf.Tensor):
"""Constructs parameters used by this module."""
utils.assert_minimum_rank(inputs, 2)
input_size = inputs.shape[-1]
if input_size is None: # Can happen inside an @tf.function.
raise ValueError("Input size must be specified at module build time.")
self.input_size = input_size
if self.w_init is None:
# See https://arxiv.org/abs/1502.03167v3.
stddev = 1 / tf.math.sqrt(self.input_size * 1.0)
self.w_init = snt.initializers.TruncatedNormal(stddev=stddev)
self.w = tf.Variable(
self.w_init([self.num_heads, self.input_size, self.output_size], inputs.dtype),
name="w")
if self.with_bias:
self.b = tf.Variable(
self.b_init([self.num_heads, self.output_size], inputs.dtype), name="b")
def _build(self, inputs: tf.Tensor) -> tf.Tensor:
self._initialize(inputs)
# [num_nodes, node_size].[num_heads, node_size, output_size] -> [num_nodes, num_heads, output_size]
outputs = tf.einsum('ns,hso->nho', inputs, self.w, optimize='optimal')
# outputs = tf.matmul(inputs, self.w)
if self.with_bias:
outputs = tf.add(outputs, self.b)
return outputs
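# Hedged shape sketch (not called anywhere): MultiHeadLinear maps per-node features
# [num_nodes, node_size] to [num_nodes, num_heads, output_size] via the einsum in _build.
# The sizes below are illustrative only.
def _example_multi_head_linear():
    mhl = MultiHeadLinear(output_size=8, num_heads=4)
    nodes = tf.random.normal([10, 16])  # [num_nodes, node_size]
    out = mhl(nodes)  # [num_nodes, num_heads, output_size]
    assert out.shape == (10, 4, 8)
    return out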
class CoreNetwork(AbstractModule):
"""
Core network which can be used in the EncodeProcessDecode network. Consists of a (full) graph network block
and a self attention block.
"""
def __init__(self,
num_heads,
multi_head_output_size,
input_node_size,
name=None):
super(CoreNetwork, self).__init__(name=name)
self.num_heads = num_heads
self.multi_head_output_size = multi_head_output_size
self.output_linear = snt.Linear(output_size=input_node_size)
self.FFN = snt.nets.MLP([32, input_node_size], activate_final=False) # Feed forward network
self.normalization = lambda x: (x - tf.reduce_mean(x)) / tf.math.reduce_std(x)
self.v_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # values
self.k_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # keys
self.q_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads) # queries
self.self_attention = SelfAttention()
def _build(self, latent):
node_values = self.v_linear(latent.nodes)
node_keys = self.k_linear(latent.nodes)
node_queries = self.q_linear(latent.nodes)
attended_latent = self.self_attention(node_values=node_values,
node_keys=node_keys,
node_queries=node_queries,
attention_graph=latent)
output_nodes = tf.reshape(attended_latent.nodes, (-1, self.num_heads * self.multi_head_output_size))
output_nodes = self.normalization(self.output_linear(output_nodes) + latent.nodes)
output_nodes = self.normalization(self.FFN(output_nodes))
output_graph = latent.replace(nodes=output_nodes)
return output_graph
class EncoderNetwork(AbstractModule):
"""
Core network which can be used in the EncodeProcessDecode network. Consists of a (full) graph network block
and a self attention block.
"""
def __init__(self,
edge_model_fn,
node_model_fn,
global_model_fn,
name=None):
super(EncoderNetwork, self).__init__(name=name)
self.node_block = blocks.NodeBlock(node_model_fn,
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=False)
self.relation_network = RelationNetwork(edge_model_fn=edge_model_fn,
global_model_fn=global_model_fn)
def _build(self, input_graph):
latent = self.node_block(input_graph)
output = self.relation_network(latent)
return output
class EncodeProcessDecode(AbstractModule):
"""Full encode-process-decode model.
The model we explore includes three components:
- An "Encoder" graph net, which independently encodes the edge, node, and
global attributes (does not compute relations etc.).
- A "Core" graph net, which performs N rounds of processing (message-passing)
steps. The input to the Core is the concatenation of the Encoder's output
and the previous output of the Core (labeled "Hidden(t)" below, where "t" is
the processing step).
- A "Decoder" graph net, which independently decodes the edge, node, and
global attributes (does not compute relations etc.), on each message-passing
step.
Hidden(t) Hidden(t+1)
| ^
*---------* | *------* | *---------*
| | | | | | | |
Input --->| Encoder | *->| Core |--*->| Decoder |---> Output(t)
| |---->| | | |
*---------* *------* *---------*
"""
def __init__(self,
encoder,
core,
decoder,
name="EncodeProcessDecode"):
super(EncodeProcessDecode, self).__init__(name=name)
self._encoder = encoder
self._core = core
self._decoder = decoder
def _build(self, input_graph, num_processing_steps):
latent_graph = self._encoder(input_graph)
# for _ in range(num_processing_steps):
# latent_graph = self._core(latent_graph)
# state = (counter, latent_graph)
_, latent_graph = tf.while_loop(cond=lambda const, state: const < num_processing_steps,
body=lambda const, state: (const+1, self._core(state)),
loop_vars=(tf.constant(0), latent_graph))
return self._decoder(latent_graph)
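# Hedged construction sketch (not called anywhere): wiring the Encoder/Core/Decoder of the diagram
# above with small Sonnet modules, mirroring how Model below builds its graph branch. The sizes are
# illustrative, not tuned values. `graph` must have nodes, senders and receivers set (edges and
# globals may be None).
def _example_encode_process_decode(graph: GraphsTuple, num_processing_steps=3):
    node_size = 10
    mlp_size = 16
    epd = EncodeProcessDecode(
        encoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True),
                               node_model_fn=lambda: snt.Linear(node_size),
                               global_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True)),
        core=CoreNetwork(num_heads=2, multi_head_output_size=node_size, input_node_size=node_size),
        decoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True),
                               node_model_fn=lambda: snt.Linear(node_size),
                               global_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True)))
    return epd(graph, num_processing_steps)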
class AutoEncoder(AbstractModule):
def __init__(self, kernel_size=4, name=None):
super(AutoEncoder, self).__init__(name=name)
self.encoder = snt.Sequential([snt.Conv2D(4, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu, # [4, 128, 128]
snt.Conv2D(8, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu, # [8, 64, 64]
snt.Conv2D(16, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu, # [16, 32, 32]
snt.Conv2D(32, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu]) # [32, 16, 16]
# snt.Conv2D(32, kernel_size, stride=2, padding='SAME'), tf.nn.relu,
# snt.Conv2D(64, kernel_size, stride=2, padding='SAME'), tf.nn.relu])
# self.decoder = snt.Sequential([snt.Conv2DTranspose(64, kernel_size, stride=2, padding='SAME'), tf.nn.relu,
# snt.Conv2DTranspose(32, kernel_size, stride=2, padding='SAME'), tf.nn.relu,
# snt.Conv2DTranspose(16, kernel_size, stride=2, padding='SAME'), tf.nn.relu,
self.decoder = snt.Sequential([snt.Conv2DTranspose(32, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu, # [32, 16, 16]
snt.Conv2DTranspose(16, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu, # [16, 32, 32]
snt.Conv2DTranspose(8, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu, # [8, 64, 64]
snt.Conv2DTranspose(4, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu, # [4, 128, 128]
snt.Conv2D(1, kernel_size, padding='SAME')]) # [1, 256, 256]
@property
def step(self):
if self._step is None:
raise ValueError("Need to set step idx variable. model.step = epoch")
return self._step
@step.setter
def step(self, value):
self._step = value
def _build(self, batch):
(img, ) = batch
# img = gaussian_filter2d(img, filter_shape=[6, 6])
img_before_autoencoder = (img - tf.reduce_min(img)) / (tf.reduce_max(img) - tf.reduce_min(img))
tf.summary.image(f'img_before_autoencoder', img_before_autoencoder, step=self.step)
encoded_img = self.encoder(img)
print(encoded_img.shape)
decoded_img = self.decoder(encoded_img)
img_after_autoencoder = (decoded_img - tf.reduce_min(decoded_img)) / (
tf.reduce_max(decoded_img) - tf.reduce_min(decoded_img))
tf.summary.image(f'img_after_autoencoder', img_after_autoencoder, step=self.step)
return decoded_img
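# Hedged shape sketch (not called anywhere): the encoder halves the spatial resolution four times
# (256 -> 16 with 32 channels) and the decoder upsamples back to a single-channel 256x256 image.
# The batch size is illustrative only.
def _example_autoencoder_shapes():
    auto_encoder = AutoEncoder(kernel_size=4)
    img = tf.random.normal([2, 256, 256, 1])
    encoded_img = auto_encoder.encoder(img)  # [2, 16, 16, 32]
    decoded_img = auto_encoder.decoder(encoded_img)  # [2, 256, 256, 1]
    return encoded_img.shape, decoded_img.shape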
class Model(AbstractModule):
"""Model inherits from AbstractModule, which contains a __call__ function which executes a _build function
that is to be specified in the child class. So for example:
model = Model(), then model() returns the output of _build()
AbstractModule inherits from snt.Module, which has useful functions that can return the (trainable) variables,
so the Model class has this functionality as well
An instance of the RelationNetwork class also inherits from AbstractModule,
so it also executes its _build() function when called and it can return its (trainable) variables
A RelationNetwork contains an edge block and a global block:
The edge block generally uses the edge, receiver, sender and global attributes of the input graph
to calculate the new edges.
In our case we currently only use the receiver and sender attributes to calculate the edges.
The global block generally uses the aggregated edge, aggregated node and the global attributes of the input graph
to calculate the new globals.
In our case we currently only use the aggregated edge attributes to calculate the new globals.
As input the RelationNetwork needs two (neural network) functions:
one to calculate the new edges from receiver and sender nodes
and one to calculate the globals from the aggregated edges.
The new edges will be a vector with size 16 (i.e. the output of the first function in the RelationNetwork)
The new globals will also be a vector with size 16 (i.e. the output of the second function in the RelationNetwork)
The image_cnn downscales the image (currently from 256x256 to 35x35) and encodes the image in 16 channels.
So we (currently) go from (256,256,1) to (29,29,16)
"""
def __init__(self,
mlp_size=16,
cluster_encoded_size=10,
image_encoded_size=64,
num_heads=10,
kernel_size=4,
image_feature_size=16,
core_steps=10, name=None):
super(Model, self).__init__(name=name)
self.epd_graph = EncodeProcessDecode(
encoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True),
node_model_fn=lambda: snt.Linear(cluster_encoded_size),
global_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True)),
core=CoreNetwork(num_heads=num_heads,
multi_head_output_size=cluster_encoded_size,
input_node_size=cluster_encoded_size),
decoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True),
node_model_fn=lambda: snt.Linear(cluster_encoded_size),
global_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True)))
self.epd_image = EncodeProcessDecode(
encoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True),
node_model_fn=lambda: snt.Linear(image_encoded_size),
global_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True)),
core=CoreNetwork(num_heads=num_heads,
multi_head_output_size=image_encoded_size,
input_node_size=image_encoded_size),
decoder=EncoderNetwork(edge_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True),
node_model_fn=lambda: snt.Linear(image_encoded_size),
global_model_fn=lambda: snt.nets.MLP([mlp_size], activate_final=True)))
# Load the autoencoder model from checkpoint
pretrained_auto_encoder = AutoEncoder(kernel_size=kernel_size)
checkpoint_dir = '/home/s1825216/git/neural_deprojection/neural_deprojection/models/identify_medium_SCD/autoencoder_checkpointing'
encoder_decoder_cp = tf.train.Checkpoint(encoder=pretrained_auto_encoder.encoder,
decoder=pretrained_auto_encoder.decoder)
model_cp = tf.train.Checkpoint(_model=encoder_decoder_cp)
checkpoint = tf.train.Checkpoint(module=model_cp)
status = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint.restore(status).expect_partial()
self.auto_encoder = pretrained_auto_encoder
self.compare = snt.nets.MLP([32, 1])
self.image_feature_size = image_feature_size
self._core_steps = core_steps
@property
def step(self):
if self._step is None:
raise ValueError("Need to set step idx variable. model.step = epoch")
return self._step
@step.setter
def step(self, value):
self._step = value
def _build(self, batch, *args, **kwargs):
(graph, img, c) = batch
del c
# The encoded cluster graph has globals which can be compared against the encoded image graph
encoded_graph = self.epd_graph(graph, self._core_steps)
# Add an extra dimension to the image (tf.summary expects a Tensor of rank 4)
img = img[None, ...]
im_before_cnn = (img - tf.reduce_min(img)) / (tf.reduce_max(img) - tf.reduce_min(img))
tf.summary.image(f'img_before_cnn', im_before_cnn, step=self.step)
img = self.auto_encoder.encoder(img)
# Prevent the autoencoder from learning
try:
for variable in self.auto_encoder.encoder.trainable_variables:
variable._trainable = False
for variable in self.auto_encoder.decoder.trainable_variables:
variable._trainable = False
except:
pass
img_after_autoencoder = (img - tf.reduce_min(img)) / (tf.reduce_max(img) - tf.reduce_min(img))
tf.summary.image(f'img_after_autoencoder', tf.transpose(img_after_autoencoder, [3, 1, 2, 0]), step=self.step)
decoded_img = self.auto_encoder.decoder(img)
decoded_img = (decoded_img - tf.reduce_min(decoded_img)) / (tf.reduce_max(decoded_img) - tf.reduce_min(decoded_img))
tf.summary.image(f'decoded_img', decoded_img, step=self.step)
# Reshape the encoded image so it can be used for the nodes
#1, w,h,c -> w*h, c
nodes = tf.reshape(img, (-1,self.image_feature_size))
# Create a graph that has a node for every encoded pixel. The features of each node
# are the channels of the corresponding pixel. Then connect each node with every other
# node.
img_graph = GraphsTuple(nodes=nodes,
edges=None,
globals=None,
receivers=None,
senders=None,
n_node=tf.shape(nodes)[0:1],
n_edge=tf.constant([0]))
connected_graph = fully_connect_graph_dynamic(img_graph)
# The encoded image graph has globals which can be compared against the encoded cluster graph
encoded_img = self.epd_image(connected_graph, 1)
# Compare the globals from the encoded cluster graph and encoded image graph
# to estimate the similarity between the input graph and input image
distance = self.compare(tf.concat([encoded_graph.globals, encoded_img.globals], axis=1)) + self.compare(
tf.concat([encoded_img.globals, encoded_graph.globals], axis=1))
return distance
MODEL_MAP = dict(model1=Model)
def build_training(model_type, model_parameters, optimizer_parameters, loss_parameters, strategy=None) -> TrainOneEpoch:
model_cls = MODEL_MAP[model_type]
model = model_cls(**model_parameters)
def build_opt(**kwargs):
opt_type = kwargs.get('opt_type')
if opt_type == 'adam':
learning_rate = kwargs.get('learning_rate', 1e-4)
opt = snt.optimizers.Adam(learning_rate, beta1=1-1/100., beta2=1-1/500.)
else:
raise ValueError('Opt {} invalid'.format(opt_type))
return opt
def build_loss(**loss_parameters):
def loss(model_outputs, batch):
(graph, img, c) = batch
# loss = mean(-sum_k^2 true[k] * log(pred[k]/true[k]))
return tf.reduce_mean(tf.losses.binary_crossentropy(c[None,None], model_outputs, from_logits=True))# tf.math.sqrt(tf.reduce_mean(tf.math.square(rank - tf.nn.sigmoid(model_outputs[:, 0]))))
return loss
loss = build_loss(**loss_parameters)
opt = build_opt(**optimizer_parameters)
training = TrainOneEpoch(model, loss, opt, strategy=strategy)
return training
def build_dataset(data_dir):
"""
Build data set from a directory of tfrecords.
Args:
data_dir: str, path to *.tfrecords
Returns: Dataset obj.
"""
tfrecords = glob.glob(os.path.join(data_dir, '*.tfrecords'))
dataset = tf.data.TFRecordDataset(tfrecords).map(partial(decode_examples_old,
node_shape=(11,),
image_shape=(256, 256, 1))) # (graph, image, spsh, proj)
_graphs = dataset.map(lambda graph_data_dict, img, spsh, proj: (graph_data_dict, spsh, proj)).shuffle(buffer_size=50)
_images = dataset.map(lambda graph_data_dict, img, spsh, proj: (img, spsh, proj)).shuffle(buffer_size=50)
shuffled_dataset = tf.data.Dataset.zip((_graphs, _images)) # ((graph_data_dict, idx1), (img, idx2))
    shuffled_dataset = shuffled_dataset.map(lambda ds1, ds2: (ds1[0], ds2[0],
                                                              tf.logical_and(ds1[1] == ds2[1],
                                                                             ds1[2] == ds2[2])))  # (graph, img, yes/no)
shuffled_dataset = shuffled_dataset.filter(lambda graph_data_dict, img, c: ~c)
shuffled_dataset = shuffled_dataset.map(lambda graph_data_dict, img, c: (graph_data_dict, img, tf.cast(c, tf.int32)))
nonshuffeled_dataset = dataset.map(
lambda graph_data_dict, img, spsh, proj : (graph_data_dict, img, tf.constant(1, dtype=tf.int32))) # (graph, img, yes)
dataset = tf.data.experimental.sample_from_datasets([shuffled_dataset, nonshuffeled_dataset])
dataset = dataset.map(lambda graph_data_dict, img, c: (GraphsTuple(**graph_data_dict), img, c))
# dataset = batch_dataset_set_graph_tuples(all_graphs_same_size=True, dataset=dataset, batch_size=16)
return dataset
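# Hedged usage sketch (not called anywhere): building the dataset and a TrainOneEpoch object.
# The model/optimizer parameter values are illustrative defaults, not tuned settings.
def _example_build(data_dir):
    dataset = build_dataset(data_dir)
    training = build_training(model_type='model1',
                              model_parameters=dict(mlp_size=16, num_heads=10),
                              optimizer_parameters=dict(opt_type='adam', learning_rate=1e-4),
                              loss_parameters=dict())
    return dataset, training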
```
#### File: models/openai_dvae_modules/modules.py
```python
import tensorflow as tf
import sonnet as snt
from neural_deprojection.graph_net_utils import AbstractModule
def same_padding(filter_size):
return tuple([(filter_size - 1) // 2] * 2)
class EncoderResBlock2D(AbstractModule):
def __init__(self, out_size, post_gain, name=None):
super(EncoderResBlock2D, self).__init__(name=name)
assert out_size % 4 == 0
self.out_size = out_size
hidden_size = out_size // 4
self.id_path = snt.Conv2D(self.out_size, 1, name='id_path')
self.conv_block = snt.Sequential([snt.LayerNorm(-1, True, True, name='layer_norm'),
tf.nn.relu, snt.Conv2D(hidden_size, 3, padding="SAME", name='conv_1'),
tf.nn.relu, snt.Conv2D(hidden_size, 3, padding="SAME", name='conv_2'),
tf.nn.relu, snt.Conv2D(hidden_size, 3, padding="SAME", name='conv_3'),
tf.nn.relu, snt.Conv2D(out_size, 1, padding="SAME", name='conv_4')])
self.post_gain = post_gain
def _build(self, img, **kwargs):
output = self.id_path(img) + self.post_gain * self.conv_block(img)
return output
class Encoder2D(AbstractModule):
def __init__(self, hidden_size, num_embeddings, num_groups=4, name=None):
super(Encoder2D, self).__init__(name=name)
self.shrink_factor = 2 ** (num_groups - 1)
num_blk_per_group = 1
num_layers = num_groups * num_blk_per_group
post_gain = 1. / num_layers ** 2
def _single_group(group_idx):
blk_hidden_size = 2 ** group_idx * hidden_size
res_blocks = [EncoderResBlock2D(blk_hidden_size, post_gain, name=f'blk_{res_blk}')
for res_blk in range(num_blk_per_group)]
if group_idx < num_groups - 1:
res_blocks.append(lambda x: tf.nn.max_pool2d(x, 2, strides=2, padding='SAME'))
return snt.Sequential(res_blocks, name=f'group_{group_idx}')
groups = [snt.Conv2D(hidden_size, 7, padding="SAME", name='input_group')]
for groud_idx in range(num_groups):
groups.append(_single_group(groud_idx))
groups.append(
snt.Sequential([tf.nn.relu, snt.Conv2D(num_embeddings, 1, padding="SAME", name='logits_conv')],
name='output_group'))
self.blocks = snt.Sequential(groups, name='groups')
def _build(self, img, **kwargs):
return self.blocks(img)
class DecoderResBlock2D(AbstractModule):
def __init__(self, out_size, post_gain, name=None):
super(DecoderResBlock2D, self).__init__(name=name)
assert out_size % 4 == 0
self.out_size = out_size
hidden_size = out_size // 4
self.id_path = snt.Conv2D(self.out_size, 1, name='id_path')
self.conv_block = snt.Sequential([snt.LayerNorm(-1,True, True, name='layer_norm'),
tf.nn.relu, snt.Conv2D(hidden_size, 1, padding="SAME", name='conv_1'),
tf.nn.relu, snt.Conv2D(hidden_size, 3, padding="SAME", name='conv_2'),
tf.nn.relu, snt.Conv2D(hidden_size, 3, padding="SAME", name='conv_3'),
tf.nn.relu, snt.Conv2D(out_size, 3, padding="SAME", name='conv_4')])
self.post_gain = post_gain
def _build(self, img, **kwargs):
output = self.id_path(img) + self.post_gain * self.conv_block(img)
return output
def upsample(x):
"""
Doubles resolution.
Args:
x: [batch, W, H, ..., C]
Returns:
[batch, 2*W, 2*H, 2*..., C]
"""
out = x
for i in range(len(x.shape)-2):
out = tf.repeat(out, 2, axis=1+i)
return out
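# Hedged shape sketch (not called anywhere): upsample repeats every spatial axis twice, so a
# [batch, W, H, C] image becomes [batch, 2W, 2H, C] and a [batch, W, H, D, C] voxel grid
# becomes [batch, 2W, 2H, 2D, C].
def _example_upsample():
    image = tf.zeros([1, 8, 8, 3])
    assert upsample(image).shape == (1, 16, 16, 3)
    voxels = tf.zeros([1, 8, 8, 8, 3])
    assert upsample(voxels).shape == (1, 16, 16, 16, 3)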
class Decoder2D(AbstractModule):
def __init__(self, hidden_size, num_channels=1, num_groups=4, name=None):
super(Decoder2D, self).__init__(name=name)
self.shrink_factor = 2 ** (num_groups - 1)
num_blk_per_group = 1
num_layers = num_groups * num_blk_per_group
post_gain = 1. / num_layers ** 2
def _single_group(group_idx):
blk_hidden_size = 2 ** (num_groups - group_idx - 1) * hidden_size
res_blocks = [DecoderResBlock2D(blk_hidden_size, post_gain, name=f'blk_{res_blk}')
for res_blk in range(num_blk_per_group)]
if group_idx < num_groups - 1:
res_blocks.append(upsample)
return snt.Sequential(res_blocks, name=f'group_{group_idx}')
groups = [snt.Conv2D(hidden_size // 2, 1, padding="SAME", name='input_group')]
for groud_idx in range(num_groups):
groups.append(_single_group(groud_idx))
groups.append(
snt.Sequential(
[tf.nn.relu, snt.Conv2D(num_channels * 2, 1, padding="SAME", name='likelihood_params_conv')],
name='output_group'))
self.blocks = snt.Sequential(groups, name='groups')
def _build(self, img, **kwargs):
return self.blocks(img)
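# Hedged shape sketch (not called anywhere): Encoder2D shrinks the spatial resolution by
# shrink_factor = 2 ** (num_groups - 1) and outputs num_embeddings logit channels, while Decoder2D
# upsamples by the same factor and outputs 2 * num_channels likelihood parameters (mu, log b).
# The sizes below (including the 16-channel latent fed to the decoder) are illustrative only.
def _example_encoder_decoder_2d():
    encoder = Encoder2D(hidden_size=8, num_embeddings=32, num_groups=4)
    decoder = Decoder2D(hidden_size=8, num_channels=1, num_groups=4)
    logits = encoder(tf.random.normal([2, 64, 64, 1]))  # [2, 8, 8, 32]
    likelihood_params = decoder(tf.random.normal([2, 8, 8, 16]))  # [2, 64, 64, 2]
    return logits.shape, likelihood_params.shape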
class DecoderResBlock3D(AbstractModule):
def __init__(self, out_size, post_gain, name=None):
super(DecoderResBlock3D, self).__init__(name=name)
assert out_size % 4 == 0
self.out_size = out_size
hidden_size = out_size // 4
self.id_path = snt.Conv3D(self.out_size, 1, name='id_path')
self.conv_block = snt.Sequential([snt.LayerNorm(-1, True, True, name='layer_norm'),
tf.nn.relu, snt.Conv3D(hidden_size, 1, padding="SAME", name='conv_1'),
tf.nn.relu, snt.Conv3D(hidden_size, 3, padding="SAME", name='conv_2'),
tf.nn.relu, snt.Conv3D(hidden_size, 3, padding="SAME", name='conv_3'),
tf.nn.relu, snt.Conv3D(out_size, 3, padding="SAME", name='conv_4')])
self.post_gain = post_gain
def _build(self, img, **kwargs):
# self.initialise(img)
output = self.id_path(img) + self.post_gain * self.conv_block(img)
return output
class Decoder3D(AbstractModule):
def __init__(self, hidden_size, num_channels=1, num_groups=4, name=None):
super(Decoder3D, self).__init__(name=name)
self.shrink_factor = 2**(num_groups-1)
num_blk_per_group = 1
num_layers = num_groups * num_blk_per_group
post_gain = 1. / num_layers ** 2
def _single_group(group_idx):
blk_hidden_size = 2 ** (num_groups - group_idx - 1) * hidden_size
res_blocks = [DecoderResBlock3D(blk_hidden_size, post_gain, name=f'blk_{res_blk}')
for res_blk in range(num_blk_per_group)]
if group_idx < num_groups - 1:
res_blocks.append(upsample)
return snt.Sequential(res_blocks, name=f'group_{group_idx}')
groups = [snt.Conv3D(hidden_size // 2, 1, padding="SAME", name='input_group')]
for groud_idx in range(num_groups):
groups.append(_single_group(groud_idx))
groups.append(
snt.Sequential(
[tf.nn.relu, snt.Conv3D(num_channels * 2, 1, padding="SAME", name='likelihood_params_conv')],
name='output_group'))
self.blocks = snt.Sequential(groups, name='groups')
def _build(self, img, **kwargs):
return self.blocks(img)
class EncoderResBlock3D(AbstractModule):
"""
O[out] = sum_{i,j,k,in} W[i,j,k,in,out]*I[i,j,k,in]
count = kernel^3 * N_in
"""
def __init__(self, out_size, post_gain, name=None):
super(EncoderResBlock3D, self).__init__(name=name)
assert out_size % 4 == 0
self.out_size = out_size
hidden_size = out_size // 4
self.id_path = snt.Conv3D(self.out_size, 1, name='id_path')
self.conv_block = snt.Sequential([snt.LayerNorm(-1,True, True,name='layer_norm'),
tf.nn.relu, snt.Conv3D(hidden_size, 3, padding="SAME", name='conv_1'),
tf.nn.relu, snt.Conv3D(hidden_size, 3, padding="SAME", name='conv_2'),
tf.nn.relu, snt.Conv3D(hidden_size, 3, padding="SAME", name='conv_3'),
tf.nn.relu, snt.Conv3D(out_size, 1, padding="SAME", name='conv_4')])
self.post_gain = post_gain
def _build(self, img, **kwargs):
# self.initialise(img)
output = self.id_path(img) + self.post_gain * self.conv_block(img)
return output
class Encoder3D(AbstractModule):
def __init__(self, hidden_size, num_embeddings, num_groups=4, name=None):
super(Encoder3D, self).__init__(name=name)
self.shrink_factor = 2**(num_groups-1)
num_blk_per_group = 1
num_layers = num_groups * num_blk_per_group
post_gain = 1. / num_layers ** 2
def _single_group(group_idx):
blk_hidden_size = 2 ** group_idx * hidden_size
res_blocks = [EncoderResBlock3D(blk_hidden_size, post_gain, name=f'blk_{res_blk}')
for res_blk in range(num_blk_per_group)]
if group_idx < num_groups - 1:
res_blocks.append(lambda x: tf.nn.max_pool3d(x, 2, strides=2, padding='SAME'))
return snt.Sequential(res_blocks, name=f'group_{group_idx}')
groups = [snt.Conv3D(hidden_size, 7, padding="SAME", name='input_group')]
for groud_idx in range(num_groups):
groups.append(_single_group(groud_idx))
groups.append(
snt.Sequential([tf.nn.relu, snt.Conv3D(num_embeddings, 1, padding="SAME", name='logits_conv')],
name='output_group'))
self.blocks = snt.Sequential(groups, name='groups')
def _build(self, img, **kwargs):
return self.blocks(img)
```
#### File: models/Simple_complete_model/autoencoder_2d.py
```python
import tensorflow as tf
from neural_deprojection.graph_net_utils import AbstractModule, get_shape
import tensorflow_probability as tfp
from neural_deprojection.models.openai_dvae_modules.modules import Encoder2D, Decoder2D
class DiscreteImageVAE(AbstractModule):
def __init__(self,
hidden_size: int = 32,
embedding_dim: int = 64,
num_embedding: int = 1024,
num_token_samples: int = 4,
num_channels:int = 1,
compute_temperature:callable = None,
beta:float = 1.,
num_groups=4,
name=None):
super(DiscreteImageVAE, self).__init__(name=name)
self.num_channels = num_channels
self.embeddings = tf.Variable(initial_value=tf.random.truncated_normal((num_embedding, embedding_dim)),
name='embeddings')
self.num_token_samples = num_token_samples
self.num_embedding = num_embedding
self.embedding_dim = embedding_dim
self.compute_temperature = compute_temperature
self.beta = tf.convert_to_tensor(beta, dtype=tf.float32)
self._encoder = Encoder2D(hidden_size=hidden_size, num_embeddings=num_embedding, num_groups=num_groups,name='EncoderImage')
self._decoder = Decoder2D(hidden_size=hidden_size, num_channels=num_channels, num_groups=num_groups, name='DecoderImage')
assert self._encoder.shrink_factor == self._decoder.shrink_factor, "Shrink factors should be same. Use same num_groups."
self.shrink_factor = self._decoder.shrink_factor
@property
def temperature(self):
return self.compute_temperature(self.epoch)
def set_beta(self, beta):
self.beta.assign(beta)
def set_temperature(self, temperature):
self.temperature.assign(temperature)
def compute_logits(self, img):
"""
Computes normalised logits representing the variational posterior, q(z | img).
Args:
img: [batch, W', H', num_channels]
Returns:
logits: [batch, W, H, num_embeddings]
"""
#[batch, W, H, num_embeddings]
logits = self._encoder(img)
logits /= 1e-6 + tf.math.reduce_std(logits, axis=-1, keepdims=True)
logits -= tf.reduce_logsumexp(logits, axis=-1, keepdims=True)
return logits
def sample_latent(self, logits, temperature, num_samples):
"""
Sample one-hot encodings of each latent variable.
Args:
            logits: [batch, W, H, num_embeddings]
            temperature: scalar relaxation temperature for ExpRelaxedOneHotCategorical
            num_samples: int
Returns:
log_token_samples_onehot, token_samples_onehot: [num_samples, batch, W, H, num_embeddings]
latent_tokens: [num_samples, batch, W, H, embedding_size]
"""
token_distribution = tfp.distributions.ExpRelaxedOneHotCategorical(temperature, logits=logits)
log_token_samples_onehot = token_distribution.sample((num_samples,), name='token_samples') # [S, batch, W, H, num_embeddings]
token_samples_onehot = tf.math.exp(log_token_samples_onehot)
latent_tokens = tf.einsum("sbwhn,nm->sbwhm",token_samples_onehot, self.embeddings) # [S, batch, W, H, embedding_size]
return log_token_samples_onehot, token_samples_onehot, latent_tokens
def compute_likelihood_parameters(self, latent_tokens):
"""
        Compute the likelihood parameters (mu, log b) from the sampled latent tokens.
Args:
latent_tokens: [num_samples, batch, W, H, embedding_size]
Returns:
mu, logb: [num_samples, batch, W', H', num_channels]
"""
[num_samples, batch, H, W, _] = get_shape(latent_tokens)
latent_tokens = tf.reshape(latent_tokens, [num_samples*batch, H, W, self.embedding_dim]) #[num_samples*batch, H, W, self.embedding_dim]
decoded_imgs = self._decoder(latent_tokens) # [num_samples * batch, H', W', C*2]
decoded_imgs.set_shape([None,
None, None,
self.num_channels*2])
[_, H_2, W_2, _] = get_shape(decoded_imgs)
decoded_imgs = tf.reshape(decoded_imgs, [num_samples, batch, H_2, W_2, 2 * self.num_channels]) # [S, batch, H, W, embedding_dim]
mu, logb = decoded_imgs[..., :self.num_channels], decoded_imgs[..., self.num_channels:]
return mu, logb # [S, batch, H', W' C], [S, batch, H', W' C]
def log_likelihood(self, properties, mu, logb):
"""
Log-Laplace distribution.
The pdf of log-Laplace is,
        P(x | mu, b) = 1 / (2 * b * x) * exp(-|log(x) - mu| / b)
Args:
properties: image data [batch, H', W', channels]. Assumes properties are of the form log(maximum(1e-5, properties))
mu: [num_samples, batch, H', W', channels]
logb: [num_samples, batch, H', W', channels]
Returns:
log_prob [num_samples, batch]
"""
log_prob = - tf.math.abs(properties - mu) / tf.math.exp(logb) \
- tf.math.log(2.) - properties - logb # [num_samples, batch, H', W', num_properties]
#num_samples, batch
return tf.reduce_sum(log_prob, axis=[-1, -2, -3])
def log_prob_q(self, latent_logits, log_token_samples_onehot):
"""
Args:
latent_logits: [batch, H, W, num_embeddings] (normalised)
log_token_samples_onehot: [num_samples, batch, H, W, num_embeddings]
Returns:
"""
_, H, W, _ = get_shape(latent_logits)
q_dist = tfp.distributions.ExpRelaxedOneHotCategorical(self.temperature, logits=latent_logits)
log_prob_q = q_dist.log_prob(log_token_samples_onehot) # num_samples, batch, H, W
return log_prob_q
def kl_term(self, latent_logits, log_token_samples_onehot):
"""
Compute the term, which if marginalised over q(z) results in KL(q | prior).
sum_z log q(z) - log prior(z)
Args:
latent_logits: [batch, H, W, num_embeddings] (normalised)
log_token_samples_onehot: [num_samples, batch, H, W, num_embeddings]
Returns:
kl_term: [num_samples, batch]
"""
log_prob_q = self.log_prob_q(latent_logits, log_token_samples_onehot) #num_samples, batch, H, W
prior_dist = tfp.distributions.ExpRelaxedOneHotCategorical(self.temperature, logits=tf.zeros_like(latent_logits))
log_prob_prior = prior_dist.log_prob(log_token_samples_onehot) # num_samples, batch, H, W
return tf.reduce_sum(log_prob_q - log_prob_prior, axis=[-1,-2])# num_samples, batch
def _build(self, img, **kwargs) -> dict:
"""
Args:
img: [batch, H, W, num_channels]
**kwargs:
Returns:
"""
latent_logits = self.compute_logits(img) # [batch, H, W, num_embeddings]
log_token_samples_onehot, token_samples_onehot, latent_tokens = self.sample_latent(latent_logits, self.temperature, self.num_token_samples) # [num_samples, batch, H, W, num_embeddings], [num_samples, batch, H, W, embedding_size]
mu, logb = self.compute_likelihood_parameters(latent_tokens) # [num_samples, batch, H', W', C], [num_samples, batch, H', W', C]
log_likelihood = self.log_likelihood(img, mu, logb) # [num_samples, batch]
kl_term = self.kl_term(latent_logits, log_token_samples_onehot) # [num_samples, batch]
var_exp = tf.reduce_mean(log_likelihood, axis=0) # [batch]
kl_div = tf.reduce_mean(kl_term, axis=0) # [batch]
elbo = var_exp - self.beta * kl_div # batch
loss = - tf.reduce_mean(elbo) # scalar
entropy = -tf.reduce_sum(tf.math.exp(latent_logits) * latent_logits, axis=[-1]) # [batch, H, W]
perplexity = 2. ** (entropy / tf.math.log(2.)) # [batch, H, W]
mean_perplexity = tf.reduce_mean(perplexity) # scalar
if self.log_counter % int(128 / mu.get_shape()[0]):
tf.summary.scalar('perplexity', mean_perplexity, step=self.step)
tf.summary.scalar('var_exp', tf.reduce_mean(var_exp), step=self.step)
tf.summary.scalar('kl_div', tf.reduce_mean(kl_div), step=self.step)
tf.summary.scalar('temperature', self.temperature, step=self.step)
tf.summary.scalar('beta', self.beta, step=self.step)
_mu = mu[0] #[batch, H', W', C]
_img = img #[batch, H', W', C]
for i in range(self.num_channels):
vmin = tf.reduce_min(_mu[..., i])
vmax = tf.reduce_max(_mu[..., i])
_projected_mu = (_mu[..., i:i+1]-vmin)/(vmax-vmin)#batch, H', W', 1
_projected_mu = tf.clip_by_value(_projected_mu, 0., 1.)
vmin = tf.reduce_min(_img[..., i])
vmax = tf.reduce_max(_img[..., i])
_projected_img = (_img[..., i:i+1]-vmin)/(vmax-vmin)#batch, H', W', 1
_projected_img = tf.clip_by_value(_projected_img, 0., 1.)
tf.summary.image(f'image_predict[{i}]', _projected_mu, step=self.step)
tf.summary.image(f'image_actual[{i}]', _projected_img, step=self.step)
batch, H, W, _ = get_shape(latent_logits)
_latent_logits = latent_logits # [batch, H, W, num_embeddings]
_latent_logits -= tf.reduce_min(_latent_logits, axis=-1, keepdims=True)
_latent_logits /= tf.reduce_max(_latent_logits, axis=-1, keepdims=True)
_latent_logits = tf.reshape(_latent_logits, [batch, H*W, self.num_embedding, 1]) # [batch, H*W, num_embedding, 1]
tf.summary.image('latent_logits', _latent_logits, step=self.step)
token_sample_onehot = token_samples_onehot[0] # [batch, H, W, num_embeddings]
token_sample_onehot = tf.reshape(token_sample_onehot, [batch, H*W, self.num_embedding, 1]) # [batch, H*W, num_embedding, 1]
tf.summary.image('latent_samples_onehot', token_sample_onehot, step=self.step)
return dict(loss=loss,
metrics=dict(var_exp=var_exp,
kl_div=kl_div,
mean_perplexity=mean_perplexity))
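# Hedged inference sketch (not called anywhere): the individual stages of the VAE without the summary
# logging in _build. Image height/width must be divisible by vae.shrink_factor; the fixed temperature
# and all sizes are illustrative assumptions (in training the temperature comes from
# compute_temperature(epoch)). The image is assumed to be log-transformed, as log_likelihood expects.
def _example_discrete_image_vae_stages(img):
    vae = DiscreteImageVAE(hidden_size=8, embedding_dim=16, num_embedding=128,
                           num_token_samples=2, num_channels=1, num_groups=4,
                           compute_temperature=lambda epoch: tf.constant(1., tf.float32))
    temperature = tf.constant(1., tf.float32)
    latent_logits = vae.compute_logits(img)  # [batch, W, H, num_embedding]
    _, token_samples_onehot, latent_tokens = vae.sample_latent(latent_logits, temperature,
                                                               vae.num_token_samples)
    mu, logb = vae.compute_likelihood_parameters(latent_tokens)  # [S, batch, W', H', C] each
    return vae.log_likelihood(img, mu, logb)  # [S, batch]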
```
#### File: models/Simple_complete_model/autoregressive_prior.py
```python
from graph_nets import blocks
import tensorflow as tf
import sonnet as snt
from graph_nets.graphs import GraphsTuple
from neural_deprojection.graph_net_utils import AbstractModule, get_shape, graph_batch_reshape, graph_unbatch_reshape, \
grid_graphs
from neural_deprojection.models.Simple_complete_model.autoencoder_3d import DiscreteVoxelsVAE
from neural_deprojection.models.Simple_complete_model.autoencoder_2d import DiscreteImageVAE
import tensorflow_probability as tfp
from graph_nets.modules import SelfAttention
from sonnet.src import utils, once
from neural_deprojection.models.Simple_complete_model.edge_connectors import connect_graph_dynamic
class MultiHeadLinear(AbstractModule):
"""Linear module, optionally including bias."""
def __init__(self,
output_size: int,
num_heads: int = 1,
with_bias: bool = True,
w_init=None,
b_init=None,
name=None):
"""Constructs a `Linear` module.
Args:
      output_size: Output dimensionality.
      num_heads: Number of heads; outputs have shape [..., num_heads, output_size].
with_bias: Whether to include bias parameters. Default `True`.
w_init: Optional initializer for the weights. By default the weights are
initialized truncated random normal values with a standard deviation of
`1 / sqrt(input_feature_size)`, which is commonly used when the inputs
are zero centered (see https://arxiv.org/abs/1502.03167v3).
b_init: Optional initializer for the bias. By default the bias is
initialized to zero.
name: Name of the module.
"""
super(MultiHeadLinear, self).__init__(name=name)
self.output_size = output_size
self.with_bias = with_bias
self.w_init = w_init
self.num_heads = num_heads
if with_bias:
self.b_init = b_init if b_init is not None else snt.initializers.Zeros()
elif b_init is not None:
raise ValueError("When not using a bias the b_init must be None.")
@once.once
def _initialize(self, inputs: tf.Tensor):
"""Constructs parameters used by this module."""
utils.assert_minimum_rank(inputs, 2)
input_size = inputs.shape[-1]
if input_size is None: # Can happen inside an @tf.function.
raise ValueError("Input size must be specified at module build time.")
self.input_size = input_size
if self.w_init is None:
# See https://arxiv.org/abs/1502.03167v3.
stddev = 1 / tf.math.sqrt(self.input_size * 1.0)
self.w_init = snt.initializers.TruncatedNormal(stddev=stddev)
self.w = tf.Variable(
self.w_init([self.num_heads, self.input_size, self.output_size], inputs.dtype),
name="w")
if self.with_bias:
self.b = tf.Variable(
self.b_init([self.num_heads, self.output_size], inputs.dtype), name="b")
def _build(self, inputs: tf.Tensor) -> tf.Tensor:
self._initialize(inputs)
# [num_nodes, node_size].[num_heads, node_size, output_size] -> [num_nodes, num_heads, output_size]
outputs = tf.einsum('ns,hso->nho', inputs, self.w, optimize='optimal')
# outputs = tf.matmul(inputs, self.w)
if self.with_bias:
outputs = tf.add(outputs, self.b)
return outputs
class TransformerLayer(AbstractModule):
"""
Core network which can be used in the EncodeProcessDecode network. Consists of a (full) graph network block
and a self attention block.
"""
def __init__(self,
num_heads,
name=None):
super(TransformerLayer, self).__init__(name=name)
self.num_heads = num_heads
self.ln1 = snt.LayerNorm(axis=-1, eps=1e-6, create_scale=True, create_offset=True, name='layer_norm1')
self.ln2 = snt.LayerNorm(axis=-1, eps=1e-6, create_scale=True, create_offset=True, name='layer_norm2')
self.ln_keys = snt.LayerNorm(axis=-1, eps=1e-6, create_scale=True, create_offset=True, name='layer_norm_keys')
self.ln_queries = snt.LayerNorm(axis=-1, eps=1e-6, create_scale=True, create_offset=True,
name='layer_norm_queries')
self.self_attention = SelfAttention()
@once.once
def initialize(self, graphs):
input_node_size = get_shape(graphs.nodes)[-1]
self.input_node_size = input_node_size
self.v_linear = MultiHeadLinear(output_size=input_node_size, num_heads=self.num_heads, name='mhl1') # values
self.k_linear = MultiHeadLinear(output_size=input_node_size, num_heads=self.num_heads, name='mhl2') # keys
self.q_linear = MultiHeadLinear(output_size=input_node_size, num_heads=self.num_heads, name='mhl3') # queries
self.FFN = snt.nets.MLP([input_node_size, input_node_size], activate_final=False,
name='ffn') # Feed forward network
self.output_linear = snt.Linear(output_size=input_node_size, name='output_linear')
def _build(self, latent):
self.initialize(latent)
n_node, _ = get_shape(latent.nodes)
node_values = self.v_linear(latent.nodes)
node_keys = self.k_linear(latent.nodes)
node_queries = self.q_linear(latent.nodes) # n_node, num_head, F
node_keys = self.ln_keys(node_keys)
node_queries = self.ln_queries(node_queries)
_, _, d_k = get_shape(node_keys)
node_queries /= tf.math.sqrt(tf.cast(d_k, node_queries.dtype)) # n_node, F
attended_latent = self.self_attention(node_values=node_values,
node_keys=node_keys,
node_queries=node_queries,
attention_graph=latent)
# n_nodes, heads, output_size -> n_nodes, heads*output_size
output_nodes = tf.reshape(attended_latent.nodes, (n_node, self.num_heads * self.input_node_size))
output_nodes = self.ln1(self.output_linear(output_nodes) + latent.nodes)
output_nodes = self.ln2(self.FFN(output_nodes))
output_graph = latent.replace(nodes=output_nodes)
return output_graph
class SelfAttentionMessagePassing(AbstractModule):
"""
Operates on graphs with nodes, and connectivity defined by senders and receivers, and maps to new nodes.
Optionally uses edges and globals if they are defined.
"""
def __init__(self, num_heads: int = 1, use_edges=False, use_globals=False, name=None):
super(SelfAttentionMessagePassing, self).__init__(name=name)
self.selfattention_core = TransformerLayer(num_heads=num_heads)
self.layer_norm1 = snt.LayerNorm(-1, True, True, name='layer_norm_edges')
self.layer_norm2 = snt.LayerNorm(-1, True, True, name='layer_norm_nodes')
self.use_globals = use_globals
self.use_edges = use_edges
@once.once
def initialize(self, graphs: GraphsTuple):
in_node_size = get_shape(graphs.nodes)[-1]
node_model_fn = lambda: snt.nets.MLP([in_node_size, in_node_size], activate_final=True,
activation=tf.nn.relu,
name='node_fn')
edge_model_fn = lambda: snt.nets.MLP([in_node_size, in_node_size],
activate_final=True, activation=tf.nn.relu,
name='edge_fn')
self.edge_block = blocks.EdgeBlock(edge_model_fn,
use_edges=self.use_edges,
use_receiver_nodes=False,
use_sender_nodes=True,
use_globals=self.use_globals)
self.node_block = blocks.NodeBlock(node_model_fn,
use_received_edges=True,
use_sent_edges=False,
use_nodes=True,
use_globals=self.use_globals)
def _build(self, graphs: GraphsTuple):
self.initialize(graphs)
latent_graphs = self.selfattention_core(graphs)
latent_graphs = self.edge_block(latent_graphs)
latent_graphs = latent_graphs.replace(edges=self.layer_norm1(latent_graphs.edges))
latent_graphs = self.node_block(latent_graphs)
latent_graphs = latent_graphs.replace(nodes=self.layer_norm2(latent_graphs.nodes))
return latent_graphs
class AutoRegressivePrior(AbstractModule):
"""
Models an auto-regressive joint distribution over the discrete latents, typically used as a prior.
Minimizes KL(q(z_2d, z_3d | x_2d, x_3d) || p(z_2d, z_3d)), where p(z_2d, z_3d) is the auto-regressive model.
"""
def __init__(self,
discrete_image_vae: DiscreteImageVAE,
discrete_voxel_vae: DiscreteVoxelsVAE,
num_heads: int = 1,
num_layers: int = 1,
embedding_dim: int = 16,
num_token_samples: int = 1,
name=None):
super(AutoRegressivePrior, self).__init__(name=name)
self.discrete_image_vae = discrete_image_vae
self.discrete_voxel_vae = discrete_voxel_vae
self.embedding_dim = embedding_dim
self.num_token_samples = num_token_samples
self.num_embedding = self.discrete_voxel_vae.num_embedding + self.discrete_image_vae.num_embedding + 3
self.embeddings = tf.Variable(
initial_value=tf.random.truncated_normal((self.num_embedding, self.embedding_dim)),
name='embeddings')
self.num_output = (self.discrete_voxel_vae.voxels_per_dimension // 8) ** 3 # 3 maxpool layers cubed
message_passing_layers = [SelfAttentionMessagePassing(num_heads=num_heads, name=f"self_attn_mp_{i}")
for i in range(num_layers)]
# don't need because we do X @ embeddings^T
# message_passing_layers.append(blocks.NodeBlock(lambda: snt.Linear(self.num_embedding, name='output_linear'),
# use_received_edges=False, use_nodes=True,
# use_globals=False, use_sent_edges=False,
# name='project_block'))
self.selfattention_core = snt.Sequential(message_passing_layers, name='selfattention_core')
def _core(self, graphs):
graphs = self.selfattention_core(graphs)
# logits = output @ embeddings^T
# decrease in logit entropy => embeddings and output orthogonal, thus well separated embeddings
output_nodes = tf.einsum("nd,ed->ne", graphs.nodes, self.embeddings)
graphs = graphs.replace(nodes=output_nodes)
return graphs
@once.once
def initialize_positional_encodings(self, nodes):
_, n_node, _ = get_shape(nodes)
self.positional_encodings = tf.Variable(
initial_value=tf.random.truncated_normal((n_node, self.embedding_dim)),
name='positional_encodings')
@tf.function(input_signature=[tf.TensorSpec((None, None, None, None), tf.float32)])
def deproject_images(self, images):
"""
For a batch of images, samples a 3D medium consistent with each image.
Args:
images: [batch, H2, W2, C2]
Returns:
mu, b: mean and uncertainty of the 3D medium, each of shape [batch, H3, W3, D3, C3]
"""
return self._deproject_images(images)
def _deproject_images(self, images):
"""
For a batch of images, samples a 3D medium consistent with each image.
Args:
images: [batch, H2, W2, C2]
Returns:
mu, b: mean and uncertainty of the 3D medium, each of shape [batch, H3, W3, D3, C3]
"""
logits_2d = self.discrete_image_vae.compute_logits(images) # [batch, W2, H2, num_embeddings2]
dist_2d = tfp.distributions.Categorical(logits=logits_2d, dtype=tf.int32)
token_samples_idx_2d = dist_2d.sample(1) # [1, batch, W2, H2]
token_samples_idx_2d = token_samples_idx_2d[0] # batch, W2, H2
token_samples_idx_3d = self._incrementally_decode(token_samples_idx_2d) # batch, H3,W3,D3
latent_token_samples_3d = tf.nn.embedding_lookup(self.discrete_voxel_vae.embeddings, token_samples_idx_3d)# [batch, W3, H3, D3, embedding_size3]
mu_3d, logb_3d = self.discrete_voxel_vae.compute_likelihood_parameters(
latent_token_samples_3d[None])  # [1, batch, H', W', D', C], [1, batch, H', W', D', C]
mu_3d = mu_3d[0]
logb_3d = logb_3d[0]
b_3d = tf.math.exp(logb_3d)
return mu_3d, b_3d
def _incrementally_decode(self, token_samples_idx_2d):
"""
Args:
token_samples_idx_2d: [batch, H2, W2]
Returns:
token_samples_idx_3d: [batch, H3,W3,D3]
"""
idx_dtype = token_samples_idx_2d.dtype
batch, H2, W2 = get_shape(token_samples_idx_2d)
H3 = W3 = D3 = self.discrete_voxel_vae.voxels_per_dimension // self.discrete_voxel_vae.shrink_factor
token_samples_idx_2d = tf.reshape(token_samples_idx_2d, (batch, H2 * W2))
N, _ = get_shape(self.positional_encodings)
# batch, H3*W3*D3, num_embedding3
token_samples_idx_3d = tf.zeros((batch, H3 * W3 * D3), dtype=idx_dtype)
def _core(output_token_idx, token_samples_idx_3d):
"""
Args:
output_token_idx: which element is being replaced
token_samples_idx_3d: [batch, H3*W3*D3]
"""
# [batch, 1 + H2*W2 + 1 + H3*W3*D3 + 1]
sequence = self.construct_sequence(token_samples_idx_2d, token_samples_idx_3d)
input_sequence = sequence[:, :-1]
input_graphs = self.construct_input_graph(input_sequence, H2*W2)
latent_logits = self.compute_logits(input_graphs)
#batch, H3 * W3 * D3, num_embedding3
# . a b . c d
# a b . c d .
prior_latent_logits_3d = latent_logits[:, H2*W2+1:H2*W2+1+H3*W3*D3,
self.discrete_image_vae.num_embedding:self.discrete_image_vae.num_embedding + self.discrete_voxel_vae.num_embedding]
prior_dist = tfp.distributions.Categorical(logits=prior_latent_logits_3d, dtype=idx_dtype)
prior_latent_tokens_idx_3d = prior_dist.sample(1)[0] # batch, H3*W3*D3
# import pylab as plt
# # plt.imshow(tf.one_hot(prior_latent_tokens_idx_3d[0, :30], self.discrete_voxel_vae.num_embedding))
# plt.imshow(latent_logits[0, 1020:1050], aspect='auto', interpolation='nearest')
# plt.show()
_mask = tf.range(H3 * W3 * D3) == output_token_idx # [H3*W3*D3]
output_token_samples_idx_3d = tf.where(_mask[None, :],
prior_latent_tokens_idx_3d,
token_samples_idx_3d
)
return (output_token_idx + 1, output_token_samples_idx_3d)
_, token_samples_idx_3d = tf.while_loop(
cond=lambda output_token_idx, _: output_token_idx < (H3 * W3 * D3),
body=_core,
loop_vars=(tf.convert_to_tensor(0), token_samples_idx_3d))
# latent_graphs = GraphsTuple(**latent_graphs_data_dict, edges=None, globals=None)
token_samples_idx_3d = tf.reshape(token_samples_idx_3d,
(batch, H3, W3, D3))
return token_samples_idx_3d
def write_summary(self,images, graphs,
latent_logits_2d,
latent_logits_3d,
prior_latent_logits_2d,
prior_latent_logits_3d):
dist_2d = tfp.distributions.OneHotCategorical(logits=latent_logits_2d, dtype=latent_logits_2d.dtype)
dist_3d = tfp.distributions.OneHotCategorical(logits=latent_logits_3d, dtype=latent_logits_3d.dtype)
token_samples_onehot_2d = dist_2d.sample(1)[0]
token_samples_onehot_3d = dist_3d.sample(1)[0]
dist_2d_prior = tfp.distributions.OneHotCategorical(logits=prior_latent_logits_2d, dtype=prior_latent_logits_2d.dtype)
dist_3d_prior = tfp.distributions.OneHotCategorical(logits=prior_latent_logits_3d, dtype=prior_latent_logits_3d.dtype)
prior_token_samples_onehot_2d = dist_2d_prior.sample(1)[0]
prior_token_samples_onehot_3d = dist_3d_prior.sample(1)[0]
kl_div_2d = tf.reduce_mean(tf.reduce_sum(dist_2d.kl_divergence(dist_2d_prior), axis=[-1,-2]))
kl_div_3d = tf.reduce_mean(tf.reduce_sum(dist_3d.kl_divergence(dist_3d_prior), axis=[-1,-2,-3]))
tf.summary.scalar('kl_div_2d', kl_div_2d, step=self.step)
tf.summary.scalar('kl_div_3d', kl_div_3d, step=self.step)
tf.summary.scalar('kl_div', kl_div_2d + kl_div_3d, step=self.step)
perplexity_2d = 2. ** (dist_2d_prior.entropy() / tf.math.log(2.)) #
mean_perplexity_2d = tf.reduce_mean(perplexity_2d) # scalar
perplexity_3d = 2. ** (dist_3d_prior.entropy() / tf.math.log(2.)) #
mean_perplexity_3d = tf.reduce_mean(perplexity_3d) #
tf.summary.scalar('perplexity_2d_prior', mean_perplexity_2d, step=self.step)
tf.summary.scalar('perplexity_3d_prior', mean_perplexity_3d, step=self.step)
prior_latent_tokens_2d = tf.einsum('sbhwd,de->sbhwe', prior_token_samples_onehot_2d[None], self.discrete_image_vae.embeddings)
prior_latent_tokens_3d = tf.einsum('sbhwdn,ne->sbhwde', prior_token_samples_onehot_3d[None], self.discrete_voxel_vae.embeddings)
mu_2d, logb_2d = self.discrete_image_vae.compute_likelihood_parameters(
prior_latent_tokens_2d) # [num_samples, batch, H', W', C], [num_samples, batch, H', W', C]
log_likelihood_2d = self.discrete_image_vae.log_likelihood(images, mu_2d, logb_2d) # [num_samples, batch]
var_exp_2d = tf.reduce_mean(log_likelihood_2d) # [scalar]
mu_3d, logb_3d = self.discrete_voxel_vae.compute_likelihood_parameters(
prior_latent_tokens_3d) # [num_samples, batch, H', W', D', C], [num_samples, batch, H', W', D', C]
log_likelihood_3d = self.discrete_voxel_vae.log_likelihood(graphs, mu_3d, logb_3d) # [num_samples, batch]
var_exp_3d = tf.reduce_mean(log_likelihood_3d) # [scalar]
var_exp = log_likelihood_2d + log_likelihood_3d
tf.summary.scalar('var_exp_3d', tf.reduce_mean(var_exp_3d), step=self.step)
tf.summary.scalar('var_exp_2d', tf.reduce_mean(var_exp_2d), step=self.step)
tf.summary.scalar('var_exp', tf.reduce_mean(var_exp), step=self.step)
projected_mu = tf.reduce_sum(mu_3d[0], axis=-2) # [batch, H', W', C]
voxels = grid_graphs(graphs, self.discrete_voxel_vae.voxels_per_dimension) # [batch, H', W', D', C]
projected_img = tf.reduce_sum(voxels, axis=-2) # [batch, H', W', C]
for i in range(self.discrete_voxel_vae.num_channels):
vmin = tf.reduce_min(projected_mu[..., i])
vmax = tf.reduce_max(projected_mu[..., i])
_projected_mu = (projected_mu[..., i:i + 1] - vmin) / (vmax - vmin) # batch, H', W', 1
_projected_mu = tf.clip_by_value(_projected_mu, 0., 1.)
vmin = tf.reduce_min(projected_img[..., i])
vmax = tf.reduce_max(projected_img[..., i])
_projected_img = (projected_img[..., i:i + 1] - vmin) / (vmax - vmin) # batch, H', W', 1
_projected_img = tf.clip_by_value(_projected_img, 0., 1.)
tf.summary.image(f'voxels_predict_prior[{i}]', _projected_mu, step=self.step)
tf.summary.image(f'voxels_actual[{i}]', _projected_img, step=self.step)
for name, _latent_logits_3d, _tokens_onehot_3d in zip(['', '_prior'],
[latent_logits_3d,prior_latent_logits_3d],
[token_samples_onehot_3d, prior_token_samples_onehot_3d]):
batch, H3, W3, D3, _ = get_shape(_latent_logits_3d)
_latent_logits_3d -= tf.reduce_min(_latent_logits_3d, axis=-1, keepdims=True)
_latent_logits_3d /= tf.reduce_max(_latent_logits_3d, axis=-1, keepdims=True)
_latent_logits_3d = tf.reshape(_latent_logits_3d,
[batch, H3 * W3 * D3, self.discrete_voxel_vae.num_embedding,
1]) # [batch, H*W*D, num_embedding, 1]
tf.summary.image(f"latent_logits_3d{name}", _latent_logits_3d, step=self.step)
_tokens_onehot_3d = tf.reshape(_tokens_onehot_3d,
[batch, H3 * W3 * D3, self.discrete_voxel_vae.num_embedding,
1]) # [batch, H*W*D, num_embedding, 1]
tf.summary.image(f'latent_samples_onehot_3d{name}', _tokens_onehot_3d, step=self.step)
_mu = mu_2d[0] # [batch, H', W', C]
_img = images # [batch, H', W', C]
for i in range(self.discrete_image_vae.num_channels):
vmin = tf.reduce_min(_mu[..., i])
vmax = tf.reduce_max(_mu[..., i])
_projected_mu = (_mu[..., i:i + 1] - vmin) / (vmax - vmin) # batch, H', W', 1
_projected_mu = tf.clip_by_value(_projected_mu, 0., 1.)
vmin = tf.reduce_min(_img[..., i])
vmax = tf.reduce_max(_img[..., i])
_projected_img = (_img[..., i:i + 1] - vmin) / (vmax - vmin) # batch, H', W', 1
_projected_img = tf.clip_by_value(_projected_img, 0., 1.)
tf.summary.image(f'image_predict_prior[{i}]', _projected_mu, step=self.step)
tf.summary.image(f'image_actual[{i}]', _projected_img, step=self.step)
for name, _latent_logits_2d, _tokens_onehot_2d in zip(['', '_prior'],
[latent_logits_2d, prior_latent_logits_2d],
[token_samples_onehot_2d, prior_token_samples_onehot_2d]):
batch, H2, W2, _ = get_shape(_latent_logits_2d)
_latent_logits_2d -= tf.reduce_min(_latent_logits_2d, axis=-1, keepdims=True)
_latent_logits_2d /= tf.reduce_max(_latent_logits_2d, axis=-1, keepdims=True)
_latent_logits_2d = tf.reshape(_latent_logits_2d,
[batch, H2 * W2, self.discrete_image_vae.num_embedding,
1]) # [batch, H*W*D, num_embedding, 1]
tf.summary.image(f"latent_logits_2d{name}", _latent_logits_2d, step=self.step)
_tokens_onehot_2d = tf.reshape(_tokens_onehot_2d,
[batch, H2 * W2, self.discrete_image_vae.num_embedding,
1]) # [batch, H*W*D, num_embedding, 1]
tf.summary.image(f'latent_samples_onehot_2d{name}', _tokens_onehot_2d, step=self.step)
def _build(self, graphs, images):
idx_dtype = tf.int32
latent_logits_2d = self.discrete_image_vae.compute_logits(images) # [batch, H, W, num_embeddings]
latent_logits_3d = self.discrete_voxel_vae.compute_logits(graphs) # [batch, H, W, D, num_embeddings]
batch, H2, W2, _ = get_shape(latent_logits_2d)
batch, H3, W3, D3, _ = get_shape(latent_logits_3d)
G = self.num_token_samples * batch
latent_logits_2d = tf.reshape(latent_logits_2d, (batch, H2 * W2, self.discrete_image_vae.num_embedding))
latent_logits_3d = tf.reshape(latent_logits_3d, (batch, H3 * W3 * D3, self.discrete_voxel_vae.num_embedding))
q_dist_2d = tfp.distributions.Categorical(logits=latent_logits_2d, dtype=idx_dtype)
token_samples_idx_2d = q_dist_2d.sample(self.num_token_samples) # [num_samples, batch, H2*W2]
token_samples_idx_2d = tf.reshape(token_samples_idx_2d, (G, H2*W2))
q_dist_3d = tfp.distributions.Categorical(logits=latent_logits_3d, dtype=idx_dtype)
token_samples_idx_3d = q_dist_3d.sample(
self.num_token_samples) # [num_samples, batch, H3*W3*D3]
token_samples_idx_3d = tf.reshape(token_samples_idx_3d, (G, H3*W3*D3))
entropy_2d = tf.reduce_sum(q_dist_2d.entropy(), axis=-1)
entropy_3d = tf.reduce_sum(q_dist_3d.entropy(), axis=-1)
entropy = entropy_3d + entropy_2d # [batch]
## create sequence
sequence = self.construct_sequence(token_samples_idx_2d, token_samples_idx_3d)
input_sequence = sequence[:, :-1]
input_graphs = self.construct_input_graph(input_sequence, H2*W2)
latent_logits = self.compute_logits(input_graphs)
prior_dist = tfp.distributions.Categorical(logits=latent_logits, dtype=idx_dtype)
output_sequence = sequence[:, 1:]
cross_entropy = -prior_dist.log_prob(output_sequence)#num_samples*batch, H2*W2+1+H3*W3*D3+1
# . a . b
# a . b .
cross_entropy = cross_entropy[:, H2*W2+1:-1]
cross_entropy = tf.reshape(tf.reduce_sum(cross_entropy, axis=-1), (self.num_token_samples, batch)) # num_samples,batch
kl_term = cross_entropy + entropy # [num_samples, batch]
kl_div = tf.reduce_mean(kl_term) # scalar
# elbo = tf.stop_gradient(var_exp) - self.beta * kl_div
elbo = - kl_div # scalar
loss = - elbo # scalar
if self.log_counter % int(128 / G) == 0:
prior_latent_logits_2d = tf.reshape(latent_logits[:, :H2*W2, :self.discrete_image_vae.num_embedding],
(self.num_token_samples, batch, H2, W2, self.discrete_image_vae.num_embedding))
prior_latent_logits_3d = tf.reshape(latent_logits[:, H2*W2+1:H2*W2+1+H3*W3*D3,
self.discrete_image_vae.num_embedding:self.discrete_image_vae.num_embedding+self.discrete_voxel_vae.num_embedding],
(self.num_token_samples, batch, H3, W3, D3, self.discrete_voxel_vae.num_embedding))
latent_logits_2d = tf.reshape(latent_logits_2d,
(batch, H2, W2, self.discrete_image_vae.num_embedding))
latent_logits_3d = tf.reshape(latent_logits_3d,
(batch, H3, W3, D3, self.discrete_voxel_vae.num_embedding))
self.write_summary(images, graphs,
latent_logits_2d,
latent_logits_3d,
prior_latent_logits_2d[0],
prior_latent_logits_3d[0])
return dict(loss=loss)
def compute_logits(self, input_graphs):
latent_graphs = self._core(input_graphs)
latent_graphs = graph_batch_reshape(latent_graphs)
latent_logits = latent_graphs.nodes # num_samples*batch, H2*W2 + 1 + H3*W3*D3 + 1, num_embedding
latent_logits /= 1e-6 + tf.math.reduce_std(latent_logits, axis=-1, keepdims=True)
latent_logits -= tf.reduce_logsumexp(latent_logits, axis=-1, keepdims=True)
return latent_logits
def construct_input_graph(self, input_sequence, N2):
G, N = get_shape(input_sequence)
# num_samples*batch, 1 + H2*W2 + 1 + H3*W3*D3, embedding_dim
input_tokens = tf.nn.embedding_lookup(self.embeddings, input_sequence)
self.initialize_positional_encodings(input_tokens)
nodes = input_tokens + self.positional_encodings
n_node = tf.fill([G], N)
n_edge = tf.zeros_like(n_node)
data_dict = dict(nodes=nodes, edges=None, senders=None, receivers=None, globals=None,
n_node=n_node,
n_edge=n_edge)
concat_graphs = GraphsTuple(**data_dict)
concat_graphs = graph_unbatch_reshape(concat_graphs) # [n_graphs * (num_input + num_output), embedding_size]
# nodes, senders, receivers, globals
def edge_connect_rule(sender, receiver):
# . a . b -> a . b .
complete_2d = (sender < N2 + 1) & (receiver < N2 + 1) & (
sender + 1 != receiver)  # exclude the edge from each 2D token to its immediate successor, so the model cannot simply learn to copy.
auto_regressive_3d = (sender <= receiver) & (
receiver >= N2 + 1) # auto-regressive (excluding 2d) with self-loops
return complete_2d | auto_regressive_3d
# nodes, senders, receivers, globals
concat_graphs = connect_graph_dynamic(concat_graphs, edge_connect_rule)
return concat_graphs
def construct_sequence(self, token_samples_idx_2d, token_samples_idx_3d):
"""
Args:
token_samples_idx_2d: [G, H2*W2]
token_samples_idx_3d: [G, H3*W3*D3]
Returns:
sequence: G, 1 + H2*W2 + 1 + H3*W3*D3 + 1
"""
idx_dtype = token_samples_idx_2d.dtype
G, N2 = get_shape(token_samples_idx_2d)
G, N3 = get_shape(token_samples_idx_3d)
start_token_idx = tf.constant(self.num_embedding - 3, dtype=idx_dtype)
del_token_idx = tf.constant(self.num_embedding - 2, dtype=idx_dtype)
eos_token_idx = tf.constant(self.num_embedding - 1, dtype=idx_dtype)
start_token = tf.fill((G, 1), start_token_idx)
del_token = tf.fill((G, 1), del_token_idx)
eos_token = tf.fill((G, 1), eos_token_idx)
###
# num_samples*batch, 1 + H2*W2 + 1 + H3*W3*D3 + 1
sequence = tf.concat([
start_token,
token_samples_idx_2d,
del_token,
token_samples_idx_3d + self.discrete_image_vae.num_embedding, # shift to right
eos_token
], axis=-1)
return sequence
```
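The `AutoRegressivePrior` above is driven through `deproject_images`, which samples 2D tokens from the image VAE, decodes 3D tokens auto-regressively, and pushes them through the voxel VAE decoder. Below is a minimal shape-level sketch of wiring it up; the hyper-parameters mirror the training driver in the next file and are purely illustrative, and in practice the two VAEs are restored from their checkpoints first (as done in `visualize_3d.py` further below) rather than used with random weights.

```python
import tensorflow as tf
from neural_deprojection.models.Simple_complete_model.autoencoder_2d import DiscreteImageVAE
from neural_deprojection.models.Simple_complete_model.autoencoder_3d import DiscreteVoxelsVAE
from neural_deprojection.models.Simple_complete_model.autoregressive_prior import AutoRegressivePrior

# Build the two discrete VAEs (illustrative hyper-parameters, matching the training config below).
discrete_image_vae = DiscreteImageVAE(embedding_dim=32, num_embedding=32, hidden_size=32, num_channels=1)
discrete_voxel_vae = DiscreteVoxelsVAE(voxels_per_dimension=64, embedding_dim=32, num_embedding=32,
                                       hidden_size=32, num_groups=4, num_channels=2)
prior = AutoRegressivePrior(discrete_image_vae=discrete_image_vae,
                            discrete_voxel_vae=discrete_voxel_vae,
                            num_heads=4, num_layers=2, embedding_dim=16, num_token_samples=1)

images = tf.random.normal((1, 256, 256, 1))   # [batch, H2, W2, C2]
# Slow with untrained weights (the 3D tokens are decoded one at a time); intended only as a shape check.
mu_3d, b_3d = prior.deproject_images(images)  # each [batch, H3, W3, D3, C3]
```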
#### File: Simple_complete_model_GCD/complete_model/main.py
```python
import sys
sys.path.insert(1, '/data/s2675544/git/neural_deprojection/')
from neural_deprojection.models.Simple_complete_model.autoencoder_3d import DiscreteVoxelsVAE
from neural_deprojection.models.Simple_complete_model.autoencoder_2d import DiscreteImageVAE
from neural_deprojection.models.Simple_complete_model.autoregressive_prior import AutoRegressivePrior
from neural_deprojection.graph_net_utils import vanilla_training_loop, TrainOneEpoch, build_log_dir, \
build_checkpoint_dir, temperature_schedule
import os
import tensorflow as tf
import json
import sonnet as snt
import random
import glob
from neural_deprojection.models.identify_medium_GCD.generate_data import decode_examples
from graph_nets.graphs import GraphsTuple
from functools import partial
from tensorflow_addons.image import gaussian_filter2d
random.seed(1)
MODEL_MAP = {'auto_regressive_prior': AutoRegressivePrior,
'disc_image_vae': DiscreteImageVAE,
'disc_voxel_vae': DiscreteVoxelsVAE}
def build_training(model_type, model_parameters, optimizer_parameters, loss_parameters, strategy=None,
**kwargs) -> TrainOneEpoch:
model_cls = MODEL_MAP[model_type]
model = model_cls(**model_parameters, **kwargs)
def build_opt(**kwargs):
opt_type = kwargs.get('opt_type')
if opt_type == 'adam':
learning_rate = kwargs.get('learning_rate')
opt = snt.optimizers.Adam(learning_rate)
else:
raise ValueError('Opt {} invalid'.format(opt_type))
return opt
def build_loss(**loss_parameters):
def loss(model_outputs, batch):
return model_outputs['loss']
return loss
loss = build_loss(**loss_parameters)
opt = build_opt(**optimizer_parameters)
training = TrainOneEpoch(model, loss, opt, strategy=strategy)
return training
def build_dataset(tfrecords_dirs, batch_size, type='train'):
"""
Build a dataset from directories of tfrecords, with graph batching.
Args:
tfrecords_dirs: list of str, directories containing <type>/*.tfrecords
batch_size: int, batch size
type: str, 'train' or 'test' subdirectory to read from
Returns: Dataset obj.
"""
tfrecords = []
for tfrecords_dir in tfrecords_dirs:
tfrecords += glob.glob(os.path.join(tfrecords_dir, type, '*.tfrecords'))
random.shuffle(tfrecords)
print(f'Number of {type} tfrecord files : {len(tfrecords)}')
dataset = tf.data.TFRecordDataset(tfrecords).map(partial(decode_examples,
node_shape=(10,),
edge_shape=(2,),
image_shape=(256, 256, 1))) # (graph, image, idx)
dataset = dataset.map(lambda graph_data_dict,
img,
cluster_idx,
projection_idx,
vprime: (
GraphsTuple(**graph_data_dict).replace(nodes=tf.concat([GraphsTuple(**graph_data_dict).nodes[:, :3],
GraphsTuple(**graph_data_dict).nodes[:, 6:8]],
axis=-1)),
gaussian_filter2d(img))).shuffle(buffer_size=52).batch(batch_size=batch_size)
return dataset
def train_discrete_image_vae(data_dirs, config, kwargs, batch_size=1, num_epochs=100):
train_one_epoch = build_training(**config, **kwargs)
train_dataset = build_dataset(data_dirs, batch_size=batch_size, type='train')
test_dataset = build_dataset(data_dirs, batch_size=batch_size, type='test')
# drop the graph as the model expects only images
train_dataset = train_dataset.map(lambda graphs, images: (images,))
test_dataset = test_dataset.map(lambda graphs, images: (images,))
# run on first input to set variable shapes
for batch in iter(train_dataset):
train_one_epoch.model(*batch)
break
log_dir = build_log_dir('log_dir', config)
checkpoint_dir = build_checkpoint_dir('checkpointing', config)
os.makedirs(checkpoint_dir, exist_ok=True)
with open(os.path.join(checkpoint_dir, 'config.json'), 'w') as f:
json.dump(config, f)
vanilla_training_loop(train_one_epoch=train_one_epoch,
training_dataset=train_dataset,
test_dataset=test_dataset,
num_epochs=num_epochs,
early_stop_patience=20,
checkpoint_dir=checkpoint_dir,
log_dir=log_dir,
save_model_dir=checkpoint_dir,
variables=train_one_epoch.model.trainable_variables,
debug=False)
return train_one_epoch.model, checkpoint_dir
def train_discrete_voxel_vae(data_dirs, config, kwargs, batch_size=1, num_epochs=100):
train_one_epoch = build_training(**config, **kwargs)
train_dataset = build_dataset(data_dirs, batch_size=batch_size, type='train')
test_dataset = build_dataset(data_dirs, batch_size=batch_size, type='test')
# drop the image as the model expects only graphs
train_dataset = train_dataset.map(lambda graphs, images: (graphs,))
test_dataset = test_dataset.map(lambda graphs, images: (graphs,))
# run on first input to set variable shapes
for batch in iter(train_dataset):
train_one_epoch.model(*batch)
break
log_dir = build_log_dir('log_dir', config)
checkpoint_dir = build_checkpoint_dir('checkpointing', config)
os.makedirs(checkpoint_dir, exist_ok=True)
with open(os.path.join(checkpoint_dir, 'config.json'), 'w') as f:
json.dump(config, f)
vanilla_training_loop(train_one_epoch=train_one_epoch,
training_dataset=train_dataset,
test_dataset=test_dataset,
num_epochs=num_epochs,
early_stop_patience=20,
checkpoint_dir=checkpoint_dir,
log_dir=log_dir,
save_model_dir=checkpoint_dir,
debug=False)
return train_one_epoch.model, checkpoint_dir
def train_auto_regressive_prior(data_dirs, config, kwargs, batch_size=1, num_epochs=100):
train_one_epoch = build_training(**config, **kwargs)
train_dataset = build_dataset(data_dirs, batch_size=batch_size, type='train')
test_dataset = build_dataset(data_dirs, batch_size=batch_size, type='test')
# run on first input to set variable shapes
for batch in iter(train_dataset):
train_one_epoch.model(*batch)
break
log_dir = build_log_dir('log_dir', config)
checkpoint_dir = build_checkpoint_dir('checkpointing', config)
os.makedirs(checkpoint_dir, exist_ok=True)
with open(os.path.join(checkpoint_dir, 'config.json'), 'w') as f:
json.dump(config, f)
exclude_variables = [variable.name for variable in kwargs['discrete_image_vae'].trainable_variables] \
+ [variable.name for variable in kwargs['discrete_voxel_vae'].trainable_variables]
trainable_variables = list(filter(lambda variable: (variable.name not in exclude_variables),
train_one_epoch.model.trainable_variables))
vanilla_training_loop(train_one_epoch=train_one_epoch,
training_dataset=train_dataset,
test_dataset=test_dataset,
num_epochs=num_epochs,
early_stop_patience=40,
checkpoint_dir=checkpoint_dir,
log_dir=log_dir,
save_model_dir=checkpoint_dir,
variables=trainable_variables,
debug=False)
def main():
if os.getcwd().split('/')[2] == 's2675544':
tfrec_base_dir = '/home/s2675544/data/tf_records'
print('Running on ALICE')
else:
tfrec_base_dir = '/home/matthijs/Documents/Studie/Master_Astronomy/1st_Research_Project/Data/tf_records'
print('Running at home')
data_dirs = ['AGN_TUNED_nu0_L400N1024_WMAP9_tf_records',
'snap_128_tf_records',
'snap_132_tf_records',
'snap_136_tf_records']
tfrec_dirs = [os.path.join(tfrec_base_dir, data_dir) for data_dir in data_dirs]
num_epochs_img = 20
num_epochs_vox = 0
num_epochs_auto = 0
print("Training the discrete image VAE")
config = dict(model_type='disc_image_vae',
model_parameters=dict(embedding_dim=32, # 64
num_embedding=32, # 1024
hidden_size=32,
num_channels=1
),
optimizer_parameters=dict(learning_rate=4e-4, opt_type='adam'),
loss_parameters=dict())
get_temp = temperature_schedule(config['model_parameters']['num_embedding'], num_epochs_img)
kwargs = dict(num_token_samples=4,
compute_temperature=get_temp,
beta=1.)
discrete_image_vae, discrete_image_vae_checkpoint = train_discrete_image_vae(tfrec_dirs,
config,
kwargs,
batch_size=4,
num_epochs=num_epochs_img)
print("Training the discrete voxel VAE.")
config = dict(model_type='disc_voxel_vae',
model_parameters=dict(voxels_per_dimension=8 * 8,
embedding_dim=32, # 64
num_embedding=32, # 1024
hidden_size=32,
num_groups=4,
num_channels=2),
optimizer_parameters=dict(learning_rate=5e-4, opt_type='adam'),
loss_parameters=dict())
get_temp = temperature_schedule(config['model_parameters']['num_embedding'], num_epochs_vox)
kwargs = dict(num_token_samples=3,
compute_temperature=get_temp,
beta=1.)
discrete_voxel_vae, discrete_voxel_vae_checkpoint = train_discrete_voxel_vae(tfrec_dirs,
config,
kwargs,
batch_size=3,
num_epochs=num_epochs_vox)
print("Training auto-regressive prior.")
config = dict(model_type='auto_regressive_prior',
model_parameters=dict(num_heads=4,
num_layers=2
),
optimizer_parameters=dict(learning_rate=4e-4, opt_type='adam'),
loss_parameters=dict())
get_temp = temperature_schedule(discrete_image_vae.num_embedding + discrete_voxel_vae.num_embedding, num_epochs_auto)
kwargs = dict(discrete_image_vae=discrete_image_vae,
discrete_voxel_vae=discrete_voxel_vae,
num_token_samples=2,
compute_temperature=get_temp,
beta=1.)
train_auto_regressive_prior(tfrec_dirs,
config,
kwargs,
batch_size=2,
num_epochs=num_epochs_auto)
if __name__ == '__main__':
main()
```
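When the three stages are run as separate jobs, the checkpoint directories returned by `train_discrete_image_vae` and `train_discrete_voxel_vae` can be used to rebuild and restore the sub-models before fitting the prior. A sketch of one such helper, following the same `Checkpoint`/`CheckpointManager` layout that `visualize_3d.py` (next file) uses; the `config.json` written next to each checkpoint holds the `model_parameters`:

```python
import os, json
import tensorflow as tf
from neural_deprojection.models.Simple_complete_model.autoencoder_2d import DiscreteImageVAE

def restore_image_vae(checkpoint_dir):
    """Rebuild a DiscreteImageVAE from the config.json saved at training time and restore its weights."""
    with open(os.path.join(checkpoint_dir, 'config.json'), 'r') as f:
        model_parameters = json.load(f)['model_parameters']
    model = DiscreteImageVAE(**model_parameters)
    checkpoint = tf.train.Checkpoint(module=model)
    manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir, max_to_keep=3,
                                         checkpoint_name=model.__class__.__name__)
    if manager.latest_checkpoint is not None:
        checkpoint.restore(manager.latest_checkpoint).expect_partial()
    return model
```

The same pattern applies to `DiscreteVoxelsVAE`, swapping the class and checkpoint directory.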
#### File: models/Simple_complete_model_GCD/visualize_3d.py
```python
import sys
sys.path.insert(1, '/data/s2675544/git/neural_deprojection/')
sys.path.insert(1, '/home/matthijs/git/neural_deprojection/')
import os
import tensorflow as tf
import glob
from neural_deprojection.models.Simple_complete_model_GCD.main import double_downsample
# from neural_deprojection.models.Simple_complete_model.model_utils import SimpleCompleteModel, VoxelisedModel
from neural_deprojection.models.Simple_complete_model.autoencoder_2d import DiscreteImageVAE
from neural_deprojection.models.Simple_complete_model.autoencoder_3d import DiscreteVoxelsVAE
from neural_deprojection.models.Simple_complete_model.autoregressive_prior import AutoRegressivePrior
from mayavi import mlab
from scipy import interpolate
from neural_deprojection.graph_net_utils import histogramdd, get_shape, temperature_schedule, grid_graphs, grid_graph_smoothing
import matplotlib.pyplot as plt
import numpy as np
from graph_nets.graphs import GraphsTuple
from functools import partial
from tensorflow_addons.image import gaussian_filter2d
from neural_deprojection.models.identify_medium_GCD.generate_data import decode_examples
import json
import tensorflow_probability as tfp
import tensorflow_graphics as tfg
# tf.random.set_seed(1)
def tf_graph_dict_to_voxels(graph, voxels_per_dimension):
tf_voxels = tf.py_function(graph_dict_to_voxels,
(graph, voxels_per_dimension),
tf.float32)
return tf_voxels
def graph_dict_to_voxels(nodes, voxels_per_dimension):
_x = tf.linspace(-1.7, 1.7, voxels_per_dimension)[..., None]
x, y, z = tf.meshgrid(_x, _x, _x, indexing='ij') # 3 x [grid_resolution, grid_resolution, grid_resolution]
grid_positions = (x, y, z) # [3, grid_resolution, grid_resolution, grid_resolution]
node_positions = (nodes[:, 0].numpy(),
nodes[:, 1].numpy(),
nodes[:, 2].numpy()) # [3, num_positions]
voxels = np.zeros((voxels_per_dimension,
voxels_per_dimension,
voxels_per_dimension,
2), dtype=np.float32)
for i in range(2):
prop = nodes[:, 3 + i].numpy() # [num_positions]
voxels[..., i] = interpolate.griddata(node_positions,
prop,
xi=grid_positions,
method='linear',
fill_value=0.) # [grid_resolution, grid_resolution, grid_resolution]
return tf.convert_to_tensor(voxels, dtype=tf.float32)
def build_dataset(tfrecords, voxels_per_dimension, batch_size):
dataset = tf.data.TFRecordDataset(tfrecords).map(partial(decode_examples,
node_shape=(10,),
edge_shape=(2,),
image_shape=(256, 256, 1))) # (graph, image, idx)
dataset = dataset.map(lambda graph_data_dict,
img,
cluster_idx,
projection_idx,
vprime: (
GraphsTuple(**graph_data_dict).replace(nodes=tf.concat([GraphsTuple(**graph_data_dict).nodes[:, :3],
GraphsTuple(**graph_data_dict).nodes[:, 6:8]],
axis=-1)),
gaussian_filter2d(img), cluster_idx, projection_idx)).batch(batch_size=batch_size)
# dataset = dataset.map(lambda graph_data_dict,
# img,
# cluster_idx,
# projection_idx,
# vprime: (tf_graph_dict_to_voxels(
# GraphsTuple(**graph_data_dict).replace(nodes=tf.concat([GraphsTuple(**graph_data_dict).nodes[:, :3],
# GraphsTuple(**graph_data_dict).nodes[:, 6:8]],
# axis=-1)).nodes, voxels_per_dimension),
# gaussian_filter2d(img), cluster_idx,
# projection_idx)).batch(batch_size=batch_size)
return dataset
def decode_property(model,
img,
graph,
positions,
property_index,
temperature,
component=None,
debug=False,
saved_model=True,
test_decoder=False):
if model.name != 'auto_regressive_prior':
if debug and not saved_model:
decoded_properties, _ = model._im_to_components(img, tf.tile(positions[None, None, :, :], (model.num_token_samples, model.batch, 1, 1)), temperature) # [num_components, num_positions, num_properties]
else:
decoded_properties = model.im_to_components(img, tf.tile(positions[None, None, :, :], (model.num_token_samples, model.batch, 1, 1)), temperature) # [num_components, num_positions, num_properties]
else:
latent_logits_2d = model.discrete_image_vae.compute_logits(img) # [batch, H, W, num_embeddings]
log_token_samples_onehot_2d, token_samples_onehot_2d, latent_tokens_2d = model.discrete_image_vae.sample_latent(
latent_logits_2d,
0.01,
model.num_token_samples) # [num_samples, batch, H, W, num_embeddings], [num_samples, batch, H, W, embedding_size]
latent_logits_3d = model.discrete_voxel_vae.compute_logits(graph) # [batch, H, W, D, num_embeddings]
log_token_samples_onehot_3d, token_samples_onehot_3d, latent_tokens_3d = model.discrete_voxel_vae.sample_latent(
latent_logits_3d,
0.01,
model.num_token_samples) # [num_samples, batch, H, W, D, num_embeddings], [num_samples, batch, H, W, D, embedding_size]
mu_2d, _ = model.discrete_image_vae.compute_likelihood_parameters(latent_tokens_2d)
if test_decoder:
mu_3d, _ = model.discrete_voxel_vae.compute_likelihood_parameters(latent_tokens_3d)
else:
mu_3d, _ = model.deproject_images(img)
decoded_properties = mu_3d
decoded_img = mu_2d
[batch3, H3, W3, D3, _] = get_shape(decoded_properties)
decoded_properties = tf.reshape(decoded_properties, (batch3 * H3 * W3 * D3, 2))
if model.name == 'simple_complete_model':
if component is not None:
properties_one_component = decoded_properties[component] # [num_positions, num_properties]
else:
properties_one_component = tf.reduce_mean(decoded_properties, axis=0) # [num_positions, num_properties]
decoded_property = properties_one_component[:, property_index] # [num_positions]
else:
decoded_property = decoded_properties[..., property_index] # [num_positions]
return decoded_property, decoded_img
def visualization_3d(model,
image,
graph,
property_index,
temperature,
component=None,
decode_on_interp_pos=True,
debug=False,
saved_model=True,
test_decoder=False):
grid_resolution = model.discrete_voxel_vae.voxels_per_dimension
_x = tf.linspace(-1.7, 1.7, grid_resolution)[..., None]
x, y, z = tf.meshgrid(_x, _x, _x, indexing='ij') # 3 x [grid_resolution, grid_resolution, grid_resolution]
grid_positions = (x, y, z) # [3, grid_resolution, grid_resolution, grid_resolution]
x = tf.reshape(x, [-1])
y = tf.reshape(y, [-1])
z = tf.reshape(z, [-1])
grid_positions_tensor = tf.concat([x[:, None], y[:, None], z[:, None]], axis=-1) # [num_grid_positions, 3]
# Interpolate 3D property to grid positions
node_positions = (graph.nodes[0, :, 0].numpy(),
graph.nodes[0, :, 1].numpy(),
graph.nodes[0, :, 2].numpy()) # [3, num_positions]
prop = graph.nodes[0, :, 3 + property_index]
interp_data_before = interpolate.griddata(node_positions,
prop.numpy(),
xi=grid_positions,
method='linear',
fill_value=0.) # [grid_resolution, grid_resolution, grid_resolution]
# interp_data_before = grid_graphs(graph, model.discrete_voxel_vae.voxels_per_dimension)[0, :, :, :, property_index].numpy()
histogram_positions_before = graph.nodes[0, :, :2]
# nodes = graph
# interp_data_before = nodes[0, :, :, :, property_index].numpy() # [num_positions]
# prop = tf.reshape(interp_data_before, [-1])
# histogram_positions_before = grid_positions_tensor[:, :2]
if decode_on_interp_pos:
# Directly calculate the decoded 3D property for the grid positions
histogram_positions_after = grid_positions_tensor[:, :2]
decoded_prop, decoded_img = decode_property(model,
image,
graph,
grid_positions_tensor,
property_index,
temperature,
component,
debug=debug,
saved_model=saved_model,
test_decoder=test_decoder)
interp_data_after = tf.reshape(decoded_prop, [grid_resolution, grid_resolution, grid_resolution]).numpy()
else:
# Calculate the decoded 3D property for the node positions and interpolate to grid positions
histogram_positions_after = graph.nodes[0, :, :2]
decoded_prop, decoded_img = decode_property(model,
image,
graph,
graph.nodes[0, :, :3],
property_index,
temperature,
component,
debug=debug,
saved_model=saved_model,
test_decoder=test_decoder)
interp_data_after = interpolate.griddata(node_positions,
decoded_prop.numpy(),
xi=grid_positions,
method='linear',
fill_value=0.) # [grid_resolution, grid_resolution, grid_resolution]
prop_interp_before = tf.convert_to_tensor(np.reshape(interp_data_before, (-1)))
prop_interp_after = tf.convert_to_tensor(np.reshape(interp_data_after, (-1)))
histogram_positions_interp = grid_positions_tensor[:, :2]
# decoded_prop = tf.random.uniform((10000,))
# the reverse switches x and y, this way my images and histograms line up
graph_hist_before, _ = histogramdd(tf.reverse(histogram_positions_before, [1]), bins=grid_resolution, weights=prop)
graph_hist_after, _ = histogramdd(tf.reverse(histogram_positions_after, [1]), bins=grid_resolution, weights=decoded_prop)
interp_hist_before, _ = histogramdd(tf.reverse(histogram_positions_interp, [1]), bins=grid_resolution, weights=prop_interp_before)
interp_hist_after, _ = histogramdd(tf.reverse(histogram_positions_interp, [1]), bins=grid_resolution, weights=prop_interp_after)
return interp_data_before, \
interp_data_after, \
graph_hist_before, \
graph_hist_after, \
interp_hist_before, \
interp_hist_after, prop.numpy(), decoded_prop.numpy(), decoded_img.numpy()
def load_saved_models(saved_model_dir):
model = tf.saved_model.load(saved_model_dir)
return model
def load_checkpoint_models(checkpoint_dir, model, cp_kwargs):
objects_cp = tf.train.Checkpoint(**cp_kwargs)
model_cp = tf.train.Checkpoint(_model=objects_cp)
checkpoint = tf.train.Checkpoint(module=model_cp)
manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir, max_to_keep=3,
checkpoint_name=model.__class__.__name__)
checkpoint.restore(manager.latest_checkpoint).expect_partial()
return model
def main(saved_model_dir,
checkpoint_dir,
model,
cp_kwargs,
image,
input_graph,
temperature,
prop_index: int,
component=None,
decode_on_interp_pos=True,
saved_model=True,
debug=False,
test_decoder=False):
if type(model) is not dict:
if saved_model:
simple_complete_model = load_saved_models(saved_model_dir)
else:
simple_complete_model = load_checkpoint_models(checkpoint_dir, model, cp_kwargs)
grid_res = model.discrete_voxel_vae.voxels_per_dimension
data_before, \
data_after, \
hist_before, \
hist_after, \
image_interp_before, \
image_interp_after,\
prop, \
decoded_prop, \
decoded_img = visualization_3d(model=model,
image=image,
graph=input_graph,
property_index=prop_index,
temperature=temperature,
component=component,
decode_on_interp_pos=decode_on_interp_pos,
debug=debug,
saved_model=saved_model,
test_decoder=test_decoder)
if model.name != 'auto_regressive_prior':
if debug and not saved_model:
decoded_img = model.discrete_image_vae._sample_decoder(
model.discrete_image_vae._sample_encoder(image), temperature, 1)
else:
decoded_img = model.discrete_image_vae.sample_decoder(
model.discrete_image_vae.sample_encoder(image), temperature, 4)
# H, xedges, yedges = np.histogram2d(input_nodes[0, :, 0].numpy(),
# -input_nodes[0, :, 1].numpy(),
# bins=([i for i in np.linspace(-1.7, 1.7, grid_res)],
# [i for i in np.linspace(-1.7, 1.7, grid_res)]))
fig, ax = plt.subplots(2, 4, figsize=(24, 12))
# image channel 1 is smoothed image
image_before = ax[0, 0].imshow(image[0, :, :, 0])
fig.colorbar(image_before, ax=ax[0, 0])
ax[0, 0].set_title('input image')
# decoded_img [S, batch, H, W, channels]
# image channel 1 is decoded from the smoothed image
image_after = ax[1, 0].imshow(decoded_img[0, 0, :, :, 0])
fig.colorbar(image_after, ax=ax[1, 0])
ax[1, 0].set_title('decoded image')
graph_before = ax[0, 1].imshow(hist_before.numpy())
fig.colorbar(graph_before, ax=ax[0, 1])
ax[0, 1].set_title('property histogram')
graph_after = ax[1, 1].imshow(hist_after.numpy())
fig.colorbar(graph_after, ax=ax[1, 1])
ax[1, 1].set_title('reconstructed property histogram')
# hist2d = ax[0, 2].imshow(H.T, interpolation='nearest', origin='lower',
# extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
# fig.colorbar(hist2d, ax=ax[0, 2])
# ax[0, 2].set_title('particles per histogram bin')
ax[0, 2].hist(prop, bins=32, histtype='step', label='input data')
ax[0, 2].hist(decoded_prop, bins=32, histtype='step', label='reconstructed')
prop = data_before.flatten()
ax[1, 2].hist(prop, bins=32, histtype='step', label='input data')
ax[1, 2].hist(decoded_prop, bins=32, histtype='step', label='reconstructed')
offset = (np.min(decoded_prop) * np.max(prop) - np.max(decoded_prop) * np.min(prop)) / (np.max(prop) - np.min(prop))
scale = np.max(prop) / np.max(decoded_prop - offset)
ax[1, 2].hist((decoded_prop - offset) * scale, bins=32, histtype='step', label='reconstructed scaled')
ax[1, 2].set_yscale('log')
ax[1, 2].set_xlabel('property value')
ax[1, 2].set_ylabel('counts')
ax[1, 2].set_title('property value distributions')
ax[1, 2].legend()
interp_before = ax[0, 3].imshow(image_interp_before.numpy())
fig.colorbar(interp_before, ax=ax[0, 3])
ax[0, 3].set_title('property interpolated to grid points')
interp_after = ax[1, 3].imshow(image_interp_after.numpy())
fig.colorbar(interp_after, ax=ax[1, 3])
ax[1, 3].set_title('reconstructed property interpolated to grid points')
plt.show()
mlab.figure(1, bgcolor=(0, 0, 0), size=(800, 800))
mlab.clf()
source_before = mlab.pipeline.scalar_field(data_before)
source_after = mlab.pipeline.scalar_field(data_after)
# v_min_before = np.min(data_before) + 0.25 * (np.max(data_before) - np.min(data_before))
# v_max_before = np.min(data_before) + 0.75 * (np.max(data_before) - np.min(data_before))
# mlab.pipeline.volume(source_before, vmin=v_min_before, vmax=v_max_before)
#
# v_min_after = np.min(data_after) + 0.25 * (np.max(data_after) - np.min(data_after))
# v_max_after = np.min(data_after) + 0.75 * (np.max(data_after) - np.min(data_after))
# mlab.pipeline.volume(source_after, vmin=v_min_after, vmax=v_max_after)
mlab.pipeline.iso_surface(source_before, contours=16, opacity=0.4)
mlab.pipeline.iso_surface(source_after, contours=16, opacity=0.4)
mlab.pipeline.scalar_cut_plane(source_before, line_width=2.0, plane_orientation='z_axes')
mlab.pipeline.scalar_cut_plane(source_after, line_width=2.0, plane_orientation='z_axes')
mlab.view(180, 180, 100 * grid_res / 40, [0.5 * grid_res,
0.5 * grid_res,
0.5 * grid_res])
mlab.show()
if __name__ == '__main__':
if os.getcwd().split('/')[2] == 's2675544':
tfrec_base_dir = '/home/s2675544/data/tf_records'
base_dir = '/home/s2675544/git/neural_deprojection/neural_deprojection/models/Simple_complete_model_GCD/complete_model'
print('Running on ALICE')
else:
tfrec_base_dir = '/home/matthijs/Documents/Studie/Master_Astronomy/1st_Research_Project/Data/tf_records'
base_dir = '/home/matthijs/git/neural_deprojection/neural_deprojection/models/Simple_complete_model_GCD/complete_model'
print('Running at home')
scm_cp_dir = os.path.join(base_dir, 'checkpointing')
scm_saved_dir = os.path.join(base_dir, 'saved_model')
tfrec_dir = os.path.join(tfrec_base_dir, 'snap_136_tf_records')
tfrecords = glob.glob(os.path.join(tfrec_dir, 'train', '*.tfrecords'))
dataset = build_dataset(tfrecords, 4*16, batch_size=1)
counter = 0
thing = iter(dataset)
(graph, img, cluster_id, proj_id) = next(thing)
datapoint = 543
# 66
# 543
while counter < datapoint:
(graph, img, cluster_id, proj_id) = next(thing)
counter += 1
discrete_image_vae_checkpoint = glob.glob(os.path.join(scm_cp_dir, '*disc_image_vae*'))[0]
discrete_voxel_vae_checkpoint = glob.glob(os.path.join(scm_cp_dir, '*disc_voxel_vae*'))[0]
auto_regressive_prior_checkpoint = glob.glob(os.path.join(scm_cp_dir, '*auto_regressive_prior*'))[0]
with open(os.path.join(discrete_image_vae_checkpoint, 'config.json'), 'r') as f:
discrete_image_vae_kwargs = json.load(f)['model_parameters']
discrete_image_vae = DiscreteImageVAE(**discrete_image_vae_kwargs)
checkpoint = tf.train.Checkpoint(module=discrete_image_vae)
manager = tf.train.CheckpointManager(checkpoint, discrete_image_vae_checkpoint, max_to_keep=3,
checkpoint_name=discrete_image_vae.__class__.__name__)
if manager.latest_checkpoint is not None:
checkpoint.restore(manager.latest_checkpoint).expect_partial()
print(f"Restored from {manager.latest_checkpoint}")
with open(os.path.join(discrete_voxel_vae_checkpoint, 'config.json'), 'r') as f:
discrete_voxel_vae_kwargs = json.load(f)['model_parameters']
discrete_voxel_vae = DiscreteVoxelsVAE(**discrete_voxel_vae_kwargs)
checkpoint = tf.train.Checkpoint(module=discrete_voxel_vae)
manager = tf.train.CheckpointManager(checkpoint, discrete_voxel_vae_checkpoint, max_to_keep=3,
checkpoint_name=discrete_voxel_vae.__class__.__name__)
if manager.latest_checkpoint is not None:
checkpoint.restore(manager.latest_checkpoint).expect_partial()
print(f"Restored from {manager.latest_checkpoint}")
with open(os.path.join(auto_regressive_prior_checkpoint, 'config.json'), 'r') as f:
auto_regressive_prior_kwargs = json.load(f)['model_parameters']
auto_regressive_prior = AutoRegressivePrior(discrete_image_vae=discrete_image_vae,
discrete_voxel_vae=discrete_voxel_vae,
**auto_regressive_prior_kwargs)
checkpoint = tf.train.Checkpoint(module=auto_regressive_prior)
manager = tf.train.CheckpointManager(checkpoint, auto_regressive_prior_checkpoint, max_to_keep=3,
checkpoint_name=auto_regressive_prior.__class__.__name__)
if manager.latest_checkpoint is not None:
checkpoint.restore(manager.latest_checkpoint).expect_partial()
print(f"Restored from {manager.latest_checkpoint}")
main(saved_model_dir=scm_saved_dir,
checkpoint_dir=glob.glob(os.path.join(scm_cp_dir, '*'))[0],
model=auto_regressive_prior,
cp_kwargs=dict(#autoregressive_prior=_voxelised_model.autoregressive_prior,
#decoder_3d=_voxelised_model.decoder_3d,
#discrete_image_vae=_voxelised_model.discrete_image_vae
),
image=img,
input_graph=graph,
temperature=1.,
prop_index=0,
component=None,
decode_on_interp_pos=True,
saved_model=False,
debug=True,
test_decoder=False)
```
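The 'reconstructed scaled' curve in the property histogram panel applies an affine map that makes the minimum and maximum of the decoded property coincide with those of the input property. Factored out as a standalone helper (a sketch, not part of the original script):

```python
import numpy as np

def affine_match(decoded_prop, prop):
    """Affinely rescale decoded_prop so its min/max match those of prop (as in the ax[1, 2] panel above)."""
    offset = (np.min(decoded_prop) * np.max(prop) - np.max(decoded_prop) * np.min(prop)) \
             / (np.max(prop) - np.min(prop))
    scale = np.max(prop) / np.max(decoded_prop - offset)
    return (decoded_prop - offset) * scale
```

This makes the shapes of the two distributions directly comparable without claiming the decoder is calibrated in absolute units.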
#### File: models/Simple_complete_model/plot_evaluations.py
```python
import numpy as np
import os, glob
import pylab as plt
from matplotlib.widgets import Slider
def plot_voxel(image, rec_voxels, actual_voxels):
fig, axs = plt.subplots(1, 3)
plt.subplots_adjust(left=0.25, bottom=0.25)
axs[0].imshow(image[:,:,0])
axs[0].set_title('Image')
img_actual = axs[1].imshow(actual_voxels[:, :, 0, 0])
axs[1].margins(x=0)
axs[2].set_title("Actual voxels")
img_rec = axs[2].imshow(rec_voxels[:, :, 0, 0])
axs[2].margins(x=0)
axs[2].set_title("Reconstructed voxels")
axcolor = 'lightgoldenrodyellow'
axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
zaxis_idx = Slider(axfreq, 'z-axis slice', 0, actual_voxels.shape[-2]-1, valinit=0, valstep=1)
def update(val):
idx = int(zaxis_idx.val)
img_actual.set_data(actual_voxels[:, :, idx, 0])
img_rec.set_data(rec_voxels[:, :, idx, 0])
fig.canvas.draw_idle()
zaxis_idx.on_changed(update)
plt.show()
def main(eval_dir):
files = glob.glob(os.path.join(eval_dir, '*.npz'))
for f in files:
image = np.load(f)['image']
mu_3d = np.load(f)['mu_3d']
actual_voxels = np.load(f)['actual_voxels']
plot_voxel(image, mu_3d, actual_voxels)
# Visualize the local atomic density
# break
if __name__ == '__main__':
main("/home/albert/data/deprojections")
```
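`main` above expects `.npz` files containing three arrays: `image` [H2, W2, C2], `mu_3d` (reconstructed voxels) and `actual_voxels`, both [H3, W3, D3, C]. A sketch of writing a compatible file (the path, filename and shapes are illustrative):

```python
import numpy as np

image = np.random.rand(256, 256, 1).astype(np.float32)            # input image
mu_3d = np.random.rand(64, 64, 64, 2).astype(np.float32)          # reconstructed voxel grid
actual_voxels = np.random.rand(64, 64, 64, 2).astype(np.float32)  # ground-truth voxel grid
np.savez('/home/albert/data/deprojections/example_0.npz',
         image=image, mu_3d=mu_3d, actual_voxels=actual_voxels)
```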
#### File: models/Simple_complete_model_SCD/main_im.py
```python
import sys
sys.path.insert(1, '/data/s1825216/git/neural_deprojection/')
from functools import partial
import tensorflow as tf
import sonnet as snt
from graph_nets.graphs import GraphsTuple
from neural_deprojection.graph_net_utils import vanilla_training_loop, TrainOneEpoch, \
get_distribution_strategy, build_log_dir, build_checkpoint_dir
from neural_deprojection.models.Simple_complete_model.autoencoder_2d import DiscreteImageVAE
from neural_deprojection.models.identify_medium_SCD.generate_data import decode_examples, decode_examples_old
import glob, os, json
MODEL_MAP = dict(dis_im_vae=DiscreteImageVAE)
def build_training(model_type, model_parameters, optimizer_parameters, loss_parameters, strategy=None,
**kwargs) -> TrainOneEpoch:
model_cls = MODEL_MAP[model_type]
model = model_cls(**model_parameters, **kwargs)
def build_opt(**kwargs):
opt_type = kwargs.get('opt_type')
if opt_type == 'adam':
learning_rate = kwargs.get('learning_rate', 1e-4)
opt = snt.optimizers.Adam(learning_rate, beta1=1 - 1 / 100, beta2=1 - 1 / 500)
else:
raise ValueError('Opt {} invalid'.format(opt_type))
return opt
def build_loss(**loss_parameters):
def loss(model_outputs, batch):
img = batch
# model_outputs = dict(loss=tf.reduce_mean(log_likelihood_samples - kl_term_samples),
# var_exp=tf.reduce_mean(log_likelihood_samples),
# kl_term=tf.reduce_mean(kl_term_samples),
# mean_perplexity=mean_perplexity)
return model_outputs['loss']
return loss
loss = build_loss(**loss_parameters)
opt = build_opt(**optimizer_parameters)
training = TrainOneEpoch(model, loss, opt, strategy=strategy)
return training
def build_dataset(data_dir, batch_size):
tfrecords = glob.glob(os.path.join(data_dir, '*.tfrecords'))
dataset = tf.data.TFRecordDataset(tfrecords).map(partial(decode_examples,
node_shape=(11,),
image_shape=(256, 256, 1),
k=6)) # (graph, image, spsh, proj)
dataset = dataset.map(lambda graph_data_dict, img, spsh, proj, e: img).batch(batch_size=batch_size)
return dataset
def main(data_dir, config, kwargs):
# Make strategy at the start of your main before any other tf code is run.
# strategy = get_distribution_strategy(use_cpus=False, logical_per_physical_factor=1,
# memory_limit=None)
train_dataset = build_dataset(os.path.join(data_dir, 'train'), batch_size=4)
test_dataset = build_dataset(os.path.join(data_dir, 'test'), batch_size=4)
# with strategy.scope():
train_one_epoch = build_training(**config, **kwargs)
train_one_epoch.model.set_temperature(10.)
log_dir = build_log_dir('new_im_16_log_dir', config)
checkpoint_dir = build_checkpoint_dir('new_im_16_checkpointing', config)
save_model_dir = os.path.join('new_im_16_saved_models')
os.makedirs(checkpoint_dir, exist_ok=True)
with open(os.path.join(checkpoint_dir, 'config.json'), 'w') as f:
json.dump(config, f)
vanilla_training_loop(train_one_epoch=train_one_epoch,
training_dataset=train_dataset,
test_dataset=test_dataset,
num_epochs=1000,
early_stop_patience=100,
checkpoint_dir=checkpoint_dir,
log_dir=log_dir,
save_model_dir=save_model_dir,
debug=False)
if __name__ == '__main__':
# data_dir = '/home/s1825216/data/train_data/ClaudeData/'
data_dir = '/home/s1825216/data/dataset/'
config = dict(model_type='dis_im_vae',
model_parameters=dict(hidden_size=64,
embedding_dim=64,
num_embedding=1024,
num_channels=1,
name='discreteImageVAE'),
optimizer_parameters=dict(learning_rate=1e-4, opt_type='adam'),
loss_parameters=dict())
kwargs = dict(
num_token_samples=4,
)
main(data_dir, config, kwargs)
```
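Before committing to the full 1000-epoch run, the configuration can be smoke-tested on a single batch, the same way the GCD driver above runs the model once to build its variables. A sketch, assuming the `data_dir`, `config` and `kwargs` defined in the `__main__` block of this file:

```python
import os

train_one_epoch = build_training(**config, **kwargs)
train_one_epoch.model.set_temperature(10.)
train_dataset = build_dataset(os.path.join(data_dir, 'train'), batch_size=4)
for img_batch in train_dataset.take(1):
    # Builds the variables and is expected to return the dict of loss terms described in build_loss above.
    outputs = train_one_epoch.model(img_batch)
    print(outputs['loss'])
```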
#### File: models/Simple_complete_model_SCD/main.py
```python
import sys
sys.path.insert(1, '/data/s1825216/git/neural_deprojection/')
import tensorflow as tf
import sonnet as snt
from functools import partial
from neural_deprojection.graph_net_utils import vanilla_training_loop, TrainOneEpoch, \
build_log_dir, build_checkpoint_dir, get_distribution_strategy
from neural_deprojection.models.Simple_complete_model.model_utils import SimpleCompleteModel
from neural_deprojection.models.Simple_complete_model.autoencoder_2d import DiscreteImageVAE
from neural_deprojection.models.identify_medium_SCD.generate_data import decode_examples_old
import glob, os, json
from graph_nets.graphs import GraphsTuple
MODEL_MAP = dict(simple_complete_model=SimpleCompleteModel)
def build_training(model_type, model_parameters, optimizer_parameters, loss_parameters, strategy=None,
**kwargs) -> TrainOneEpoch:
model_cls = MODEL_MAP[model_type]
model = model_cls(**model_parameters, **kwargs)
def build_opt(**kwargs):
opt_type = kwargs.get('opt_type')
if opt_type == 'adam':
learning_rate = kwargs.get('learning_rate', 1e-4)
opt = snt.optimizers.Adam(learning_rate, beta1=1 - 1 / 100, beta2=1 - 1 / 500)
else:
raise ValueError('Opt {} invalid'.format(opt_type))
return opt
def build_loss(**loss_parameters):
def loss(model_outputs, batch):
(graph, img) = batch
# model_outputs = dict(loss=tf.reduce_mean(log_likelihood_samples - kl_term_samples),
# var_exp=tf.reduce_mean(log_likelihood_samples),
# kl_term=tf.reduce_mean(kl_term_samples),
# mean_perplexity=mean_perplexity)
return model_outputs['loss']
return loss
loss = build_loss(**loss_parameters)
opt = build_opt(**optimizer_parameters)
training = TrainOneEpoch(model, loss, opt, strategy=strategy)
return training
def build_dataset(data_dir, batch_size):
dataset = _build_dataset(data_dir)
dataset = dataset.batch(batch_size=batch_size)
return dataset
def _build_dataset(data_dir):
tfrecords = glob.glob(os.path.join(data_dir, '*.tfrecords'))
dataset = tf.data.TFRecordDataset(tfrecords).map(partial(decode_examples_old,
node_shape=(11,),
image_shape=(256, 256, 1))) # (graph, image, spsh, proj)
dataset = dataset.map(lambda graph_data_dict, img, spsh, proj: (GraphsTuple(**graph_data_dict), img))
return dataset
# def batch_dataset(dataset, batch_size):
# dataset = dataset.batch(batch_size=batch_size)
# dataset = dataset.map(lambda graph_data_dict, img: {nodes: graph_data_dict['nodes'],
# edges: graph_data_dict['edges'],})
# def graph_correct(graph_data_dict):
# nodes = graph_data_dict['nodes']
# edges = graph_data_dict['edges']
# globals = graph_data_dict['globals']
# senders = graph_data_dict['senders']
# receivers = graph_data_dict['receivers']
# n_node = graph_data_dict['n_node']
# n_edge = graph_data_dict['n_edge']
#
# globals = globals[:, 0, :]
# n_edge = n_edge[:, 0]
# n_node = n_node[:, 0]
#
# graph = GraphsTuple(nodes=nodes,
# edges=edges,
# globals=globals,
# senders=senders,
# receivers=receivers,
# n_node=n_node,
# n_edge=n_edge)
#
# # unbatch
# # offsets (autoregressive example)
def build_distributed_dataset(data_dir, global_batch_size, strategy):
def data_fn(input_context):
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
dataset = _build_dataset(data_dir)
dataset = dataset.shard(input_context.num_input_pipelines, input_context.input_pipeline_id)
dataset = dataset.batch(batch_size=batch_size)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
distributed_dataset = strategy.distribute_datasets_from_function(data_fn)
return distributed_dataset
def main(data_dir, batch_size, config, kwargs):
# Make strategy at the start of your main before any other tf code is run.
# strategy = get_distribution_strategy(use_cpus=False, logical_per_physical_factor=1,
# memory_limit=None)
strategy = None
if strategy is not None:
train_dataset = build_distributed_dataset(os.path.join(data_dir, 'train'), global_batch_size=batch_size, strategy=strategy)
test_dataset = build_distributed_dataset(os.path.join(data_dir, 'test'), global_batch_size=batch_size, strategy=strategy)
else:
train_dataset = build_dataset(os.path.join(data_dir, 'train'), batch_size=batch_size)
test_dataset = build_dataset(os.path.join(data_dir, 'test'), batch_size=batch_size)
# for (graph, positions) in iter(test_dataset):
# print(graph)
# break
if strategy is not None:
with strategy.scope():
train_one_epoch = build_training(**config, **kwargs, strategy=strategy)
else:
train_one_epoch = build_training(**config, **kwargs, strategy=strategy)
train_one_epoch.model.set_temperature(10.)
train_one_epoch.model.set_beta(6.6)
log_dir = build_log_dir('simple_complete_log_dir', config)
checkpoint_dir = build_checkpoint_dir('simple_complete_checkpointing', config)
os.makedirs(checkpoint_dir, exist_ok=True)
with open(os.path.join(checkpoint_dir, 'config.json'), 'w') as f:
json.dump(config, f)
save_model_dir = os.path.join('simple_complete_saved_models')
vanilla_training_loop(train_one_epoch=train_one_epoch,
training_dataset=train_dataset,
test_dataset=test_dataset,
num_epochs=1000,
early_stop_patience=100,
checkpoint_dir=checkpoint_dir,
log_dir=log_dir,
save_model_dir=save_model_dir,
debug=False)
if __name__ == '__main__':
data_dir = '/home/s1825216/data/train_data/ClaudeData/'
saved_model_dir = 'new_im_16_saved_models'
checkpoint_dir = 'im_16_checkpointing'
n_node_per_graph = 50000
batch_size = 4
# no attributes or trainable variables so not trainable?
discrete_image_vae = tf.saved_model.load(saved_model_dir)
config = dict(model_type='simple_complete_model',
model_parameters=dict(num_properties=8,
num_components=16,
component_size=64,
num_embedding_3d=1024,
edge_size=8,
global_size=16,
num_heads=4,
multi_head_output_size=64,
name='simple_complete_model'),
optimizer_parameters=dict(learning_rate=1e-4, opt_type='adam'),
loss_parameters=dict())
kwargs = dict(num_token_samples=4,
n_node_per_graph=256,
batch=batch_size,
discrete_image_vae=discrete_image_vae)
main(data_dir, batch_size, config, kwargs)
```
#### File: models/Simple_complete_model_SCD/tb_to_np.py
```python
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt
# import seaborn as sns
from scipy import stats
from tensorboard.backend.event_processing import event_accumulator
import tensorflow as tf
from packaging import version
number_of_points = 1000
model_name = 'simple_complete_model'
# model_name = 'second_discreteImageVAE'
# model_name = 'discreteImageVAE'
log_dir_path_train = '/home/s1825216/Simple_complete_model_SCD/single_voxelised_log_dir/|voxelised_model||cmpnntsz=16,dcdr3dhddnsz=4,edgsz=4,glblsz=16,mlthdotptsz=16,nm=simple_complete_model,nmembddng3d=128,nmhds=2,nmprprts=1,vxlprdmnsn=4||lrnngrt=1.0e-04,opttyp=adam|||/train/events.out.tfevents.1625680335.node859.38538.2995.v2'
log_dir_path_test = '/home/s1825216/Simple_complete_model_SCD/single_voxelised_log_dir/|voxelised_model||cmpnntsz=16,dcdr3dhddnsz=4,edgsz=4,glblsz=16,mlthdotptsz=16,nm=simple_complete_model,nmembddng3d=128,nmhds=2,nmprprts=1,vxlprdmnsn=4||lrnngrt=1.0e-04,opttyp=adam|||/test/events.out.tfevents.1625680335.node859.38538.3003.v2'
# log_dir_path_train = '/home/s1825216/Simple_complete_model_SCD/second_im_16_log_dir/|dis_im_vae||embddngdm=64,hddnsz=64,nm=second_discreteImageVAE,nmchnnls=1,nmembddng=1024||lrnngrt=1.0e-04,opttyp=adam|||/train/events.out.tfevents.1625753233.node859.46347.325.v2'
# log_dir_path_test = '/home/s1825216/Simple_complete_model_SCD/second_im_16_log_dir/|dis_im_vae||embddngdm=64,hddnsz=64,nm=second_discreteImageVAE,nmchnnls=1,nmembddng=1024||lrnngrt=1.0e-04,opttyp=adam|||/test/events.out.tfevents.1625753233.node859.46347.333.v2'
# log_dir_path_train = '/home/s1825216/git/neural_deprojection/neural_deprojection/models/Simple_complete_model_SCD/new_im_16_log_dir/|dis_im_vae||embddngdm=64,hddnsz=64,nm=discreteImageVAE,nmchnnls=1,nmembddng=1024||lrnngrt=1.0e-04,opttyp=adam|||/train/events.out.tfevents.1624624343.node852.52700.269.v2'
# log_dir_path_test = '/home/s1825216/git/neural_deprojection/neural_deprojection/models/Simple_complete_model_SCD/new_im_16_log_dir/|dis_im_vae||embddngdm=64,hddnsz=64,nm=discreteImageVAE,nmchnnls=1,nmembddng=1024||lrnngrt=1.0e-04,opttyp=adam|||/test/events.out.tfevents.1624624343.node852.52700.277.v2'
ea_train = event_accumulator.EventAccumulator(log_dir_path_train,
size_guidance={event_accumulator.TENSORS: number_of_points})
ea_test = event_accumulator.EventAccumulator(log_dir_path_test,
size_guidance={event_accumulator.TENSORS: number_of_points})
ea_train.Reload()
ea_test.Reload()
print(ea_train.Tags())
print(ea_test.Tags())
def tensor_protos_to_array(df_protos):
len_array = len(df_protos)
arr = np.zeros(len_array)
for i in range(len_array):
arr[i] = tf.make_ndarray(df_protos[i])
return arr
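# tensor_protos_to_array assumes every logged tensor is a scalar summary, so
# tf.make_ndarray collapses each TensorProto to a single float for plotting.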
def plot_from_dfs(train_df, test_df, property_name, epoch_or_minibatch='minibatch'):
train_steps = train_df['step'].to_numpy().astype(dtype=float)
train_prop = tensor_protos_to_array(train_df['tensor_proto'])
plt.figure(figsize=(4,3))
plt.plot(train_steps, train_prop, label='train', color='dodgerblue')
if test_df is not None:
test_steps = test_df['step'].to_numpy().astype(dtype=float)
test_prop = tensor_protos_to_array(test_df['tensor_proto'])
plt.plot(test_steps, test_prop, label='test', color='darkorange')
if epoch_or_minibatch != 'minibatch':
plt.xlabel('epoch')
plt.ticklabel_format(axis='y', style='sci', scilimits=(8, 9))
else:
plt.xlabel('minibatch')
plt.ticklabel_format(axis='both', style='sci', scilimits=(8, 9))
plt.ylabel(property_name)
plt.title(f'{property_name} per {epoch_or_minibatch}')
plt.legend()
plt.tight_layout()
plt.savefig(f'{property_name}_per_{epoch_or_minibatch}.pdf')
plt.close()
# df_train_mini_loss = pd.DataFrame(ea_train.Tensors('train_one_epoch/while/cond/mini_batch_loss'))
# # df_test_mini_loss = pd.DataFrame(ea_test.Tensors('train_one_epoch/while/cond/mini_batch_loss'))
# df_train_epoch_loss = pd.DataFrame(ea_train.Tensors('train_one_epoch/epoch_loss'))
# df_test_epoch_loss = pd.DataFrame(ea_test.Tensors('train_one_epoch/loss'))
# df_train_var_exp = pd.DataFrame(ea_train.Tensors(f'{model_name}/cond_1/var_exp'))
# df_test_var_exp = pd.DataFrame(ea_test.Tensors(f'{model_name}/cond_1/var_exp'))
# df_train_kl_div = pd.DataFrame(ea_train.Tensors(f'{model_name}/cond_1/kl_div'))
# df_test_kl_div = pd.DataFrame(ea_test.Tensors(f'{model_name}/cond_1/kl_div'))
# df_train_perplexity = pd.DataFrame(ea_train.Tensors(f'{model_name}/cond_1/perplexity'))
# df_test_perplexity = pd.DataFrame(ea_test.Tensors(f'{model_name}/cond_1/perplexity'))
df_train_mini_loss = pd.DataFrame(ea_train.Tensors('train_one_epoch/while/cond/mini_batch_loss'))
# df_test_mini_loss = pd.DataFrame(ea_test.Tensors('train_one_epoch/while/cond/mini_batch_loss'))
df_train_epoch_loss = pd.DataFrame(ea_train.Tensors('train_one_epoch/epoch_loss'))
df_test_epoch_loss = pd.DataFrame(ea_test.Tensors('train_one_epoch/loss'))
df_train_var_exp = pd.DataFrame(ea_train.Tensors('simple_complete_model/cond/var_exp'))
df_test_var_exp = pd.DataFrame(ea_test.Tensors('simple_complete_model/cond/var_exp'))
df_train_kl_div = pd.DataFrame(ea_train.Tensors('simple_complete_model/cond/kl_div'))
df_test_kl_div = pd.DataFrame(ea_test.Tensors('simple_complete_model/cond/kl_div'))
df_train_perplexity = pd.DataFrame(ea_train.Tensors('simple_complete_model/cond/perplexity'))
df_test_perplexity = pd.DataFrame(ea_test.Tensors('simple_complete_model/cond/perplexity'))
df_train_std_before = pd.DataFrame(ea_train.Tensors('simple_complete_model/cond_1/properties3_std_before'))
df_test_std_before = pd.DataFrame(ea_test.Tensors('simple_complete_model/cond_1/properties3_std_before'))
df_train_std_after = pd.DataFrame(ea_train.Tensors('simple_complete_model/cond_1/properties3_std_after'))
df_test_std_after = pd.DataFrame(ea_test.Tensors('simple_complete_model/cond_1/properties3_std_after'))
plot_from_dfs(df_train_mini_loss, None, 'loss')
plot_from_dfs(df_train_epoch_loss, df_test_epoch_loss, 'loss', 'epoch')
plot_from_dfs(df_train_var_exp, df_test_var_exp, 'variational expectation\n')
plot_from_dfs(df_train_kl_div, df_test_kl_div, 'kl divergence')
plot_from_dfs(df_train_perplexity, df_test_perplexity, 'perplexity')
plot_from_dfs(df_train_std_before, df_test_std_before, 'std before')
plot_from_dfs(df_train_std_after, df_test_std_after, 'std after')
```
#### File: Simple_complete_model/train_autoencoder_3d/main.py
```python
from neural_deprojection.models.Simple_complete_model.autoencoder_3d import DiscreteVoxelsVAE
from neural_deprojection.graph_net_utils import vanilla_training_loop, TrainOneEpoch, build_example_dataset, grid_graphs, graph_unbatch_reshape, build_log_dir, build_checkpoint_dir
import os
import tensorflow as tf
import json
import pylab as plt
import sonnet as snt
MODEL_MAP = {'disc_voxel_vae': DiscreteVoxelsVAE}
def build_training(model_type, model_parameters, optimizer_parameters, loss_parameters, strategy=None, **kwargs) -> TrainOneEpoch:
model_cls = MODEL_MAP[model_type]
model = model_cls(**model_parameters, **kwargs)
def build_opt(**kwargs):
opt_type = kwargs.get('opt_type')
if opt_type == 'adam':
learning_rate = kwargs.get('learning_rate')
opt = snt.optimizers.Adam(learning_rate)
else:
raise ValueError('Opt {} invalid'.format(opt_type))
return opt
def build_loss(**loss_parameters):
def loss(model_outputs, batch):
return model_outputs['loss']
return loss
loss = build_loss(**loss_parameters)
opt = build_opt(**optimizer_parameters)
training = TrainOneEpoch(model, loss, opt, strategy=strategy)
return training
def train_discrete_voxel_vae(config, kwargs):
# with strategy.scope():
train_one_epoch = build_training(**config, **kwargs)
dataset = build_example_dataset(100, batch_size=2, num_blobs=3, num_nodes=64**3, image_dim=256)
# the model will call grid_graphs internally to learn the 3D autoencoder.
# we show here what that produces from a batch of graphs.
for graphs, image in iter(dataset):
assert image.numpy().shape == (2, 256, 256, 1)
plt.imshow(image[0].numpy())
plt.colorbar()
plt.show()
voxels = grid_graphs(graphs, 64)
assert voxels.numpy().shape == (2, 64, 64, 64, 1)
plt.imshow(tf.reduce_mean(voxels[0], axis=-2))
plt.colorbar()
plt.show()
break
# drop the image as the model expects only graphs
dataset = dataset.map(lambda graphs, images: (graphs,))
# run on first input to set variable shapes
for batch in iter(dataset):
train_one_epoch.model(*batch)
break
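    # Calling the model once on a real batch builds the Sonnet variables (they are
    # created on first call), so checkpointing and saving later in
    # vanilla_training_loop see concrete shapes.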
log_dir = build_log_dir('log_dir', config)
checkpoint_dir = build_checkpoint_dir('checkpointing', config)
os.makedirs(checkpoint_dir, exist_ok=True)
with open(os.path.join(checkpoint_dir, 'config.json'), 'w') as f:
json.dump(config, f)
vanilla_training_loop(train_one_epoch=train_one_epoch,
training_dataset=dataset,
num_epochs=1,
early_stop_patience=5,
checkpoint_dir=checkpoint_dir,
log_dir=log_dir,
save_model_dir=checkpoint_dir,
debug=False)
def main():
config = dict(model_type='disc_voxel_vae',
model_parameters=dict(voxels_per_dimension=8*8,
embedding_dim=64, # 64
num_embedding=128, # 1024
hidden_size=4,
num_channels=1),
optimizer_parameters=dict(learning_rate=1e-3, opt_type='adam'),
loss_parameters=dict())
kwargs = dict(num_token_samples=4,
temperature=2.,
beta=1.)
train_discrete_voxel_vae(config, kwargs)
if __name__ == '__main__':
main()
```
#### File: models/vqvae_GCD/vae.py
```python
import sys, glob, os
sys.path.insert(1, '/data/s2675544/git/neural_deprojection/')
import tensorflow as tf
import sonnet as snt
import numpy as np
from neural_deprojection.graph_net_utils import vanilla_training_loop, batch_dataset_set_graph_tuples, \
TrainOneEpoch, AbstractModule, get_distribution_strategy, build_log_dir, build_checkpoint_dir
from neural_deprojection.models.identify_medium_GCD.main import build_dataset
from tensorflow_addons.image import gaussian_filter2d
class ResidualStack(AbstractModule):
def __init__(self,
num_hiddens,
num_residual_layers,
num_residual_hiddens,
residual_name='',
name=None):
super(ResidualStack, self).__init__(name=name)
self._num_hiddens = num_hiddens
self._num_residual_layers = num_residual_layers
self._num_residual_hiddens = num_residual_hiddens
self._layers = []
for i in range(num_residual_layers):
conv3 = snt.Conv2D(
output_channels=num_residual_hiddens,
kernel_shape=(3, 3),
stride=(1, 1),
name=f"res3x3_{residual_name}_{i}")
conv1 = snt.Conv2D(
output_channels=num_hiddens,
kernel_shape=(1, 1),
stride=(1, 1),
name=f"res1x1_{residual_name}_{i}")
self._layers.append((conv3, conv1))
def _build(self, inputs):
h = inputs
for conv3, conv1 in self._layers:
conv3_out = conv3(tf.nn.relu(h))
conv1_out = conv1(tf.nn.relu(conv3_out))
h += conv1_out
return tf.nn.relu(h) # Resnet V1 style
class VariationalAutoEncoder(AbstractModule):
def __init__(self,
n_latent=4,
kernel_size=4,
name=None):
super(VariationalAutoEncoder, self).__init__(name=name)
self.n_latent = n_latent
self.encoder = snt.Sequential([snt.Conv2D(4, kernel_size, stride=4, padding='SAME'), tf.nn.relu, # [b, 250, 250, 4]
snt.Conv2D(16, kernel_size, stride=4, padding='SAME'), tf.nn.relu, # [b, 63, 63, 16]
snt.Conv2D(32, kernel_size, stride=4, padding='SAME'), tf.nn.relu, # [b, 16, 16, 32]
snt.Conv2D(64, kernel_size, stride=2, padding='SAME'), tf.nn.relu, # [b, 8, 8, 64]
snt.Flatten()])
self.mn = snt.nets.MLP([n_latent], activation=tf.nn.relu)
self.std = snt.nets.MLP([n_latent], activation=tf.nn.relu)
self.decoder = snt.Sequential([snt.nets.MLP([8*8*64], activation=tf.nn.leaky_relu),
snt.Reshape([8, 8, 64]),
snt.Conv2DTranspose(64, kernel_size, stride=2, padding='SAME'), tf.nn.relu, # [b, 16, 16, 64]
snt.Conv2DTranspose(32, kernel_size, stride=4, padding='SAME'), tf.nn.relu, # [b, 64, 64, 32]
snt.Conv2DTranspose(16, kernel_size, stride=4, padding='SAME'), tf.nn.relu, # [b, 256, 256, 16]
snt.Conv2DTranspose(4, kernel_size, stride=4, padding='SAME'), tf.nn.relu, # [b, 1024, 1024, 4]
snt.Conv2D(1, kernel_size, padding='SAME')]) # [b, 1024, 1024, 1]
@property
def step(self):
if self._step is None:
raise ValueError("Need to set step idx variable. model.step = epoch")
return self._step
@step.setter
def step(self, value):
self._step = value
def _build(self, batch):
(img, ) = batch
img = gaussian_filter2d(img, filter_shape=[6, 6])
img_before_autoencoder = (img - tf.reduce_min(img)) / (
tf.reduce_max(img) - tf.reduce_min(img))
tf.summary.image(f'img_before_autoencoder', img_before_autoencoder, step=self.step)
encoded_img = self.encoder(img)
print(encoded_img.shape)
mn = self.mn(encoded_img)
std = self.std(encoded_img)
epsilon = tf.random.normal(tf.stack([tf.shape(encoded_img)[0], self.n_latent]))
z = mn + tf.multiply(epsilon, tf.exp(tf.multiply(0.5, std)))
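        # Reparameterization trick: `std` is used as a log-variance, so
        # exp(0.5*std) is the standard deviation and z = mn + sigma*eps stays
        # differentiable; this matches the KL term -0.5*(1 + std - mn**2 - exp(std))
        # used later in this file's training loss.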
decoded_img = self.decoder(z)
print(decoded_img.shape)
img_after_autoencoder = (decoded_img - tf.reduce_min(decoded_img)) / (
tf.reduce_max(decoded_img) - tf.reduce_min(decoded_img))
tf.summary.image(f'img_after_autoencoder', img_after_autoencoder, step=self.step)
return mn, std, z, decoded_img
class VectorQuantizerVariationalAutoEncoder(AbstractModule):
def __init__(self,
embedding_dim=64,
num_embeddings=512,
kernel_size=4,
num_layers=5,
num_residual_layers=2,
name=None):
super(VectorQuantizerVariationalAutoEncoder, self).__init__(name=name)
self.embedding_dim = embedding_dim
self.num_embeddings = num_embeddings
self.num_layers = num_layers
self.num_residual_layers = num_residual_layers
encoder_layers = []
decoder_layers = []
for i in range(self.num_layers):
num_filters = 4 * 2**i
conv_layer = snt.Conv2D(output_channels=num_filters,
kernel_shape=kernel_size,
stride=2,
padding='SAME',
name=f'conv{num_filters}')
residual_layer = ResidualStack(num_hiddens=num_filters,
num_residual_layers=self.num_residual_layers,
num_residual_hiddens=num_filters,
residual_name=f'enc_{num_filters}')
encoder_layers.append(conv_layer)
encoder_layers.append(tf.nn.relu)
encoder_layers.append(residual_layer)
for i in range(self.num_layers - 2, -1, -1):
num_filters = 4 * 2 ** i
conv_layer = snt.Conv2DTranspose(output_channels=num_filters,
kernel_shape=kernel_size,
stride=2,
padding='SAME',
name=f'convt{num_filters}')
residual_layer = ResidualStack(num_hiddens=num_filters,
num_residual_layers=self.num_residual_layers,
num_residual_hiddens=num_filters,
residual_name=f'enc_{num_filters}')
decoder_layers.append(conv_layer)
decoder_layers.append(tf.nn.relu)
decoder_layers.append(residual_layer)
decoder_layers.append(snt.Conv2DTranspose(1, kernel_size, stride=2, padding='SAME', name='convt1'))
decoder_layers.append(tf.nn.relu)
decoder_layers.append(snt.Conv2D(1, kernel_size, padding='SAME', name='conv1'))
self.encoder = snt.Sequential(encoder_layers)
self.decoder = snt.Sequential(decoder_layers)
self.VQVAE = snt.nets.VectorQuantizerEMA(embedding_dim=embedding_dim,
num_embeddings=num_embeddings,
commitment_cost=0.25,
decay=0.994413,
name='VQ')
@property
def step(self):
if self._step is None:
raise ValueError("Need to set step idx variable. model.step = epoch")
return self._step
@step.setter
def step(self, value):
self._step = value
def _build(self, batch):
(img, ) = batch
img = gaussian_filter2d(img, filter_shape=[6, 6])
img_before_autoencoder = (img - tf.reduce_min(img)) / (
tf.reduce_max(img) - tf.reduce_min(img))
tf.summary.image(f'img_before_autoencoder', img_before_autoencoder, step=self.step)
encoded_img = self.encoder(img)
vq_dict = self.VQVAE(encoded_img, is_training=True)
quantized_img = (vq_dict['quantize'] - tf.reduce_min(vq_dict['quantize'])) / (
tf.reduce_max(vq_dict['quantize']) - tf.reduce_min(vq_dict['quantize']))
# quantized_img has shape [32, 32, 32, 64] (batch, x, y, channels),
# for the tf.summary we only show one of the 64 channels.
tf.summary.image(f'quantized_img', quantized_img[:, :, :, np.random.randint(low=0, high=64)][:, :, :, None], step=self.step)
decoded_img = self.decoder(vq_dict['quantize'])
img_after_autoencoder = (decoded_img - tf.reduce_min(decoded_img)) / (
tf.reduce_max(decoded_img) - tf.reduce_min(decoded_img))
tf.summary.image(f'img_after_autoencoder', img_after_autoencoder, step=self.step)
return vq_dict['loss'], decoded_img
def train_variational_autoencoder(data_dir):
# strategy = get_distribution_strategy(use_cpus=False, logical_per_physical_factor=1, memory_limit=10000)
# lists containing tfrecord files
train_tfrecords = glob.glob(os.path.join(data_dir, 'train', '*.tfrecords'))
test_tfrecords = glob.glob(os.path.join(data_dir, 'test', '*.tfrecords'))
train_dataset = build_dataset(train_tfrecords)
test_dataset = build_dataset(test_tfrecords)
train_dataset = train_dataset.map(lambda graph, img, c: (img,)).batch(batch_size=32)
test_dataset = test_dataset.map(lambda graph, img, c: (img,)).batch(batch_size=32)
# with strategy.scope():
model = VariationalAutoEncoder(n_latent=4, kernel_size=4)
learning_rate = 1e-3
opt = snt.optimizers.Adam(learning_rate)
def loss(model_outputs, batch):
(img,) = batch
mn, std, z, decoded_img = model_outputs
# reconstruction_loss = tf.reduce_mean(tf.reduce_sum(
# keras.losses.binary_crossentropy(img, decoded_img), axis=(1, 2)
# ))
reconstruction_loss = tf.reduce_mean((gaussian_filter2d(img, filter_shape=[6, 6]) - decoded_img[:, 12:-12, 12:-12, :]) ** 2)
kl_loss = -0.5 * (1 + std - tf.square(mn) - tf.exp(std))
kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))
total_loss = reconstruction_loss + kl_loss
print(f"recon_loss = {reconstruction_loss}")
print(f"kl_loss = {kl_loss}")
return total_loss
train_one_epoch = TrainOneEpoch(model, loss, opt, strategy=None)
log_dir = 'VAE_log_dir'
checkpoint_dir = 'VAE_checkpointing'
vanilla_training_loop(train_one_epoch=train_one_epoch,
training_dataset=train_dataset,
test_dataset=test_dataset,
num_epochs=50,
early_stop_patience=5,
checkpoint_dir=checkpoint_dir,
log_dir=log_dir,
debug=False)
def train_VQVAE(data_dir):
# strategy = get_distribution_strategy(use_cpus=False, logical_per_physical_factor=1, memory_limit=10000)
# lists containing tfrecord files
train_tfrecords = glob.glob(os.path.join(data_dir, 'train', '*.tfrecords'))
test_tfrecords = glob.glob(os.path.join(data_dir, 'test', '*.tfrecords'))
train_dataset = build_dataset(train_tfrecords)
test_dataset = build_dataset(test_tfrecords)
train_dataset = train_dataset.map(lambda graph, img, c: (img,)).batch(batch_size=32)
test_dataset = test_dataset.map(lambda graph, img, c: (img,)).batch(batch_size=32)
# with strategy.scope():
autoencoder_depth = 6
model = VectorQuantizerVariationalAutoEncoder(embedding_dim=2 * 2**autoencoder_depth,
num_embeddings=1024,
kernel_size=4,
num_layers=autoencoder_depth,
num_residual_layers=2)
learning_rate = 1e-6
opt = snt.optimizers.Adam(learning_rate)
def loss(model_outputs, batch):
(img,) = batch
vq_loss, decoded_img = model_outputs
print('im shape', img.shape)
print('dec im shape', decoded_img.shape)
# reconstruction_loss = tf.reduce_mean(tf.reduce_sum(
# keras.losses.binary_crossentropy(img, decoded_img), axis=(1, 2)
# ))
reconstruction_loss = tf.reduce_mean((gaussian_filter2d(img, filter_shape=[6, 6]) - decoded_img[:, 12:-12, 12:-12, :]) ** 2)
total_loss = reconstruction_loss + vq_loss
return total_loss
train_one_epoch = TrainOneEpoch(model, loss, opt, strategy=None)
log_dir = 'vqvae2_log_dir'
checkpoint_dir = 'vqvae2_checkpointing'
vanilla_training_loop(train_one_epoch=train_one_epoch,
training_dataset=train_dataset,
test_dataset=test_dataset,
num_epochs=50,
early_stop_patience=5,
checkpoint_dir=checkpoint_dir,
log_dir=log_dir,
debug=False)
def main(data_dir):
# train_variational_autoencoder(data_dir)
train_VQVAE(data_dir)
if __name__ == '__main__':
tfrec_base_dir = '/home/s2675544/data/tf_records'
tfrec_dir = os.path.join(tfrec_base_dir, 'snap_128_tf_records')
main(tfrec_dir)
```
#### File: neural_deprojection/monet/cycle_gan.py
```python
import tensorflow as tf
from tensorflow import keras
class CycleGan(keras.Model):
def __init__(
self,
monet_generator,
photo_generator,
monet_discriminator,
photo_discriminator,
lambda_cycle=10,
):
super(CycleGan, self).__init__()
self.m_gen = monet_generator
self.p_gen = photo_generator
self.m_disc = monet_discriminator
self.p_disc = photo_discriminator
self.lambda_cycle = lambda_cycle
def compile(
self,
m_gen_optimizer,
p_gen_optimizer,
m_disc_optimizer,
p_disc_optimizer,
gen_loss_fn,
disc_loss_fn,
cycle_loss_fn,
identity_loss_fn
):
super(CycleGan, self).compile()
self.m_gen_optimizer = m_gen_optimizer
self.p_gen_optimizer = p_gen_optimizer
self.m_disc_optimizer = m_disc_optimizer
self.p_disc_optimizer = p_disc_optimizer
self.gen_loss_fn = gen_loss_fn
self.disc_loss_fn = disc_loss_fn
self.cycle_loss_fn = cycle_loss_fn
self.identity_loss_fn = identity_loss_fn
self.loss_trackers = dict(
monet_gen_loss=keras.metrics.Mean(name="monet_gen_loss"),
monet_disc_loss=keras.metrics.Mean(name="monet_disc_loss"),
photo_gen_loss=keras.metrics.Mean(name="photo_gen_loss"),
photo_disc_loss=keras.metrics.Mean(name="photo_disc_loss"),
total_loss=keras.metrics.Mean(name="total_loss"),
)
@property
def metrics(self):
# We list our `Metric` objects here so that `reset_states()` can be
# called automatically at the start of each epoch
# or at the start of `evaluate()`.
# If you don't implement this property, you have to call
# `reset_states()` yourself at the time of your choosing.
return [self.loss_trackers[key] for key in sorted(self.loss_trackers.keys())]
@tf.function
def train_step(self, batch_data):
real_monet, real_photo = batch_data
if self.compiled_loss is not None:
            raise ValueError("You passed a loss function to `model.compile` however we are defining our own losses inside the"
"training loop. Passed loss: {}".format(self.compiled_loss))
if self.compiled_metrics is not None:
            raise ValueError("You passed metrics to the compile function, however we are defining our own metrics in the "
"training loop. Passed metrics: {}".format(self.compiled_metrics))
with tf.GradientTape(persistent=True) as tape:
# photo to monet back to photo
fake_monet = self.m_gen(real_photo, training=True)
cycled_photo = self.p_gen(fake_monet, training=True)
# monet to photo back to monet
fake_photo = self.p_gen(real_monet, training=True)
cycled_monet = self.m_gen(fake_photo, training=True)
# generating itself
same_monet = self.m_gen(real_monet, training=True)
same_photo = self.p_gen(real_photo, training=True)
            # discriminator used to check, inputting real images
disc_real_monet = self.m_disc(real_monet, training=True)
disc_real_photo = self.p_disc(real_photo, training=True)
            # discriminator used to check, inputting fake images
disc_fake_monet = self.m_disc(fake_monet, training=True)
disc_fake_photo = self.p_disc(fake_photo, training=True)
# evaluates generator loss
monet_gen_loss = self.gen_loss_fn(disc_fake_monet)
photo_gen_loss = self.gen_loss_fn(disc_fake_photo)
# evaluates total cycle consistency loss
total_cycle_loss = self.cycle_loss_fn(real_monet, cycled_monet, self.lambda_cycle) + self.cycle_loss_fn(
real_photo, cycled_photo, self.lambda_cycle)
# evaluates total generator loss
total_monet_gen_loss = monet_gen_loss + total_cycle_loss + self.identity_loss_fn(real_monet, same_monet,
self.lambda_cycle)
total_photo_gen_loss = photo_gen_loss + total_cycle_loss + self.identity_loss_fn(real_photo, same_photo,
self.lambda_cycle)
# evaluates discriminator loss
monet_disc_loss = self.disc_loss_fn(disc_real_monet, disc_fake_monet)
photo_disc_loss = self.disc_loss_fn(disc_real_photo, disc_fake_photo)
both_disc_loss = monet_disc_loss + photo_disc_loss
signal_disc_improving = both_disc_loss < self.loss_trackers['monet_disc_loss'].result() + self.loss_trackers['photo_disc_loss'].result()
monet_disc_loss = tf.where(signal_disc_improving, 0., monet_disc_loss)
photo_disc_loss = tf.where(signal_disc_improving, 0., photo_disc_loss)
# Calculate the gradients for generator and discriminator
monet_generator_gradients = tape.gradient(total_monet_gen_loss,
self.m_gen.trainable_variables)
photo_generator_gradients = tape.gradient(total_photo_gen_loss,
self.p_gen.trainable_variables)
monet_discriminator_gradients = tape.gradient(monet_disc_loss,
self.m_disc.trainable_variables)
photo_discriminator_gradients = tape.gradient(photo_disc_loss,
self.p_disc.trainable_variables)
# Apply the gradients to the optimizer
self.m_gen_optimizer.apply_gradients(zip(monet_generator_gradients,
self.m_gen.trainable_variables))
self.p_gen_optimizer.apply_gradients(zip(photo_generator_gradients,
self.p_gen.trainable_variables))
self.m_disc_optimizer.apply_gradients(zip(monet_discriminator_gradients,
self.m_disc.trainable_variables))
self.p_disc_optimizer.apply_gradients(zip(photo_discriminator_gradients,
self.p_disc.trainable_variables))
        # These keep track of the mean loss over the epoch. They are reset in the model.fit function at the start of each
        # epoch automatically.
self.loss_trackers["monet_gen_loss"].update_state(total_monet_gen_loss)
self.loss_trackers["photo_gen_loss"].update_state(total_photo_gen_loss)
self.loss_trackers["monet_disc_loss"].update_state(monet_disc_loss)
self.loss_trackers["photo_disc_loss"].update_state(photo_disc_loss)
self.loss_trackers["total_loss"].update_state(total_monet_gen_loss+total_photo_gen_loss+monet_disc_loss+photo_disc_loss)
return {k:v.result() for k,v in self.loss_trackers.items()}
@tf.function
def test_step(self, batch_data):
#similar to the train_step except for skipping the optimisation
real_monet, real_photo = batch_data
if self.compiled_loss is not None:
            raise ValueError("You passed a loss function to `model.compile` however we are defining our own losses inside the"
"training loop. Passed loss: {}".format(self.compiled_loss))
if self.compiled_metrics is not None:
            raise ValueError("You passed metrics to the compile function, however we are defining our own metrics in the "
"training loop. Passed metrics: {}".format(self.compiled_metrics))
# photo to monet back to photo
fake_monet = self.m_gen(real_photo, training=True)
cycled_photo = self.p_gen(fake_monet, training=True)
# monet to photo back to monet
fake_photo = self.p_gen(real_monet, training=True)
cycled_monet = self.m_gen(fake_photo, training=True)
# generating itself
same_monet = self.m_gen(real_monet, training=True)
same_photo = self.p_gen(real_photo, training=True)
        # discriminator used to check, inputting real images
disc_real_monet = self.m_disc(real_monet, training=True)
disc_real_photo = self.p_disc(real_photo, training=True)
        # discriminator used to check, inputting fake images
disc_fake_monet = self.m_disc(fake_monet, training=True)
disc_fake_photo = self.p_disc(fake_photo, training=True)
# evaluates generator loss
monet_gen_loss = self.gen_loss_fn(disc_fake_monet)
photo_gen_loss = self.gen_loss_fn(disc_fake_photo)
# evaluates total cycle consistency loss
total_cycle_loss = self.cycle_loss_fn(real_monet, cycled_monet, self.lambda_cycle) + self.cycle_loss_fn(
real_photo, cycled_photo, self.lambda_cycle)
# evaluates total generator loss
total_monet_gen_loss = monet_gen_loss + total_cycle_loss + self.identity_loss_fn(real_monet, same_monet,
self.lambda_cycle)
total_photo_gen_loss = photo_gen_loss + total_cycle_loss + self.identity_loss_fn(real_photo, same_photo,
self.lambda_cycle)
# evaluates discriminator loss
monet_disc_loss = self.disc_loss_fn(disc_real_monet, disc_fake_monet)
photo_disc_loss = self.disc_loss_fn(disc_real_photo, disc_fake_photo)
        # These keep track of the mean loss over the epoch. They are reset in the model.fit function at the start of each
# epoch automatically.
self.loss_trackers["monet_gen_loss"].update_state(total_monet_gen_loss)
self.loss_trackers["photo_gen_loss"].update_state(total_photo_gen_loss)
self.loss_trackers["monet_disc_loss"].update_state(monet_disc_loss)
self.loss_trackers["photo_disc_loss"].update_state(photo_disc_loss)
self.loss_trackers["total_loss"].update_state(
total_monet_gen_loss + total_photo_gen_loss + monet_disc_loss + photo_disc_loss)
return {k: v.result() for k, v in self.loss_trackers.items()}
def build_discriminator_loss(strategy):
with strategy.scope():
def discriminator_loss(real, generated):
real_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(
tf.ones_like(real), real)
generated_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True,
reduction=tf.keras.losses.Reduction.NONE)(
tf.zeros_like(generated), generated)
total_disc_loss = real_loss + generated_loss
return total_disc_loss * 0.5
return discriminator_loss
def build_generator_loss(strategy):
with strategy.scope():
def generator_loss(generated):
return tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(
tf.ones_like(generated), generated)
return generator_loss
def build_calc_cycle_loss(strategy):
with strategy.scope():
def calc_cycle_loss(real_image, cycled_image, LAMBDA):
loss1 = tf.reduce_mean(tf.abs(real_image - cycled_image))
return LAMBDA * loss1
return calc_cycle_loss
def build_identity_loss(strategy):
with strategy.scope():
def identity_loss(real_image, same_image, LAMBDA):
loss = tf.reduce_mean(tf.abs(real_image - same_image))
return LAMBDA * 0.5 * loss
return identity_loss
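# Note on the four builders above: the Keras losses are constructed inside
# strategy.scope() with Reduction.NONE, which leaves the per-replica reduction to
# the caller; presumably this is why CycleGan.train_step combines and tracks the
# losses itself instead of relying on model.compile's loss handling.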
```
#### File: neural_deprojection/monet/read_tfrec.py
```python
import tensorflow as tf
IMAGE_SIZE = [256, 256]
def decode_image(image):
image = tf.image.decode_jpeg(image, channels=3)
image = (tf.cast(image, tf.float32) / 127.5) - 1
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
def read_tfrecord(example):
tfrecord_format = {
"image_name": tf.io.FixedLenFeature([], tf.string),
"image": tf.io.FixedLenFeature([], tf.string),
"target": tf.io.FixedLenFeature([], tf.string)
}
example = tf.io.parse_single_example(example, tfrecord_format)
image = decode_image(example['image'])
return image
def load_dataset(filenames, AUTOTUNE, labeled=True, ordered=False):
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(read_tfrecord, num_parallel_calls=AUTOTUNE)
return dataset
``` |
{
"source": "Joshuaalbert/RadioAstronomyThings",
"score": 3
} |
#### File: rathings/notebooks/combine_images.py
```python
import numpy as np
import pylab as plt
from scipy.stats import norm#is it poisson
import glob,os
#import BeamDeconvolution
import numpy as np
from scipy.optimize import minimize
def ecliptic2quadratic(xc,yc,bmaj,bmin,pa,k=np.log(2)):
'''a*x**2 + b*x*y + c*y**2 + d*x + e*y + f = k
pa in deg'''
#unrotated solution
a0 = k/(bmaj/2.)**2
c0 = k/(bmin/2.)**2
theta = (pa + 90.)*np.pi/180.
#Rotated Solution
cos2 = np.cos(theta)**2
sin2 = np.sin(theta)**2
A = (a0*cos2 + c0*sin2)
C = (c0*cos2 + a0*sin2)
B = (a0 - c0 )*np.sin(2.*theta)
#Now move center
D = -2.*A*xc - B*yc
E = -2.*C*yc - B*xc
F = A*xc**2 + B*xc*yc + C*yc**2
return A,B,C,D,E,F
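# Worked example for ecliptic2quadratic: a circular beam with bmaj = bmin = b,
# pa = 0, centred on the origin gives A = C = 4*ln(2)/b**2 and B = D = E = F = 0,
# so the k = ln(2) contour is the half-power circle of diameter b (the FWHM).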
def quad2ecliptic(A,B,C,k=np.log(2)):
A /= k
B /= k
C /= k
D1 = np.sqrt(A**2-2*A*C+B**2+C**2)
A0 = (-D1 + A + C)/2.
C0 = (D1 + A + C)/2.
D2 = D1 + A - C
D3 = 2*D2*D1
if D3 < 0 or A0 < 0 or C0 < 0:
# print "No real ecliptic coordinates from quadratic"
return None
if (D2 == 0) or (D3 - B*np.sqrt(D3) == 0):
#print "circle"
theta = np.pi/2.
else:
theta = 2.*np.arctan(B/D2 - np.sqrt(D3)/D2)
bmaj = 2./np.sqrt(A0)
bmin = 2./np.sqrt(C0)
bpa = (theta - np.pi/2.)*180/np.pi#degs
bpa = np.mod(bpa,180.)-90.
# while bpa < 0:#to (0,180)
# bpa += 180.
# while bpa > 180.:
# bpa -= 180.
def chi2(b,ak,bk,ck):
a,b,c,d,e,f = ecliptic2quadratic(0.,0.,b[0],b[1],b[2])
return (a-ak)**2 + (b-bk)**2 + (c-ck)**2
res = minimize(chi2,(bmaj,bmin,bpa),args=(A,B,C),method='Powell')
return res.x
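# Note on quad2ecliptic: A0 and C0 are the eigenvalues of the quadratic-form
# matrix [[A, B/2], [B/2, C]], giving the beam axes analytically; the Powell
# search then polishes (bmaj, bmin, pa) so that converting back to quadratic
# coefficients reproduces (A, B, C) as closely as possible.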
def deconvolve(A1,B1,C1,A2,B2,C2):
'''Solves analytically G(A1,B1,C1) = convolution(G(A2,B2,C2), G(Ak,Bk,Ck))
    for Ak,Bk,Ck
Returns None if delta function'''
D = B1**2 - 2*B1*B2 + B2**2 - 4*A1*C1 + 4* A2* C1 + 4* A1* C2 - 4* A2* C2
if (np.abs(D) < 10*(1-2./3.-1./3.)):
#print "Indefinite... invertibles"
return None#delta function
if (D<0.):
#print "Inverse Gaussian, discriminant D:",D
pass
Ak = (-A2* B1**2 + A1* B2**2 + 4* A1* A2* C1 - 4* A1* A2* C2)/D
Bk = (-B1**2 *B2 + B1* B2**2 + 4* A1* B2* C1 - 4* A2* B1* C2)/D
Ck = (B2**2 *C1 - B1**2 *C2 + 4* A1* C1* C2 - 4* A2* C1* C2)/D
return Ak,Bk,Ck
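# Background for deconvolve: writing each Gaussian exponent as a 2x2 matrix
# M = [[A, B/2], [B/2, C]], convolution adds the inverse matrices, so
# deconvolution solves Mk = (M1^-1 - M2^-1)^-1. Expanding the 2x2 inverses gives
# the closed forms above, with D = -4*det(M1 - M2) in the denominators; D near
# zero therefore corresponds to a (near) delta-function kernel.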
def findCommonBeam(beams):
beams_array = []
for b in beams:
beams_array.append([b['major']['value'],b['minor']['value'],b['pa']['value']])
beams_array = np.array(beams_array)
#Try convolving to max area one
Areas = beams_array[:,0]*beams_array[:,1]*np.pi/4./np.log(2.)
idxMaxArea = np.argsort(Areas)[-1]
A1,B1,C1,D,E,F = ecliptic2quadratic(0.,0.,beams_array[idxMaxArea,0],beams_array[idxMaxArea,1],beams_array[idxMaxArea,2])
cb = beams_array[idxMaxArea,:].flatten()
i = 0
while i < np.size(Areas):
print np.size(Areas),i
if i != idxMaxArea:
            #deconvolve
A2,B2,C2,D,E,F = ecliptic2quadratic(0.,0.,beams_array[i,0],beams_array[i,1],beams_array[i,2])
Ak,Bk,Ck = deconvolve(A1,B1,C1,A2,B2,C2)
print Ak,Bk,Ck
try:
b = quad2ecliptic(Ak,Bk,Ck,k=np.log(2))
if b is None:
pass
else:
"convolve possible:",b
except:
"Failed convolve"
cb = None
break
i += 1
if cb is None:
Area_init = Areas[idxMaxArea]
inc = 1.05#15 iters in area
works = False
Area = Area_init
while Area < 2.*Area_init and not works:
bmaj_min = np.sqrt(Area*4.*np.log(2)/np.pi)
bmaj_max = np.sqrt(Area*4.*np.log(2)/np.pi*3.)
bmaj = np.linspace(bmaj_min,bmaj_max,10)
pa = np.linspace(-90.,90.,10)
for bj in bmaj:
bmin = Area*4.*np.log(2)/np.pi/bj
for p in pa:
cb = (bj,bmin,p)
A1,B1,C1,D,E,F = ecliptic2quadratic(0.,0.,cb[0],cb[1],cb[2])
i = 0
while i < np.size(Areas):
                        #deconvolve
A2,B2,C2,D,E,F = ecliptic2quadratic(0.,0.,beams_array[i,0],beams_array[i,1],beams_array[i,2])
Ak,Bk,Ck = deconvolve(A1,B1,C1,A2,B2,C2)
print Ak,Bk,Ck
try:
b = quad2ecliptic(Ak,Bk,Ck,k=np.log(2))
if b is None:
pass
else:
"convolve possible:",b
except:
"Failed convolve"
cb = None
break
i += 1
if cb is not None:
                        works = True
Area *= inc
else:
print "passed",
return cb
def fftGaussian(A,B,C,X,Y):
D = 4*A*C-B**2
return 2*np.pi/np.sqrt(D)*np.exp(-4*np.pi/D*(-C*X**2 +B*X*Y -A*Y**2))
def gaussian(A,B,C,X,Y):
return np.exp(-A*X**2 - B*X*Y - C*Y**2)
def getRms(casaImage,snr=3.,plot=False):
#one method
ia.open(casaImage)
pixel_array = ia.getchunk().flatten()
ia.close()
#remove some of tail
s = np.std(pixel_array)
    pixel_array = pixel_array[np.abs(pixel_array) <= snr*s]  # s includes signal so greater than rms background
#could repeat
mu,sigma = norm.fit(pixel_array)#should remove those above 3 sigma first to remove tail
print "Image Statistics %s: mu = %.2e Jy/beam, sigma = %.2e Jy/beam"%(casaImage,mu,sigma)
    return sigma
#iterative below
# pixel_array_in = np.copy(pixel_array)
# s = np.nanstd(pixel_array)
# while np.max(pixel_array) > snr*s:
# pixel_array[pixel_array > snr*s] = np.nan
# s = np.nanstd(pixel_array)
# if plot:
# plt.imshow(pixel_array)
# plt.imshow(pixel_array_in[pixel_array_in > snr*s]*0.)
# plt.show
# return s
def stackImages(images,final,weights):
os.system("cp -r %s %s"%(images[0],final))
ia.open(images[0])
array = ia.getchunk()*weights[0]
ia.close()
sum = weights[0]
i = 1
while i < len(images):
ia.open(images[i])
array += ia.getchunk()*weights[i]
ia.close()
sum += weights[i]
i += 1
ia.open(final)
ia.putchunk(array/sum)
ia.close()
#return array/sum
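# stackImages therefore produces a weighted mean of the input images; run()
# below passes weights = 1/rms, so noisier images contribute less to the stack.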
def getImageInfo(image):
ia.open(image)
beam = ia.commonbeam()
shape = ia.shape()
ia.close()
rms = getRms(image)
return beam,shape,rms
def run(images_glob,output_dir):
images = glob.glob(images_glob)
print "Images in:",images
try:
os.makedirs(output_dir)
except:
print "output dir already exists:",output_dir
imageInfo = []
beams = []
rms = []
casaImages = []
idxMaxBeam = 0
idxMaxSize = 0
i = 0
while i < len(images):
if '.fits' in images[i]:
casaImages.append(output_dir+'/'+images[i].replace('.fits','.im'))
if not os.path.exists(casaImages[i]):
importfits(fitsimage=images[i],imagename = casaImages[i])
imageInfo.append(getImageInfo(casaImages[i]))
beams.append(imageInfo[i][0])
rms.append(imageInfo[i][2])
if imageInfo[i][0]['minor']['value'] >= imageInfo[idxMaxBeam][0]['minor']['value']:#just minor axis
idxMaxBeam = i
if imageInfo[i][1][0] >= imageInfo[idxMaxSize][1][0]:#just x axis size
idxMaxSize = i
i += 1
else:
print "wrong fits extension format. Use .fits"
return
cb = findCommonBeam(beams)
print "Common beam: ",cb
print "Regridding to [%d x %d]"%(imageInfo[idxMaxSize][1][0],imageInfo[idxMaxSize][1][1])
print "Should double that"
regridImages = []
regridSmoothedImages = []
i = 0
while i < len(images):
#regrid
regridImages.append(casaImages[i].replace('.im','-regrid.im'))
regridSmoothedImages.append(casaImages[i].replace('.im','-regrid-smoothed.im'))
if not os.path.exists(regridImages[i]):
imregrid(imagename=casaImages[i],template=casaImages[idxMaxSize],output=regridImages[i],axes=[0,1])
if not os.path.exists(regridSmoothedImages[i]):
imsmooth(imagename=regridImages[i],major='%.5farcsec'%(cb[0]),minor='%.5farcsec'%(cb[1]),pa='%.5fdeg'%(cb[2]),outfile=regridSmoothedImages[i],targetres=True,kernel='gauss')
if not os.path.exists(regridSmoothedImages[i]):
os.system("cp -r %s %s"%(regridImages[i],regridSmoothedImages[i]))
i += 1
finalImage=output_dir+'/CombinedImage.im'
    stackImages(regridSmoothedImages,finalImage,1./np.array(rms))
finalFits = finalImage.replace('.im','.fits')
exportfits(imagename=finalImage,fitsimage=finalFits)
print "Combined in:",finalFits
if __name__== '__main__':
run("*.fits","./combinedImages_weighed3")
```
#### File: src/rathings/solve_factor_output.py
```python
from rathings.tec_solver import robust_l2_parallel, robust_l2
from rathings.phase_unwrap import *
import h5py
import pylab as plt
import numpy as np
import astropy.coordinates as ac
import astropy.time as at
import astropy.units as au
import sys
if sys.hexversion >= 0x3000000:
def str_(s):
return str(s,'utf-8')
else:
def str_(s):
return str(s)
def calc_phase(tec, freqs, cs = 0.):
'''Return the phase from tec and CS.
`tec` : `numpy.ndarray`
tec in TECU of shape (num_times,)
`freqs` : `numpy.ndarray`
freqs in Hz of shape (num_freqs,)
`cs` : `numpy.ndarray` or float (optional)
Can be zero to disregard (default)
'''
TECU=1e16
phase = 8.44797256e-7*TECU * np.multiply.outer(1./freqs,tec) + cs
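    # Worked example: 0.1 TECU of differential TEC at 150 MHz gives roughly
    # 8.448e-7 * 1e16 * 0.1 / 1.5e8 ~ 5.6 rad of dispersive phase (before wrapping).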
return phase
def print_name(name, obj):
'''Print all parts of hdf5. Use with visititems.'''
#if "facet_patch" not in name or 'dir' in name:
# return
#if not isinstance(obj,h5py.Group):
# return
print("{}".format(name))
def plot_along_time(name, obj, freqs=None,start_time=0, stop_time=49, reference_ant = 'CS005HBA0',num_threads = None, ants=None):
"""Plot the phase for each antenna and time stamps between start_time and stop_time"""
#if "facet_patch" not in name or 'dir' in name:
# return
assert stop_time > start_time or stop_time == -1
if not isinstance(obj,h5py.Group):
return
print("Plotting {}".format(name))
phase = obj['phase'][...]
phase[phase==0] = np.nan
nant = phase.shape[0]
nfreq = phase.shape[1]
ntime = phase.shape[2]
N = stop_time - start_time
n_per_axis = np.ceil(np.sqrt(nant))
f1 = plt.figure(figsize=(n_per_axis*4,n_per_axis*3))
f2 = plt.figure(figsize=(n_per_axis*4,n_per_axis*3))
for i in range(nant):
#res = tec_solver.l1_lpsolver_parallel(phase[i,::5,start_time:stop_time],freqs[::5],fout=0.5,solve_cs=True,num_threads=16)
res = robust_l2_parallel(phase[i,:,start_time:stop_time],freqs[:],solve_cs=True,num_threads=num_threads)
ax = f1.add_subplot(n_per_axis,n_per_axis,i+1)
ax2 = f2.add_subplot(n_per_axis,n_per_axis,i+1)
#tecs = [tec for (_,tec,_) in res]
ax.plot(freqs,phase[i,:,start_time:stop_time])
t = 0
for tec,cs in res:
ax.plot(freqs,np.angle(np.exp(1j*calc_phase(tec,freqs,cs=cs))),ls='--',lw=2.)
ax2.scatter(t,cs)
t += 1
ax2.set_xlabel("time")
ax2.set_ylabel("scalar phase")
ax.set_ylim([-np.pi,np.pi])
ax.set_xlabel("Freq")
ax.set_ylabel("Phase [rad]")
if ants is not None:
ax.set_title(str_(ants[i]))
ax2.set_title(str_(ants[i]))
#ax.plot(times[start_time:stop_time], tecs)
plt.tight_layout()
plt.show()
def solve_patch(name, obj, freqs = None, data_dict=None, start_time=0, stop_time=49, reference_ant = 'CS005HBA0',num_threads = None):
'''The function that gets run on each group object in hdf5. Solves dtec and puts into data_dict for creation of datapack. Used with visititems and partial.'''
#if "facet_patch" not in name or 'dir' in name:
# return
assert stop_time > start_time or stop_time == -1
if not isinstance(obj,h5py.Group):
return
print("Solving {}".format(name))
phase = obj['phase'][...]#ant,freq,time
phase[phase==0] = np.nan
nant = phase.shape[0]
nfreq = phase.shape[1]
ntime = phase.shape[2]
if stop_time==-1:
stop_time = ntime
dir = obj['dir']#icrs pointing
data_dict['directions'].append(dir)
data_dict['patch_names'].append(name)
dtec = np.zeros([nant,ntime],dtype=float)
for i in range(nant):
res = robust_l2_parallel(phase[i,:,start_time:stop_time],freqs[:],solve_cs=True,num_threads=num_threads)
tecs = [tec for (tec,_) in res]
dtec[i,:] = np.array(tecs)
data_dict['dtec'].append(dtec)
def solve_dtec(data_file,datapack_file=None,start_time=0,stop_time=-1,reference_antenna='CS005HBA0',num_threads = None):
'''Create the datapack of the entire observation stored in data_file.
data_file is created by the transfer2hdf5 program which turns a facet
calibration into managable data format.
Creates a datapack in the same directory unless datapack_file is given.
start_time, end_time give the indices along timeaxis to do solve on.
Use 0 and -1 for full time. Reference_antenna is the name of lofar
station to use as reference.'''
from ionotomo.astro.real_data import DataPack
from ionotomo.astro.radio_array import RadioArray
h = h5py.File(data_file,'r')
freqs = h['freq'][...]
ants = h['ant'][...]
times = at.Time(h['times'][...],format='mjd',scale='tai')
radio_array = RadioArray(array_file=RadioArray.lofar_array)
order = []
for lab in ants:
idx = radio_array.get_antenna_idx(str_(lab))
order.append(idx)
antennas = radio_array.get_antenna_locs()[order]
antenna_labels = [str_(s) for s in ants]
data_dict = {'radio_array':radio_array,'antennas':antennas,'antenna_labels':antenna_labels,'times':times,'timestamps':times.isot,'directions':[],'patch_names':[],'dtec':[]}
from functools import partial
#h.visititems(partial(plot_along_time,freqs=freqs,start_time=start_time,stop_time=stop_time))
h.visititems(partial(solve_patch,freqs=freqs,data_dict=data_dict,start_time=start_time,stop_time=stop_time,num_threads =num_threads))
dirs = np.array(data_dict['directions'])
dirs = ac.SkyCoord(dirs[:,0]*au.rad,dirs[:,1]*au.rad,frame='icrs')
data_dict['directions'] = dirs
data_dict['dtec'] = np.stack(data_dict['dtec'],axis=-1)
datapack = DataPack(data_dict=data_dict)
try:
datapack.set_reference_antenna(reference_antenna)
except:
datapack.set_reference_antenna(antenna_labels[0])
if datapack_file is None:
datapack.save(data_file.replace('.hdf5','-datapack.hdf5'))
else:
datapack.save(datapack_file)
def plot_dtec(data_file,datapack_file=None,start_time=0,stop_time=-1,reference_antenna='CS005HBA0',num_threads = None):
'''Create the datapack of the entire observation stored in data_file.
data_file is created by the transfer2hdf5 program which turns a facet
calibration into managable data format.
Creates a datapack in the same directory unless datapack_file is given.
start_time, end_time give the indices along timeaxis to do solve on.
Use 0 and -1 for full time. Reference_antenna is the name of lofar
station to use as reference.'''
from ionotomo.astro.real_data import DataPack
from ionotomo.astro.radio_array import RadioArray
h = h5py.File(data_file,'r')
freqs = h['freq'][...]
ants = h['ant'][...]
from functools import partial
h.visititems(partial(plot_along_time,freqs=freqs,start_time=start_time,stop_time=stop_time,num_threads=num_threads,ants = ants))
if __name__=='__main__':
#solve_dtec('../../goods-n.hdf5',start_time=0,stop_time=20,num_threads = 16)
plot_dtec('../../goods-n.hdf5',start_time=0,stop_time=16,num_threads = 16)
``` |
{
"source": "Joshuaalbert/rayintegral_kernels",
"score": 3
} |
#### File: rayintegral_kernels/tests/test_misc.py
```python
from .common_setup import *
import tensorflow as tf
from ..misc import forward_gradients, compute_d2K
from tensorflow.python.ops.parallel_for.gradients import jacobian
def test_forward_gradients(tf_session):
with tf_session.graph.as_default():
#
x = tf.constant([1.,2.])
y = x*x + x
g_true = 2.*x + 1.
g = forward_gradients(y,x)[0]
assert np.all(tf_session.run(g) == tf_session.run(g_true))
def test_compute_d2K(tf_session):
with tf_session.graph.as_default():
#
x = tf.ones((2,3))
y = tf.reduce_sum(x*x,axis=-1)
H = compute_d2K(y,x)
print(tf_session.run(H).shape)
```
#### File: rayintegral_kernels/tests/test_project.py
```python
import tensorflow as tf
from .common_setup import *
def test_tensorflow(tf_session):
with tf_session.graph.as_default():
assert tf_session.run(tf.constant(0)) == 0
``` |
{
"source": "Joshua-amare/Word-Analyzer",
"score": 2
} |
#### File: Joshua-amare/Word-Analyzer/genFile.py
```python
def genFile(name):
return open(name, "w+")
def writeFile(f , content):
f.write(content)
```
#### File: Joshua-amare/Word-Analyzer/wordFilter.py
```python
def filterStopWords(text):
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import nltk
nltk.download('punkt')
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(text)
return [w for w in word_tokens if not w in stop_words]
def filterSpellCheckedWords(text):
#import enchant
import nltk
from nltk.corpus import wordnet
nltk.download('wordnet')
#d = enchant.Dict("en_US")
return [w for w in text if wordnet.synsets(w)]
def filterSmallWords(text):
return [w for w in text if len(w)>3]
def filterNonAlphaChars(text):
import re
return re.compile(r'[^a-zA-Z]+',re.UNICODE).split(str(text))
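# Note: filterNonAlphaChars is typically handed a list of tokens, so str(text)
# splits the list's printed representation; tokens survive because brackets,
# quotes and commas are all non-alphabetic, but empty strings can appear at the edges.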
``` |
{
"source": "Joshua-Anderson/controller",
"score": 2
} |
#### File: api/models/domain.py
```python
from django.db import models
from django.conf import settings
from api.models import AuditedModel
class Domain(AuditedModel):
owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)
app = models.ForeignKey('App', on_delete=models.CASCADE)
domain = models.TextField(
blank=False, null=False, unique=True,
error_messages={
'unique': 'Domain is already in use by another application'
}
)
certificate = models.ForeignKey(
'Certificate',
on_delete=models.SET_NULL,
blank=True,
null=True
)
class Meta:
ordering = ['domain', 'certificate']
def save(self, *args, **kwargs):
app = str(self.app)
domain = str(self.domain)
# get config for the service
config = self._load_service_config(app, 'router')
# See if domains are available
if 'domains' not in config:
config['domains'] = ''
# convert from string to list to work with and filter out empty strings
domains = [_f for _f in config['domains'].split(',') if _f]
if domain not in domains:
domains.append(domain)
config['domains'] = ','.join(domains)
self._save_service_config(app, 'router', config)
# Save to DB
return super(Domain, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
app = str(self.app)
domain = str(self.domain)
# get config for the service
config = self._load_service_config(app, 'router')
# See if domains are available
if 'domains' not in config:
config['domains'] = ''
# convert from string to list to work with and filter out empty strings
domains = [_f for _f in config['domains'].split(',') if _f]
if domain in domains:
domains.remove(domain)
config['domains'] = ','.join(domains)
self._save_service_config(app, 'router', config)
        # Detach cert, updates k8s
if self.certificate:
self.certificate.detach(domain=str(self.domain))
# Delete from DB
return super(Domain, self).delete(*args, **kwargs)
def __str__(self):
return self.domain
``` |
{
"source": "Joshua-Anderson/fprime-tools",
"score": 2
} |
#### File: models/serialize/test_types.py
```python
import json
from collections.abc import Iterable
import pytest
from fprime.common.models.serialize.array_type import ArrayType
from fprime.common.models.serialize.bool_type import BoolType
from fprime.common.models.serialize.enum_type import EnumType
from fprime.common.models.serialize.numerical_types import (
F32Type,
F64Type,
I8Type,
I16Type,
I32Type,
I64Type,
U8Type,
U16Type,
U32Type,
U64Type,
)
from fprime.common.models.serialize.serializable_type import SerializableType
from fprime.common.models.serialize.string_type import StringType
from fprime.common.models.serialize.time_type import TimeBase, TimeType
from fprime.common.models.serialize.type_base import BaseType, ValueType
from fprime.common.models.serialize.type_exceptions import (
AbstractMethodException,
DeserializeException,
NotInitializedException,
TypeMismatchException,
TypeRangeException,
)
PYTHON_TESTABLE_TYPES = [
True,
False,
-1,
0,
300,
"abc",
"True",
"False",
3.1412,
(0, 1),
(True, False),
[],
[0],
{},
{"abc": 123},
{2, 4, 3},
]
def valid_values_test(type_input, valid_values, sizes, extras=None):
""" Tests to be run on all types """
if not isinstance(sizes, Iterable):
sizes = [sizes] * len(valid_values)
# Should be able to instantiate a blank type, but not serialize it until a value has been supplied
if not extras:
extras = [[]] * len(valid_values)
instantiation = type_input(*extras[0])
with pytest.raises(NotInitializedException):
instantiation.serialize()
# Should be able to get a JSONable object that is dumpable to a JSON string
jsonable = instantiation.to_jsonable()
json.loads(json.dumps(jsonable))
# Run on valid values
for value, size, extra in zip(valid_values, sizes, extras):
instantiation = type_input(*extra, val=value)
assert instantiation.val == value
assert instantiation.getSize() == size
# Check assignment by value
by_value = type_input(*extra)
by_value.val = value
assert by_value.val == instantiation.val, "Assignment by value has failed"
assert by_value.getSize() == size
# Check serialization and deserialization
serialized = instantiation.serialize()
for offset in [0, 10, 50]:
deserializer = type_input(*extra)
deserializer.deserialize((b" " * offset) + serialized, offset)
assert instantiation.val == deserializer.val, "Deserialization has failed"
assert deserializer.getSize() == size
def invalid_values_test(
type_input, invalid_values, exception_class=TypeMismatchException
):
""" Check invalid values for all types """
for item in invalid_values:
# Constructor initialization
with pytest.raises(exception_class):
instantiation = type_input(item)
# Value initialization
with pytest.raises(exception_class):
instantiation = type_input()
instantiation.val = item
# Deserialization problems required space
for offset in [0, 10, 50]:
with pytest.raises(DeserializeException):
instantiation = type_input()
instantiation.deserialize(b" " * offset, offset)
def ser_deser_time_test(t_base, t_context, secs, usecs):
"""
Test serialization/deserialization of TimeType objects.
This test function creates a time type object with the given parameters and
    then serializes it, deserializes the buffer into a second object, and asserts
    that the two objects match field by field.
    Args:
        t_base (int): Time base for the new time type object
        t_context (int): Time context for the new time type object
        secs (int): Seconds value for the new time type object
        usecs (int): Microseconds value for the new time type object
    Raises:
        AssertionError: If any deserialized field does not match the input
"""
val = TimeType(t_base, t_context, secs, usecs)
buff = val.serialize()
val2 = TimeType()
val2.deserialize(buff, 0)
assert val2.timeBase.value == t_base
assert val2.timeContext == t_context
assert val2.seconds == secs
assert val2.useconds == usecs
def test_boolean_nominal():
""" Tests the nominal cases of a BoolType """
valid_values_test(BoolType, [True, False], 1)
def test_boolean_off_nominal():
""" Tests the nominal cases of a BoolType """
invalid_values_test(
BoolType, filter(lambda item: not isinstance(item, bool), PYTHON_TESTABLE_TYPES)
)
def test_int_types_nominal():
""" Tests the integer types """
for type_input, size in [(I8Type, 1), (I16Type, 2), (I32Type, 4), (I64Type, 8)]:
total = pow(2, (size * 8) - 1)
valid_values_test(type_input, [0, -1, 1, -total, total - 1], size)
def test_int_types_off_nominal():
""" Tests the integer off nominal types """
for type_input, size in [(I8Type, 1), (I16Type, 2), (I32Type, 4), (I64Type, 8)]:
total = pow(2, (size * 8) - 1)
invalid_values_test(
type_input,
filter(lambda item: not isinstance(item, int), PYTHON_TESTABLE_TYPES),
)
invalid_values_test(
type_input, [-total - 1, total, -total * 35, total * 35], TypeRangeException
)
def test_uint_types_nominal():
""" Tests the integer types """
for type_input, size in [(U8Type, 1), (U16Type, 2), (U32Type, 4), (U64Type, 8)]:
max_int = pow(2, (size * 8)) - 1
valid_values_test(type_input, [0, 1, max_int - 1, max_int], size)
def test_uint_types_off_nominal():
""" Tests the integer off nominal types """
for type_input, size in [(U8Type, 1), (U16Type, 2), (U32Type, 4), (U64Type, 8)]:
max_int = pow(2, (size * 8)) - 1
invalid_values_test(
type_input,
filter(lambda item: not isinstance(item, int), PYTHON_TESTABLE_TYPES),
)
invalid_values_test(
type_input,
[-1, -2, max_int + 1, max_int * 35, -max_int],
TypeRangeException,
)
def test_float_types_nominal():
""" Tests the integer types """
valid_values_test(F32Type, [0.31415000557899475, 0.0, -3.141590118408203], 4)
valid_values_test(F64Type, [0.31415000557899475, 0.0, -3.141590118408203], 8)
def test_float_types_off_nominal():
""" Tests the integer off nominal types """
invalid_values_test(
F32Type, filter(lambda item: not isinstance(item, float), PYTHON_TESTABLE_TYPES)
)
invalid_values_test(
F64Type, filter(lambda item: not isinstance(item, float), PYTHON_TESTABLE_TYPES)
)
def test_enum_type():
"""
Tests the EnumType serialization and deserialization
"""
members = {"MEMB1": 0, "MEMB2": 6, "MEMB3": 9}
val1 = EnumType("SomeEnum", members, "MEMB3")
buff = val1.serialize()
val2 = EnumType("SomeEnum", members)
val2.deserialize(buff, 0)
assert val1.val == val2.val
def check_cloned_member_list(members1, members2):
""" Check member list knowing direct compares don't work"""
for tuple1, tuple2 in zip(members1, members2):
assert tuple1[0] == tuple2[0], "Names do not match"
assert tuple1[2] == tuple2[2], "Format strings do not match"
assert tuple1[3] == tuple2[3], "Descriptions do not match"
assert tuple1[1].val == tuple2[1].val, "Values don't match"
def test_serializable_type():
"""
Tests the SerializableType serialization and deserialization
"""
u32Mem = U32Type(1000000)
stringMem = StringType("something to say")
members = {"MEMB1": 0, "MEMB2": 6, "MEMB3": 9}
enumMem = EnumType("SomeEnum", members, "MEMB3")
memList = [
("mem1", u32Mem, ">i"),
("mem2", stringMem, ">H"),
("mem3", enumMem, ">i"),
]
serType1 = SerializableType("ASerType", memList)
buff = serType1.serialize()
serType2 = SerializableType("ASerType", memList)
serType2.deserialize(buff, 0)
check_cloned_member_list(serType1.mem_list, serType2.mem_list)
assert serType1.val == serType2.val
i32Mem = I32Type(-1000000)
stringMem = StringType("something else to say")
members = {"MEMB1": 4, "MEMB2": 2, "MEMB3": 0}
enumMem = EnumType("SomeEnum", members, "MEMB3")
memList = [
("mem1", i32Mem, ">i"),
("mem2", stringMem, ">H"),
("mem3", enumMem, ">i"),
]
serType1 = SerializableType("ASerType", memList)
buff = serType1.serialize()
serType2 = SerializableType("ASerType", memList)
serType2.deserialize(buff, 0)
check_cloned_member_list(serType1.mem_list, serType2.mem_list)
value_dict = {"mem1": 3, "mem2": "abc 123", "mem3": "MEMB1"}
serType1.val = value_dict
assert serType1.val == value_dict
mem_list = serType1.mem_list
memList = [(a, b, c, None) for a, b, c in memList]
check_cloned_member_list(mem_list, memList)
serTypeEmpty = SerializableType("ASerType", [])
assert serTypeEmpty.val == {}
assert serTypeEmpty.mem_list == []
# def test_array_type():
# """
# Tests the ArrayType serialization and deserialization
# """
# extra_ctor_args = [("TestArray", (I32Type, 2, "I DON'T KNOW")), ("TestArray2", (U8Type, 4, "I DON'T KNOW")),
# ("TestArray3", (StringType, 1, "I DON'T KNOW"))]
# values = [[32, 1], [0, 1, 2, 3], ["one"]]
# sizes = [8, 4, 3]
#
# valid_values_test(ArrayType, values, sizes, extra_ctor_args)
def test_time_type():
"""
Tests the TimeType serialization and deserialization
"""
TIME_SIZE = 11
in_no_err_list = [
(TimeBase["TB_NONE"].value, 1, 100, 999999),
(TimeBase["TB_PROC_TIME"].value, 0xFF, 1234567, 2952),
(TimeBase["TB_WORKSTATION_TIME"].value, 8, 1529430215, 12),
(TimeBase["TB_SC_TIME"].value, 231, 1344230277, 123456),
(TimeBase["TB_FPGA_TIME"].value, 78, 10395, 24556),
(TimeBase["TB_DONT_CARE"].value, 0xB3, 12390819, 12356),
]
in_err_list = [
(10, 58, 15345, 0),
(TimeBase["TB_NONE"].value, 1, 3, -1),
(TimeBase["TB_WORKSTATION_TIME"].value, 1, 700000, 1234567),
]
val = TimeType()
size = val.getSize()
assert size == TIME_SIZE
for (t_base, t_context, secs, usecs) in in_no_err_list:
ser_deser_time_test(t_base, t_context, secs, usecs)
for (t_base, t_context, secs, usecs) in in_err_list:
with pytest.raises(TypeRangeException):
ser_deser_time_test(t_base, t_context, secs, usecs)
class Dummy(BaseType):
def serialize(self):
return "serialized"
def deserialize(self, data, offset):
super(Dummy, self).deserialize(data, offset)
return "deserialized"
def getSize(self):
return 0
def to_jsonable(self):
return {'name': 'dummy'}
def test_base_type():
with pytest.raises(TypeError) as excinfo:
BaseType()
assert "Can't instantiate abstract class" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo2:
ValueType()
assert "Can't instantiate abstract class" in str(excinfo2.value)
d = Dummy()
assert d.serialize() == "serialized"
assert d.getSize() == 0
with pytest.raises(AbstractMethodException):
# In the Dummy class above, the deserialize method
# is set to call the super class, which is just the
# raw abstract method, which is the only way to
# raise an `AbstractMethodException`.
d.deserialize("a", 0)
``` |
{
"source": "JoshuaAN/Shooter-Optimal-Control",
"score": 3
} |
#### File: JoshuaAN/Shooter-Optimal-Control/fast_mpc.py
```python
import math
import pylab as plt
from constants import constants
import collections
import numpy as np
def clamp(x, low, high):
return min(high, max(low, x))
# Given an initial velocity and time, and an ending velocity and time, solve for
# the optimal velocity at the time midpoint (t0 + t1) / 2.
# TODO: add Lagrange multipliers to show the full extent of the recursive generation, since this type of heuristic may not work for future problems
def solve_midpoint_velocity(states, v0, v1, t0, t1, reference, depth):
if depth == 0:
return
dt = (t1 - t0) / 2
velocity = lambda x, u, t: (x - u / constants.kv) * math.exp(-constants.kv / constants.ka * t) + u / constants.kv
min_velocity = velocity(v0, -12, dt)
max_velocity = velocity(v0, 12, dt)
v = clamp(reference, min_velocity, max_velocity)
states[(t0 + t1) / 2] = v
solve_midpoint_velocity(states, v0, v, t0, (t0 + t1) / 2, reference, depth - 1)
solve_midpoint_velocity(states, v, v1, (t0 + t1) / 2, t1, reference, depth - 1)
def solve_trajectory(initial_velocity, reference):
T = 5
search_depth = 7
    states = {0: initial_velocity, T: reference}
solve_midpoint_velocity(states, initial_velocity, reference, 0, T, reference, search_depth)
sorted_states = collections.OrderedDict(sorted(states.items(), key=lambda kv: kv[0]))
# TODO: make key search more efficient b/c this seems stupid
items = []
keys = []
for item in sorted_states.items():
keys.append(item[0])
items.append(item[1])
plt.plot(keys, items, "--", label="Fast MPC")
plt.legend()
```
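A minimal sketch of the closed-form step response the midpoint heuristic above relies on; `kv` and `ka` stand in for the repo's `constants` module, and their values are assumptions.
```python
import math
kv, ka = 0.02, 0.01  # hypothetical flywheel constants (stand-ins for constants.kv / constants.ka)
def velocity(v0, u, t):
    # Solution of V = kv * v + ka * a for a constant input voltage u
    return (v0 - u / kv) * math.exp(-kv / ka * t) + u / kv
dt = 2.5                    # half of the 5 s horizon used by solve_trajectory
v0, reference = 0.0, 300.0
lo, hi = velocity(v0, -12, dt), velocity(v0, 12, dt)  # reachable velocity band at the midpoint
midpoint = min(hi, max(lo, reference))                # same clamp as solve_midpoint_velocity
print(lo, hi, midpoint)
```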
#### File: JoshuaAN/Shooter-Optimal-Control/transcription_mpc.py
```python
import pylab as plt
from casadi import *
from constants import constants
def solve_trajectory(initial_velocity, reference):
N = 100
T = 5
dt = T/N
solver = Opti()
# State variable - velocity for a flywheel
X = solver.variable(N+1)
# Control variable - voltage for flywheel
U = solver.variable(N)
# Dynamics (solution to differential equation V = kv * v + ka * a)
x_next = lambda x, u: vertcat(
((x - u / constants.kv) * exp(-constants.kv / constants.ka * dt) + u / constants.kv)
)
for k in range(N):
solver.subject_to(X[k+1]==x_next(X[k],U[k]))
    # Cost function - minimize tracking error; control effort is not penalized because hard voltage limits are enforced as constraints below
J = 0
for k in range(N+1):
J += (reference - X[k]) * (reference - X[k])
solver.minimize(J)
# Constraints
    solver.subject_to(X[0] == initial_velocity)
solver.subject_to(solver.bounded(-12, U, 12))
# Solve non-linear problem
solver.solver("ipopt")
sol = solver.solve()
plt.plot([0,T],[reference,reference], label="Reference")
plt.plot(np.linspace(0,N,(N+1)) * dt, sol.value(X), "--", label="Transcription MPC")
plt.legend()
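# --- Illustrative driver (not part of the original module) ---
# Both this module and fast_mpc expose solve_trajectory(initial_velocity, reference)
# and draw onto the shared pylab figure, so the two solutions can be compared on
# one plot. The values below are arbitrary examples.
if __name__ == "__main__":
    import fast_mpc
    solve_trajectory(0.0, 300.0)          # transcription solution + reference line
    fast_mpc.solve_trajectory(0.0, 300.0)
    plt.xlabel("time (s)")
    plt.ylabel("velocity")
    plt.show()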
``` |
{
"source": "joshuaarmah/flower-project",
"score": 3
} |
#### File: joshuaarmah/flower-project/flower_project.py
```python
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import sklearn as sk
from sklearn import neighbors, datasets
from sklearn.linear_model import LinearRegression
from scipy import stats
import pylab as pl
from matplotlib.colors import ListedColormap
# import
pl.rcParams['figure.figsize'] = (10, 7)
seaborn.set()
"""## Dataset"""
# import some data to play with
iris = datasets.load_iris()
# dataset description
print(iris.DESCR)
## Explore the data
from sklearn.datasets import load_iris
iris = load_iris()
n_samples, n_features = iris.data.shape
test_data = iris.data + np.random.rand(n_samples, n_features)
test_target = iris.target
# print(iris.keys())
print('Dataset shape is', iris.data.shape)
print('The dataset has', n_samples, 'records of IRIS members.')
print('Each record has', n_features, 'features.')
print('The features are', iris.feature_names)
print('The dataset has', iris.target.shape, ' records of IRIS groups.')
print('The IRIS group names are', iris.target_names)
np.bincount(iris.target)
# maximum values
iris.data.max(axis=0)
# minimum values
iris.data.min(axis=0)
# mean values
iris.data.mean(axis=0)
#Format labels to color bar with Target Names
formatter = plt.FuncFormatter(lambda i, *args: iris.target_names[int(i)])
def plot2features(x_index, y_index):
plt.scatter(iris.data[:, x_index], iris.data[:, y_index],
c=iris.target, cmap=plt.cm.get_cmap('RdYlBu', 3))
plt.colorbar(ticks=[0, 1, 2], format=formatter)
plt.clim(-0.5, 2.5)
plt.xlabel(iris.feature_names[x_index])
plt.ylabel(iris.feature_names[y_index]);
plot2features(0, 1)
"""## Build the model"""
X_new = [2.5, 3, 1, 1]
def predict_new(clf, X_pred=X_new):
result = clf.predict([X_pred, ])
print('The new is a', iris.target_names[result])
print(iris.target_names)
print(clf.predict_proba([X_pred, ]))
X = iris.data
y = iris.target
h = .02
# Build The Model
knn = neighbors.KNeighborsClassifier(n_neighbors=3, weights='uniform')
"""## Train the model"""
# Use Library to split data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
X_train.shape, X_test.shape
# fit the model
knn.fit(X_train, y_train)
"""## Evaluate model"""
y_pred = knn.predict(X_test)
print("{0} / {1} correct".format(np.sum(y_test == y_pred), len(y_test)))
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_pred)
knn.score(X_test, y_test)
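# --- Illustration (not part of the original script) ---
# Exercise the helper defined above on its default hypothetical sample X_new.
predict_new(knn)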
``` |
{
"source": "joshuaavalon/AvalonYoutubeAgent.bundle",
"score": 2
} |
#### File: Contents/Code/__init__.py
```python
from log import PlexLog
from show import get_episode_json, get_show_json, set_episode, \
set_episode_cover, set_show
from utils import convert_date, create_id
version = "1.0.1"
# noinspection PyClassHasNoInit,PyShadowingNames
class AvalonYoutubeTvAgent(Agent.TV_Shows):
name = "Avalon Youtube TV Agent"
ver = version
primary_provider = True
languages = [Locale.Language.NoLanguage]
accepts_from = ["com.plexapp.agents.localmedia"]
def search(self, results, media, lang, manual):
PlexLog.debug("=================== Search Start ===================")
PlexLog.debug("%s (%s)" % (self.name, self.ver))
PlexLog.debug("Plex version: %s" % Platform.ServerVersion)
show = get_show_json(media)
if show is None:
return
title = show.get("title")
if title is None:
PlexLog.error("Missing or invalid title: %s" % str(show))
return
aired = convert_date(show.get("aired"))
year = aired.year if aired is not None else 0
# Plex throws exception that have "/" in ID
mid = create_id(title, year)
result = MetadataSearchResult(id=mid,
name=title,
year=year,
lang=lang,
score=100)
results.Append(result)
PlexLog.debug("=================== Search end ===================")
def update(self, metadata, media, lang, force):
PlexLog.debug("=================== Update Start ===================")
PlexLog.debug("%s (%s)" % (self.name, self.ver))
PlexLog.debug("Plex version: %s" % Platform.ServerVersion)
show = get_show_json(media)
if show is None:
return
set_show(metadata, show)
for season in media.seasons:
for episode in media.seasons[season].episodes:
episode_metadata = metadata.seasons[season].episodes[episode]
model = get_episode_json(media, season, episode)
if model is not None:
set_episode(episode_metadata, model)
set_episode_cover(episode_metadata, media, season, episode)
PlexLog.debug("=================== Update end ===================")
``` |
{
"source": "joshuaavalon/cloudflare-ddns",
"score": 2
} |
#### File: joshuaavalon/cloudflare-ddns/run.py
```python
import json
from logging import getLogger
from logging.config import dictConfig
from os import environ
from pathlib import Path
from typing import Optional
from cloudflare_ddns import CloudflareDDNS, Configuration, SiteConfiguration
log_level = environ.get("LOG_LEVEL", "INFO")
dictConfig({
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "%(asctime)s [%(levelname)-8s] %(name)-12s: %(message)s"
},
},
"handlers": {
"default": {
"level": "NOTSET",
"formatter": "standard",
"class": "logging.StreamHandler",
}
},
"loggers": {
"": {
"handlers": ["default"],
"level": "WARNING",
"propagate": True
},
"cloudflare_ddns": {
"handlers": ["default"],
"level": log_level,
"propagate": False
},
"__main__": {
"handlers": ["default"],
"level": log_level,
"propagate": False
}
}
})
logger = getLogger(__name__)
def main():
config: Optional[Configuration] = load_from_file()
if config is None:
config = load_from_env()
if config is None:
logger.error("Fail to any configs")
return
CloudflareDDNS(config).update()
def load_from_file() -> Optional[Configuration]:
config_path = environ.get("CONFIG_PATH")
if config_path is None:
logger.debug("CONFIG_PATH is not set")
return None
path = Path(config_path)
if path.is_file():
with open(path, "r", encoding="utf-8") as file:
try:
data = json.load(file)
return Configuration.from_dict(data)
except (TypeError, ValueError, IOError):
logger.exception("Fail to load from file")
else:
logger.warning(f"{path.absolute()} is not a file")
return None
def load_from_env() -> Optional[Configuration]:
try:
ttl = environ.get("TTL")
ttl = 1 if ttl is None else int(ttl)
proxied = environ.get("PROXIED", "")
if proxied is None:
proxied = True
else:
proxied = proxied.lower() in ("yes", "true", "1")
site = SiteConfiguration(
proxied=proxied,
ttl=ttl,
email=environ.get("EMAIL"),
api_key=environ.get("API_KEY"),
zone=environ.get("ZONE"),
domain=environ.get("DOMAIN"),
)
return Configuration(config=[site])
except (TypeError, ValueError):
logger.exception("Fail to load from environment")
return None
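# --- Illustration (not part of the original script) ---
# load_from_env() reads exactly the variable names below; all values are hypothetical.
def _example_env_config() -> Optional[Configuration]:
    environ.update({
        "EMAIL": "admin@example.com",
        "API_KEY": "hypothetical-api-key",
        "ZONE": "example.com",
        "DOMAIN": "home.example.com",
        "TTL": "120",
        "PROXIED": "true",
    })
    return load_from_env()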
if __name__ == "__main__":
main()
``` |
{
"source": "joshuaavalon/file_hash",
"score": 2
} |
#### File: file_hash/file_hash/worker.py
```python
from logging import getLogger
from multiprocessing import JoinableQueue, Process
from pathlib import Path
from typing import Optional
from file_hash.algorithm import Algorithm
from file_hash.report import Record, Report
from file_hash.serialize import HashFileHandler
__all__ = ["HashingWorker", "HashValidationWorker", "MissingFileWorker"]
logger = getLogger("hasher")
class PathWorker(Process):
def __init__(self, queue: JoinableQueue, report: Optional[Report]):
super().__init__()
self.queue = queue
self.report = report
self.handler = HashFileHandler()
def run(self):
while True:
path = self.queue.get()
if path is None:
self.queue.task_done()
break
try:
self._run(path)
except IOError as e:
logger.error(f"I/O error: {e}")
self.report.append(Record(path=path, error=str(e)))
except Exception as e:
logger.exception(f"Unexpected error: {e}")
raise e
finally:
self.queue.task_done()
def _run(self, path: Path):
raise NotImplementedError()
class HashingWorker(PathWorker):
def __init__(self, queue: JoinableQueue,
algorithm: Algorithm,
dry_run: bool,
report: Optional[Report]):
super().__init__(queue, report)
self.algorithm = algorithm
self.dry_run = dry_run
def _run(self, path: Path):
file_hash = self.algorithm.hash(path)
self.handler.save(file_hash, path, self.dry_run)
self.report.append(Record(path=path, hash=file_hash))
class HashValidationWorker(PathWorker):
def _run(self, path: Path):
hashes = self.handler.load(path)
for file_hash in hashes:
algorithm = Algorithm.new(file_hash.algorithm)
full_path = path.absolute()
name = algorithm.name
if algorithm.hash(path) == file_hash:
logger.info(f"{name} hash of {full_path} is valid")
self.report.append(Record(path=path, hash=file_hash))
else:
message = f"{name} hash of {full_path} is invalid"
logger.warning(message)
record = Record(path=path, hash=file_hash, error=message)
self.report.append(record)
class MissingFileWorker(PathWorker):
def _run(self, path: Path):
file_path: Path = path.parent.joinpath(path.stem)
if file_path.exists():
return
file_hash = self.handler.load(path, exact=True)[0]
message = f"{path.absolute()} exists " \
f"but {file_path.absolute()} does not exist!"
logger.warning(message)
self.report.append(Record(path=path, hash=file_hash, error=message))
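# --- Illustration (not part of the original module) ---
# A minimal driver for the workers above: one sentinel (None) per worker ends its
# loop, and queue.join() blocks until every queued path has been processed.
# Algorithm.new("sha256") and Report() are assumptions about those constructors.
def example_hash_files(paths, workers=2):
    queue = JoinableQueue()
    report = Report()
    algorithm = Algorithm.new("sha256")
    procs = [HashingWorker(queue, algorithm, dry_run=True, report=report)
             for _ in range(workers)]
    for proc in procs:
        proc.start()
    for path in paths:
        queue.put(Path(path))
    for _ in procs:
        queue.put(None)
    queue.join()
    return report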
``` |
{
"source": "joshuaavalon/kotori",
"score": 2
} |
#### File: kotori/kotori/config.py
```python
import json
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from os.path import splitext
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from PIL import Image
from ruamel.yaml import YAML
from kotori.error import ConfigError
__all__ = [
"ItemKey", "RouteConfig", "TransformConfig", "StorageConfig", "Config",
"ConfigLoader", "DictConfigLoader", "JsonConfigLoader", "YamlConfigLoader"
]
@dataclass
class ItemKey:
path: str
key_path: str = field(init=False)
key: str = field(init=False)
transform: str = field(init=False)
suffix: str = field(init=False)
folder: str = field(init=False)
name: str = field(init=False)
format: str = field(init=False)
def __post_init__(self):
if self.path.endswith("/"):
raise ValueError("path cannot end with /")
path, suffix = splitext(self.path)
parts: List[str] = list(filter(None, path.split("/")))
self.format = Image.registered_extensions().get(suffix)
if self.format is None:
raise ValueError("Unknown format")
if len(parts) < 2:
raise ValueError("Too few arguments")
self.transform = parts[0]
key_parts = parts[1:]
self.key = "/".join(key_parts)
self.key_path = f"/{self.key}"
self.suffix = suffix
self.name = key_parts.pop()
self.folder = f"/{'/'.join(key_parts)}"
@dataclass
class SaveConfig:
format: str
options: Dict[str, Any]
@dataclass
class RouteConfig:
storage: str
transform: Union[bool, List[str], str] = False
expire: Optional[int] = None
save: Dict[str, Dict[str, Any]] = field(default_factory=dict)
@dataclass
class TransformConfig:
type: str
options: List[str] = field(default_factory=list)
@staticmethod
def from_query(query: str) -> "TransformConfig":
parts = [t.strip() for t in query.split("_")]
return TransformConfig(type=parts[0], options=parts[1:])
@staticmethod
    def from_queries(queries: str) -> List["TransformConfig"]:
        # Return a list (not a generator) to match the annotated return type.
        return [TransformConfig.from_query(query.strip()) for query in queries.split(",")]
@dataclass
class StorageConfig:
type: str
options: Dict[str, Any] = field(default_factory=dict)
@dataclass
class Config:
storage: Dict[str, StorageConfig]
transform: Dict[str, List[TransformConfig]]
route: List[Tuple[str, RouteConfig]]
cache: Dict[str, Any]
def storage_of(self, key: ItemKey) -> StorageConfig:
route = self.route_of(key)
return self.storage[route.storage]
def route_of(self, key: ItemKey) -> RouteConfig:
for route in self.route:
pattern, config = route
if re.search(pattern, key.path) is not None:
return config
raise ConfigError(f"Cannot find config for {key.path}")
def transforms_of(self, key: ItemKey) -> List[TransformConfig]:
if key.transform in self.transform.keys():
return self.transform[key.transform]
return TransformConfig.from_queries(key.transform)
def allow_transform(self, key: ItemKey) -> bool:
route = self.route_of(key)
if not route.transform:
return False
if isinstance(route.transform, bool):
return True
if isinstance(route.transform, str):
transforms = [route.transform]
else:
transforms = route.transform
if key.transform in self.transform.keys():
return key.transform in transforms
configs = TransformConfig.from_queries(key.transform)
for config in configs:
if config.type not in transforms:
return False
return True
class ConfigLoader(ABC):
loaders: Dict[str, Type["ConfigLoader"]] = {}
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
for suffix in cls.support_suffixes():
if suffix not in cls.loaders:
cls.loaders[suffix] = cls
@classmethod
@abstractmethod
def support_suffixes(cls) -> List[str]:
raise NotImplementedError()
@classmethod
def load(cls, path: Union[Path, str]) -> Config:
if isinstance(path, str):
path = Path(path)
suffix = path.suffix
if suffix not in cls.loaders:
raise ConfigError(f"{suffix} is a unknown format")
loader = cls.loaders[suffix]()
config = loader._load(path) # pylint: disable=protected-access
return config
@abstractmethod
def _load(self, path: Path) -> Config:
raise NotImplementedError()
class DictConfigLoader(ConfigLoader):
@classmethod
def support_suffixes(cls) -> List[str]:
return []
def _load(self, path: Path) -> Config:
config = self._load_dict(path)
storage = {}
for name, cfg in config.get("storage", {}).items():
storage[name] = StorageConfig(**cfg)
transform = {}
for name, cfg in config.get("transform", {}).items():
transform[name] = [TransformConfig(**c) for c in cfg]
route = []
for name, cfg in config.get("route", {}).items():
route.append((name, RouteConfig(**cfg)))
return Config(
storage=storage,
transform=transform,
route=route,
cache=config.get("cache", {})
)
@abstractmethod
def _load_dict(self, path: Path) -> Dict[str, Any]:
raise NotImplementedError()
class JsonConfigLoader(DictConfigLoader):
@classmethod
def support_suffixes(cls) -> List[str]:
return [".json"]
def _load_dict(self, path: Path) -> Dict[str, Any]:
with open(path, "r", encoding="utf-8") as file:
return json.load(file)
class YamlConfigLoader(DictConfigLoader):
@classmethod
def support_suffixes(cls) -> List[str]:
return [".yml", ".yaml"]
def _load_dict(self, path: Path) -> Dict[str, Any]:
yaml = YAML(typ="safe")
with open(path, "r", encoding="utf-8") as file:
return yaml.load(file)
```
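A brief sketch of how ItemKey above decomposes a request path; the import mirrors the package layout shown, and the concrete path is hypothetical.
```python
from kotori.config import ItemKey
key = ItemKey("thumb/photos/cat.jpg")
print(key.transform)   # "thumb"
print(key.key)         # "photos/cat"
print(key.folder)      # "/photos"
print(key.name)        # "cat"
print(key.format)      # "JPEG", resolved via PIL's registered extensions
```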
#### File: kotori/kotori/error.py
```python
class ConfigError(ValueError):
def __init__(self, message: str = ""):
super().__init__(f"Config Error: {message}")
``` |
{
"source": "joshuaavalon/pysubs2",
"score": 2
} |
#### File: pysubs2/tests/test_substation.py
```python
from __future__ import unicode_literals
from textwrap import dedent
from pysubs2 import SSAFile, SSAEvent, SSAStyle, make_time, Color
from pysubs2.substation import color_to_ass_rgba, color_to_ssa_rgb, ass_rgba_to_color, ssa_rgb_to_color
from nose.tools import assert_raises
import sys
SIMPLE_ASS_REF = """
[Script Info]
; Script generated by pysubs2
; https://pypi.python.org/pypi/pysubs2
WrapStyle: 0
ScaledBorderAndShadow: yes
Collisions: Normal
My Custom Info: Some: Test, String.
ScriptType: v4.00+
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
Style: Default,Arial,20.0,&H00FFFFFF,&H000000FF,&H00000000,&H00000000,0,0,0,0,100.0,100.0,0.0,0.0,1,2.0,2.0,2,10,10,10,1
Style: left,Arial,20.0,&H00FFFFFF,&H000000FF,&H00000000,&H00000000,-1,0,0,0,100.0,100.0,0.0,0.0,1,2.0,2.0,7,10,10,10,1
Style: topleft,Arial,20.0,&H00FFFFFF,&H000000FF,&H00000000,&H00000000,0,0,0,0,100.0,100.0,0.0,0.0,1,2.0,2.0,4,10,10,10,1
[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
Dialogue: 0,0:00:00.00,0:01:00.00,Default,,0,0,0,,An, example, subtitle.
Comment: 0,0:00:00.00,0:01:00.00,Default,,0,0,0,,You can't see this one.
Dialogue: 0,0:01:00.00,0:02:00.00,Default,,0,0,0,,Subtitle number\\Ntwo.
"""
SIMPLE_SSA_REF = """\
[Script Info]
; Script generated by pysubs2
; https://pypi.python.org/pypi/pysubs2
WrapStyle: 0
ScaledBorderAndShadow: yes
Collisions: Normal
My Custom Info: Some: Test, String.
ScriptType: v4.00
[V4 Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, TertiaryColour, BackColour, Bold, Italic, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, AlphaLevel, Encoding
Style: Default,Arial,20.0,16777215,255,0,0,0,0,1,2.0,2.0,2,10,10,10,0,1
Style: left,Arial,20.0,16777215,255,0,0,-1,0,1,2.0,2.0,5,10,10,10,0,1
Style: topleft,Arial,20.0,16777215,255,0,0,0,0,1,2.0,2.0,9,10,10,10,0,1
[Events]
Format: Marked, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
Dialogue: Marked=0,0:00:00.00,0:01:00.00,Default,,0,0,0,,An, example, subtitle.
Comment: Marked=0,0:00:00.00,0:01:00.00,Default,,0,0,0,,You can't see this one.
Dialogue: Marked=0,0:01:00.00,0:02:00.00,Default,,0,0,0,,Subtitle number\\Ntwo.
"""
AEGISUB_PROJECT_GARBAGE_FILE = """\
[Script Info]
; Script generated by Aegisub 3.2.2
; http://www.aegisub.org/
Title: Default Aegisub file
ScriptType: v4.00+
WrapStyle: 0
ScaledBorderAndShadow: yes
YCbCr Matrix: None
PlayResX: 640
PlayResY: 480
[Aegisub Project Garbage]
Last Style Storage: Default
Video File: ?dummy:23.976000:40000:640:480:47:163:254:
Video AR Value: 1.333333
Video Zoom Percent: 0.500000
Active Line: 2
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
Style: Default,Arial,20,&H00FFFFFF,&H000000FF,&H00000000,&H00000000,0,0,0,0,100,100,0,0,1,2,2,2,10,10,10,1
[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
Dialogue: 0,0:00:00.00,0:00:05.00,Default,,0,0,0,,Test for new Aegisub Project section
"""
def build_ref():
subs = SSAFile()
subs.info["My Custom Info"] = "Some: Test, String."
subs.styles["left"] = SSAStyle(alignment=7, bold=True)
subs.styles["topleft"] = SSAStyle(alignment=4)
subs.append(SSAEvent(start=0, end=make_time(m=1), text="An, example, subtitle."))
subs.append(SSAEvent(start=0, end=make_time(m=1), type="Comment", text="You can't see this one."))
subs.append(SSAEvent(start=make_time(m=1), end=make_time(m=2), text="Subtitle number\\Ntwo."))
return subs
def test_simple_write():
subs = build_ref()
assert subs.to_string("ass").strip() == SIMPLE_ASS_REF.strip()
assert subs.to_string("ssa").strip() == SIMPLE_SSA_REF.strip()
def test_simple_read():
ref = build_ref()
subs1 = SSAFile.from_string(SIMPLE_ASS_REF)
subs2 = SSAFile.from_string(SIMPLE_SSA_REF)
assert ref.equals(subs1)
assert ref.equals(subs2)
def test_color_parsing():
solid_color = Color(r=1, g=2, b=3)
transparent_color = Color(r=1, g=2, b=3, a=4)
assert ssa_rgb_to_color(color_to_ssa_rgb(solid_color)) == solid_color
assert ass_rgba_to_color(color_to_ass_rgba(solid_color)) == solid_color
assert ass_rgba_to_color(color_to_ass_rgba(transparent_color)) == transparent_color
assert ass_rgba_to_color("&HAABBCCDD") == Color(r=0xDD, g=0xCC, b=0xBB, a=0xAA)
assert color_to_ass_rgba(Color(r=0xDD, g=0xCC, b=0xBB, a=0xAA)) == "&HAABBCCDD"
def test_aegisub_project_garbage():
subs = SSAFile.from_string(AEGISUB_PROJECT_GARBAGE_FILE)
garbage_section = dedent("""
[Aegisub Project Garbage]
Last Style Storage: Default
Video File: ?dummy:23.976000:40000:640:480:47:163:254:
Video AR Value: 1.333333
Video Zoom Percent: 0.500000
Active Line: 2""")
assert garbage_section in subs.to_string("ass")
def test_ascii_str_fields():
# see issue #12
STYLE_NAME = b"top-style"
subs = SSAFile()
line = SSAEvent(style=STYLE_NAME)
subs.events.append(line)
style = SSAStyle()
subs.styles[STYLE_NAME] = style
if sys.version_info.major == 2:
# in Python 2, saving subtitles with non-unicode fields is tolerated
# as long as they do not fall outside of ASCII range
subs.to_string("ass")
else:
# in Python 3, we are strict and enforce Unicode
with assert_raises(TypeError):
subs.to_string("ass")
def test_non_ascii_str_fields():
# see issue #12
STYLE_NAME = "my-style"
FONT_NAME = b"NonAsciiString\xff"
subs = SSAFile()
line = SSAEvent(style=STYLE_NAME)
subs.events.append(line)
style = SSAStyle(fontname=FONT_NAME)
subs.styles[STYLE_NAME] = style
# in all Pythons, saving subtitles with non-unicode fields
# fails when they are not in ASCII range
with assert_raises(TypeError):
subs.to_string("ass")
``` |
{
"source": "Joshua-Barawa/Blog-post",
"score": 3
} |
#### File: Joshua-Barawa/Blog-post/test_models.py
```python
import unittest
from models import *
class TestUser(unittest.TestCase):
def setUp(self):
self.user = User("Dfdf", "Dfdf", "dfdfdf", "dfdf")
if __name__ == '__main__':
unittest.main()
class TestBlog(unittest.TestCase):
def setUp(self):
self.blog = Blog("vcvcv", "sdsd", "dsd","sdsd", 12/3/2020, "dsd")
if __name__ == '__main__':
unittest.main()
class TestComment(unittest.TestCase):
def setUp(self):
self.comment = Comment(1, "sdsd", "dsd")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "Joshua-Barawa/Django-IP3",
"score": 2
} |
#### File: Django-IP3/members/forms.py
```python
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import *
class RegisterForm(UserCreationForm):
first_name = forms.CharField(max_length=30)
email = forms.CharField()
class Meta:
model = User
        fields = ['username', "first_name", "email", 'password1', 'password2']
def __init__(self, *args, **kwargs):
super(RegisterForm, self).__init__(*args, **kwargs)
self.fields['username'].widget.attrs['class'] = 'form-control'
self.fields['first_name'].widget.attrs['class'] = 'form-control'
self.fields['email'].widget.attrs['class'] = 'form-control'
self.fields['password1'].widget.attrs['class'] = 'form-control'
self.fields['password2'].widget.attrs['class'] = 'form-control'
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
fields = ['project_pic', "name", "description", 'country', 'live_link']
def __init__(self, *args, **kwargs):
super(ProjectForm, self).__init__(*args, **kwargs)
self.fields['project_pic'].widget.attrs['class'] = 'form-control'
self.fields['name'].widget.attrs['class'] = 'form-control'
self.fields['description'].widget.attrs['class'] = 'form-control description'
self.fields['country'].widget.attrs['class'] = 'form-control'
self.fields['live_link'].widget.attrs['class'] = 'form-control'
class RateForm(forms.ModelForm):
class Meta:
model = Prorating
fields = ['design', "usability", "content"]
def __init__(self, *args, **kwargs):
super(RateForm, self).__init__(*args, **kwargs)
self.fields['design'].widget.attrs['class'] = 'form-control'
self.fields['usability'].widget.attrs['class'] = 'form-control'
self.fields['content'].widget.attrs['class'] = 'form-control'
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ['message']
def __init__(self, *args, **kwargs):
super(CommentForm, self).__init__(*args, **kwargs)
self.fields['message'].widget.attrs['class'] = 'form-control c-field'
```
#### File: Django-IP3/members/tests.py
```python
from django.test import TestCase
from .models import *
class ProfileTestCase(TestCase):
def setUp(self):
Profile.objects.create(user="josh", profile_pic="media/default2_GJAt4l6.png", caption="<NAME>")
Profile.objects.create(user="bob", profile_pic="media/default2_GJAt4l6.png", caption="<NAME>")
    def test_profile_captions(self):
        pro1 = Profile.objects.get(user="josh")
        pro2 = Profile.objects.get(user="bob")
self.assertEqual(pro1.caption, '<NAME>')
self.assertEqual(pro2.caption, '<NAME>')
``` |
{
"source": "Joshua-Barawa/My-Photos",
"score": 2
} |
#### File: My-Photos/useraccount/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, blank=True, null=True)
profile_photo = CloudinaryField()
bio = models.TextField(max_length=200, blank=True, null=True)
def __str__(self):
return self.user.username
class Image(models.Model):
user = models.ForeignKey(Profile, on_delete=models.CASCADE, blank=True, null=True)
image = CloudinaryField()
name = models.CharField(max_length=60)
caption = models.TextField(max_length=200)
like = models.IntegerField(default=0)
comment = models.IntegerField(default=0)
def __str__(self):
return self.name
class Comments(models.Model):
post_id = models.ForeignKey(Image, on_delete=models.CASCADE, blank=False, null=False)
name = models.ForeignKey(Profile, on_delete=models.CASCADE)
desc = models.TextField(max_length=200)
def __str__(self):
return self.post_id.name
```
#### File: backports/zoneinfo/__init__.py
```python
__all__ = [
"ZoneInfo",
"reset_tzpath",
"available_timezones",
"TZPATH",
"ZoneInfoNotFoundError",
"InvalidTZPathWarning",
]
import sys
from . import _tzpath
from ._common import ZoneInfoNotFoundError
from ._version import __version__
try:
from ._czoneinfo import ZoneInfo
except ImportError: # pragma: nocover
from ._zoneinfo import ZoneInfo
reset_tzpath = _tzpath.reset_tzpath
available_timezones = _tzpath.available_timezones
InvalidTZPathWarning = _tzpath.InvalidTZPathWarning
if sys.version_info < (3, 7):
# Module-level __getattr__ was added in Python 3.7, so instead of lazily
# populating TZPATH on every access, we will register a callback with
# reset_tzpath to update the top-level tuple.
TZPATH = _tzpath.TZPATH
def _tzpath_callback(new_tzpath):
global TZPATH
TZPATH = new_tzpath
_tzpath.TZPATH_CALLBACKS.append(_tzpath_callback)
del _tzpath_callback
else:
def __getattr__(name):
if name == "TZPATH":
return _tzpath.TZPATH
else:
raise AttributeError(
f"module {__name__!r} has no attribute {name!r}"
)
def __dir__():
return sorted(list(globals()) + ["TZPATH"])
```
#### File: cloudinary/provisioning/account_config.py
```python
from __future__ import absolute_import
import os
from cloudinary import BaseConfig, import_django_settings
ACCOUNT_URI_SCHEME = "account"
class AccountConfig(BaseConfig):
def __init__(self):
self._uri_scheme = ACCOUNT_URI_SCHEME
super(AccountConfig, self).__init__()
def _config_from_parsed_url(self, parsed_url):
if not self._is_url_scheme_valid(parsed_url):
raise ValueError("Invalid CLOUDINARY_ACCOUNT_URL scheme. URL should begin with 'account://'")
return {
"account_id": parsed_url.hostname,
"provisioning_api_key": parsed_url.username,
"provisioning_api_secret": parsed_url.password,
}
def _load_config_from_env(self):
if os.environ.get("CLOUDINARY_ACCOUNT_URL"):
self._load_from_url(os.environ.get("CLOUDINARY_ACCOUNT_URL"))
def account_config(**keywords):
global _account_config
_account_config.update(**keywords)
return _account_config
def reset_config():
global _account_config
_account_config = AccountConfig()
_account_config = AccountConfig()
```
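A hedged sketch of the account:// URL parsed above; the credentials are hypothetical, and attribute-style access on the returned config mirrors how cloudinary.config() behaves elsewhere in the SDK, so treat that part as an assumption.
```python
import os
os.environ["CLOUDINARY_ACCOUNT_URL"] = "account://provisioning_key:provisioning_secret@my_account_id"
from cloudinary.provisioning.account_config import account_config, reset_config
reset_config()                    # rebuild the config, assuming the environment is read on construction
cfg = account_config()
print(cfg.account_id)             # "my_account_id"
print(cfg.provisioning_api_key)   # "provisioning_key"
```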
#### File: admin/views/autocomplete.py
```python
from django.apps import apps
from django.core.exceptions import FieldDoesNotExist, PermissionDenied
from django.http import Http404, JsonResponse
from django.views.generic.list import BaseListView
class AutocompleteJsonView(BaseListView):
"""Handle AutocompleteWidget's AJAX requests for data."""
paginate_by = 20
admin_site = None
def get(self, request, *args, **kwargs):
"""
Return a JsonResponse with search results as defined in
serialize_result(), by default:
{
results: [{id: "123" text: "foo"}],
pagination: {more: true}
}
"""
(
self.term,
self.model_admin,
self.source_field,
to_field_name,
) = self.process_request(request)
if not self.has_perm(request):
raise PermissionDenied
self.object_list = self.get_queryset()
context = self.get_context_data()
return JsonResponse(
{
"results": [
self.serialize_result(obj, to_field_name)
for obj in context["object_list"]
],
"pagination": {"more": context["page_obj"].has_next()},
}
)
def serialize_result(self, obj, to_field_name):
"""
Convert the provided model object to a dictionary that is added to the
results list.
"""
return {"id": str(getattr(obj, to_field_name)), "text": str(obj)}
def get_paginator(self, *args, **kwargs):
"""Use the ModelAdmin's paginator."""
return self.model_admin.get_paginator(self.request, *args, **kwargs)
def get_queryset(self):
"""Return queryset based on ModelAdmin.get_search_results()."""
qs = self.model_admin.get_queryset(self.request)
qs = qs.complex_filter(self.source_field.get_limit_choices_to())
qs, search_use_distinct = self.model_admin.get_search_results(
self.request, qs, self.term
)
if search_use_distinct:
qs = qs.distinct()
return qs
def process_request(self, request):
"""
Validate request integrity, extract and return request parameters.
Since the subsequent view permission check requires the target model
admin, which is determined here, raise PermissionDenied if the
requested app, model or field are malformed.
Raise Http404 if the target model admin is not configured properly with
search_fields.
"""
term = request.GET.get("term", "")
try:
app_label = request.GET["app_label"]
model_name = request.GET["model_name"]
field_name = request.GET["field_name"]
except KeyError as e:
raise PermissionDenied from e
# Retrieve objects from parameters.
try:
source_model = apps.get_model(app_label, model_name)
except LookupError as e:
raise PermissionDenied from e
try:
source_field = source_model._meta.get_field(field_name)
except FieldDoesNotExist as e:
raise PermissionDenied from e
try:
remote_model = source_field.remote_field.model
except AttributeError as e:
raise PermissionDenied from e
try:
model_admin = self.admin_site._registry[remote_model]
except KeyError as e:
raise PermissionDenied from e
# Validate suitability of objects.
if not model_admin.get_search_fields(request):
raise Http404(
"%s must have search_fields for the autocomplete_view."
% type(model_admin).__qualname__
)
to_field_name = getattr(
source_field.remote_field, "field_name", remote_model._meta.pk.attname
)
to_field_name = remote_model._meta.get_field(to_field_name).attname
if not model_admin.to_field_allowed(request, to_field_name):
raise PermissionDenied
return term, model_admin, source_field, to_field_name
def has_perm(self, request, obj=None):
"""Check if user has permission to access the related model."""
return self.model_admin.has_view_permission(request, obj=obj)
```
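serialize_result() above is the intended customization hook; below is a minimal sketch of overriding it from a custom admin site (the extra "notes" attribute is hypothetical).
```python
from django.contrib import admin
from django.contrib.admin.views.autocomplete import AutocompleteJsonView
class NotesAutocompleteJsonView(AutocompleteJsonView):
    def serialize_result(self, obj, to_field_name):
        result = super().serialize_result(obj, to_field_name)
        result["notes"] = getattr(obj, "notes", "")  # hypothetical extra attribute on the related model
        return result
class NotesAdminSite(admin.AdminSite):
    def autocomplete_view(self, request):
        return NotesAutocompleteJsonView.as_view(admin_site=self)(request)
```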
#### File: contrib/auth/context_processors.py
```python
class PermLookupDict:
def __init__(self, user, app_label):
self.user, self.app_label = user, app_label
def __repr__(self):
return str(self.user.get_all_permissions())
def __getitem__(self, perm_name):
return self.user.has_perm("%s.%s" % (self.app_label, perm_name))
def __iter__(self):
# To fix 'item in perms.someapp' and __getitem__ interaction we need to
# define __iter__. See #18979 for details.
raise TypeError("PermLookupDict is not iterable.")
def __bool__(self):
return self.user.has_module_perms(self.app_label)
class PermWrapper:
def __init__(self, user):
self.user = user
def __repr__(self):
return f"{self.__class__.__qualname__}({self.user!r})"
def __getitem__(self, app_label):
return PermLookupDict(self.user, app_label)
def __iter__(self):
# I am large, I contain multitudes.
raise TypeError("PermWrapper is not iterable.")
def __contains__(self, perm_name):
"""
Lookup by "someapp" or "someapp.someperm" in perms.
"""
if "." not in perm_name:
# The name refers to module.
return bool(self[perm_name])
app_label, perm_name = perm_name.split(".", 1)
return self[app_label][perm_name]
def auth(request):
"""
Return context variables required by apps that use Django's authentication
system.
If there is no 'user' attribute in the request, use AnonymousUser (from
django.contrib.auth).
"""
if hasattr(request, "user"):
user = request.user
else:
from django.contrib.auth.models import AnonymousUser
user = AnonymousUser()
return {
"user": user,
"perms": PermWrapper(user),
}
```
#### File: gis/gdal/envelope.py
```python
from ctypes import Structure, c_double
from django.contrib.gis.gdal.error import GDALException
# The OGR definition of an Envelope is a C structure containing four doubles.
# See the 'ogr_core.h' source file for more information:
# https://gdal.org/doxygen/ogr__core_8h_source.html
class OGREnvelope(Structure):
"Represent the OGREnvelope C Structure."
_fields_ = [
("MinX", c_double),
("MaxX", c_double),
("MinY", c_double),
("MaxY", c_double),
]
class Envelope:
"""
The Envelope object is a C structure that contains the minimum and
maximum X, Y coordinates for a rectangle bounding box. The naming
of the variables is compatible with the OGR Envelope structure.
"""
def __init__(self, *args):
"""
The initialization function may take an OGREnvelope structure, 4-element
tuple or list, or 4 individual arguments.
"""
if len(args) == 1:
if isinstance(args[0], OGREnvelope):
# OGREnvelope (a ctypes Structure) was passed in.
self._envelope = args[0]
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) != 4:
raise GDALException(
"Incorrect number of tuple elements (%d)." % len(args[0])
)
else:
self._from_sequence(args[0])
else:
raise TypeError("Incorrect type of argument: %s" % type(args[0]))
elif len(args) == 4:
# Individual parameters passed in.
# Thanks to ww for the help
self._from_sequence([float(a) for a in args])
else:
raise GDALException("Incorrect number (%d) of arguments." % len(args))
# Checking the x,y coordinates
if self.min_x > self.max_x:
raise GDALException("Envelope minimum X > maximum X.")
if self.min_y > self.max_y:
raise GDALException("Envelope minimum Y > maximum Y.")
def __eq__(self, other):
"""
Return True if the envelopes are equivalent; can compare against
other Envelopes and 4-tuples.
"""
if isinstance(other, Envelope):
return (
(self.min_x == other.min_x)
and (self.min_y == other.min_y)
and (self.max_x == other.max_x)
and (self.max_y == other.max_y)
)
elif isinstance(other, tuple) and len(other) == 4:
return (
(self.min_x == other[0])
and (self.min_y == other[1])
and (self.max_x == other[2])
and (self.max_y == other[3])
)
else:
raise GDALException("Equivalence testing only works with other Envelopes.")
def __str__(self):
"Return a string representation of the tuple."
return str(self.tuple)
def _from_sequence(self, seq):
"Initialize the C OGR Envelope structure from the given sequence."
self._envelope = OGREnvelope()
self._envelope.MinX = seq[0]
self._envelope.MinY = seq[1]
self._envelope.MaxX = seq[2]
self._envelope.MaxY = seq[3]
def expand_to_include(self, *args):
"""
Modify the envelope to expand to include the boundaries of
the passed-in 2-tuple (a point), 4-tuple (an extent) or
envelope.
"""
# We provide a number of different signatures for this method,
# and the logic here is all about converting them into a
# 4-tuple single parameter which does the actual work of
# expanding the envelope.
if len(args) == 1:
if isinstance(args[0], Envelope):
return self.expand_to_include(args[0].tuple)
elif hasattr(args[0], "x") and hasattr(args[0], "y"):
return self.expand_to_include(
args[0].x, args[0].y, args[0].x, args[0].y
)
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) == 2:
return self.expand_to_include(
(args[0][0], args[0][1], args[0][0], args[0][1])
)
elif len(args[0]) == 4:
(minx, miny, maxx, maxy) = args[0]
if minx < self._envelope.MinX:
self._envelope.MinX = minx
if miny < self._envelope.MinY:
self._envelope.MinY = miny
if maxx > self._envelope.MaxX:
self._envelope.MaxX = maxx
if maxy > self._envelope.MaxY:
self._envelope.MaxY = maxy
else:
raise GDALException(
"Incorrect number of tuple elements (%d)." % len(args[0])
)
else:
raise TypeError("Incorrect type of argument: %s" % type(args[0]))
elif len(args) == 2:
# An x and an y parameter were passed in
return self.expand_to_include((args[0], args[1], args[0], args[1]))
elif len(args) == 4:
# Individual parameters passed in.
return self.expand_to_include(args)
else:
raise GDALException("Incorrect number (%d) of arguments." % len(args[0]))
@property
def min_x(self):
"Return the value of the minimum X coordinate."
return self._envelope.MinX
@property
def min_y(self):
"Return the value of the minimum Y coordinate."
return self._envelope.MinY
@property
def max_x(self):
"Return the value of the maximum X coordinate."
return self._envelope.MaxX
@property
def max_y(self):
"Return the value of the maximum Y coordinate."
return self._envelope.MaxY
@property
def ur(self):
"Return the upper-right coordinate."
return (self.max_x, self.max_y)
@property
def ll(self):
"Return the lower-left coordinate."
return (self.min_x, self.min_y)
@property
def tuple(self):
"Return a tuple representing the envelope."
return (self.min_x, self.min_y, self.max_x, self.max_y)
@property
def wkt(self):
"Return WKT representing a Polygon for this envelope."
# TODO: Fix significant figures.
return "POLYGON((%s %s,%s %s,%s %s,%s %s,%s %s))" % (
self.min_x,
self.min_y,
self.min_x,
self.max_y,
self.max_x,
self.max_y,
self.max_x,
self.min_y,
self.min_x,
self.min_y,
)
```
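A small usage sketch of the Envelope API defined above; it requires GDAL to be installed, since the class wraps the OGREnvelope C structure.
```python
from django.contrib.gis.gdal.envelope import Envelope
env = Envelope(0.0, 0.0, 5.0, 5.0)
env.expand_to_include(10.0, 12.0)               # grow the box to contain a point
env.expand_to_include((-2.0, -1.0, 1.0, 1.0))   # or another 4-tuple extent
print(env.tuple)   # (-2.0, -1.0, 10.0, 12.0)
print(env.wkt)     # POLYGON((...)) covering the expanded extent
```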
#### File: geos/prototypes/coordseq.py
```python
from ctypes import POINTER, c_byte, c_double, c_int, c_uint
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import GEOSException, last_arg_byref
# ## Error-checking routines specific to coordinate sequences. ##
def check_cs_op(result, func, cargs):
"Check the status code of a coordinate sequence operation."
if result == 0:
raise GEOSException("Could not set value on coordinate sequence")
else:
return result
def check_cs_get(result, func, cargs):
"Check the coordinate sequence retrieval."
check_cs_op(result, func, cargs)
# Object in by reference, return its value.
return last_arg_byref(cargs)
# ## Coordinate sequence prototype factory classes. ##
class CsInt(GEOSFuncFactory):
"For coordinate sequence routines that return an integer."
argtypes = [CS_PTR, POINTER(c_uint)]
restype = c_int
errcheck = staticmethod(check_cs_get)
class CsOperation(GEOSFuncFactory):
"For coordinate sequence operations."
restype = c_int
def __init__(self, *args, ordinate=False, get=False, **kwargs):
if get:
# Get routines have double parameter passed-in by reference.
errcheck = check_cs_get
dbl_param = POINTER(c_double)
else:
errcheck = check_cs_op
dbl_param = c_double
if ordinate:
# Get/Set ordinate routines have an extra uint parameter.
argtypes = [CS_PTR, c_uint, c_uint, dbl_param]
else:
argtypes = [CS_PTR, c_uint, dbl_param]
super().__init__(
*args, **{**kwargs, "errcheck": errcheck, "argtypes": argtypes}
)
class CsOutput(GEOSFuncFactory):
restype = CS_PTR
@staticmethod
def errcheck(result, func, cargs):
if not result:
raise GEOSException(
"Error encountered checking Coordinate Sequence returned from GEOS "
'C function "%s".' % func.__name__
)
return result
# ## Coordinate Sequence ctypes prototypes ##
# Coordinate Sequence constructors & cloning.
cs_clone = CsOutput("GEOSCoordSeq_clone", argtypes=[CS_PTR])
create_cs = CsOutput("GEOSCoordSeq_create", argtypes=[c_uint, c_uint])
get_cs = CsOutput("GEOSGeom_getCoordSeq", argtypes=[GEOM_PTR])
# Getting, setting ordinate
cs_getordinate = CsOperation("GEOSCoordSeq_getOrdinate", ordinate=True, get=True)
cs_setordinate = CsOperation("GEOSCoordSeq_setOrdinate", ordinate=True)
# For getting, x, y, z
cs_getx = CsOperation("GEOSCoordSeq_getX", get=True)
cs_gety = CsOperation("GEOSCoordSeq_getY", get=True)
cs_getz = CsOperation("GEOSCoordSeq_getZ", get=True)
# For setting, x, y, z
cs_setx = CsOperation("GEOSCoordSeq_setX")
cs_sety = CsOperation("GEOSCoordSeq_setY")
cs_setz = CsOperation("GEOSCoordSeq_setZ")
# These routines return size & dimensions.
cs_getsize = CsInt("GEOSCoordSeq_getSize")
cs_getdims = CsInt("GEOSCoordSeq_getDimensions")
cs_is_ccw = GEOSFuncFactory(
"GEOSCoordSeq_isCCW", restype=c_int, argtypes=[CS_PTR, POINTER(c_byte)]
)
```
#### File: geos/prototypes/threadsafe.py
```python
import threading
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import CONTEXT_PTR, error_h, lgeos, notice_h
class GEOSContextHandle(GEOSBase):
"""Represent a GEOS context handle."""
ptr_type = CONTEXT_PTR
destructor = lgeos.finishGEOS_r
def __init__(self):
# Initializing the context handler for this thread with
# the notice and error handler.
self.ptr = lgeos.initGEOS_r(notice_h, error_h)
# Defining a thread-local object and creating an instance
# to hold a reference to GEOSContextHandle for this thread.
class GEOSContext(threading.local):
handle = None
thread_context = GEOSContext()
class GEOSFunc:
"""
Serve as a wrapper for GEOS C Functions. Use thread-safe function
variants when available.
"""
def __init__(self, func_name):
# GEOS thread-safe function signatures end with '_r' and take an
# additional context handle parameter.
self.cfunc = getattr(lgeos, func_name + "_r")
# Create a reference to thread_context so it's not garbage-collected
# before an attempt to call this object.
self.thread_context = thread_context
def __call__(self, *args):
# Create a context handle if one doesn't exist for this thread.
self.thread_context.handle = self.thread_context.handle or GEOSContextHandle()
# Call the threaded GEOS routine with the pointer of the context handle
# as the first argument.
return self.cfunc(self.thread_context.handle.ptr, *args)
def __str__(self):
return self.cfunc.__name__
# argtypes property
def _get_argtypes(self):
return self.cfunc.argtypes
def _set_argtypes(self, argtypes):
self.cfunc.argtypes = [CONTEXT_PTR, *argtypes]
argtypes = property(_get_argtypes, _set_argtypes)
# restype property
def _get_restype(self):
return self.cfunc.restype
def _set_restype(self, restype):
self.cfunc.restype = restype
restype = property(_get_restype, _set_restype)
# errcheck property
def _get_errcheck(self):
return self.cfunc.errcheck
def _set_errcheck(self, errcheck):
self.cfunc.errcheck = errcheck
errcheck = property(_get_errcheck, _set_errcheck)
```
#### File: contrib/postgres/expressions.py
```python
from django.contrib.postgres.fields import ArrayField
from django.db.models import Subquery
from django.utils.functional import cached_property
class ArraySubquery(Subquery):
template = "ARRAY(%(subquery)s)"
def __init__(self, queryset, **kwargs):
super().__init__(queryset, **kwargs)
@cached_property
def output_field(self):
return ArrayField(self.query.output_field)
```
#### File: core/checks/files.py
```python
from pathlib import Path
from django.conf import settings
from . import Error, Tags, register
@register(Tags.files)
def check_setting_file_upload_temp_dir(app_configs, **kwargs):
setting = getattr(settings, "FILE_UPLOAD_TEMP_DIR", None)
if setting and not Path(setting).is_dir():
return [
Error(
f"The FILE_UPLOAD_TEMP_DIR setting refers to the nonexistent "
f"directory '{setting}'.",
id="files.E001",
),
]
return []
```
#### File: core/checks/messages.py
```python
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CRITICAL = 50
class CheckMessage:
def __init__(self, level, msg, hint=None, obj=None, id=None):
if not isinstance(level, int):
raise TypeError("The first argument should be level.")
self.level = level
self.msg = msg
self.hint = hint
self.obj = obj
self.id = id
def __eq__(self, other):
return isinstance(other, self.__class__) and all(
getattr(self, attr) == getattr(other, attr)
for attr in ["level", "msg", "hint", "obj", "id"]
)
def __str__(self):
from django.db import models
if self.obj is None:
obj = "?"
elif isinstance(self.obj, models.base.ModelBase):
# We need to hardcode ModelBase and Field cases because its __str__
# method doesn't return "applabel.modellabel" and cannot be changed.
obj = self.obj._meta.label
else:
obj = str(self.obj)
id = "(%s) " % self.id if self.id else ""
hint = "\n\tHINT: %s" % self.hint if self.hint else ""
return "%s: %s%s%s" % (obj, id, self.msg, hint)
def __repr__(self):
return "<%s: level=%r, msg=%r, hint=%r, obj=%r, id=%r>" % (
self.__class__.__name__,
self.level,
self.msg,
self.hint,
self.obj,
self.id,
)
def is_serious(self, level=ERROR):
return self.level >= level
def is_silenced(self):
from django.conf import settings
return self.id in settings.SILENCED_SYSTEM_CHECKS
class Debug(CheckMessage):
def __init__(self, *args, **kwargs):
super().__init__(DEBUG, *args, **kwargs)
class Info(CheckMessage):
def __init__(self, *args, **kwargs):
super().__init__(INFO, *args, **kwargs)
class Warning(CheckMessage):
def __init__(self, *args, **kwargs):
super().__init__(WARNING, *args, **kwargs)
class Error(CheckMessage):
def __init__(self, *args, **kwargs):
super().__init__(ERROR, *args, **kwargs)
class Critical(CheckMessage):
def __init__(self, *args, **kwargs):
super().__init__(CRITICAL, *args, **kwargs)
```
#### File: django/middleware/locale.py
```python
from django.conf import settings
from django.conf.urls.i18n import is_language_prefix_patterns_used
from django.http import HttpResponseRedirect
from django.urls import get_script_prefix, is_valid_path
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
class LocaleMiddleware(MiddlewareMixin):
"""
Parse a request and decide what translation object to install in the
current thread context. This allows pages to be dynamically translated to
the language the user desires (if the language is available).
"""
response_redirect_class = HttpResponseRedirect
def process_request(self, request):
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
(
i18n_patterns_used,
prefixed_default_language,
) = is_language_prefix_patterns_used(urlconf)
language = translation.get_language_from_request(
request, check_path=i18n_patterns_used
)
language_from_path = translation.get_language_from_path(request.path_info)
if (
not language_from_path
and i18n_patterns_used
and not prefixed_default_language
):
language = settings.LANGUAGE_CODE
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
language = translation.get_language()
language_from_path = translation.get_language_from_path(request.path_info)
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
(
i18n_patterns_used,
prefixed_default_language,
) = is_language_prefix_patterns_used(urlconf)
if (
response.status_code == 404
and not language_from_path
and i18n_patterns_used
and prefixed_default_language
):
# Maybe the language code is missing in the URL? Try adding the
# language prefix and redirecting to that URL.
language_path = "/%s%s" % (language, request.path_info)
path_valid = is_valid_path(language_path, urlconf)
path_needs_slash = not path_valid and (
settings.APPEND_SLASH
and not language_path.endswith("/")
and is_valid_path("%s/" % language_path, urlconf)
)
if path_valid or path_needs_slash:
script_prefix = get_script_prefix()
# Insert language after the script prefix and before the
# rest of the URL
language_url = request.get_full_path(
force_append_slash=path_needs_slash
).replace(script_prefix, "%s%s/" % (script_prefix, language), 1)
# Redirect to the language-specific URL as detected by
# get_language_from_request(). HTTP caches may cache this
# redirect, so add the Vary header.
redirect = self.response_redirect_class(language_url)
patch_vary_headers(redirect, ("Accept-Language", "Cookie"))
return redirect
if not (i18n_patterns_used and language_from_path):
patch_vary_headers(response, ("Accept-Language",))
response.headers.setdefault("Content-Language", language)
return response
```
#### File: template/loaders/app_directories.py
```python
from django.template.utils import get_app_template_dirs
from .filesystem import Loader as FilesystemLoader
class Loader(FilesystemLoader):
def get_dirs(self):
return get_app_template_dirs("templates")
```
#### File: template/loaders/locmem.py
```python
from django.template import Origin, TemplateDoesNotExist
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def __init__(self, engine, templates_dict):
self.templates_dict = templates_dict
super().__init__(engine)
def get_contents(self, origin):
try:
return self.templates_dict[origin.name]
except KeyError:
raise TemplateDoesNotExist(origin)
def get_template_sources(self, template_name):
yield Origin(
name=template_name,
template_name=template_name,
loader=self,
)
```
#### File: django/urls/utils.py
```python
import functools
from importlib import import_module
from django.core.exceptions import ViewDoesNotExist
from django.utils.module_loading import module_has_submodule
@functools.lru_cache(maxsize=None)
def get_callable(lookup_view):
"""
Return a callable corresponding to lookup_view.
* If lookup_view is already a callable, return it.
* If lookup_view is a string import path that can be resolved to a callable,
import that callable and return it, otherwise raise an exception
(ImportError or ViewDoesNotExist).
"""
if callable(lookup_view):
return lookup_view
if not isinstance(lookup_view, str):
raise ViewDoesNotExist(
"'%s' is not a callable or a dot-notation path" % lookup_view
)
mod_name, func_name = get_mod_func(lookup_view)
if not func_name: # No '.' in lookup_view
raise ImportError(
"Could not import '%s'. The path must be fully qualified." % lookup_view
)
try:
mod = import_module(mod_name)
except ImportError:
parentmod, submod = get_mod_func(mod_name)
if submod and not module_has_submodule(import_module(parentmod), submod):
raise ViewDoesNotExist(
"Could not import '%s'. Parent module %s does not exist."
% (lookup_view, mod_name)
)
else:
raise
else:
try:
view_func = getattr(mod, func_name)
except AttributeError:
raise ViewDoesNotExist(
"Could not import '%s'. View does not exist in module %s."
% (lookup_view, mod_name)
)
else:
if not callable(view_func):
raise ViewDoesNotExist(
"Could not import '%s.%s'. View is not callable."
% (mod_name, func_name)
)
return view_func
def get_mod_func(callback):
# Convert 'django.views.news.stories.story_detail' to
# ['django.views.news.stories', 'story_detail']
try:
dot = callback.rindex(".")
except ValueError:
return callback, ""
return callback[:dot], callback[dot + 1 :]
```
#### File: gunicorn/http/parser.py
```python
from gunicorn.http.message import Request
from gunicorn.http.unreader import SocketUnreader, IterUnreader
class Parser(object):
mesg_class = None
def __init__(self, cfg, source, source_addr):
self.cfg = cfg
if hasattr(source, "recv"):
self.unreader = SocketUnreader(source)
else:
self.unreader = IterUnreader(source)
self.mesg = None
self.source_addr = source_addr
        # request counter (for keepalive connections)
self.req_count = 0
def __iter__(self):
return self
def __next__(self):
# Stop if HTTP dictates a stop.
if self.mesg and self.mesg.should_close():
raise StopIteration()
# Discard any unread body of the previous message
if self.mesg:
data = self.mesg.body.read(8192)
while data:
data = self.mesg.body.read(8192)
# Parse the next request
self.req_count += 1
self.mesg = self.mesg_class(self.cfg, self.unreader, self.source_addr, self.req_count)
if not self.mesg:
raise StopIteration()
return self.mesg
next = __next__
class RequestParser(Parser):
mesg_class = Request
```
#### File: site-packages/PIL/IcoImagePlugin.py
```python
import struct
import warnings
from io import BytesIO
from math import ceil, log
from . import BmpImagePlugin, Image, ImageFile, PngImagePlugin
from ._binary import i16le as i16
from ._binary import i32le as i32
from ._binary import o32le as o32
#
# --------------------------------------------------------------------
_MAGIC = b"\0\0\1\0"
def _save(im, fp, filename):
fp.write(_MAGIC) # (2+2)
sizes = im.encoderinfo.get(
"sizes",
[(16, 16), (24, 24), (32, 32), (48, 48), (64, 64), (128, 128), (256, 256)],
)
width, height = im.size
sizes = filter(
lambda x: False
if (x[0] > width or x[1] > height or x[0] > 256 or x[1] > 256)
else True,
sizes,
)
sizes = list(sizes)
fp.write(struct.pack("<H", len(sizes))) # idCount(2)
offset = fp.tell() + len(sizes) * 16
bmp = im.encoderinfo.get("bitmap_format") == "bmp"
provided_images = {im.size: im for im in im.encoderinfo.get("append_images", [])}
for size in sizes:
width, height = size
# 0 means 256
fp.write(struct.pack("B", width if width < 256 else 0)) # bWidth(1)
fp.write(struct.pack("B", height if height < 256 else 0)) # bHeight(1)
fp.write(b"\0") # bColorCount(1)
fp.write(b"\0") # bReserved(1)
fp.write(b"\0\0") # wPlanes(2)
tmp = provided_images.get(size)
if not tmp:
# TODO: invent a more convenient method for proportional scalings
tmp = im.copy()
tmp.thumbnail(size, Image.LANCZOS, reducing_gap=None)
bits = BmpImagePlugin.SAVE[tmp.mode][1] if bmp else 32
fp.write(struct.pack("<H", bits)) # wBitCount(2)
image_io = BytesIO()
if bmp:
tmp.save(image_io, "dib")
if bits != 32:
and_mask = Image.new("1", tmp.size)
ImageFile._save(
and_mask, image_io, [("raw", (0, 0) + tmp.size, 0, ("1", 0, -1))]
)
else:
tmp.save(image_io, "png")
image_io.seek(0)
image_bytes = image_io.read()
if bmp:
image_bytes = image_bytes[:8] + o32(height * 2) + image_bytes[12:]
bytes_len = len(image_bytes)
fp.write(struct.pack("<I", bytes_len)) # dwBytesInRes(4)
fp.write(struct.pack("<I", offset)) # dwImageOffset(4)
current = fp.tell()
fp.seek(offset)
fp.write(image_bytes)
offset = offset + bytes_len
fp.seek(current)
def _accept(prefix):
return prefix[:4] == _MAGIC
class IcoFile:
def __init__(self, buf):
"""
Parse image from file-like object containing ico file data
"""
# check magic
s = buf.read(6)
if not _accept(s):
raise SyntaxError("not an ICO file")
self.buf = buf
self.entry = []
# Number of items in file
self.nb_items = i16(s, 4)
# Get headers for each item
for i in range(self.nb_items):
s = buf.read(16)
icon_header = {
"width": s[0],
"height": s[1],
"nb_color": s[2], # No. of colors in image (0 if >=8bpp)
"reserved": s[3],
"planes": i16(s, 4),
"bpp": i16(s, 6),
"size": i32(s, 8),
"offset": i32(s, 12),
}
# See Wikipedia
for j in ("width", "height"):
if not icon_header[j]:
icon_header[j] = 256
# See Wikipedia notes about color depth.
            # We need this just to differentiate images with equal sizes
icon_header["color_depth"] = (
icon_header["bpp"]
or (
icon_header["nb_color"] != 0
and ceil(log(icon_header["nb_color"], 2))
)
or 256
)
icon_header["dim"] = (icon_header["width"], icon_header["height"])
icon_header["square"] = icon_header["width"] * icon_header["height"]
self.entry.append(icon_header)
self.entry = sorted(self.entry, key=lambda x: x["color_depth"])
# ICO images are usually squares
# self.entry = sorted(self.entry, key=lambda x: x['width'])
self.entry = sorted(self.entry, key=lambda x: x["square"])
self.entry.reverse()
def sizes(self):
"""
Get a list of all available icon sizes and color depths.
"""
return {(h["width"], h["height"]) for h in self.entry}
def getentryindex(self, size, bpp=False):
for (i, h) in enumerate(self.entry):
if size == h["dim"] and (bpp is False or bpp == h["color_depth"]):
return i
return 0
def getimage(self, size, bpp=False):
"""
Get an image from the icon
"""
return self.frame(self.getentryindex(size, bpp))
def frame(self, idx):
"""
Get an image from frame idx
"""
header = self.entry[idx]
self.buf.seek(header["offset"])
data = self.buf.read(8)
self.buf.seek(header["offset"])
if data[:8] == PngImagePlugin._MAGIC:
# png frame
im = PngImagePlugin.PngImageFile(self.buf)
Image._decompression_bomb_check(im.size)
else:
# XOR + AND mask bmp frame
im = BmpImagePlugin.DibImageFile(self.buf)
Image._decompression_bomb_check(im.size)
# change tile dimension to only encompass XOR image
im._size = (im.size[0], int(im.size[1] / 2))
d, e, o, a = im.tile[0]
im.tile[0] = d, (0, 0) + im.size, o, a
# figure out where AND mask image starts
bpp = header["bpp"]
if 32 == bpp:
# 32-bit color depth icon image allows semitransparent areas
# PIL's DIB format ignores transparency bits, recover them.
# The DIB is packed in BGRX byte order where X is the alpha
# channel.
# Back up to start of bmp data
self.buf.seek(o)
# extract every 4th byte (eg. 3,7,11,15,...)
alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4]
# convert to an 8bpp grayscale image
mask = Image.frombuffer(
"L", # 8bpp
im.size, # (w, h)
alpha_bytes, # source chars
"raw", # raw decoder
("L", 0, -1), # 8bpp inverted, unpadded, reversed
)
else:
# get AND image from end of bitmap
w = im.size[0]
if (w % 32) > 0:
# bitmap row data is aligned to word boundaries
w += 32 - (im.size[0] % 32)
# the total mask data is
# padded row size * height / bits per char
total_bytes = int((w * im.size[1]) / 8)
and_mask_offset = header["offset"] + header["size"] - total_bytes
self.buf.seek(and_mask_offset)
mask_data = self.buf.read(total_bytes)
# convert raw data to image
mask = Image.frombuffer(
"1", # 1 bpp
im.size, # (w, h)
mask_data, # source chars
"raw", # raw decoder
("1;I", int(w / 8), -1), # 1bpp inverted, padded, reversed
)
# now we have two images, im is XOR image and mask is AND image
# apply mask image as alpha channel
im = im.convert("RGBA")
im.putalpha(mask)
return im
##
# Image plugin for Windows Icon files.
class IcoImageFile(ImageFile.ImageFile):
"""
PIL read-only image support for Microsoft Windows .ico files.
By default the largest resolution image in the file will be loaded. This
can be changed by altering the 'size' attribute before calling 'load'.
The info dictionary has a key 'sizes' that is a list of the sizes available
in the icon file.
Handles classic, XP and Vista icon formats.
When saving, PNG compression is used. Support for this was only added in
Windows Vista. If you are unable to view the icon in Windows, convert the
image to "RGBA" mode before saving.
This plugin is a refactored version of Win32IconImagePlugin by <NAME>
<<EMAIL>>.
https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki
"""
format = "ICO"
format_description = "Windows Icon"
def _open(self):
self.ico = IcoFile(self.fp)
self.info["sizes"] = self.ico.sizes()
self.size = self.ico.entry[0]["dim"]
self.load()
@property
def size(self):
return self._size
@size.setter
def size(self, value):
if value not in self.info["sizes"]:
raise ValueError("This is not one of the allowed sizes of this image")
self._size = value
def load(self):
if self.im and self.im.size == self.size:
# Already loaded
return
im = self.ico.getimage(self.size)
# if tile is PNG, it won't really be loaded yet
im.load()
self.im = im.im
self.mode = im.mode
if im.size != self.size:
warnings.warn("Image was not the expected size")
index = self.ico.getentryindex(self.size)
sizes = list(self.info["sizes"])
sizes[index] = im.size
self.info["sizes"] = set(sizes)
self.size = im.size
def load_seek(self):
# Flag the ImageFile.Parser so that it
# just does all the decode at the end.
pass
#
# --------------------------------------------------------------------
Image.register_open(IcoImageFile.format, IcoImageFile, _accept)
Image.register_save(IcoImageFile.format, _save)
Image.register_extension(IcoImageFile.format, ".ico")
Image.register_mime(IcoImageFile.format, "image/x-icon")
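def _demo_open_ico(path="icon.ico"):
    # Hypothetical sketch, not part of Pillow: the usage described in the
    # IcoImageFile docstring above. `path` is assumed to point at a multi-size
    # .ico file that contains a 16x16 frame.
    with Image.open(path) as im:
        print(sorted(im.info["sizes"]))  # available (width, height) pairs
        im.size = (16, 16)               # pick a frame before loading
        im.load()
        return im.mode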
``` |
{
"source": "Joshua-Barawa/news-app",
"score": 2
} |
#### File: news-app/app/__init__.py
```python
from flask import Flask
from config import config_options
from .main import main as main_blueprint
from .requests import configure_request
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config_options[config_name])
app.register_blueprint(main_blueprint)
configure_request(app)
return app
from app.main import views
```
#### File: app/models/test_news_source.py
```python
import unittest
from news_source import Source
class TestSource(unittest.TestCase):
def setUp(self):
self.new_source = Source("abc", "ABC-News")
def test_instance(self):
self.assertTrue(isinstance(self.new_source, Source))
```
#### File: news-app/app/requests.py
```python
from config import Config
import requests, json
from app.models import news_article, news_source
MOVIE_API_KEY = Config.API_KEY
News_Article = news_article.Article
News_Source = news_source.Source
def configure_request(app):
global api_key
api_key = app.config['API_KEY']
def get_news():
request = requests.get('https://newsapi.org/v2/everything?q=all&apiKey={}'
.format(MOVIE_API_KEY))
response = json.loads(request.content)
news = []
for new in response['articles']:
new = News_Article(new['source'], new['author'], new['title'], new['description'], new['urlToImage'],
new['url'], new['publishedAt'])
news.append(new)
return news
def get_news_sources():
request = requests.get('https://newsapi.org/v2/top-headlines/sources?apiKey={}'
.format(MOVIE_API_KEY))
response = json.loads(request.content)
news_sources = []
for source in response['sources']:
source = News_Source(source['id'], source['name'])
news_sources.append(source)
return news_sources
def get_news_from_source(source):
request = requests.get('https://newsapi.org/v2/everything?q={}&apiKey={}'.format(source, MOVIE_API_KEY))
response = json.loads(request.content)
news = []
for new in response['articles']:
new = News_Article(new['source'], new['author'], new['title'], new['description'], new['urlToImage'],
new['url'], new['publishedAt'])
news.append(new)
return news
``` |
{
"source": "Joshua-Barawa/pitches-IP",
"score": 3
} |
#### File: Joshua-Barawa/pitches-IP/forms.py
```python
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import InputRequired, Email, ValidationError
from models import User
class RegistrationForm(FlaskForm):
email = StringField('Your Email Address', validators=[InputRequired(), Email()])
username = StringField('Enter your username', validators=[InputRequired()])
password = PasswordField('Password', validators=[InputRequired()])
submit = SubmitField('Sign Up')
def validate_username(self, username):
existing_username = User.query.filter_by(username=username.data).first()
if existing_username:
raise ValidationError("The username already exists")
class LoginForm(FlaskForm):
username = StringField("Your email address", validators=[InputRequired()])
password = PasswordField("<PASSWORD>:", validators=[InputRequired()])
submit = SubmitField("Sign In")
``` |
{
"source": "joshua-barber/bokeh_happiness",
"score": 2
} |
#### File: happiness/happiness/bokeh_utils.py
```python
from contextlib import closing
from bokeh.client import push_session, pull_session
from bokeh.document import Document
from bokeh.embed import autoload_server
from .viz.individuals import update_individuals_data
from .viz.team import update_team_data
from .viz.teams import update_teams_data
def get_bokeh_script(user, plot, suffix):
from .models import UserSession
document = Document()
document.add_root(plot)
document.title = suffix
with closing(push_session(document)) as session:
# Save the session id to a UserSession
UserSession.objects.create(user=user, bokeh_session_id=session.id)
# Get the script to pass into the template
script = autoload_server(None, session_id=session.id)
return script
def update_bokeh_sessions(user_sessions):
for us in user_sessions:
with closing(pull_session(session_id=us.bokeh_session_id)) as session:
if len(session.document.roots) == 0:
# In this case, the session_id was from a dead session and
# calling pull_session caused a new empty session to be
# created. So we just delete the UserSession and move on.
# It would be nice if there was a more efficient way - where I
# could just ask bokeh if session x is a session.
us.delete()
else:
# Call the appropriate update method based on the document's title
if session.document.title == 'individuals':
update_individuals_data(user=us.user, session=session)
if session.document.title == 'team':
update_team_data(user=us.user, session=session)
if session.document.title == 'teams':
update_teams_data(user=us.user, session=session)
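def _demo_embed_plot(request, plot):
    # Hypothetical sketch, not part of the original module: how the script from
    # get_bokeh_script might reach a Django template. `plot` is assumed to be a
    # ready-made bokeh figure for request.user, and 'team.html' is an invented
    # template that renders {{ bokeh_script|safe }}.
    from django.shortcuts import render
    script = get_bokeh_script(request.user, plot, suffix='team')
    return render(request, 'team.html', {'bokeh_script': script})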
```
#### File: happiness/viz/utils.py
```python
from bokeh.plotting import Figure
from bokeh.models import Range1d, Legend, FixedTicker
def make_plot():
plot = Figure(
plot_height=400,
plot_width=800,
responsive=True,
tools="xpan,xwheel_zoom,xbox_zoom,reset",
x_axis_type='datetime',
min_border_top=10,
min_border_right=0,
min_border_bottom=0,
min_border_left=30,
outline_line_color=None,
)
plot.x_range.follow = "end"
plot.x_range.follow_interval = 120 * 24 * 60 * 60 * 1000
plot.x_range.range_padding = 0
plot.y_range = Range1d(0, 12)
plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = None
plot.yaxis.bounds = (0, 9)
plot.yaxis.minor_tick_line_color = None
plot.yaxis.ticker = FixedTicker(ticks=[0, 3, 6, 9])
return plot
def make_legend(legends):
return Legend(
legends=legends,
location='top_right',
border_line_color=None,
background_fill_alpha=0.7
)
``` |
{
"source": "JoshuaBeard/netharn",
"score": 2
} |
#### File: netharn/data/base.py
```python
from torch.utils import data as torch_data
class DataMixin(object):
def make_loader(self, *args, **kwargs):
loader = torch_data.DataLoader(self, *args, **kwargs)
return loader
```
#### File: examples/tests/expt_cifar.py
```python
import numpy as np
import ubelt as ub
import torch
# import torchvision
import pandas as pd
from torchvision.datasets import cifar
from netharn import xpu_device
from netharn import monitor
from netharn import initializers
from netharn import hyperparams
from netharn import fit_harness
from netharn.transforms import (ImageCenterScale,)
# from netharn.transforms import (RandomWarpAffine, RandomGamma, RandomBlur,)
import imgaug as ia
import imgaug.augmenters as iaa
from netharn import util
import netharn as nh
class CropTo(iaa.Augmenter):
def __init__(self, shape, name=None, deterministic=False, random_state=None):
super(CropTo, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.shape = shape
def _augment_images(self, images, random_state, parents, hooks):
result = []
nb_images = len(images)
seeds = random_state.randint(0, 10**6, (nb_images,))
for i in range(nb_images):
seed = seeds[i]
height, width = images[i].shape[0:2]
top, bot, left, right = self._draw_samples_image(seed, height, width)
image_cr = images[i][top:bot, left:right]
image_cr = np.pad(image_cr, ((1, 1), (1, 1)), mode='constant')
result.append(image_cr)
return result
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
result = []
nb_images = len(keypoints_on_images)
seeds = random_state.randint(0, 10**6, (nb_images,))
for i, keypoints_on_image in enumerate(keypoints_on_images):
seed = seeds[i]
height, width = keypoints_on_image.shape[0:2]
top, bot, left, right = self._draw_samples_image(seed, height, width)
shifted = keypoints_on_image.shift(x=-left, y=-top)
shifted.shape = (
height - top - bot,
width - left - right
) + shifted.shape[2:]
result.append(shifted)
return result
def _draw_samples_image(self, seed, height, width):
"""
height = 32
width = 32
h, w = shape = (30, 30)
random_state = np.random
"""
random_state = ia.new_random_state(seed)
h, w = self.shape
assert w <= width, '{} {}'.format(w, width)
assert h <= height, '{} {}'.format(h, height)
space_h = height - h
space_w = width - w
top = random_state.randint(0, space_h + 1)
bot = height - (space_h - top)
left = random_state.randint(0, space_w + 1)
right = width - (space_w - left)
sub = [top, bot, left, right]
return sub
def get_parameters(self):
return [self.shape]
class Task(object):
def __init__(task, labelnames=None, ignore_labelnames=[], alias={}):
if labelnames is not None:
task.set_labelnames(labelnames, ignore_labelnames, alias)
def set_labelnames(task, labelnames, ignore_labelnames=[], alias={}):
task.labelnames = list(labelnames)
task.labelname_alias = alias
task.ignore_labelnames = ignore_labelnames
# Remove aliased classes
for k in alias.keys():
if k in task.labelnames:
task.labelnames.remove(k)
# Assign an integer label to each labelname
task.labelname_to_id = ub.invert_dict(dict(enumerate(task.labelnames)))
# Map aliased classes to a different label
for k, v in alias.items():
task.labelname_to_id[k] = task.labelname_to_id[v]
task.ignore_labelnames = ignore_labelnames
task.ignore_labels = np.array(
list(ub.take(task.labelname_to_id, task.ignore_labelnames)))
task.labels = np.arange(len(task.labelnames))
task.relevant_labels = np.setdiff1d(task.labels, task.ignore_labels)
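def _demo_task_alias():
    # Hypothetical sketch (label names invented for illustration): aliased names
    # are removed from `labelnames` but still map onto the surviving label id,
    # and ignored names end up in `ignore_labels`.
    task = Task(labelnames=['background', 'car', 'automobile'],
                ignore_labelnames=['background'],
                alias={'automobile': 'car'})
    assert task.labelnames == ['background', 'car']
    assert task.labelname_to_id == {'background': 0, 'car': 1, 'automobile': 1}
    assert list(task.relevant_labels) == [1]
    return task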
def radial_fourier_mask(img_chw, radius=11, axis=None, clip=None):
"""
In [1] they use a radius of 11.0 on CIFAR-10.
Args:
img_chw (ndarray): assumed to be float 01
References:
[1] Jo and Bengio "Measuring the tendency of CNNs to Learn Surface Statistical Regularities" 2017.
https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.html
CommandLine:
python examples/cifar.py radial_fourier_mask --show
Example:
>>> dset = cifar_training_datasets()['test']
>>> dset.center_inputs = None
>>> img_tensor, label = dset[7]
>>> img_chw = img_tensor.numpy()
>>> out = radial_fourier_mask(img_chw, radius=11)
>>> # xdoc: REQUIRES(--show)
>>> nh.util.qtensure()
>>> def keepdim(func):
>>> def _wrap(im):
>>> needs_transpose = (im.shape[0] == 3)
>>> if needs_transpose:
>>> im = im.transpose(1, 2, 0)
>>> out = func(im)
>>> if needs_transpose:
>>> out = out.transpose(2, 0, 1)
>>> return out
>>> return _wrap
>>> @keepdim
>>> def rgb_to_bgr(im):
>>> return util.convert_colorspace(im, src_space='rgb', dst_space='bgr')
>>> @keepdim
>>> def bgr_to_lab(im):
>>> return util.convert_colorspace(im, src_space='bgr', dst_space='lab')
>>> @keepdim
>>> def lab_to_bgr(im):
>>> return util.convert_colorspace(im, src_space='lab', dst_space='bgr')
>>> @keepdim
>>> def bgr_to_yuv(im):
>>> return util.convert_colorspace(im, src_space='bgr', dst_space='yuv')
>>> @keepdim
>>> def yuv_to_bgr(im):
>>> return util.convert_colorspace(im, src_space='yuv', dst_space='bgr')
>>> dpath = ub.ensuredir('./fouriertest')
>>> from matplotlib import pyplot as plt
>>> for x in ub.ProgIter(range(100)):
>>> img_tensor, label = dset[x]
>>> img_chw = img_tensor.numpy()
>>> bgr_img = rgb_to_bgr(img_chw)
>>> nh.util.imshow(bgr_img.transpose(1, 2, 0), fnum=1)
>>> pnum_ = nh.util.PlotNums(nRows=4, nCols=5)
>>> for r in range(0, 17):
>>> imgt = radial_fourier_mask(bgr_img, r, clip=(0, 1))
>>> nh.util.imshow(imgt.transpose(1, 2, 0), pnum=pnum_(), fnum=2)
>>> plt.gca().set_title('r = {}'.format(r))
>>> nh.util.set_figtitle('BGR')
>>> plt.gcf().savefig(join(dpath, '{}_{:08d}.png'.format('bgr', x)))
>>> pnum_ = nh.util.PlotNums(nRows=4, nCols=5)
>>> for r in range(0, 17):
>>> imgt = lab_to_bgr(radial_fourier_mask(bgr_to_lab(bgr_img), r)).transpose(1, 2, 0)
>>> nh.util.imshow(imgt, pnum=pnum_(), fnum=3)
>>> plt.gca().set_title('r = {}'.format(r))
>>> #imgt = lab_to_bgr(to_lab(bgr_img)).transpose(1, 2, 0)
>>> #nh.util.imshow(lab_to_bgr(to_lab(bgr_img)).transpose(1, 2, 0), pnum=pnum_(), fnum=2)
>>> nh.util.set_figtitle('LAB')
>>> plt.gcf().savefig(join(dpath, '{}_{:08d}.png'.format('lab', x)))
>>> pnum_ = nh.util.PlotNums(nRows=4, nCols=5)
>>> for r in range(0, 17):
>>> imgt = yuv_to_bgr(radial_fourier_mask(bgr_to_yuv(bgr_img), r, clip=(0., 1.))).transpose(1, 2, 0)
>>> nh.util.imshow(imgt, pnum=pnum_(), fnum=4)
>>> plt.gca().set_title('r = {}'.format(r))
>>> nh.util.set_figtitle('YUV')
>>> plt.gcf().savefig(join(dpath, '{}_{:08d}.png'.format('yuv', x)))
>>> nh.util.show_if_requested()
Ignore:
im_chw = bgr_to_lab(bgr_img)
"""
import cv2
rows, cols = img_chw.shape[1:3]
def fourier(s):
# note: cv2 functions would probably be faster here
return np.fft.fftshift(np.fft.fft2(s))
def inv_fourier(f):
# use real because LAB has negative components
return np.real(np.fft.ifft2(np.fft.ifftshift(f)))
diam = radius * 2
left = int(np.floor((cols - diam) / 2))
right = int(np.ceil((cols - diam) / 2))
top = int(np.floor((rows - diam) / 2))
bot = int(np.ceil((rows - diam) / 2))
# element = skimage.morphology.disk(radius)
# mask = np.pad(element, ((top, bot), (left, right)), 'constant')
if diam > 0:
element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (diam, diam))
mask = cv2.copyMakeBorder(element, top, bot, left, right, cv2.BORDER_CONSTANT, value=0)
else:
mask = 0
out = np.empty_like(img_chw)
if axis is None:
for i, s in enumerate(img_chw):
# hadamard product (aka simple element-wise multiplication)
out[i] = inv_fourier(fourier(s) * mask)
else:
for i, s in enumerate(img_chw):
if i in axis:
# hadamard product (aka simple element-wise multiplication)
out[i] = inv_fourier(fourier(s) * mask)
else:
out[i] = s
if clip:
out = np.clip(out, *clip)
return out
# nrows = cv2.getOptimalDFTSize(rows)
# ncols = cv2.getOptimalDFTSize(cols)
# right = ncols - cols
# bottom = nrows - rows
# if right or bottom:
# bordertype = cv2.BORDER_CONSTANT # just to avoid line breakup in PDF file
# nimg = cv2.copyMakeBorder(img, 0, bottom, 0, right, bordertype, value=0)
# dft_chans = [cv2.dft(chan, flags=cv2.DFT_COMPLEX_OUTPUT) for chan in img_chw]
# dft = np.dstack(dft_chans).transpose(2, 0, 1)
# dft_mag = np.dstack([(c ** 2).sum(axis=-1) for c in dft_chans]).transpose(2, 0, 1)
# dft_shift = [np.fft.fftshift(c) for c in dft_chans]
# dft_mag = np.dstack([(c ** 2).sum(axis=-1) for c in dft_shift]).transpose(2, 0, 1)
# dft_filt_shift = [c * mask[:, :, None] for c in dft_shift]
# dft_filt = [np.fft.ifftshift(c) for c in dft_filt_shift]
# idft_filt = [cv2.idft(c) for c in dft_filt]
# img_filt = np.dstack([np.linalg.norm(c, axis=-1) for c in idft_filt])
# nh.util.imshow(dft_mag.transpose(1, 2, 0), norm=True)
# if False:
# nh.util.imshow(np.log(dft_mag[0]), norm=True, pnum=(1, 3, 1))
# nh.util.imshow(np.log(dft_mag[1]), norm=True, pnum=(1, 3, 2))
# nh.util.imshow(np.log(dft_mag[2]), norm=True, pnum=(1, 3, 3))
def zca_whitening_matrix(X):
"""
Function to compute ZCA whitening matrix (aka Mahalanobis whitening).
Args:
X (ndarray): [M x N] matrix, Rows: Variables, Columns: Observations
Returns:
ZCAMatrix: [M x M] matrix
References:
https://stackoverflow.com/a/38590790/887074
Example:
>>> rng = np.random.RandomState(0)
>>> # Construct a matrix of observations from grayscale 8x8 images
>>> gray_images = [rng.rand(8, 8) for _ in range(1000)]
>>> X = np.array([img.ravel() for img in gray_images]).T
>>> M = zca_whitening_matrix(X)
>>> img = gray_images[0]
>>> norm = M.dot(img.ravel()).reshape(8, 8)
>>> # ... for the RGB channels of color images
>>> rgb_images = [rng.rand(3, 8, 8) for _ in range(1000)]
>>> #X = np.array([img.mean(axis=(1, 2)) for img in rgb_images]).T
>>> X = np.hstack([img.reshape(3, -1) for img in rgb_images])
>>> M = zca_whitening_matrix(X)
>>> img = rgb_images[0]
>>> norm = M.dot(img.reshape(3, 64)).reshape(3, 8, 8)
"""
# Covariance matrix [column-wise variables]: Sigma = (X-mu)' * (X-mu) / N
sigma = np.cov(X, rowvar=True) # [M x M]
# Singular Value Decomposition. X = U * np.diag(S) * V
U, S, V = np.linalg.svd(sigma)
# U: [M x M] eigenvectors of sigma.
# S: [M x 1] eigenvalues of sigma.
# V: [M x M] transpose of U
# Whitening constant: prevents division by zero
epsilon = 1e-5
L = np.diag(1.0 / np.sqrt(S + epsilon))
# ZCA Whitening matrix: U * Lambda * U'
ZCAMatrix = np.dot(U, np.dot(L, U.T)) # [M x M]
return ZCAMatrix
class CIFAR10_Task(Task):
"""
task = CIFAR10_Task()
task._initialize()
ignore_labelnames = []
alias = {}
"""
def __init__(task, root=None):
if root is None:
root = ub.ensure_app_cache_dir('netharn')
task.root = root
task._initialize()
def _initialize(task):
from os.path import join
import pickle
train_dset = cifar.CIFAR10(root=task.root, download=False, train=True)
fpath = join(train_dset.root,
cifar.CIFAR10.base_folder, 'batches.meta')
with open(fpath, 'rb') as fo:
entry = pickle.load(fo, encoding='latin1')
labelnames = entry['label_names']
task.set_labelnames(labelnames)
class CIFAR100_Task(Task):
"""
task = CIFAR100_Task()
task._initialize()
ignore_labelnames = []
alias = {}
"""
def __init__(task, root=None):
if root is None:
root = ub.ensure_app_cache_dir('netharn')
task.root = root
task._initialize()
def _initialize(task):
from os.path import join
import pickle
train_dset = cifar.CIFAR100(root=task.root, download=False, train=True)
fpath = join(train_dset.root, cifar.CIFAR100.base_folder, 'meta')
with open(fpath, 'rb') as fo:
entry = pickle.load(fo, encoding='latin1')
labelnames = entry['fine_label_names']
task.set_labelnames(labelnames)
def mutex_clf_gt_info(gt_labels, task):
"""
gt_labels = train_dset.train_labels
"""
index = pd.Index(task.labels, name='label')
gtstats = pd.DataFrame(0, index=index, columns=['freq'], dtype=np.int)
label_freq = pd.value_counts(gt_labels)
gtstats.freq = pd.to_numeric(label_freq)
gtstats['classname'] = list(ub.take(task.labelnames, gtstats.index))
gtstats['mf_weight'] = gtstats.freq.median() / gtstats.freq
gtstats.loc[~np.isfinite(gtstats.mf_weight), 'mf_weight'] = 1
# Clip weights, so nothing gets crazy high weights, low weights are ok
gtstats = gtstats.sort_index()
gtstats.index.name = 'label'
gtstats = gtstats.reset_index().set_index('classname', drop=False)
return gtstats
class InMemoryInputs(ub.NiceRepr):
"""
Change inputs.Inputs to OnDiskInputs
"""
def __init__(inputs, tag=''):
inputs.tag = tag
inputs.im = None
inputs.gt = None
inputs.colorspace = None
inputs.input_id = None
def __nice__(inputs):
n = len(inputs)
return '{} {}'.format(inputs.tag, n)
def __len__(inputs):
if inputs.im is not None:
n = len(inputs.im)
elif inputs.gt is not None:
n = len(inputs.gt)
else:
n = 0
return n
@classmethod
def from_bhwc_rgb(cls, bhwc, labels=None, **kw):
# convert to bhwc
inputs = cls(**kw)
inputs.im = bhwc
inputs.gt = labels
inputs.colorspace = 'rgb'
return inputs
def convert_colorspace(inputs, colorspace, inplace=False):
if colorspace.lower() == inputs.colorspace.lower():
if not inplace:
return inputs.im
return
im_out = np.empty_like(inputs.im)
dst = np.ascontiguousarray(np.empty_like(inputs.im[0]))
for ix, im in enumerate(inputs.im):
util.convert_colorspace(im, src_space=inputs.colorspace,
dst_space=colorspace, dst=dst)
im_out[ix] = dst
if inplace:
inputs.im = im_out
inputs.colorspace = colorspace
else:
return im_out
def take(inputs, idxs, **kw):
new_inputs = inputs.__class__(**kw)
new_inputs.im = inputs.im.take(idxs, axis=0)
new_inputs.gt = inputs.gt.take(idxs, axis=0)
new_inputs.colorspace = inputs.colorspace
return new_inputs
def prepare_id(self, force=False):
if self.input_id is not None and not force:
return
depends = []
depends.append(self.im)
depends.append(self.gt)
def _set_id_from_dependency(self, depends):
"""
Allow for arbitrary representation of dependencies
(user must ensure that it is consistent)
"""
print('Preparing id for {} images'.format(self.tag))
abbrev = 8
hashid = util.hash_data(depends)[:abbrev]
n_input = len(self)
self.input_id = '{}-{}'.format(n_input, hashid)
print(' * n_input = {}'.format(n_input))
print(' * input_id = {}'.format(self.input_id))
class CIFAR_Wrapper(torch.utils.data.Dataset): # cifar.CIFAR10):
def __init__(dset, inputs, task, workdir, output_colorspace='RGB'):
dset.inputs = inputs
dset.task = task
dset.output_colorspace = output_colorspace
dset.rng = np.random.RandomState(432432)
inputs_base = ub.ensuredir((workdir, 'inputs'))
inputs.base_dpath = inputs_base
if len(inputs):
inputs.prepare_id()
dset.input_id = inputs.input_id
dset.with_gt = dset.inputs.gt is not None
else:
dset.input_id = ''
# TODO: only use horizontal flipping and translation by 4 pixels to
# match results from other papers
# https://arxiv.org/pdf/1603.09382.pdf page 8
dset.augment = None
# dset.im_augment = torchvision.transforms.Compose([
# RandomGamma(rng=dset.rng),
# RandomBlur(rng=dset.rng),
# ])
# dset.rand_aff = RandomWarpAffine(dset.rng)
augmentors = [
# iaa.Sometimes(.8, iaa.ContrastNormalization((0.2, 1.8))),
iaa.Fliplr(p=.5),
iaa.Affine(translate_px={'x': (-1, 1), 'y': (-1, 1)}),
# CropTo((30, 30)),
# iaa.Crop(px=(1, 1, 1, 1)),
# imgaug.Brightness(63),
# imgaug.RandomCrop((30, 30)),
# imgaug.MeanVarianceNormalize(all_channel=True)
]
dset.augmenter = iaa.Sequential(augmentors)
# iaa.Sequential([
# iaa.Affine(translate_px={"x":-40}),
# iaa.AdditiveGaussianNoise(scale=0.1*255)
# ])
# dset.rand_aff = RandomWarpAffine(
# dset.rng, tx_pdf=(-2, 2), ty_pdf=(-2, 2), flip_lr_prob=.5,
# zoom_pdf=None, shear_pdf=None, flip_ud_prob=None,
# enable_stretch=None, default_distribution='uniform')
dset.center_inputs = None
def _make_normalizer(dset, mode='independent'):
"""
Example:
>>> inputs, task = cifar_inputs(train=True)
>>> workdir = ub.ensuredir(ub.truepath('~/data/work/cifar'))
>>> dset = CIFAR_Wrapper(inputs, task, workdir, 'RGB')
>>> center_inputs = dset._make_normalizer('independent')
"""
if len(dset.inputs):
# compute normalizers in the output colorspace
out_im = dset.inputs.convert_colorspace(dset.output_colorspace,
inplace=False)
if mode == 'dependant':
# dependent centering per channel (for RGB)
im_mean = out_im.mean()
im_scale = out_im.std()
elif mode == 'independent':
# Independent centering per channel (for LAB)
im_mean = out_im.mean(axis=(0, 1, 2))
im_scale = out_im.std(axis=(0, 1, 2))
center_inputs = ImageCenterScale(im_mean, im_scale)
dset.center_inputs = center_inputs
return center_inputs
def __len__(dset):
return len(dset.inputs)
def load_inputs(dset, index):
"""
Ignore:
>>> inputs, task = cifar_inputs(train=False)
>>> workdir = ub.ensuredir(ub.truepath('~/data/work/cifar'))
>>> dset = CIFAR_Wrapper(inputs, task, workdir, 'LAB')
>>> dset._make_normalizer('independent')
>>> index = 0
>>> im, gt = dset.load_inputs(index)
Example:
>>> inputs, task = cifar_inputs(train=False)
>>> workdir = ub.ensuredir(ub.truepath('~/data/work/cifar'))
>>> dset = CIFAR_Wrapper(inputs, task, workdir, 'RGB')
>>> index = 0
>>> im, gt = dset.load_inputs(index)
>>> from netharn.util import mplutil
>>> mplutil.qtensure()
>>> dset = CIFAR_Wrapper(inputs, task, workdir, 'RGB')
>>> dset.augment = True
>>> im, gt = dset.load_inputs(index)
>>> mplutil.imshow(im, colorspace='rgb')
>>> dset = CIFAR_Wrapper(inputs, task, workdir, 'LAB')
>>> dset.augment = True
>>> im, gt = dset.load_inputs(index)
>>> mplutil.imshow(im, colorspace='LAB')
"""
assert dset.inputs.colorspace.lower() == 'rgb', (
'we must be in rgb for augmentation')
im = dset.inputs.im[index]
if dset.inputs.gt is not None:
gt = dset.inputs.gt[index]
else:
gt = None
if dset.augment:
# Image augmentation must be done in RGB
# Augment intensity independently
# im = dset.im_augment(im)
# Augment geometry consistently
# params = dset.rand_aff.random_params()
# im = dset.rand_aff.warp(im, params, interp='cubic', backend='cv2')
im = util.convert_colorspace(im, src_space=dset.inputs.colorspace,
dst_space='rgb')
# Do augmentation in uint8 RGB
im = (im * 255).astype(np.uint8)
im = dset.augmenter.augment_image(im)
im = (im / 255).astype(np.float32)
im = util.convert_colorspace(im, src_space='rgb',
dst_space=dset.output_colorspace)
else:
im = util.convert_colorspace(im, src_space=dset.inputs.colorspace,
dst_space=dset.output_colorspace)
# Do centering of inputs
if dset.center_inputs:
im = dset.center_inputs(im)
return im, gt
def __getitem__(dset, index):
from netharn import im_loaders
im, gt = dset.load_inputs(index)
input_tensor = im_loaders.numpy_image_to_float_tensor(im)
if dset.with_gt:
# print('gotitem: ' + str(data_tensor.shape))
# print('gt_tensor: ' + str(gt_tensor.shape))
return input_tensor, gt
else:
return input_tensor
@property
def n_channels(dset):
return 3
@property
def n_classes(dset):
return int(dset.task.labels.max() + 1)
@property
def ignore_labels(dset):
return dset.task.ignore_labels
def class_weights(dset):
"""
>>> from netharn.live.sseg_train import *
>>> dset = load_task_dataset('urban_mapper_3d')['train']
>>> dset.class_weights()
"""
# # Handle class weights
# print('prep class weights')
# gtstats = dset.inputs.prepare_gtstats(dset.task)
# gtstats = dset.inputs.gtstats
# # Take class weights (ensure they are in the same order as labels)
# mfweight_dict = gtstats['mf_weight'].to_dict()
# class_weights = np.array(list(ub.take(mfweight_dict, dset.task.classnames)))
# class_weights[dset.task.ignore_labels] = 0
# # HACK
# # class_weights[0] = 1.0
# # class_weights[1] = 0.7
# print('class_weights = {!r}'.format(class_weights))
# print('class_names = {!r}'.format(dset.task.classnames))
class_weights = np.ones(dset.n_classes)
return class_weights
def cifar_inputs(train=False, cifar_num=10):
root = ub.ensure_app_cache_dir('netharn')
if cifar_num == 10:
train_dset = cifar.CIFAR10(root=root, download=True, train=train)
task = CIFAR10_Task()
else:
train_dset = cifar.CIFAR100(root=root, download=True, train=train)
task = CIFAR100_Task()
if train:
bchw = (train_dset.train_data).astype(np.float32) / 255.0
labels = np.array(train_dset.train_labels)
else:
bchw = (train_dset.test_data).astype(np.float32) / 255.0
labels = np.array(train_dset.test_labels)
inputs = InMemoryInputs.from_bhwc_rgb(bchw, labels=labels)
if train:
inputs.tag = 'learn'
else:
inputs.tag = 'test'
return inputs, task
def cifar_training_datasets(output_colorspace='RGB', norm_mode='independent',
cifar_num=10):
"""
Example:
>>> datasets = cifar_training_datasets()
"""
inputs, task = cifar_inputs(train=True, cifar_num=cifar_num)
# split training into train / validation
# 45K / 5K validation split was used in densenet and resnet papers.
# https://arxiv.org/pdf/1512.03385.pdf page 7
# https://arxiv.org/pdf/1608.06993.pdf page 5
vali_frac = .1 # 10% is 5K images
n_vali = int(len(inputs) * vali_frac)
# n_vali = 10000 # 10K validation as in http://torch.ch/blog/2015/07/30/cifar.html
# the gt indexes seem to already be scrambled, I think other papers sample
# validation from the end, so lets do that
# The NIN paper https://arxiv.org/pdf/1312.4400.pdf in section 4 mentions
# that it uses the last 10K images for validation
input_idxs = np.arange(len(inputs))
    # or just uncomment this line for reproducible random sampling
# input_idxs = util.random_indices(len(inputs), seed=1184576173)
train_idxs = sorted(input_idxs[:-n_vali])
vali_idxs = sorted(input_idxs[-n_vali:])
train_inputs = inputs.take(train_idxs, tag='train')
vali_inputs = inputs.take(vali_idxs, tag='vali')
test_inputs, _ = cifar_inputs(train=False, cifar_num=cifar_num)
    # The dataset name and indices should fully specify dependencies
train_inputs._set_id_from_dependency(
['cifar{}-train'.format(cifar_num), train_idxs])
vali_inputs._set_id_from_dependency(
['cifar{}-train'.format(cifar_num), vali_idxs])
test_inputs._set_id_from_dependency(['cifar{}-test'.format(cifar_num)])
workdir = ub.ensuredir(ub.truepath('~/data/work/cifar'))
train_dset = CIFAR_Wrapper(
train_inputs, task, workdir, output_colorspace=output_colorspace)
vali_dset = CIFAR_Wrapper(
vali_inputs, task, workdir, output_colorspace=output_colorspace)
test_dset = CIFAR_Wrapper(test_inputs, task, workdir,
output_colorspace=output_colorspace)
print('built datasets')
datasets = {
'train': train_dset,
'vali': vali_dset,
'test': test_dset,
}
print('computing normalizers')
datasets['train'].center_inputs = datasets['train']._make_normalizer(
norm_mode)
for key in datasets.keys():
datasets[key].center_inputs = datasets['train'].center_inputs
print('computed normalizers')
datasets['train'].augment = True
return datasets
def train():
"""
Example:
>>> train()
"""
import random
np.random.seed(1031726816 % 4294967295)
torch.manual_seed(137852547 % 4294967295)
random.seed(2497950049 % 4294967295)
xpu = xpu_device.XPU.from_argv()
print('Chosen xpu = {!r}'.format(xpu))
cifar_num = 10
if ub.argflag('--lab'):
datasets = cifar_training_datasets(
output_colorspace='LAB', norm_mode='independent', cifar_num=cifar_num)
elif ub.argflag('--rgb'):
datasets = cifar_training_datasets(
output_colorspace='RGB', norm_mode='independent', cifar_num=cifar_num)
elif ub.argflag('--rgb-dep'):
datasets = cifar_training_datasets(
output_colorspace='RGB', norm_mode='dependant', cifar_num=cifar_num)
else:
raise AssertionError('specify --rgb / --lab')
import netharn.models.densenet
# batch_size = (128 // 3) * 3
batch_size = 64
# initializer_ = (initializers.KaimingNormal, {
# 'nonlinearity': 'relu',
# })
lr = 0.1
initializer_ = (initializers.LSUV, {})
hyper = hyperparams.HyperParams(
model=(netharn.models.densenet.DenseNet, {
'cifar': True,
'block_config': (32, 32, 32), # 100 layer depth
'num_classes': datasets['train'].n_classes,
'drop_rate': float(ub.argval('--drop_rate', default=.2)),
'groups': 1,
}),
optimizer=(torch.optim.SGD, {
# 'weight_decay': .0005,
'weight_decay': float(ub.argval('--weight_decay', default=.0005)),
'momentum': 0.9,
'nesterov': True,
'lr': 0.1,
}),
scheduler=(nh.schedulers.ListedLR, {
'points': {
0: lr,
150: lr * 0.1,
250: lr * 0.01,
},
'interpolate': False
}),
monitor=(nh.Monitor, {
'minimize': ['loss'],
'maximize': ['mAP'],
'patience': 314,
'max_epoch': 314,
}),
initializer=initializer_,
criterion=(torch.nn.CrossEntropyLoss, {
}),
# Specify anything else that is special about your hyperparams here
# Especially if you make a custom_batch_runner
augment=str(datasets['train'].augmenter),
other=ub.dict_union({
# TODO: type of augmentation as a parameter dependency
# 'augmenter': str(datasets['train'].augmenter),
# 'augment': datasets['train'].augment,
'batch_size': batch_size,
'colorspace': datasets['train'].output_colorspace,
'n_classes': datasets['train'].n_classes,
# 'center_inputs': datasets['train'].center_inputs,
}, datasets['train'].center_inputs.__dict__),
)
# if ub.argflag('--rgb-indie'):
# hyper.other['norm'] = 'dependant'
hyper.input_ids['train'] = datasets['train'].input_id
xpu = xpu_device.XPU.cast('auto')
print('xpu = {}'.format(xpu))
data_kw = {'batch_size': batch_size}
if xpu.is_gpu():
data_kw.update({'num_workers': 8, 'pin_memory': True})
tags = ['train', 'vali', 'test']
loaders = ub.odict()
for tag in tags:
dset = datasets[tag]
shuffle = tag == 'train'
data_kw_ = data_kw.copy()
if tag != 'train':
data_kw_['batch_size'] = max(batch_size // 4, 1)
loader = torch.utils.data.DataLoader(dset, shuffle=shuffle, **data_kw_)
loaders[tag] = loader
harn = fit_harness.FitHarness(
hyper=hyper, datasets=datasets, xpu=xpu,
loaders=loaders,
)
# harn.monitor = early_stop.EarlyStop(patience=40)
harn.monitor = monitor.Monitor(min_keys=['loss'],
max_keys=['global_acc', 'class_acc'],
patience=40)
# ignore_label = datasets['train'].ignore_label
# from netharn import metrics
workdir = ub.ensuredir('train_cifar_work')
harn.setup_dpath(workdir)
harn.run()
"""
python examples/cifar.py train --lab
python examples/cifar.py train --rgb-indie
"""
```
#### File: models/yolo2/multiscale_batch_sampler.py
```python
import torch.utils.data.sampler as torch_sampler
import torch
class MultiScaleBatchSampler(torch_sampler.BatchSampler):
"""
    Indices returned in the batch are tuples indicating data index and scale
index. Requires that dataset has a `multi_scale_inp_size` attribute.
Args:
sampler (Sampler): Base sampler. Must have a data_source attribute.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
        resample_freq (int): how often to change scales. If None, then
only one scale is used.
Example:
>>> import torch.utils.data as torch_data
>>> class DummyDatset(torch_data.Dataset):
>>> def __init__(self):
>>> super(DummyDatset, self).__init__()
>>> self.multi_scale_inp_size = [1, 2, 3, 4]
>>> def __len__(self):
>>> return 34
>>> batch_size = 16
>>> data_source = DummyDatset()
>>> sampler1 = torch_sampler.RandomSampler(data_source)
>>> sampler2 = torch_sampler.SequentialSampler(data_source)
>>> rand = MultiScaleBatchSampler(sampler1, resample_freq=10)
>>> seq = MultiScaleBatchSampler(sampler2, resample_freq=None)
>>> rand_idxs = list(iter(rand))
>>> seq_idxs = list(iter(seq))
>>> assert len(rand_idxs[0]) == 16
>>> assert len(rand_idxs[0][0]) == 2
>>> assert len(rand_idxs[-1]) == 2
>>> assert {len({x[1] for x in xs}) for xs in rand_idxs} == {1}
>>> assert {x[1] for xs in seq_idxs for x in xs} == {None}
"""
def __init__(self, sampler, batch_size=16, drop_last=False,
resample_freq=10):
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
self.num_scales = len(sampler.data_source.multi_scale_inp_size)
self.resample_freq = resample_freq
def __iter__(self):
batch = []
if self.resample_freq:
scale_index = int(torch.rand(1) * self.num_scales)
else:
scale_index = None
for idx in self.sampler:
batch.append((int(idx), scale_index))
if len(batch) == self.batch_size:
yield batch
if self.resample_freq and idx % self.resample_freq == 0:
# choose a new scale index every 10 batches
scale_index = int(torch.rand(1) * self.num_scales)
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
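def _demo_loader(dset):
    # Hypothetical sketch, not part of the original module: plugging the batch
    # sampler into a DataLoader. The dataset's __getitem__ is assumed to accept
    # the (data_index, scale_index) tuples that each batch contains.
    base = torch_sampler.RandomSampler(dset)
    batch_sampler = MultiScaleBatchSampler(base, batch_size=16, resample_freq=10)
    return torch.utils.data.DataLoader(dset, batch_sampler=batch_sampler)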
if __name__ == '__main__':
r"""
CommandLine:
python -m netharn.models.yolo2.multiscale_batch_sampler all
"""
import xdoctest
xdoctest.doctest_module(__file__)
```
#### File: netharn/util/mplutil.py
```python
from __future__ import absolute_import, division, print_function
import cv2
import pandas as pd
import numpy as np
import six
import ubelt as ub
from six.moves import zip_longest
from os.path import join, dirname
import warnings
def multi_plot(xdata=None, ydata=[], **kwargs):
r"""
plots multiple lines, bars, etc...
This is the big function that implements almost all of the heavy lifting in
this file. Any function not using this should probably find a way to use
it. It is pretty general and relatively clean.
Args:
xdata (ndarray): can also be a list of arrays
ydata (list or dict of ndarrays): can also be a single array
**kwargs:
Misc:
fnum, pnum, use_legend, legend_loc
Labels:
xlabel, ylabel, title, figtitle
ticksize, titlesize, legendsize, labelsize
Grid:
gridlinewidth, gridlinestyle
Ticks:
num_xticks, num_yticks, tickwidth, ticklength, ticksize
Data:
xmin, xmax, ymin, ymax, spread_list
# can append _list to any of these
# these can be dictionaries if ydata was also a dict
plot_kw_keys = ['label', 'color', 'marker', 'markersize',
'markeredgewidth', 'linewidth', 'linestyle']
any plot_kw key can be a scalar (corresponding to all ydatas),
a list if ydata was specified as a list, or a dict if ydata was
specified as a dict.
kind = ['bar', 'plot', ...]
if kind='plot':
spread
if kind='bar':
stacked, width
References:
matplotlib.org/examples/api/barchart_demo.html
CommandLine:
python -m netharn.util.mplutil multi_plot:0 --show
python -m netharn.util.mplutil multi_plot:1 --show
Example:
>>> autompl()
>>> xdata = [1, 2, 3, 4, 5]
>>> ydata_list = [[1, 2, 3, 4, 5], [3, 3, 3, 3, 3], [5, 4, np.nan, 2, 1], [4, 3, np.nan, 1, 0]]
>>> kwargs = {'label': ['spamΣ', 'eggs', 'jamµ', 'pram'], 'linestyle': '-'}
>>> #fig = multi_plot(xdata, ydata_list, title='$\phi_1(\\vec{x})$', xlabel='\nfds', **kwargs)
>>> fig = multi_plot(xdata, ydata_list, title='ΣΣΣµµµ', xlabel='\nfdsΣΣΣµµµ', **kwargs)
>>> show_if_requested()
Example:
>>> autompl()
>>> fig1 = multi_plot([1, 2, 3], [4, 5, 6])
>>> fig2 = multi_plot([1, 2, 3], [4, 5, 6], fnum=4)
>>> show_if_requested()
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
ydata_list = ydata
if isinstance(ydata_list, dict):
# Special case where ydata is a dictionary
if isinstance(xdata, six.string_types):
# Special-er case where xdata is specified in ydata
xkey = xdata
ykeys = set(ydata_list.keys()) - {xkey}
xdata = ydata_list[xkey]
else:
ykeys = list(ydata_list.keys())
# Normalize input
ydata_list = list(ub.take(ydata_list, ykeys))
kwargs['label_list'] = kwargs.get('label_list', ykeys)
else:
ykeys = None
def is_listlike(data):
flag = isinstance(data, (list, np.ndarray, tuple, pd.Series))
flag &= hasattr(data, '__getitem__') and hasattr(data, '__len__')
return flag
def is_list_of_scalars(data):
if is_listlike(data):
if len(data) > 0 and not is_listlike(data[0]):
return True
return False
def is_list_of_lists(data):
if is_listlike(data):
if len(data) > 0 and is_listlike(data[0]):
return True
return False
# allow ydata_list to be passed without a container
if is_list_of_scalars(ydata_list):
ydata_list = [np.array(ydata_list)]
if xdata is None:
xdata = list(range(len(ydata_list[0])))
num_lines = len(ydata_list)
# Transform xdata into xdata_list
if is_list_of_lists(xdata):
xdata_list = [np.array(xd, copy=True) for xd in xdata]
else:
xdata_list = [np.array(xdata, copy=True)] * num_lines
fnum = ensure_fnum(kwargs.get('fnum', None))
pnum = kwargs.get('pnum', None)
kind = kwargs.get('kind', 'plot')
transpose = kwargs.get('transpose', False)
def parsekw_list(key, kwargs, num_lines=num_lines, ykeys=ykeys):
""" copies relevant plot commands into plot_list_kw """
if key in kwargs:
val_list = kwargs[key]
elif key + '_list' in kwargs:
            warnings.warn('*_list is deprecated, just use kwarg {}'.format(key))
val_list = kwargs[key + '_list']
elif key + 's' in kwargs:
# hack, multiple ways to do something
            warnings.warn('*s is deprecated, just use kwarg {}'.format(key))
val_list = kwargs[key + 's']
else:
val_list = None
if val_list is not None:
if isinstance(val_list, dict):
if ykeys is None:
raise ValueError('ydata is not a dict, but a property was.')
else:
val_list = [val_list[key] for key in ykeys]
if not isinstance(val_list, list):
val_list = [val_list] * num_lines
return val_list
# Parse out arguments to ax.plot
plot_kw_keys = ['label', 'color', 'marker', 'markersize',
'markeredgewidth', 'linewidth', 'linestyle', 'alpha']
# hackish / extra args that dont go to plot, but help
extra_plot_kw_keys = ['spread_alpha', 'autolabel', 'edgecolor', 'fill']
plot_kw_keys += extra_plot_kw_keys
plot_ks_vals = [parsekw_list(key, kwargs) for key in plot_kw_keys]
plot_list_kw = dict([
(key, vals)
for key, vals in zip(plot_kw_keys, plot_ks_vals) if vals is not None
])
if 'color' not in plot_list_kw:
plot_list_kw['color'] = distinct_colors(num_lines)
if kind == 'plot':
if 'marker' not in plot_list_kw:
plot_list_kw['marker'] = distinct_markers(num_lines)
if 'spread_alpha' not in plot_list_kw:
plot_list_kw['spread_alpha'] = [.2] * num_lines
if kind == 'bar':
# Remove non-bar kwargs
for key in ['markeredgewidth', 'linewidth', 'marker', 'markersize', 'linestyle']:
plot_list_kw.pop(key, None)
stacked = kwargs.get('stacked', False)
width_key = 'height' if transpose else 'width'
if 'width_list' in kwargs:
plot_list_kw[width_key] = kwargs['width_list']
else:
width = kwargs.get('width', .9)
# if width is None:
# # HACK: need variable width
# # width = np.mean(np.diff(xdata_list[0]))
# width = .9
if not stacked:
width /= num_lines
#plot_list_kw['orientation'] = ['horizontal'] * num_lines
plot_list_kw[width_key] = [width] * num_lines
spread_list = kwargs.get('spread_list', None)
if spread_list is None:
pass
# nest into a list of dicts for each line in the multiplot
valid_keys = list(set(plot_list_kw.keys()) - set(extra_plot_kw_keys))
valid_vals = list(ub.dict_take(plot_list_kw, valid_keys))
plot_kw_list = [dict(zip(valid_keys, vals)) for vals in zip(*valid_vals)]
extra_kw_keys = [key for key in extra_plot_kw_keys if key in plot_list_kw]
extra_kw_vals = list(ub.dict_take(plot_list_kw, extra_kw_keys))
extra_kw_list = [dict(zip(extra_kw_keys, vals)) for vals in zip(*extra_kw_vals)]
# Get passed in axes or setup a new figure
ax = kwargs.get('ax', None)
if ax is None:
doclf = kwargs.get('doclf', False)
fig = figure(fnum=fnum, pnum=pnum, docla=False, doclf=doclf)
ax = plt.gca()
else:
plt.sca(ax)
fig = ax.figure
# +---------------
# Draw plot lines
ydata_list = np.array(ydata_list)
if transpose:
if kind == 'bar':
plot_func = ax.barh
elif kind == 'plot':
def plot_func(_x, _y, **kw):
return ax.plot(_y, _x, **kw)
else:
plot_func = getattr(ax, kind) # usually ax.plot
assert len(ydata_list) > 0, 'no ydata'
#assert len(extra_kw_list) == len(plot_kw_list), 'bad length'
#assert len(extra_kw_list) == len(ydata_list), 'bad length'
_iter = enumerate(zip_longest(xdata_list, ydata_list, plot_kw_list, extra_kw_list))
for count, (_xdata, _ydata, plot_kw, extra_kw) in _iter:
ymask = np.isfinite(_ydata)
ydata_ = _ydata.compress(ymask)
xdata_ = _xdata.compress(ymask)
if kind == 'bar':
if stacked:
# Plot bars on top of each other
xdata_ = xdata_
else:
# Plot bars side by side
baseoffset = (width * num_lines) / 2
lineoffset = (width * count)
                offset = baseoffset - lineoffset  # FIXME for more histogram bars
xdata_ = xdata_ - offset
# width_key = 'height' if transpose else 'width'
# plot_kw[width_key] = np.diff(xdata)
objs = plot_func(xdata_, ydata_, **plot_kw)
if kind == 'bar':
if extra_kw is not None and 'edgecolor' in extra_kw:
for rect in objs:
rect.set_edgecolor(extra_kw['edgecolor'])
if extra_kw is not None and extra_kw.get('autolabel', False):
                # FIXME: probably a more canonical way to include bar
                # autolabeling with transpose support, but this is a hack that
# works for now
for rect in objs:
if transpose:
numlbl = width = rect.get_width()
xpos = width + ((_xdata.max() - _xdata.min()) * .005)
ypos = rect.get_y() + rect.get_height() / 2.
ha, va = 'left', 'center'
else:
numlbl = height = rect.get_height()
xpos = rect.get_x() + rect.get_width() / 2.
ypos = 1.05 * height
ha, va = 'center', 'bottom'
barlbl = '%.3f' % (numlbl,)
ax.text(xpos, ypos, barlbl, ha=ha, va=va)
# print('extra_kw = %r' % (extra_kw,))
if kind == 'plot' and extra_kw.get('fill', False):
ax.fill_between(_xdata, ydata_, alpha=plot_kw.get('alpha', 1.0),
color=plot_kw.get('color', None)) # , zorder=0)
if spread_list is not None:
# Plots a spread around plot lines usually indicating standard
# deviation
_xdata = np.array(_xdata)
spread = spread_list[count]
ydata_ave = np.array(ydata_)
y_data_dev = np.array(spread)
y_data_max = ydata_ave + y_data_dev
y_data_min = ydata_ave - y_data_dev
ax = plt.gca()
spread_alpha = extra_kw['spread_alpha']
ax.fill_between(_xdata, y_data_min, y_data_max, alpha=spread_alpha,
color=plot_kw.get('color', None)) # , zorder=0)
# L________________
#max_y = max(np.max(y_data), max_y)
#min_y = np.min(y_data) if min_y is None else min(np.min(y_data), min_y)
ydata = _ydata # HACK
xdata = _xdata # HACK
if transpose:
#xdata_list = ydata_list
ydata = xdata
# Hack / Fix any transpose issues
def transpose_key(key):
if key.startswith('x'):
return 'y' + key[1:]
elif key.startswith('y'):
return 'x' + key[1:]
elif key.startswith('num_x'):
# hackier, fixme to use regex or something
return 'num_y' + key[5:]
elif key.startswith('num_y'):
# hackier, fixme to use regex or something
return 'num_x' + key[5:]
else:
return key
kwargs = {transpose_key(key): val for key, val in kwargs.items()}
# Setup axes labeling
title = kwargs.get('title', None)
xlabel = kwargs.get('xlabel', '')
ylabel = kwargs.get('ylabel', '')
def none_or_unicode(text):
return None if text is None else ub.ensure_unicode(text)
xlabel = none_or_unicode(xlabel)
ylabel = none_or_unicode(ylabel)
title = none_or_unicode(title)
# Initial integration with mpl rcParams standards
mplrc = mpl.rcParams.copy()
mplrc.update({
# 'legend.fontsize': custom_figure.LEGEND_SIZE,
# 'axes.titlesize': custom_figure.TITLE_SIZE,
# 'axes.labelsize': custom_figure.LABEL_SIZE,
# 'legend.facecolor': 'w',
# 'font.family': 'sans-serif',
# 'xtick.labelsize': custom_figure.TICK_SIZE,
# 'ytick.labelsize': custom_figure.TICK_SIZE,
})
mplrc.update(kwargs.get('rcParams', {}))
titlesize = kwargs.get('titlesize', mplrc['axes.titlesize'])
labelsize = kwargs.get('labelsize', mplrc['axes.labelsize'])
legendsize = kwargs.get('legendsize', mplrc['legend.fontsize'])
xticksize = kwargs.get('ticksize', mplrc['xtick.labelsize'])
yticksize = kwargs.get('ticksize', mplrc['ytick.labelsize'])
family = kwargs.get('fontfamily', mplrc['font.family'])
tickformat = kwargs.get('tickformat', None)
ytickformat = kwargs.get('ytickformat', tickformat)
xtickformat = kwargs.get('xtickformat', tickformat)
# 'DejaVu Sans','Verdana', 'Arial'
weight = kwargs.get('fontweight', None)
if weight is None:
weight = 'normal'
labelkw = {
'fontproperties': mpl.font_manager.FontProperties(
weight=weight,
family=family, size=labelsize)
}
ax.set_xlabel(xlabel, **labelkw)
ax.set_ylabel(ylabel, **labelkw)
tick_fontprop = mpl.font_manager.FontProperties(family=family,
weight=weight)
if tick_fontprop is not None:
for ticklabel in ax.get_xticklabels():
ticklabel.set_fontproperties(tick_fontprop)
for ticklabel in ax.get_yticklabels():
ticklabel.set_fontproperties(tick_fontprop)
if xticksize is not None:
for ticklabel in ax.get_xticklabels():
ticklabel.set_fontsize(xticksize)
if yticksize is not None:
for ticklabel in ax.get_yticklabels():
ticklabel.set_fontsize(yticksize)
if xtickformat is not None:
# mpl.ticker.StrMethodFormatter # newstyle
# mpl.ticker.FormatStrFormatter # oldstyle
ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(xtickformat))
if ytickformat is not None:
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(ytickformat))
xtick_kw = ytick_kw = {
'width': kwargs.get('tickwidth', None),
'length': kwargs.get('ticklength', None),
}
xtick_kw = {k: v for k, v in xtick_kw.items() if v is not None}
ytick_kw = {k: v for k, v in ytick_kw.items() if v is not None}
ax.xaxis.set_tick_params(**xtick_kw)
ax.yaxis.set_tick_params(**ytick_kw)
#ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
# Setup axes limits
if 'xlim' in kwargs:
xlim = kwargs['xlim']
if xlim is not None:
if 'xmin' not in kwargs and 'xmax' not in kwargs:
kwargs['xmin'] = xlim[0]
kwargs['xmax'] = xlim[1]
else:
raise ValueError('use xmax, xmin instead of xlim')
if 'ylim' in kwargs:
ylim = kwargs['ylim']
if ylim is not None:
if 'ymin' not in kwargs and 'ymax' not in kwargs:
kwargs['ymin'] = ylim[0]
kwargs['ymax'] = ylim[1]
else:
raise ValueError('use ymax, ymin instead of ylim')
xmin = kwargs.get('xmin', ax.get_xlim()[0])
xmax = kwargs.get('xmax', ax.get_xlim()[1])
ymin = kwargs.get('ymin', ax.get_ylim()[0])
ymax = kwargs.get('ymax', ax.get_ylim()[1])
text_type = six.text_type
if text_type(xmax) == 'data':
xmax = max([xd.max() for xd in xdata_list])
if text_type(xmin) == 'data':
xmin = min([xd.min() for xd in xdata_list])
# Setup axes ticks
num_xticks = kwargs.get('num_xticks', None)
num_yticks = kwargs.get('num_yticks', None)
if num_xticks is not None:
# TODO check if xdata is integral
if xdata.dtype.kind == 'i':
xticks = np.linspace(np.ceil(xmin), np.floor(xmax),
num_xticks).astype(np.int32)
else:
xticks = np.linspace((xmin), (xmax), num_xticks)
ax.set_xticks(xticks)
if num_yticks is not None:
if ydata.dtype.kind == 'i':
yticks = np.linspace(np.ceil(ymin), np.floor(ymax),
num_yticks).astype(np.int32)
else:
yticks = np.linspace((ymin), (ymax), num_yticks)
ax.set_yticks(yticks)
force_xticks = kwargs.get('force_xticks', None)
if force_xticks is not None:
xticks = np.array(sorted(ax.get_xticks().tolist() + force_xticks))
ax.set_xticks(xticks)
yticklabels = kwargs.get('yticklabels', None)
if yticklabels is not None:
# Hack ONLY WORKS WHEN TRANSPOSE = True
# Overrides num_yticks
ax.set_yticks(ydata)
ax.set_yticklabels(yticklabels)
xticklabels = kwargs.get('xticklabels', None)
if xticklabels is not None:
# Overrides num_xticks
ax.set_xticks(xdata)
ax.set_xticklabels(xticklabels)
xtick_rotation = kwargs.get('xtick_rotation', None)
if xtick_rotation is not None:
[lbl.set_rotation(xtick_rotation)
for lbl in ax.get_xticklabels()]
ytick_rotation = kwargs.get('ytick_rotation', None)
if ytick_rotation is not None:
[lbl.set_rotation(ytick_rotation)
for lbl in ax.get_yticklabels()]
# Axis padding
xpad = kwargs.get('xpad', None)
ypad = kwargs.get('ypad', None)
xpad_factor = kwargs.get('xpad_factor', None)
ypad_factor = kwargs.get('ypad_factor', None)
if xpad is None and xpad_factor is not None:
xpad = (xmax - xmin) * xpad_factor
if ypad is None and ypad_factor is not None:
ypad = (ymax - ymin) * ypad_factor
xpad = 0 if xpad is None else xpad
ypad = 0 if ypad is None else ypad
ypad_high = kwargs.get('ypad_high', ypad)
ypad_low = kwargs.get('ypad_low', ypad)
xpad_high = kwargs.get('xpad_high', xpad)
xpad_low = kwargs.get('xpad_low', xpad)
xmin, xmax = (xmin - xpad_low), (xmax + xpad_high)
ymin, ymax = (ymin - ypad_low), (ymax + ypad_high)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
xscale = kwargs.get('xscale', None)
yscale = kwargs.get('yscale', None)
if yscale is not None:
ax.set_yscale(yscale)
if xscale is not None:
ax.set_xscale(xscale)
gridlinestyle = kwargs.get('gridlinestyle', None)
gridlinewidth = kwargs.get('gridlinewidth', None)
gridlines = ax.get_xgridlines() + ax.get_ygridlines()
if gridlinestyle:
for line in gridlines:
line.set_linestyle(gridlinestyle)
if gridlinewidth:
for line in gridlines:
line.set_linewidth(gridlinewidth)
# Setup title
if title is not None:
titlekw = {
'fontproperties': mpl.font_manager.FontProperties(
family=family,
weight=weight,
size=titlesize)
}
ax.set_title(title, **titlekw)
use_legend = kwargs.get('use_legend', 'label' in valid_keys)
legend_loc = kwargs.get('legend_loc', 'best')
legend_alpha = kwargs.get('legend_alpha', 1.0)
if use_legend:
legendkw = {
'alpha': legend_alpha,
'fontproperties': mpl.font_manager.FontProperties(
family=family,
weight=weight,
size=legendsize)
}
legend(loc=legend_loc, ax=ax, **legendkw)
figtitle = kwargs.get('figtitle', None)
if figtitle is not None:
set_figtitle(figtitle, fontfamily=family, fontweight=weight,
size=kwargs.get('figtitlesize'))
use_darkbackground = kwargs.get('use_darkbackground', None)
lightbg = kwargs.get('lightbg', None)
if lightbg is None:
lightbg = True
if use_darkbackground is None:
use_darkbackground = not lightbg
if use_darkbackground:
_dark_background(force=use_darkbackground is True)
# TODO: return better info
return fig
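def _demo_multi_plot_dict():
    # Hypothetical sketch, not part of the original module: the dict form of
    # `ydata` described in the multi_plot docstring, where each key becomes a
    # labeled line. The numbers here are made up.
    ydata = {'loss': [0.9, 0.5, 0.3], 'acc': [0.2, 0.6, 0.8]}
    return multi_plot(xdata=[1, 2, 3], ydata=ydata, xlabel='epoch', fnum=1)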
def figure(fnum=None, pnum=(1, 1, 1), title=None, figtitle=None, doclf=False,
docla=False, projection=None, **kwargs):
"""
http://matplotlib.org/users/gridspec.html
Args:
fnum (int): fignum = figure number
pnum (int, str, or tuple(int, int, int)): plotnum = plot tuple
title (str): (default = None)
figtitle (None): (default = None)
docla (bool): (default = False)
doclf (bool): (default = False)
Returns:
mpl.Figure: fig
CommandLine:
python -m netharn.util.mplutil figure:0 --show
Example:
>>> autompl()
>>> import matplotlib.pyplot as plt
>>> fnum = 1
>>> fig = figure(fnum, (2, 2, 1))
>>> plt.gca().text(0.5, 0.5, "ax1", va="center", ha="center")
>>> fig = figure(fnum, (2, 2, 2))
>>> plt.gca().text(0.5, 0.5, "ax2", va="center", ha="center")
>>> show_if_requested()
Example:
>>> autompl()
>>> import matplotlib.pyplot as plt
>>> fnum = 1
>>> fig = figure(fnum, (2, 2, 1))
>>> plt.gca().text(0.5, 0.5, "ax1", va="center", ha="center")
>>> fig = figure(fnum, (2, 2, 2))
>>> plt.gca().text(0.5, 0.5, "ax2", va="center", ha="center")
>>> fig = figure(fnum, (2, 4, (1, slice(1, None))))
>>> plt.gca().text(0.5, 0.5, "ax3", va="center", ha="center")
>>> show_if_requested()
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def ensure_fig(fnum=None):
if fnum is None:
try:
fig = plt.gcf()
except Exception as ex:
fig = plt.figure()
else:
try:
fig = plt.figure(fnum)
except Exception as ex:
fig = plt.gcf()
return fig
def _convert_pnum_int_to_tup(int_pnum):
# Convert pnum to tuple format if in integer format
nr = int_pnum // 100
nc = int_pnum // 10 - (nr * 10)
px = int_pnum - (nr * 100) - (nc * 10)
pnum = (nr, nc, px)
return pnum
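    # e.g. _convert_pnum_int_to_tup(221) -> (2, 2, 1), mirroring matplotlib's integer subplot codes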
def _pnum_to_subspec(pnum):
if isinstance(pnum, six.string_types):
pnum = list(pnum)
nrow, ncols, plotnum = pnum
# if kwargs.get('use_gridspec', True):
# Convert old pnums to gridspec
gs = gridspec.GridSpec(nrow, ncols)
if isinstance(plotnum, (tuple, slice, list)):
subspec = gs[plotnum]
else:
subspec = gs[plotnum - 1]
return (subspec,)
def _setup_subfigure(pnum):
if isinstance(pnum, int):
pnum = _convert_pnum_int_to_tup(pnum)
axes_list = fig.get_axes()
if docla or len(axes_list) == 0:
if pnum is not None:
assert pnum[0] > 0, 'nRows must be > 0: pnum=%r' % (pnum,)
assert pnum[1] > 0, 'nCols must be > 0: pnum=%r' % (pnum,)
subspec = _pnum_to_subspec(pnum)
ax = fig.add_subplot(*subspec, projection=projection)
if len(axes_list) > 0:
ax.cla()
else:
ax = plt.gca()
else:
if pnum is not None:
subspec = _pnum_to_subspec(pnum)
ax = plt.subplot(*subspec)
else:
ax = plt.gca()
fig = ensure_fig(fnum)
if doclf:
fig.clf()
if pnum is not None:
_setup_subfigure(pnum)
# Set the title / figtitle
if title is not None:
ax = plt.gca()
ax.set_title(title)
if figtitle is not None:
fig.suptitle(figtitle)
return fig
def pandas_plot_matrix(df, rot=90, ax=None, grid=True, label=None,
zerodiag=False,
cmap='viridis', showvals=False, logscale=True):
import matplotlib as mpl
import copy
from matplotlib import pyplot as plt
if ax is None:
fig = figure(fnum=1, pnum=(1, 1, 1))
fig.clear()
        ax = plt.gca()
values = df.values
if zerodiag:
values = values.copy()
values = values - np.diag(np.diag(values))
# aximg = ax.imshow(values, interpolation='none', cmap='viridis')
if logscale:
from matplotlib.colors import LogNorm
vmin = df[df > 0].min().min()
norm = LogNorm(vmin=vmin, vmax=values.max())
else:
norm = None
cmap = copy.copy(mpl.cm.get_cmap(cmap)) # copy the default cmap
cmap.set_bad((0, 0, 0))
aximg = ax.matshow(values, interpolation='none', cmap=cmap, norm=norm)
# aximg = ax.imshow(values, interpolation='none', cmap='viridis', norm=norm)
# ax.imshow(values, interpolation='none', cmap='viridis')
ax.grid(False)
cax = plt.colorbar(aximg, ax=ax)
if label is not None:
cax.set_label(label)
ax.set_xticks(list(range(len(df.index))))
ax.set_xticklabels([lbl[0:100] for lbl in df.index])
for lbl in ax.get_xticklabels():
lbl.set_rotation(rot)
for lbl in ax.get_xticklabels():
lbl.set_horizontalalignment('center')
ax.set_yticks(list(range(len(df.columns))))
ax.set_yticklabels([lbl[0:100] for lbl in df.columns])
for lbl in ax.get_yticklabels():
lbl.set_horizontalalignment('right')
for lbl in ax.get_yticklabels():
lbl.set_verticalalignment('center')
# Grid lines around the pixels
if grid:
offset = -.5
xlim = [-.5, len(df.columns)]
ylim = [-.5, len(df.index)]
segments = []
for x in range(ylim[1]):
xdata = [x + offset, x + offset]
ydata = ylim
segment = list(zip(xdata, ydata))
segments.append(segment)
for y in range(xlim[1]):
xdata = xlim
ydata = [y + offset, y + offset]
segment = list(zip(xdata, ydata))
segments.append(segment)
bingrid = mpl.collections.LineCollection(segments, color='w', linewidths=1)
ax.add_collection(bingrid)
if showvals:
x_basis = np.arange(len(df.columns))
y_basis = np.arange(len(df.index))
x, y = np.meshgrid(x_basis, y_basis)
for c, r in zip(x.flatten(), y.flatten()):
val = df.iloc[r, c]
ax.text(c, r, val, va='center', ha='center', color='white')
return ax
def axes_extent(axs, pad=0.0):
"""
Get the full extent of a group of axes, including axes labels, tick labels,
and titles.
"""
import itertools as it
import matplotlib as mpl
def axes_parts(ax):
yield ax
for label in ax.get_xticklabels():
if label.get_text():
yield label
for label in ax.get_yticklabels():
if label.get_text():
yield label
xlabel = ax.get_xaxis().get_label()
ylabel = ax.get_yaxis().get_label()
for label in (xlabel, ylabel, ax.title):
if label.get_text():
yield label
items = it.chain.from_iterable(axes_parts(ax) for ax in axs)
extents = [item.get_window_extent() for item in items]
#mpl.transforms.Affine2D().scale(1.1)
extent = mpl.transforms.Bbox.union(extents)
extent = extent.expanded(1.0 + pad, 1.0 + pad)
return extent
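# A minimal usage sketch (assuming a drawn figure ``fig``): crop a saved image to just its axes:
#   extent = axes_extent(fig.axes).transformed(fig.dpi_scale_trans.inverted())
#   fig.savefig('cropped.png', bbox_inches=extent)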
def extract_axes_extents(fig, combine=False, pad=0.0):
# Make sure we draw the axes first so we can
# extract positions from the text objects
import matplotlib as mpl
fig.canvas.draw()
# Group axes that belong together
atomic_axes = []
seen_ = set([])
for ax in fig.axes:
if ax not in seen_:
atomic_axes.append([ax])
seen_.add(ax)
dpi_scale_trans_inv = fig.dpi_scale_trans.inverted()
axes_bboxes_ = [axes_extent(axs, pad) for axs in atomic_axes]
axes_extents_ = [extent.transformed(dpi_scale_trans_inv) for extent in axes_bboxes_]
# axes_extents_ = axes_bboxes_
if combine:
# Grab include extents of figure text as well
# FIXME: This might break on OSX
# http://stackoverflow.com/questions/22667224/bbox-backend
renderer = fig.canvas.get_renderer()
for mpl_text in fig.texts:
bbox = mpl_text.get_window_extent(renderer=renderer)
extent_ = bbox.expanded(1.0 + pad, 1.0 + pad)
extent = extent_.transformed(dpi_scale_trans_inv)
# extent = extent_
axes_extents_.append(extent)
axes_extents = mpl.transforms.Bbox.union(axes_extents_)
else:
axes_extents = axes_extents_
# if True:
# axes_extents.x0 = 0
# # axes_extents.y1 = 0
return axes_extents
def adjust_subplots(left=None, right=None, bottom=None, top=None, wspace=None,
hspace=None, fig=None):
"""
Kwargs:
left (float): left side of the subplots of the figure
right (float): right side of the subplots of the figure
bottom (float): bottom of the subplots of the figure
top (float): top of the subplots of the figure
wspace (float): width reserved for blank space between subplots
hspace (float): height reserved for blank space between subplots
"""
from matplotlib import pyplot as plt
kwargs = dict(left=left, right=right, bottom=bottom, top=top,
wspace=wspace, hspace=hspace)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
if fig is None:
fig = plt.gcf()
subplotpars = fig.subplotpars
adjust_dict = subplotpars.__dict__.copy()
del adjust_dict['validate']
adjust_dict.update(kwargs)
fig.subplots_adjust(**adjust_dict)
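# e.g. adjust_subplots(fig=fig, wspace=.3, hspace=.3, top=.9) adds breathing room between subplots.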
def render_figure_to_image(fig, **savekw):
import io
import cv2
import matplotlib as mpl
axes_extents = extract_axes_extents(fig)
extent = mpl.transforms.Bbox.union(axes_extents)
with io.BytesIO() as stream:
# This call takes 23% - 15% of the time depending on settings
fig.savefig(stream, bbox_inches=extent, **savekw)
# fig.savefig(stream, **savekw)
stream.seek(0)
        data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
im_bgra = cv2.imdecode(data, cv2.IMREAD_UNCHANGED)
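        # Note: cv2.imdecode yields BGR(A) channel order, hence the name im_bgra.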
return im_bgra
def savefig2(fig, fpath, **kwargs):
"""
Does a tight layout and saves the figure with transparency
"""
import matplotlib as mpl
if 'transparent' not in kwargs:
kwargs['transparent'] = True
if 'extent' not in kwargs:
axes_extents = extract_axes_extents(fig)
extent = mpl.transforms.Bbox.union(axes_extents)
kwargs['extent'] = extent
fig.savefig(fpath, **kwargs)
def copy_figure_to_clipboard(fig):
"""
References:
https://stackoverflow.com/questions/17676373/python-matplotlib-pyqt-copy-image-to-clipboard
"""
print('Copying figure %d to the clipboard' % fig.number)
    import matplotlib as mpl
    import cv2
app = mpl.backends.backend_qt5.qApp
QtGui = mpl.backends.backend_qt5.QtGui
im_bgra = render_figure_to_image(fig, transparent=True)
im_rgba = cv2.cvtColor(im_bgra, cv2.COLOR_BGRA2RGBA)
im = im_rgba
QImage = QtGui.QImage
qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_RGBA8888)
clipboard = app.clipboard()
clipboard.setImage(qim)
# size = fig.canvas.size()
# width, height = size.width(), size.height()
# qim = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
# QtWidgets = mpl.backends.backend_qt5.QtWidgets
# pixmap = QtWidgets.QWidget.grab(fig.canvas)
# clipboard.setPixmap(pixmap)
def dict_intersection(dict1, dict2):
r"""
Args:
dict1 (dict):
dict2 (dict):
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --exec-dict_intersection
Example:
>>> # ENABLE_DOCTEST
>>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
>>> mergedict_ = dict_intersection(dict1, dict2)
>>> print(ub.repr2(mergedict_, nl=0))
{'b': 2, 'c': 3}
"""
isect_keys = set(dict1.keys()).intersection(set(dict2.keys()))
# maintain order if possible
if isinstance(dict1, ub.odict):
isect_keys_ = [k for k in dict1.keys() if k in isect_keys]
_dict_cls = ub.odict
else:
isect_keys_ = isect_keys
_dict_cls = dict
dict_isect = _dict_cls(
(k, dict1[k]) for k in isect_keys_ if dict1[k] == dict2[k]
)
return dict_isect
def _dark_background(ax=None, doubleit=False, force=False):
r"""
Args:
ax (None): (default = None)
doubleit (bool): (default = False)
CommandLine:
python -m .draw_func2 --exec-_dark_background --show
Example:
>>> # ENABLE_DOCTEST
>>> autompl()
>>> fig = figure()
>>> _dark_background()
>>> show_if_requested()
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
def is_using_style(style):
style_dict = mpl.style.library[style]
return len(dict_intersection(style_dict, mpl.rcParams)) == len(style_dict)
if force:
from mpl_toolkits.mplot3d import Axes3D
BLACK = np.array(( 0, 0, 0, 255)) / 255.0
# Should use mpl style dark background instead
bgcolor = BLACK * .9
if ax is None:
ax = plt.gca()
if isinstance(ax, Axes3D):
ax.set_axis_bgcolor(bgcolor)
ax.tick_params(colors='white')
return
xy, width, height = _get_axis_xy_width_height(ax)
if doubleit:
halfw = (doubleit) * (width / 2)
halfh = (doubleit) * (height / 2)
xy = (xy[0] - halfw, xy[1] - halfh)
width *= (doubleit + 1)
height *= (doubleit + 1)
rect = mpl.patches.Rectangle(xy, width, height, lw=0, zorder=0)
rect.set_clip_on(True)
rect.set_fill(True)
rect.set_color(bgcolor)
rect.set_zorder(-99999999999)
rect = ax.add_patch(rect)
def _get_axis_xy_width_height(ax=None, xaug=0, yaug=0, waug=0, haug=0):
""" gets geometry of a subplot """
from matplotlib import pyplot as plt
if ax is None:
ax = plt.gca()
autoAxis = ax.axis()
xy = (autoAxis[0] + xaug, autoAxis[2] + yaug)
width = (autoAxis[1] - autoAxis[0]) + waug
height = (autoAxis[3] - autoAxis[2]) + haug
return xy, width, height
_LEGEND_LOCATION = {
'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
def set_figtitle(figtitle, subtitle='', forcefignum=True, incanvas=True,
size=None, fontfamily=None, fontweight=None,
fig=None):
r"""
Args:
figtitle (?):
subtitle (str): (default = '')
forcefignum (bool): (default = True)
incanvas (bool): (default = True)
fontfamily (None): (default = None)
fontweight (None): (default = None)
size (None): (default = None)
fig (None): (default = None)
CommandLine:
python -m .custom_figure set_figtitle --show
Example:
>>> # DISABLE_DOCTEST
>>> autompl()
>>> fig = figure(fnum=1, doclf=True)
>>> result = set_figtitle(figtitle='figtitle', fig=fig)
>>> # xdoc: +REQUIRES(--show)
>>> show_if_requested()
"""
from matplotlib import pyplot as plt
if figtitle is None:
figtitle = ''
if fig is None:
fig = plt.gcf()
figtitle = ub.ensure_unicode(figtitle)
subtitle = ub.ensure_unicode(subtitle)
if incanvas:
if subtitle != '':
subtitle = '\n' + subtitle
prop = {
'family': fontfamily,
'weight': fontweight,
'size': size,
}
prop = {k: v for k, v in prop.items() if v is not None}
sup = fig.suptitle(figtitle + subtitle)
if prop:
fontproperties = sup.get_fontproperties().copy()
for key, val in prop.items():
getattr(fontproperties, 'set_' + key)(val)
sup.set_fontproperties(fontproperties)
# fontproperties = mpl.font_manager.FontProperties(**prop)
else:
fig.suptitle('')
# Set title in the window
window_figtitle = ('fig(%d) ' % fig.number) + figtitle
window_figtitle = window_figtitle.replace('\n', ' ')
fig.canvas.set_window_title(window_figtitle)
def legend(loc='best', fontproperties=None, size=None, fc='w', alpha=1,
ax=None, handles=None):
r"""
Args:
loc (str): (default = 'best')
fontproperties (None): (default = None)
size (None): (default = None)
Ignore:
>>> # ENABLE_DOCTEST
>>> autompl()
>>> loc = 'best'
>>> xdata = np.linspace(-6, 6)
>>> ydata = np.sin(xdata)
>>> plt.plot(xdata, ydata, label='sin')
>>> fontproperties = None
>>> size = None
>>> result = legend(loc, fontproperties, size)
>>> print(result)
>>> show_if_requested()
"""
from matplotlib import pyplot as plt
assert loc in _LEGEND_LOCATION or loc == 'best', (
'invalid loc. try one of %r' % (_LEGEND_LOCATION,))
if ax is None:
ax = plt.gca()
if fontproperties is None:
prop = {}
if size is not None:
prop['size'] = size
# prop['weight'] = 'normal'
# prop['family'] = 'sans-serif'
else:
prop = fontproperties
legendkw = dict(loc=loc)
if prop:
legendkw['prop'] = prop
if handles is not None:
legendkw['handles'] = handles
legend = ax.legend(**legendkw)
if legend:
legend.get_frame().set_fc(fc)
legend.get_frame().set_alpha(alpha)
def distinct_colors(N, brightness=.878, randomize=True, hue_range=(0.0, 1.0), cmap_seed=None):
r"""
Args:
N (int):
brightness (float):
Returns:
list: RGB_tuples
CommandLine:
python -m color_funcs --test-distinct_colors --N 2 --show --hue-range=0.05,.95
python -m color_funcs --test-distinct_colors --N 3 --show --hue-range=0.05,.95
python -m color_funcs --test-distinct_colors --N 4 --show --hue-range=0.05,.95
python -m .color_funcs --test-distinct_colors --N 3 --show --no-randomize
python -m .color_funcs --test-distinct_colors --N 4 --show --no-randomize
python -m .color_funcs --test-distinct_colors --N 6 --show --no-randomize
python -m .color_funcs --test-distinct_colors --N 20 --show
References:
http://blog.jianhuashao.com/2011/09/generate-n-distinct-colors.html
CommandLine:
python -m .color_funcs --exec-distinct_colors --show
python -m .color_funcs --exec-distinct_colors --show --no-randomize --N 50
python -m .color_funcs --exec-distinct_colors --show --cmap_seed=foobar
Ignore:
>>> # build test data
>>> autompl()
>>> N = ub.smartcast(ub.get_argval('--N', default=2), int) # FIXME
>>> randomize = not ub.argflag('--no-randomize')
>>> brightness = 0.878
>>> # execute function
>>> cmap_seed = ub.get_argval('--cmap_seed', default=None)
>>> hue_range = ub.smartcast(ub.get_argval('--hue-range', default=(0.00, 1.0)), list) #FIXME
>>> RGB_tuples = distinct_colors(N, brightness, randomize, hue_range, cmap_seed=cmap_seed)
>>> # verify results
>>> assert len(RGB_tuples) == N
>>> result = str(RGB_tuples)
>>> print(result)
>>> # xdoctest: +REQUIRES(--show)
>>> color_list = RGB_tuples
>>> testshow_colors(color_list)
>>> show_if_requested()
"""
# TODO: Add sin wave modulation to the sat and value
# HACK for white figures
from matplotlib import pyplot as plt
import colorsys
remove_yellow = True
use_jet = False
if use_jet:
cmap = plt.cm.jet
RGB_tuples = list(map(tuple, cmap(np.linspace(0, 1, N))))
elif cmap_seed is not None:
# Randomized map based on a seed
#cmap_ = 'Set1'
#cmap_ = 'Dark2'
choices = [
#'Set1', 'Dark2',
'jet',
#'gist_rainbow',
#'rainbow',
#'gnuplot',
#'Accent'
]
cmap_hack = ub.argval('--cmap-hack', default=None)
ncolor_hack = ub.argval('--ncolor-hack', default=None)
if cmap_hack is not None:
choices = [cmap_hack]
if ncolor_hack is not None:
N = int(ncolor_hack)
N_ = N
seed = sum(list(map(ord, ub.hash_data(cmap_seed))))
rng = np.random.RandomState(seed + 48930)
cmap_str = rng.choice(choices, 1)[0]
#print('cmap_str = %r' % (cmap_str,))
cmap = plt.cm.get_cmap(cmap_str)
#.hashstr27(cmap_seed)
#cmap_seed = 0
#pass
jitter = (rng.randn(N) / (rng.randn(100).max() / 2)).clip(-1, 1) * ((1 / (N ** 2)))
range_ = np.linspace(0, 1, N, endpoint=False)
#print('range_ = %r' % (range_,))
range_ = range_ + jitter
#print('range_ = %r' % (range_,))
while not (np.all(range_ >= 0) and np.all(range_ <= 1)):
range_[range_ < 0] = np.abs(range_[range_ < 0] )
range_[range_ > 1] = 2 - range_[range_ > 1]
#print('range_ = %r' % (range_,))
shift = rng.rand()
range_ = (range_ + shift) % 1
#print('jitter = %r' % (jitter,))
#print('shift = %r' % (shift,))
#print('range_ = %r' % (range_,))
if ncolor_hack is not None:
range_ = range_[0:N_]
RGB_tuples = list(map(tuple, cmap(range_)))
else:
sat = brightness
val = brightness
hmin, hmax = hue_range
if remove_yellow:
hue_skips = [(.13, .24)]
else:
hue_skips = []
hue_skip_ranges = [_[1] - _[0] for _ in hue_skips]
total_skip = sum(hue_skip_ranges)
hmax_ = hmax - total_skip
hue_list = np.linspace(hmin, hmax_, N, endpoint=False, dtype=np.float)
# Remove colors (like hard to see yellows) in specified ranges
for skip, range_ in zip(hue_skips, hue_skip_ranges):
hue_list = [hue if hue <= skip[0] else hue + range_ for hue in hue_list]
HSV_tuples = [(hue, sat, val) for hue in hue_list]
RGB_tuples = [colorsys.hsv_to_rgb(*x) for x in HSV_tuples]
if randomize:
deterministic_shuffle(RGB_tuples)
return RGB_tuples
def distinct_markers(num, style='astrisk', total=None, offset=0):
r"""
Args:
num (?):
CommandLine:
python -m .draw_func2 --exec-distinct_markers --show
python -m .draw_func2 --exec-distinct_markers --style=star --show
python -m .draw_func2 --exec-distinct_markers --style=polygon --show
Ignore:
>>> autompl()
>>> style = ub.get_argval('--style', type_=str, default='astrisk')
>>> marker_list = distinct_markers(10, style)
>>> x_data = np.arange(0, 3)
>>> for count, (marker) in enumerate(marker_list):
>>> plt.plot(x_data, [count] * len(x_data), marker=marker, markersize=10, linestyle='', label=str(marker))
>>> legend()
>>> show_if_requested()
"""
num_sides = 3
style_num = {
'astrisk': 2,
'star': 1,
'polygon': 0,
'circle': 3
}[style]
if total is None:
total = num
total_degrees = 360 / num_sides
marker_list = [
(num_sides, style_num, total_degrees * (count + offset) / total)
for count in range(num)
]
return marker_list
def deterministic_shuffle(list_, rng=0):
r"""
Args:
list_ (list):
        rng (int): random seed or random number generator (default = 0)
Returns:
list: list_
Example:
>>> list_ = [1, 2, 3, 4, 5, 6]
>>> seed = 1
>>> list_ = deterministic_shuffle(list_, seed)
>>> result = str(list_)
>>> print(result)
[3, 2, 5, 1, 4, 6]
"""
from netharn import util
rng = util.ensure_rng(rng)
rng.shuffle(list_)
return list_
_BASE_FNUM = 9001
def next_fnum(new_base=None):
global _BASE_FNUM
if new_base is not None:
_BASE_FNUM = new_base
_BASE_FNUM += 1
return _BASE_FNUM
def ensure_fnum(fnum):
if fnum is None:
return next_fnum()
return fnum
def _save_requested(fpath_, save_parts):
    raise NotImplementedError("haven't done this yet")
# dpi = ub.argval('--dpi', type_=int, default=200)
from os.path import expanduser
from matplotlib import pyplot as plt
dpi = 200
fpath_ = expanduser(fpath_)
print('Figure save was requested')
# arg_dict = ut.get_arg_dict(prefix_list=['--', '-'],
# type_hints={'t': list, 'a': list})
arg_dict = {}
# HACK
arg_dict = {
key: (val[0] if len(val) == 1 else '[' + ']['.join(val) + ']')
if isinstance(val, list) else val
for key, val in arg_dict.items()
}
fpath_ = fpath_.format(**arg_dict)
fpath_ = fpath_.replace(' ', '').replace('\'', '').replace('"', '')
dpath = ub.argval('--dpath', type_=str, default=None)
if dpath is None:
gotdpath = False
dpath = '.'
else:
gotdpath = True
fpath = join(dpath, fpath_)
if not gotdpath:
dpath = dirname(fpath_)
print('dpath = %r' % (dpath,))
fig = plt.gcf()
fig.dpi = dpi
fpath_strict = ub.truepath(fpath)
CLIP_WHITE = ub.argflag('--clipwhite')
from netharn import util
if save_parts:
# TODO: call save_parts instead, but we still need to do the
# special grouping.
# Group axes that belong together
atomic_axes = []
seen_ = set([])
for ax in fig.axes:
div = _get_plotdat(ax, _DF2_DIVIDER_KEY, None)
if div is not None:
df2_div_axes = _get_plotdat_dict(ax).get('df2_div_axes', [])
seen_.add(ax)
seen_.update(set(df2_div_axes))
atomic_axes.append([ax] + df2_div_axes)
# TODO: pad these a bit
else:
if ax not in seen_:
atomic_axes.append([ax])
seen_.add(ax)
hack_axes_group_row = ub.argflag('--grouprows')
if hack_axes_group_row:
groupid_list = []
for axs in atomic_axes:
for ax in axs:
groupid = ax.colNum
groupid_list.append(groupid)
groups = ub.group_items(atomic_axes, groupid_list)
new_groups = list(map(ub.flatten, groups.values()))
atomic_axes = new_groups
#[[(ax.rowNum, ax.colNum) for ax in axs] for axs in atomic_axes]
# save all rows of each column
subpath_list = save_parts(fig=fig, fpath=fpath_strict,
grouped_axes=atomic_axes, dpi=dpi)
absfpath_ = subpath_list[-1]
if CLIP_WHITE:
for subpath in subpath_list:
# remove white borders
util.clipwhite_ondisk(subpath, subpath)
else:
savekw = {}
# savekw['transparent'] = fpath.endswith('.png') and not noalpha
savekw['transparent'] = ub.argflag('--alpha')
savekw['dpi'] = dpi
savekw['edgecolor'] = 'none'
savekw['bbox_inches'] = extract_axes_extents(fig, combine=True) # replaces need for clipwhite
absfpath_ = ub.truepath(fpath)
fig.savefig(absfpath_, **savekw)
if CLIP_WHITE:
# remove white borders
fpath_in = fpath_out = absfpath_
util.clipwhite_ondisk(fpath_in, fpath_out)
if ub.argflag(('--diskshow', '--ds')):
# show what we wrote
ub.startfile(absfpath_)
def show_if_requested(N=1):
"""
Used at the end of tests. Handles command line arguments for saving figures
    References:
http://stackoverflow.com/questions/4325733/save-a-subplot-in-matplotlib
"""
import matplotlib.pyplot as plt
# Process figures adjustments from command line before a show or a save
    # update_adjust_subplots()
# if use_argv:
# # hack to take args from commandline
# adjust_dict = ut.parse_dict_from_argv(adjust_dict)
# adjust_subplots(use_argv=True)
# def update_figsize():
# """ updates figsize based on command line """
# figsize = ub.argval('--figsize', type_=list, default=None)
# if figsize is not None:
# # Enforce inches and DPI
# fig = plt.gcf()
# figsize = [eval(term) if isinstance(term, str) else term
# for term in figsize]
# figw, figh = figsize[0], figsize[1]
# print('get_size_inches = %r' % (fig.get_size_inches(),))
# print('fig w,h (inches) = %r, %r' % (figw, figh))
# fig.set_size_inches(figw, figh)
# #print('get_size_inches = %r' % (fig.get_size_inches(),))
# update_figsize()
save_parts = ub.argflag('--saveparts')
fpath_ = ub.argval('--save', default=None)
if fpath_ is None:
fpath_ = ub.argval('--saveparts', default=None)
save_parts = True
if fpath_ is not None:
_save_requested(fpath_, save_parts)
# elif ub.argflag('--cmd'):
# pass
if ub.argflag('--show'):
# if ub.argflag('--tile'):
# if ut.get_computer_name().lower() in ['hyrule']:
# fig_presenter.all_figures_tile(percent_w=.5, monitor_num=0)
# else:
# fig_presenter.all_figures_tile()
# if ub.argflag('--present'):
# fig_presenter.present()
# for fig in fig_presenter.get_all_figures():
# fig.set_dpi(80)
plt.show()
def save_parts(fig, fpath, grouped_axes=None, dpi=None):
"""
FIXME: this works in mpl 2.0.0, but not 2.0.2
Args:
fig (?):
fpath (str): file path string
dpi (None): (default = None)
Returns:
list: subpaths
CommandLine:
python -m draw_func2 save_parts
Ignore:
>>> # DISABLE_DOCTEST
>>> autompl()
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
>>> def testimg(fname):
>>> return plt.imread(mpl.cbook.get_sample_data(fname))
>>> fnames = ['grace_hopper.png', 'ada.png'] * 4
>>> fig = plt.figure(1)
>>> for c, fname in enumerate(fnames, start=1):
>>> ax = fig.add_subplot(3, 4, c)
>>> ax.imshow(testimg(fname))
>>> ax.set_title(fname[0:3] + str(c))
>>> ax.set_xticks([])
>>> ax.set_yticks([])
>>> ax = fig.add_subplot(3, 1, 3)
>>> ax.plot(np.sin(np.linspace(0, np.pi * 2)))
>>> ax.set_xlabel('xlabel')
>>> ax.set_ylabel('ylabel')
>>> ax.set_title('title')
>>> fpath = 'test_save_parts.png'
>>> adjust_subplots(fig=fig, wspace=.3, hspace=.3, top=.9)
>>> subpaths = save_parts(fig, fpath, dpi=300)
>>> fig.savefig(fpath)
>>> ub.startfile(subpaths[0])
>>> ub.startfile(fpath)
"""
if dpi:
# Need to set figure dpi before we draw
fig.dpi = dpi
# We need to draw the figure before calling get_window_extent
# (or we can figure out how to set the renderer object)
# if getattr(fig.canvas, 'renderer', None) is None:
fig.canvas.draw()
# Group axes that belong together
if grouped_axes is None:
grouped_axes = []
for ax in fig.axes:
grouped_axes.append([ax])
subpaths = []
_iter = enumerate(grouped_axes, start=0)
_iter = ub.ProgIter(list(_iter), label='save subfig')
for count, axs in _iter:
subpath = ub.augpath(fpath, suffix=chr(count + 65))
extent = axes_extent(axs).transformed(fig.dpi_scale_trans.inverted())
savekw = {}
savekw['transparent'] = ub.argflag('--alpha')
if dpi is not None:
savekw['dpi'] = dpi
savekw['edgecolor'] = 'none'
fig.savefig(subpath, bbox_inches=extent, **savekw)
subpaths.append(subpath)
return subpaths
_qtensured = False
def _current_ipython_session():
"""
Returns a reference to the current IPython session, if one is running
"""
try:
__IPYTHON__
except NameError:
return None
else:
import IPython
ipython = IPython.get_ipython()
# if ipython is None we must have exited ipython at some point
return ipython
def qtensure():
"""
If you are in an IPython session, ensures that your backend is Qt.
"""
global _qtensured
if not _qtensured:
ipython = _current_ipython_session()
if ipython:
import sys
if 'PyQt4' in sys.modules:
ipython.magic('pylab qt4 --no-import-all')
_qtensured = True
else:
ipython.magic('pylab qt5 --no-import-all')
_qtensured = True
def aggensure():
"""
Ensures that you are in agg mode as long as IPython is not running
This might help prevent errors in tmux like:
qt.qpa.screen: QXcbConnection: Could not connect to display localhost:10.0
Could not connect to any X display.
"""
import matplotlib as mpl
current_backend = mpl.get_backend()
if current_backend != 'agg':
ipython = _current_ipython_session()
if not ipython:
set_mpl_backend('agg')
def set_mpl_backend(backend):
"""
Args:
backend (str): name of backend to use (e.g. Agg, PyQt)
"""
import sys
import matplotlib as mpl
if backend.lower().startswith('qt'):
# handle interactive qt case
qtensure()
if backend != mpl.get_backend():
# If we have already imported pyplot, then we need to use experimental
# behavior. Otherwise, we can just set the backend.
if 'matplotlib.pyplot' in sys.modules:
from matplotlib import pyplot as plt
plt.switch_backend(backend)
else:
mpl.use(backend)
def autompl():
"""
Uses platform heuristics to automatically set the mpl backend.
If no display is available it will be set to agg, otherwise we will try to
use the cross-platform Qt5Agg backend.
"""
import os
import sys
if sys.platform.startswith('win32'):
# TODO: something reasonable
pass
else:
DISPLAY = os.environ.get('DISPLAY', '')
if not DISPLAY:
set_mpl_backend('agg')
else:
set_mpl_backend('Qt5Agg')
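# Typical usage (assumption based on the doctests in this module): call autompl() once
# before importing pyplot so a sensible backend is selected, e.g.:
#   autompl()
#   import matplotlib.pyplot as plt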
def imshow(img, fnum=None, title=None, figtitle=None, pnum=None,
interpolation='nearest', cmap=None, heatmap=False,
data_colorbar=False, xlabel=None, redraw_image=True,
colorspace='bgr', ax=None, alpha=None, norm=None, **kwargs):
r"""
Args:
img (ndarray): image data
fnum (int): figure number
colorspace (str): if the data is 3-4 channels, this indicates the colorspace
1 channel data is assumed grayscale. 4 channels assumes alpha.
title (str):
figtitle (None):
pnum (tuple): plot number
interpolation (str): other interpolations = nearest, bicubic, bilinear
cmap (None):
heatmap (bool):
data_colorbar (bool):
darken (None):
        redraw_image (bool): used when calling imshow repeatedly; if False,
            the image data itself is not redrawn.
Returns:
tuple: (fig, ax)
Kwargs:
docla, doclf, projection
Returns:
tuple: (fig, ax)
Ignore:
>>> autompl()
>>> img_fpath = ut.grab_test_imgpath('carl.jpg')
>>> img = util.imread(img_fpath)
>>> (fig, ax) = imshow(img)
>>> result = ('(fig, ax) = %s' % (str((fig, ax)),))
>>> print(result)
>>> ut.show_if_requested()
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
if ax is not None:
fig = ax.figure
nospecial = True
else:
fig = figure(fnum=fnum, pnum=pnum, title=title, figtitle=figtitle, **kwargs)
ax = plt.gca()
nospecial = False
#ax.set_xticks([])
#ax.set_yticks([])
#return fig, ax
if not redraw_image:
return fig, ax
if isinstance(img, six.string_types):
# Allow for path to image to be specified
from netharn import util
img_fpath = img
img = util.imread(img_fpath)
plt_imshow_kwargs = {
'interpolation': interpolation,
#'cmap': plt.get_cmap('gray'),
}
if alpha is not None:
plt_imshow_kwargs['alpha'] = alpha
if norm is not None:
if norm is True:
norm = mpl.colors.Normalize()
plt_imshow_kwargs['norm'] = norm
else:
if cmap is None and not heatmap and not nospecial:
plt_imshow_kwargs['vmin'] = 0
plt_imshow_kwargs['vmax'] = 255
if heatmap:
cmap = 'hot'
# Handle tensor chw format in most cases
if img.ndim == 3:
if img.shape[0] == 3 or img.shape[0] == 1:
if img.shape[2] > 4:
# probably in chw format
img = img.transpose(1, 2, 0)
try:
if len(img.shape) == 3 and (img.shape[2] == 3 or img.shape[2] == 4):
# img is in a color format
from netharn import util
dst_space = 'rgb'
if img.shape[2] == 4:
colorspace += 'a'
dst_space += 'a'
imgRGB = util.convert_colorspace(img, dst_space=dst_space,
src_space=colorspace)
if imgRGB.dtype.kind == 'f':
maxval = imgRGB.max()
if maxval > 1.01 and maxval < 256:
imgRGB = np.array(imgRGB, dtype=np.uint8)
ax.imshow(imgRGB, **plt_imshow_kwargs)
elif len(img.shape) == 2 or (len(img.shape) == 3 and img.shape[2] == 1):
# img is in grayscale
if len(img.shape) == 3:
imgGRAY = img.reshape(img.shape[0:2])
else:
imgGRAY = img
if cmap is None:
cmap = plt.get_cmap('gray')
if isinstance(cmap, six.string_types):
cmap = plt.get_cmap(cmap)
# for some reason gray floats aren't working right
if imgGRAY.max() <= 1.01 and imgGRAY.min() >= -1E-9:
imgGRAY = (imgGRAY * 255).astype(np.uint8)
ax.imshow(imgGRAY, cmap=cmap, **plt_imshow_kwargs)
else:
raise AssertionError(
'unknown image format. img.dtype=%r, img.shape=%r' %
(img.dtype, img.shape))
except TypeError as te:
print('[df2] imshow ERROR %r' % (te,))
raise
except Exception as ex:
print('!!!!!!!!!!!!!!WARNING!!!!!!!!!!!')
print('[df2] type(img) = %r' % type(img))
if not isinstance(img, np.ndarray):
            print('!!!!!!!!!!!!!!ERROR!!!!!!!!!!!')
pass
#print('img = %r' % (img,))
print('[df2] img.dtype = %r' % (img.dtype,))
print('[df2] type(img) = %r' % (type(img),))
print('[df2] img.shape = %r' % (img.shape,))
print('[df2] imshow ERROR %r' % ex)
raise
#plt.set_cmap('gray')
ax.set_xticks([])
ax.set_yticks([])
if data_colorbar is True:
scores = np.unique(img.flatten())
if cmap is None:
cmap = 'hot'
colors = scores_to_color(scores, cmap)
colorbar(scores, colors)
if xlabel is not None:
ax.set_xlabel(xlabel)
if figtitle is not None:
set_figtitle(figtitle)
return fig, ax
def colorbar(scalars, colors, custom=False, lbl=None, ticklabels=None,
float_format='%.2f', **kwargs):
"""
adds a color bar next to the axes based on specific scalars
Args:
scalars (ndarray):
colors (ndarray):
custom (bool): use custom ticks
Kwargs:
See plt.colorbar
Returns:
cb : matplotlib colorbar object
Ignore:
>>> autompl()
>>> scalars = np.array([-1, -2, 1, 1, 2, 7, 10])
>>> cmap_ = 'plasma'
>>> logscale = False
>>> custom = True
>>> reverse_cmap = True
>>> val2_customcolor = {
... -1: UNKNOWN_PURP,
... -2: LIGHT_BLUE,
... }
>>> colors = scores_to_color(scalars, cmap_=cmap_, logscale=logscale, reverse_cmap=reverse_cmap, val2_customcolor=val2_customcolor)
>>> colorbar(scalars, colors, custom=custom)
>>> df2.present()
>>> show_if_requested()
Ignore:
>>> # ENABLE_DOCTEST
>>> scalars = np.linspace(0, 1, 100)
>>> cmap_ = 'plasma'
>>> logscale = False
>>> custom = False
>>> reverse_cmap = False
>>> colors = scores_to_color(scalars, cmap_=cmap_, logscale=logscale,
>>> reverse_cmap=reverse_cmap)
>>> colors = [lighten_rgb(c, .3) for c in colors]
>>> colorbar(scalars, colors, custom=custom)
>>> df2.present()
>>> show_if_requested()
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
assert len(scalars) == len(colors), 'scalars and colors must be corresponding'
if len(scalars) == 0:
return None
# Parameters
ax = plt.gca()
divider = _ensure_divider(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
xy, width, height = _get_axis_xy_width_height(ax)
#orientation = ['vertical', 'horizontal'][0]
TICK_FONTSIZE = 8
#
# Create scalar mappable with cmap
if custom:
# FIXME: clean this code up and change the name custom
# to be meaningful. It is more like: display unique colors
unique_scalars, unique_idx = np.unique(scalars, return_index=True)
unique_colors = np.array(colors)[unique_idx]
#max_, min_ = unique_scalars.max(), unique_scalars.min()
#extent_ = max_ - min_
#bounds = np.linspace(min_, max_ + 1, extent_ + 2)
listed_cmap = mpl.colors.ListedColormap(unique_colors)
#norm = mpl.colors.BoundaryNorm(bounds, listed_cmap.N)
#sm = mpl.cm.ScalarMappable(cmap=listed_cmap, norm=norm)
sm = mpl.cm.ScalarMappable(cmap=listed_cmap)
sm.set_array(np.linspace(0, 1, len(unique_scalars) + 1))
else:
sorted_scalars = sorted(scalars)
listed_cmap = scores_to_cmap(scalars, colors)
sm = plt.cm.ScalarMappable(cmap=listed_cmap)
sm.set_array(sorted_scalars)
# Use mapable object to create the colorbar
#COLORBAR_SHRINK = .42 # 1
#COLORBAR_PAD = .01 # 1
#COLORBAR_ASPECT = np.abs(20 * height / (width)) # 1
cb = plt.colorbar(sm, cax=cax, **kwargs)
## Add the colorbar to the correct label
#axis = cb.ax.yaxis # if orientation == 'horizontal' else cb.ax.yaxis
#position = 'bottom' if orientation == 'horizontal' else 'right'
#axis.set_ticks_position(position)
# This line alone removes data
# axis.set_ticks([0, .5, 1])
if custom:
ticks = np.linspace(0, 1, len(unique_scalars) + 1)
if len(ticks) < 2:
ticks += .5
else:
# SO HACKY
ticks += (ticks[1] - ticks[0]) / 2
if isinstance(unique_scalars, np.ndarray) and unique_scalars.dtype.kind == 'f':
ticklabels = [float_format % scalar for scalar in unique_scalars]
else:
ticklabels = unique_scalars
cb.set_ticks(ticks) # tick locations
cb.set_ticklabels(ticklabels) # tick labels
elif ticklabels is not None:
ticks_ = cb.ax.get_yticks()
mx = ticks_.max()
mn = ticks_.min()
ticks = np.linspace(mn, mx, len(ticklabels))
cb.set_ticks(ticks) # tick locations
cb.set_ticklabels(ticklabels)
#cb.ax.get_yticks()
#cb.set_ticks(ticks) # tick locations
#cb.set_ticklabels(ticklabels) # tick labels
# _set_plotdat(cb.ax, 'viztype', 'colorbar-%s' % (lbl,))
# _set_plotdat(cb.ax, 'sm', sm)
# FIXME: Figure out how to make a maximum number of ticks
# and to enforce them to be inside the data bounds
cb.ax.tick_params(labelsize=TICK_FONTSIZE)
# Sets current axis
plt.sca(ax)
if lbl is not None:
cb.set_label(lbl)
return cb
_DF2_DIVIDER_KEY = '_df2_divider'
def _get_plotdat(ax, key, default=None):
""" returns internal property from a matplotlib axis """
_plotdat = _get_plotdat_dict(ax)
val = _plotdat.get(key, default)
return val
def _set_plotdat(ax, key, val):
""" sets internal property to a matplotlib axis """
_plotdat = _get_plotdat_dict(ax)
_plotdat[key] = val
def _del_plotdat(ax, key):
""" sets internal property to a matplotlib axis """
_plotdat = _get_plotdat_dict(ax)
if key in _plotdat:
del _plotdat[key]
def _get_plotdat_dict(ax):
""" sets internal property to a matplotlib axis """
if '_plotdat' not in ax.__dict__:
ax.__dict__['_plotdat'] = {}
plotdat_dict = ax.__dict__['_plotdat']
return plotdat_dict
def _ensure_divider(ax):
""" Returns previously constructed divider or creates one """
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = _get_plotdat(ax, _DF2_DIVIDER_KEY, None)
if divider is None:
divider = make_axes_locatable(ax)
_set_plotdat(ax, _DF2_DIVIDER_KEY, divider)
orig_append_axes = divider.append_axes
def df2_append_axes(divider, position, size, pad=None, add_to_figure=True, **kwargs):
""" override divider add axes to register the divided axes """
div_axes = _get_plotdat(ax, 'df2_div_axes', [])
new_ax = orig_append_axes(position, size, pad=pad, add_to_figure=add_to_figure, **kwargs)
div_axes.append(new_ax)
_set_plotdat(ax, 'df2_div_axes', div_axes)
return new_ax
new_method = df2_append_axes.__get__(divider, divider.__class__)
setattr(divider, 'append_axes', new_method)
# ut.inject_func_as_method(divider, df2_append_axes, 'append_axes', allow_override=True)
return divider
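# The append_axes override above records divided axes (e.g. colorbar axes) on the parent
# axis so that the figure-saving helpers can group them back together with that axis.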
def scores_to_cmap(scores, colors=None, cmap_='hot'):
import matplotlib as mpl
if colors is None:
colors = scores_to_color(scores, cmap_=cmap_)
scores = np.array(scores)
colors = np.array(colors)
sortx = scores.argsort()
sorted_colors = colors[sortx]
# Make a listed colormap and mappable object
listed_cmap = mpl.colors.ListedColormap(sorted_colors)
return listed_cmap
def scores_to_color(score_list, cmap_='hot', logscale=False, reverse_cmap=False,
custom=False, val2_customcolor=None, score_range=None,
cmap_range=(.1, .9)):
"""
Other good colormaps are 'spectral', 'gist_rainbow', 'gist_ncar', 'Set1',
'Set2', 'Accent'
# TODO: plasma
Args:
score_list (list):
cmap_ (str): defaults to hot
logscale (bool):
cmap_range (tuple): restricts to only a portion of the cmap to avoid extremes
Returns:
        list: a list of matplotlib color tuples
Ignore:
>>> ut.exec_funckw(scores_to_color, globals())
>>> score_list = np.array([-1, -2, 1, 1, 2, 10])
>>> # score_list = np.array([0, .1, .11, .12, .13, .8])
>>> # score_list = np.linspace(0, 1, 100)
>>> cmap_ = 'plasma'
>>> colors = scores_to_color(score_list, cmap_)
>>> imgRGB = util.atleast_nd(np.array(colors)[:, 0:3], 3, tofront=True)
>>> imgRGB = imgRGB.astype(np.float32)
>>> imgBGR = util.convert_colorspace(imgRGB, 'BGR', 'RGB')
>>> imshow(imgBGR)
>>> show_if_requested()
Ignore:
>>> score_list = np.array([-1, -2, 1, 1, 2, 10])
>>> cmap_ = 'hot'
>>> logscale = False
>>> reverse_cmap = True
>>> custom = True
>>> val2_customcolor = {
... -1: UNKNOWN_PURP,
... -2: LIGHT_BLUE,
... }
"""
import matplotlib.pyplot as plt
assert len(score_list.shape) == 1, 'score must be 1d'
if len(score_list) == 0:
return []
def apply_logscale(scores):
scores = np.array(scores)
above_zero = scores >= 0
scores_ = scores.copy()
scores_[above_zero] = scores_[above_zero] + 1
scores_[~above_zero] = scores_[~above_zero] - 1
scores_ = np.log2(scores_)
return scores_
if logscale:
# Hack
score_list = apply_logscale(score_list)
#if loglogscale
#score_list = np.log2(np.log2(score_list + 2) + 1)
#if isinstance(cmap_, six.string_types):
cmap = plt.get_cmap(cmap_)
#else:
# cmap = cmap_
if reverse_cmap:
cmap = reverse_colormap(cmap)
#if custom:
# base_colormap = cmap
# data = score_list
# cmap = customize_colormap(score_list, base_colormap)
if score_range is None:
min_ = score_list.min()
max_ = score_list.max()
else:
min_ = score_range[0]
max_ = score_range[1]
if logscale:
min_, max_ = apply_logscale([min_, max_])
if cmap_range is None:
cmap_scale_min, cmap_scale_max = 0., 1.
else:
cmap_scale_min, cmap_scale_max = cmap_range
extent_ = max_ - min_
if extent_ == 0:
colors = [cmap(.5) for fx in range(len(score_list))]
else:
if False and logscale:
# hack
def score2_01(score):
return np.log2(
1 + cmap_scale_min + cmap_scale_max *
(float(score) - min_) / (extent_))
score_list = np.array(score_list)
#rank_multiplier = score_list.argsort() / len(score_list)
#normscore = np.array(list(map(score2_01, score_list))) * rank_multiplier
normscore = np.array(list(map(score2_01, score_list)))
colors = list(map(cmap, normscore))
else:
def score2_01(score):
return cmap_scale_min + cmap_scale_max * (float(score) - min_) / (extent_)
colors = [cmap(score2_01(score)) for score in score_list]
if val2_customcolor is not None:
colors = [
np.array(val2_customcolor.get(score, color))
for color, score in zip(colors, score_list)]
return colors
def reverse_colormap(cmap):
"""
References:
http://nbviewer.ipython.org/github/kwinkunks/notebooks/blob/master/Matteo_colourmaps.ipynb
"""
import matplotlib as mpl
if isinstance(cmap, mpl.colors.ListedColormap):
return mpl.colors.ListedColormap(cmap.colors[::-1])
else:
reverse = []
k = []
for key, channel in six.iteritems(cmap._segmentdata):
data = []
for t in channel:
data.append((1 - t[0], t[1], t[2]))
k.append(key)
reverse.append(sorted(data))
cmap_reversed = mpl.colors.LinearSegmentedColormap(
cmap.name + '_reversed', dict(zip(k, reverse)))
return cmap_reversed
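# Note: newer matplotlib also ships reversed variants directly (e.g. plt.get_cmap('hot_r')),
# which can be used instead of reverse_colormap for builtin colormaps.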
class PlotNums(object):
"""
    Convenience class for dealing with plot numberings (pnums)
Example:
>>> pnum_ = PlotNums(nRows=2, nCols=2)
>>> # Indexable
>>> print(pnum_[0])
(2, 2, 1)
>>> # Iterable
>>> print(ub.repr2(list(pnum_), nl=0, nobr=True))
(2, 2, 1), (2, 2, 2), (2, 2, 3), (2, 2, 4)
>>> # Callable (iterates through a default iterator)
>>> print(pnum_())
(2, 2, 1)
>>> print(pnum_())
(2, 2, 2)
"""
def __init__(self, nRows=None, nCols=None, nSubplots=None, start=0):
nRows, nCols = self._get_num_rc(nSubplots, nRows, nCols)
self.nRows = nRows
self.nCols = nCols
base = 0
self.offset = 0 if base == 1 else 1
self.start = start
self._iter = None
def __getitem__(self, px):
return (self.nRows, self.nCols, px + self.offset)
def __call__(self):
"""
replacement for make_pnum_nextgen
Example:
>>> import itertools as it
>>> pnum_ = PlotNums(nSubplots=9)
>>> pnum_list = list( (pnum_() for _ in it.count()) )
>>> result = ('pnum_list = %s' % (ub.repr2(pnum_list),))
>>> print(result)
Example:
>>> import itertools as it
>>> for nRows, nCols, nSubplots in it.product([None, 3], [None, 3], [None, 9]):
>>> start = 0
>>> pnum_ = PlotNums(nRows, nCols, nSubplots, start)
>>> pnum_list = list( (pnum_() for _ in it.count()) )
>>> print((nRows, nCols, nSubplots))
>>> result = ('pnum_list = %s' % (ub.repr2(pnum_list),))
>>> print(result)
"""
if self._iter is None:
self._iter = iter(self)
return six.next(self._iter)
def __iter__(self):
r"""
Yields:
tuple : pnum
Example:
>>> pnum_ = iter(PlotNums(nRows=3, nCols=2))
>>> result = ub.repr2(list(pnum_), nl=1, nobr=True)
>>> print(result)
(3, 2, 1),
(3, 2, 2),
(3, 2, 3),
(3, 2, 4),
(3, 2, 5),
(3, 2, 6),
Example:
>>> nRows = 3
>>> nCols = 2
>>> pnum_ = iter(PlotNums(nRows, nCols, start=3))
>>> result = ub.repr2(list(pnum_), nl=1, nobr=True)
>>> print(result)
(3, 2, 4),
(3, 2, 5),
(3, 2, 6),
"""
for px in range(self.start, len(self)):
yield self[px]
def __len__(self):
total_plots = self.nRows * self.nCols
return total_plots
@classmethod
def _get_num_rc(PlotNums, nSubplots=None, nRows=None, nCols=None):
r"""
Gets a constrained row column plot grid
Args:
nSubplots (None): (default = None)
nRows (None): (default = None)
nCols (None): (default = None)
Returns:
tuple: (nRows, nCols)
Example:
>>> cases = [
>>> dict(nRows=None, nCols=None, nSubplots=None),
>>> dict(nRows=2, nCols=None, nSubplots=5),
>>> dict(nRows=None, nCols=2, nSubplots=5),
>>> dict(nRows=None, nCols=None, nSubplots=5),
>>> ]
>>> for kw in cases:
>>> print('----')
>>> size = PlotNums._get_num_rc(**kw)
>>> if kw['nSubplots'] is not None:
>>> assert size[0] * size[1] >= kw['nSubplots']
>>> print('**kw = %s' % (ub.repr2(kw),))
>>> print('size = %r' % (size,))
"""
if nSubplots is None:
if nRows is None:
nRows = 1
if nCols is None:
nCols = 1
else:
if nRows is None and nCols is None:
nRows, nCols = PlotNums._get_square_row_cols(nSubplots)
elif nRows is not None:
nCols = int(np.ceil(nSubplots / nRows))
elif nCols is not None:
nRows = int(np.ceil(nSubplots / nCols))
return nRows, nCols
    @staticmethod
    def _get_square_row_cols(nSubplots, max_cols=None, fix=False, inclusive=True):
r"""
Args:
nSubplots (int):
max_cols (int):
Returns:
tuple: (int, int)
Example:
>>> nSubplots = 9
>>> nSubplots_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
>>> max_cols = None
>>> rc_list = [PlotNums._get_square_row_cols(nSubplots, fix=True) for nSubplots in nSubplots_list]
>>> print(repr(np.array(rc_list).T))
array([[1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3],
[1, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4]])
"""
if nSubplots == 0:
return 0, 0
if inclusive:
rounder = np.ceil
else:
rounder = np.floor
if fix:
# This function is very broken, but it might have dependencies
# this is the correct version
nCols = int(rounder(np.sqrt(nSubplots)))
nRows = int(rounder(nSubplots / nCols))
return nRows, nCols
else:
# This is the clamped num cols version
# probably used in ibeis.viz
if max_cols is None:
max_cols = 5
if nSubplots in [4]:
max_cols = 2
if nSubplots in [5, 6, 7]:
max_cols = 3
if nSubplots in [8]:
max_cols = 4
nCols = int(min(nSubplots, max_cols))
#nCols = int(min(rounder(np.sqrt(nrids)), 5))
nRows = int(rounder(nSubplots / nCols))
return nRows, nCols
def draw_border(ax, color, lw=2, offset=None, adjust=True):
'draws rectangle border around a subplot'
if adjust:
xy, width, height = _get_axis_xy_width_height(ax, -.7, -.2, 1, .4)
else:
xy, width, height = _get_axis_xy_width_height(ax)
if offset is not None:
xoff, yoff = offset
xy = [xoff, yoff]
height = - height - yoff
width = width - xoff
import matplotlib as mpl
rect = mpl.patches.Rectangle(xy, width, height, lw=lw)
rect = ax.add_patch(rect)
rect.set_clip_on(False)
rect.set_fill(False)
rect.set_edgecolor(color)
return rect
def draw_boxes(boxes, box_format='xywh', color='blue', labels=None,
textkw=None, ax=None):
"""
Args:
        boxes (list): list of coordinates in xywh, tlbr, or cxywh format
        box_format (str): specifies how the boxes are formatted
xywh is the top left x and y pixel width and height
cxywh is the center xy pixel width and height
tlbr is the top left xy and the bottom right xy
color (str): edge color of the boxes
labels (list): if specified, plots a text annotation on each box
Example:
>>> from netharn.util.mplutil import *
>>> autompl()
>>> bboxes = [[.1, .1, .6, .3], [.3, .5, .5, .6]]
>>> col = draw_boxes(bboxes)
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
if ax is None:
ax = plt.gca()
from netharn import util
if isinstance(boxes, util.Boxes):
box_format = boxes.format
boxes = boxes.data
if not len(boxes):
return
boxes = np.asarray(boxes)
if box_format == 'xywh':
xywh = boxes
elif box_format == 'cxywh':
cx, cy, w, h = boxes.T[0:4]
x1 = cx - (w / 2)
y1 = cy - (h / 2)
xywh = np.vstack([x1, y1, w, h]).T
elif box_format == 'tlbr':
x1, y1 = boxes.T[0:2]
w, h = boxes.T[2:4] - boxes.T[0:2]
xywh = np.vstack([x1, y1, w, h]).T
else:
raise KeyError(box_format)
edgecolor = Color(color).as01('rgba')
facecolor = Color((0, 0, 0, 0)).as01('rgba')
rectkw = dict(ec=edgecolor, fc=facecolor, lw=2, linestyle='solid')
patches = [mpl.patches.Rectangle((x, y), w, h, **rectkw)
for x, y, w, h in xywh]
col = mpl.collections.PatchCollection(patches, match_original=True)
ax.add_collection(col)
if labels:
texts = []
default_textkw = {
'horizontalalignment': 'left',
'verticalalignment': 'top',
'backgroundcolor': (0, 0, 0, .3),
'color': 'white',
'fontproperties': mpl.font_manager.FontProperties(
size=6, family='monospace'),
}
tkw = default_textkw.copy()
if textkw is not None:
tkw.update(textkw)
for (x1, y1, w, h), label in zip(xywh, labels):
texts.append((x1, y1, label, tkw))
for (x1, y1, catname, tkw) in texts:
ax.text(x1, y1, catname, **tkw)
return col
def draw_line_segments(pts1, pts2, ax=None, **kwargs):
"""
draws `N` line segments between `N` pairs of points
Args:
pts1 (ndarray): Nx2
pts2 (ndarray): Nx2
ax (None): (default = None)
**kwargs: lw, alpha, colors
CommandLine:
python -m netharn.util.mplutil draw_line_segments --show
Example:
>>> pts1 = np.array([(.1, .8), (.6, .8)])
>>> pts2 = np.array([(.6, .7), (.4, .1)])
>>> figure(fnum=None)
>>> draw_line_segments(pts1, pts2)
>>> # xdoc: +REQUIRES(--show)
>>> import matplotlib.pyplot as plt
>>> ax = plt.gca()
>>> ax.set_xlim(0, 1)
>>> ax.set_ylim(0, 1)
>>> show_if_requested()
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
if ax is None:
ax = plt.gca()
assert len(pts1) == len(pts2), 'unaligned'
segments = [(xy1, xy2) for xy1, xy2 in zip(pts1, pts2)]
linewidth = kwargs.pop('lw', kwargs.pop('linewidth', 1.0))
alpha = kwargs.pop('alpha', 1.0)
if 'color' in kwargs:
kwargs['colors'] = kwargs['color']
# mpl.colors.ColorConverter().to_rgb(kwargs['color'])
line_group = mpl.collections.LineCollection(segments, linewidths=linewidth,
alpha=alpha, **kwargs)
ax.add_collection(line_group)
def make_heatmask(probs, cmap='plasma', with_alpha=True):
"""
Colorizes a single-channel intensity mask (with an alpha channel)
"""
import matplotlib as mpl
from netharn.util import imutil
assert len(probs.shape) == 2
cmap_ = mpl.cm.get_cmap(cmap)
probs = imutil.ensure_float01(probs)
heatmask = cmap_(probs)
if with_alpha:
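        # Flip RGB -> BGR (presumably so the mask blends with the BGR images used elsewhere
        # in this module) and use the probabilities themselves as the alpha channel.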
heatmask[:, :, 0:3] = heatmask[:, :, 0:3][:, :, ::-1]
heatmask[:, :, 3] = probs
return heatmask
def colorbar_image(domain, cmap='plasma', dpi=96, shape=(200, 20), transparent=False):
"""
Notes:
shape is approximate
Ignore:
domain = np.linspace(-30, 200)
cmap='plasma'
dpi = 80
dsize = (20, 200)
util.imwrite('foo.png', util.colorbar_image(np.arange(0, 1)), shape=(400, 80))
import plottool as pt
pt.qtensure()
import matplotlib as mpl
mpl.style.use('ggplot')
util.imwrite('foo.png', util.colorbar_image(np.linspace(0, 1, 100), dpi=200, shape=(1000, 40), transparent=1))
ub.startfile('foo.png')
"""
import matplotlib as mpl
mpl.use('agg', force=False, warn=False)
from matplotlib import pyplot as plt
fig = plt.figure(dpi=dpi)
w, h = shape[1] / dpi, shape[0] / dpi
# w, h = 1, 10
fig.set_size_inches(w, h)
    ax = fig.add_subplot(1, 1, 1)
sm = plt.cm.ScalarMappable(cmap=plt.get_cmap(cmap))
sm.set_array(domain)
plt.colorbar(sm, cax=ax)
cb_img = render_figure_to_image(fig, dpi=dpi, transparent=transparent)
plt.close(fig)
return cb_img
class Color(ub.NiceRepr):
"""
move to colorutil?
Example:
>>> from netharn.util.mplutil import *
>>> print(Color('g'))
>>> print(Color('orangered'))
>>> print(Color('#AAAAAA').as255())
>>> print(Color([0, 255, 0]))
>>> print(Color([1, 1, 1.]))
>>> print(Color([1, 1, 1]))
>>> print(Color(Color([1, 1, 1])).as255())
>>> print(Color(Color([1., 0, 1, 0])).ashex())
>>> print(Color([1, 1, 1], alpha=255))
>>> print(Color([1, 1, 1], alpha=255, space='lab'))
"""
def __init__(self, color, alpha=None, space=None):
if isinstance(color, Color):
assert alpha is None
assert space is None
space = color.space
color = color.color01
else:
color = self._ensure_color01(color)
if alpha is not None:
alpha = self._ensure_color01([alpha])[0]
if space is None:
space = 'rgb'
# always normalize the color down to 01
color01 = list(color)
if alpha is not None:
if len(color01) not in [1, 3]:
raise ValueError('alpha already in color')
color01 = color01 + [alpha]
# correct space if alpha is given
if len(color01) in [2, 4]:
if not space.endswith('a'):
space += 'a'
self.color01 = color01
self.space = space
def __nice__(self):
colorpart = ', '.join(['{:.2f}'.format(c) for c in self.color01])
return self.space + ': ' + colorpart
def ashex(self, space=None):
c255 = self.as255(space)
return '#' + ''.join(['{:02x}'.format(c) for c in c255])
def as255(self, space=None):
color = (np.array(self.as01(space)) * 255).astype(np.uint8)
return tuple(map(int, color))
def as01(self, space=None):
"""
self = mplutil.Color('red')
mplutil.Color('green').as01('rgba')
"""
color = tuple(self.color01)
if space is not None:
if space == self.space:
pass
elif space == 'rgba' and self.space == 'rgb':
color = color + (1,)
elif space == 'bgr' and self.space == 'rgb':
color = color[::-1]
elif space == 'rgb' and self.space == 'bgr':
color = color[::-1]
else:
assert False
return tuple(map(float, color))
@classmethod
    def _is_base01(cls, channels):
""" check if a color is in base 01 """
def _test_base01(channels):
tests01 = {
'is_float': all([isinstance(c, (float, np.float64)) for c in channels]),
'is_01': all([c >= 0.0 and c <= 1.0 for c in channels]),
}
return tests01
if isinstance(channels, six.string_types):
return False
return all(_test_base01(channels).values())
@classmethod
def _is_base255(Color, channels):
""" there is a one corner case where all pixels are 1 or less """
if (all(c > 0.0 and c <= 255.0 for c in channels) and any(c > 1.0 for c in channels)):
            # Definitely in 255 space
return True
else:
# might be in 01 or 255
return all(isinstance(c, int) for c in channels)
@classmethod
def _hex_to_01(Color, hex_color):
"""
hex_color = '#6A5AFFAF'
"""
assert hex_color.startswith('#'), 'not a hex string %r' % (hex_color,)
parts = hex_color[1:].strip()
color255 = tuple(int(parts[i: i + 2], 16) for i in range(0, len(parts), 2))
assert len(color255) in [3, 4], 'must be length 3 or 4'
return Color._255_to_01(color255)
def _ensure_color01(Color, color):
""" Infer what type color is and normalize to 01 """
if isinstance(color, six.string_types):
color = Color._string_to_01(color)
elif Color._is_base255(color):
color = Color._255_to_01(color)
return color
@classmethod
def _255_to_01(Color, color255):
""" converts base 255 color to base 01 color """
return [channel / 255.0 for channel in color255]
@classmethod
def _string_to_01(Color, color):
"""
mplutil.Color._string_to_01('green')
mplutil.Color._string_to_01('red')
"""
from matplotlib import colors as mcolors
if color in mcolors.BASE_COLORS:
color01 = mcolors.BASE_COLORS[color]
elif color in mcolors.CSS4_COLORS:
color_hex = mcolors.CSS4_COLORS[color]
color01 = Color._hex_to_01(color_hex)
elif color.startswith('#'):
color01 = Color._hex_to_01(color)
else:
raise ValueError('unknown color=%r' % (color,))
return color01
@classmethod
    def named_colors(cls):
from matplotlib import colors as mcolors
names = sorted(list(mcolors.BASE_COLORS.keys()) + list(mcolors.CSS4_COLORS.keys()))
return names
@classmethod
def distinct(Color, num, space='rgb'):
"""
Make multiple distinct colors
"""
import matplotlib as mpl
import matplotlib._cm as _cm
cm = mpl.colors.LinearSegmentedColormap.from_list(
'gist_rainbow', _cm.datad['gist_rainbow'],
mpl.rcParams['image.lut'])
distinct_colors = [
np.array(cm(i / num)).tolist()[0:3]
for i in range(num)
]
if space == 'rgb':
return distinct_colors
else:
return [Color(c, space='rgb').as01(space=space) for c in distinct_colors]
if __name__ == '__main__':
r"""
CommandLine:
python -m netharn.util.mplutil
"""
import xdoctest
xdoctest.doctest_module(__file__)
```
#### File: util/nms/torch_nms.py
```python
def torch_nms(bboxes, scores, classes=None, thresh=.5):
"""
Non maximum suppression implemented with pytorch tensors
CURRENTLY NOT WORKING
Args:
bboxes (Tensor): Bounding boxes of one image in the format (x1, y1, x2, y2)
scores (Tensor): Scores of each box
classes (Tensor, optional): the classes of each box. If specified nms is applied to each class separately.
thresh (float): iou threshold
Returns:
ByteTensor: keep: boolean array indicating which boxes were not pruned.
Example:
>>> # DISABLE_DOCTEST
>>> import torch
>>> import numpy as np
>>> bboxes = torch.FloatTensor(np.array([
>>> [0, 0, 100, 100],
>>> [100, 100, 10, 10],
>>> [10, 10, 100, 100],
>>> [50, 50, 100, 100],
>>> ], dtype=np.float32))
>>> scores = torch.FloatTensor(np.array([.1, .5, .9, .1]))
>>> classes = torch.FloatTensor(np.array([0, 0, 0, 0]))
>>> thresh = .5
>>> keep = torch_nms(bboxes, scores, classes, thresh)
>>> bboxes[keep]
"""
if bboxes.numel() == 0:
return []
# Sort coordinates by descending score
scores, order = scores.sort(0, descending=True)
x1, y1, x2, y2 = bboxes[order].split(1, 1)
    # Compute dx and dy between each pair of boxes (these matrices contain every pair twice, once in each triangle)
dx = (x2.min(x2.t()) - x1.max(x1.t())).clamp_(min=0)
dy = (y2.min(y2.t()) - y1.max(y1.t())).clamp_(min=0)
# Compute iou
intersections = dx * dy
areas = (x2 - x1) * (y2 - y1)
unions = (areas + areas.t()) - intersections
ious = intersections / unions
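    # Note: this materializes a full NxN IoU matrix, so memory grows quadratically
    # with the number of boxes.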
# Filter based on iou (and class)
conflicting = (ious > thresh).triu(1)
if classes is not None:
same_class = (classes.unsqueeze(0) == classes.unsqueeze(1))
conflicting = (conflicting & same_class)
ordered_keep = (conflicting.sum(0) == 0) # Unlike numpy, pytorch cannot perform any() along a certain axis
keep = ordered_keep.new(*ordered_keep.size())
keep.scatter_(0, order, ordered_keep) # Unsort, so keep is aligned with input boxes
return keep
# aaa = torch.LongTensor(np.arange(len(boxes))).reshape(-1, 1)
# sorted(aaa[order][keep1[:, None].expand_as(aaa)].cpu().numpy().ravel()) == sorted(aaa[keep].cpu().numpy().ravel())
# bboxes[keep]
# keep1 = (conflicting.sum(0) == 0) # Unlike numpy, pytorch cannot perform any() along a certain axis
# bboxes[order][keep1[:, None].expand_as(bboxes)].view(-1, 4).contiguous()
def _benchmark():
import ubelt
import torch
import numpy as np
import netharn as nh
from netharn.util.nms.torch_nms import torch_nms
from netharn.util import non_max_supression
import ubelt as ub
import itertools as it
N = 100
bestof = 10
ydata = ub.ddict(list)
xdata = [10, 20, 40, 80, 100, 200, 300, 400, 500, 600, 700, 1000, 1500, 2000]
rng = nh.util.ensure_rng(0)
thresh = 0.5
for num in xdata:
outputs = {}
# Build random test boxes and scores
boxes = nh.util.Boxes.random(num, scale=10.0, rng=rng, format='tlbr', tensor=True).data
scores = torch.Tensor(rng.rand(len(boxes)))
t1 = ubelt.Timerit(N, bestof=bestof, label='torch(cpu)')
for timer in t1:
with timer:
keep = torch_nms(boxes, scores, thresh=thresh)
ydata[t1.label].append(t1.min())
outputs[t1.label] = np.where(keep.cpu().numpy())[0]
if torch.cuda.is_available():
# Move boxes to the GPU
gpu_boxes = boxes.cuda()
gpu_scores = scores.cuda()
t1 = ubelt.Timerit(N, bestof=bestof, label='torch(gpu)')
for timer in t1:
with timer:
keep = torch_nms(gpu_boxes, gpu_scores, thresh=thresh)
torch.cuda.synchronize()
ydata[t1.label].append(t1.min())
outputs[t1.label] = np.where(keep.cpu().numpy())[0]
# Move boxes to numpy
np_boxes = boxes.cpu().numpy()
np_scores = scores.cpu().numpy()
t1 = ubelt.Timerit(N, bestof=bestof, label='numpy(cpu)')
for timer in t1:
with timer:
keep = non_max_supression(np_boxes, np_scores, thresh=thresh, impl='py')
ydata[t1.label].append(t1.min())
outputs[t1.label] = sorted(keep)
t1 = ubelt.Timerit(N, bestof=bestof, label='cython(cpu)')
for timer in t1:
with timer:
keep = non_max_supression(np_boxes, np_scores, thresh=thresh, impl='cpu')
ydata[t1.label].append(t1.min())
outputs[t1.label] = sorted(keep)
if torch.cuda.is_available():
t1 = ubelt.Timerit(N, bestof=bestof, label='cython(gpu)')
for timer in t1:
with timer:
keep = non_max_supression(np_boxes, np_scores, thresh=thresh, impl='gpu')
ydata[t1.label].append(t1.min())
outputs[t1.label] = sorted(keep)
# Check that all kept boxes do not have more than `threshold` ious
for key, idxs in outputs.items():
ious = nh.util.box_ious(np_boxes[idxs], np_boxes[idxs])
max_iou = (np.tril(ious) - np.eye(len(ious))).max()
if max_iou > thresh:
print('{} produced a bad result with max_iou={}'.format(key, max_iou))
# Check result consistency:
print('Result consistency:')
for k1, k2 in it.combinations(outputs.keys(), 2):
idxs1 = set(outputs[k1])
idxs2 = set(outputs[k2])
jaccard = len(idxs1 & idxs2) / len(idxs1 | idxs2)
print('{}, {}: {}'.format(k1, k2, jaccard))
nh.util.mplutil.qtensure()
nh.util.mplutil.multi_plot(xdata, ydata, xlabel='num boxes', ylabel='seconds')
```
#### File: netharn/util/util_boxes.py
```python
import numpy as np
import torch
import ubelt as ub
try:
from netharn.util.cython_boxes import bbox_ious_c
except ImportError:
bbox_ious_c = None
def box_ious(boxes1, boxes2, bias=0, mode=None):
"""
Args:
boxes1 (ndarray): (N, 4) tlbr format
boxes2 (ndarray): (K, 4) tlbr format
bias (int): either 0 or 1, does tl=br have area of 0 or 1?
Example:
>>> # xdoctest: +IGNORE_WHITESPACE
>>> boxes1 = Boxes.random(5, scale=10.0, rng=0, format='tlbr').data
>>> boxes2 = Boxes.random(7, scale=10.0, rng=1, format='tlbr').data
>>> ious = box_ious(boxes1, boxes2)
>>> print(ub.repr2(ious.tolist(), precision=2))
[
[0.00, 0.00, 0.28, 0.00, 0.00, 0.20, 0.01],
[0.00, 0.00, 0.50, 0.00, 0.04, 0.06, 0.00],
[0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[0.00, 0.00, 0.02, 0.00, 0.00, 0.00, 0.00],
[0.00, 0.00, 0.19, 0.03, 0.00, 0.00, 0.00],
]
Example:
>>> boxes1 = Boxes.random(5, scale=10.0, rng=0, format='tlbr').data
>>> boxes2 = Boxes.random(7, scale=10.0, rng=1, format='tlbr').data
>>> if bbox_ious_c is not None:
>>> ious_c = box_ious(boxes1, boxes2, bias=0, mode='c')
>>> ious_py = box_ious(boxes1, boxes2, bias=0, mode='py')
>>> assert np.all(np.isclose(ious_c, ious_py))
>>> ious_c = box_ious(boxes1, boxes2, bias=1, mode='c')
>>> ious_py = box_ious(boxes1, boxes2, bias=1, mode='py')
>>> assert np.all(np.isclose(ious_c, ious_py))
"""
if mode == 'torch' or torch.is_tensor(boxes1):
# TODO: add tests for equality with other methods or show why it should
# be different
return box_ious_torch(boxes1, boxes2, bias)
if mode is None:
mode = 'py' if bbox_ious_c is None else 'c'
if mode == 'c':
return bbox_ious_c(boxes1.astype(np.float32),
boxes2.astype(np.float32), bias)
elif mode == 'py':
return box_ious_py(boxes1, boxes2, bias)
else:
raise KeyError(mode)
# def light_bbox_ious_cxywh_bias0(boxes1, boxes2):
# """ Compute IOU between all boxes from ``boxes1`` with all boxes from ``boxes2``.
# Notes:
# Accepts cxywh format and the bias is 0
# Args:
# boxes1 (torch.Tensor): List of bounding boxes
# boxes2 (torch.Tensor): List of bounding boxes
# Note:
# List format: [[xc, yc, w, h],...]
# Example:
# >>> boxes1 = Boxes.random(5, scale=10.0, rng=0, format='cxywh', tensor=True).data
# >>> boxes2 = Boxes.random(7, scale=10.0, rng=1, format='cxywh', tensor=True).data
# >>> bias = 1
# >>> ious = light_bbox_ious(boxes1, boxes2)
# """
# b1x1, b1y1 = (boxes1[:, :2] - (boxes1[:, 2:4] / 2)).split(1, 1)
# b1x2, b1y2 = (boxes1[:, :2] + (boxes1[:, 2:4] / 2)).split(1, 1)
# b2x1, b2y1 = (boxes2[:, :2] - (boxes2[:, 2:4] / 2)).split(1, 1)
# b2x2, b2y2 = (boxes2[:, :2] + (boxes2[:, 2:4] / 2)).split(1, 1)
# dx = (b1x2.min(b2x2.t()) - b1x1.max(b2x1.t())).clamp(min=0)
# dy = (b1y2.min(b2y2.t()) - b1y1.max(b2y1.t())).clamp(min=0)
# intersections = dx * dy
# areas1 = (b1x2 - b1x1) * (b1y2 - b1y1)
# areas2 = (b2x2 - b2x1) * (b2y2 - b2y1)
# unions = (areas1 + areas2.t()) - intersections
# return intersections / unions
def box_ious_torch(boxes1, boxes2, bias=1):
"""
Example:
>>> boxes1 = Boxes.random(5, scale=10.0, rng=0, format='tlbr', tensor=True).data
>>> boxes2 = Boxes.random(7, scale=10.0, rng=1, format='tlbr', tensor=True).data
>>> bias = 0
>>> ious = box_ious_torch(boxes1, boxes2, bias)
>>> ious_np = box_ious_py(boxes1.numpy(), boxes2.numpy(), bias)
>>> assert np.all(ious_np == ious.numpy())
Benchmark:
import ubelt
import netharn as nh
N = 100
ydata = ub.ddict(list)
xdata = [10, 20, 40, 80, 100, 200, 300, 400, 500, 600, 700]
for num in xdata:
boxes1 = nh.util.Boxes.random(num, scale=10.0, rng=0, format='tlbr', tensor=True).data
boxes2 = nh.util.Boxes.random(num + 1, scale=10.0, rng=1, format='tlbr', tensor=True).data
t1 = ubelt.Timerit(N, bestof=10, label='time-torch-cpu')
for timer in t1:
with timer:
box_ious_torch(boxes1, boxes2, bias)
ydata['cpu'].append(t1.ave_secs)
boxes1 = boxes1.cuda()
boxes2 = boxes2.cuda()
t2 = ubelt.Timerit(N, bestof=10, label='time-torch-gpu')
for timer in t2:
with timer:
box_ious_torch(boxes1, boxes2, bias)
torch.cuda.synchronize()
ydata['gpu'].append(t2.ave_secs)
boxes1 = boxes1.cpu().numpy()
boxes2 = boxes2.cpu().numpy()
t3 = ubelt.Timerit(N, bestof=10, label='time-numpy')
for timer in t3:
with timer:
box_ious_py(boxes1, boxes2, bias)
ydata['numpy'].append(t3.ave_secs)
nh.util.mplutil.qtensure()
nh.util.mplutil.multi_plot(xdata, ydata, xlabel='num boxes', ylabel='seconds')
"""
# boxes1 = boxes1.view(-1, 4)
# boxes2 = boxes2.view(-1, 4)
w1 = boxes1[..., 2] - boxes1[..., 0] + bias
h1 = boxes1[..., 3] - boxes1[..., 1] + bias
w2 = boxes2[..., 2] - boxes2[..., 0] + bias
h2 = boxes2[..., 3] - boxes2[..., 1] + bias
areas1 = w1 * h1
areas2 = w2 * h2
x_maxs = torch.min(boxes1[..., 2][..., None], boxes2[..., 2])
x_mins = torch.max(boxes1[..., 0][..., None], boxes2[..., 0])
iws = (x_maxs - x_mins + bias).clamp(0, float('inf'))
y_maxs = torch.min(boxes1[..., 3][..., None], boxes2[..., 3])
y_mins = torch.max(boxes1[..., 1][..., None], boxes2[..., 1])
ihs = (y_maxs - y_mins + bias).clamp(0, float('inf'))
areas_sum = (areas1[..., None] + areas2)
inter_areas = iws * ihs
union_areas = (areas_sum - inter_areas)
ious = inter_areas / union_areas
return ious
def box_ious_py(boxes1, boxes2, bias=1):
"""
This is the fastest python implementation of bbox_ious I found
"""
w1 = boxes1[:, 2] - boxes1[:, 0] + bias
h1 = boxes1[:, 3] - boxes1[:, 1] + bias
w2 = boxes2[:, 2] - boxes2[:, 0] + bias
h2 = boxes2[:, 3] - boxes2[:, 1] + bias
areas1 = w1 * h1
areas2 = w2 * h2
x_maxs = np.minimum(boxes1[:, 2][:, None], boxes2[:, 2])
x_mins = np.maximum(boxes1[:, 0][:, None], boxes2[:, 0])
iws = np.maximum(x_maxs - x_mins + bias, 0)
# note: it would be possible to significantly reduce the computation by
# filtering any box pairs where iws <= 0. Not sure how to do with numpy.
y_maxs = np.minimum(boxes1[:, 3][:, None], boxes2[:, 3])
y_mins = np.maximum(boxes1[:, 1][:, None], boxes2[:, 1])
ihs = np.maximum(y_maxs - y_mins + bias, 0)
areas_sum = (areas1[:, None] + areas2)
inter_areas = iws * ihs
union_areas = (areas_sum - inter_areas)
ious = inter_areas / union_areas
return ious
class _BoxConversionMixins(object):
"""
Methods for converting between different bounding box formats
"""
def toformat(self, format, copy=True):
"""
Changes the internal representation of the bounding box
"""
if format == 'xywh':
return self.to_xywh(copy=copy)
elif format == 'tlbr':
return self.to_tlbr(copy=copy)
elif format == 'cxywh':
return self.to_cxywh(copy=copy)
elif format == 'extent':
return self.to_extent(copy=copy)
else:
raise KeyError('Cannot convert {} to {}'.format(self.format, format))
def to_extent(self, copy=True):
if self.format == 'extent':
return self.copy() if copy else self
else:
# Only difference between tlbr and extent is the column order
# extent is x1, x2, y1, y2
tlbr = self.to_tlbr().data
extent = tlbr[..., [0, 2, 1, 3]]
return Boxes(extent, 'extent')
def to_xywh(self, copy=True):
if self.format == 'xywh':
return self.copy() if copy else self
elif self.format == 'tlbr':
x1, y1, x2, y2 = self.components
w = x2 - x1
h = y2 - y1
elif self.format == 'cxywh':
cx, cy, w, h = self.components
x1 = cx - w / 2
y1 = cy - h / 2
else:
raise KeyError(self.format)
xywh = self._cat([x1, y1, w, h])
return Boxes(xywh, 'xywh')
def to_cxywh(self, copy=True):
if self.format == 'cxywh':
return self.copy() if copy else self
elif self.format == 'tlbr':
x1, y1, x2, y2 = self.components
w = x2 - x1
h = y2 - y1
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
elif self.format == 'xywh':
x1, y1, w, h = self.components
cx = x1 + (w / 2)
cy = y1 + (h / 2)
else:
raise KeyError(self.format)
cxywh = self._cat([cx, cy, w, h])
return Boxes(cxywh, 'cxywh')
def to_tlbr(self, copy=True):
if self.format == 'tlbr':
return self.copy() if copy else self
elif self.format == 'cxywh':
cx, cy, w, h = self.components
half_w = (w / 2)
half_h = (h / 2)
x1 = cx - half_w
x2 = cx + half_w
y1 = cy - half_h
y2 = cy + half_h
elif self.format == 'xywh':
x1, y1, w, h = self.components
x2 = x1 + w
y2 = y1 + h
else:
raise KeyError(self.format)
tlbr = self._cat([x1, y1, x2, y2])
return Boxes(tlbr, 'tlbr')
def to_imgaug(self, shape):
"""
Args:
shape (tuple): shape of image that boxes belong to
Example:
>>> self = Boxes([[25, 30, 15, 10]], 'tlbr')
>>> bboi = self.to_imgaug((10, 10))
"""
import imgaug
if len(self.data.shape) != 2:
raise ValueError('data must be 2d got {}d'.format(len(self.data.shape)))
tlbr = self.to_tlbr(copy=False).data
bboi = imgaug.BoundingBoxesOnImage(
[imgaug.BoundingBox(x1, y1, x2, y2)
for x1, y1, x2, y2 in tlbr], shape=shape)
return bboi
@classmethod
def from_imgaug(Boxes, bboi):
"""
Args:
bboi (ia.BoundingBoxesOnImage):
Example:
>>> orig = Boxes.random(5, format='tlbr')
>>> bboi = orig.to_imgaug(shape=(500, 500))
>>> self = Boxes.from_imgaug(bboi)
>>> assert np.all(self.data == orig.data)
"""
tlbr = np.array([[bb.x1, bb.y1, bb.x2, bb.y2]
for bb in bboi.bounding_boxes])
tlbr = tlbr.reshape(-1, 4)
return Boxes(tlbr, format='tlbr')
class _BoxPropertyMixins(object):
@property
def xy_center(self):
""" Returns the xy coordinates of the box centers """
xy = self.to_cxywh(copy=False).data[..., 0:2]
return xy
@property
def components(self):
a = self.data[..., 0:1]
b = self.data[..., 1:2]
c = self.data[..., 2:3]
d = self.data[..., 3:4]
return [a, b, c, d]
@property
def shape(self):
return self.data.shape
@property
def area(self):
"""
Example:
>>> Boxes([25, 30, 15, 10], 'xywh').area
array([150])
>>> Boxes([[25, 30, 0, 0]], 'xywh').area
array([[0]])
"""
w, h = self.to_xywh().components[-2:]
return w * h
def _numel(data):
""" compatable API between torch and numpy """
if isinstance(data, np.ndarray):
return data.size
else:
return data.numel()
class _BoxTransformMixins(object):
"""
methods for transforming bounding boxes
"""
def scale(self, factor):
r"""
works with tlbr, cxywh, xywh, xy, or wh formats
Example:
>>> # xdoctest: +IGNORE_WHITESPACE
>>> Boxes(np.array([1, 1, 10, 10])).scale(2).data
array([ 2., 2., 20., 20.])
>>> Boxes(np.array([[1, 1, 10, 10]])).scale((2, .5)).data
array([[ 2. , 0.5, 20. , 5. ]])
>>> Boxes(np.array([[10, 10]])).scale(.5).data
array([[5., 5.]])
"""
boxes = self.data
sx, sy = factor if ub.iterable(factor) else (factor, factor)
if torch.is_tensor(boxes):
new_data = boxes.float().clone()
else:
if boxes.dtype.kind != 'f':
new_data = boxes.astype(np.float)
else:
new_data = boxes.copy()
new_data[..., 0:4:2] *= sx
new_data[..., 1:4:2] *= sy
return Boxes(new_data, self.format)
def translate(self, amount):
"""
Example:
>>> # xdoctest: +IGNORE_WHITESPACE
>>> Boxes([25, 30, 15, 10], 'xywh').shift(10)
<Boxes(xywh, array([35., 40., 15., 10.]))>
>>> Boxes([25, 30, 15, 10], 'xywh').shift((10, 0))
<Boxes(xywh, array([35., 30., 15., 10.]))>
>>> Boxes([25, 30, 15, 10], 'tlbr').shift((10, 5))
<Boxes(tlbr, array([35., 35., 25., 15.]))>
"""
boxes = self.data
if not ub.iterable(amount):
tx = ty = amount
elif isinstance(amount, (list, tuple)):
tx, ty = amount
else:
tx = amount[..., 0]
ty = amount[..., 1]
if torch.is_tensor(boxes):
new_data = boxes.float().clone()
else:
new_data = boxes.astype(np.float).copy()
if _numel(new_data) > 0:
if self.format in ['xywh', 'cxywh']:
new_data[..., 0] += tx
new_data[..., 1] += ty
elif self.format in ['tlbr']:
new_data[..., 0:4:2] += tx
new_data[..., 1:4:2] += ty
else:
raise KeyError(self.format)
return Boxes(new_data, self.format)
shift = translate
def clip(self, x_min, y_min, x_max, y_max, inplace=False):
"""
Clip boxes to image boundaries. If box is in tlbr format, inplace
operation is an option.
Example:
>>> # xdoctest: +IGNORE_WHITESPACE
>>> boxes = Boxes(np.array([[-10, -10, 120, 120], [1, -2, 30, 50]]), 'tlbr')
>>> clipped = boxes.clip(0, 0, 110, 100, inplace=False)
>>> assert np.any(boxes.data != clipped.data)
>>> clipped2 = boxes.clip(0, 0, 110, 100, inplace=True)
>>> assert clipped2.data is boxes.data
>>> assert np.all(clipped2.data == clipped.data)
>>> print(clipped)
<Boxes(tlbr,
array([[ 0, 0, 110, 100],
[ 1, 0, 30, 50]]))>
"""
if inplace:
if self.format != 'tlbr':
raise ValueError('Must be in tlbr format to operate inplace')
self2 = self
else:
self2 = self.to_tlbr(copy=True)
x1, y1, x2, y2 = self2.data.T
np.clip(x1, x_min, x_max, out=x1)
np.clip(y1, y_min, y_max, out=y1)
np.clip(x2, x_min, x_max, out=x2)
np.clip(y2, y_min, y_max, out=y2)
return self2
def transpose(self):
"""
Flips the box itself in data coordinates
"""
x, y, w, h = self.to_xywh().components
self2 = self.__class__(self._cat([y, x, h, w]), format='xywh')
self2 = self2.toformat(self.format)
return self2
class Boxes(ub.NiceRepr, _BoxConversionMixins, _BoxPropertyMixins, _BoxTransformMixins):
"""
Converts boxes between different formats as long as the last dimension
contains 4 coordinates and the format is specified.
This is a convenience class, and should not store the data for very
long. The general idiom should be create class, convert data, and then get
the raw data and let the class be garbage collected. This will help ensure
that your code is portable and understandable if this class is not
available.
Example:
>>> # xdoctest: +IGNORE_WHITESPACE
>>> Boxes([25, 30, 15, 10], 'xywh')
<Boxes(xywh, array([25, 30, 15, 10]))>
>>> Boxes([25, 30, 15, 10], 'xywh').to_xywh()
<Boxes(xywh, array([25, 30, 15, 10]))>
>>> Boxes([25, 30, 15, 10], 'xywh').to_cxywh()
<Boxes(cxywh, array([32.5, 35. , 15. , 10. ]))>
>>> Boxes([25, 30, 15, 10], 'xywh').to_tlbr()
<Boxes(tlbr, array([25, 30, 40, 40]))>
>>> Boxes([25, 30, 15, 10], 'xywh').scale(2).to_tlbr()
<Boxes(tlbr, array([50., 60., 80., 80.]))>
>>> Boxes(torch.FloatTensor([[25, 30, 15, 20]]), 'xywh').scale(.1).to_tlbr()
<Boxes(tlbr, tensor([[ 2.5000, 3.0000, 4.0000, 5.0000]]))>
Example:
>>> datas = [
>>> [1, 2, 3, 4],
>>> [[1, 2, 3, 4], [4, 5, 6, 7]],
>>> [[[1, 2, 3, 4], [4, 5, 6, 7]]],
>>> ]
>>> formats = ['xywh', 'cxywh', 'tlbr']
>>> for format1 in formats:
>>> for data in datas:
>>> self = box1 = Boxes(data, format1)
>>> for format2 in formats:
>>> box2 = box1.toformat(format2)
>>> back = box2.toformat(format1)
>>> assert box1 == back
"""
def __init__(self, data, format='xywh'):
CHECKS = False
if CHECKS:
if _numel(data) > 0 and data.shape[-1] != 4:
raise ValueError('trailing dimension of boxes must be 4')
if isinstance(data, (list, tuple)):
data = np.array(data)
self.data = data
self.format = format
def __getitem__(self, index):
cls = self.__class__
subset = cls(self.data[index], self.format)
return subset
def __eq__(self, other):
"""
Tests equality of two Boxes objects
Example:
>>> box0 = box1 = Boxes([[1, 2, 3, 4]], 'xywh')
>>> box2 = Boxes(box0.data, 'tlbr')
>>> box3 = Boxes([[0, 2, 3, 4]], box0.format)
>>> box4 = Boxes(box0.data, box2.format)
>>> assert box0 == box1
>>> assert not box0 == box2
>>> assert not box2 == box3
>>> assert box2 == box4
"""
return np.array_equal(self.data, other.data) and self.format == other.format
def __nice__(self):
# return self.format + ', shape=' + str(list(self.data.shape))
data_repr = repr(self.data)
if '\n' in data_repr:
data_repr = ub.indent('\n' + data_repr.lstrip('\n'), ' ')
return '{}, {}'.format(self.format, data_repr)
__repr__ = ub.NiceRepr.__str__
@classmethod
def random(Boxes, num=1, scale=1.0, format='xywh', rng=None, tensor=False):
"""
Makes random boxes
Example:
>>> # xdoctest: +IGNORE_WHITESPACE
>>> Boxes.random(3, rng=0, scale=100)
<Boxes(xywh,
array([[27, 35, 30, 27],
[21, 32, 21, 44],
[48, 19, 39, 26]]))>
>>> Boxes.random(3, rng=0, scale=100, tensor=True)
<Boxes(xywh,
tensor([[ 27, 35, 30, 27],
[ 21, 32, 21, 44],
[ 48, 19, 39, 26]]))>
"""
from netharn import util
rng = util.ensure_rng(rng)
xywh = (rng.rand(num, 4) * scale / 2)
as_integer = isinstance(scale, int)
if as_integer:
xywh = xywh.astype(np.int)
if tensor:
if as_integer:
xywh = torch.LongTensor(xywh)
else:
xywh = torch.FloatTensor(xywh)
boxes = Boxes(xywh, format='xywh').toformat(format, copy=False)
return boxes
def copy(self):
if torch.is_tensor(self.data):
new_data = self.data.clone()
else:
new_data = self.data.copy()
return Boxes(new_data, self.format)
@classmethod
def _cat(cls, datas):
if torch.is_tensor(datas[0]):
return torch.cat(datas, dim=-1)
else:
return np.concatenate(datas, axis=-1)
def compress(self, flags, axis=0, inplace=False):
"""
Filters boxes based on a boolean criterion
Example:
>>> self = Boxes([[25, 30, 15, 10]], 'tlbr')
>>> flags = [False]
"""
if len(self.data.shape) != 2:
raise ValueError('data must be 2d got {}d'.format(len(self.data.shape)))
self2 = self if inplace else self.copy()
self2.data = self2.data.compress(flags, axis=axis)
return self2
def numpy(self):
""" converts tensors to numpy """
new_self = self.copy()
if torch.is_tensor(self.data):
new_self.data = new_self.data.cpu().numpy()
return new_self
def ious(self, other, bias=0, mode=None):
"""
Compute IOUs between these boxes and another set of boxes
Examples:
>>> formats = ['cxywh', 'xywh', 'tlbr']
>>> istensors = [False, True]
>>> results = {}
>>> for format in formats:
>>> for tensor in istensors:
>>> boxes1 = Boxes.random(5, scale=10.0, rng=0, format=format, tensor=tensor)
>>> boxes2 = Boxes.random(7, scale=10.0, rng=1, format=format, tensor=tensor)
>>> ious = boxes1.ious(boxes2)
>>> results[(format, tensor)] = ious
>>> results = {k: v.numpy() if torch.is_tensor(v) else v for k, v in results.items() }
>>> results = {k: v.tolist() for k, v in results.items()}
>>> print(ub.repr2(results, sk=True, precision=3, nl=2))
>>> from functools import partial
>>> assert ub.allsame(results.values(), partial(np.allclose, atol=1e-07))
"""
self_tlbr = self.to_tlbr(copy=False)
other_tlbr = other.to_tlbr(copy=False)
ious = box_ious(self_tlbr.data, other_tlbr.data, bias=bias, mode=mode)
return ious
def view(self, *shape):
"""
Passthrough method to view or reshape
Example:
>>> self = Boxes.random(6, scale=10.0, rng=0, format='xywh', tensor=True)
>>> assert list(self.view(3, 2, 4).data.shape) == [3, 2, 4]
>>> self = Boxes.random(6, scale=10.0, rng=0, format='tlbr', tensor=False)
>>> assert list(self.view(3, 2, 4).data.shape) == [3, 2, 4]
"""
if torch.is_tensor(self.data):
data_ = self.data.view(*shape)
else:
data_ = self.data.reshape(*shape)
return self.__class__(data_, self.format)
if __name__ == '__main__':
"""
CommandLine:
python -m netharn.util.util_boxes all
"""
import xdoctest
xdoctest.doctest_module(__file__)
```
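A minimal usage sketch of the `Boxes` container defined above; it assumes the file is importable as `netharn.util.util_boxes` (matching the path in the header) and uses made-up coordinates purely for illustration.
```python
# Hedged usage sketch for the Boxes class above; values are illustrative only.
import numpy as np
from netharn.util.util_boxes import Boxes  # assumes the module path in the header

# Two boxes in xywh format: (x, y, width, height)
boxes = Boxes(np.array([[10, 10, 20, 20],
                        [15, 15, 20, 20]]), format='xywh')

# Convert between the supported formats; each call returns a new Boxes object
tlbr = boxes.to_tlbr()
cxywh = boxes.to_cxywh()

# Transforms also return new Boxes objects in the same format
shifted = boxes.scale(2.0).translate((5, 0))

# Pairwise IoUs against another set of boxes (bias=0 treats tl == br as zero area)
others = Boxes(np.array([[12, 12, 20, 20]]), format='xywh')
ious = boxes.ious(others, bias=0)
print(ious.shape)  # -> (2, 1)
```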
#### File: netharn/util/util_cachestamp.py
```python
from os.path import exists
import ubelt as ub
class CacheStamp(object):
"""
Quickly determine if a computation that writes a file has been done.
Writes a file that marks that a procedure has been done by writing a
"stamp" file to disk. Removing the stamp file will force recomputation.
However, removing or changing the result of the computation may not trigger
recomputation unless specific handling is done or the expected "product"
of the computation is a file and registered with the stamper. If robust is
False, we only check if the product exists, and we ignore its hash.
Args:
fname (str):
name of the stamp file
cfgstr (str):
configuration associated with the stamped computation. A common
pattern is to call `ub.hash_data` on a dependency list.
dpath (str):
where to store the cached stamp file
product (str or list, optional):
Path or paths that we expect the computation to produce. If
specified the hash of the paths are stored. It is faster, but less
robust if products are not specified.
robust (bool):
if True and product was specified, we use the product hash to
check if it is expired, otherwise we assume the file has not
been corrupted or changed.
Example:
>>> import ubelt as ub
>>> from os.path import join
>>> # Stamp the computation of expensive-to-compute.txt
>>> dpath = ub.ensure_app_cache_dir('netharn', 'test-cache-stemp')
>>> product = join(dpath, 'expensive-to-compute.txt')
>>> self = CacheStamp('somedata', dpath, cfgstr='someconfig', product=product)
>>> if self.expired():
>>> ub.writeto(product, 'very expensive')
>>> self.renew()
>>> assert not self.expired()
>>> self.robust = False
>>> # corrupting the output will not expire in non-robust mode
>>> ub.writeto(product, 'corrupted')
>>> assert not self.expired()
>>> self.robust = True
>>> # but it will expire now that we are back in robust mode
>>> assert self.expired()
>>> # deleting the product will cause expiration in any mode
>>> self.robust = False
>>> ub.delete(product)
>>> assert self.expired()
"""
def __init__(self, fname, dpath, cfgstr=None, product=None, robust=True):
self.cacher = ub.Cacher(fname, cfgstr=cfgstr, dpath=dpath)
self.product = product
self.robust = robust
def _get_certificate(self, cfgstr=None):
"""
Returns the stamp certificate if it exists
"""
certificate = self.cacher.tryload(cfgstr=cfgstr)
return certificate
def _rectify_products(self, product=None):
""" puts products in a normalied format """
products = self.product if product is None else product
if not isinstance(products, (list, tuple)):
products = [products]
return products
def _product_file_hash(self, product=None):
"""
Get the hash of the each product file
"""
import xxhash # much faster than builtin hashers
products = self._rectify_products(product)
product_file_hash = [ub.hash_file(p, hasher=xxhash.xxh64)
for p in products]
return product_file_hash
def expired(self, cfgstr=None, product=None):
"""
Check to see if a previously existing stamp is still valid and if the
expected result of that computation still exists.
Args:
cfgstr (str, optional): override the default cfgstr if specified
product (str or list, optional): override the default product if
specified
"""
products = self._rectify_products(product)
certificate = self._get_certificate(cfgstr=cfgstr)
if certificate is None:
# We dont even have a certificate, so we are expired
is_expired = True
elif products is None:
# We dont have a product to check, so assume not expired
# TODO: we could check the timestamp in the certificate
is_expired = False
elif not all(map(exists, products)):
# We are expired if the expected product does not exist
is_expired = True
elif not self.robust:
# Assume that the product hash is the same.
is_expired = False
else:
# We are expired if the hash of the existing product data
# does not match the expected hash in the certificate
product_file_hash = self._product_file_hash(products)
certificate_hash = certificate.get('product_file_hash', None)
is_expired = product_file_hash != certificate_hash
return is_expired
def renew(self, cfgstr=None, product=None):
"""
Recertify that the product has been recomputed by writing a new
certificate to disk.
"""
products = self._rectify_products(product)
certificate = {
'timestamp': ub.timestamp(),
'product': products,
}
if products is not None:
if not all(map(exists, products)):
raise IOError(
'The stamped product must exist: {}'.format(products))
certificate['product_file_hash'] = self._product_file_hash(products)
self.cacher.save(certificate, cfgstr=cfgstr)
if __name__ == '__main__':
"""
CommandLine:
python -m netharn.util.util_cachestamp all
"""
import xdoctest
xdoctest.doctest_module(__file__)
``` |
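A short sketch of the guard pattern `CacheStamp` is built for, assuming the module is importable as `netharn.util.util_cachestamp` and that `ubelt` and `xxhash` (used by `_product_file_hash`) are installed; the paths and cfgstr are placeholders.
```python
# Hedged example of guarding an expensive computation with CacheStamp.
from os.path import join
import ubelt as ub
from netharn.util.util_cachestamp import CacheStamp  # assumes the path in the header

dpath = ub.ensure_app_cache_dir('my_app')     # placeholder cache directory
product = join(dpath, 'features.txt')         # file the computation is expected to write
stamp = CacheStamp('features-stamp', dpath, cfgstr='v1', product=product)

if stamp.expired():
    # ... do the expensive work that produces `product` ...
    ub.writeto(product, 'pretend these are expensive features')
    stamp.renew()                             # record a fresh certificate

# On the next run, expired() returns False unless the product or cfgstr changed.
```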
{
"source": "JoshuaBeard/netsounds",
"score": 3
} |
#### File: netsounds/netsounds/activation_ifft.py
```python
import os
import argparse
import torch
import ipdb # NOQA
from PIL import Image
from torchvision import transforms
from torchvision.datasets import ImageNet
import utils
from models.squeezenet import TransparentSqueezeNet
IMAGES_DPATH = 'test/images'
IMAGENET_LOCATION = os.path.expandvars('$HOME/data')
SAMPLING_RATE = 44100
def playground(activations):
activation_depth = 0
activation_number = 0
save_name = 'activation-{}-{}.wav'
save_path = os.path.join(IMAGES_DPATH, 'sounds', save_name.format(activation_depth, activation_number))
# Grab the first activation and treat it as a spectrogram
spectrogram = activations[activation_depth][0, activation_number, ...].numpy() # NOQA
signal = utils.spectrogram_to_signal(spectrogram)
utils.save_as_wav(signal, save_path, sampling_rate=SAMPLING_RATE)
def get_image_paths(dpath=IMAGES_DPATH):
dpath = os.path.realpath(dpath)
images_fpaths = [os.path.join(dpath, filename)
for filename in os.listdir(dpath)]
return images_fpaths
def parse_labels_from_paths(image_paths):
labels = [' '.join((os.path.splitext(os.path.split(fp)[-1])[0]).split('_'))
for fp in image_paths]
return labels
def init_image_transforms(size=256):
if size > 0:
transform = transforms.Compose([
transforms.Resize(size),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.transpose(1, 2)
if x.shape[1] > x.shape[2] else x),
transforms.Lambda(lambda x: x.unsqueeze(0)),
])
else:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.transpose(1, 2)
if x.shape[1] > x.shape[2] else x),
transforms.Lambda(lambda x: x.unsqueeze(0)),
])
return transform
def read_images_as_tensors(image_paths, transform):
imgs = torch.cat([transform(Image.open(filename))
for filename in image_paths])
return imgs
def to_readable(net_out, imagenet):
# Highest value corresponds to prediction
labels_pred_numeric = net_out.argmax(dim=1)
# Grab classes from ImageNet
labels_pred = [imagenet.classes[n] for n in labels_pred_numeric]
return labels_pred
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_size',
type=int,
default=256,
help='image size'
)
parser.add_argument(
'--image_name',
default=None,
help='a string that appears in the name of the images you want to load'
)
parser.add_argument(
'--debug',
action='store_true',
default=False,
help='whether to drop into an interactive python shell for debugging/testing'
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
# Location of test images
images_fpaths = get_image_paths()
if args.image_name is not None:
idx = [i for i in range(len(images_fpaths))
if args.image_name in images_fpaths[i]][0]
images_fpaths = [images_fpaths[idx]]
# Get the actual labels from the filenames
labels_act = parse_labels_from_paths(images_fpaths)
# Get ImageNet dataset for interpreting the outputs of the network
imagenet = ImageNet(IMAGENET_LOCATION,
split='val',
download=True)
# For each input image, transform to be same size and orientation
transform = init_image_transforms(args.image_size)
# Put all images into a big tensor for putting into model
imgs = read_images_as_tensors(images_fpaths, transform)
# Instantiate model
model = TransparentSqueezeNet(pretrained=True)
with torch.no_grad():
transparent_out = model.forward_transparent(imgs)
# Hardcoded based on output of forward_transparent()
activations = [transparent_out[i] for i in range(4)]
output_transparent = transparent_out[-1]
# Predicted label
labels_pred_trans = to_readable(output_transparent, imagenet)
if args.debug:
import IPython
IPython.embed()
``` |
{
"source": "Joshua-Beatty/ImageGridPython",
"score": 3
} |
#### File: ImageGridPython/imageGrid/imageGrid.py
```python
import sys
import os
from PIL import Image, ExifTags
from pathlib import Path
def cropFromTL(image, div_x, div_y):
"""crop image from top-left
0 1 2
3 4 5
6 7 8
Arguments:
image {image} -- PIL image object
div_x {int} -- div x
div_y {int} -- div y
"""
img_w, img_h = image.size
w = img_w//div_x
h = img_h//div_y
for i in range(div_y):
for j in range(div_x):
box = (j*w, i*h, (j+1)*w, (i+1)*h)
yield image.crop(box)
def crop(image, div_x, div_y):
"""crop image
8 7 6
5 4 3
2 1 0
Arguments:
image {image} -- PIL image object
div_x {int} -- div x
div_y {int} -- div y
"""
img_w, img_h = image.size
w = img_w//div_x
h = img_h//div_y
for i in range(div_y-1,-1,-1):
for j in range(div_x-1,-1,-1):
box = (j*w, i*h, (j+1)*w, (i+1)*h)
yield image.crop(box)
def cropSquare(image, div_x):
"""crop a square image
Arguments:
image {image} -- PIL image object
div_x {int} -- div x
"""
img_w, img_h = image.size
w = img_w//div_x
div_y = img_h//w
for i in range(div_y-1,-1,-1):
for j in range(div_x-1,-1,-1):
box = (j*w, i*w, (j+1)*w, (i+1)*w)
# yield box
yield image.crop(box)
def rotateByOrientation(image):
"""rotate image by orientation
Arguments:
image {image} -- PIL image object
Returns:
image -- PIL image object
"""
o = getOrientation(image)
if o == 3:
return image.rotate(180)
if o == 6:
return image.rotate(-90)
if o == 8:
return image.rotate(90)
return image
def getOrientation(image):
"""get image orientation
Arguments:
image {image} -- PIL image object
Returns:
int -- 1,3,6,8 or None
"""
if image._getexif():
return image._getexif().get(274, None)
return None
def saveGrid(image_fn, div_x, div_y):
"""save an image with grid
Arguments:
image_fn {str} -- image filename
div_x {int} -- div x
div_y {int} -- div y
Returns:
array -- filename of all images
"""
folder = Path('tmp')
if folder.exists() is not True:
folder.mkdir()
images = []
img = Image.open(image_fn)
img = rotateByOrientation(img)
print('size:{}'.format(img.size))
img_w, img_h = img.size
crop_w = crop_h = img_w//div_x
# crop_h = img_h//div_y
# for k, p in enumerate(crop(img, div_x, div_y)):
# for k, p in enumerate(cropFromTL(img, div_x, div_y)):
for k, p in enumerate(cropSquare(img, div_x)):
# print(k,p)
numOfImages = div_x * div_y
widthOfNumber = len(str(numOfImages))
img = Image.new('RGB', (crop_w,crop_h), 255)
img.paste(p)
fn = folder / '{}.jpg'.format(str(numOfImages - k).zfill(widthOfNumber))
img.save(fn)
images.append(fn)
return images
if __name__ == "__main__":
image_fn = sys.argv[1]
div_x = int(sys.argv[2].split('x')[0])
div_y = int(sys.argv[2].split('x')[1])
for i in saveGrid(image_fn, div_x, div_y):
print("Save {} file".format(i))
# os.remove(i)
``` |
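A brief usage sketch of `saveGrid`; the import path and input filename are assumptions, and the call mirrors the command-line form `python imageGrid.py photo.jpg 3x3` handled by the `__main__` block above.
```python
# Hedged example: split a photo into a 3x3 grid of square tiles under ./tmp
from imageGrid.imageGrid import saveGrid  # adjust the import to your layout

files = saveGrid('photo.jpg', 3, 3)  # 'photo.jpg' is a placeholder input image
for fn in files:
    print('wrote', fn)
```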
{
"source": "joshuabenuck/notebook",
"score": 2
} |
#### File: joshuabenuck/notebook/findthatrev.py
```python
import os, sys, subprocess
# Helper taken from an adafruit LCD tutorial.
def run_cmd(cmd):
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
output = p.communicate()[0]
return output
if __name__ == "__main__":
# set count of differences high
distance_from_head = 0
new_low = 900
low_distance = 0
path = "c:\\users\\joshua\\src"
i3v = "%s\\notebook\\i3v\\Marlin_RAMPS_EPCOS_i3v\\Configuration.h"%path
marlin = "%s\\Marlin\\Marlin\\Configuration.h"%path
while True:
try:
#run_cmd("cd %s\\Marlin && git revert -n 1.0.x~%d"%(path, distance_from_head))
run_cmd("cd %s\\Marlin && git checkout 1.0.x~%d"%(path, distance_from_head))
#print run_cmd("cd %s\\Marlin && git status"%path)
#print "diff -w %s %s"%(i3v, marlin)
output = run_cmd("diff -w %s %s"%(i3v, marlin))
diffs = len(output.split("\n"))
print "%d: %d"%(distance_from_head, diffs)
if diffs < new_low:
new_low = diffs; low_distance = distance_from_head
distance_from_head += 1
#run_cmd("cd %s\\Marlin && git clean -df && git checkout -- ."%path)
except:
break
#print run_cmd("cd %s\\Marlin && git stash save")
#print run_cmd("cd %s\\Marlin && git stash drop")
#run_cmd("cd %s\\Marlin && git status"%path)
#run_cmd("cd %s\\Marlin && git revert --abort"%path)
#run_cmd("cd %s\\Marlin && git checkout -- ."%path)
#run_cmd("cd %s\\Marlin && git clean -df && git checkout -- ."%path)
#print run_cmd("cd %s\\Marlin && git status"%path)
# start at HEAD
# get count of line differences
# go back a revision
print "Low was at %d with %d lines different."%(low_distance, new_low)
``` |
{
"source": "joshuabenuck/skyflyers",
"score": 3
} |
#### File: joshuabenuck/skyflyers/sim.py
```python
import serial, sys, curses, os, tty, termios, time
from select import select
# Two Byte Int in Little Endian
def tbint(one, two):
if type(one) == str: one = ord(one)
if type(two) == str: two = ord(two)
return one | (two << 8)
ack = "4153 AS"
u = [0x55, 0x80, 0x06, 0x00, 0x01, 0x00, 0x00, 0x40, 0x00]
"""
Device Descriptor from portal
0000: 12 01 00 02 00 00 00 20 30 14 50 01 00 01 01 02 ....... 0.P.....
0010: 00 01 ..
Configuration Descriptor
0000: 09 02 29 00 01 01 00 80 96 09 04 00 00 02 03 00 ..).............
0010: 00 00 09 21 11 01 00 01 22 1d 00 07 05 81 03 20 ...!...."......
0020: 00 01 07 05 01 03 20 00 01 ...... ..
The above contains config, interfaces, and endpoints.
Marker bytes are 0x09 and 0x07.
Format of "endpoing configuration stuff" that is "tacked on" here...
1 - Count of endpoints
for each endpoint:
1 - Endpoint id | Direction
1 - Type
1 - MaxPacketSize & 0xFF
1 - MaxPacketSize >> 8
"""
device_descriptor = [
0x12, 0x01, 0x00, 0x02, 0x00, 0x00, 0x00, 0x40, 0x30,
0x14, 0x50, 0x01, 0x00, 0x01, 0x01, 0x02, 0x00, 0x01
]
endpoint_data = [0x2,
0x81, 0x3, 0x20, 0x00,
0x01, 0x3, 0x20, 0x00]
configuration = [
0x09, 0x02, 0x29, 0x00, 0x01, 0x01, 0x00, 0x80, 0x96, 0x09, 0x04,
0x00, 0x00, 0x02, 0x03, 0x00,
0x00, 0x00, 0x09, 0x21, 0x11, 0x01, 0x00, 0x01, 0x22, 0x1d, 0x00,
0x07, 0x05, 0x81, 0x03, 0x20,
0x00, 0x01, 0x07, 0x05, 0x01, 0x03, 0x20, 0x00, 0x01
]
# Consider storing this in a more easily readable format and then converting.
strings = [
[ 0x04, 0x03, 0x09, 0x04 ],
[ 0x16, 0x03, 0x41, 0x00 , 0x63, 0x00, 0x74, 0x00 , 0x69, 0x00, 0x76, 0x00 , 0x69, 0x00, 0x73, 0x00 , 0x69, 0x00, 0x6f, 0x00 , 0x6e, 0x00 ],
[ 0x18, 0x03, 0x53, 0x00 , 0x70, 0x00, 0x79, 0x00 , 0x72, 0x00, 0x6f, 0x00 , 0x20, 0x00, 0x50, 0x00 , 0x6f, 0x00, 0x72, 0x00 , 0x74, 0x00, 0x61, 0x00 ]
]
report = [
0x06, 0x00, 0xff, 0x09 , 0x01, 0xa1, 0x01, 0x19 , 0x01, 0x29, 0x40, 0x15, 0x00, 0x26, 0xff, 0x00 , 0x75, 0x08, 0x95, 0x20, 0x81, 0x00, 0x19, 0x01, 0x29, 0x40, 0x91, 0x00 , 0xc0
]
# print byte with binary
def fbwb(field, value):
if type(value) == str: value = ord(value)
return field + ": " + "%02x %s"%(value, bin(value))
def fb(field, value, lookup=None):
if type(value) == str: value = ord(value)
ret = field + ": " + "%02x"%(value)
if lookup != None and lookup[value] != None:
ret += " (%s)"%lookup[value]
return ret
# print word
def fw(field, p1, p2):
if type(p1) == str: p1 = ord(p1)
if type(p2) == str: p2 = ord(p2)
return field + ": " + "%02x%02x"%(p1, p2)
# print string
def fs(field, data):
return field + ": "
def fhex(data):
even = True; hex_str = ""; disp = ""
for c in data:
hex_str += "%02x"%c
if even: even = False
else: hex_str += " "; even = True
if c < 32 or c >= 127: disp += "."
else: disp += chr(c)
if not even: hex_str+= " "
disp = " " + disp
return hex_str + disp
REQUEST_TYPE_IDS = {
0x80: ""
}
REQUEST_IDS = {
0x00: "GET_STATUS",
0x01: "CLEAR_FEATURE",
0x02: "Reserved",
0x03: "SET_FEATURE",
0x04: "Reserved",
0x05: "SET_ADDRESS",
0x06: "GET_DESCRIPTOR",
0x07: "SET_DESCRIPTOR",
0x08: "GET_CONFIGURATION",
0x09: "SET_CONFIGURATION",
0x0A: "GET_INTERFACE",
0x0B: "SET_INTERFACE",
0x0C: "SYNC FRAME",
}
class UsbRequest:
def __init__(self, packet):
self.packet = packet
self.bmRequestType = packet[0]
self.bRequest = packet[1]
# This still doesn't sit well with me.
# It appears to be Big Endian and not Little Endian.
# Is something reversing this?
# Or does a "word sized value" in the spec mean it is in BE?
self.wValue = tbint(packet[3], packet[2])
self.wIndex = tbint(packet[5], packet[4])
self.wLength = tbint(packet[6], packet[7])
self.data = packet[8:]
# Brandon's code ignores requests that meet these two criteria...
def canIgnore(self):
if self.bmRequestType & 0x80 == 0x80: return True
if self.wLength == 0x00: return True
return False
def isDescriptorRequest(self):
GET_DESCRIPTOR = 0x06
if self.bRequest == GET_DESCRIPTOR:
return True
return False
def isDeviceDescriptorRequest(self):
if self.isDescriptorRequest() and self.wValue == 0x01:
return True
return False
def isDeviceQualifierDescriptorRequest(self):
if self.isDescriptorRequest() and self.wValue == 0x06:
return True
return False
def isConfigurationDescriptorRequest(self):
if self.isDescriptorRequest() and self.wValue == 0x02:
return True
return False
def format(self):
disp = ""
disp += fbwb("bmRequestType", self.packet[0]) + "\n"
disp += fb("bRequest", self.packet[1], REQUEST_IDS) + "\n"
disp += fw("wValue", self.packet[2], self.packet[3]) + "\n"
disp += fw("wIndex", self.packet[4], self.packet[5]) + "\n"
disp += fw("wLength", self.packet[6], self.packet[7]) + "\n"
disp += fs("data", self.packet[8:]) + "\n"
return disp
I_SP_TYPE = 0
I_SP_REQTYPE = 1
I_SP_REQ = 2
SP_REQTYPE_USB = 'U'
"""
What is the canonical representation for packets?
Sometimes they are chars as they come off the serial port.
Other times they are ordinals as it makes it easier to write in code.
"""
class SimPacket:
def __init__(self, packet):
self.packet = packet
self.cmd = packet[0]
def _packet_type(self): return self.packet[0]
def _req_type(self): return self.packet[1]
def _is_f_packet(self):
if self._packet_type() == ord('F'): return True
return False
def isConnectedEvent(self):
if self._is_f_packet() and self._req_type() == 0x01:
return True
return False
def isDisconnectedEvent(self):
if self._is_f_packet() and self._req_type() == 0x00:
return True
return False
def isUsbPacket(self):
if self.packet[0] == ord('U'): return True
return False
def isDescriptorPacket(self):
if self.packet[0] == ord('D'): return True
return False
def isDeviceDescriptorPacket(self):
if not self.isDescriptorPacket(): return False
if self.packet[2] == 0x01: return True
return False
def isDeviceQualifierDescriptorPacket(self):
if not self.isDescriptorPacket(): return False
if self.packet[2] == 0x06: return True
return False
def isConfigurationDescriptorPacket(self):
if not self.isDescriptorPacket(): return False
if self.packet[2] == 0x02: return True
return False
def isStringDescriptorPacket(self):
if not self.isDescriptorPacket(): return False
if self.packet[2] == 0x03: return True
return False
def getStringIndex(self):
return self.packet[1]
def isReportDescriptorPacket(self):
if not self.isDescriptorPacket(): return False
if self.packet[2] == 0x22: return True
return False
def getRequestedDescriptorSize(self):
if len(self.packet) == 7:
return tbint(self.packet[5], self.packet[6])
return 0
def usbRequest(self):
if not self.isUsbPacket():
print "Warning: returning usb request for non-usb-request."
return UsbRequest(self.packet[1:])
def format(self):
return fhex(self.packet)
def p(self): print self.format()
"""
I may eventually merge this with SimPacket. For now, this is easier.
"""
class SimCommandPacket():
def __init__(self, cmd, data=[]):
self.cmd = cmd
self.data = data
class SimUsbClient():
def __init__(self):
self.port = serial.Serial("/dev/ttyUSB0", 57600)
def isPacketAvailable(self):
"""Are there enough chars on the port for another packet?"""
if self.port.inWaiting() >= 2: return True
return False
def getNextPacket(self):
"""Get the next packet. Need to be sure there is one waiting."""
total = tbint(self.port.read(), self.port.read())
#print "Total:", total
chrpacket = self.port.read(total)
ordpacket = [ord(c) for c in chrpacket]
packet = SimPacket(ordpacket)
packet.p()
return packet
def sendPacket(self, packet):
"""Takes a SImPacket and sends it over the serial port."""
length = len(packet.data) + 1
towrite = []
towrite.append(chr(length & 0xFF))
towrite.append(chr((length>>8) & 0xFF))
towrite.append(packet.cmd)
for c in packet.data:
towrite.append(c)
print fhex([ord(c) for c in towrite])
for c in towrite:
self.port.write(c)
"""
This method still feels off.
Why is it the only one in the class that constructs the packet to send?
I know I move it here to avoid the caller from having to know
about the concept of a command in a packet, but is that really necessary?
"""
def acknowledgePacket(self, statusPacket, stall=False):
packet = SimCommandPacket(chr(statusPacket.cmd))
IGNORE = 0x00
HANDLE = 0x01
STALL = 0x02
print "Acknowledging:", packet.cmd, "Stall:", stall
self.port.write(chr(2 & 0xFF))
self.port.write(chr((2>>8) & 0xFF))
self.port.write(packet.cmd)
if not stall: self.port.write(chr(IGNORE))
if stall: self.port.write(chr(STALL))
"""
This class represents a portal.
It receives commands from a Teensy client and responds to them as if it was
a real portal.
The goal of this phase is to make it through enumeration.
Once that is successfull, I'll move on to establishing pass through
to a real portal.
That class will probably be named SimUsbPortalPassthrough.
"""
class SimUsbPortal():
def __init__(self):
self.client = SimUsbClient()
def loop(self):
while 1:
self.handleIncoming()
self.disconnectUsb()
"""
Is the Law of Demeter or DIP being violated here?
Part of the method directly references the client while part
goes through another method.
"""
def handleIncoming(self):
if self.client.isPacketAvailable():
packet = self.client.getNextPacket()
if packet.isConnectedEvent():
print "Connected event."
self.connectUsb()
elif packet.isDisconnectedEvent():
print "Diconnected event. Do nothing?"
elif packet.isDeviceQualifierDescriptorPacket():
self.sendEmptyDescriptor()
elif packet.isStringDescriptorPacket():
#self.sendEmptyDescriptor()
self.sendStringDescriptor(packet)
elif packet.isDeviceDescriptorPacket():
self.sendDeviceDescriptor()
elif packet.isConfigurationDescriptorPacket():
self.sendConfiguration(packet)
elif packet.isReportDescriptorPacket():
self.sendReportDescriptor(packet)
elif packet.isUsbPacket():
request = packet.usbRequest()
for line in request.format().split("\n"):
print "\t" + line
self.handleUsbRequest(packet, request)
def handleUsbRequest(self, packet, request):
if request.isDescriptorRequest():
if request.isDeviceDescriptorRequest():
self.client.acknowledgePacket(packet)
elif request.isDeviceQualifierDescriptorRequest():
self.client.acknowledgePacket(packet, stall = False)
elif request.isConfigurationDescriptorRequest():
self.client.acknowledgePacket(packet)
# Handle the descriptor requests that should be ignored.
elif request.canIgnore():
print "Ignoring device descriptor request."
self.client.acknowledgePacket(packet)
# There are some non-descriptor requests that should be ignored.
elif request.canIgnore():
print "Ignoring other command."
self.client.acknowledgePacket(packet)
else:
print "Unknown."
sys.exit(1)
def sendConfiguration(self, reqPacket):
print "Sending configuration descriptor."
size = reqPacket.getRequestedDescriptorSize()
data = configuration[:size]
print fhex(data)
packet = SimCommandPacket('D', [chr(o) for o in data])
self.client.sendPacket(packet)
def sendReportDescriptor(self, reqPacket):
print "Sending report descriptor."
size = reqPacket.getRequestedDescriptorSize()
data = report[:size]
print fhex(data)
packet = SimCommandPacket('D', [chr(o) for o in data])
self.client.sendPacket(packet)
def sendDeviceDescriptor(self):
print "Sending device descriptor."
data = device_descriptor + endpoint_data
packet = SimCommandPacket('D', [chr(o) for o in data])
self.client.sendPacket(packet)
# TODO: Fix! Not working!
def sendStringDescriptor(self, reqPacket):
print "Sending string descriptor."
data = []
data = strings[reqPacket.getStringIndex()]
packet = SimCommandPacket('D', [chr(o) for o in data])
self.client.sendPacket(packet)
def sendEmptyDescriptor(self):
print "Sending empty device descriptor."
packet = SimCommandPacket('D', [])
self.client.sendPacket(packet)
def connectUsb(self):
print "Connecting..."
packet = SimCommandPacket('S', [chr(0x01)])
self.client.sendPacket(packet)
def disconnectUsb(self):
print "Disconnecting..."
packet = SimCommandPacket('S', [chr(0x00)])
self.client.sendPacket(packet)
# http://stackoverflow.com/questions/24072790/detect-key-press-in-python
#fd = sys.stdin.fileno()
#old_settings = termios.tcgetattr(fd)
#try:
# tty.setraw(sys.stdin.fileno())
if __name__ == '__main__':
SimUsbPortal().loop()
#decodeControlRequest(u[1:])
def getch():
"""getch() -> key character
Read a single keypress from stdin and return the resulting character.
Nothing is echoed to the console. This call will block if a keypress
is not already available, but will not wait for Enter to be pressed.
If the pressed key was a modifier key, nothing will be detected; if
it were a special function key, it may return the first character of
of an escape sequence, leaving additional characters in the buffer.
"""
print "getch"
ch = None
[i, o, e] = select([sys.stdin.fileno()], [], [], 0.01)
if len(i) > 0: ch=sys.stdin.read(1)
print "return:", ch
return ch
``` |
{
"source": "joshuaberetta/formatte-cli",
"score": 3
} |
#### File: formatte/cli/main.py
```python
import logging
import argparse
from .formatte import add_subcommand_formatte
log = logging.getLogger(__name__)
def main(args=None):
parser = argparse.ArgumentParser(prog='formatte', description='Command line interface for the formatte package')
parser.add_argument(
'--loglevel', default='info', help='Log level',
choices=['debug', 'info', 'warning', 'error', 'critical'],
)
subparsers = parser.add_subparsers(help='Sub-commands')
add_subcommand_formatte(subparsers)
# Parse all command line arguments
args = parser.parse_args(args)
if hasattr(args, 'func'):
# Call the desired subcommand function
logging.basicConfig(level=args.loglevel.upper())
args.func(args)
return 0
else:
parser.print_help()
return 0
``` |
{
"source": "joshuaberg/apollo",
"score": 3
} |
#### File: joshuaberg/apollo/groupmebot.py
```python
import requests
from flask import Flask, request
class GroupMeBot():
"""
A simple GroupMe bot that allows you to specify commands like flask routes.
Uses Flask under the hood to create HTTP server that listens for chat posts.
"""
def __init__(self, bot_id):
"""
bot_id: Your bot ID from GroupMe dev site. Used for posting replies
"""
self._bot_id = bot_id
self._commands = {}
self._flask = Flask("GroupMeBot")
@self._flask.route('/', methods=['GET', 'POST'])
def _callback():
"""
Default flask route that listens for new messages. On a new message,
it makes sure to have the first /, checks if it's a valid command,
and then calls the function for that command.
"""
# Grab the posted message from the posted JSON
message_text = request.get_json()['text']
if message_text[0] == '/':
# If there are multiple spaces, GroupMe turns some of them into
# \xa0. I'm not sure why. Replace them with a space
message_text = message_text.replace(u'\xa0', ' ')
# The command should be the first word (split by whitespace),
# while the arguments will be passed as a list
split_message = message_text.lower().split()
command = split_message[0]
command_args = []
if len(split_message) > 1:
command_args = split_message[1:]
# Attempt to get the command and call it
command_function = self._commands.get(command)
if command_function:
command_function(command_args)
# Return an okay response (No content: 204)
return '', 204
def command(self, command_str):
"""
Decorator to add commands to the bot. Used like:
@bot.command('/command')
"""
def decorator(f):
# Keep track of the command string and it's function in _commands dict
self._commands[command_str] = f
return f
return decorator
def serve(self, *args, **kwargs):
"""
Start the GroupMe bot. All arguments are passed directly to Flask,
so debugging, port, host, etc can be set
"""
self._flask.run(*args, **kwargs)
def post(self, message):
"""
Post a message to the GroupMe.
"""
data = {
"bot_id": self._bot_id,
"text": message
}
requests.post("https://api.groupme.com/v3/bots/post", data=data)
``` |
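A minimal wiring example for the decorator-based command registration described in the docstrings above; the bot ID, command name, and port are placeholders.
```python
# Hedged usage sketch for GroupMeBot; replace the bot ID with one from dev.groupme.com.
from groupmebot import GroupMeBot  # assumes this file is importable as groupmebot

bot = GroupMeBot(bot_id="YOUR_BOT_ID")  # placeholder bot ID

@bot.command('/echo')
def echo(args):
    # args is the list of whitespace-separated words that followed the command
    bot.post(' '.join(args) if args else 'nothing to echo')

if __name__ == '__main__':
    # Start the Flask server that GroupMe posts message callbacks to
    bot.serve(host='0.0.0.0', port=5000)
```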
{
"source": "joshua-bilbrey/coding-tracker",
"score": 3
} |
#### File: joshua-bilbrey/coding-tracker/pixela.py
```python
import requests
import os
USERNAME = os.environ.get("PIXELA_USERNAME")
TOKEN = os.environ.get("PIXELA_TOKEN")
GRAPH_ID = "python"
print(USERNAME, TOKEN)
pixela_endpoint = "https://pixe.la/v1/users"
graph_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs"
pixel_data_endpoint = f"{graph_endpoint}/{GRAPH_ID}"
headers = {
"X-USER-TOKEN": TOKEN,
}
def create_user():
# create user (successful) -- https://pixe.la/
user_params = {
"token": TOKEN,
"username": USERNAME,
"agreeTermsOfService": "yes",
"notMinor": "yes",
}
response = requests.post(url=pixela_endpoint, json=user_params)
print(response.text)
def create_new_graph():
# create a graph
graph_params = {
"id": GRAPH_ID,
"name": "Python Daily Learning",
"unit": "min",
"type": "float",
"color": "ajisai",
}
response = requests.post(url=graph_endpoint, json=graph_params, headers=headers)
print(response.text)
def post_pixel(date, quantity):
# post a pixel of data
pixel_data_params = {
"date": date,
"quantity": f"{quantity}",
}
response = requests.post(url=pixel_data_endpoint, json=pixel_data_params, headers=headers)
print(response.text)
def update_pixel(date, quantity):
# update a pixel for a specific date
update_pixel_data_params = {
"quantity": f"{quantity}",
}
response = requests.put(url=f"{pixel_data_endpoint}/{date}", json=update_pixel_data_params, headers=headers)
print(f"{pixel_data_endpoint}/{date}")
print(response.text)
def delete_pixel(date):
# delete a pixel
response = requests.delete(url=f"{pixel_data_endpoint}/{date}", headers=headers)
print(response.text)
``` |
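A hedged walk-through of the intended flow using the helpers above; it assumes PIXELA_USERNAME and PIXELA_TOKEN are set in the environment before import, and the date string uses Pixela's yyyyMMdd format.
```python
# Hedged usage sketch; the date and quantities are placeholders.
from pixela import create_user, create_new_graph, post_pixel, update_pixel, delete_pixel

create_user()                  # one-time: register the account with the token
create_new_graph()             # one-time: create the "python" graph (GRAPH_ID)

post_pixel("20240101", 45.5)   # log 45.5 minutes for a given day
update_pixel("20240101", 60)   # revise that day's value later
# delete_pixel("20240101")     # or remove the entry entirely
```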
{
"source": "joshua-bilbrey/mine-sweeper-tkinter",
"score": 4
} |
#### File: joshua-bilbrey/mine-sweeper-tkinter/tile_manager.py
```python
from turtle import RawTurtle
import random
EASY = 10 # number to divide total tiles by for number of mines
MEDIUM = 8
HARD = 6
DIFFICULTY = EASY
class Tile(RawTurtle):
"""Tile class for minesweeper program."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.penup()
self.shape("square")
self.pencolor("black")
self.fillcolor("white")
self.is_mine = False
self.is_flagged = False
self.is_left_clicked = False
self.number = 0
class TileManager:
"""Tile manager."""
def __init__(self):
self.tiles = []
self.mines = []
self.y_coors = []
self.x_coors = []
self.num_mines = 0
self.closest_tile = False # used as filler for start code
self.right_click = False
def create_tiles(self, columns: int, rows: int, canvas):
self.y_coors = [((rows / 2) * 20) * -1, ((rows / 2) * 20)]
self.x_coors = [((columns / 2) * 20) * -1, ((columns / 2) * 20)]
y_coor = self.y_coors[0] + 10
for row in range(rows):
x_coor = self.x_coors[0] + 10
for column in range(columns):
new_tile = Tile(canvas)
new_tile.goto(x_coor, y_coor)
self.tiles.append(new_tile)
x_coor += 20
y_coor += 20
self.set_mines()
self.set_hints()
def set_mines(self):
self.num_mines = int(len(self.tiles) / DIFFICULTY)
for num in range(self.num_mines):
not_mine = True
while not_mine:
current_tile = random.choice(self.tiles)
if not current_tile.is_mine:
current_tile.is_mine = True
not_mine = False
self.mines.append(current_tile)
def set_hints(self):
for tile in self.tiles:
mines_nearby = 0
if tile.is_mine is not True:
for other_tile in self.tiles:
if tile.pos() != other_tile.pos() and tile.distance(other_tile) < 30 and other_tile.is_mine is True:
mines_nearby += 1
tile.number = mines_nearby
def find_closest_tile(self, x, y):
if self.x_coors[0] <= x <= self.x_coors[1] and self.y_coors[0] <= y <= self.y_coors[1]:
distance = 100
for tile in self.tiles:
if tile.distance(x, y) < distance:
distance = tile.distance(x, y)
self.closest_tile = tile
def find_grey_tiles(self, tile):
tile_dict = {"grey_tiles": [tile], "nearby_tiles": []}
number_of_tiles = len(tile_dict["grey_tiles"])
while True:
for grey_tile in tile_dict["grey_tiles"]:
for tile in self.tiles:
if grey_tile.distance(tile) < 22 and tile.number == 0 and tile not in tile_dict["grey_tiles"]:
tile_dict["grey_tiles"].append(tile)
if number_of_tiles == len(tile_dict["grey_tiles"]):
break
else:
number_of_tiles = len(tile_dict["grey_tiles"])
for grey_tile in tile_dict["grey_tiles"]:
for tile in self.tiles:
if grey_tile.distance(tile) < 30 and tile not in tile_dict["nearby_tiles"] and tile.number != 0:
tile_dict["nearby_tiles"].append(tile)
return tile_dict
def click_tile(self, x, y):
self.find_closest_tile(x, y)
self.right_click = False
def right_click_tile(self, x, y):
self.find_closest_tile(x, y)
self.right_click = True
if self.closest_tile.is_left_clicked is False:
if self.closest_tile.is_flagged is True:
self.closest_tile.is_flagged = False
else:
self.closest_tile.is_flagged = True
``` |
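A rough setup sketch showing how the `TileManager` above could be driven from a turtle screen; the real game presumably wires these calls to tkinter mouse events, so the screen object and click coordinates here are stand-ins.
```python
# Hedged example: build a 10x10 board and forward a couple of simulated clicks.
from turtle import Screen
from tile_manager import TileManager  # assumes this file is importable as tile_manager

screen = Screen()
screen.tracer(0)                      # draw all tiles at once instead of animating
manager = TileManager()
manager.create_tiles(10, 10, screen)  # 10 columns x 10 rows centered on the canvas
screen.update()

manager.click_tile(0, 0)              # left click near the center of the board
manager.right_click_tile(20, 20)      # toggle a flag on the tile near (20, 20)
print(manager.num_mines, "mines placed")
```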
{
"source": "JoshuaBillson/cgi-lab",
"score": 3
} |
#### File: JoshuaBillson/cgi-lab/hello.py
```python
import os
import json
def print_environment():
print("Content-Type: text/plain\r\n")
for k, v in os.environ.items():
print(f"{k} = {v}")
def print_environment_json():
print("Content-Type: application/json\r\n")
print(json.dumps(dict(os.environ), indent=2))
def print_browser():
browser = os.environ.get("HTTP_USER_AGENT")
print("Content-Type: text/html\r\n")
print(f"<p style='color:green;'>Your Browser Is: {browser}</p>")
def extract_query():
query = dict()
query_string = os.environ.get("QUERY_STRING")
pairs = query_string.split("&")
for pair in pairs:
key, value = pair.split("=")
query[key] = value
return query
def main():
print_environment()
if __name__ == "__main__":
main()
``` |
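A small illustration of what `extract_query` returns; the environment variable is set by hand only to simulate what a web server would provide to the CGI script.
```python
# Hedged example: simulate the CGI environment and parse a query string.
import os
from hello import extract_query  # assumes this file is importable as hello

os.environ["QUERY_STRING"] = "name=Josh&course=cgi-lab"  # placeholder query
print(extract_query())  # -> {'name': 'Josh', 'course': 'cgi-lab'}
```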
{
"source": "JoshuaBillson/VNH5019Controller",
"score": 4
} |
#### File: Python/VNH5019Controller/Controller.py
```python
import VNH5019Controller.Commands as Commands
from serial import Serial
# Import smbus2 if it exists in the present environment.
try:
from smbus2 import SMBus
except ImportError:
pass
# Enumeration For Writing Standard Commands
CHANNEL_1 = 0
CHANNEL_2 = 1
BOTH = 2
# Serial Baudrate
BAUDRATE = 115200
# Global Variables For The I2C Port (bus) and Serial Port (ser)
bus = None
ser = None
def init_serial(port_name):
"""
Initialize a serial port for communication with the motor controller, must be called prior to
writing to the controller.
port_name (string): The name of the USB port over which we wish to communicate.
Returns: None.
"""
global ser
ser = Serial(port_name, BAUDRATE)
def init_i2c(pre_existing_bus=None):
"""
Initialize the serial bus for communication. Must be called prior to communicating with the controller via I2C.
pre_existing_bus (SMBus): A pre-existing bus if it exists.
Returns: None.
"""
global bus
if pre_existing_bus is not None:
bus = pre_existing_bus
else:
bus = SMBus(1)
class Controller:
"""
Provides methods for controlling the motor controller via either I2C or USB Serial communication.
"""
def __init__(self):
pass
def write_speed(self, channel, value):
"""
Write a speed value between -100 (full reverse) to 100 (full forward) to one or both channels.
channel (int): The channel whose speed we want to set. Must be one of CHANNEL_1, CHANNEL_2, or BOTH.
value (int): The speed we want to set between -100 to 100.
"""
global ser, bus
assert channel == CHANNEL_1 or channel == CHANNEL_2 or channel == BOTH, "Write Error: Invalid Channel!"
assert -100 <= value <= 100, "Write Error: Invalid Value!"
commands = (Commands.SET_CHANNEL_1_SPEED, Commands.SET_CHANNEL_2_SPEED, Commands.SET_BOTH_CHANNEL_SPEED)
bytes_to_write = self._to_byte_array((commands[channel], value))
Controller._write_bytes(bytes_to_write)
def write_brake(self, channel, value):
"""
Write a brake value between 0 (coast) to 100 (max brake) to one or both channels.
channel (int): The channel whose speed we want to set. Must be one of CHANNEL_1, CHANNEL_2, or BOTH.
value (int): The brake we want to set between 0 to 100.
"""
assert channel == CHANNEL_1 or channel == CHANNEL_2 or channel == BOTH, "Write Error: Invalid Channel!"
assert 0 <= value <= 100, "Write Error: Invalid Value!"
commands = (Commands.BRAKE_CHANNEL_1, Commands.BRAKE_CHANNEL_2, Commands.BRAKE_BOTH_CHANNELS)
bytes_to_write = self._to_byte_array((commands[channel], value))
Controller._write_bytes(bytes_to_write)
def write_mixed_command(self, speed_val, turn_val):
"""
Write a mixed command to the motor controller.
speed_val (int): The speed we want to write between -100 (full reverse) to 100 (full forward).
turn_val (int): The turn value we want to write between -100 (max left) to 100 (max right).
"""
assert -100 <= speed_val <= 100, "Write Error: Invalid Value!"
assert -100 <= turn_val <= 100, "Write Error: Invalid Value!"
bytes_to_write = self._to_byte_array((Commands.MIXED_COMMAND, speed_val, turn_val))
Controller._write_bytes(bytes_to_write)
def stop(self):
"""Stop both motors by applying max brakes."""
bytes_to_write = self._to_byte_array((Commands.BRAKE_BOTH_CHANNELS, 100))
Controller._write_bytes(bytes_to_write)
def set_active(self):
"""Put the motor controller into an active state."""
bytes_to_write = self._to_byte_array((Commands.TOGGLE_STATE_COMMAND, Commands.ACTIVE))
Controller._write_bytes(bytes_to_write)
def set_standby(self):
"""Put the motor controller into standby."""
bytes_to_write = self._to_byte_array((Commands.TOGGLE_STATE_COMMAND, Commands.STANDBY))
Controller._write_bytes(bytes_to_write)
# PRIVATE METHODS - DO NOT TOUCH THESE!
@staticmethod
def _write_bytes(bytes_to_write):
if ser is not None:
Controller._write_serial(bytes_to_write)
elif bus is not None:
Controller._write_i2c(bytes_to_write)
@staticmethod
def _write_i2c(bytes_to_write):
assert type(bytes_to_write) == bytes, "Error: Invalid Data Type!"
global bus
assert bus is not None, "Error: I2C Bus Is Not Initialized!"
bus.write_i2c_block_data(Commands.CONTROLLER_ADDRESS, 0, list(bytes_to_write))
@staticmethod
def _write_serial(bytes_to_write):
assert type(bytes_to_write) == bytes, "Error: Invalid Data Type!"
global ser
assert ser is not None, "Error: Serial Port Is Not Initialized!"
ser.write(bytes_to_write)
@staticmethod
def _to_byte(number, is_signed):
return number.to_bytes(1, byteorder='big', signed=is_signed)
@staticmethod
def _to_byte_array(numbers):
byte_array = []
for x in numbers:
if x < 0:
byte_array.append(Controller._to_byte(x, True))
else:
byte_array.append(Controller._to_byte(x, False))
return b''.join(byte_array)
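# Illustrative usage sketch (not part of the library; the port name and values below
# are assumptions):
#
#   init_serial("/dev/ttyACM0")        # or init_i2c() when wired over I2C
#   controller = Controller()
#   controller.set_active()
#   controller.write_speed(BOTH, 50)   # both channels at half speed forward
#   controller.write_mixed_command(40, -20)
#   controller.stop()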
``` |
{
"source": "joshua-blas/Homework",
"score": 4
} |
#### File: joshua-blas/Homework/40transmembrane.py
```python
import sys
# Write a program that predicts if a protein is trans-membrane
# Trans-membrane proteins have the following properties
# Signal peptide: https://en.wikipedia.org/wiki/Signal_peptide
# Hydrophobic regions(s): https://en.wikipedia.org/wiki/Transmembrane_protein
# No prolines in hydrophobic regions (alpha helix)
# Hydrophobicity is measured via Kyte-Doolittle
# https://en.wikipedia.org/wiki/Hydrophilicity_plot
# For our purposes:
# Signal peptide is 8 aa long, KD > 2.5, first 30 aa
# Hydrophobic region is 11 aa long, KD > 2.0, after 30 aa
# Hints:
# Create a function for KD calculation
# Create a function for amphipathic alpha-helix
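# Worked example (illustrative, not part of the assignment): an 8-aa window made up
# entirely of leucines scores 8 * 3.8 / 8 = 3.8 on the Kyte-Doolittle scale, clearing
# the 2.5 signal-peptide threshold, while a single arginine (-4.5) in the window pulls
# the average down sharply.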
#Worked with Jeremy
def score(code): # separate function that maps each amino acid to its Kyte-Doolittle value, keeping things tidy
if code == 'I': return 4.5
elif code == 'V': return 4.2
elif code == 'L': return 3.8
elif code == 'F': return 2.8
elif code == 'C': return 2.5
elif code == 'M': return 1.9
elif code == 'A': return 1.8
elif code == 'G': return -0.4
elif code == 'T': return -0.7
elif code == 'S': return -0.8
elif code == 'W': return -0.9
elif code == 'Y': return -1.3
elif code == 'P': return -1.6
elif code == 'H': return -3.2
elif code == 'E': return -3.5
elif code == 'Q': return -3.5
elif code == 'D': return -3.5
elif code == 'N': return -3.5
elif code == 'K': return -3.9
elif code == 'R': return -4.5
return 0 #if none of the above put 0
def check_proline(seq): #check if P in the alpha helix
if 'P' in seq:
return True
else:
return False
def KD(seq, window=0, threshold=0, check_alpha=False): #KD calculation. check_alpha is a variable that dictates whether the proline check will be executed
total = 0
for i in range(window):
total += score(seq[i]) #check score for each value in first window
for pos in range(len(seq) - (window - 1)): #moving window
if pos > 0:
total += score(seq[pos + window - 1]) - score(seq[pos-1]) #add one more value on the end, subtract off the other end
avg = total / window
        if avg > threshold and (check_alpha == False or check_proline(seq[pos:pos+window]) == False): #if the threshold is surpassed and the proline check is either skipped or finds no P, return True
return True
    return False #if none of the windows fulfill the criteria, return False
assert(len(sys.argv) == 2)
fp = open(sys.argv[1])
line = fp.readline()
while line: #loop through while there are still lines in the file to go through
if line[0] == '>':
name = line.split(' | ') #split each informational segment in the first line into a new list since each segment is separated by two spaces and a |
protein = name[0] #first segment
protein = protein[1:] #take off the '>'
data = ''
line = fp.readline() #move to next line
while line:
data += line[0:len(line) - 1] #add each line to a string of data that will actually be analyzed (minus the \n at the end)
if data[len(data) - 1] == '*': #if the asterisk is encountered, take off the asterisk and stop the loop
data = data[0:len(data)-1]
break
line = fp.readline() #move to the next line if there is no asterisk
condition1 = KD(data[0:30], 8, 2.5, False)
condition2 = KD(data[30:], 11, 2.0, True)
if condition1 == True and condition2 == True:
print(protein)
line = fp.readline()
fp.close()
"""
python3 40transmembrane.py ../Data/at_prots.fa
AT1G75120.1
AT1G10950.1
AT1G75110.1
AT1G74790.1
AT1G12660.1
AT1G75130.1
"""
``` |
{
"source": "joshuabode/bubble-sort-python",
"score": 3
} |
#### File: bubble-sort-python/bubble_sorter/bubble_sort.py
```python
from progress.bar import IncrementalBar
from stopwatch import Stopwatch
import random
import os
bubble_sort_order = 'asc'
speed_test_check = False
stopwatch = Stopwatch()
number = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
import pkg_resources
resource_package = os.path.dirname(__file__)
resource_path = '/wordlist.txt'
wordsfile = ''.join((resource_package, resource_path))
def create_list(user_tup):
global user_list
user_list = list(user_tup)
    # if every element is a numeric string, convert to ints so values sort numerically
    if all(isinstance(i, str) and i.isdigit() for i in user_list):
        user_list = [int(i) for i in user_list]
print(user_list)
global x
x = len(user_list)
x = x - 1
if x == 0:
return str("Please input a list with more than one item")
return user_list, x
def speed_test(no_of_vals, **opts):
opts.setdefault('range', (0, 99))
opts.setdefault('type', 'int')
opts.setdefault('verbose', False)
opts.setdefault('order', 'asc')
global speed_test_check
speed_test_check = True
global speed_test_tuple
speed_test_list = []
if opts['type'] == 'str':
ext_dict = open(wordsfile, 'r')
dict_list = []
with IncrementalBar('Loading English Dictionary...', max=10000) as bar:
for line in ext_dict:
line = line.strip()
dict_list.append(line)
bar.next()
bar.finish()
with IncrementalBar('Generating Random Values...', max=no_of_vals) as bar:
for n in range(no_of_vals):
random_int = random.randint(0, 9999)
speed_test_list.append(dict_list[random_int])
bar.next()
speed_test_tuple = tuple(speed_test_list)
print("Sorting has started...")
sort(*speed_test_tuple, order = opts['order'])
speed_test_check = False
if opts['verbose'] == True:
print(record_time(time, y), "Sorted List: ", lst)
else:
print(record_time(time, y))
else:
with IncrementalBar('Generating Random Values...', max=no_of_vals) as bar:
for n in range(no_of_vals):
random_int = random.randint(opts['range'][0],opts['range'][1] )
speed_test_list.append(random_int)
bar.next()
speed_test_tuple = tuple(speed_test_list)
print("Sorting has started...")
sort(*speed_test_tuple, order = opts['order'])
speed_test_check = False
if opts['verbose'] == True:
print(record_time(time, y), "Sorted List: ", lst)
else:
print(record_time(time, y))
def swap_vals(lst, val1, pos1, val2, pos2):
    # swap the two positions directly; the remove()/insert() pattern misbehaves when
    # the list contains duplicate values
    lst[pos1], lst[pos2] = val2, val1
    return lst
def check_order(lst, length, order):
check = []
if order == 'desc':
for n in range(length):
if lst[n] >= lst[n+1]:
check.append(1)
else:
check.append(0)
if 0 in check:
return True
else:
return False
else:
for n in range(length):
if lst[n] <= lst[n+1]:
check.append(1)
else:
check.append(0)
if 0 in check:
return True
else:
return False
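# Note: despite its name, check_order() returns True while the list is still out of
# order (so the while loop in sort() keeps running) and False once it is fully sorted.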
def record_time(time_string, no_of_items):
no_of_items = str(no_of_items)
return str("Bubble Sorter sorted " + no_of_items + " items in " + time_string)
def sort(*array, **opts):
opts.setdefault('order', 'asc')
stopwatch.restart()
if opts['order'] == 'desc':
create_list(array)
global lst
lst = user_list
while check_order(lst, len(lst) - 1, 'desc'):
for n in range(x):
if lst[n] < lst[n+1]:
swap_vals(lst, lst[n], n, lst[n+1], n+1)
stopwatch.stop()
global time, y
time = str(stopwatch)
y = x + 1
return lst, record_time(time, y)
else:
create_list(array)
lst = user_list
while check_order(lst, len(lst) - 1, 'asc'):
for n in range(x):
if lst[n] > lst[n+1]:
swap_vals(lst, lst[n], n, lst[n+1], n+1)
stopwatch.stop()
time = str(stopwatch)
y = x + 1
return lst, record_time(time, y)
def rev_sort(*array):
sort(*(array), order='desc')
return lst, record_time(time, y)
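# Illustrative usage (sample values are made up):
#
#   sort(5, 1, 4, 2, 8)                             # ascending sort
#   rev_sort(5, 1, 4, 2, 8)                         # descending sort
#   speed_test(1000, type='int', range=(0, 500))    # time-sort 1000 random integers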
``` |
{
"source": "Joshua-Bomba/RouterVpnManager",
"score": 2
} |
#### File: RouterVpnManager/RouterVpnManager/RouterVpnManager.py
```python
import ptvsd
ptvsd.enable_attach('RouterVpnManager')
import os
import json
import sys
import signal
import subprocess
import socket
import threading
import time
import shutil
import Queue
CONFIG_FOLDER_NAME = "configurations"
OPENVPNCL_PATH = "/tmp/openvpncl" #I highly recommend against changing this
VPN_CONNECTION_CODE = "openvpn --route-up " + OPENVPNCL_PATH + "/route-up.sh --route-pre-down " + OPENVPNCL_PATH +"/route-down.sh --config " + OPENVPNCL_PATH +"/openvpn.conf"
#raw_input("Press Any Key Once The Debugger is hooked on")
class logger(threading.Thread):
__connections = None
__output = None
#__outputQueue = None
def __init__(self):
threading.Thread.__init__(self)
self.__output = True
#self.__outputQueue = Queue.Queue()
#self.start()#whoops forgot to start it https://youtu.be/KIrCOfDbL_E?t=1m44s
def setConnections(self,connections):
self.__connections = connections
#self.writeLine("Broadcast Logging has now began")
def write(self,outputString):
if not isinstance(outputString, basestring):
outputString = str(outputString)
#self.__outputQueue.put(outputString)
sys.stdout.write(outputString)
def writeLine(self,outputString):
if not isinstance(outputString, basestring):
outputString = str(outputString)
outputString += "\n"
self.write(outputString)
def run(self):
#try:
# while self.__output:
# str = self.__outputQueue.get()
# sys.stdout.write(str)
# if self.__connections is not None:
# m = {}
# m["message"] = str
# self.__connections.sendBroadcast("broadcast","broadcastlog",m,"")
#except Exception, e:
# print "Queue Exception Exiting log" + str(e)#probably not a good idea to broadcast this exception out
pass
def stop(self):
self.__output = False
log = logger()
#This class will process any output from a subprocessHandler
class subprocessOutputHandler(threading.Thread):
__PRINTSUBPROCESS = True
__handler = None
__outputCallback = None
__output = True
def __init__(self,outputCallback = None):
threading.Thread.__init__(self)
self.__outputCallback = outputCallback
def addHandler(self,handler):
self.__handler = handler
def stop(self):
self.__output = False
def run(self):
try:
while (self.__PRINTSUBPROCESS or self.__outputCallback is not None) and self.__handler is not None and self.__output:
line = self.__handler.stdout.readline()
if line != '':
self.output(line)
else:
break
except Exception,e:
log.writeLine(str(e))
def output(self,str):
if self.__PRINTSUBPROCESS:
log.write(str)
if self.__outputCallback is not None:
self.__outputCallback(self,str)
#this handler class is used to control a single instance of a subprocess
class subprocessHandler:
__lock = None
__command = None
__handler = None
__connections = None
__running = False
__output = None
def __init__(self,command,connections = None,outputCallback = None):
self.__lock = threading.Lock()
self.__command = command
self.__connections = connections
self.__output = subprocessOutputHandler(outputCallback)
self.execute()
def execute(self):
self.__running = True
try:
self.__handler = subprocess.Popen(self.__command,stdout=subprocess.PIPE,shell=True,preexec_fn=os.setsid)
self.__output.addHandler(self.__handler)
self.__output.start()
except:
log.writeLine("an issue has occured")
def kill(self):
self.__lock.acquire()
try:
if self.__running and self.__handler is not None:
os.killpg(os.getpgid(self.__handler.pid),signal.SIGTERM)
self.stop()
finally:
self.__lock.release()
def stop(self):
self.__running = False
if self.__connections is not None:
self.__connections.vpnUnexpectedDisconnectionBroadcast()
if self.__output is not None:
self.__output.stop()
def isRunning(self):
try:
self.__lock.acquire()
return self.__running
finally:
self.__lock.release()
def checkStatus(self):
self.__lock.acquire()
try:
if self.__running:
retcode = self.__handler.poll()
if(retcode is not None):
self.stop()
return False
else:
return False
finally:
self.__lock.release()
def wait(self):
self.__handler.wait()
# this class is in charge of managing subprocesses and creating new ones
class subprocessManager(threading.Thread):
__processLock = None
__process = []
__stopProcessing = False
def __init__(self):
threading.Thread.__init__(self)
self.__processLock = threading.Lock()
self.start()
def startProcess(self,command,connections):
handler = None
self.__processLock.acquire()
try:
handler = subprocessHandler(command,connections)
self.__process.append(handler)
finally:
self.__processLock.release()
return handler;
def stop(self):
try:
self.__processLock.acquire()
self.__stopProcessing = True
for p in self.__process:
p.kill()
del self.__process
finally:
self.__processLock.release()
def run(self):
try:
index = -1
while not self.__stopProcessing:
self.__processLock.acquire()
try:
if len(self.__process) != 0:
if index < len(self.__process) - 1:
index = index + 1
else:
index = 0
if self.__process[index].checkStatus() == False:
del self.__process[index]
index = 0
finally:
self.__processLock.release()
time.sleep(.1)
except Exception,e:
log.writeLine( str(e))
class vpnFileManager:
__path = None
__processManager = None
def __init__(self,path,processManager):
try:
if os.path.isdir(path):
configPath = path + "/" + CONFIG_FOLDER_NAME
if not os.path.isdir(configPath):
os.makedirs(configPath)
self.__path = configPath
self.__processManager = processManager
except Exception, e:
log.writeLine(e)
def getAvaliableConnections(self):
vpnConnections = []
try:
for file in os.listdir(self.__path):
if os.path.isdir(os.path.join(self.__path,file)):
if self.folderValid(os.path.join(self.__path,file)):
vpnConnections.append(file)
except Exception, e:
log.writeLine(e)
finally:
return vpnConnections
def configExists(self,name):
return name in self.getAvaliableConnections()
def folderValid(self,path):
try:
return os.path.isdir(path) and os.path.isfile(path + "/openvpn.conf") and os.path.isfile(path + "/route-up.sh") and os.path.isfile(path + "/route-down.sh")
except Exception, e:
log.writeLine(e)
return False
def pathValid(self):
return self.__path != None
    # copies the current openvpncl config to a new folder under the configuration folder
def copyCurrentConfig(self,name):
try:
if not os.path.isdir(CONFIG_FOLDER_NAME + "/" + name):
if self.folderValid(OPENVPNCL_PATH):
os.makedirs(CONFIG_FOLDER_NAME + "/" + name)
for f in os.listdir(OPENVPNCL_PATH):
shutil.copyfile(OPENVPNCL_PATH + "/" + f,CONFIG_FOLDER_NAME + "/" + name + "/" + f)
return ""
else:
return "No configuration found"
else:
return "Configuration already exists"
except Exception, e:
log.writeLine(e)
return "unhandle exception"
# this will clear the current openvpncl
def clearCurrentConfig(self):
try:
if os.path.isdir(OPENVPNCL_PATH):
shutil.rmtree(OPENVPNCL_PATH)
else:
return "no config to clear"
except Exception, e:
log.writeLine(e)
return "unhandle exception"
    # this will copy the selected saved config into the openvpncl folder
def copyConfig(self,folderName):
try:
if not os.path.isdir(OPENVPNCL_PATH):
os.makedirs(OPENVPNCL_PATH)
for f in os.listdir(CONFIG_FOLDER_NAME + "/" + folderName):
shutil.copyfile(CONFIG_FOLDER_NAME + "/" + folderName + "/" + f,OPENVPNCL_PATH + "/" + f)
return self.applyPermissions()
except Exception, e:
log.writeLine(e)
return "unhandle exception"
def deleteConfig(self,folderName):
try:
if folderName != "" and os.path.isdir(CONFIG_FOLDER_NAME + "/" + folderName):
shutil.rmtree(CONFIG_FOLDER_NAME + "/" + folderName)
else:
return "could not find config"
except Exception, e:
log.writeLine(e)
return "unhandle exception"
def applyPermissions(self):
if self.folderValid(OPENVPNCL_PATH):
try:
chmod1 = self.__processManager.startProcess("chmod +x " + OPENVPNCL_PATH + "/route-up.sh", None)
chmod2 = self.__processManager.startProcess("chmod +x " + OPENVPNCL_PATH + "/route-down.sh", None)
chmod1.wait()
chmod2.wait()
except:
return "There was an issue applying the required permissions to the config"
else:
return "There was an issue with the config"
class routerVpnManager:
__processManager = None
__connectionStatus = None
__currentConnection = None
__vpnFileManager = None
__connections = None#For Handling unexpected Disconnection of the Process
__lock = None
def __init__(self,connections):
self.__connections = connections
self.__lock = threading.Lock()
        self.__processManager = subprocessManager()#When this is called and the code wants to exit, ensure that this object is cleaned up and the thread is stopped
self.__vpnFileManager = vpnFileManager(os.path.dirname(os.path.realpath(__file__)),self.__processManager)
def exit(self):
if self.__processManager is not None:
self.__processManager.stop()
def getConnectionNames(self):#Async
return self.__vpnFileManager.getAvaliableConnections()
def isRunning(self):
self.__lock.acquire()
try:
return self.isRunningInternal()
finally:
self.__lock.release()
def isRunningInternal(self):
if self.__connectionStatus is not None and self.__connectionStatus.isRunning():
return True
else:
return False
def connectToVpn(self,str):
files = self.getConnectionNames()
self.__lock.acquire()
try:
if str in files:
if not self.isRunningInternal():
if self.__vpnFileManager.configExists(str):
self.__vpnFileManager.clearCurrentConfig()
self.__vpnFileManager.copyConfig(str)
self.__connectionStatus = self.__processManager.startProcess(VPN_CONNECTION_CODE,self.__connections)
self.__currentConnection = str
return ""
else:
return "could not connect since it's already connect to a vpn"#TODO: could change this to a disconnect and reconnect sort of thing
else:
return "could not connect the VPN opvn file does not exist"
finally:
self.__lock.release()
def disconnectFromVpn(self):
self.__lock.acquire()
try:
if self.isRunningInternal():
self.__connectionStatus.kill()
self.__connectionStatus = None
return ""
else:
return "could not disconnect since no vpn is connected"
finally:
self.__lock.release()
def getVpnConnection(self):
self.__lock.acquire()
try:
if self.isRunningInternal():
return self.__currentConnection
else:
return ""
finally:
self.__lock.release()
#this class will handle any socket request and respond to them
class processRequest:
__stringJson = None
__jsonObject = None
__exception = ""
__sock = None
__vpnManager = None
__connection = None
__inputProcessLock = None
def __init__(self,vpnManager):
self.__vpnManager = vpnManager
self.__inputProcessLock = threading.Lock()
def processInput(self,message,socket,connection):
self.__inputProcessLock.acquire()
try:
self.__stringJson = message
self.__sock = socket
self.__connection = connection
self.deseralizeJson()
if(self.__jsonObject != None):
return self.goThroughRequests()
else:
return False
finally:
self.__inputProcessLock.release()
def getException(self):
return self.__exception
def deseralizeJson(self):
try:
data = json.loads(self.__stringJson)
if 'request' not in data or 'data' not in data or 'type' not in data:
raise ValueError("Missing keys from the json")
self.__jsonObject = data
except Exception, e:
self.__exception = e.message
log.writeLine(e.message)
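    # Illustrative request envelope handled by this class (the signature value is an
    # assumption made up for the example):
    #   {"type": "request", "request": "listovpn", "data": {}, "signature": "abc123"}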
def sendResponse(self,type,request,data):
response = {}
response["type"] = type
response["request"] = request
response["data"] = data
response["signature"] = self.__jsonObject["signature"]
if(type == "response"):
self.__sock.send(json.dumps(response))
def goThroughRequests(self):
if self.__jsonObject["type"] == "request":
if self.__jsonObject["request"] == "connection":
self.sendResponse("response","connection","Connection Established")
return True
elif self.__jsonObject["request"] == "listovpn":
self.sendResponse("response","listovpn",self.__vpnManager.getConnectionNames())
return True
elif self.__jsonObject["request"] == "connecttovpn":
data = {}
data["vpnLocation"] = self.__jsonObject["data"][u'vpn']
data["status"] = self.__vpnManager.connectToVpn(data["vpnLocation"])
#self.sendResponse("response","connecttovpn",data)
self.__connection.sendBroadcast("broadcast","connecttovpn",data,self.__jsonObject["signature"])
return True
elif self.__jsonObject["request"] == "disconnectfrompvpn":
data = {}
data["reason"] = "Client Disconnected"
data["status"] = self.__vpnManager.disconnectFromVpn()#The disconnect from vpn will be callback twice because it shuts down the Subprocess and causes and unexpected disconnect
self.__connection.sendBroadcast("broadcast","disconnectfrompvpn",data,self.__jsonObject["signature"])
return True
elif self.__jsonObject["request"] == "checkconnectionstatus":
data = {}
data["running"] = self.__vpnManager.isRunning()
data["connectedTo"] = self.__vpnManager.getVpnConnection()
self.sendResponse("response","checkconnectionstatus",data)
return True
elif self.__jsonObject["request"] == "copycurrentconfig":
data = {}
data["status"] = self.__vpnManager._routerVpnManager__vpnFileManager.copyCurrentConfig(self.__jsonObject["data"][u'name'])
self.sendResponse("response","copycurrentconfig",data)
return True
elif self.__jsonObject["request"] == "deleteconfig":
data = {}
data["status"] = self.__vpnManager._routerVpnManager__vpnFileManager.deleteConfig(self.__jsonObject["data"][u'name'])
self.sendResponse("response","deleteconfig",data)
return True
elif self.__jsonObject["request"] == "clearcurrentconfig":
data = {}
data["status"] = self.__vpnManager._routerVpnManager__vpnFileManager.clearCurrentConfig()
self.sendResponse("response","clearcurrentconfig",data)
return True
elif self.__jsonObject["request"] == "copyconfig":
data = {}
data["status"] = self.__vpnManager._routerVpnManager__vpnFileManager.copyConfig(self.__jsonObject["data"][u'name'])
self.sendResponse("response","copyconfig",data)
return True
else:
self.__exception = "The request does not exist"
else:
self.__exception = "Could not processs this type of request"
return False
def handleBroadcast(self,response):
if(response["type"] == "broadcast"):
self.__sock.send(json.dumps(response));
#this will handle the socket connection for a paticular client
class client(threading.Thread):
__connection = None
__stopProcessing = False
__request = None
def __init__(self,socket,address,connection,vpnManager):
threading.Thread.__init__(self)
self.__connection = connection
self.sock = socket
self.add=address
self.__request = processRequest(vpnManager)
self.start()
def stop(self):
self.__stopProcessing = True
def run(self):
try:
while not self.__stopProcessing:
data = self.sock.recv(1024)
if(data == ''):
break
else:
log.writeLine('client sent: '+ data)
if (not self.__request.processInput(data,self.sock,self.__connection)):
                        self.sock.send('Message received, could not process request: ' + self.__request.getException())
except Exception,e:
log.writeLine( str(e))
finally:
self.disconnect()
def disconnect(self):
log.writeLine( "client disconnected")
self.__connection.disconnect(self)
#this handles all the connected clients
#this also handles any kind of broadcast
class connections:
__host = None
__port = 0
__serversocket = None;
__clientsMap = {}
__clientsMapLock = None
__vpnManager = None
def __init__(self,host,port):
self.__host = host
self.__port = port
self.__clientsMapLock = threading.Lock()
self.__vpnManager = routerVpnManager(self)
log.setConnections(self)
self.bind()
def exit(self):
for c in self.__clientsMap:
self.__clientsMap[c].stop()
self.__vpnManager.exit();
if self.__serversocket is not None:
self.__serversocket.close();
def bind(self):
        log.writeLine('Binding port')
self.__serversocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.__serversocket.bind((self.__host,self.__port))
def listen(self):
log.writeLine('server started and listening')
self.__serversocket.listen(5)
try:
while 1:
clientsocket, address = self.__serversocket.accept()
self.connect(clientsocket,address)
except KeyboardInterrupt:
log.writeLine("keyboard interuption")
except Exception,e:
log.writeLine(str(e))
def vpnUnexpectedDisconnectionBroadcast(self):
data = {}
data["status"] = ""
data["reason"] = "Unexpected Disconnection"
self.sendBroadcast("broadcast","disconnectfrompvpn",data,"")
def sendBroadcast(self,type,request,data,signature):
response = {}
response["type"] = type
response["request"] = request
response["data"] = data
response["signature"] = signature
if(type == "broadcast"):
self.__clientsMapLock.acquire()
try:
for k in self.__clientsMap:
self.__clientsMap[k]._client__request.handleBroadcast(response)
except Exception,e:
log.writeLine(str(e))
finally:
self.__clientsMapLock.release()
def connect(self,clientsocket,address):
log.writeLine("connecting to %s:%d" % (address[0],address[1]))
self.__clientsMapLock.acquire()
try:
self.__clientsMap[address[1]] = client(clientsocket,address,self,self.__vpnManager)
finally:
self.__clientsMapLock.release()
def disconnect(self,client):
self.__clientsMapLock.acquire()
try:
del self.__clientsMap[client.add[1]]
finally:
self.__clientsMapLock.release()
#this method is just some basic code to parse the input parameters
def start():
    if len(sys.argv) >= 3:
host = sys.argv[1]
port = int(sys.argv[2])
c = None
try:
c = connections(host,port)
c.listen();
except Exception,e:
log.writeLine(e)
finally:
if c is not None:
c.exit()
log.stop()
#Add some sort of closing code here or whatever to manage a crash/shutdown
else:
log.writeLine("This script requires a host address and port")
def signal_term_handler(signal, frame):
log.writeLine('got SIGTERM')
sys.exit(0)
signal.signal(signal.SIGTERM, signal_term_handler)
start()#primary purpose of the above being in a method is because it makes it easier to comment out
#def testSubprocessManager():
# print "ping started"
# processManager = subprocessManager()
# process = processManager.startProcess("ping 192.168.2.1",callback=finishedTask)
# raw_input("Press any key to continue ")
# process.kill()
# processManager.stop()
#def finishedTask(handler):
# print "ping finished"
``` |
{
"source": "joshuaboniface/kalliope",
"score": 3
} |
#### File: core/Models/RestAPI.py
```python
class RestAPI(object):
"""
    This class represents the REST API with all its configuration.
"""
def __init__(self,
password_protected=None,
                 login=None, password=None,
active=None,
port=None,
allowed_cors_origin=None):
"""
:param password_protected: If true, the rest api will ask for an authentication
:param login: login used if auth is activated
:param password: password used if auth is activated
:param active: specify if the rest api is loaded on start with Kalliope
:param allowed_cors_origin: specify allowed origins
"""
self.password_protected = password_protected
self.login = login
self.password = password
self.active = active
self.port = port
self.allowed_cors_origin = allowed_cors_origin
def __str__(self):
return str(self.serialize())
def serialize(self):
"""
This method allows to serialize in a proper way this object
:return: A dict of order
:rtype: Dict
"""
return {
'password_protected': self.password_protected,
'login': self.login,
            'password': self.password,
'active': self.active,
'port': self.port,
'allowed_cors_origin': self.allowed_cors_origin
}
def __eq__(self, other):
"""
This is used to compare 2 objects
:param other:
:return:
"""
return self.__dict__ == other.__dict__
```
#### File: kalliope/core/PlayerLauncher.py
```python
import logging
from kalliope.core import Utils
logging.basicConfig()
logger = logging.getLogger("kalliope")
class PlayerLauncher(object):
def __init__(self):
pass
@staticmethod
def get_player(settings):
"""
Instantiate a Player
:param settings: setting object
:type settings: Settings
:return: the Player instance
:rtype: Player
"""
player_instance = None
for player in settings.players:
if player.name == settings.default_player_name:
logger.debug("PlayerLauncher: Start player %s with parameters: %s" % (player.name, player.parameters))
player_instance = Utils.get_dynamic_class_instantiation(package_name="players",
module_name=player.name,
parameters=player.parameters)
break
return player_instance
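# Illustrative use (assumes a loaded Settings object named `settings`; the wav path is
# made up):
#   player = PlayerLauncher.get_player(settings)
#   player.play("/tmp/kalliope_generated_speech.wav")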
```
#### File: kalliope/core/PlayerModule.py
```python
import logging
import os
import subprocess
from kalliope.core.Utils.FileManager import FileManager
logging.basicConfig()
logger = logging.getLogger("kalliope")
class PlayerModule(object):
"""
Mother class of Players.
Ability to convert mp3 to wave format.
"""
def __init__(self, **kwargs):
# set parameter from what we receive from the settings
self.convert = kwargs.get('convert_to_wav', True)
@staticmethod
def convert_mp3_to_wav(file_path_mp3):
"""
PyAudio, AlsaPlayer, sounddevices do not support mp3 files
MP3 files must be converted to a wave in order to be played
        This function assumes avconv (from libav) is available on the system
:param file_path_mp3: the file path to convert from mp3 to wav
"""
logger.debug("Converting mp3 file to wav file: %s" % file_path_mp3)
fnull = open(os.devnull, 'w')
# temp file
tmp_file_wav = file_path_mp3 + ".wav"
# Convert mp3 to wave
subprocess.call(['avconv', '-y', '-i', file_path_mp3, tmp_file_wav],
stdout=fnull, stderr=fnull)
# remove the original file
FileManager.remove_file(file_path_mp3)
# rename the temp file with the same name as the original file
os.rename(tmp_file_wav, file_path_mp3)
```
#### File: core/Utils/FileManager.py
```python
import logging
import os
import sys
logging.basicConfig()
logger = logging.getLogger("kalliope")
class FileManager:
"""
Class used to manage Files
"""
def __init__(self):
pass
@staticmethod
def create_directory(cache_path):
"""
Create a directory at the provided `cache_path`
:param cache_path: the path of the directory to create
:type cache_path: str
"""
if not os.path.exists(cache_path):
os.makedirs(cache_path)
@staticmethod
def write_in_file(file_path, content):
"""
Write contents into a file
:param file_path: the path of the file to write on
:type file_path: str
:param content: the contents to write in the file
:type content: str
.. raises:: IOError
"""
try:
with open(file_path, "wb") as file_open:
if sys.version_info[0] == 2:
file_open.write(content)
else:
file_open.write(content.encode())
file_open.close()
return not FileManager.file_is_empty(file_path)
except IOError as e:
logger.error("I/O error(%s): %s", e.errno, e.strerror)
return False
@staticmethod
def file_is_empty(file_path):
"""
Check if the file is empty
:param file_path: the path of the file
:return: True if the file is empty, False otherwise
"""
return os.path.getsize(file_path) == 0
@staticmethod
def remove_file(file_path):
"""
        Remove the file located at the provided `file_path`
        :param file_path:
        :return: True if the file has been removed successfully, False otherwise
        """
        if os.path.exists(file_path):
            os.remove(file_path)
            return True
        return False
@staticmethod
def is_path_creatable(pathname):
"""
`True` if the current user has sufficient permissions to create the passed
pathname; `False` otherwise.
"""
dirname = os.path.dirname(pathname) or os.getcwd()
return os.access(dirname, os.W_OK)
@staticmethod
def is_path_exists_or_creatable(pathname):
"""
`True` if the passed pathname is a valid pathname for the current OS _and_
either currently exists or is hypothetically creatable; `False` otherwise.
This function is guaranteed to _never_ raise exceptions.
.. raises:: OSError
"""
try:
return os.path.exists(pathname) or FileManager.is_path_creatable(pathname)
except OSError as e:
logger.error("OSError(%s): %s", e.errno, e.strerror)
return False
```
#### File: ansible_playbook/tests/test_ansible_playbook.py
```python
import os
import unittest
from collections import namedtuple
import mock
from kalliope.neurons.ansible_playbook import Ansible_playbook
from kalliope.core.NeuronModule import MissingParameterException
class TestAnsible_Playbook(unittest.TestCase):
def setUp(self):
self.task_file = "task_file"
self.random = "random"
self.test_file = "/tmp/kalliope_text_ansible_playbook.txt"
def testParameters(self):
def run_test(parameters_to_test):
with self.assertRaises(MissingParameterException):
Ansible_playbook(**parameters_to_test)
# empty
parameters = dict()
run_test(parameters)
# missing task_file
parameters = {
"random": self.random
}
run_test(parameters)
# missing sudo user
parameters = {
"sudo": True,
"random": self.random
}
run_test(parameters)
# missing sudo password
parameters = {
"sudo": True,
"sudo_user": "user"
}
run_test(parameters)
# parameters ok
parameters = {
"task_file": "kalliope/neurons/ansible_playbook/tests/test_ansible_playbook_neuron.yml",
"sudo": True,
"sudo_user": "user",
"sudo_password": "password"
}
with mock.patch("ansible.executor.playbook_executor.PlaybookExecutor.run"):
instanciated_neuron = Ansible_playbook(**parameters)
self.assertTrue(instanciated_neuron._is_parameters_ok)
def test_create_file_via_ansible_playbook(self):
"""
This test will use an ansible playbook the create a file. We check that the file has been created
"""
# without sudo
param = {
"task_file": "kalliope/neurons/ansible_playbook/tests/test_ansible_playbook_neuron.yml"
}
Ansible_playbook(**param)
self.assertTrue(os.path.isfile(self.test_file))
if os.path.exists(self.test_file):
os.remove(self.test_file)
# with sudo
param = {
"task_file": "kalliope/neurons/ansible_playbook/tests/test_ansible_playbook_neuron.yml",
"sudo": True,
"sudo_user": "user",
"sudo_password": "password"
}
Options = namedtuple('Options',
['connection', 'forks', 'become', 'become_method', 'become_user', 'check', 'listhosts',
'listtasks', 'listtags', 'syntax', 'module_path'])
expected_option = Options(connection='local', forks=100, become=True, become_method="sudo",
become_user="user", check=False, listhosts=False, listtasks=False, listtags=False,
syntax=False, module_path="")
with mock.patch("ansible.executor.playbook_executor.PlaybookExecutor.run") as playbookExecutor:
instance_neuron = Ansible_playbook(**param)
playbookExecutor.assert_called_once()
self.assertEqual(instance_neuron._get_options(), expected_option)
if __name__ == '__main__':
unittest.main()
```
#### File: neurons/sleep/sleep.py
```python
import time
import six
from kalliope.core.NeuronModule import NeuronModule, MissingParameterException
class Sleep(NeuronModule):
def __init__(self, **kwargs):
super(Sleep, self).__init__(**kwargs)
self.seconds = kwargs.get('seconds', None)
# check parameters
if self._is_parameters_ok():
if isinstance(self.seconds, str) or \
isinstance(self.seconds, six.text_type):
self.seconds = float(self.seconds)
time.sleep(self.seconds)
def _is_parameters_ok(self):
"""
Check if received parameters are ok to perform operations in the neuron
:return: true if parameters are ok, raise an exception otherwise
.. raises:: MissingParameterException
"""
if self.seconds is None:
raise MissingParameterException("You must set a number of seconds as parameter")
return True
```
#### File: neurons/systemdate/systemdate.py
```python
import time
from kalliope.core.NeuronModule import NeuronModule
class Systemdate(NeuronModule):
def __init__(self, **kwargs):
# get the cache if set by the user, if not, set it to false as it is not necessary
cache = kwargs.get('cache', None)
if cache is not None:
kwargs["cache"] = cache
else:
kwargs["cache"] = False
super(Systemdate, self).__init__(**kwargs)
# local time and date
hour = time.strftime("%H") # Hour (24-hour clock) as a decimal number [00,23].
minute = time.strftime("%M") # Minute as a decimal number [00,59].
weekday = time.strftime("%w") # Weekday as a decimal number [0(Sunday),6].
day_month = time.strftime("%d") # Day of the month as a decimal number [01,31].
month = time.strftime("%m") # Month as a decimal number [01,12].
year = time.strftime("%Y") # Year with century as a decimal number. E.g: 2016
self.message = {
"hours": hour,
"minutes": minute,
"weekday": weekday,
"month": month,
"day_month": day_month,
"year": year
}
self.say(self.message)
```
#### File: players/pyalsaaudio/pyalsaaudio.py
```python
import alsaaudio
import logging
import wave
from kalliope.core.PlayerModule import PlayerModule
logging.basicConfig()
logger = logging.getLogger("kalliope")
CHUNK = 1024
ALSAAUDIO_BIT_MAPPING = {8: alsaaudio.PCM_FORMAT_S8,
16: alsaaudio.PCM_FORMAT_S16_LE,
24: alsaaudio.PCM_FORMAT_S24_LE,
32: alsaaudio.PCM_FORMAT_S32_LE}
STANDARD_SAMPLE_RATES = (
8000, 9600, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 88200,
96000, 192000
)
DEVICE_TYPE_ALL = 'all'
DEVICE_TYPE_INPUT = 'input'
DEVICE_TYPE_OUTPUT = 'output'
def bits_to_samplefmt(bits):
if bits in ALSAAUDIO_BIT_MAPPING.keys():
return ALSAAUDIO_BIT_MAPPING[bits]
else:
raise ValueError('Unsupported format')
class Pyalsaaudio(PlayerModule):
"""
    This class represents the Player object used to play all the sounds of the system.
"""
def __init__(self, **kwargs):
super(Pyalsaaudio, self).__init__(**kwargs)
# List devices
logger.debug("[pyalsaaudio.__init__] instance")
logger.debug("[pyalsaaudio.__init__] devices : %s " % (str(self.get_devices(DEVICE_TYPE_OUTPUT))))
logger.debug("[pyalsaaudio.__init__] args : %s " % str(kwargs))
self.device = kwargs.get('device', 'default')
@staticmethod
def get_devices(device_type=DEVICE_TYPE_ALL):
devices = set()
if device_type in (DEVICE_TYPE_ALL,
DEVICE_TYPE_OUTPUT):
devices.update(set(alsaaudio.pcms(alsaaudio.PCM_PLAYBACK)))
if device_type in (DEVICE_TYPE_ALL,
DEVICE_TYPE_INPUT):
devices.update(set(alsaaudio.pcms(alsaaudio.PCM_CAPTURE)))
device_names = sorted(list(devices))
num_devices = len(device_names)
logger.debug('Found %d ALSA devices', num_devices)
return device_names
def play(self, file_path):
if self.convert:
self.convert_mp3_to_wav(file_path_mp3=file_path)
f = wave.open(file_path, 'rb')
pcm_type = alsaaudio.PCM_PLAYBACK
stream = alsaaudio.PCM(type=pcm_type,
mode=alsaaudio.PCM_NORMAL,
device=self.device)
# Set attributes
stream.setchannels(f.getnchannels())
stream.setrate(f.getframerate())
bits = f.getsampwidth()*8
stream.setformat(bits_to_samplefmt(bits))
stream.setperiodsize(CHUNK)
logger.debug("[PyAlsaAudioPlayer] %d channels, %d sampling rate, %d bit" % (f.getnchannels(),
f.getframerate(),
bits))
data = f.readframes(CHUNK)
while data:
# Read data from stdin
stream.write(data)
data = f.readframes(CHUNK)
f.close()
stream.close()
```
#### File: signals/mqtt_subscriber/mqtt_subscriber.py
```python
import logging
from threading import Thread
from kalliope.core import SignalModule, MissingParameter
from kalliope.core.ConfigurationManager import BrainLoader
from kalliope.signals.mqtt_subscriber.MqttClient import MqttClient
from kalliope.signals.mqtt_subscriber.models import Broker, Topic
from kalliope.core import Utils
CLIENT_ID = "kalliope"
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Mqtt_subscriber(SignalModule, Thread):
def __init__(self, **kwargs):
super(Mqtt_subscriber, self).__init__(**kwargs)
        Utils.print_info('[Mqtt_subscriber] Starting manager')
        # variables
self.list_synapses_with_mqtt = list(super(Mqtt_subscriber, self).get_list_synapse())
self.broker_ip = None
self.topic = None
self.json_message = False
def run(self):
logger.debug("[Mqtt_subscriber] Starting Mqtt_subscriber")
        # group the declared signals by broker IP, then by topic, and attach the synapses to run for each topic
list_broker_to_instantiate = self.get_list_broker_to_instantiate(self.list_synapses_with_mqtt)
# now instantiate a MQTT client for each broker object
self.instantiate_mqtt_client(list_broker_to_instantiate)
@staticmethod
def check_parameters(parameters):
"""
overwrite method
receive a dict of parameter from a mqtt_subscriber signal
:param parameters: dict of mqtt_signal_parameters
:return: True if parameters are valid
"""
# check mandatory parameters
mandatory_parameters = ["broker_ip", "topic"]
if not all(key in parameters for key in mandatory_parameters):
return False
return True
@staticmethod
def get_list_broker_to_instantiate(list_synapse_with_mqtt_subscriber):
"""
return a list of Broker object from the given list of synapse
:param list_synapse_with_mqtt_subscriber: list of Synapse object
:return: list of Broker
"""
returned_list_of_broker = list()
for synapse in list_synapse_with_mqtt_subscriber:
for signal in synapse.signals:
# check if the broker exist in the list
if not any(x.broker_ip == signal.parameters["broker_ip"] for x in returned_list_of_broker):
logger.debug("[Mqtt_subscriber] Create new broker: %s" % signal.parameters["broker_ip"])
# create a new broker object
new_broker = Broker()
new_broker.build_from_signal_dict(signal.parameters)
# add the current topic
logger.debug("[Mqtt_subscriber] Add new topic to broker %s: %s" % (new_broker.broker_ip,
signal.parameters["topic"]))
new_topic = Topic()
new_topic.name = signal.parameters["topic"]
if "is_json" in signal.parameters:
logger.debug("[Mqtt_subscriber] Message for the topic %s will be json converted"
% new_topic.name)
new_topic.is_json = bool(signal.parameters["is_json"])
else:
new_topic.is_json = False
# add the current synapse to the topic
new_topic.synapses = list()
new_topic.synapses.append(synapse)
new_broker.topics.append(new_topic)
logger.debug("[Mqtt_subscriber] Add new synapse to topic %s :%s" % (new_topic.name, synapse.name))
returned_list_of_broker.append(new_broker)
else:
# the broker exist. get it from the list of broker
broker_to_edit = next((broker for broker in returned_list_of_broker
if signal.parameters["broker_ip"] == broker.broker_ip))
# check if the topic already exist
if not any(topic.name == signal.parameters["topic"] for topic in broker_to_edit.topics):
new_topic = Topic()
new_topic.name = signal.parameters["topic"]
if "is_json" in signal.parameters:
logger.debug("[Mqtt_subscriber] Message for the topic %s will be json converted"
% new_topic.name)
new_topic.is_json = bool(signal.parameters["is_json"])
else:
new_topic.is_json = False
logger.debug("[Mqtt_subscriber] Add new topic to existing broker "
"%s: %s" % (broker_to_edit.broker_ip, signal.parameters["topic"]))
# add the current synapse to the topic
logger.debug("[Mqtt_subscriber] Add new synapse "
"to topic %s :%s" % (new_topic.name, synapse.name))
new_topic.synapses = list()
new_topic.synapses.append(synapse)
# add the topic to the broker
broker_to_edit.topics.append(new_topic)
else:
# the topic already exist, get it from the list
topic_to_edit = next((topic for topic in broker_to_edit.topics
if topic.name == signal.parameters["topic"]))
# add the synapse
logger.debug("[Mqtt_subscriber] Add synapse %s to existing topic %s "
"in existing broker %s" % (synapse.name,
topic_to_edit.name,
broker_to_edit.broker_ip))
topic_to_edit.synapses.append(synapse)
return returned_list_of_broker
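    # Illustrative outcome (broker IP and topic names below are made up): two synapses
    # whose signals both target broker_ip "127.0.0.1", one on topic "home/kitchen" and
    # one on topic "home/garage", collapse into a single Broker carrying two Topic
    # objects, each listing the synapses to launch when a message arrives on it.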
def instantiate_mqtt_client(self, list_broker_to_instantiate):
"""
Instantiate a MqttClient thread for each broker
:param list_broker_to_instantiate: list of broker to run
"""
for broker in list_broker_to_instantiate:
mqtt_client = MqttClient(broker=broker, brain=self.brain)
mqtt_client.start()
```
#### File: tts/espeak/espeak.py
```python
from kalliope.core.TTS.TTSModule import TTSModule, MissingTTSParameter
import logging
import sys
import subprocess
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Espeak(TTSModule):
def __init__(self, **kwargs):
super(Espeak, self).__init__(language="any", **kwargs)
# set parameter from what we receive from the settings
self.variant = kwargs.get('variant', None)
self.speed = str(kwargs.get('speed', '160'))
self.amplitude = str(kwargs.get('amplitude', '100'))
self.pitch = str(kwargs.get('pitch', '50'))
self.espeak_exec_path = kwargs.get('path', r'/usr/bin/espeak')
if self.voice == 'default' or self.voice is None:
raise MissingTTSParameter("voice parameter is required by the eSpeak TTS")
# if voice = default, don't add voice option to espeak
if self.variant is None:
self.voice_and_variant = self.voice
else:
self.voice_and_variant = self.voice + '+' + self.variant
def say(self, words):
"""
:param words: The sentence to say
"""
self.generate_and_play(words, self._generate_audio_file)
def _generate_audio_file(self):
"""
Generic method used as a Callback in TTSModule
- must provided the audio file and write it on the disk
.. raises:: FailToLoadSoundFile
"""
options = {
'v': '-v' + self.voice_and_variant,
's': '-s' + self.speed,
'a': '-a' + self.amplitude,
'p': '-p' + self.pitch,
'w': '-w' + self.file_path
}
final_command = [self.espeak_exec_path, options['v'], options['s'], options['a'],
options['p'], options['w'], self.words]
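        # Illustrative resulting command (voice, variant and paths are assumptions):
        #   /usr/bin/espeak -ven+m3 -s160 -a100 -p50 -w/tmp/out.wav "hello world"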
# generate the file with eSpeak
subprocess.call(final_command, stderr=sys.stderr)
```
#### File: tts/pico2wave/pico2wave.py
```python
import os
import subprocess
from kalliope.core.TTS.TTSModule import TTSModule, MissingTTSParameter
import sox
import logging
import sys
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Pico2wave(TTSModule):
def __init__(self, **kwargs):
super(Pico2wave, self).__init__(**kwargs)
self.samplerate = kwargs.get('samplerate', None)
self.path = kwargs.get('path', None)
self._check_parameters()
def _check_parameters(self):
"""
        Check parameters are ok, raise MissingTTSParameter exception otherwise.
        :return: true if parameters are ok, raise an exception otherwise
        .. raises:: MissingTTSParameter
"""
if self.language == "default" or self.language is None:
raise MissingTTSParameter("[pico2wave] Missing parameters, check documentation !")
return True
def say(self, words):
"""
:param words: The sentence to say
"""
self.generate_and_play(words, self._generate_audio_file)
def _generate_audio_file(self):
"""
Generic method used as a Callback in TTSModule
- must provided the audio file and write it on the disk
.. raises:: FailToLoadSoundFile
"""
if self.path is None:
# we try to get the path from the env
self.path = self._get_pico_path()
# if still None, we set a default value
if self.path is None:
self.path = "/usr/bin/pico2wave"
        # pico2wave needs the file path to end with .wav
tmp_path = self.file_path+".wav"
pico2wave_options = ["-l=%s" % self.language, "-w=%s" % tmp_path]
final_command = list()
final_command.extend([self.path])
final_command.extend(pico2wave_options)
final_command.append(self.words)
logger.debug("[Pico2wave] command: %s" % final_command)
# generate the file with pico2wav
subprocess.call(final_command)
# convert samplerate
if self.samplerate is not None:
tfm = sox.Transformer()
tfm.rate(samplerate=self.samplerate)
tfm.build(str(tmp_path), str(tmp_path) + "tmp_name.wav")
os.rename(str(tmp_path) + "tmp_name.wav", tmp_path)
# remove the extension .wav
os.rename(tmp_path, self.file_path)
@staticmethod
def _get_pico_path():
prog = "pico2wave"
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, prog)
if os.path.isfile(exe_file):
return exe_file
return None
```
#### File: kalliope/Tests/test_configuration_checker.py
```python
import unittest
from kalliope.core.ConfigurationManager.ConfigurationChecker import ConfigurationChecker, NoSynapeName, \
NoSynapeNeurons, NoSynapeSignals, NoValidSignal, MultipleSameSynapseName
from kalliope.core.Models import Synapse
from kalliope.core.Utils.Utils import ModuleNotFoundError
class TestConfigurationChecker(unittest.TestCase):
"""
Class used to test the ConfigurationChecker class
"""
def setUp(self):
pass
def test_check_synape_dict(self):
valid_synapse_dict = {
'signals': [{'order': 'test_order'}],
'neurons': [{'say': {'message': ['test message']}}],
'name': 'test'
}
synapse_dict_without_name = {
'signals': [{'order': 'test_order'}],
'neurons': [{'say': {'message': ['test message']}}]
}
synapse_dict_without_neurons = {
'signals': [{'order': 'test_order'}],
'name': 'test'
}
synapse_dict_without_signals = {
'neurons': [{'say': {'message': ['test message']}}],
'name': 'test'
}
self.assertTrue(ConfigurationChecker.check_synape_dict(valid_synapse_dict))
with self.assertRaises(NoSynapeName):
ConfigurationChecker.check_synape_dict(synapse_dict_without_name)
with self.assertRaises(NoSynapeNeurons):
ConfigurationChecker.check_synape_dict(synapse_dict_without_neurons)
with self.assertRaises(NoSynapeSignals):
ConfigurationChecker.check_synape_dict(synapse_dict_without_signals)
def test_check_neuron_dict(self):
valid_neuron = {'say': {'message': ['test message']}}
invalid_neuron = {'not_existing_neuron': {'message': ['test message']}}
self.assertTrue(ConfigurationChecker.check_neuron_dict(valid_neuron))
with self.assertRaises(ModuleNotFoundError):
ConfigurationChecker.check_neuron_dict(invalid_neuron)
def test_check_signal_dict(self):
valid_signal = {'event': {'parameter_1': ['value1']}}
invalid_signal = {'non_existing_signal_name': {'parameter_2': ['value2']}}
self.assertTrue(ConfigurationChecker.check_signal_dict(valid_signal))
with self.assertRaises(ModuleNotFoundError):
ConfigurationChecker.check_signal_dict(invalid_signal)
def test_check_synapes(self):
synapse_1 = Synapse(name="test")
synapse_2 = Synapse(name="test2")
synapse_3 = Synapse(name="test")
valid_synapse_list = [synapse_1, synapse_2]
invalid_synapse_list = [synapse_1, synapse_3]
self.assertTrue(ConfigurationChecker.check_synapes(valid_synapse_list))
with self.assertRaises(MultipleSameSynapseName):
ConfigurationChecker.check_synapes(invalid_synapse_list)
if __name__ == '__main__':
unittest.main()
```
#### File: kalliope/Tests/test_file_manager.py
```python
import unittest
import os
from kalliope.core.Utils.FileManager import FileManager
class TestFileManager(unittest.TestCase):
"""
Class to test FileManager
"""
def setUp(self):
pass
def create_file_manager(self):
file_manager = FileManager()
        self.assertIsInstance(file_manager, FileManager)
def test_create_directory(self):
"""
Test to create a new directory.
"""
# set up
cache_path = "/tmp/kalliope/tests/testDirectory"
if os.path.exists(cache_path):
os.removedirs(cache_path)
# Test FileManager.create_directory
FileManager.create_directory(cache_path)
self.assertTrue(os.path.exists(cache_path),
"Fail creating a directory to the path ")
# Remove the directory
os.removedirs(cache_path)
def test_write_in_file(self):
"""
Test to write in file.
"""
# set up the context
dir_path = "/tmp/kalliope/tests/"
file_name = "test_FileManager_writeInFile"
file_path = os.path.join(dir_path,file_name)
in_file_text = "[Kalliope] Testing the write_in_file method from Utils.FileManager"
if os.path.exists(file_path):
os.remove(file_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# Test FileManager.write_in_file
FileManager.write_in_file(file_path=file_path, content=in_file_text)
with open(file_path, 'r') as content_file:
content = content_file.read()
self.assertEqual(content, in_file_text,
"Fail writing in the file ")
# Clean up
if os.path.exists(file_path):
os.remove(file_path)
# run into IOError by trying to write something in root
dir_path = "/root/"
file_name = "test_FileManager_writeInFile"
file_path = os.path.join(dir_path, file_name)
self.assertFalse(FileManager.write_in_file(file_path=file_path, content=in_file_text))
def test_file_is_empty(self):
"""
Test that the file is empty
"""
# set up the context
dir_path = "/tmp/kalliope/tests/"
file_name = "test_FileManager_fileIsEmpty"
file_path = os.path.join(dir_path, file_name)
if os.path.exists(file_path):
os.remove(file_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# Test FileManager.file_is_empty
with open(file_path, "wb") as file_open:
file_open.write(b"")
file_open.close()
self.assertTrue(FileManager.file_is_empty(file_path=file_path),
"Fail matching to verify that file is empty ")
# Clean up
if os.path.exists(file_path):
os.remove(file_path)
def test_remove_file(self):
"""
Test to remove a file
"""
# set up the context
dir_path = "/tmp/kalliope/tests/"
file_name = "test_FileManager_fileRemove"
file_path = os.path.join(dir_path, file_name)
if os.path.exists(file_path):
os.remove(file_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# Test to remove the file
# FileManager.remove_file
with open(file_path, "wb") as file_open:
file_open.write(b"")
file_open.close()
FileManager.remove_file(file_path=file_path)
self.assertFalse(os.path.exists(file_path),
"Fail removing the file")
def test_is_path_creatable(self):
"""
Test if the path is creatable for the user
Does the user has the permission to use this path ?
"""
# set up the context
dir_path = "/tmp/kalliope/tests/"
file_name = "test_FileManager_filePathCreatable"
file_path = os.path.join(dir_path, file_name)
if os.path.exists(file_path):
os.remove(file_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# test not allowed : return False
not_allowed_root_path = "/root/"
not_allowed_path = os.path.join(not_allowed_root_path, file_name)
self.assertFalse(FileManager.is_path_creatable(not_allowed_path),
"Fail to assert not accessing this path ")
# test allowed : return True
self.assertTrue(FileManager.is_path_creatable(file_path))
def test_is_path_exists_or_creatable(self):
"""
Test the _is_path_exists_or_creatable
4 scenarii :
- the file exists and is creatable : return True
- the file does not exist but is creatable : return True
- the file exists but is not allowed : return True --> need a review !
- the file does not exist and is not allowed : return False
"""
# set up the context
dir_path = "/tmp/kalliope/tests/"
file_name = "test_FileManager_fileIsPathExistsOrCreatable"
file_path = os.path.join(dir_path, file_name)
if os.path.exists(file_path):
os.remove(file_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# Test the file exist and creatable : return True
with open(file_path, "wb") as file_open:
file_open.write(b"[Kalliope] Test Running the test_is_path_exists_or_creatable method")
file_open.close()
self.assertTrue(FileManager.is_path_exists_or_creatable(file_path),
"Fail to assert the file exist ")
# test the file not exist but creatable : return True
os.remove(file_path)
self.assertTrue(FileManager.is_path_exists_or_creatable(file_path),
"Fail asserting the file does not exist ")
# test the file exist but not creatable : return True
# file_exist_not_allowed = "/root/.ssh/known_hosts"
# self.assertTrue(FileManager.is_path_creatable(file_exist_not_allowed))
# test the file not exist and not allowed : return False
not_allowed_root_path = "/root/"
not_allowed_path = os.path.join(not_allowed_root_path, file_name)
self.assertFalse(FileManager.is_path_creatable(not_allowed_path),
"Fail to assert not accessing this path ")
if __name__ == '__main__':
unittest.main()
```
#### File: kalliope/Tests/test_hook_manager.py
```python
import unittest
import os
import mock as mock
import inspect
import shutil
from kalliope.core.Models import Singleton
from kalliope.core.ConfigurationManager import SettingLoader
from kalliope.core import HookManager
from kalliope.core.Models.Settings import Settings
class TestInit(unittest.TestCase):
def setUp(self):
# Init the folders, otherwise it raises an exceptions
os.makedirs("/tmp/kalliope/tests/kalliope_resources_dir/neurons")
os.makedirs("/tmp/kalliope/tests/kalliope_resources_dir/stt")
os.makedirs("/tmp/kalliope/tests/kalliope_resources_dir/tts")
os.makedirs("/tmp/kalliope/tests/kalliope_resources_dir/trigger")
# get current script directory path. We are in /an/unknown/path/kalliope/core/tests
cur_script_directory = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# get parent dir. Now we are in /an/unknown/path/kalliope
root_dir = os.path.normpath(cur_script_directory + os.sep + os.pardir)
self.settings_file_to_test = root_dir + os.sep + "Tests/settings/settings_test.yml"
self.settings = SettingLoader(file_path=self.settings_file_to_test)
def tearDown(self):
# Cleanup
shutil.rmtree('/tmp/kalliope/tests/kalliope_resources_dir')
Singleton._instances = {}
def test_on_start(self):
"""
test list of synapse
"""
with mock.patch("kalliope.core.SynapseLauncher.start_synapse_by_list_name") as mock_synapse_launcher:
HookManager.on_start()
mock_synapse_launcher.assert_called_with(["on-start-synapse", "bring-led-on"], new_lifo=True)
mock_synapse_launcher.reset_mock()
def test_on_waiting_for_trigger(self):
"""
test with single synapse
"""
with mock.patch("kalliope.core.SynapseLauncher.start_synapse_by_name") as mock_synapse_launcher:
HookManager.on_waiting_for_trigger()
mock_synapse_launcher.assert_called_with("test", new_lifo=True)
mock_synapse_launcher.reset_mock()
def test_on_triggered(self):
with mock.patch("kalliope.core.SynapseLauncher.start_synapse_by_list_name") as mock_synapse_launcher:
HookManager.on_triggered()
mock_synapse_launcher.assert_called_with(["on-triggered-synapse"], new_lifo=True)
mock_synapse_launcher.reset_mock()
def test_on_start_listening(self):
self.assertIsNone(HookManager.on_start_listening())
def test_on_stop_listening(self):
self.assertIsNone(HookManager.on_stop_listening())
def test_on_order_found(self):
self.assertIsNone(HookManager.on_order_found())
def test_on_order_not_found(self):
with mock.patch("kalliope.core.SynapseLauncher.start_synapse_by_list_name") as mock_synapse_launcher:
HookManager.on_order_not_found()
mock_synapse_launcher.assert_called_with(["order-not-found-synapse"], new_lifo=True)
mock_synapse_launcher.reset_mock()
def test_on_mute(self):
"""
test that empty list of synapse return none
"""
self.assertIsNone(HookManager.on_mute())
if __name__ == '__main__':
unittest.main()
# suite = unittest.TestSuite()
# suite.addTest(TestInit("test_main"))
# runner = unittest.TextTestRunner()
# runner.run(suite)
```
#### File: kalliope/Tests/test_order_analyser.py
```python
import unittest
from kalliope.core.Models import Brain
from kalliope.core.Models import Neuron
from kalliope.core.Models import Synapse
from kalliope.core.Models.MatchedSynapse import MatchedSynapse
from kalliope.core.Models.Signal import Signal
from kalliope.core.OrderAnalyser import OrderAnalyser
class TestOrderAnalyser(unittest.TestCase):
"""Test case for the OrderAnalyser Class"""
def setUp(self):
pass
def test_get_matching_synapse(self):
# Init
neuron1 = Neuron(name='neurone1', parameters={'var1': 'val1'})
neuron2 = Neuron(name='neurone2', parameters={'var2': 'val2'})
neuron3 = Neuron(name='neurone3', parameters={'var3': 'val3'})
neuron4 = Neuron(name='neurone4', parameters={'var4': 'val4'})
signal1 = Signal(name="order", parameters="this is the sentence")
signal2 = Signal(name="order", parameters="this is the second sentence")
signal3 = Signal(name="order", parameters="that is part of the third sentence")
signal4 = Signal(name="order", parameters={"matching-type": "strict",
"text": "that is part of the fourth sentence"})
signal5 = Signal(name="order", parameters={"matching-type": "ordered-strict",
"text": "sentence 5 with specific order"})
signal6 = Signal(name="order", parameters={"matching-type": "normal",
"text": "matching type normal"})
signal7 = Signal(name="order", parameters={"matching-type": "non-existing",
"text": "matching type non existing"})
signal8 = Signal(name="order", parameters={"matching-type": "non-existing",
"non-existing-parameter": "will not match order"})
synapse1 = Synapse(name="Synapse1", neurons=[neuron1, neuron2], signals=[signal1])
synapse2 = Synapse(name="Synapse2", neurons=[neuron3, neuron4], signals=[signal2])
synapse3 = Synapse(name="Synapse3", neurons=[neuron2, neuron4], signals=[signal3])
synapse4 = Synapse(name="Synapse4", neurons=[neuron2, neuron4], signals=[signal4])
synapse5 = Synapse(name="Synapse5", neurons=[neuron1, neuron2], signals=[signal5])
synapse6 = Synapse(name="Synapse6", neurons=[neuron1, neuron2], signals=[signal6])
synapse7 = Synapse(name="Synapse6", neurons=[neuron1, neuron2], signals=[signal7])
synapse8 = Synapse(name="Synapse6", neurons=[neuron1, neuron2], signals=[signal8])
all_synapse_list = [synapse1,
synapse2,
synapse3,
synapse4,
synapse5,
synapse6,
synapse7,
synapse8]
br = Brain(synapses=all_synapse_list)
# TEST1: should return synapse1
spoken_order = "this is the sentence"
# Create the matched synapse
expected_matched_synapse_1 = MatchedSynapse(matched_synapse=synapse1,
matched_order=spoken_order,
user_order=spoken_order)
matched_synapses = OrderAnalyser.get_matching_synapse(order=spoken_order, brain=br)
self.assertEqual(len(matched_synapses), 1)
self.assertTrue(expected_matched_synapse_1 in matched_synapses)
# with defined normal matching type
spoken_order = "matching type normal"
expected_matched_synapse_5 = MatchedSynapse(matched_synapse=synapse6,
matched_order=spoken_order,
user_order=spoken_order)
matched_synapses = OrderAnalyser.get_matching_synapse(order=spoken_order, brain=br)
self.assertEqual(len(matched_synapses), 1)
self.assertTrue(expected_matched_synapse_5 in matched_synapses)
# TEST2: should return synapse1 and 2
spoken_order = "this is the second sentence"
expected_matched_synapse_2 = MatchedSynapse(matched_synapse=synapse1,
matched_order=spoken_order,
user_order=spoken_order)
matched_synapses = OrderAnalyser.get_matching_synapse(order=spoken_order, brain=br)
self.assertEqual(len(matched_synapses), 2)
        self.assertTrue(expected_matched_synapse_2 in matched_synapses)
        # TEST3: should be empty
spoken_order = "not a valid order"
matched_synapses = OrderAnalyser.get_matching_synapse(order=spoken_order, brain=br)
self.assertFalse(matched_synapses)
# TEST4: with matching type strict
spoken_order = "that is part of the fourth sentence"
expected_matched_synapse_3 = MatchedSynapse(matched_synapse=synapse4,
matched_order=spoken_order,
user_order=spoken_order)
matched_synapses = OrderAnalyser.get_matching_synapse(order=spoken_order, brain=br)
self.assertTrue(expected_matched_synapse_3 in matched_synapses)
spoken_order = "that is part of the fourth sentence with more word"
matched_synapses = OrderAnalyser.get_matching_synapse(order=spoken_order, brain=br)
self.assertFalse(matched_synapses)
# TEST5: with matching type ordered strict
spoken_order = "sentence 5 with specific order"
expected_matched_synapse_4 = MatchedSynapse(matched_synapse=synapse5,
matched_order=spoken_order,
user_order=spoken_order)
matched_synapses = OrderAnalyser.get_matching_synapse(order=spoken_order, brain=br)
self.assertEqual(len(matched_synapses), 1)
self.assertTrue(expected_matched_synapse_4 in matched_synapses)
spoken_order = "order specific with 5 sentence"
matched_synapses = OrderAnalyser.get_matching_synapse(order=spoken_order, brain=br)
self.assertFalse(matched_synapses)
# TEST6: non supported type of matching. should fallback to normal
spoken_order = "matching type non existing"
expected_matched_synapse_5 = MatchedSynapse(matched_synapse=synapse7,
matched_order=spoken_order,
user_order=spoken_order)
matched_synapses = OrderAnalyser.get_matching_synapse(order=spoken_order, brain=br)
self.assertTrue(expected_matched_synapse_5 in matched_synapses)
def test_get_split_order_without_bracket(self):
# Success
order_to_test = "this is the order"
expected_result = ["this", "is", "the", "order"]
self.assertEqual(OrderAnalyser._get_split_order_without_bracket(order_to_test), expected_result,
"No brackets Fails to return the expected list")
order_to_test = "this is the {{ order }}"
expected_result = ["this", "is", "the"]
self.assertEqual(OrderAnalyser._get_split_order_without_bracket(order_to_test), expected_result,
"With spaced brackets Fails to return the expected list")
order_to_test = "this is the {{order }}" # left bracket without space
expected_result = ["this", "is", "the"]
self.assertEqual(OrderAnalyser._get_split_order_without_bracket(order_to_test), expected_result,
"Left brackets Fails to return the expected list")
order_to_test = "this is the {{ order}}" # right bracket without space
expected_result = ["this", "is", "the"]
self.assertEqual(OrderAnalyser._get_split_order_without_bracket(order_to_test), expected_result,
"Right brackets Fails to return the expected list")
order_to_test = "this is the {{order}}" # bracket without space
expected_result = ["this", "is", "the"]
self.assertEqual(OrderAnalyser._get_split_order_without_bracket(order_to_test), expected_result,
"No space brackets Fails to return the expected list")
def test_is_normal_matching(self):
# same order
test_order = "expected order in the signal"
test_signal = "expected order in the signal"
self.assertTrue(OrderAnalyser.is_normal_matching(user_order=test_order,
signal_order=test_signal))
# not the same order
test_order = "this is an order"
test_signal = "expected order in the signal"
self.assertFalse(OrderAnalyser.is_normal_matching(user_order=test_order,
signal_order=test_signal))
# same order with more word in the user order
test_order = "expected order in the signal with more word"
test_signal = "expected order in the signal"
self.assertTrue(OrderAnalyser.is_normal_matching(user_order=test_order,
signal_order=test_signal))
# same order with bracket
test_order = "expected order in the signal"
test_signal = "expected order in the signal {{ variable }}"
self.assertTrue(OrderAnalyser.is_normal_matching(user_order=test_order,
signal_order=test_signal))
# same order with bracket
test_order = "expected order in the signal variable_to_catch"
test_signal = "expected order in the signal {{ variable }}"
self.assertTrue(OrderAnalyser.is_normal_matching(user_order=test_order,
signal_order=test_signal))
# same order with bracket and words after brackets
test_order = "expected order in the signal variable_to_catch other word"
test_signal = "expected order in the signal {{ variable }} other word"
self.assertTrue(OrderAnalyser.is_normal_matching(user_order=test_order,
signal_order=test_signal))
def test_is_strict_matching(self):
# same order with same amount of word
test_order = "expected order in the signal"
test_signal = "expected order in the signal"
self.assertTrue(OrderAnalyser.is_strict_matching(user_order=test_order,
signal_order=test_signal))
# same order but not the same amount of word
test_order = "expected order in the signal with more word"
test_signal = "expected order in the signal"
self.assertFalse(OrderAnalyser.is_strict_matching(user_order=test_order,
signal_order=test_signal))
# same order with same amount of word and brackets
test_order = "expected order in the signal variable_to_catch"
test_signal = "expected order in the signal {{ variable }}"
self.assertTrue(OrderAnalyser.is_strict_matching(user_order=test_order,
signal_order=test_signal))
# same order with same amount of word and brackets with words after last brackets
test_order = "expected order in the signal variable_to_catch other word"
test_signal = "expected order in the signal {{ variable }} other word"
self.assertTrue(OrderAnalyser.is_strict_matching(user_order=test_order,
signal_order=test_signal))
# same order with same amount of word and brackets with words after last brackets but more words
test_order = "expected order in the signal variable_to_catch other word and more word"
test_signal = "expected order in the signal {{ variable }} other word"
self.assertFalse(OrderAnalyser.is_strict_matching(user_order=test_order,
signal_order=test_signal))
def test_ordered_strict_matching(self):
# same order with same amount of word with same order
test_order = "expected order in the signal"
test_signal = "expected order in the signal"
self.assertTrue(OrderAnalyser.is_ordered_strict_matching(user_order=test_order,
signal_order=test_signal))
# same order with same amount of word without same order
test_order = "signal the in order expected"
test_signal = "expected order in the signal"
self.assertFalse(OrderAnalyser.is_ordered_strict_matching(user_order=test_order,
signal_order=test_signal))
# same order with same amount of word and brackets in the same order
test_order = "expected order in the signal variable_to_catch"
test_signal = "expected order in the signal {{ variable }}"
self.assertTrue(OrderAnalyser.is_ordered_strict_matching(user_order=test_order,
signal_order=test_signal))
# same order with same amount of word and brackets in the same order with words after bracket
test_order = "expected order in the signal variable_to_catch with word"
test_signal = "expected order in the signal {{ variable }} with word"
self.assertTrue(OrderAnalyser.is_ordered_strict_matching(user_order=test_order,
signal_order=test_signal))
# not same order with same amount of word and brackets
test_order = "signal the in order expected"
test_signal = "expected order in the signal {{ variable }}"
self.assertFalse(OrderAnalyser.is_ordered_strict_matching(user_order=test_order,
signal_order=test_signal))
# not same order with same amount of word and brackets with words after bracket
test_order = "word expected order in the signal variable_to_catch with"
test_signal = "expected order in the signal {{ variable }} with word"
self.assertFalse(OrderAnalyser.is_ordered_strict_matching(user_order=test_order,
signal_order=test_signal))
def test_is_order_matching(self):
# all lowercase
test_order = "expected order in the signal"
test_signal = "expected order in the signal"
self.assertTrue(OrderAnalyser.is_order_matching(user_order=test_order,
signal_order=test_signal))
# with uppercase
test_order = "Expected Order In The Signal"
test_signal = "expected order in the signal"
self.assertTrue(OrderAnalyser.is_order_matching(user_order=test_order,
signal_order=test_signal))
if __name__ == '__main__':
unittest.main()
# suite = unittest.TestSuite()
# suite.addTest(TestOrderAnalyser("test_get_matching_synapse"))
# runner = unittest.TextTestRunner()
# runner.run(suite)
```
#### File: kalliope/Tests/test_player_launcher.py
```python
import unittest
import mock
from kalliope.core.Models.Player import Player
from kalliope.core.Models.Settings import Settings
from kalliope.core.PlayerLauncher import PlayerLauncher
class TestPlayerLauncher(unittest.TestCase):
"""
Class to test Launchers Classes (PlayerLauncher) and methods
"""
def setUp(self):
pass
####
# Player Launcher
def test_get_player(self):
"""
Test the PlayerLauncher trying to run the Player
"""
player1 = Player("Player", {})
player2 = Player("Player2", {'test': "hitheparamtest"})
settings = Settings()
settings.players = [player1, player2]
with mock.patch("kalliope.core.Utils.get_dynamic_class_instantiation") as mock_get_class_instantiation:
# Get the player1
settings.default_player_name = "Player"
PlayerLauncher.get_player(settings=settings)
mock_get_class_instantiation.assert_called_once_with(package_name="players",
module_name=player1.name,
parameters=player1.parameters)
mock_get_class_instantiation.reset_mock()
# Get the player 2
settings.default_player_name = "Player2"
PlayerLauncher.get_player(settings=settings)
mock_get_class_instantiation.assert_called_once_with(package_name="players",
module_name=player2.name,
parameters=player2.parameters)
mock_get_class_instantiation.reset_mock()
```
#### File: kalliope/Tests/test_trigger_launcher.py
```python
import unittest
import mock
from kalliope.core.Models.Settings import Settings
from kalliope.core.TriggerLauncher import TriggerLauncher
from kalliope.core.Models.Trigger import Trigger
class TestTriggerLauncher(unittest.TestCase):
"""
Class to test Launchers Classes (TriggerLauncher) and methods
"""
def setUp(self):
pass
####
# Trigger Launcher
def test_get_trigger(self):
"""
Test the Trigger Launcher trying to run the trigger
"""
trigger1 = Trigger("Trigger", {})
trigger2 = Trigger("Trigger2", {'pmdl_file': "trigger/snowboy/resources/kalliope-FR-6samples.pmdl"})
settings = Settings()
settings.triggers = [trigger1, trigger2]
with mock.patch("kalliope.core.Utils.get_dynamic_class_instantiation") as mock_get_class_instantiation:
# Get the trigger 1
settings.default_trigger_name = "Trigger"
TriggerLauncher.get_trigger(settings=settings,
callback=None)
mock_get_class_instantiation.assert_called_once_with(package_name="trigger",
module_name=trigger1.name,
parameters=trigger1.parameters)
mock_get_class_instantiation.reset_mock()
# Get the trigger 2
settings.default_trigger_name = "Trigger2"
TriggerLauncher.get_trigger(settings=settings,
callback=None)
mock_get_class_instantiation.assert_called_once_with(package_name="trigger",
module_name=trigger2.name,
parameters=trigger2.parameters)
mock_get_class_instantiation.reset_mock()
``` |
{
"source": "Joshua-Booth/text-cryptography",
"score": 3
} |
#### File: text-cryptography/text_cryptography/__main__.py
```python
import string
import logging
from operator import add, sub
from collections import defaultdict
from text_cryptography.log import debug_logger as logger
DEBUG = False
if DEBUG:
logger.disabled = False
else:
logging.disable(logging.CRITICAL)
class Cryptography:
CHARS = string.ascii_letters + string.digits + string.punctuation + \
" \n"
def __init__(self, file=None, crypt_type=None, crypt_method=None,
key=None):
self._file = file
self._crypt_type = crypt_type
self._crypt_method = crypt_method
self._key = key
self.crypt_methods = {"C": lambda: self.caesar_cipher(),
"M": lambda: self.monoalphabetic(),
"P": lambda: self.polyalphabetic()}
def caesar_cipher(self):
"""
Encrypts or decrypts the file using a caesar cipher.
:return string: The encrypted/decrypted file data.
"""
chars = list(self.CHARS * 2)
data = ""
crypt_operator = add if self.crypt_type == "encrypt" else sub
for character in self.file_data:
try:
index = crypt_operator(chars.index(character), self.key)
data += chars[index]
except (ValueError, TypeError) as e:
logger.error(e)
print(f"Invalid character '{character}' in file {self.file}.")
logger.info(f"data: {data}")
return data
def monoalphabetic(self):
"""
Encrypts or decrypts the file using a monoalphabetic cipher.
:return string: The encrypted/decrypted file data.
"""
key = list(dict.fromkeys(self.key))
cipher_text = list(self.CHARS)
key = list(dict.fromkeys(key))
for index, key_char in enumerate(key, 0):
if key_char in self.CHARS:
cipher_text.remove(key_char)
cipher_text.insert(index, key_char)
cipher = dict(zip(self.CHARS, cipher_text))
logger.info(f"cipher: {cipher}")
keys = cipher.keys()
values = cipher.values()
data = ""
for character in self.file_data:
try:
if self.crypt_type == "encrypt":
data += list(keys)[list(values).index(character)]
else:
data += list(values)[list(keys).index(character)]
except (ValueError, TypeError) as e:
logger.error(e)
print(f"Invalid character '{character}' in file {self.file}.")
logger.info(f"data: {data}")
return data
def polyalphabetic(self):
"""
Encrypts or decrypts the file using a polyalphabetic cipher.
:return string: The encrypted/decrypted file data.
"""
sequence = []
for key_char in self.key:
for i, char in enumerate(self.CHARS, 0):
if key_char == char:
sequence.append(i)
map_data = []
for data_char in range(0, len(self.file_data), 3):
for char in sequence:
map_data.append(char)
pair_data = []
for x in zip(self.file_data, map_data):
pair_data.append(x)
crypt_operator = add if self.crypt_type == "encrypt" else sub
data = []
chars = list(self.CHARS * 2)
for pair in pair_data:
try:
char, shift = pair
new_char_index = crypt_operator(self.CHARS.index(char), shift)
data.append(chars[new_char_index])
except (ValueError, TypeError) as e:
logger.error(e)
print(f"Invalid character '{character}' in file {self.file}.")
logger.info(f"data: {data}")
return "".join(data)
@property
def file(self):
"""
Gets the file for the encryption/decryption method.
:return string/int: The file.
"""
return self._file
@file.setter
def file(self, value=None):
"""
Sets the file to the specified value if it is valid.
:param string value: The value to set as the file.
:return None:
"""
if Check.file_exists(value):
self._file = value
else:
print(f"File '{value}' does not exist")
raise SystemExit
@property
def file_data(self):
"""
Gets the contents of the data file.
:return string: The file contents.
"""
return self.read(self.file)
@property
def crypt_type(self):
"""
Gets the cryptography type, either encryption or decryption.
:return string: The encryption/decryption type.
"""
return self._crypt_type
@crypt_type.setter
def crypt_type(self, value=None):
"""
Sets the cryptography type to either 'encrypt' or 'decrypt'.
:param string value: The value to set as the cryptography type.
:return None:
"""
crypt_type = value.upper()
fail_message = "You must choose either encryption or decryption."
is_in, crypt_type = Check.is_in(crypt_type, 'E', 'D',
error_message=fail_message)
if is_in:
if crypt_type == "E":
self._crypt_type = "encrypt"
elif crypt_type == "D":
self._crypt_type = "decrypt"
else:
logger.error(f"crypt_type was: {self._crypt_type}")
raise ValueError(f"crypt_type was: {self._crypt_type}")
else:
raise SystemExit
@property
def crypt_method(self):
"""
Get the method of cryptography.
:return string: The method of cryptography.
"""
return self._crypt_method
@crypt_method.setter
def crypt_method(self, value=None):
"""
Set the method of cryptography.
:param value: The value to set as the method of cryptography.
:return None:
"""
message = """
Please choose from the following encryption/decryption methods:
Caesar Cipher: 'C'
        Monoalphabetic Cipher: 'M'
Polyalphabetic Cipher: 'P'"""
value = value.upper() if value is not None else value
is_valid_method, crypt_method = Check.is_in(value, 'C', 'M', 'P',
error_message=message)
if is_valid_method:
self._crypt_method = crypt_method
else:
raise SystemExit
@property
def key(self):
"""
Gets the key for the encryption/decryption method.
:return string/int: The key.
"""
return self._key
@key.setter
def key(self, value=None):
"""
        Sets the key to the specified value if it is valid.
:param string value: The value to assign to the key.
:return None:
"""
if self.crypt_method == 'C':
key_type = "number"
else:
key_type = "string"
input_message = f"Please enter a {key_type} as a " \
f"{self.crypt_type}ion key\n>> "
if value is None:
key = input(input_message)
else:
key = value
is_valid_key, key = Check.is_valid_key(key, self.crypt_method)
if is_valid_key:
self._key = key
else:
raise ValueError(f"Key{key} is invalid")
@staticmethod
def read(file):
"""
Read the file and return its contents.
:param string file: The file to read.
:return string: The file contents.
"""
with open(file, 'r') as file:
return file.read()
@staticmethod
def write(file, text):
"""
Write to the given file.
:param string file: The file to write to.
:param string text: The content to write to the file.
:return None:
"""
with open(file, 'w') as f:
f.write(text)
class Check:
@staticmethod
def file_exists(file):
"""
Check if the file exists by reading it.
:param string file: The file to check.
:return True | False: Does the file exist?
"""
try:
Cryptography.read(file)
return True
except (FileNotFoundError, FileExistsError):
return False
@staticmethod
def is_integer(value):
"""
Checks if the value is an integer.
:param string value: The value you want to check.
:returns True & value | False: Is the value an integer?
"""
try:
return True, int(value)
except (ValueError, TypeError):
return False
@staticmethod
def is_in(entered, *required, error_message=None):
"""
Checks entered values are valid by looking in the required values for a
match.
:param string entered: The entered values.
:param string required: The values to check against.
:param string error_message: An optional fail message.
:return True & entered | False: Is the entered value in the required
values?
"""
try:
while (str(entered).upper() not in required and
str(entered).lower() not in required):
logger.info(f"entered: {entered} required: {required}")
if error_message:
print(error_message)
print("Please enter from the following:\n" +
", ".join(char for char in required))
entered = input(">> ").upper()
except (ValueError, NameError):
return False
return True, entered
@staticmethod
def is_valid_key(key, crypt_method):
"""
Checks if the key is valid based on the cryptography method.
:param string | integer key: The key to check.
:param string crypt_method: The type of encryption/decryption.
:return True & string (key) | False: Is the key valid?
"""
logger.info(f"key: {key}, crypt_method: {crypt_method}")
if crypt_method == 'C':
while type(key) is not int or key not in range(0, 95):
try:
key = Check.is_integer(key)[1]
if key not in range(0, 95):
raise ValueError
except (TypeError, ValueError):
print("You must enter an integer between 1 and 95!")
key = input("Enter an encryption key\n>> ")
elif crypt_method in ('M', 'P'):
pass
else:
return False
return True, key
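# Example (illustrative sketch): a minimal Caesar-cipher round trip using the
# classes above. The file names "plain.txt" and "secret.txt" are assumptions
# made for this example only; nothing below is called automatically.
def _caesar_round_trip_demo():
    encrypter = Cryptography()
    encrypter.file = "plain.txt"              # must already exist on disk
    encrypter.crypt_type = "E"                # stored internally as "encrypt"
    encrypter.crypt_method = "C"              # Caesar cipher
    encrypter.key = 3                         # shift each character by 3
    Cryptography.write("secret.txt", encrypter.caesar_cipher())
    decrypter = Cryptography()
    decrypter.file = "secret.txt"
    decrypter.crypt_type = "D"
    decrypter.crypt_method = "C"
    decrypter.key = 3
    return decrypter.caesar_cipher()          # equals the original plain text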
def main():
""" Beginning of the program. """
# file = None
# for arg in sys.argv:
# if ".txt" in arg or ".py" not in arg or ".log" not in arg:
# file = arg
file = input("Enter a file: ")
file_data = Cryptography()
file_data.file = file
crypt_type = input("Please enter 'E' to encrypt or 'D' to decrypt\n>> ")
file_data.crypt_type = crypt_type
crypt_type = "encrypt" if crypt_type == 'E' else "decrypt"
file_data.crypt_method = file_data.crypt_method
key = input("Please enter a key for your data\n>> ")
file_data.key = key
print(f"crypt_method: {file_data.crypt_method}")
new_data = file_data.crypt_methods[file_data.crypt_method]()
crypt_methods = defaultdict(str,
{'C': "Caesar",
'M': "Monoalphabetic",
'P': "Polyalphabetic"})
if DEBUG is False:
crypt_method = crypt_methods[file_data.crypt_method]
new_file_name = f"{crypt_method}_{crypt_type.capitalize()}ed.txt"
logger.info(f"{type(new_data)}: {new_data}")
Cryptography.write(new_file_name, new_data)
print(f"Your new {crypt_type}ed file has been created as " +
f"{new_file_name}.")
if __name__ == "__main__":
main()
``` |
{
"source": "JoshuaBotha/python_pyside2_template",
"score": 3
} |
#### File: python_pyside2_template/src/my_logger.py
```python
import logging
import sys
from typing import Union
def setup_logger(logger: Union[str, logging.Logger],
is_main: bool = None) -> logging.Logger:
assert type(logger) in [str, logging.Logger], "Provided logger not "\
"correct type"
class StdOutFilter(logging.Filter):
def filter(self, record: logging.LogRecord):
return record.levelno in (logging.DEBUG,
logging.INFO, logging.WARNING)
class StdErrFilter(logging.Filter):
def filter(self, record: logging.LogRecord):
return record.levelno not in (logging.DEBUG,
logging.INFO, logging.WARNING)
if type(logger) is str:
logger = logging.getLogger(logger)
if is_main:
logger.root.handlers = []
line = '-' * 80
fmt = f'{line}\n\n%(asctime)s %(threadName)s ' \
f'%(levelname)s: %(message)s'
formatter = logging.Formatter(fmt=fmt)
cli_err = logging.StreamHandler(stream=sys.stderr)
cli_err.setLevel(logging.ERROR)
cli_err.setFormatter(formatter)
cli_err.addFilter(StdErrFilter())
cli_out = logging.StreamHandler(stream=sys.stdout)
cli_out.setLevel(logging.DEBUG)
cli_out.setFormatter(formatter)
cli_out.addFilter(StdOutFilter())
file_log = logging.FileHandler(filename='log.txt', mode='w+')
file_log.setLevel(logging.DEBUG)
file_log.setFormatter(formatter)
root_logger = logging.root
root_logger.setLevel(logging.ERROR)
root_logger.addHandler(cli_err)
# logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(cli_out)
num_args = len(sys.argv) - 1
if num_args == 0 or (num_args > 0 and '--dev' not in sys.argv):
root_logger.addHandler(file_log)
# logger.debug('Debug test')
# logger.info('Info test')
# logger.warning('Warning test')
# logger.error('Error test')
# logger.critical('Critical test')
return logger
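# Example (illustrative sketch): how an entry point might configure logging
# once and how other modules would then request named loggers. The module
# names "my_app" and "my_app.worker" are assumptions for this example only.
def _logging_demo():
    app_logger = setup_logger("my_app", is_main=True)
    app_logger.info("application started")        # stdout, via StdOutFilter
    app_logger.error("something went wrong")       # stderr, via the root handler
    worker_logger = setup_logger("my_app.worker")  # later calls just fetch a named logger
    worker_logger.debug("worker detail")           # propagates to the handlers above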
```
#### File: python_pyside2_template/src/processes.py
```python
import multiprocessing as mp
from queue import Empty
from typing import List
from uuid import UUID, uuid1
from my_logger import setup_logger
logger = setup_logger(__name__)
def create_queue() -> mp.JoinableQueue:
return mp.JoinableQueue()
def get_empty_queue_exception() -> type:
return Empty
def get_max_num_processes() -> int:
return mp.cpu_count()
def locate_uuid(object_list: List[object], wanted_uuid: UUID):
all_have_uuid = all([hasattr(obj, 'uuid') for obj in object_list])
assert all_have_uuid, "Not all objects in object_list have uuid's"
uuid_list = [obj.uuid for obj in object_list]
if wanted_uuid not in uuid_list:
return False, None, None
else:
uuid_ind = uuid_list.index(wanted_uuid)
return True, uuid_ind, uuid_list[uuid_ind]
class ProcessTask:
def __init__(self, obj: object, method_name: str):
assert hasattr(obj, method_name), "Object does not have provided " \
"method"
self.uuid = uuid1()
self.obj = obj
self.method_name = method_name
class ProcessTaskResult:
def __init__(self, task_uuid: UUID,
task_return,
new_task_obj: ProcessTask):
self.task_uuid = task_uuid
self.task_return = task_return
self.new_task_obj = new_task_obj
class SingleProcess(mp.Process):
def __init__(self, task_queue: mp.JoinableQueue,
result_queue: mp.JoinableQueue):
mp.Process.__init__(self)
        assert type(task_queue) is mp.queues.JoinableQueue, \
            'task_queue provided is not the correct type'
        assert type(result_queue) is mp.queues.JoinableQueue, \
            'result_queue provided is not the correct type'
self.task_queue = task_queue
self.result_queue = result_queue
def run(self):
try:
done = False
while not done:
task = self.task_queue.get()
if task is None:
done = True
self.task_queue.task_done()
self.result_queue.put(True)
else:
task_run = getattr(task.obj, task.method_name)
task_return = task_run()
process_result = ProcessTaskResult(task_uuid=task.uuid,
task_return=task_return,
new_task_obj=task.obj)
self.result_queue.put(process_result)
self.task_queue.task_done()
except Exception as e:
self.result_queue.put(e)
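# Example (illustrative sketch): the intended task/result round trip. _Job is
# a stand-in object invented for this example; it simply exposes the method
# named in the ProcessTask. Nothing below is executed automatically.
class _Job:
    def run(self):
        return 42
def _single_process_demo():
    task_queue = create_queue()
    result_queue = create_queue()
    worker = SingleProcess(task_queue, result_queue)
    worker.start()
    task_queue.put(ProcessTask(obj=_Job(), method_name="run"))
    task_queue.put(None)              # poison pill: tells the worker to stop
    task_queue.join()                 # block until both queue items are handled
    result = result_queue.get()       # ProcessTaskResult with task_return == 42
    worker.join()
    return result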
``` |
{
"source": "JoshuaBotha/python-qt-template",
"score": 3
} |
#### File: python-qt-template/src/dbg.py
```python
from termcolor import colored as termcolor
import cProfile
import pstats
import io
def profile(fnc):
"""A decorator that uses cProfile to profile a function"""
def inner(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
retval = fnc(*args, **kwargs)
pr.disable()
s = io.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
return retval
return inner
def p(debug_print, debug_from=None):
"""Prints text to terminal for debugging."""
if debug_from is None:
print_text = termcolor("[DEBUG]\t\t", 'blue') + debug_print
else:
print_text = termcolor("[DEBUG]\t\t", 'blue') + termcolor(debug_from + ":\t", 'green') + debug_print
print(print_text)
def u(debug_print: str, debug_from: str = None, end: bool = False):
"""
Updates the colourised string of the terminal with debug_print. If end is True the line is ended
Parameters
----------
debug_print: str
Test to be used updated the terminal with.
debug_from: str, optional
To specify where debug text is being called from.
    end: bool, optional
If true the line is ended.
"""
if debug_from is None:
print_text = termcolor("[DEBUG]\t\t", 'blue') + debug_print
else:
print_text = termcolor("[DEBUG]\t\t", 'blue') + termcolor(debug_from + ":\t", 'green') + debug_print
if not end:
print('\r' + print_text, end='', flush=True)
else:
print('\r' + print_text, flush=True)
# sys.stdout.write('\r' + print_text)
# sys.stdout.flush()
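# Example (illustrative sketch): the @profile decorator together with the
# p()/u() debug printers. _count_squares is a throwaway function invented for
# this example; nothing below is executed automatically.
@profile
def _count_squares(n):
    return sum(i * i for i in range(n))
def _dbg_demo():
    p("starting demo", debug_from="demo")
    for step in range(3):
        u(f"working... step {step + 1}/3", debug_from="demo", end=(step == 2))
    _count_squares(200_000)           # prints a cProfile report when it returns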
``` |
{
"source": "JoshuaBrockschmidt/ideal_ANN",
"score": 3
} |
#### File: JoshuaBrockschmidt/ideal_ANN/ANN.py
```python
import math
from random import seed as srand, random as rand
import sys
from time import time
output = sys.stdout
srand(time())
def setOutput(out):
    global output
    output = out
def sigmoid(x):
return (1.0 / (1.0 + math.exp(-x)))
def sigmoidDeriv(x):
return (x * (1.0 - x))
class ANN:
def __init__(self, layers, learnRate, weights=[]):
self.inp = [ None for i in range(layers[0]) ]
self.hid = [ [None for i in range(layers[l])] for l in range(1, len(layers)-1) ]
self.out = [ None for i in range(layers[-1]) ]
self.learnRate = learnRate
# If weights are specified, make sure given table is valid.
weightsDeclared = True
if len(weights) > 0:
if len(weights) == len(layers):
for lyr in range(len(layers)):
if len(weights[lyr]) != layers[lyr]:
output.write("Error: invalid weights argument for ANN; there are " +
                            str(layers[lyr]) +
                            " neurons in layer " +
                            str(lyr) +
                            "; weights will be randomized\n")
weightsDeclared = False
break
self.weights = weights[:]
else:
output.write("Error: invalid weights argument for ANN; there are " +
str(len(layers)) +
"layers; weights will be randomized\n")
weightsDeclared = False
else:
weightsDeclared = False
        # If self.weights have not been declared
if not weightsDeclared:
self.weights = []
for lyr in range(len(layers)-1):
self.weights.append([])
for n in range(layers[lyr]):
# Add additional weight (+1) for bias.
self.weights[lyr].append(
[ self._randWeight() for n2 in range(layers[lyr+1]+1) ]
)
def _randWeight(self):
return rand() - 0.5
def feedforward(self, ins):
# Validate arguments
if len(ins) != len(self.inp):
output.write("Error: could not feed forward ANN; there are " +
str(len(self.inp)) +
" input neurons\n")
return
# Update input layer
self.inp = ins[:]
# Calculate for hidden layers
lastLyr = self.inp
for hid_i in range(len(self.hid)):
# Calculate for individual layer's nuerons
for n_cur in range(len(self.hid[hid_i])):
sum = 0.0
for n_last in range(len(lastLyr)):
# Note that index of hidden node in self.weights is
# 1 greater than its index in self.hid and hid_err
# Thus, in self.weights, hid_i references
# the layer before it
sum += (
lastLyr[n_last] * self.weights[hid_i][n_last][n_cur]
)
# Add bias
sum += self.weights[hid_i][-1][n_cur]
self.hid[hid_i][n_cur] = sigmoid(sum)
lastLyr = self.hid[hid_i]
# Calculate for output layer
for n_cur in range(len(self.out)):
sum = 0.0
for n_last in range(len(lastLyr)):
sum += lastLyr[n_last] * self.weights[-1][n_last][n_cur]
# Add bias
sum += self.weights[-1][-1][n_cur]
self.out[n_cur] = sigmoid(sum)
# Feedforwarding was successful
return True
def backpropagate(self, target):
# Validate arguments
if len(target) != len(self.out):
output.write("Error: could not backpropagate ANN; there are " +
                         str(len(self.out)) +
                         " output neurons\n")
return False
# Calculate output layer error
out_err = []
for n in range(len(self.out)):
out_err.append((target[n] - self.out[n]) * sigmoidDeriv(self.out[n]))
# Calculate hidden layers' error
hid_err = [ [] for h in range(len(self.hid))]
lastLyr = self.out
lastErr = out_err
# From top to bottom; modify weights that lead into hidden node
for hid_i in range(len(self.hid)-1, -1, -1):
for n_cur in range(len(self.hid[hid_i])):
hid_err[hid_i].append(0.0)
for n_last in range(len(lastErr)):
# Note that index of hidden node in self.weights is
# 1 greater than its index in self.hid and hid_err
# Thus, in self.weights, hid_i references
# the layer before it
hid_err[hid_i][n_cur] += lastErr[n_last] * self.weights[hid_i+1][n_cur][n_last]
hid_err[hid_i][n_cur] *= sigmoidDeriv(self.hid[hid_i][n_cur])
lastLyr = self.hid[hid_i]
lastErr = hid_err[hid_i]
# Calculate new weights and biases for hidden nodes
lastLyr = self.inp
# From bottom to top
for hid_i in range(len(self.hid)):
for n_cur in range(len(self.hid[hid_i])):
for n_last in range(len(lastLyr)):
# Note that index of hidden node in self.weights is
# 1 greater than its index in self.hid and hid_err
# Thus, in self.weights, hid_i references
# the layer before it
self.weights[hid_i][n_last][n_cur] += (
self.learnRate * hid_err[hid_i][n_cur] * lastLyr[n_last]
)
# Update the bias
self.weights[hid_i][-1][n_cur] += (
self.learnRate * hid_err[hid_i][n_cur]
)
lastLyr = self.hid[hid_i]
# Calculate new weights and biases for output nodes
for n_cur in range(len(self.out)):
for n_last in range(len(lastLyr)):
self.weights[-1][n_last][n_cur] += (
self.learnRate * out_err[n_cur] * lastLyr[n_last]
)
# Update the bias
self.weights[-1][-1][n_cur] += (
self.learnRate * out_err[n_cur]
)
# Backpropagation was successful
return True
def errSqr(self, target):
# Validate arguments
if len(target) != len(self.out):
output.write("Error: could not get error squared for ANN; there are " +
                         str(len(self.out)) +
                         " output neurons\n")
return False
err = 0.0
for n_out in range(len(self.out)):
err += (self.out[n_out] - target[n_out])**2
err *= 0.5
return err
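# Example (illustrative sketch): training a small 2-2-1 network on the XOR
# truth table. The epoch count and learning rate are arbitrary choices and
# convergence is not guaranteed for every random initialisation.
def _xor_demo(epochs=10000):
    net = ANN(layers=[2, 2, 1], learnRate=0.5)
    samples = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
    for _ in range(epochs):
        for ins, target in samples:
            net.feedforward(ins)
            net.backpropagate(target)
    results = []
    for ins, _ in samples:
        net.feedforward(ins)
        results.append((ins, round(net.out[0], 3)))
    return results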
``` |
{
"source": "JoshuaBrockschmidt/simple-rnn",
"score": 4
} |
#### File: JoshuaBrockschmidt/simple-rnn/rnn.py
```python
import copy, numpy as np
import sequences
sigmoid = lambda x: 1 / (1 + np.exp(-x))
sigmoid_deriv = lambda x: x * (1 - x)
class SimpleRNN():
"""
A simple 3-layer RNN.
Original code from https://youtu.be/cdLUzrjnlr4.
"""
def __init__(self, alpha, input_dim, hidden_dim, output_dim):
"""
TODO
"""
self.alpha = alpha
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
# Initialize weights.
self.synapse_0 = 2 * np.random.random((input_dim, hidden_dim)) - 1
self.synapse_1 = 2 * np.random.random((hidden_dim, output_dim)) - 1
self.synapse_h = 2 * np.random.random((hidden_dim, hidden_dim)) - 1
def train(self, train_input, train_output, steps):
"""
Trains the RNN against training data.
"""
binary_dim = 8
synapse_0_update = np.zeros_like(self.synapse_0)
synapse_1_update = np.zeros_like(self.synapse_1)
synapse_h_update = np.zeros_like(self.synapse_h)
for i in range(steps):
# Select random data vectors.
j = np.random.randint(len(train_input))
a = train_input[j][0]
b = train_input[j][1]
c = train_output[j][0]
# Network's guess.
d = np.zeros_like(c)
overall_error = 0
layer_2_deltas = []
layer_1_values = []
layer_1_values.append(np.zeros(self.hidden_dim))
for j in range(binary_dim):
X = np.array([[a[binary_dim - j - 1], b[binary_dim - j - 1]]])
y = np.array([[c[binary_dim - j - 1]]]).T
# Hidden layer including influence from previous hidden layer.
layer_1 = sigmoid(np.dot(X, self.synapse_0) + np.dot(layer_1_values[-1], self.synapse_h))
# Output layer.
layer_2 = sigmoid(np.dot(layer_1, self.synapse_1))
# Calculate errors.
layer_2_error = y - layer_2
layer_2_deltas.append(layer_2_error * sigmoid_deriv(layer_2))
overall_error += np.abs(layer_2_error[0])
# Round output to nearest integer.
d[binary_dim - j - 1] = np.round(layer_2[0][0])
# Store hidden layer for next training iteration.
layer_1_values.append(copy.deepcopy(layer_1))
future_layer_1_delta = np.zeros(self.hidden_dim)
for j in range(binary_dim):
X = np.array([[a[j], b[j]]])
layer_1 = layer_1_values[-j - 1]
prev_layer_1 = layer_1_values[-j - 2]
# Error at output layer.
layer_2_delta = layer_2_deltas[-j - 1]
# Error at hidden layer.
layer_1_delta = (future_layer_1_delta.dot(self.synapse_h.T) + layer_2_delta.dot(self.synapse_1.T)) * sigmoid_deriv(layer_1)
synapse_1_update += np.atleast_2d(layer_1).T.dot(layer_2_delta)
synapse_h_update += np.atleast_2d(prev_layer_1).T.dot(layer_1_delta)
synapse_0_update += X.T.dot(layer_1_delta)
future_layer_1_delta = layer_1_delta
# Update weights
self.synapse_0 += synapse_0_update * self.alpha
self.synapse_1 += synapse_1_update * self.alpha
self.synapse_h += synapse_h_update * self.alpha
synapse_0_update *= 0
synapse_1_update *= 0
synapse_h_update *= 0
if i % 1000 == 0:
print("Error: {}".format(overall_error))
print("Output: {}".format(d))
print("Expected: {}".format(c))
out = 0
for i, x in enumerate(reversed(d)):
out += x * 2**i
#print("{} + {} = {}".format(a_int, b_int, out))
print()
def eval(self):
"""
        Evaluates the model against a dataset.
"""
pass
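# Example (illustrative sketch): a forward-only pass that reuses a trained
# SimpleRNN's weights to predict the binary sum of one input pair. It mirrors
# the loop inside train() and is one way eval() could eventually be filled in;
# the helper itself is an assumption, not part of the original module.
def predict_sum(model, a_bits, b_bits):
    binary_dim = len(a_bits)
    prediction = np.zeros(binary_dim)
    prev_hidden = np.zeros((1, model.hidden_dim))
    for j in range(binary_dim):
        # Bits are fed least-significant first, exactly as in train().
        X = np.array([[a_bits[binary_dim - j - 1], b_bits[binary_dim - j - 1]]])
        prev_hidden = sigmoid(np.dot(X, model.synapse_0) + np.dot(prev_hidden, model.synapse_h))
        out = sigmoid(np.dot(prev_hidden, model.synapse_1))
        prediction[binary_dim - j - 1] = np.round(out[0][0])
    return prediction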
def main():
int2binary = {}
binary_dim = 8
largest_number = 2**binary_dim
binary = np.unpackbits(
np.array([range(largest_number)], dtype=np.uint8).T, axis=1)
for i in range(largest_number):
int2binary[i] = binary[i]
inputs = []
outputs = []
for a in range(int(largest_number / 2)):
for b in range(int(largest_number / 2)):
inputs.append((int2binary[a], int2binary[b]))
outputs.append((int2binary[a + b], ))
model = SimpleRNN(0.1, 2, 16, 1)
model.train(inputs, outputs, 30000)
if __name__ == "__main__":
main()
``` |
{
"source": "joshuabuildsthings/GamestonkTerminal",
"score": 3
} |
#### File: alternative/oss/github_view.py
```python
__docformat__ = "numpy"
import logging
import os
from typing import Optional, List
from matplotlib import pyplot as plt
from matplotlib import ticker
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.alternative.oss import github_model
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_long_number_format_with_type_check,
)
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_star_history(
repo: str, export: str = "", external_axes: Optional[List[plt.Axes]] = None
) -> None:
"""Display repo summary [Source: https://api.github.com]
Parameters
----------
repo : str
Repository to display star history. Format: org/repo, e.g., openbb-finance/openbbterminal
export : str
Export dataframe data to csv,json,xlsx file
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
df = github_model.get_stars_history(repo)
if not df.empty:
if external_axes is None:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
ax.plot(df["Date"], df["Stars"])
ax.set_xlabel("Date")
ax.set_ylabel("Stars")
ax.set_title(f"Star History for {repo}")
theme.style_primary_axis(ax)
if external_axes is None:
theme.visualize_output()
export_data(export, os.path.dirname(os.path.abspath(__file__)), "sh", df)
@log_start_end(log=logger)
def display_top_repos(
sortby: str,
categories: str,
limit: int,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
) -> None:
"""Display repo summary [Source: https://api.github.com]
Parameters
----------
sortby : str
Sort repos by {stars, forks}
categories : str
Check for repo categories. If more than one separate with a comma: e.g., finance,investment. Default: None
limit : int
Number of repos to look at
export : str
Export dataframe data to csv,json,xlsx file
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
df = github_model.get_top_repos(categories=categories, sortby=sortby, top=limit)
if not df.empty:
if sortby == "forks":
df = df.sort_values(by="forks_count")
elif sortby == "stars":
df = df.sort_values(by="stargazers_count")
if external_axes is None:
_, ax = plt.subplots(figsize=(14, 8), dpi=PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
for _, row in df.iterrows():
ax.barh(
y=row["full_name"],
width=row["stargazers_count" if sortby == "stars" else "forks_count"],
height=0.5,
)
ax.set_xlabel(sortby.capitalize())
ax.get_xaxis().set_major_formatter(
ticker.FuncFormatter(
lambda x, _: lambda_long_number_format_with_type_check(x)
)
)
ax.yaxis.set_label_position("left")
ax.yaxis.set_ticks_position("left")
ax.set_ylabel("Repository Full Name")
category_substr = "ies" if "," in categories else "y"
category_str = f"categor{category_substr} {categories} " if categories else ""
ax.set_title(f"Top repos {category_str}sorted by {sortby}")
theme.style_primary_axis(ax)
if external_axes is None:
theme.visualize_output()
export_data(export, os.path.dirname(os.path.abspath(__file__)), "tr", df)
@log_start_end(log=logger)
def display_repo_summary(repo: str, export: str = "") -> None:
"""Display repo summary [Source: https://api.github.com]
Parameters
----------
repo : str
Repository to display summary. Format: org/repo, e.g., openbb-finance/openbbterminal
export : str
Export dataframe data to csv,json,xlsx file
"""
df = github_model.get_repo_summary(repo)
if not df.empty:
print_rich_table(
df, headers=list(df.columns), show_index=False, title="Repo summary"
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"rs",
df,
)
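# Example (illustrative sketch): calling the view helpers above directly from
# a script. The repository name is only an illustration and the calls hit the
# live GitHub API; nothing below is executed automatically.
def _github_view_demo():
    display_repo_summary("openbb-finance/OpenBBTerminal")
    display_star_history("openbb-finance/OpenBBTerminal")
    display_top_repos(sortby="stars", categories="", limit=5)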
```
#### File: cryptocurrency/discovery/coinpaprika_view.py
```python
__docformat__ = "numpy"
import logging
import os
import openbb_terminal.cryptocurrency.discovery.coinpaprika_model as paprika
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_search_results(
query: str,
category: str,
top: int = 10,
sortby: str = "id",
descend: bool = False,
export: str = "",
) -> None:
"""Search over CoinPaprika. [Source: CoinPaprika]
Parameters
----------
query: str
Search query
category: str
Categories to search: currencies|exchanges|icos|people|tags|all. Default: all
top: int
Number of records to display
sortby: str
Key by which to sort data
descend: bool
Flag to sort data descending
export : str
Export dataframe data to csv,json,xlsx file
"""
if category.lower() == "all":
category = "currencies,exchanges,icos,people,tags"
df = paprika.get_search_results(query=query, category=category)
if df.empty:
console.print(
f"No results for search query '{query}' in category '{category}'\n"
)
return
df = df.sort_values(by=sortby, ascending=descend)
print_rich_table(
df.head(top),
headers=list(df.columns),
show_index=False,
title="CoinPaprika Results",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"search",
df,
)
```
#### File: openbb_terminal/economy/fred_view.py
```python
__docformat__ = "numpy"
import logging
import os
import textwrap
from typing import Dict, Optional, List
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.config_terminal import theme
from openbb_terminal.decorators import check_api_key
from openbb_terminal.decorators import log_start_end
from openbb_terminal.economy import fred_model
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def format_units(num: int) -> str:
"""Helper to format number into string with K,M,B,T. Number will be in form of 10^n"""
number_zeros = int(np.log10(num))
if number_zeros < 3:
return str(num)
if number_zeros < 6:
return f"{int(num/1000)}K"
if number_zeros < 9:
return f"{int(num/1_000_000)}M"
if number_zeros < 12:
return f"{int(num/1_000_000_000)}B"
if number_zeros < 15:
return f"{int(num/1_000_000_000_000)}T"
return f"10^{number_zeros}"
@log_start_end(log=logger)
@check_api_key(["API_FRED_KEY"])
def notes(series_term: str, num: int) -> pd.DataFrame:
"""Print Series notes. [Source: FRED]
Parameters
----------
series_term : str
Search for these series_term
num : int
Maximum number of series notes to display
"""
df_search = fred_model.get_series_notes(series_term)
if df_search.empty:
return
df_search["notes"] = df_search["notes"].apply(
lambda x: "\n".join(textwrap.wrap(x, width=100)) if isinstance(x, str) else x
)
df_search["title"] = df_search["title"].apply(
lambda x: "\n".join(textwrap.wrap(x, width=50)) if isinstance(x, str) else x
)
print_rich_table(
df_search[["id", "title", "notes"]].head(num),
title=f"[bold]Search results for {series_term}[/bold]",
show_index=False,
headers=["Series ID", "Title", "Description"],
)
@log_start_end(log=logger)
@check_api_key(["API_FRED_KEY"])
def display_fred_series(
d_series: Dict[str, Dict[str, str]],
start_date: str,
end_date: str = "",
raw: bool = False,
export: str = "",
limit: int = 10,
external_axes: Optional[List[plt.Axes]] = None,
):
"""Display (multiple) series from https://fred.stlouisfed.org. [Source: FRED]
Parameters
----------
d_series : str
FRED Series ID from https://fred.stlouisfed.org. For multiple series use: series1,series2,series3
start_date : str
Starting date (YYYY-MM-DD) of data
end_date : str
Ending date (YYYY-MM-DD) of data
raw : bool
Output only raw data
export : str
Export data to csv,json,xlsx or png,jpg,pdf,svg file
limit: int
Number of raw data rows to show
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
series_ids = list(d_series.keys())
data = fred_model.get_aggregated_series_data(d_series, start_date, end_date)
if data.empty:
logger.error("No data")
console.print("[red]No data available.[/red]\n")
else:
# Try to get everything onto the same 0-10 scale.
# To do so, think in scientific notation. Divide the data by whatever the E would be
if external_axes is None:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
if len(series_ids) == 1:
s_id = series_ids[0]
sub_dict: Dict = d_series[s_id]
title = f"{sub_dict['title']} ({sub_dict['units']})"
ax.plot(data.index, data, label="\n".join(textwrap.wrap(title, 80)))
else:
for s_id, sub_dict in d_series.items():
data_to_plot = data[s_id].dropna()
exponent = int(np.log10(data_to_plot.max()))
data_to_plot /= 10**exponent
multiplier = f"x {format_units(10**exponent)}" if exponent > 0 else ""
title = f"{sub_dict['title']} ({sub_dict['units']}) {'['+multiplier+']' if multiplier else ''}"
ax.plot(
data_to_plot.index,
data_to_plot,
label="\n".join(textwrap.wrap(title, 80))
if len(series_ids) < 5
else title,
)
ax.legend(
bbox_to_anchor=(0, 0.40, 1, -0.52),
loc="upper right",
mode="expand",
borderaxespad=0,
prop={"size": 9},
)
ax.set_xlim(data.index[0], data.index[-1])
theme.style_primary_axis(ax)
if external_axes is None:
theme.visualize_output()
data.index = [x.strftime("%Y-%m-%d") for x in data.index]
if raw:
print_rich_table(
data.tail(limit),
headers=list(data.columns),
show_index=True,
index_name="Date",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"fred",
data,
)
@log_start_end(log=logger)
@check_api_key(["API_FRED_KEY"])
def display_yield_curve(date: datetime, external_axes: Optional[List[plt.Axes]] = None):
"""Display yield curve based on US Treasury rates for a specified date.
Parameters
----------
date: datetime
Date to get yield curve for
external_axes: Optional[List[plt.Axes]]
External axes to plot data on
"""
rates, date_of_yield = fred_model.get_yield_curve(date)
if rates.empty:
console.print(
f"[red]Yield data not found for {date.strftime('%Y-%m-%d')}[/red].\n"
)
return
if external_axes is None:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
ax.plot(rates.Maturity, rates.Rate, "-o")
ax.set_xlabel("Maturity")
ax.set_ylabel("Rate (%)")
theme.style_primary_axis(ax)
if external_axes is None:
ax.set_title(f"US Yield Curve for {date_of_yield.strftime('%Y-%m-%d')} ")
theme.visualize_output()
```
#### File: brokers/robinhood/robinhood_controller.py
```python
__docformat__ = "numpy"
import argparse
import logging
from typing import List
from prompt_toolkit.completion import NestedCompleter
from openbb_terminal.decorators import check_api_key
from openbb_terminal import feature_flags as obbff
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_ONLY_RAW_DATA_ALLOWED,
parse_known_args_and_warn,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.portfolio.brokers.robinhood import (
robinhood_model,
robinhood_view,
)
from openbb_terminal.rich_config import console, MenuText
logger = logging.getLogger(__name__)
class RobinhoodController(BaseController):
CHOICES_COMMANDS = ["holdings", "history", "login"]
valid_span = ["day", "week", "month", "3month", "year", "5year", "all"]
valid_interval = ["5minute", "10minute", "hour", "day", "week"]
PATH = "/portfolio/bro/rh/"
def __init__(self, queue: List[str] = None):
"""Constructor"""
super().__init__(queue)
if session and obbff.USE_PROMPT_TOOLKIT:
choices: dict = {c: {} for c in self.controller_choices}
choices["history"]["-i"] = {c: None for c in self.valid_interval}
choices["history"]["--interval"] = {c: None for c in self.valid_interval}
choices["history"]["-s"] = {c: None for c in self.valid_span}
choices["history"]["--span"] = {c: None for c in self.valid_span}
self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
"""Print help"""
mt = MenuText("portfolio/bro/rh/")
mt.add_cmd("login")
mt.add_raw("\n")
mt.add_cmd("holdings")
mt.add_cmd("history")
console.print(text=mt.menu_text, menu="Portfolio - Brokers - Robinhood")
@log_start_end(log=logger)
@check_api_key(["RH_USERNAME", "RH_PASSWORD"])
def call_login(self, _):
"""Process login"""
robinhood_model.login()
@log_start_end(log=logger)
def call_holdings(self, other_args: List[str]):
"""Process holdings command"""
parser = argparse.ArgumentParser(
prog="holdings",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Display info about your trading accounts on Robinhood",
)
ns_parser = parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
robinhood_view.display_holdings(export=ns_parser.export)
@log_start_end(log=logger)
def call_history(self, other_args: List[str]):
"""Process history command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="history",
description="""Historical Portfolio Info""",
)
parser.add_argument(
"-s",
"--span",
dest="span",
type=str,
choices=self.valid_span,
default="3month",
help="Span of historical data",
)
parser.add_argument(
"-i",
"--interval",
dest="interval",
default="day",
choices=self.valid_interval,
type=str,
help="Interval to look at portfolio",
)
ns_parser = parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
robinhood_view.display_historical(
interval=ns_parser.interval,
span=ns_parser.span,
export=ns_parser.export,
)
```
#### File: stocks/due_diligence/fmp_view.py
```python
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import export_data, print_rich_table
from openbb_terminal.stocks.due_diligence import fmp_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def rating(ticker: str, num: int, export: str):
"""Display ratings for a given ticker. [Source: Financial Modeling Prep]
Parameters
----------
ticker : str
Stock ticker
num : int
Number of last days ratings to display
export : str
Export dataframe data to csv,json,xlsx file
"""
df = fmp_model.get_rating(ticker)
# TODO: This could be displayed in a nice rating plot over time
# TODO: Add coloring to table
if not df.empty:
l_recoms = [col for col in df.columns if "Recommendation" in col]
l_recoms_show = [
recom.replace("rating", "")
.replace("Details", "")
.replace("Recommendation", "")
for recom in l_recoms
]
l_recoms_show[0] = "Rating"
print_rich_table(
df[l_recoms].head(num),
headers=l_recoms_show,
show_index=True,
title="Rating",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"rot",
df,
)
```
#### File: stocks/fundamental_analysis/av_view.py
```python
__docformat__ = "numpy"
import logging
import os
from typing import List, Optional
from matplotlib import pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.helper_funcs import (
export_data,
print_rich_table,
plot_autoscale,
camel_case_split,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.fundamental_analysis import av_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_overview(ticker: str):
"""Alpha Vantage stock ticker overview
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
"""
df_fa = av_model.get_overview(ticker)
if df_fa.empty:
console.print("No API calls left. Try me later", "\n")
return
print_rich_table(
df_fa.drop(index=["Description"]),
headers=[""],
title=f"{ticker} Overview",
show_index=True,
)
console.print(f"Company Description:\n\n{df_fa.loc['Description'][0]}")
console.print("")
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_key(ticker: str):
"""Alpha Vantage key metrics
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
"""
df_key = av_model.get_key_metrics(ticker)
if df_key.empty:
return
print_rich_table(
df_key, headers=[""], title=f"{ticker} Key Metrics", show_index=True
)
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_income_statement(
ticker: str, limit: int, quarterly: bool = False, export: str = ""
):
"""Alpha Vantage income statement
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
limit: int
Number of past statements
quarterly: bool
Flag to get quarterly instead of annual
export: str
Format to export data
"""
df_income = av_model.get_income_statements(ticker, limit, quarterly)
if df_income.empty:
return
indexes = df_income.index
new_indexes = [camel_case_split(ind) for ind in indexes]
df_income.index = new_indexes
print_rich_table(
df_income,
headers=list(df_income.columns),
title=f"{ticker} Income Statement",
show_index=True,
)
export_data(export, os.path.dirname(os.path.abspath(__file__)), "income", df_income)
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_balance_sheet(
ticker: str, limit: int, quarterly: bool = False, export: str = ""
):
"""Alpha Vantage income statement
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
limit: int
Number of past statements
quarterly: bool
Flag to get quarterly instead of annual
export: str
Format to export data
"""
df_balance = av_model.get_balance_sheet(ticker, limit, quarterly)
if df_balance.empty:
return
indexes = df_balance.index
new_indexes = [camel_case_split(ind) for ind in indexes]
df_balance.index = new_indexes
print_rich_table(
df_balance,
headers=list(df_balance.columns),
title=f"{ticker} Balance Sheet",
show_index=True,
)
export_data(
export, os.path.dirname(os.path.abspath(__file__)), "balance", df_balance
)
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_cash_flow(
ticker: str, limit: int, quarterly: bool = False, export: str = ""
):
"""Alpha Vantage income statement
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
limit: int
Number of past statements
quarterly: bool
Flag to get quarterly instead of annual
export: str
Format to export data
"""
df_cash = av_model.get_cash_flow(ticker, limit, quarterly)
if df_cash.empty:
return
indexes = df_cash.index
new_indexes = [camel_case_split(ind) for ind in indexes]
df_cash.index = new_indexes
print_rich_table(
df_cash,
headers=list(df_cash.columns),
title=f"{ticker} Balance Sheet",
show_index=True,
)
export_data(export, os.path.dirname(os.path.abspath(__file__)), "cash", df_cash)
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_earnings(
ticker: str, limit: int, quarterly: bool = False, export: str = ""
):
"""Alpha Vantage earnings
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
    limit: int
Number of events to show
quarterly: bool
Flag to show quarterly instead of annual
export: str
Format to export data
"""
df_fa = av_model.get_earnings(ticker, quarterly)
if df_fa.empty:
return
print_rich_table(
df_fa.head(limit),
headers=list(df_fa.columns),
show_index=False,
title=f"{ticker} Earnings",
)
export_data(export, os.path.dirname(os.path.abspath(__file__)), "earnings", df_fa)
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_fraud(
ticker: str,
export: str = "",
help_text: bool = False,
color: bool = True,
detail: bool = False,
):
"""Fraud indicators for given ticker
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
export : str
Whether to export the dupont breakdown
help_text : bool
Whether to show help text
color : bool
Whether to show color in the dataframe
detail : bool
Whether to show the details for the mscore
"""
df = av_model.get_fraud_ratios(ticker, detail=detail)
if df.empty:
console.print("")
return
df_color = df.copy()
if color:
for column in df_color:
df_color[column] = df_color[column].astype(str)
df_color = df_color.apply(lambda x: av_model.replace_df(x.name, x), axis=1)
print_rich_table(
df_color,
headers=list(df_color.columns),
show_index=True,
title="Fraud Risk Statistics",
)
help_message = """
MSCORE:
An mscore above -1.78 indicates a high risk of fraud, and one between -2.22 and -1.78 indicates a medium risk of fraud.
ZSCORE:
A zscore less than 0.5 indicates a high risk of fraud.
Mckee:
A mckee less than 0.5 indicates a high risk of fraud.
"""
if help_text:
console.print(help_message)
    export_data(export, os.path.dirname(os.path.abspath(__file__)), "fraud", df)
return
@log_start_end(log=logger)
@check_api_key(["API_KEY_ALPHAVANTAGE"])
def display_dupont(
ticker: str,
raw: bool = False,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""Shows the extended dupont ratio
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
raw : str
Show raw data instead of a graph
export : bool
Whether to export the dupont breakdown
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
df = av_model.get_dupont(ticker)
if df.empty:
console.print("[red]Invalid response from AlphaVantage[/red]\n")
return
if raw:
print_rich_table(
df, headers=list(df.columns), show_index=True, title="Extended Dupont"
)
return
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
colors = theme.get_colors()
df.transpose().plot(kind="line", ax=ax, color=colors)
ax.set_title("Extended Dupont by Year")
theme.style_primary_axis(ax)
if not external_axes:
theme.visualize_output()
export_data(export, os.path.dirname(os.path.abspath(__file__)), "dupont", df)
```
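The viewers above all follow the same shape: fetch a DataFrame from `av_model`, return early if it is empty, render it with `print_rich_table`, and optionally export it. A minimal usage sketch, assuming the package is installed as `openbb_terminal` and `API_KEY_ALPHAVANTAGE` is configured (ticker and arguments are illustrative):
```python
from openbb_terminal.stocks.fundamental_analysis import av_view

av_view.display_overview("AAPL")                           # company profile table
av_view.display_income_statement("AAPL", limit=5)          # last 5 annual statements
av_view.display_cash_flow("AAPL", limit=4, quarterly=True, export="csv")
```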
#### File: stocks/fundamental_analysis/polygon_view.py
```python
__docformat__ = "numpy"
import logging
import os
from openbb_terminal.decorators import check_api_key, log_start_end
from openbb_terminal.helper_funcs import (
export_data,
lambda_long_number_format,
print_rich_table,
)
from openbb_terminal.stocks.fundamental_analysis import polygon_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
@check_api_key(["API_POLYGON_KEY"])
def display_fundamentals(
ticker: str,
financial: str,
limit: int = 10,
quarterly: bool = False,
export: str = "",
):
"""Display tickers balance sheet or income statement
Parameters
----------
ticker: str
Stock ticker
    financial: str
        Either balance or income
    limit: int
        Number of results to show
    quarterly: bool
Flag to get quarterly reports
export: str
Format to export data
"""
if financial == "balance":
fundamentals = polygon_model.get_financials(ticker, financial, quarterly)
title_str = "Balance Sheet"
elif financial == "income":
fundamentals = polygon_model.get_financials(ticker, financial, quarterly)
title_str = "Income Statement"
if fundamentals.empty:
# The empty data frame error handling done in model
return
# Snake case to english
fundamentals.index = fundamentals.index.to_series().apply(
lambda x: x.replace("_", " ").title()
)
# Readable numbers
fundamentals = fundamentals.applymap(lambda_long_number_format).fillna("-")
print_rich_table(
fundamentals.iloc[:, :limit].applymap(lambda x: "-" if x == "nan" else x),
show_index=True,
title=f"{ticker} {title_str}",
)
export_data(
export, os.path.dirname(os.path.abspath(__file__)), financial, fundamentals
)
```
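A short usage sketch for the Polygon viewer, assuming the module path mirrors the file header and `API_POLYGON_KEY` is set (ticker and arguments are illustrative):
```python
from openbb_terminal.stocks.fundamental_analysis import polygon_view

polygon_view.display_fundamentals("MSFT", financial="balance", limit=4)
polygon_view.display_fundamentals("MSFT", financial="income", quarterly=True, export="xlsx")
```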
#### File: options/screen/screener_controller.py
```python
__docformat__ = "numpy"
import argparse
import logging
import os
from typing import List
from prompt_toolkit.completion import NestedCompleter
from openbb_terminal import feature_flags as obbff
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_ONLY_RAW_DATA_ALLOWED,
check_positive,
parse_known_args_and_warn,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import BaseController
from openbb_terminal.portfolio.portfolio_optimization import po_controller
from openbb_terminal.rich_config import console, MenuText
from openbb_terminal.stocks.comparison_analysis import ca_controller
from openbb_terminal.stocks.options.screen import syncretism_view
# pylint: disable=E1121
logger = logging.getLogger(__name__)
class ScreenerController(BaseController):
"""Screener Controller class"""
CHOICES_COMMANDS = ["view", "set", "scr"]
CHOICES_MENUS = [
"ca",
"po",
]
presets_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "..", "presets/"
)
preset_choices = [
f.split(".")[0] for f in os.listdir(presets_path) if f.endswith(".ini")
]
PATH = "/stocks/options/screen/"
def __init__(self, queue: List[str] = None):
"""Constructor"""
super().__init__(queue)
self.preset = "high_IV"
self.screen_tickers: List = list()
if session and obbff.USE_PROMPT_TOOLKIT:
choices: dict = {c: {} for c in self.controller_choices}
choices["view"] = {c: None for c in self.preset_choices}
choices["set"] = {c: None for c in self.preset_choices}
self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
"""Print help"""
mt = MenuText("stocks/options/screen/")
mt.add_cmd("view")
mt.add_cmd("set")
mt.add_raw("\n")
mt.add_param("_preset", self.preset)
mt.add_raw("\n")
mt.add_cmd("scr")
mt.add_raw("\n")
mt.add_param("_screened_tickers", ", ".join(self.screen_tickers))
mt.add_raw("\n")
mt.add_menu("ca")
mt.add_menu("po")
console.print(text=mt.menu_text, menu="Stocks - Options - Screener")
@log_start_end(log=logger)
def call_view(self, other_args: List[str]):
"""Process view command"""
parser = argparse.ArgumentParser(
add_help=False,
prog="view",
description="""View available presets under presets folder.""",
)
parser.add_argument(
"-p",
"--preset",
action="store",
dest="preset",
type=str,
help="View specific custom preset",
default="",
choices=self.preset_choices,
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-p")
ns_parser = parse_known_args_and_warn(parser, other_args)
if ns_parser:
if ns_parser.preset:
syncretism_view.view_available_presets(
preset=ns_parser.preset, presets_path=self.presets_path
)
else:
for preset in self.preset_choices:
console.print(preset)
console.print("")
@log_start_end(log=logger)
def call_set(self, other_args: List[str]):
"""Process set command"""
parser = argparse.ArgumentParser(
add_help=False,
prog="set",
description="""Set preset from custom and default ones.""",
)
parser.add_argument(
"-p",
"--preset",
action="store",
dest="preset",
type=str,
default="template",
help="Filter presets",
choices=self.preset_choices,
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-p")
ns_parser = parse_known_args_and_warn(parser, other_args)
if ns_parser:
self.preset = ns_parser.preset
console.print("")
@log_start_end(log=logger)
def call_scr(self, other_args: List[str]):
"""Process scr command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="scr",
description="""Screener filter output from https://ops.syncretism.io/index.html.
Where: CS: Contract Symbol; S: Symbol, T: Option Type; Str: Strike; Exp v: Expiration;
IV: Implied Volatility; LP: Last Price; B: Bid; A: Ask; V: Volume; OI: Open Interest;
Y: Yield; MY: Monthly Yield; SMP: Regular Market Price; SMDL: Regular Market Day Low;
SMDH: Regular Market Day High; LU: Last Trade Date; LC: Last Crawl; ITM: In The Money;
PC: Price Change; PB: Price-to-book. """,
)
parser.add_argument(
"-p",
"--preset",
action="store",
dest="preset",
type=str,
default=self.preset,
help="Filter presets",
choices=self.preset_choices,
)
parser.add_argument(
"-l",
"--limit",
type=check_positive,
default=10,
help="Limit of random entries to display. Default shows all",
dest="limit",
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-p")
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
self.screen_tickers = syncretism_view.view_screener_output(
preset=ns_parser.preset,
presets_path=self.presets_path,
n_show=ns_parser.limit,
export=ns_parser.export,
)
@log_start_end(log=logger)
def call_po(self, _):
"""Call the portfolio optimization menu with selected tickers"""
if self.screen_tickers:
self.queue = po_controller.PortfolioOptimizationController(
self.screen_tickers
).menu(custom_path_menu_above="/portfolio/")
else:
console.print(
"Some tickers must be screened first through one of the presets!\n"
)
@log_start_end(log=logger)
def call_ca(self, _):
"""Call the comparison analysis menu with selected tickers"""
if self.screen_tickers:
self.queue = ca_controller.ComparisonAnalysisController(
self.screen_tickers, self.queue
).menu(custom_path_menu_above="/stocks/")
else:
console.print(
"Some tickers must be screened first through one of the presets!\n"
)
```
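The `call_view`/`call_set`/`call_scr` commands share one argparse idiom: if the first token is not a flag, `-p` is prepended so a bare preset name parses as `--preset`. A standalone sketch of that pattern (names are illustrative):
```python
import argparse

parser = argparse.ArgumentParser(prog="set", add_help=False)
parser.add_argument("-p", "--preset", dest="preset", default="template")

other_args = ["high_IV"]              # user typed: set high_IV
if other_args and "-" not in other_args[0][0]:
    other_args.insert(0, "-p")        # becomes: ["-p", "high_IV"]
print(parser.parse_args(other_args).preset)   # high_IV
```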
#### File: stocks/sector_industry_analysis/financedatabase_view.py
```python
__docformat__ = "numpy"
# pylint:disable=too-many-arguments,too-many-lines
import logging
import os
from collections import OrderedDict
from typing import Dict, Optional, List
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from openbb_terminal.config_terminal import theme
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
print_rich_table,
is_valid_axes_count,
)
from openbb_terminal.rich_config import console
from openbb_terminal.stocks.sector_industry_analysis import financedatabase_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_bars_financials(
finance_key: str,
finance_metric: str,
country: str,
sector: str,
industry: str,
marketcap: str = "",
exclude_exchanges: bool = True,
limit: int = 10,
export: str = "",
raw: bool = False,
already_loaded_stocks_data: Dict = None,
external_axes: Optional[List[plt.Axes]] = None,
):
"""Display financials bars comparing sectors, industry, analysis, countries, market cap and excluding exchanges.
Parameters
----------
finance_key: str
Select finance key from Yahoo Finance(e.g. financialData, defaultKeyStatistics, summaryProfile)
finance_metric: str
Select finance metric from Yahoo Finance (e.g. operatingCashflow, revenueGrowth, ebitda, freeCashflow)
country: str
Search by country to find stocks matching the criteria.
sector : str
Search by sector to find stocks matching the criteria.
industry : str
Search by industry to find stocks matching the criteria.
marketcap : str
Select stocks based on the market cap.
exclude_exchanges: bool
        Whether to exclude companies listed on international exchanges.
limit: int
Limit amount of companies displayed
export: str
Format to export data as
raw: bool
Output all raw data
already_loaded_stocks_data: Dict
Dictionary of filtered stocks data that has been loaded before
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
Returns
-------
dict
Dictionary of filtered stocks data
list
List of tickers filtered
"""
if already_loaded_stocks_data:
stocks_data = already_loaded_stocks_data
else:
stocks_data = financedatabase_model.get_stocks_data(
country, sector, industry, marketcap, exclude_exchanges
)
metric_data = {}
for symbol in list(stocks_data.keys()):
if finance_key in stocks_data[symbol] and "quoteType" in stocks_data[symbol]:
stock_name = stocks_data[symbol]["quoteType"]["longName"]
metric = (
stocks_data[symbol][finance_key][finance_metric]
if stocks_data[symbol][finance_key] is not None
and finance_metric in stocks_data[symbol][finance_key]
else None
)
if metric and stock_name:
metric_data[stock_name] = (metric, symbol)
if len(metric_data) > 1:
metric_data = dict(
OrderedDict(
sorted(metric_data.items(), key=lambda t: t[1][0], reverse=True)
)
)
company_names = list()
company_metrics = list()
company_tickers = list()
for name, metric in metric_data.items():
company_names.append(name)
company_metrics.append(metric[0])
company_tickers.append(metric[1])
metric_finance_col = (
"".join(
" " + char if char.isupper() else char.strip()
for char in finance_metric
)
.strip()
.capitalize()
)
df_all = pd.DataFrame(
{"Company": company_names, metric_finance_col: company_metrics}
)
if len(df_all) > limit:
console.print(f"Limiting the amount of companies displayed to {limit}.")
company_name = np.array(company_names)[:limit]
company_metric = np.array(company_metrics)[:limit]
company_ticker = np.array(company_tickers)[:limit]
df = df_all.head(limit)
if raw:
print_rich_table(
df, headers=list(df.columns), show_index=False, title="Bars Financials"
)
else:
# This plot has 1 axis
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
# set returns statement to be compatible with others
return dict(), list()
magnitude = 0
while max(company_metric) > 1_000 or abs(min(company_metric)) > 1_000:
company_metric = np.divide(company_metric, 1_000)
magnitude += 1
# check if the value is a percentage
if (
(magnitude == 0)
and all(company_metric >= 0)
and all(company_metric <= 1)
):
unit = "%"
company_metric = company_metric * 100
else:
unit = " KMBTP"[magnitude] if magnitude != 0 else ""
colors = iter(theme.get_colors())
for name, metric, ticker in zip(
company_name[::-1], company_metric[::-1], company_ticker[::-1]
):
if len(name.split(" ")) > 6 and len(name) > 40:
name = f'{" ".join(name.split(" ")[:4])}\n{" ".join(name.split(" ")[4:])}'
ax.barh(f"{name} ({ticker})", metric, label=ticker, color=next(colors))
metric_title = (
"".join(
" " + char if char.isupper() else char.strip()
for char in finance_metric
)
.strip()
.capitalize()
)
benchmark = np.median(company_metric)
ax.axvline(x=benchmark, lw=3, ls="--", c="grey")
title = f"The {metric_title.title()} (benchmark: {benchmark:.2f}{unit}) of "
title += marketcap + " cap companies " if marketcap else "Companies "
if industry:
title += f"in {industry} industry "
elif sector:
title += f"in {sector} sector "
if country:
title += f"in {country}"
title += " " if (industry or sector) else ""
title += (
"(excl. data from international exchanges)"
if exclude_exchanges
else "(incl. data from international exchanges)"
)
ax.set_title(title, wrap=True, fontsize=11)
labels = ax.get_xticks().tolist()
ax.set_xticks(labels)
ax.set_xticklabels([f"{label:.2f}{unit}" for label in labels])
theme.style_primary_axis(ax)
if not external_axes:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
finance_metric,
df_all,
)
return stocks_data, company_tickers
if len(metric_data) == 1:
console.print(
f"Only 1 company found '{list(metric_data.keys())[0]}'. No barchart will be depicted.\n"
)
return stocks_data, [list(metric_data.values())[0][1]]
console.print("No company found. No barchart will be depicted.\n")
return dict(), list()
@log_start_end(log=logger)
def display_companies_per_sector_in_country(
country: str,
mktcap: str = "",
exclude_exchanges: bool = True,
export: str = "",
raw: bool = False,
max_sectors_to_display: int = 15,
min_pct_to_display_sector: float = 0.015,
external_axes: Optional[List[plt.Axes]] = None,
):
"""Display number of companies per sector in a specific country (and market cap). [Source: Finance Database]
Parameters
----------
country: str
Select country to get number of companies by each sector
mktcap: str
Select market cap of companies to consider from Small, Mid and Large
exclude_exchanges : bool
Exclude international exchanges
export: str
Format to export data as
raw: bool
Output all raw data
max_sectors_to_display: int
Maximum number of sectors to display
min_pct_to_display_sector: float
Minimum percentage to display sector
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
companies_per_sector = financedatabase_model.get_companies_per_sector_in_country(
country, mktcap, exclude_exchanges
)
companies_per_sector = dict(
OrderedDict(
sorted(companies_per_sector.items(), key=lambda t: t[1], reverse=True)
)
)
for key, value in companies_per_sector.copy().items():
if value == 0:
del companies_per_sector[key]
if not companies_per_sector:
console.print("No companies found with these parameters!\n")
return
df = pd.DataFrame.from_dict(companies_per_sector, orient="index")
df.index.name = "Sector"
df.columns = ["Number of companies"]
df["Number of companies"] = df["Number of companies"].astype(int)
title = mktcap + " cap companies " if mktcap else "Companies "
title += f"in {country}\n"
title += "excl. exchanges" if exclude_exchanges else " incl. exchanges"
if raw:
print_rich_table(df, headers=list(df.columns), show_index=True, title=title)
else:
colors = theme.get_colors()
if len(companies_per_sector) > 1:
total_num_companies = sum(companies_per_sector.values())
min_companies_to_represent = round(
min_pct_to_display_sector * total_num_companies
)
filter_sectors_to_display = (
np.array(list(companies_per_sector.values()))
> min_companies_to_represent
)
if any(filter_sectors_to_display):
if not all(filter_sectors_to_display):
num_sectors_to_display = np.where(~filter_sectors_to_display)[0][0]
if num_sectors_to_display < max_sectors_to_display:
max_sectors_to_display = num_sectors_to_display
else:
console.print(
"The minimum threshold percentage specified is too high, thus it will be ignored."
)
if len(companies_per_sector) > max_sectors_to_display:
companies_per_sector_sliced = dict(
list(companies_per_sector.items())[: max_sectors_to_display - 1]
)
companies_per_sector_sliced["Others"] = sum(
dict(
list(companies_per_sector.items())[max_sectors_to_display - 1 :]
).values()
)
legend, values = zip(*companies_per_sector_sliced.items())
else:
legend, values = zip(*companies_per_sector.items())
# This plot has 1 axis
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
            ax.pie(
values,
labels=legend,
colors=colors,
wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
labeldistance=1.05,
startangle=45,
)
ax.set_title(title, fontsize=14)
if not external_axes:
theme.visualize_output()
elif len(companies_per_sector) == 1:
console.print(
f"Only 1 sector found '{list(companies_per_sector.keys())[0]}'. No pie chart will be depicted."
)
else:
console.print("No sector found. No pie chart will be depicted.")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"cps",
df,
)
@log_start_end(log=logger)
def display_companies_per_industry_in_country(
country: str,
mktcap: str = "",
exclude_exchanges: bool = True,
export: str = "",
raw: bool = False,
max_industries_to_display: int = 15,
min_pct_to_display_industry: float = 0.015,
external_axes: Optional[List[plt.Axes]] = None,
):
"""Display number of companies per industry in a specific country. [Source: Finance Database]
Parameters
----------
country: str
Select country to get number of companies by each industry
mktcap: str
Select market cap of companies to consider from Small, Mid and Large
exclude_exchanges : bool
Exclude international exchanges
export: str
Format to export data as
raw: bool
Output all raw data
max_industries_to_display: int
Maximum number of industries to display
min_pct_to_display_industry: float
Minimum percentage to display industry
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
companies_per_industry = (
financedatabase_model.get_companies_per_industry_in_country(
country, mktcap, exclude_exchanges
)
)
companies_per_industry = dict(
OrderedDict(
sorted(companies_per_industry.items(), key=lambda t: t[1], reverse=True)
)
)
for key, value in companies_per_industry.copy().items():
if value == 0:
del companies_per_industry[key]
if not companies_per_industry:
console.print("No companies found with these parameters!\n")
return
df = pd.DataFrame.from_dict(companies_per_industry, orient="index")
df.index.name = "Industry"
df.columns = ["Number of companies"]
df["Number of companies"] = df["Number of companies"].astype(int)
title = mktcap + " cap companies " if mktcap else "Companies "
title += f"in {country}\n"
title += "excl. exchanges" if exclude_exchanges else " incl. exchanges"
if raw:
print_rich_table(df, headers=list(df.columns), show_index=True, title=title)
else:
colors = theme.get_colors()
if len(companies_per_industry) > 1:
total_num_companies = sum(companies_per_industry.values())
min_companies_to_represent = round(
min_pct_to_display_industry * total_num_companies
)
filter_industries_to_display = (
np.array(list(companies_per_industry.values()))
> min_companies_to_represent
)
if any(filter_industries_to_display):
if not all(filter_industries_to_display):
num_industries_to_display = np.where(~filter_industries_to_display)[
0
][0]
if num_industries_to_display < max_industries_to_display:
max_industries_to_display = num_industries_to_display
else:
console.print(
"The minimum threshold percentage specified is too high, thus it will be ignored."
)
if len(companies_per_industry) > max_industries_to_display:
companies_per_industry_sliced = dict(
list(companies_per_industry.items())[
: max_industries_to_display - 1
]
)
companies_per_industry_sliced["Others"] = sum(
dict(
list(companies_per_industry.items())[
max_industries_to_display - 1 :
]
).values()
)
legend, values = zip(*companies_per_industry_sliced.items())
else:
legend, values = zip(*companies_per_industry.items())
# This plot has 1 axis
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
ax.pie(
values,
labels=legend,
colors=colors,
wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
labeldistance=1.05,
startangle=45,
)
ax.set_title(title, fontsize=14)
if not external_axes:
theme.visualize_output()
elif len(companies_per_industry) == 1:
console.print(
f"Only 1 industry found '{list(companies_per_industry.keys())[0]}'. No pie chart will be depicted."
)
else:
console.print("No industry found. No pie chart will be depicted.")
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"cpic",
df,
)
@log_start_end(log=logger)
def display_companies_per_industry_in_sector(
sector: str,
mktcap: str = "",
exclude_exchanges: bool = True,
export: str = "",
raw: bool = False,
max_industries_to_display: int = 15,
min_pct_to_display_industry: float = 0.015,
external_axes: Optional[List[plt.Axes]] = None,
):
"""Display number of companies per industry in a specific sector. [Source: Finance Database]
Parameters
----------
sector: str
Select sector to get number of companies by each industry
mktcap: str
Select market cap of companies to consider from Small, Mid and Large
exclude_exchanges : bool
Exclude international exchanges
export: str
Format to export data as
raw: bool
Output all raw data
max_industries_to_display: int
Maximum number of industries to display
min_pct_to_display_industry: float
Minimum percentage to display industry
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
companies_per_industry = financedatabase_model.get_companies_per_industry_in_sector(
sector, mktcap, exclude_exchanges
)
companies_per_industry = dict(
OrderedDict(
sorted(companies_per_industry.items(), key=lambda t: t[1], reverse=True)
)
)
for key, value in companies_per_industry.copy().items():
if value == 0:
del companies_per_industry[key]
if not companies_per_industry:
console.print("No companies found with these parameters!\n")
return
df = pd.DataFrame.from_dict(companies_per_industry, orient="index")
df.index.name = "Industry"
df.columns = ["Number of companies"]
df["Number of companies"] = df["Number of companies"].astype(int)
title = mktcap + " cap companies " if mktcap else "Companies "
title += f"in {sector} sector\n"
title += "excl. exchanges" if exclude_exchanges else " incl. exchanges"
if raw:
print_rich_table(
df,
headers=list(df.columns),
show_index=True,
title=title,
)
else:
colors = theme.get_colors()
if len(companies_per_industry) > 1:
total_num_companies = sum(companies_per_industry.values())
min_companies_to_represent = round(
min_pct_to_display_industry * total_num_companies
)
filter_industries_to_display = (
np.array(list(companies_per_industry.values()))
> min_companies_to_represent
)
if any(filter_industries_to_display):
if not all(filter_industries_to_display):
num_industries_to_display = np.where(~filter_industries_to_display)[
0
][0]
if num_industries_to_display < max_industries_to_display:
max_industries_to_display = num_industries_to_display
else:
console.print(
"The minimum threshold percentage specified is too high, thus it will be ignored."
)
if len(companies_per_industry) > max_industries_to_display:
companies_per_industry_sliced = dict(
list(companies_per_industry.items())[
: max_industries_to_display - 1
]
)
companies_per_industry_sliced["Others"] = sum(
dict(
list(companies_per_industry.items())[
max_industries_to_display - 1 :
]
).values()
)
legend, values = zip(*companies_per_industry_sliced.items())
else:
legend, values = zip(*companies_per_industry.items())
# This plot has 1 axis
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
ax.pie(
values,
labels=legend,
colors=colors,
wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
labeldistance=1.05,
startangle=45,
)
ax.set_title(title, fontsize=14)
if not external_axes:
theme.visualize_output()
elif len(companies_per_industry) == 1:
console.print(
f"Only 1 industry found '{list(companies_per_industry.keys())[0]}'. No pie chart will be depicted."
)
else:
console.print("No industry found. No pie chart will be depicted.")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"cpis",
df,
)
@log_start_end(log=logger)
def display_companies_per_country_in_sector(
sector: str,
mktcap: str = "",
exclude_exchanges: bool = True,
export: str = "",
raw: bool = False,
max_countries_to_display: int = 15,
min_pct_to_display_country: float = 0.015,
external_axes: Optional[List[plt.Axes]] = None,
):
"""Display number of companies per country in a specific sector. [Source: Finance Database]
Parameters
----------
    sector: str
        Select sector to get number of companies per country
mktcap: str
Select market cap of companies to consider from Small, Mid and Large
exclude_exchanges : bool
Exclude international exchanges
export: str
Format to export data as
raw: bool
Output all raw data
max_countries_to_display: int
Maximum number of countries to display
min_pct_to_display_country: float
Minimum percentage to display country
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
companies_per_country = financedatabase_model.get_companies_per_country_in_sector(
sector, mktcap, exclude_exchanges
)
companies_per_country = dict(
OrderedDict(
sorted(companies_per_country.items(), key=lambda t: t[1], reverse=True)
)
)
for key, value in companies_per_country.copy().items():
if value == 0:
del companies_per_country[key]
if not companies_per_country:
console.print("No companies found with these parameters!\n")
return
df = pd.DataFrame.from_dict(companies_per_country, orient="index")
df.index.name = "Country"
df.columns = ["Number of companies"]
df["Number of companies"] = df["Number of companies"].astype(int)
title = mktcap + " cap companies " if mktcap else "Companies "
title += f"in {sector} sector\n"
title += "excl. exchanges" if exclude_exchanges else " incl. exchanges"
if raw:
print_rich_table(df, headers=list(df.columns), show_index=True, title=title)
else:
colors = theme.get_colors()
if len(companies_per_country) > 1:
total_num_companies = sum(companies_per_country.values())
min_companies_to_represent = round(
min_pct_to_display_country * total_num_companies
)
filter_countries_to_display = (
np.array(list(companies_per_country.values()))
> min_companies_to_represent
)
if any(filter_countries_to_display):
if not all(filter_countries_to_display):
num_countries_to_display = np.where(~filter_countries_to_display)[
0
][0]
if num_countries_to_display < max_countries_to_display:
max_countries_to_display = num_countries_to_display
else:
console.print(
"The minimum threshold percentage specified is too high, thus it will be ignored."
)
if len(companies_per_country) > max_countries_to_display:
companies_per_country_sliced = dict(
list(companies_per_country.items())[: max_countries_to_display - 1]
)
companies_per_country_sliced["Others"] = sum(
dict(
list(companies_per_country.items())[
max_countries_to_display - 1 :
]
).values()
)
legend, values = zip(*companies_per_country_sliced.items())
else:
legend, values = zip(*companies_per_country.items())
# This plot has 1 axis
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
ax.pie(
values,
labels=legend,
colors=colors,
wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
labeldistance=1.05,
startangle=45,
)
ax.set_title(title, fontsize=14)
if not external_axes:
theme.visualize_output()
elif len(companies_per_country) == 1:
console.print(
f"Only 1 country found '{list(companies_per_country.keys())[0]}'. No pie chart will be depicted."
)
else:
console.print("No country found. No pie chart will be depicted.")
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"cpcs",
df,
)
@log_start_end(log=logger)
def display_companies_per_country_in_industry(
industry: str,
mktcap: str = "",
exclude_exchanges: bool = True,
export: str = "",
raw: bool = False,
max_countries_to_display: int = 15,
min_pct_to_display_country: float = 0.015,
external_axes: Optional[List[plt.Axes]] = None,
):
"""Display number of companies per country in a specific industry. [Source: Finance Database]
Parameters
----------
    industry: str
        Select industry to get number of companies per country
mktcap: str
Select market cap of companies to consider from Small, Mid and Large
exclude_exchanges : bool
Exclude international exchanges
export: str
Format to export data as
raw: bool
Output all raw data
max_countries_to_display: int
Maximum number of countries to display
min_pct_to_display_country: float
Minimum percentage to display country
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
companies_per_country = financedatabase_model.get_companies_per_country_in_industry(
industry, mktcap, exclude_exchanges
)
companies_per_country = dict(
OrderedDict(
sorted(companies_per_country.items(), key=lambda t: t[1], reverse=True)
)
)
for key, value in companies_per_country.copy().items():
if value == 0:
del companies_per_country[key]
if not companies_per_country:
console.print("No companies found with these parameters!\n")
return
df = pd.DataFrame.from_dict(companies_per_country, orient="index")
df.index.name = "Country"
df.columns = ["Number of companies"]
df["Number of companies"] = df["Number of companies"].astype(int)
title = mktcap + " cap companies " if mktcap else "Companies "
title += f"per country in {industry} industry\n"
title += "excl. exchanges" if exclude_exchanges else " incl. exchanges"
if raw:
print_rich_table(df, headers=list(df.columns), show_index=True, title=title)
else:
colors = theme.get_colors()
if len(companies_per_country) > 1:
total_num_companies = sum(companies_per_country.values())
min_companies_to_represent = round(
min_pct_to_display_country * total_num_companies
)
filter_countries_to_display = (
np.array(list(companies_per_country.values()))
> min_companies_to_represent
)
if any(filter_countries_to_display):
if not all(filter_countries_to_display):
num_countries_to_display = np.where(~filter_countries_to_display)[
0
][0]
if num_countries_to_display < max_countries_to_display:
max_countries_to_display = num_countries_to_display
else:
console.print(
"The minimum threshold percentage specified is too high, thus it will be ignored."
)
if len(companies_per_country) > max_countries_to_display:
companies_per_country_sliced = dict(
list(companies_per_country.items())[: max_countries_to_display - 1]
)
companies_per_country_sliced["Others"] = sum(
dict(
list(companies_per_country.items())[
max_countries_to_display - 1 :
]
).values()
)
legend, values = zip(*companies_per_country_sliced.items())
else:
legend, values = zip(*companies_per_country.items())
# This plot has 1 axis
if not external_axes:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
elif is_valid_axes_count(external_axes, 1):
(ax,) = external_axes
else:
return
ax.pie(
values,
labels=legend,
colors=colors,
wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
labeldistance=1.05,
startangle=45,
)
ax.set_title(title, fontsize=14)
if not external_axes:
theme.visualize_output()
elif len(companies_per_country) == 1:
console.print(
f"Only 1 country found '{list(companies_per_country.keys())[0]}'. No pie chart will be depicted."
)
else:
console.print("No country found. No pie chart will be depicted.")
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"cpci",
df,
)
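# Illustrative usage sketch (added for clarity; not part of the original module).
# Each display_* helper filters companies via financedatabase_model and either prints
# a rich table (raw=True) or draws a pie/bar chart on a matplotlib axis, e.g.:
#   display_companies_per_sector_in_country("United States", mktcap="Large", raw=True)
#   display_bars_financials("financialData", "ebitda", "Germany", "Technology", "", limit=5)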
``` |
{
"source": "JoshuaC3/pyjanitor",
"score": 3
} |
#### File: pyjanitor/janitor/dataframe.py
```python
from pandas import DataFrame, Series
from .functions import (clean_names, coalesce, convert_excel_date,
encode_categorical, fill_empty, get_dupes,
get_features_targets, remove_empty, rename_column)
class JanitorSeries(Series):
@property
def _constructor(self):
return JanitorSeries
@property
def _constructor_expanddim(self):
return JanitorDataFrame
class JanitorDataFrame(DataFrame):
@property
def _constructor(self):
return JanitorDataFrame
@property
def _constructor_sliced(self):
return JanitorSeries
def clean_names(self):
return clean_names(self)
def remove_empty(self):
return remove_empty(self)
def get_dupes(self, columns=None):
return get_dupes(self, columns)
def encode_categorical(self, columns):
return encode_categorical(self, columns)
def rename_column(self, old, new):
return rename_column(self, old, new)
def get_features_targets(self, target_columns, feature_columns=None):
return get_features_targets(self, target_columns, feature_columns)
def coalesce(self, columns, new_column_name):
return coalesce(self, columns, new_column_name)
def convert_excel_date(self, column):
return convert_excel_date(self, column)
def fill_empty(self, columns, value):
return fill_empty(self, columns, value)
```
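These subclasses exist so the functional API in `functions.py` can be used as chained methods; because `_constructor` returns the subclass, each call keeps yielding a `JanitorDataFrame`. A brief sketch, assuming the module is importable as shown in the file path above (data is illustrative):
```python
import pandas as pd
from janitor.dataframe import JanitorDataFrame

raw = pd.DataFrame({"First Name": ["Ada", None], "Score": [99.0, None]})
clean = JanitorDataFrame(raw).clean_names().remove_empty().fill_empty("score", value=0)
print(clean.columns.tolist())   # ['first_name', 'score']
```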
#### File: pyjanitor/janitor/functions.py
```python
import datetime as dt
from functools import reduce
import pandas as pd
from .errors import JanitorError
import re
def _strip_underscores(df, strip_underscores=None):
"""
Strip underscores from the beginning, end or both of the
of the DataFrames column names.
.. code-block:: python
df = _strip_underscores(df, strip_underscores='left')
:param df: The pandas DataFrame object.
:param strip_underscores: (optional) Removes the outer underscores from all
column names. Default None keeps outer underscores. Values can be
either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'
and True.
:returns: A pandas DataFrame.
"""
underscore_options = [None, 'left', 'right', 'both', 'l', 'r', True]
if strip_underscores not in underscore_options:
raise JanitorError(
"""strip_underscores must be one of: %s""" % underscore_options
)
if strip_underscores in ['left', 'l']:
df = df.rename(columns=lambda x: x.lstrip('_'))
elif strip_underscores in ['right', 'r']:
df = df.rename(columns=lambda x: x.rstrip('_'))
elif strip_underscores == 'both' or strip_underscores is True:
df = df.rename(columns=lambda x: x.strip('_'))
return df
def clean_names(df, strip_underscores=None):
"""
Clean column names.
Takes all column names, converts them to lowercase, then replaces all
spaces with underscores.
Functional usage example:
.. code-block:: python
df = clean_names(df)
Method chaining example:
.. code-block:: python
df = pd.DataFrame(...)
df = jn.DataFrame(df).clean_names()
:param df: The pandas DataFrame object.
:param strip_underscores: (optional) Removes the outer underscores from all
column names. Default None keeps outer underscores. Values can be
either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'
and True.
:returns: A pandas DataFrame.
"""
df = df.rename(
columns=lambda x: x.lower()
.replace(' ', '_')
.replace('/', '_')
.replace(':', '_')
.replace("'", '')
.replace(u'’', '')
.replace(',', '_')
.replace('?', '_')
.replace('-', '_')
.replace('(', '_')
.replace(')', '_')
.replace('.', '_')
)
df = df.rename(columns=lambda x: re.sub('_+', '_', x))
df = _strip_underscores(df, strip_underscores)
return df
def remove_empty(df):
"""
Drop all rows and columns that are completely null.
Implementation is shamelessly copied from `StackOverflow`_.
.. _StackOverflow: https://stackoverflow.com/questions/38884538/python-pandas-find-all-rows-where-all-values-are-nan # noqa: E501
Functional usage example:
.. code-block:: python
df = remove_empty(df)
Method chaining example:
.. code-block:: python
df = pd.DataFrame(...)
df = jn.DataFrame(df).remove_empty()
:param df: The pandas DataFrame object.
:returns: A pandas DataFrame.
"""
nanrows = df.index[df.isnull().all(axis=1)]
df.drop(index=nanrows, inplace=True)
nancols = df.columns[df.isnull().all(axis=0)]
df.drop(columns=nancols, inplace=True)
return df
def get_dupes(df, columns=None):
"""
Returns all duplicate rows.
Functional usage example:
.. code-block:: python
get_dupes(df)
Method chaining example:
.. code-block:: python
df = pd.DataFrame(...)
jn.DataFrame(df).get_dupes()
:param df: The pandas DataFrame object.
:param str/iterable columns: (optional) A column name or an iterable (list
or tuple) of column names. Following pandas API, this only considers
certain columns for identifying duplicates. Defaults to using all
columns.
:returns: The duplicate rows, as a pandas DataFrame.
"""
dupes = df.duplicated(subset=columns, keep=False)
return df[dupes == True] # noqa: E712
def encode_categorical(df, columns):
"""
Encode the specified columns as categorical.
Functional usage example:
.. code-block:: python
encode_categorical(df, columns="my_categorical_column") # one way
Method chaining example:
.. code-block:: python
df = pd.DataFrame(...)
categorical_cols = ['col1', 'col2', 'col4']
jn.DataFrame(df).encode_categorical(columns=categorical_cols)
:param df: The pandas DataFrame object.
:param str/iterable columns: A column name or an iterable (list or tuple)
of column names.
:returns: A pandas DataFrame
"""
if isinstance(columns, list) or isinstance(columns, tuple):
for col in columns:
assert col in df.columns, \
JanitorError("{col} missing from dataframe columns!".format(col=col)) # noqa: E501
df[col] = pd.Categorical(df[col])
elif isinstance(columns, str):
df[columns] = pd.Categorical(df[columns])
else:
raise JanitorError('kwarg `columns` must be a string or iterable!')
return df
def get_features_targets(df, target_columns, feature_columns=None):
"""
Get the features and targets as separate DataFrames/Series.
The behaviour is as such:
- `target_columns` is mandatory.
- If `feature_columns` is present, then we will respect the column names
inside there.
- If `feature_columns` is not passed in, then we will assume that the
rest of the columns are feature columns, and return them.
Functional usage example:
.. code-block:: python
X, y = get_features_targets(df, target_columns="measurement")
Method chaining example:
.. code-block:: python
df = pd.DataFrame(...)
target_cols = ['output1', 'output2']
X, y = jn.DataFrame(df).get_features_targets(target_columns=target_cols) # noqa: E501
:param df: The pandas DataFrame object.
:param str/iterable target_columns: Either a column name or an iterable
(list or tuple) of column names that are the target(s) to be predicted.
:param str/iterable feature_columns: (optional) The column name or iterable
of column names that are the features (a.k.a. predictors) used to
predict the targets.
:returns: (X, Y) the feature matrix (X) and the target matrix (Y). Both are
pandas DataFrames.
"""
Y = df[target_columns]
if feature_columns:
X = df[feature_columns]
else:
if isinstance(target_columns, str):
xcols = [c for c in df.columns if target_columns != c]
elif (isinstance(target_columns, list)
or isinstance(target_columns, tuple)):
xcols = [c for c in df.columns if c not in target_columns]
X = df[xcols]
return X, Y
def rename_column(df, old, new):
"""
Rename a column in place.
Functional usage example:
.. code-block:: python
df = rename_column("old_column_name", "new_column_name")
Method chaining example:
.. code-block:: python
df = pd.DataFrame(...)
df = jn.DataFrame(df).rename_column("old_column_name", "new_column_name") # noqa: E501
This is just syntactic sugar/a convenience function for renaming one column
at a time. If you are convinced that there are multiple columns in need of
changing, then use the :py:meth:`pandas.DataFrame.rename` method.
:param str old: The old column name.
:param str new: The new column name.
:returns: A pandas DataFrame.
"""
return df.rename(columns={old: new})
def coalesce(df, columns, new_column_name):
"""
Coalesces two or more columns of data in order of column names provided.
Functional usage example:
.. code-block:: python
df = coalesce(df, columns=['col1', 'col2'])
Method chaining example:
.. code-block:: python
df = pd.DataFrame(...)
df = jn.DataFrame(df).coalesce(['col1', 'col2'])
The result of this function is that we take the first non-null value across
rows.
This is more syntactic diabetes! For R users, this should look familiar to
`dplyr`'s `coalesce` function; for Python users, the interface
should be more intuitive than the :py:meth:`pandas.Series.combine_first`
method (which we're just using internally anyways).
:param df: A pandas DataFrame.
:param columns: A list of column names.
:param str new_column_name: The new column name after combining.
:returns: A pandas DataFrame.
"""
series = [df[c] for c in columns]
def _coalesce(series1, series2):
return series1.combine_first(series2)
df = df.drop(columns=columns)
df[new_column_name] = reduce(_coalesce, series) # noqa: F821
return df
def convert_excel_date(df, column):
"""
Convert Excel's serial date format into Python datetime format.
    Implementation is also from `Stack Overflow`_.
.. _Stack Overflow: https://stackoverflow.com/questions/38454403/convert-excel-style-date-with-pandas # noqa: E501
Functional usage example:
.. code-block:: python
df = convert_excel_date(df, column='date')
Method chaining example:
.. code-block:: python
df = pd.DataFrame(...)
df = jn.DataFrame(df).convert_excel_date('date')
:param df: A pandas DataFrame.
:param str column: A column name.
:returns: A pandas DataFrame with corrected dates.
"""
df[column] = (pd.TimedeltaIndex(df[column], unit='d')
+ dt.datetime(1899, 12, 30))
return df
def fill_empty(df, columns, value):
"""
Fill `NaN` values in specified columns with a given value.
Super sugary syntax that wraps :py:meth:`pandas.DataFrame.fillna`.
Functional usage example:
.. code-block:: python
df = fill_empty(df, columns=['col1', 'col2'], value=0)
Method chaining example:
.. code-block:: python
df = pd.DataFrame(...)
df = jn.DataFrame(df).fill_empty(df, columns='col1', value=0)
:param df: A pandas DataFrame.
:param columns: Either a `str` or `list` or `tuple`. If a string is passed
in, then only that column will be filled; if a list or tuple of strings
are passed in, then they will all be filled with the same value.
:param value: The value that replaces the `NaN` values.
"""
if isinstance(columns, list) or isinstance(columns, tuple):
for col in columns:
assert col in df.columns, \
JanitorError("{col} missing from dataframe columns!".format(col=col)) # noqa: E501
df[col] = df[col].fillna(value)
elif isinstance(columns, str):
df[columns] = df[columns].fillna(value)
else:
raise JanitorError('kwarg `columns` must be a string or iterable!')
return df
```
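The same functions can be used without the wrapper class. A small end-to-end sketch of the functional API, with illustrative column names and Excel serial dates:
```python
import pandas as pd
from janitor.functions import clean_names, coalesce, convert_excel_date

df = pd.DataFrame({
    "Phone Home": [None, "555-1234"],
    "Phone Work": ["555-9876", None],
    "Hire Date": [43831, 44196],      # Excel serial dates
})
df = clean_names(df)                                        # phone_home, phone_work, hire_date
df = coalesce(df, ["phone_home", "phone_work"], "phone")    # first non-null across the two columns
df = convert_excel_date(df, "hire_date")                    # serial numbers -> datetime64
print(df)
```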
#### File: JoshuaC3/pyjanitor/setup.py
```python
from setuptools import setup
def requirements():
with open('requirements.txt', 'r+') as f:
return f.read()
setup(
name='pyjanitor',
version='0.1.1',
description='Tools for cleaning pandas DataFrames',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/ericmjl/pyjanitor',
packages=['janitor'],
install_requires=requirements()
)
``` |
{
"source": "JoshuaC3/scikit-bonus",
"score": 3
} |
#### File: pandas/tests/test_one_hot_encoder_with_names.py
```python
import pandas as pd
import pytest
from sklearn.preprocessing import OneHotEncoder
from ..preprocessing import OneHotEncoderWithNames
@pytest.fixture
def get_data():
"""Create simple input data."""
input_data = pd.DataFrame(
{"A": [1, 2, 3, 2], "B": [0, 0, 0, 1], "C": ["a", "c", "b", "c"]}
)
sklearn_ohe_results = OneHotEncoder().fit_transform(input_data)
skbonus_ohe = OneHotEncoderWithNames()
skbonus_ohe_results = skbonus_ohe.fit_transform(input_data)
return skbonus_ohe, sklearn_ohe_results, skbonus_ohe_results
def test_one_hot_encoding(get_data):
"""Test if the values of the OneHotEncoderWithNames matches the ones of scikit-learn's OneHotEncoder."""
_, sklearn_ohe_results, skbonus_ohe_results = get_data
assert (sklearn_ohe_results == skbonus_ohe_results.values).all()
def test_column_names(get_data):
"""Test if the columns names are set properly."""
_, _, skbonus_ohe_results = get_data
assert skbonus_ohe_results.columns.tolist() == [
"A_1",
"A_2",
"A_3",
"B_0",
"B_1",
"C_a",
"C_b",
"C_c",
]
def test_assertion_error(get_data):
"""Test if the encoder notices that the column names differ between training and transformation time."""
df_new = pd.DataFrame(
{
"A": [1, 2, 3, 2],
"C": ["a", "c", "b", "c"],
"B": [0, 0, 0, 1],
}
)
skbonus_ohe, _, _ = get_data
with pytest.raises(
AssertionError,
match="Column names during fit and transform time should be identical, including the order.",
):
skbonus_ohe.transform(df_new)
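# Illustrative note (added): the expected names in test_column_names follow the
# "<column>_<category>" pattern that scikit-learn itself reports, e.g.
#   OneHotEncoder().fit(input_data).get_feature_names_out()
# would yield ['A_1', 'A_2', 'A_3', 'B_0', 'B_1', 'C_a', 'C_b', 'C_c']
# (assuming a scikit-learn version that provides get_feature_names_out).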
``` |
{
"source": "joshuacassidygrant/linear-algebra-practice",
"score": 3
} |
#### File: joshuacassidygrant/linear-algebra-practice/MatrixMultGenerator.py
```python
import numpy as np
class MatrixMultGenerator:
vmin = -2
vmax = 3
    def generate(self, w, h, s, restrictToIntegers):
        # Build a (w x s) and an (s x h) integer matrix; restrictToIntegers is currently unused.
        m1 = np.random.randint(self.vmin, self.vmax, size=(w, s))
m2 = np.random.randint(self.vmin, self.vmax, size=(s, h))
sol = m1@m2
return (m1, m2, sol)
# TODO: create rational version
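# Illustrative usage sketch (added; not part of the original file):
#   gen = MatrixMultGenerator()
#   m1, m2, sol = gen.generate(2, 2, 3, restrictToIntegers=True)  # m1 is 2x3, m2 is 3x2
#   assert (m1 @ m2 == sol).all()                                 # sol is their 2x2 product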
``` |
{
"source": "joshuacc1/Music-Player-CLI-Anywhere",
"score": 3
} |
#### File: Music-Player-CLI-Anywhere/casp/render.py
```python
import blessed
class Render:
"""Provides style rendering capabilities to the TUI"""
def __init__(self, width: int = 150, height: int = 150, skin: str = 'classic'):
self.term = blessed.Terminal()
self.width = width
self.height = height
self.skin = skin
self.screen = []
self.skins = {"classic": {"background": self.term.black_on_white,
"bar": self.term.black_on_wheat,
"time": self.term.fuchsia, "info": self.term.webpurple},
"dark": {"background": self.term.white_on_gray22,
"bar": self.term.white_on_darkslategray,
"time": self.term.turquoise, "info": self.term.orchid1},
"ocean": {"background": self.term.white_on_darkslategray,
"bar": self.term.white_on_cadetblue,
"time": self.term.darkgoldenrod4,
"info": self.term.dodgerblue4},
"cyberpunk": {"background": self.term.white_on_black,
"bar": self.term.white_on_midnightblue,
"time": self.term.aqua, "info": self.term.fuchsia},
"onedark": {"background": self.term.on_color_rgb(40, 44, 52),
"bar": self.term.on_color_rgb(33, 34, 43),
"time": self.term.color_rgb(229, 192, 123),
"info": self.term.color_rgb(198, 120, 221)},
"vlc": {"background": self.term.on_color_rgb(255, 255, 255),
"bar": self.term.on_color_rgb(239, 239, 239),
"time": self.term.color_rgb(249, 181, 95),
"info": self.term.color_rgb(118, 118, 118)}
}
def render_album(self) -> list:
"""Renders the album"""
screen = []
for i in range(self.height):
screen.append(' ' * self.width)
screen[0] = self.skins[self.skin]["background"] + ' ' * self.width
screen[-1] = ' ' * self.width + self.term.normal
return screen
def render_skin(self, album: str, time1: str, time2: str, progress_bar: str, play: bool, volume: str) -> None:
"""
Renders the skin
        Album is a height*width string separated by '\n', with dimensions given by gen_art_dim
        time1 is a 5 character string of the time passed
        time2 is a 5 character string of the time left/total length of song
        progress_bar is an n character string with n given by gen_progress_dim
        play is a bool, set to True if the button should display "play" and False if "pause"
        volume is a 6 character string of the current volume e.g. '--x---' or however it is formatted
"""
for i in range(self.height):
self.screen.append(' ' * self.width)
self.screen[0] = self.skins[self.skin]["background"] + ' ' * self.width
self.screen[-1] = ' ' * self.width + self.term.normal
# adding in album art
album_height = self.gen_art_dim()[0]
album = album.split("\n")
for i in range(2, album_height + 2):
album_index = i - 2
self.screen[i] = album[album_index].center(self.width)
# changing colour
self.screen[-8] += (self.skins[self.skin]["bar"])
        # adding in the song title and progress bar
        self.screen[-6] = self.skins[self.skin]["info"] + "California Uber Alles".center(self.width)  # hardcoded song title
self.screen[-6] += self.skins[self.skin]["bar"]
self.screen[-5] = " " + self.skins[self.skin]["time"] + time1 + self.skins[self.skin]["bar"]
self.screen[-5] += " " + progress_bar + " " + self.skins[self.skin]["time"] + time2
self.screen[-5] += self.skins[self.skin]["bar"] + " "
# adding play/pause etc
s = self.skins[self.skin]["info"] + ("play " if play else "pause") + self.skins[self.skin]["bar"]
t = " " + s + " " + self.skins[self.skin]["info"] + "|<<" + self.skins[self.skin]["bar"] + " " + \
self.skins[self.skin]["info"] + ">>|" + self.skins[self.skin]["bar"]
n = self.term.length(t)
self.screen[-3] = t + " " * (self.width - 18 - n) + self.skins[self.skin]["info"] + "volume: " + volume
self.screen[-3] += self.skins[self.skin]["bar"] + " "
def gen_art_dim(self) -> tuple[int, int]:
"""Generates the dimentions for the album art in the form (height, width)"""
if (self.width - 2) / 2 >= self.height - 11:
return self.height - 11, (self.height - 11) * 2
else:
return (self.width - 2) // 2, self.width - 2
def gen_progress_dim(self) -> int:
"""Generates the length of progress bar"""
return self.width - 18
def print_window(self) -> None:
"""Prints the screen"""
for i in self.screen:
print(i)
def gen_album(self, render: object) -> str:
"""Helper function that tests the album"""
height, width = render.gen_art_dim()
string = "█" * width + "\n"
string = string * height
return string
def get_skin(self, skin: str) -> dict:
"""Creates a skin and returns the lines"""
if skin in self.skins.keys():
return self.skins[skin]
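# Illustrative usage sketch (added; not part of the original module):
#   r = Render(width=80, height=24, skin="dark")
#   album = r.gen_album(r)                     # solid placeholder album art
#   bar = "-" * r.gen_progress_dim()           # empty progress bar
#   r.render_skin(album, "00:00", "03:12", bar, play=True, volume="--x---")
#   r.print_window()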
``` |
{
"source": "joshua-cerniglia/duo_client_python",
"score": 2
} |
#### File: tests/admin/test_admins.py
```python
from .. import util
import duo_client.admin
from .base import TestAdmin
class TestAdmins(TestAdmin):
# Uses underlying paging
def test_get_admins(self):
response = self.client_list.get_admins()
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/admins')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_admins_with_limit(self):
response = self.client_list.get_admins(limit='20')
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/admins')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['20'],
'offset': ['0'],
})
def test_get_admins_with_limit_offset(self):
response = self.client_list.get_admins(limit='20', offset='2')
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/admins')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['20'],
'offset': ['2'],
})
def test_get_admins_with_offset(self):
response = self.client_list.get_admins(offset=9001)
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/admins')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_admins_iterator(self):
response = self.client_list.get_admins_iterator()
response = next(response)
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/admins')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
```
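These tests capture the request issued by a mocked client and compare the parsed query string. A minimal sketch of the parsing step, assuming `util.params_to_dict` behaves like `urllib.parse.parse_qs` (the account id below is a placeholder):
```python
from urllib.parse import parse_qs

uri = "/admin/v1/admins?account_id=DXXXXXXXXXXXXXXXXXXX&limit=100&offset=0"
path, _, query = uri.partition("?")
print(path)              # /admin/v1/admins
print(parse_qs(query))   # {'account_id': [...], 'limit': ['100'], 'offset': ['0']}
```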
#### File: tests/admin/test_authlog.py
```python
from .. import util
import duo_client.admin
from .base import TestAdmin
class TestEndpoints(TestAdmin):
def test_get_authentication_log_v1(self):
""" Test to get authentication log on version 1 api.
"""
response = self.client_list.get_authentication_log(api_version=1)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/logs/authentication')
self.assertEqual(
util.params_to_dict(args)['account_id'],
[self.client_list.account_id])
def test_get_authentication_log_v2(self):
""" Test to get authentication log on version 1 api.
"""
response = self.client_authlog.get_authentication_log(api_version=2)
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v2/logs/authentication')
self.assertEqual(
util.params_to_dict(args)['account_id'],
[self.client_authlog.account_id])
```
#### File: tests/admin/test_bypass_codes.py
```python
import duo_client.admin
from .. import util
from .base import TestAdmin
class TestBypassCodes(TestAdmin):
def test_delete_bypass_code_by_id(self):
""" Test to delete a bypass code by id.
"""
response = self.client.delete_bypass_code_by_id('DU012345678901234567')
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'DELETE')
self.assertEqual(uri, '/admin/v1/bypass_codes/DU012345678901234567')
self.assertEqual(util.params_to_dict(args),
{'account_id': [self.client.account_id]})
def test_get_bypass_codes_generator(self):
""" Test to get bypass codes generator.
"""
generator = self.client_list.get_bypass_codes_generator()
response = next(generator)
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/bypass_codes')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client_list.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_bypass_codes(self):
""" Test to get bypass codes without params.
"""
response = self.client_list.get_bypass_codes()[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/bypass_codes')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client_list.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_bypass_codes_limit(self):
""" Test to get bypass codes with limit.
"""
response = self.client_list.get_bypass_codes(limit='20')[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/bypass_codes')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client_list.account_id],
'limit': ['20'],
'offset': ['0'],
})
def test_get_bypass_codes_offset(self):
""" Test to get bypass codes with offset.
"""
response = self.client_list.get_bypass_codes(offset='20')[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/bypass_codes')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client_list.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_bypass_codes_limit_offset(self):
""" Test to get bypass codes with limit and offset.
"""
response = self.client_list.get_bypass_codes(limit='20', offset='2')[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/bypass_codes')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client_list.account_id],
'limit': ['20'],
'offset': ['2'],
})
```
#### File: tests/admin/test_integrations.py
```python
from .. import util
import duo_client.admin
from .base import TestAdmin
class TestIntegrations(TestAdmin):
def test_get_integrations_generator(self):
""" Test to get integrations generator.
"""
generator = self.client_list.get_integrations_generator()
response = next(generator)
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/integrations')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_integrations(self):
""" Test to get integrations without pagination params.
"""
response = self.client_list.get_integrations()
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/integrations')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_integrations_with_limit(self):
""" Test to get integrations with pagination params.
"""
response = self.client_list.get_integrations(limit=20)
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/integrations')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['20'],
'offset': ['0'],
})
def test_get_integrations_with_limit_offset(self):
""" Test to get integrations with pagination params.
"""
response = self.client_list.get_integrations(limit=20, offset=2)
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/integrations')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['20'],
'offset': ['2'],
})
def test_get_integrations_with_offset(self):
""" Test to get integrations with pagination params.
"""
response = self.client_list.get_integrations(offset=9001)
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/integrations')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
```
#### File: tests/admin/test_phones.py
```python
from .. import util
import duo_client.admin
from .base import TestAdmin
class TestPhones(TestAdmin):
def test_get_phones_generator(self):
""" Test to get phones generator.
"""
generator = self.client_list.get_phones_generator()
response = next(generator)
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/phones')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_phones(self):
""" Test to get phones without pagination params.
"""
response = self.client_list.get_phones()
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/phones')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_phones_with_limit(self):
""" Test to get phones with pagination params.
"""
response = self.client_list.get_phones(limit=20)
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/phones')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['20'],
'offset': ['0'],
})
def test_get_phones_with_limit_offset(self):
""" Test to get phones with pagination params.
"""
response = self.client_list.get_phones(limit=20, offset=2)
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/phones')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['20'],
'offset': ['2'],
})
def test_get_phones_with_offset(self):
""" Test to get phones with pagination params.
"""
response = self.client_list.get_phones(offset=9001)
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/phones')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
```
#### File: tests/admin/test_tokens.py
```python
from .. import util
import duo_client.admin
from .base import TestAdmin
class TestTokens(TestAdmin):
def test_get_tokens_generator(self):
""" Test to get tokens generator.
"""
generator = self.client_list.get_tokens_generator()
response = next(generator)
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/tokens')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_tokens(self):
""" Test to get tokens without pagination params.
"""
response = self.client_list.get_tokens()
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/tokens')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_tokens_with_limit(self):
""" Test to get tokens with pagination params.
"""
response = self.client_list.get_tokens(limit=20)
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/tokens')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['20'],
'offset': ['0'],
})
def test_get_tokens_with_limit_offset(self):
""" Test to get tokens with pagination params.
"""
response = self.client_list.get_tokens(limit=20, offset=2)
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/tokens')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['20'],
'offset': ['2'],
})
def test_get_tokens_with_offset(self):
""" Test to get tokens with pagination params.
"""
response = self.client_list.get_tokens(offset=9001)
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/tokens')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
```
#### File: tests/admin/test_user_phones.py
```python
import unittest
from .. import util
import duo_client.admin
from .base import TestAdmin
class TestUserPhones(TestAdmin):
def test_get_user_phones_iterator(self):
"""Test to get phones iterator by user id
"""
iterator = self.client_list.get_user_phones_iterator(
'DU012345678901234567')
response = next(iterator)
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/phones')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_user_phones(self):
"""Test to get phones by user id
"""
response = self.client_list.get_user_phones('DU012345678901234567')[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/phones')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_user_phones_with_offset(self):
"""Test to get phones by user id with pagination params
"""
response = self.client_list.get_user_phones(
'DU012345678901234567', offset=30)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/phones')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_user_phones_with_limit(self):
"""Test to get phones by user id with pagination params
"""
response = self.client_list.get_user_phones(
'DU012345678901234567', limit=10)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/phones')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['10'],
'offset': ['0'],
})
def test_get_user_phones_with_limit_and_offset(self):
"""Test to get phones by user id with pagination params
"""
response = self.client_list.get_user_phones(
'DU012345678901234567', limit=10, offset=30)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/phones')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['10'],
'offset': ['30'],
})
if __name__ == '__main__':
unittest.main()
```
#### File: tests/admin/test_user_tokens.py
```python
import unittest
from .. import util
import duo_client.admin
from .base import TestAdmin
class TestUserTokens(TestAdmin):
def test_get_user_tokens_iterator(self):
""" Test to get tokens iterator by user id.
"""
generator = self.client_list.get_user_tokens_iterator(
'DU012345678901234567')
response = next(generator)
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/tokens')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_user_tokens(self):
""" Test to get tokens by user id.
"""
        response = self.client_list.get_user_tokens('DU012345678901234567')[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/tokens')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_user_tokens_offset(self):
""" Test to get tokens by user id with pagination params.
"""
response = self.client_list.get_user_tokens(
            'DU012345678901234567', offset=100)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/tokens')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_user_tokens_limit(self):
""" Test to get tokens by user id with pagination params.
"""
response = self.client_list.get_user_tokens(
            'DU012345678901234567', limit=500)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/tokens')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['500'],
'offset': ['0'],
})
def test_get_user_tokens_limit_and_offset(self):
""" Test to get tokens by user id with pagination params.
"""
response = self.client_list.get_user_tokens(
'DU012345678901234567', limit=10, offset=100)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/tokens')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['10'],
'offset': ['100'],
})
if __name__ == '__main__':
unittest.main()
```
#### File: tests/admin/test_user_u2f.py
```python
import unittest
from .. import util
import duo_client.admin
from .base import TestAdmin
class TestUserU2F(TestAdmin):
def test_get_user_u2ftokens_iterator(self):
""" Test to get u2ftokens iterator by user id.
"""
iterator = self.client_list.get_user_u2ftokens_iterator(
'DU012345678901234567')
response = next(iterator)
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/u2ftokens')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_user_u2ftokens(self):
""" Test to get u2ftokens by user id.
"""
response = self.client_list.get_user_u2ftokens(
'DU012345678901234567')[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/u2ftokens')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_user_u2ftokens_with_offset(self):
""" Test to get u2ftokens by user id with pagination params.
"""
response = self.client_list.get_user_u2ftokens('DU012345678901234567',
offset=30)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/u2ftokens')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_user_u2ftokens_with_limit(self):
""" Test to get u2ftokens by user id with pagination params.
"""
response = self.client_list.get_user_u2ftokens('DU012345678901234567',
limit=10)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/u2ftokens')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['10'],
'offset': ['0'],
})
def test_get_user_u2ftokens_with_limit_and_offset(self):
""" Test to get u2ftokens by user id with pagination params.
"""
response = self.client_list.get_user_u2ftokens('DU012345678901234567',
limit=10, offset=30)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/u2ftokens')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['10'],
'offset': ['30'],
})
if __name__ == '__main__':
unittest.main()
```
#### File: tests/admin/test_user_webauthn.py
```python
import unittest
from .. import util
import duo_client.admin
from .base import TestAdmin
class TestUserTestWebauthn(TestAdmin):
def test_get_user_webauthncredentials_iterator(self):
""" Test to get webauthn credentials iterator by user id.
"""
iterator = self.client_list.get_user_webauthncredentials_iterator(
'DU012345678901234567')
response = next(iterator)
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/webauthncredentials')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_user_webauthncredentials(self):
""" Test to get webauthn credentials by user id.
"""
response = self.client_list.get_user_webauthncredentials(
'DU012345678901234567')[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/webauthncredentials')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_user_webauthncredentials_with_offset(self):
""" Test to get webauthn credentials by user id with pagination params.
"""
response = self.client_list.get_user_webauthncredentials('DU012345678901234567',
offset=30)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/webauthncredentials')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_user_webauthncredentials_with_limit(self):
""" Test to get webauthn credentials by user id with pagination params.
"""
response = self.client_list.get_user_webauthncredentials('DU012345678901234567',
limit=10)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/webauthncredentials')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['10'],
'offset': ['0'],
})
def test_get_user_webauthncredentials_with_limit_and_offset(self):
""" Test to get webauthn credentials by user id with pagination params.
"""
response = self.client_list.get_user_webauthncredentials('DU012345678901234567',
limit=10, offset=30)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/webauthncredentials')
self.assertEqual(util.params_to_dict(args),
{
'account_id':[self.client.account_id],
'limit': ['10'],
'offset': ['30'],
})
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joshuacheong/Tallyfy-Google-Calendar-Sync-Tool",
"score": 3
} |
#### File: joshuacheong/Tallyfy-Google-Calendar-Sync-Tool/Tallyfy.py
```python
from __future__ import print_function
import requests
import datetime
authDict = {}
authDict["password"] = "<PASSWORD>"
authDict["email"] = "YOUR TALLYFY EMAIL"
orgID = "YOUR ORGANIZATION ID"
reqAuth = requests.post('https://v2-api.tallyfy.com/auth/login', data=authDict)
if reqAuth.status_code == 200:
print("Tallyfy API authorisation successful.")
else:
print("\n\nTallyfy API authorisation failed: \nPlease make sure your username and password is correct. \nPlease also ensure your internet is working. \nIf not, Tallyfy API may be down.")
data = reqAuth.json()
accessHeader = {'Authorization': 'Bearer ' +
data["data"]["token"]["access_token"]}
# responseString = json.dumps(data, indent=2)
# print(responseString)
url = 'https://v2-api.tallyfy.com/organizations/' + orgID + '/me/tasks'
def extractTasks(url, header, returnList):
r = requests.get(url, headers=header)
try:
r.raise_for_status()
print("Tallyfy API call for task extraction is successful.")
except requests.exceptions.HTTPError as e:
# Whoops it wasn't a 200
return "\n\nTallyfy API call for task extraction failed:\n" + str(e)
rData = r.json()["data"]
for task in rData:
deadline = task["deadline"]
deadline = datetime.datetime.strptime(deadline, '%Y-%m-%dT%H:%M:%SZ')
# Check if the task is part of a process
if task["run_id"] is None:
returnList.append({
'id': str(task["id"]),
'summary': str(task["name"]),
'description': str(task["description"]),
'start': {
'date': str(deadline.date()),
},
'end': {
'date': str(deadline.date()),
},
'reminders': {
'useDefault': True,
},
'colorId': '10', })
else:
returnList.append({
'id': str(task["id"]),
'summary': str(task["step"]["title"]),
'description': str(task["step"]["summary"]),
'start': {
'date': str(deadline.date()),
},
'end': {
'date': str(deadline.date()),
},
'reminders': {
'useDefault': True,
},
'colorId': '10', })
    try:
        nextURL = r.json()["meta"]["pagination"]["links"]["next"]
    except (KeyError, TypeError):
        # This ends the function when no further page of tasks exists.
        return
    if nextURL:
        extractTasks(nextURL, header, returnList)
``` |
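A minimal sketch of how `extractTasks` might be driven once the authentication block above has run; apart from `url` and `accessHeader`, nothing here comes from the original script, and the printed fields are simply the keys the function populates.
```python
# Hypothetical driver for extractTasks(); assumes the auth request above
# succeeded so that `url` and `accessHeader` are defined as in the script.
tasks = []
extractTasks(url, accessHeader, tasks)

# Each entry is already shaped like a Google Calendar event body.
for event in tasks:
    print("{}  {} (task {})".format(event['start']['date'], event['summary'], event['id']))
print("Extracted {} Tallyfy tasks.".format(len(tasks)))
```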
{
"source": "joshuacherry/ansible-role-apt-mirror",
"score": 2
} |
#### File: default/tests/test_default.py
```python
import os
import testinfra.utils.ansible_runner
TESTINFRA_HOSTS = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_apt_mirror_is_installed(host):
"""
Tests that apt-mirror is installed
"""
apt_mirror = host.package("apt-mirror")
assert apt_mirror.is_installed
def test_apt_mirror_list_file(host):
"""
Tests that the mirror.list file is created correctly
"""
mirror = host.file('/etc/apt/mirror.list')
assert 'set base_path /var/spool/apt-mirror' in mirror.content_string
def test_apt_sources_list_file(host):
"""
Tests that the sources.list file is created correctly
"""
sources = host.file('/etc/apt/sources.list')
assert 'Configured to point to upstream' in sources.content_string
def test_apt_cron_file(host):
"""
Tests that the cron entry file is created correctly
"""
cron = host.file('/etc/cron.d/apt-mirror')
assert '@daily apt-mirror /usr/bin/apt-mirror' in cron.content_string
``` |
{
"source": "joshuacherry/ansible-role-snipeit",
"score": 2
} |
#### File: default/tests/test_default.py
```python
import os
import testinfra.utils.ansible_runner
TESTINFRA_HOSTS = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_snipeit_env_file(host):
"""
Tests that the .env file exists and contains variables
"""
variables = host.ansible.get_variables()
hostname = variables["inventory_hostname"]
file = host.file('/var/www/'+hostname+'/.env')
assert file.exists
assert file.user == 'www-data'
assert file.group == 'www-data'
assert file.contains('not.a.real.email.server')
def test_apache2_is_installed(host):
"""
Tests that apache2 is installed
"""
apache2 = host.package("apache2")
assert apache2.is_installed
def test_apache2_running_and_enabled(host):
"""
Tests that apache2 is running and enabled
"""
apache2 = host.service("apache2")
assert apache2.is_running
assert apache2.is_enabled
def test_apache2_80_is_listening(host):
"""
Tests that apache2 is listening on ports 80
"""
apache80 = host.socket("tcp://0.0.0.0:80")
assert apache80.is_listening
def test_apache_request_status(host):
"""
Tests that apache2 gives a 200 response
"""
args = """url=http://127.0.0.1/setup
follow_redirects=none validate_certs=no"""
request = host.ansible("uri", args, check=False)
assert request["status"] == 200
assert request["server"] == "Apache"
``` |
{
"source": "joshuachinemezu/covid19-reporter-api",
"score": 3
} |
#### File: covid19-reporter-api/resources/Stats.py
```python
from flask_restful import Resource
import requests
pomber_covid_stats = requests.get('https://pomber.github.io/covid19/timeseries.json')
coronastatistics_url = 'http://api.coronastatistics.live'
class Stats(Resource):
def get(self):
return {'status': 'success', 'data': pomber_covid_stats.json()}, 200
class GlobalStatsCount(Resource):
def get(self):
        date = ''
        confirmed = deaths = recovered = active = 0
data = pomber_covid_stats.json()
countries = data.keys()
for country in countries:
current = data[country][len(data[country]) - 1]
date = current['date']
confirmed = confirmed + current['confirmed']
deaths = deaths + current['deaths']
recovered = recovered + current['recovered']
active = active + (current['confirmed'] - current['deaths'] - current['recovered'])
response = {'date': date, 'confirmed': confirmed, 'deaths': deaths, 'recovered':recovered, 'active':active, 'number_of_countries': len(countries)}
return {'status': 'success', 'data': response}, 200
class AffectedCountriesDetailed(Resource):
def get(self):
data = requests.get(coronastatistics_url + '/countries').json()
return {'status': 'success', 'data': data}, 200
``` |
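The classes above are plain Flask-RESTful resources; a minimal sketch of how they might be wired into an application follows. The route paths, module path, and `debug` flag are illustrative assumptions, not taken from the repository.
```python
# Hypothetical app wiring for the resources defined above.
from flask import Flask
from flask_restful import Api

from resources.Stats import Stats, GlobalStatsCount, AffectedCountriesDetailed

app = Flask(__name__)
api = Api(app)

api.add_resource(Stats, '/stats')                    # full pomber time series
api.add_resource(GlobalStatsCount, '/stats/global')  # aggregated latest counts
api.add_resource(AffectedCountriesDetailed, '/countries')

if __name__ == '__main__':
    app.run(debug=True)
```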
{
"source": "joshuachp/python-project-2021",
"score": 3
} |
#### File: src/sentiment_analysis/amazon_input.py
```python
import json
from pathlib import Path
from .input_data import InputData
class AmazonJsonFile(InputData):
def __init__(self, path: Path) -> None:
super().__init__()
self.path: Path = path
self.stars: list[int] = []
# Read the input file
self.read_json_file()
def read_json_file(self) -> None:
        if not (self.path.is_file() and self.path.suffix == '.json'):
raise Exception("Provide a valid path to a json file")
with open(self.path, 'r') as file:
data = json.load(file)
if not isinstance(data, list):
raise Exception(
"Error in json schema, it should be an array of objects")
for review in data:
if not isinstance(review, dict):
raise Exception(
"Error in json schema, it should be an array of objects")
if not ('text' in review and isinstance(review['text'], str)):
raise Exception("Error in json schema, missing text field")
self.data.append(review['text'])
self.stars.append(review['stars'])
``` |
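For reference, a small sketch of the JSON shape that `read_json_file` validates and how the loader might be called; the file name and the `sentiment_analysis` import path are assumptions about the package layout.
```python
# Hypothetical usage of AmazonJsonFile; assumes the package is importable
# as `sentiment_analysis` and that a scratch file may be written locally.
import json
from pathlib import Path

from sentiment_analysis.amazon_input import AmazonJsonFile

sample = [
    {"text": "Great product, works as described.", "stars": 5},
    {"text": "Stopped working after a week.", "stars": 1},
]
Path("reviews.json").write_text(json.dumps(sample))

loader = AmazonJsonFile(Path("reviews.json"))
print(loader.data)   # list of review texts
print(loader.stars)  # matching star ratings
```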
{
"source": "JoshuaCN/Input-Significance-Indicator-based-Attack",
"score": 3
} |
#### File: JoshuaCN/Input-Significance-Indicator-based-Attack/create_model.py
```python
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout, ZeroPadding2D
def create_mnist_model(xshape):
model = Sequential()
model.add(ZeroPadding2D(padding=((2, 2), (2, 2)), input_shape=xshape))
model.add(Conv2D(10, 5, activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Conv2D(25, 5, activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Conv2D(100, 4, activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
return model
def create_cifar_model(xshape):
model = Sequential()
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=xshape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
return model
```
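A brief sketch of how these builders might be used; the optimizer, loss, and input shapes are assumptions consistent with MNIST and CIFAR-10 rather than anything fixed by the file itself.
```python
# Hypothetical usage of the model builders above.
from create_model import create_mnist_model, create_cifar_model

mnist_model = create_mnist_model((28, 28, 1))  # MNIST images are 28x28x1
cifar_model = create_cifar_model((32, 32, 3))  # CIFAR-10 images are 32x32x3

for model in (mnist_model, cifar_model):
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
```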
#### File: JoshuaCN/Input-Significance-Indicator-based-Attack/isi_attack.py
```python
import numpy as np
import innvestigate
import innvestigate.utils as iutils
def isi(model, indicator, x, y=None, max_iter=30, norm=0, batch_size=100, **kwargs):
"""
    Input Significance Indicator (ISI) based attack; two indicators are included: sensitivity and relevance.
    The relevance-based attack supports l0, l2 or linf norm constraints.
    Our sensitivity-based attack only supports the l0 norm, since attacking in
    other norms with sensitivity is very similar to the 'Basic Iterative Method'.
    :param y: target class; required for a targeted attack.
    :param indicator: choose 'sensitivity' or 'relevance' as the indicator.
    :param kwargs: when norm=2, step size 'eps' and number of changed features 'n' are needed;
        when norm=np.inf, step size 'eps' and 'clip_values'=(min, max) are needed.
:return: adversarial batch
"""
if indicator == 'sensitivity' and norm != 0:
        raise ValueError('Input sensitivity based attack only supports the L0 norm; for other norms try the Basic '
                         'Iterative Method (or Projected Gradient Descent)')
indicator = 'gradient' if indicator == 'sensitivity' else 'lrp.z'
dims = list(x[0].shape)
nb_features = np.product(dims)
adv_x = np.reshape(x.astype(np.float32), (-1, nb_features))
model = iutils.model_wo_softmax(model)
preds = np.argmax(model.predict(x), axis=1)
if y is None:
analyzer = innvestigate.create_analyzer(indicator, model)
else:
analyzer = innvestigate.create_analyzer(indicator, model, neuron_selection_mode='index')
for batch_id in range(int(np.ceil(adv_x.shape[0] / float(batch_size)))):
batch_index_1, batch_index_2 = batch_id * batch_size, (batch_id + 1) * batch_size
batch = adv_x[batch_index_1:batch_index_2]
current_pred = preds[batch_index_1:batch_index_2]
if y is None:
active_indices = np.where(current_pred == preds[batch_index_1:batch_index_2])[0]
else:
target = np.zeros_like(current_pred) + y
active_indices = np.where(current_pred != target)[0]
i = 0
used_features = np.zeros_like(batch)
while len(active_indices) != 0 and i < max_iter:
r = analyzer.analyze(np.reshape(batch, [batch.shape[0]] + dims)[active_indices], neuron_selection=y)
r = np.reshape(r, (-1, nb_features))
if norm == 0:
batch, used_features = _apply_l0_perturbation(batch, r, indicator, y, active_indices, used_features)
elif norm == 2:
batch = _apply_l2_perturbation(batch, r, y, active_indices, kwargs['n'], kwargs['eps'])
elif norm == np.inf:
batch = _apply_linf_perturbation(batch, r, y, active_indices, kwargs['eps'], kwargs['clip_values'])
current_pred = np.argmax(model.predict(np.reshape(batch, [batch.shape[0]] + dims)), axis=1)
if y is None:
active_indices = np.where(current_pred == preds[batch_index_1:batch_index_2])[0]
else:
active_indices = np.where(current_pred != target)[0]
i += 1
adv_x[batch_index_1:batch_index_2] = batch
adv_x = np.reshape(adv_x, x.shape)
return adv_x
def _apply_l0_perturbation(batch, score, indicator, y, active_indices, used_features):
"""
Add perturbations to data batch, and record the features that have been used.
"""
act_used_features = used_features[active_indices]
if indicator == 'gradient':
score[act_used_features == 1] = 0 # set sensitivity of already used features to zero
ind = np.argpartition(np.abs(score), -1, axis=1)[:, -1:] # find feature with the largest abs(sensitivity)
tmp_batch = batch[active_indices]
if y is None:
tmp_batch[np.arange(len(active_indices)), ind[:, 0]] = -1 * np.sign(
score[np.arange(len(active_indices)), ind[:, 0]])
else:
tmp_batch[np.arange(len(active_indices)), ind[:, 0]] = np.sign(
score[np.arange(len(active_indices)), ind[:, 0]])
batch[active_indices] = tmp_batch
else:
if y is None:
score[act_used_features == 1] = -np.inf # set relevance of already used features to -inf
ind = np.argpartition(score, -1, axis=1)[:, -1:] # find feature with the largest relevance
else:
score[act_used_features == 1] = np.inf # set relevance of already used features to inf
ind = np.argpartition(score, 0, axis=1)[:, 0:] # find feature with the least relevance
tmp_batch = batch[active_indices]
# tmp_batch[np.arange(len(active_indices)), ind[:, 0]] *= -1
tmp_batch[np.arange(len(active_indices)), ind[:, 0]] = -np.sign(
tmp_batch[np.arange(len(active_indices)), ind[:, 0]])
batch[active_indices] = tmp_batch
used_features[active_indices, ind[:, 0]] = 1
return batch, used_features
def _apply_l2_perturbation(batch, r, y, active_indices, n, eps):
if y is None:
ind = np.argpartition(r, -n, axis=1)[:, (-n):] # find n features with the largest relevance
else:
ind = np.argpartition(r, n - 1, axis=1)[:, :n] # find n features with the least relevance
tmp_batch = batch[active_indices]
for i in range(n):
tmp_batch[np.arange(len(active_indices)), ind[:, i]] -= eps * np.sign(
tmp_batch[np.arange(len(active_indices)), ind[:, i]])
batch[active_indices] = tmp_batch
return batch
def _apply_linf_perturbation(batch, r, y, active_indices, eps, clip_values):
tmp_batch = batch[active_indices]
if y is None:
tmp_batch[np.arange(len(active_indices)), :] -= eps * np.sign(r) * np.sign(
tmp_batch[np.arange(len(active_indices)), :])
else:
tmp_batch[np.arange(len(active_indices)), :] += eps * np.sign(r) * np.sign(
tmp_batch[np.arange(len(active_indices)), :])
tmp_batch = np.clip(tmp_batch, clip_values[0], clip_values[1])
batch[active_indices] = tmp_batch
return batch
``` |
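A hedged sketch of calling `isi()` on a trained Keras classifier; the model and data names as well as the hyper-parameters (`max_iter`, `n`, `eps`, `clip_values`, target class) are illustrative assumptions. As the docstring notes, `norm=2` needs `n` and `eps`, and `norm=np.inf` additionally needs `clip_values`.
```python
# Hypothetical usage of isi(); `model` is assumed to be a trained Keras
# classifier (softmax output) and `x_test` a batch preprocessed the same
# way as the training data.
import numpy as np

# Untargeted L0 attack driven by relevance (LRP).
adv_l0 = isi(model, 'relevance', x_test[:100], norm=0, max_iter=30)

# Untargeted L2-style attack: perturb the 5 most relevant features by eps each step.
adv_l2 = isi(model, 'relevance', x_test[:100], norm=2, n=5, eps=0.1)

# Targeted Linf attack towards class 3, clipping back into an assumed input range.
adv_linf = isi(model, 'relevance', x_test[:100], y=3, norm=np.inf,
               eps=0.01, clip_values=(-0.5, 0.5))
```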
{
"source": "joshua-cogliati-inl/HERON",
"score": 2
} |
#### File: HERON/src/DispatchPlot.py
```python
import itertools as it
import matplotlib as mpl
mpl.use('Agg') # Prevents the script from blocking while plotting
import matplotlib.pyplot as plt
from typing import List, Dict
from PluginBaseClasses.OutStreamPlotPlugin import PlotPlugin, InputTypes, InputData
# Matplotlib Global Settings
plt.rc("figure", figsize=(12, 8), titleweight='bold') # type: ignore
plt.rc("axes", titleweight="bold", labelsize=12, axisbelow=True, grid=True) # type: ignore
plt.rc("savefig", bbox="tight") # type: ignore
plt.rc("legend", fontsize=12) # type:ignore
plt.rc(["xtick", "ytick"], labelsize=10) # type: ignore
class DispatchPlot(PlotPlugin):
@classmethod
def getInputSpecification(cls):
"""
Define the acceptable user inputs for this class.
@ In, None
@ Out, specs, InputData.ParameterInput,
"""
specs = super().getInputSpecification()
specs.addSub(InputData.parameterInputFactory('source', contentType=InputTypes.StringType))
specs.addSub(InputData.parameterInputFactory('macro_variable', contentType=InputTypes.StringType))
specs.addSub(InputData.parameterInputFactory('micro_variable', contentType=InputTypes.StringType))
specs.addSub(InputData.parameterInputFactory('signals', contentType=InputTypes.StringListType))
return specs
def __init__(self):
"""
Constructor.
@ In, None
@ Out, None
"""
super().__init__()
self.printTag = 'HERON.DispatchPlot'
self._sourceName = None
self._source = None
self._macroName = None
self._microName = None
self._addSignals = []
def handleInput(self, spec):
"""
Reads in data from the input file
@ In, spec, InputData.ParameterInput, input information
@ Out, None
"""
super().handleInput(spec)
for node in spec.subparts:
if node.getName() == 'source':
self._sourceName = node.value
elif node.getName() == 'macro_variable':
self._macroName = node.value
elif node.getName() == 'micro_variable':
self._microName = node.value
elif node.getName() == 'signals':
self._addSignals = node.value
def initialize(self, stepEntities):
"""
Set up plotter for each run
@ In, stepEntities, dict, entities from the Step
@ Out, None
"""
super().initialize(stepEntities)
src = self.findSource(self._sourceName, stepEntities)
if src is None:
self.raiseAnError(IOError, f'Source DataObject "{self._sourceName}" was not found in the Step!')
self._source = src
@staticmethod
def _group_by(iterable: List[str], idx: int) -> Dict[str, List[str]]:
"""
Return a dictionary containing grouped dispatch variables.
@ In, iterable, List[str], a list of variable names to group-by.
@ In, idx, int, the index of the variable to group-by.
@ Out, gr, Dict[str, List[str]], a dictionary mapping of grouped variable names.
"""
gr = {}
for var in iterable:
# var is expected to have the form: 'Dispatch__component__tracker__resource'
key = var.split('__')[idx]
if key in gr.keys():
gr[key].append(var)
else:
gr[key] = [var]
return gr
def plot_component(self, fig, axes, df, grp_vars, comp_idx, sid, mstep, cid) -> None:
"""
Plot and output the optimized dispatch for a specific sample, year, and cluster.
@ In, fig, matplotlib.figure.Figure, current figure used for plotting.
@ In, axes, List[List[matplotlib.Axes]], a list of axes to plot each variable.
@ In, df, pandas.DataFrame, a dataframe containing data to plot.
@ In, grp_vars, Dict[str, List[str]], a dictionary mapping components to variables.
@ In, comp_idx, Dict[str, int], a dictionary mapping components to numbers.
@ In, sid, int, the sample ID.
@ In, mstep, int, the macro step.
@ In, cid, int, the cluster ID.
@ Out, None
"""
for (key, group), ax in zip(grp_vars.items(), axes.flat):
lines = []
for var in group:
_, comp_name, tracker, _ = var.split('__')
comp_label = comp_name.replace('_', ' ').title()
cidx = comp_idx[comp_name]
# NOTE custom behavior based on production/storage labels
plot_ax = ax
var_label = f'{comp_label}, {tracker.title()}'
ls = '-'
mk = '1'
if tracker == 'production':
var_label = comp_label
elif tracker == 'level':
plot_ax = ax.twinx()
ls = ':'
mk = '.'
elif tracker == 'charge':
mk = '^'
elif tracker == 'discharge':
mk = 'v'
# Plot the micro-step variable on the x-axis (i.e Time)
ln = plot_ax.plot(df[self._microName], df[var], marker=mk, linestyle=ls, label=var_label, color=f"C{cidx}")
lines.extend(ln)
ax.set_title(key.title())
ax.set_xlabel(self._microName)
ax.legend(lines, [l.get_label() for l in lines], loc='center left', bbox_to_anchor=(1.03, 0.5))
# Output and save the image
file_name = f"dispatch_id{sid}_y{mstep}_c{cid}.png"
fig.tight_layout()
fig.savefig(file_name)
self.raiseAMessage(f'Saved figure to "{file_name}"')
plt.clf()
def plot_signal(self, fig, axes, df, sid, mstep, cid) -> None:
"""
Plot and output the synthetic history for a specific sample, year, and cluster.
@ In, fig, matplotlib.figure.Figure, a current figure used for plotting.
@ In, axes, List[List[matplotlib.Axes]], a list of axes to plot each variable.
@ In, df, pandas.DataFrame, a dataframe containing data to plot.
@ In, sid, int, the sample ID.
@ In, mstep, int, the macro step.
@ In, cid, int, the cluster ID.
"""
for name, ax in zip(self._addSignals, axes.flat):
var = df.get(name, None)
if var is None:
        self.raiseAnError(IOError, f'Requested signal variable "{name}" but variable not in data!')
ax.plot(df[self._microName], var, marker='.', linestyle='-', label=name)
ax.set_title(name.title())
ax.set_xlabel(self._microName)
ax.legend(loc='center left', bbox_to_anchor=(1.03, 0.5))
signal_file_name = f"dispatch_id{sid}_y{mstep}_c{cid}_SIGNAL.png"
fig.tight_layout()
fig.savefig(signal_file_name)
self.raiseAMessage(f'Saved figure to "{signal_file_name}"')
plt.clf()
def run(self):
"""
Generate the plot
@ In, None
@ Out, None
"""
assert self._source is not None
ds = self._source.asDataset()
if ds is None:
self.raiseAWarning(f'No data in "{self._source.name}" data object; nothing to plot!')
return
df = ds.to_dataframe().reset_index()
dispatch_vars = list(filter(lambda x: "Dispatch__" in x, df.columns))
grouped_vars = self._group_by(dispatch_vars, -1)
grouped_comp = self._group_by(dispatch_vars, 1)
comp_idx = {comp: i for i, comp in enumerate(grouped_comp.keys())}
# Dimension variables to plot
sample_ids = df[self._source.sampleTag].unique()
cluster_ids = df['_ROM_Cluster'].unique() # TODO: find way to not hardcode name
macro_steps = df[self._macroName].unique()
for sample_id, macro_step, cluster_id in it.product(sample_ids, macro_steps, cluster_ids):
# Filter data to plot correct values for current dimension
dat = df[
(df[self._source.sampleTag] == sample_id) &
(df[self._macroName] == macro_step) &
(df['_ROM_Cluster'] == cluster_id)
]
# TODO: find a way to combine both plots into one output.
# Currently, this is difficult to do because of the nested
# nature of the subplots, as well as the dynamic number of
# components and signals to plot (i.e. dynamically nested subplots)
# Output optimized component dispatch for current dimension.
fig0, axs0 = plt.subplots(len(grouped_vars), 1, sharex=True, squeeze=False)
self.plot_component(fig0, axs0, dat, grouped_vars, comp_idx, sample_id, macro_step, cluster_id)
# Output synthetic time series signal for current dimension.
fig1, axs1 = plt.subplots(len(self._addSignals), 1, sharex=True, squeeze=False)
self.plot_signal(fig1, axs1, dat, sample_id, macro_step, cluster_id)
```
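The plotting above relies on the `Dispatch__component__tracker__resource` naming convention that `_group_by` splits on; the following standalone illustration (with invented variable names, independent of RAVEN) shows the two groupings that `run` builds.
```python
# Illustration only: how dispatch variable names are grouped.
# The variable names below are invented for the example.
variables = [
    'Dispatch__turbine__production__electricity',
    'Dispatch__battery__charge__electricity',
    'Dispatch__battery__discharge__electricity',
    'Dispatch__battery__level__electricity',
]

by_resource = {}   # equivalent of _group_by(variables, -1), keyed by resource
by_component = {}  # equivalent of _group_by(variables, 1), keyed by component
for var in variables:
    by_resource.setdefault(var.split('__')[-1], []).append(var)
    by_component.setdefault(var.split('__')[1], []).append(var)

print(by_resource)   # {'electricity': [... all four ...]}
print(by_component)  # {'turbine': [...], 'battery': [...]}
```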
#### File: mechanics/labels/transfers.py
```python
def flex_price(data, meta):
sine = meta['HERON']['RAVEN_vars']['Signal']
t = meta['HERON']['time_index']
# DispatchManager
# scale electricity consumed to flex between -1 and 1
amount = - 2 * (sine[t] - 0.5)
labels = meta['HERON']['Case'].get_labels()
pivot_id = meta['HERON']['Case'].get_time_name()
year_id = meta['HERON']['Case'].get_year_name()
data = {'reference_price': amount,
'case_labels': labels,
'pivot_id': pivot_id,
'year_id': year_id}
return data, meta
``` |
{
"source": "joshua-cogliati-inl/TEAL",
"score": 2
} |
#### File: TEAL/tests/HourlyObjectOrientedTest.py
```python
import os
import sys
import numpy as np
import pandas as pd
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
from TEAL import CashFlows
from TEAL import CashFlow as RunCashFlow
def run(df):
"""
Main run command.
@ In, df, pandas.Dataframe, loaded data to run
@ Out, metrics, dict, dictionary of metric results
"""
settings = build_econ_settings()
components = build_econ_components(df, settings)
metrics = RunCashFlow.run(settings, list(components.values()), {})
return metrics
def build_econ_settings():
"""
Constructs global settings for econ run
@ In, None
@ Out, settigns, CashFlow.GlobalSettings, settings
"""
params = {'DiscountRate': 0.10,
'tax': 0.21,
'inflation': 0.02184,
'ProjectTime': 5,
'Indicator': {'name': ['NPV'],
'active': ['MainComponent|RecursHourly', 'MainComponent|RecursYearly', 'MainComponent|Cap']}
}
settings = CashFlows.GlobalSettings()
settings.setParams(params)
settings._verbosity = 0
return settings
def build_econ_components(df, settings):
"""
Constructs run components
@ In, df, pandas.Dataframe, loaded data to run
@ In, settings, CashFlow.GlobalSettings, settings
@ Out, comps, dict, dict mapping names to CashFlow component objects
"""
# in this simple case, the project life is the component life for all components.
life = settings.getProjectTime()
# construct components
comps = {}
## first (and only) component in the case
name = 'MainComponent'
comp = CashFlows.Component()
comps[name] = comp
params = {'name': name,
'Life_time': 4}
comp.setParams(params)
## add cashflows to this component
cfs = []
### recurring cashflow evaluated hourly, to show usage
cf = createRecurringHourly(df, comp, 'A', 'D')
cfs.append(cf)
print('DEBUGG hourly recurring:', cf._yearlyCashflow)
### recurring cashflow evaluated yearly
cf = createRecurringYearly(df, comp, 'A', 'D')
cfs.append(cf)
print('DEBUGG yearly recurring:', cf._yearlyCashflow)
### capex cashflow
cf = createCapex(df, comp, 'B', 'D')
cfs.append(cf)
## amortization
cf.setAmortization('MACRS', 3)
amorts = comp._createDepreciation(cf)
cfs.extend(amorts)
# finally, add cashflows to component
comp.addCashflows(cfs)
return comps
def createCapex(df, comp, driver, alpha):
"""
Constructs capex object
@ In, df, pandas.Dataframe, loaded data to run
@ In, comp, CashFlow.Component, component this cf will belong to
@ In, driver, string, variable name in df to take driver from
@ In, alpha, string, variable name in df to take alpha from
@ Out, comps, dict, dict mapping names to CashFlow component objects
"""
life = comp.getLifetime()
# extract alpha, driver as just one value
alpha = df[alpha].mean()
driver = df[driver].mean()
cf = CashFlows.Capex()
cf.name = 'Cap'
cf.initParams(life)
cfFarams = {'name': 'Cap',
'alpha': alpha,
'driver': driver,
'reference': 1.0,
'X': 0.8,
'depreciate': 3,
'mult_target': None,
'inflation': False,
}
cf.setParams(cfFarams)
return cf
def createRecurringYearly(df, comp, driver, alpha):
"""
Constructs recurring cashflow with one value per year
@ In, df, pandas.Dataframe, loaded data to run
@ In, comp, CashFlow.Component, component this cf will belong to
@ In, driver, string, variable name in df to take driver from
@ In, alpha, string, variable name in df to take alpha from
@ Out, comps, dict, dict mapping names to CashFlow component objects
"""
life = comp.getLifetime()
cf = CashFlows.Recurring()
cfFarams = {'name': 'RecursYearly',
'X': 1,
'mult_target': None,
'inflation': False}
cf.setParams(cfFarams)
# because our data comes hourly, collapse it to be yearly
## 0 for first year (build year) -> TODO couldn't this be automatic?
alphas = np.zeros(life + 1)
drivers = np.zeros(life + 1)
alphas[1:] = df[alpha].groupby(df.index.year).mean().values[:life]
drivers[1:] = df[driver].groupby(df.index.year).mean().values[:life]
# construct annual summary cashflows
cf.computeYearlyCashflow(alphas, drivers)
return cf
def createRecurringHourly(df, comp, driver, alpha):
"""
Constructs recurring cashflow with one value per hour
@ In, df, pandas.Dataframe, loaded data to run
@ In, comp, CashFlow.Component, component this cf will belong to
@ In, driver, string, variable name in df to take driver from
@ In, alpha, string, variable name in df to take alpha from
@ Out, comps, dict, dict mapping names to CashFlow component objects
"""
life = comp.getLifetime()
cf = CashFlows.Recurring()
cfFarams = {'name': 'RecursHourly',
'X': 1,
'mult_target': None,
'inflation': False}
cf.setParams(cfFarams)
cf.initParams(life)
yearDfs = df.groupby([df.index.year])
for year, yearDf in yearDfs:
y = year - 2018
if y > life:
break
cf.computeIntrayearCashflow(y, yearDf[driver], yearDf[alpha])
return cf
if __name__ == '__main__':
# load multiyear data
  ## TODO use analytic data! this is data from a non-proprietary report, but not analytic.
targets = ['A', 'B', 'C', 'D', 'Year', 'Time']
indices = ['RAVEN_sample_ID']
print('Loading data ...')
full_df = pd.read_csv('aux_file/hourly.csv',
index_col=indices,
usecols=targets+indices) #,
#nrows=300000)
# just the first sample
df = full_df.loc[0]
years = pd.to_datetime(df['Year'].values + 2019, format='%Y')
hours = pd.to_timedelta(df['Time'].values, unit='H')
datetime = years + hours
df.index = datetime
df = df.sort_index()[['A', 'B', 'C', 'D']]
metrics = run(df)
calculated = metrics['NPV']
correct = 2.213218922e+08
# NOTE if inflation is applied to all cashflows, answer is 2.080898547e+08
if abs(calculated - correct)/correct < 1e-8:
print('Success!')
sys.exit(0)
else:
print('ERROR: correct: {:1.3e}, calculated: {:1.3e}, diff {:1.3e}'.format(correct, calculated, correct-calculated))
``` |
{
"source": "joshuacook/build-tooling",
"score": 2
} |
#### File: build-tooling/bdc/__init__.py
```python
from __future__ import annotations # PEP 563 (allows annotation forward refs)
import sys
import os
from os import path
import re
from datetime import datetime
from configparser import ConfigParser, NoOptionError
from enum import Enum
import master_parse
from gendbc import gendbc
from db_edu_util.notebooktools import parse_source_notebook, NotebookError
from db_edu_util import (
databricks,
wrap2stdout,
error,
verbose,
set_verbosity,
warn,
verbosity_is_enabled,
info,
die,
EnhancedTextWrapper,
working_directory,
)
from db_edu_util.databricks import DatabricksError
from grizzled.file import eglob
from bdc.bdcutil import *
from string import Template as StringTemplate
import dataclasses
from dataclasses import dataclass
from tempfile import TemporaryDirectory
import codecs
import shutil
import git
from typing import (
Sequence,
Any,
Type,
TypeVar,
Set,
Optional,
Dict,
AnyStr,
Tuple,
NoReturn,
Generator,
Union,
Callable,
Set,
)
__all__ = [
"bdc_check_build",
"bdc_list_notebooks",
"bdc_build_course",
"bdc_download",
"bdc_upload",
"bdc_check_build",
"bdc_load_build",
"bdc_print_info",
"BuildError",
"UploadDownloadError",
"BuildConfigError",
"UnknownFieldsError",
"BuildData",
]
# ---------------------------------------------------------------------------
# Constants
#
# (Some constants are below the class definitions.)
# ---------------------------------------------------------------------------
VERSION = "1.39.0"
DEFAULT_BUILD_FILE = "build.yaml"
PROG = os.path.basename(sys.argv[0])
USAGE = f"""
{PROG}, version {VERSION}
Usage:
{PROG} (--version)
{PROG} --info [--shell] [BUILD_YAML]
{PROG} --tag [BUILD_YAML]
{PROG} (-C | --check) [BUILD_YAML]
{PROG} (-h | --help)
{PROG} [-o | --overwrite] [-v | --verbose] [-d DEST | --dest DEST] [BUILD_YAML]
{PROG} --list-notebooks [BUILD_YAML]
{PROG} --upload [-v | --verbose] [-P PROF | --dprofile PROF ] SHARD_PATH [BUILD_YAML]
{PROG} --download [-v | --verbose] [-P PROF | --dprofile PROF ] SHARD_PATH [BUILD_YAML]
MASTER_CFG is the build tool's master configuration file.
BUILD_YAML is the build file for the course to be built. Defaults to {DEFAULT_BUILD_FILE}.
SHARD_PATH is the path to a folder on a Databricks shard, as supported
by the Databricks CLI. You must install databricks-cli and configure it
properly for --upload and --download to work.
Options:
-h --help Show this screen.
-C --check Parse the build file and validate that the referenced
paths actually exist.
-d DEST --dest DEST Specify output destination. Defaults to
~/tmp/curriculum/<course_id>
-o --overwrite Overwrite the destination directory, if it exists.
-v --verbose Print what's going on to standard output.
--info Display the course name and version, and exit
--shell Used with --info, this option causes the course
name and version to be emitted as shell variables.
--list-notebooks List the full paths of all notebooks in a course
--tag Create a Git tag from the course name and version
in the build.yaml. Applies the tag to the topmost
(i.e., HEAD) commit on the current branch of the
repository containing the build.yaml.
--upload Upload all notebooks to a folder on Databricks.
--download Download all notebooks from a folder on Databricks,
copying them into their appropriate locations on the
local file system, as defined in the build.yaml file.
-P PROF --dprofile PROF When uploading and downloading, pass authentication
profile PROF to the "databricks" commands. This
option corresponds exactly with the --profile
argument to "databricks".
--version Display version and exit.
"""
DEFAULT_INSTRUCTOR_FILES_SUBDIR = "InstructorFiles"
DEFAULT_INSTRUCTOR_LABS_DBC = "Instructor-Labs.dbc"
DEFAULT_STUDENT_FILES_SUBDIR = "StudentFiles"
DEFAULT_STUDENT_LABS_DBC = "Labs.dbc" # in the student directory
SLIDES_SUBDIR = "Slides" # in the instructor directory
DATASETS_SUBDIR = "Datasets" # in the student directory
INSTRUCTOR_NOTES_SUBDIR = "InstructorNotes" # in the instructor directory
# Post master-parse variables (and associated regexps)
TARGET_LANG = "target_lang"
TARGET_EXTENSION = "target_extension"
NOTEBOOK_TYPE = "notebook_type"
OUTPUT_DIR = "output_dir"
PROFILE_VAR = "profile"
DEFAULT_PROFILES = {"amazon": "Amazon", "azure": "Azure"}
PROFILE_ABBREVIATIONS = {"amazon": "am", "azure": "az"}
POST_MASTER_PARSE_VARIABLES = {
TARGET_LANG: variable_ref_patterns(TARGET_LANG),
TARGET_EXTENSION: variable_ref_patterns(TARGET_EXTENSION),
NOTEBOOK_TYPE: variable_ref_patterns(NOTEBOOK_TYPE),
OUTPUT_DIR: variable_ref_patterns(OUTPUT_DIR),
PROFILE_VAR: variable_ref_patterns(PROFILE_VAR),
}
# EXT_LANG is used when parsing the YAML file.
EXT_LANG = {".py": "Python", ".r": "R", ".scala": "Scala", ".sql": "SQL"}
# LANG_EXT: Mapping of language (in lower case) to extension
LANG_EXT = dict([(v.lower(), k) for k, v in list(EXT_LANG.items())])
# Used to create a Scala version notebook in the top-level. This is a string
# template, with the following variables:
#
# {course_name} - the course name
# {version} - the version
# {build_timestamp} - the build timestamp, in printable format
VERSION_NOTEBOOK_TEMPLATE = """// Databricks notebook source
// MAGIC %md # Course: ${course_name}
// MAGIC * Version ${version}
// MAGIC * Built ${build_timestamp}
// MAGIC * Git revision: ${git_commit}
// MAGIC
// MAGIC Copyright \u00a9 ${year} Databricks, Inc.
"""
# The version notebook file name. Use as a format string, with {0} as the
# version number.
VERSION_NOTEBOOK_FILE = "Version-{0}.scala"
ANSWERS_NOTEBOOK_PATTERN = re.compile(r"^.*_answers\..*$")
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
errors: int = 0
# ---------------------------------------------------------------------------
# Exported Exceptions
# ---------------------------------------------------------------------------
class BDCError(Exception):
"""
Base class for all build errors. Sometimes thrown on its own.
"""
def __init__(self, message: str):
super().__init__(message)
self._message = message
@property
def message(self):
return self._message
class BuildError(BDCError):
"""
Thrown when an error is encountered while building a course.
"""
pass
class UploadDownloadError(BDCError):
"""
Thrown to indicate an error uploading or downloading notebooks.
"""
pass
class BuildConfigError(BuildError):
"""
Subclass of BuildError indicating an error with the build configuration
(YAML) file.
"""
pass
class UnknownFieldsError(BuildConfigError):
"""
Subclass of BuildConfigError indicating that there are unknown fields
in a configuration section.
"""
def __init__(self, parent_section: str, section: str, bad_keys: Set[str]):
"""
:param parent_section: the parent section
:param section: the section containing the bad fields
:param bad_keys: the bad fields
"""
keys = ", ".join(bad_keys)
super(BuildConfigError, self).__init__(
f'"{parent_section}": Bad fields in "{section}" section: {keys}'
)
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
# See https://github.com/python/typing/issues/58#issuecomment-326240794
NotebookTypeClass = TypeVar("NotebookTypeClass", bound="NotebookType")
class NotebookType(Enum):
EXERCISES = "exercises"
INSTRUCTOR = "instructor"
ANSWERS = "answers"
@classmethod
def default_mappings(cls: Type[NotebookTypeClass]) -> Dict[NotebookType, str]:
return {
NotebookType.EXERCISES: "exercises",
NotebookType.INSTRUCTOR: "instructor",
NotebookType.ANSWERS: "answers",
}
def suffix_for(self) -> str:
"""
Get the filename suffix for the notebook type (e.g., '_exercises').
:return: the suffix
"""
return NotebookType.suffixes()[self]
@classmethod
def suffixes(cls: Type[NotebookTypeClass]) -> Dict[NotebookType, str]:
"""
Get a dict of NotebookType -> suffix mappings
:return: the mappings
"""
return {
NotebookType.EXERCISES: "_exercises",
NotebookType.INSTRUCTOR: "_instructor",
NotebookType.ANSWERS: "_answers",
}
def __repr__(self):
return f"NotebookType.{self.name}"
@dataclass
class UploadDownloadMapping:
"""
Stores the source-path to target paths mapping for a source notebook.
A given source notebook can map to multiple targets on the remote
Databricks instance.
Fields:
notebook: the NotebookData object for the source notebook
source_path: full path to the source notebook on the local machine
remote_targets: list of remote targets (partial paths)
"""
notebook: NotebookData
source_path: str
remote_targets: Sequence[str] = dataclasses.field(default_factory=list)
@dataclass(frozen=True)
class MiscFileData:
"""
Stores miscellaneous file data.
Fields:
src: path to source file
dest: path to destination
is_template: whether or not the source file is a template
dest_is_dir: whether the destination is a directory or a file
only_in_profile: if set, the file is only defined for a particular profile
"""
src: str
dest: str
is_template: bool
dest_is_dir: bool
only_in_profile: Optional[str]
@dataclass(frozen=True)
class SlideData:
"""
Stores slide file data.
Fields:
src: path to source file
dest: path to destination
"""
src: str
dest: str
@dataclass(frozen=True)
class DatasetData:
"""
Stores a dataset specification.
Fields:
src: path to source file
dest: path to destination
license: path to license file
readme: path to README
"""
src: str
dest: str
license: str
readme: str
@dataclass(frozen=True)
class MarkdownInfo:
"""
Stores information on how to process Markdown source files.
Fields:
html_stylesheet: optional path to stylesheet
"""
html_stylesheet: Optional[str]
@dataclass(frozen=True)
class NotebookHeading:
"""
Stores notebook heading information.
Fields:
path: path to heading file
enabled: True if the heading is enabled, False if disabled
"""
path: Optional[str]
enabled: bool
@dataclass(frozen=True)
class NotebookFooter:
"""
Stores notebook footer information.
Fields:
path: path to footer file
enabled: True if the footer is enabled, False if disabled
"""
path: Optional[str]
enabled: bool
@dataclass(frozen=True)
class BundleFile:
"""
Information about a file to include in the bundle.
Fields:
src: path to the source file
dest: path within the bundle
"""
src: str
dest: str
@dataclass(frozen=True)
class Bundle:
"""
Parsed bundle information.
- zipfile: the zip file for the bundle
- files: a list of BundleFile objects
"""
zipfile: str
files: Sequence[BundleFile] = dataclasses.field(default_factory=list)
@dataclass(frozen=True)
class OutputInfo:
student_dir: str
student_dbc: str
instructor_dir: str
instructor_dbc: str
@property
def student_labs_subdir(self) -> str:
(base, _) = path.splitext(self.student_dbc)
return joinpath(self.student_dir, base)
@property
def instructor_labs_subdir(self) -> str:
(base, _) = path.splitext(self.instructor_dbc)
return joinpath(self.instructor_dir, base)
@dataclass(frozen=True)
class CourseInfo(DefaultStrMixin):
name: str
version: str
class_setup: str
schedule: str
instructor_prep: str
copyright_year: str
deprecated: bool
course_type: master_parse.CourseType
title: Optional[str] = None
@property
def course_title(self):
if self.title:
return self.title
return self.name.replace("-", " ").replace("_", " ")
@property
def course_id(self) -> str:
"""
The course ID, which is a combination of the course name and the
version.
:return: the course ID string
"""
return f"{self.name}-{self.version}"
@dataclass(frozen=True)
class NotebookDefaults(DefaultStrMixin):
dest: Optional[str] = None
master: Optional[Dict[str, Any]] = dataclasses.field(default_factory=dict)
variables: Optional[Dict[str, str]] = dataclasses.field(default_factory=dict)
# See https://github.com/python/typing/issues/58#issuecomment-326240794
MasterParseInfoClass = TypeVar("MasterParseInfoClass", bound="MasterParseInfo")
class MasterParseInfo(DefaultStrMixin):
"""
Parsed master parser data for a notebook.
"""
LANGUAGES = ("python", "scala", "r", "sql")
VALID_FIELDS = {
"enabled": bool,
"python": bool,
"scala": bool,
"r": bool,
"sql": bool,
"answers": bool,
"exercises": bool,
"instructor": bool,
"heading": NotebookHeading.__class__,
"footer": NotebookFooter.__class__,
"encoding_in": str,
"encoding_out": str,
"debug": bool,
"enable_templates": bool,
"instructor_notes": str,
}
VALID_HEADING_FIELDS = {"path": str, "enabled": bool}
VALID_FOOTER_FIELDS = {"path": str, "enabled": bool}
def __init__(
self,
enabled: bool = False,
python: bool = True,
scala: bool = True,
r: bool = False,
sql: bool = False,
answers: bool = True,
exercises: bool = True,
instructor: bool = True,
instructor_notes: Optional[str] = None,
heading: Optional[NotebookHeading] = None,
footer: Optional[NotebookFooter] = None,
encoding_in: str = "UTF-8",
encoding_out: str = "UTF-8",
enable_templates: bool = False,
debug: bool = False,
):
"""
Create a new parsed master parse data object
:param enabled: whether master parsing is enabled
:param python: whether Python notebook generation is enabled
:param scala: whether Scala notebook generation is enabled
:param r: whether R notebook generation is enabled
:param sql: whether SQL notebook generation is enabled
:param answers: whether to generate answer notebooks
        :param exercises: whether to generate exercise notebooks
:param instructor: whether to generate instructor notebooks
:param instructor_notes: where to write notebook instructor notes (a
Markdown file)
:param heading: heading information (a NotebookHeading object)
:param footer: footer information (a NotebookFooter object)
:param encoding_in: the encoding of the source notebooks
:param encoding_out: the encoding to use when writing notebooks
:param enable_templates: whether to treat Markdown cells as Mustache
templates
:param debug: enable/disable debug messages for the master
parse phase
"""
if heading is None:
heading = NotebookHeading(path=None, enabled=True)
if footer is None:
footer = NotebookFooter(path=None, enabled=True)
self.enabled = enabled
self.python = python
self.scala = scala
self.r = r
self.sql = sql
self.answers = answers
self.exercises = exercises
self.instructor = instructor
self.instructor_notes = instructor_notes
self.heading = heading
self.footer = footer
self.encoding_in = encoding_in
self.encoding_out = encoding_out
self.enable_templates = enable_templates
self.debug = debug
def lang_is_enabled(self, lang: str) -> bool:
"""
Determine if a specific language is enabled.
:param lang: the name (string) for the language, in lower case
        :return: True if it's enabled, False if not
"""
return self.__getattribute__(lang)
def enabled_langs(self) -> Sequence[str]:
"""
Return a list of the enabled languages. e.g., ['scala', 'python']
:return: the list of enabled languages, which could be empty
"""
return [i for i in self.LANGUAGES if self.__getattribute__(i)]
def update_from_dict(self, d: Dict[str, Any]) -> NoReturn:
"""
Update the fields in this master parse record from a dictionary.
The dictionary should represent a master parse dictionary (e.g., as
parsed from YAML). Keys can be missing. Extra keys are ignored.
:param d: the dictionary
"""
for k in list(self.VALID_FIELDS.keys()):
if k in d:
if k == "heading":
heading_data = d[k]
if isinstance(heading_data, NotebookHeading):
self.heading = heading_data
else:
self.heading = self._parse_heading(d[k])
elif k == "footer":
footer_data = d[k]
if isinstance(footer_data, NotebookFooter):
self.footer = footer_data
else:
self.footer = self._parse_footer(d[k])
else:
self.__setattr__(k, d[k])
@classmethod
def extra_keys(
cls: Type[MasterParseInfoClass], d: Dict[str, Any]
) -> Optional[Set[str]]:
"""
Check a dictionary of master parse values for extra (unknown) keys.
:param d: the dictionary to check
:return: any unknown keys, or None if there aren't any.
"""
extra = set(d.keys()) - set(cls.VALID_FIELDS.keys())
heading = d.get("heading") or {}
for k in set(heading.keys()) - set(cls.VALID_HEADING_FIELDS.keys()):
extra.add(f"heading.{k}")
if len(extra) == 0:
extra = None
return extra
@classmethod
def from_dict(
cls: Type[MasterParseInfoClass], d: Dict[str, Any]
) -> MasterParseInfo:
"""
Create a MasterParseData object from a dictionary of values.
:param d: the dictionary.
:return: The object. Throws exceptions on error. Extra keys are not
interpreted as an error here; callers can report those errors
with more context.
"""
        heading = cls._parse_heading(d.get("heading"))
return MasterParseInfo(
enabled=bool_field(d, "enabled", False),
python=bool_field(d, "python", True),
scala=bool_field(d, "scala", True),
r=bool_field(d, "r", True),
sql=bool_field(d, "sql", False),
answers=bool_field(d, "answers", True),
exercises=bool_field(d, "exercises", True),
instructor=bool_field(d, "instructor", True),
heading=heading,
encoding_in=d.get("encoding_in", "UTF-8"),
encoding_out=d.get("encoding_out", "UTF-8"),
debug=bool_field(d, "debug", False),
)
def to_dict(self) -> Dict[str, Any]:
"""
Convert this object into a dictionary.
:return: the dictionary of fields
"""
res = {}
res.update(self.__dict__)
return res
@classmethod
def _parse_footer(
cls: Type[MasterParseInfoClass], footer_data: Dict[AnyStr, AnyStr]
) -> NotebookFooter:
if footer_data:
footer = NotebookFooter(
path=footer_data.get("path", DEFAULT_NOTEBOOK_FOOTER.path),
enabled=bool_field(
footer_data, "enabled", DEFAULT_NOTEBOOK_FOOTER.enabled
),
)
else:
footer = NotebookFooter(path=None, enabled=True)
return footer
@classmethod
def _parse_heading(
cls: Type[MasterParseInfoClass], heading_data: Dict[AnyStr, Any]
) -> NotebookHeading:
if heading_data:
heading = NotebookHeading(
path=heading_data.get("path", DEFAULT_NOTEBOOK_HEADING.path),
enabled=bool_field(
heading_data, "enabled", DEFAULT_NOTEBOOK_HEADING.enabled
),
)
else:
heading = NotebookHeading(path=None, enabled=True)
return heading
class NotebookData(DefaultStrMixin):
"""
Parsed notebook data.
"""
def __init__(
self: NotebookData,
src: str,
dest: str,
upload_download: bool = True,
include_in_build: bool = True,
master: Optional[MasterParseInfo] = None,
variables: Optional[Dict[AnyStr, AnyStr]] = None,
only_in_profile: Optional[AnyStr] = None,
skip: bool = False,
):
"""
Captures parsed notebook data.
:param src: Partial or full path to the notebook
:param dest: Destination for the notebook, which can
contain variables. This value can be set
to `None`, as long as a destination is
available in the notebook defaults.
:param upload_download: Whether upload and download are enabled
for this notebook.
:params include_in_build: True to include the file in the build,
False to exclude it. This setting has no
bearing on upload/download.
:param master: The master parse data.
:param variables: Any variables for the notebook.
:param only_in_profile: Profile to which notebook is restricted, if
any.
:param skip: Whether or not to skip the notebook.
"""
super(NotebookData, self).__init__()
self.src = src
self.dest = dest
self.master = master
self.upload_download = upload_download
self.include_in_build = include_in_build
self.variables = variables or {}
self.only_in_profile = only_in_profile
self.skip = skip
def master_enabled(self) -> bool:
"""
Determine whether master notebook processing is enabled for this
notebook.
:return: true or false
"""
return self.master.enabled
def total_master_langs(self) -> int:
"""
Get the number of output languages produced by the master parser
for this notebook.
:return: 0 if the master parser isn't enabled. Number of output
languages otherwise.
"""
return len(self.master.enabled_langs()) if self.master.enabled else 0
def master_multiple_langs(self) -> bool:
"""
Determine whether the master parser is parsing to multiple languages
or not.
:return: True if master parsing is enabled and parsing to multiple
languages; False if master parsing is disabled or is enabled
but with only one output language.
"""
        return self.total_master_langs() > 1
class BuildData(DefaultStrMixin):
"""
Parsed build data.
"""
def __init__(
self: BuildData,
build_file_path: str,
top_dbc_folder_name: str,
source_base: str,
output_info: OutputInfo,
course_info: CourseInfo,
notebooks: Sequence[NotebookData],
slides: Sequence[SlideData],
datasets: Sequence[DatasetData],
misc_files: Sequence[MiscFileData],
keep_lab_dirs: bool,
markdown_cfg: MarkdownInfo,
notebook_type_map: Dict[NotebookType, str],
profiles: Optional[Set[master_parse.Profile]] = None,
variables: Optional[Dict[AnyStr, AnyStr]] = None,
bundle_info: Optional[Bundle] = None,
):
"""
Create a new BuildData object.
:param build_file_path: path to the build file, for reference
:param top_dbc_folder_name: top-level directory in DBC, or None
:param source_base: value of source base field
:param output_info: info about the output directories and DBCs
:param course_info: parsed CourseInfo object
:param notebooks: list of parsed Notebook objects
        :param slides: list of parsed SlideData objects
        :param datasets: list of parsed DatasetData objects
        :param misc_files: list of parsed MiscFileData objects
:param keep_lab_dirs: value of keep_lab_dirs setting
:param markdown_cfg: parsed MarkdownInfo object
:param notebook_type_map: a dict mapping notebook types to strings.
Keys are from the NotebookType enum.
:param profiles: set of profiles, if any
:param variables: a map of user-defined variables
        :param bundle_info: Bundle data, if any
"""
super(BuildData, self).__init__()
self.build_file_path = build_file_path
self.course_directory = path.dirname(build_file_path)
self._notebooks = notebooks
self.course_info = course_info
self.source_base = source_base
self.output_info = output_info
self.slides = slides
self.datasets = datasets
self.profiles = set() if profiles is None else profiles
if markdown_cfg.html_stylesheet:
if path.isabs(markdown_cfg.html_stylesheet):
self.markdown = markdown_cfg
else:
# Stylesheet is relative to the build directory. Resolve it
# here.
p = joinpath(
path.dirname(build_file_path), markdown_cfg.html_stylesheet
)
self.markdown = dataclasses.replace(markdown_cfg, html_stylesheet=p)
else:
self.markdown = markdown_cfg
self.misc_files = misc_files
self.keep_lab_dirs = keep_lab_dirs
self.notebook_type_map = notebook_type_map
self.variables = variables or {}
self.bundle_info = bundle_info
if top_dbc_folder_name is None:
top_dbc_folder_name = "${course_name}"
folder_vars = merge_dicts(
variables,
{
"course_name": course_info.name,
"course_version": course_info.version,
"course_id": self.course_id,
},
)
self.top_dbc_folder_name = VariableSubstituter(top_dbc_folder_name).substitute(
folder_vars
)
@property
def has_profiles(self):
"""
Quick way to check whether the build has profiles or not.
:return: True or False
"""
return len(self.profiles) > 0
@property
def notebooks(self):
"""
Get a list of all non-skipped notebooks. If you want all notebooks,
including the skipped ones, use all_notebooks.
:return: a list of NotebookData objects
"""
return [n for n in self._notebooks if not n.skip]
@notebooks.setter
def notebooks(self, new_notebooks: Sequence[NotebookData]):
"""
Replace the notebooks in the build. Note that no special processing of
skipped notebooks is done here.
:param new_notebooks: the new notebooks
"""
self._notebooks = new_notebooks
@property
def all_notebooks(self):
"""
        Get a list of all notebooks, including skipped ones. If you want only
        the non-skipped notebooks, use the "notebooks" property.
:return: a list of NotebookData objects
"""
return list(self._notebooks)
@property
def course_type(self) -> master_parse.CourseType:
return self.course_info.course_type
@property
def name(self) -> str:
return self.course_info.name
@property
def course_id(self) -> str:
"""
The course ID, which is a combination of the course name and the
version.
:return: the course ID string
"""
return self.course_info.course_id
# ---------------------------------------------------------------------------
# Class-dependent Constants
# ---------------------------------------------------------------------------
DEFAULT_NOTEBOOK_FOOTER = NotebookFooter(path=None, enabled=True)
DEFAULT_NOTEBOOK_HEADING = NotebookHeading(path=None, enabled=True)
# Always generate Databricks notebooks.
MASTER_PARSE_DEFAULTS = {
"enabled": False,
"add_heading": True,
"python": True,
"r": False,
"scala": True,
"sql": False,
"answers": True,
"instructor": True,
"encoding_in": "UTF-8",
"encoding_out": "UTF-8",
"heading": DEFAULT_NOTEBOOK_HEADING,
"footer": DEFAULT_NOTEBOOK_FOOTER,
"debug": False,
}
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def load_build_yaml(yaml_file: str) -> BuildData:
"""
Load the YAML configuration file that defines the build for a particular
class. Returns a BuildData object. Throws BuildConfigError on error.
    :param yaml_file: the path to the build file to be parsed
    :return: the BuildData object representing the parsed build.yaml
"""
def required(
d: Dict[str, Any], key: str, where: str, error: Optional[str] = None
) -> Any:
"""
Get a required key
:param d: the dictionary
:param key: the key
:param where: where in the file the key should be (for errors)
:param error: error message, or None for default
:return: the value
"""
v = d.get(key)
if v is None:
if error:
msg = error
else:
msg = f'Missing required "{key}" in "{where}".'
raise BuildConfigError(msg)
return v
def do_parse_level_substitutions(
dest: str, src: str, allow_lang: bool = True, extra_vars: Dict[str, Any] = None
) -> str:
# Handles parse-time variable substitution, primarily for the notebook
# section. Some variables are substituted later.
if extra_vars is None:
extra_vars = {}
base_with_ext = path.basename(src)
(base_no_ext, ext) = path.splitext(base_with_ext)
if "@" in dest:
raise BuildConfigError('The "@" character is disallowed in destinations.')
# A certain set of variables is expanded only after master parsing; all
# others are expanded here. Any references to post master-parse variables
# (expanded in process_master_notebook) must be explicitly preserved
# here. This logic escapes them by removing the "$" and surrounding the
# rest with @ ... @. The escaping is undone, below.
adj_dest = dest
subbed = True
while subbed:
subbed = False
for pats in list(POST_MASTER_PARSE_VARIABLES.values()):
m = matches_variable_ref(pats, adj_dest)
while m:
varname = m[1].replace(r"$", "")
var = f"@{varname}@"
adj_dest = m[0] + var + m[2]
subbed = True
m = matches_variable_ref(pats, adj_dest)
fields = {
"basename": base_no_ext,
"extension": ext[1:] if ext.startswith("") else ext,
"filename": base_with_ext,
}
if not os.path.isabs(src):
# If the source path is not absolute, then allow it to be
# substituted into the destination.
fields["source_path_no_ext"] = os.path.splitext(src)[0]
if allow_lang:
fields["lang"] = EXT_LANG.get(ext, "???")
fields.update(extra_vars)
adj_dest = VariableSubstituter(adj_dest).safe_substitute(fields)
# Restore escaped variables.
escaped = re.compile(r"^([^@]*)@([^@]+)@(.*)$")
m = escaped.match(adj_dest)
while m:
adj_dest = m.group(1) + "$" + m.group(2) + m.group(3)
m = escaped.match(adj_dest)
return adj_dest
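    # Illustrative example (hypothetical values, assuming TARGET_LANG is the
    # post-master-parse variable "target_lang"): for src="01-Intro.py" and
    # dest="$target_lang/${basename}.${extension}", the result is
    # "$target_lang/01-Intro.py". $basename and $extension are expanded here,
    # while the $target_lang reference is escaped and then restored untouched.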
def parse_dict(
d: Dict[str, Any], fields_spec: Dict[str, Any], outer_section: str, section: str
) -> Dict[str, Any]:
res = {}
for field, type in list(fields_spec.items()):
if field not in d:
continue
if type is bool:
try:
res[field] = bool_value(d[field])
except ValueError as e:
raise BuildConfigError(
f'{outer_section}: Bad value for "{field}" in '
+ f'section "{section}": {e}'
)
continue
# Anything else gets copied as is for now.
res[field] = d[field]
return res
def parse_master_section(
data: Dict[str, Any], section_name: str, build_yaml_dir: str
) -> Dict[str, Any]:
# Parse the master section, returning a (possibly partial)
# dictionary (NOT a MasterParseInfo object).
extra_keys = MasterParseInfo.extra_keys(data)
if extra_keys:
raise UnknownFieldsError(section_name, "master", extra_keys)
master = parse_dict(data, MasterParseInfo.VALID_FIELDS, section_name, "master")
heading = master.get("heading")
if heading:
heading = parse_dict(
heading,
MasterParseInfo.VALID_HEADING_FIELDS,
section_name,
"master.heading",
)
heading_path = heading.get("path")
if heading_path == "DEFAULT":
heading["path"] = None
elif heading_path is not None:
# Resolve the path, relative to the build file.
if not path.isabs(heading_path):
heading_path = path.abspath(joinpath(build_yaml_dir, heading_path))
if not path.exists(heading_path):
raise BuildConfigError(
f'Footer file "{heading_path}" does not exist.'
)
heading["path"] = heading_path
master["heading"] = heading
footer = master.get("footer")
if footer:
footer = parse_dict(
footer,
MasterParseInfo.VALID_FOOTER_FIELDS,
section_name,
"master.footer",
)
footer_path = footer.get("path")
if footer_path == "DEFAULT":
footer["path"] = None
elif footer_path is not None:
                # Resolve the path, relative to the build file.
if not path.isabs(footer_path):
footer_path = path.abspath(joinpath(build_yaml_dir, footer_path))
if not path.exists(footer_path):
raise BuildConfigError(
f'Footer file "{footer_path}" does not exist.'
)
footer["path"] = footer_path
master["footer"] = footer
return master
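    # Illustrative "master" section (hypothetical YAML); field names come from
    # MasterParseInfo.VALID_FIELDS:
    #
    #   master:
    #     enabled: true
    #     scala: false
    #     heading:
    #       path: ../common/heading.html
    #       enabled: true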
def parse_notebook_defaults(
contents: Dict[str, Any], section_name: str, build_yaml_dir: str
) -> NotebookDefaults:
cfg = contents.get(section_name)
if not cfg:
            return NotebookDefaults(dest=None, master={})
master = parse_master_section(
dict_get_and_del(cfg, "master", {}), "notebook_defaults", build_yaml_dir
)
variables = dict_get_and_del(cfg, "variables", {})
res = NotebookDefaults(
dest=dict_get_and_del(cfg, "dest", None), master=master, variables=variables
)
if len(list(cfg.keys())) > 0:
raise UnknownFieldsError("build", section_name, set(cfg.keys()))
return res
def parse_notebook(
obj: Dict[str, Any],
notebook_defaults: NotebookDefaults,
extra_vars: Dict[str, str],
profiles: Optional[Set[master_parse.Profile]],
build_yaml_dir: str,
) -> Optional[NotebookData]:
        bad_dest = re.compile(r"^\.\./*|^\./*")
src = required(obj, "src", "notebooks section")
section = f'Notebook "{src}"'
dest = obj.get("dest", notebook_defaults.dest)
if not dest:
raise BuildConfigError(
f'Notebook "{src}": Missing "dest" section, and no default '
+ '"dest" in notebook defaults.'
)
variables = merge_dicts(notebook_defaults.variables, obj.get("variables", {}))
all_extra_vars = merge_dicts(extra_vars, variables)
dest = do_parse_level_substitutions(dest, src, extra_vars=all_extra_vars)
skip = bool_field(obj, "skip", False)
master = MasterParseInfo() # defaults
master.update_from_dict(notebook_defaults.master)
nb_master = parse_master_section(obj.get("master", {}), section, build_yaml_dir)
master.update_from_dict(nb_master)
_, dest_ext = os.path.splitext(dest)
if master.enabled and bad_dest.match(dest):
raise BuildConfigError(
f'Notebook "{src}": Relative destinations ("{dest}") are '
+ "disallowed."
)
if master.enabled:
total_langs = len(master.enabled_langs())
if total_langs > 1:
pat = POST_MASTER_PARSE_VARIABLES[TARGET_LANG]
if not matches_variable_ref(pat, dest):
raise BuildConfigError(
f'Notebook "{src}": When multiple master parser '
+ "languages are used, you must substitute "
+ f"${TARGET_LANG} in the destination."
)
else:
_, src_ext = os.path.splitext(src)
if (not dest_ext) or (dest_ext != src_ext):
raise BuildConfigError(
f'Notebook "{src}": "master" is disabled, so "dest" '
                    + f'should have extension "{src_ext}".'
)
for pats in list(POST_MASTER_PARSE_VARIABLES.values()):
m = matches_variable_ref(pats, dest)
if m:
raise BuildConfigError(
f'Notebook "{src}": "{m[1]}" found in "dest", but '
+ '"master" is disabled.'
)
prof = obj.get("only_in_profile")
if prof:
if not profiles:
raise BuildConfigError(
f'Notebook "{src}": Bad value of "{prof}" for '
+ "only_in_profile. No profiles are defined in the build."
)
profile_names = [p.name for p in profiles]
if prof not in profile_names:
name_str = ", ".join(profile_names)
raise BuildConfigError(
f'Notebook "{src}": Bad value of "{prof}" for '
+ f"only_in_profile. Must be one of: {name_str}"
)
if prof and (not master.enabled):
raise BuildConfigError(
f'Notebook "{src}": only_in_profile is set, but master is '
+ "not enabled."
)
nb = NotebookData(
src=src,
dest=dest,
master=master,
upload_download=bool_field(obj, "upload_download", True),
include_in_build=bool_field(obj, "include_in_build", True),
variables=variables,
only_in_profile=prof,
skip=skip,
)
return nb
def parse_slide(
obj: Dict[str, Any], extra_vars: Dict[str, Any]
) -> Optional[SlideData]:
src = required(obj, "src", "notebooks")
dest = required(obj, "dest", "notebooks")
if bool_field(obj, "skip"):
verbose(f"Skipping slide {src}")
return None
else:
return SlideData(
src=src,
dest=do_parse_level_substitutions(
dest, src, allow_lang=False, extra_vars=extra_vars
),
)
def parse_bundle(
obj: Dict[str, Any],
output_info: OutputInfo,
course_info: CourseInfo,
extra_vars: Dict[str, str],
) -> Optional[Bundle]:
if not obj:
return None
files = obj.get("files")
if not files:
return None
zip_vars = {
"course_name": course_info.name,
"course_version": course_info.version,
}
zipfile = obj.get("zipfile")
if zipfile:
# Use safe_substitute, which leaves all other variables alone.
zipfile = StringTemplate(zipfile).safe_substitute(zip_vars)
else:
zipfile = course_info.course_id + ".zip"
file_list = []
src_vars = {}
src_vars.update(extra_vars)
src_vars.update(
{
"student_dbc": output_info.student_dbc,
"instructor_dbc": output_info.instructor_dbc,
}
)
for d in files:
src = d["src"]
dest = d["dest"]
if not (dest or src):
raise BuildConfigError('"bundle" has a file with no "src" or "dest".')
if not src:
raise BuildConfigError('"bundle" has a file with no "src".')
if not dest:
raise BuildConfigError('"bundle" has a file with no "dest".')
src = StringTemplate(src).substitute(src_vars)
dest = do_parse_level_substitutions(
dest, src, allow_lang=False, extra_vars=extra_vars
)
file_list.append(BundleFile(src=src, dest=dest))
return Bundle(zipfile=zipfile, files=file_list)
def parse_misc_file(
obj: Dict[str, Any], extra_vars: Dict[str, str]
) -> Optional[MiscFileData]:
src = required(obj, "src", "misc_files")
dest = required(obj, "dest", "misc_files")
if bool_field(obj, "skip"):
verbose(f"Skipping file {src}")
return None
else:
dest = do_parse_level_substitutions(
dest, src, allow_lang=False, extra_vars=extra_vars
)
mf = MiscFileData(
src=src,
dest=dest,
dest_is_dir=obj.get("dest_is_dir", None),
is_template=obj.get("template", False),
only_in_profile=obj.get("only_in_profile", None),
)
# Sanity checks: A Markdown file can be translated to Markdown,
# PDF or HTML. An HTML file can be translated to HTML or PDF.
# is_template is disallowed for non-text files.
if mf.is_template and (not is_text_file(src)):
raise BuildConfigError(
f'Section misc_files: "{src}" is marked as a template'
+ "but it is not a text file."
)
# We can't check to see whether the target is a directory, since
# nothing exists yet. But if it has an extension, we can assume it
# is not a directory.
if has_extension(dest):
# It's a file, not a directory.
if mf.dest_is_dir:
raise BuildConfigError(
f'Section misc_files: "{src}" uses a "dest" of '
+ f'"{dest}", which has an extension, so it is assumed '
+ 'to be a file. But, "dest_is_dir" is set to true.'
)
if is_markdown(src):
if not (is_pdf(dest) or is_html(dest) or is_markdown(dest)):
raise BuildConfigError(
f'Section misc_files: "{src}" is Markdown, the '
+ f'target ("{dest}") is not a directory and is '
+ "not PDF, HTML or Markdown."
)
if is_html(src):
if not (is_pdf(dest) or is_html(dest)):
raise BuildConfigError(
f'Section misc_files: "{src}" is HTML, the '
+ f'target ("{dest}") is not a directory and is '
+ "not PDF or HTML."
)
else:
# No extension. Assume dest_is_dir is True, if not set.
if mf.dest_is_dir is None:
mf = dataclasses.replace(mf, dest_is_dir=True)
# Some simple sanity checks.
if (not mf.dest_is_dir) and (dest in (".", "..")):
raise BuildConfigError(
f'Section misc_files: "{src}" has a "dest" of '
+ f'"{dest}", but "dest_is_dir" is set to false. '
+ "That's just silly."
)
return mf
def parse_dataset(
obj: Dict[str, Any], extra_vars: Dict[str, Any], build_yaml_dir: str
) -> Optional[DatasetData]:
src = required(obj, "src", "notebooks")
dest = required(obj, "dest", "notebooks")
if bool_field(obj, "skip"):
verbose(f"Skipping data set {src}")
return None
else:
src_dir = path.dirname(src)
license = joinpath(src_dir, "LICENSE.md")
readme = joinpath(src_dir, "README.md")
p = joinpath(build_yaml_dir, src)
if not path.exists(p):
raise BuildConfigError(f'Dataset file "{p}" does not exist')
for i in (license, readme):
p = joinpath(build_yaml_dir, i)
if not path.exists(p):
raise BuildConfigError(
f'Dataset "{src}": Required "{p}" does not exist.'
)
if os.stat(p).st_size == 0:
raise BuildConfigError(f'Dataset "{src}": "{p}" is empty.')
adj_dest = do_parse_level_substitutions(
dest, src, allow_lang=False, extra_vars=extra_vars
)
return DatasetData(src=src, dest=adj_dest, license=license, readme=readme)
    def parse_file_section(
        section: Sequence[Dict[str, Any]], parse: Callable[..., Any], *args: Any
    ) -> Tuple:
# Use the supplied parse function to parse each element in the
# supplied section, filtering out None results from the function.
# Convert the entire result to a tuple.
        return tuple([o for o in [parse(i, *args) for i in section] if o is not None])
def parse_markdown(obj: Dict[str, Any]) -> MarkdownInfo:
if obj:
stylesheet = obj.get("html_stylesheet")
else:
stylesheet = None
return MarkdownInfo(html_stylesheet=stylesheet)
def parse_notebook_types(contents: Dict[str, Any]) -> Dict[NotebookType, Any]:
res = NotebookType.default_mappings()
names_to_keys = dict([(t.value, t) for t in NotebookType])
invalid_keys = set()
for k, v in list(contents.get("notebook_type_name", {}).items()):
t = names_to_keys.get(k)
if not t:
invalid_keys.add(k)
else:
res[t] = v
if invalid_keys:
key_str = ", ".join(invalid_keys)
raise BuildConfigError(
f'Unknown key(s) in "notebook_type_name" section: {key_str}'
)
return res
def parse_min_version(key: str, value: str) -> Optional[Tuple[int, int]]:
res = contents.get(key)
if res is not None:
if isinstance(res, float):
raise BuildConfigError(
f'"{key}" of the form <major>.<minor> must be quoted.'
)
try:
# Ignore the match version.
res = parse_version_string(res)[0:2]
except ValueError as e:
raise BuildConfigError(f'Bad value of "{res}" for "{key}": {e}')
return res
def parse_course_type(
data: Dict[str, Any], section: str
) -> master_parse.CourseType:
course_type = data.get("type")
if not course_type:
raise BuildConfigError(
f'Missing required "{section}.type" setting in "{yaml_file}"'
)
if course_type.lower() == "self-paced":
return master_parse.CourseType.SELF_PACED
if course_type.lower() == "ilt":
return master_parse.CourseType.ILT
raise BuildConfigError(
f'Unknown value of "{course_type}" for "{course_type}.type". '
+ 'Legal values are "ilt" and "self-paced".'
)
def parse_course_info(
course_info_cfg: Dict[str, Any], section_name: str
) -> CourseInfo:
ilt_only = {"class_setup": None, "schedule": None, "instructor_prep": None}
name = required(course_info_cfg, "name", section_name)
version = required(course_info_cfg, "version", section_name)
ilt_only["class_setup"] = course_info_cfg.get("class_setup")
ilt_only["schedule"] = course_info_cfg.get("schedule")
ilt_only["instructor_prep"] = course_info_cfg.get("prep")
course_type = parse_course_type(course_info_cfg, section_name)
deprecated = course_info_cfg.get("deprecated", False)
copyright_year = course_info_cfg.get("copyright_year", str(datetime.now().year))
        if course_type == master_parse.CourseType.SELF_PACED:
for k, v in list(ilt_only.items()):
if v:
warn(f"course_info.{k} is ignored for self-paced courses")
ilt_only[k] = None
return CourseInfo(
name=name,
title=course_info_cfg.get("title", name),
version=version,
class_setup=ilt_only["class_setup"],
schedule=ilt_only["schedule"],
instructor_prep=ilt_only["instructor_prep"],
course_type=course_type,
deprecated=deprecated,
copyright_year=copyright_year,
)
def parse_output_info(
contents: Dict[str, Any], course_info: CourseInfo
) -> OutputInfo:
student_dir = contents.get("student_dir", DEFAULT_STUDENT_FILES_SUBDIR)
instructor_dir = contents.get("instructor_dir", DEFAULT_INSTRUCTOR_FILES_SUBDIR)
student_dbc = contents.get("student_dbc", DEFAULT_STUDENT_LABS_DBC)
instructor_dbc = contents.get("instructor_dbc", DEFAULT_INSTRUCTOR_LABS_DBC)
for (k, v) in (
("student_dbc", student_dbc),
("instructor_dbc", instructor_dbc),
):
if path.dirname(v) != "":
raise BuildConfigError(f'"{k}" value "{v}" is not a simple file name.')
if student_dir == instructor_dir:
raise BuildConfigError(
'"student_dir" and "instructor_dir" cannot be the same. '
+ f'"student_dir" is "{student_dir}". '
+ f'"instructor_dir" is "{instructor_dir}".'
)
# Allow substitution of ${course_name}, {course_version} and/or
# ${course_id} in the file names.
fields = {
"course_name": course_info.name,
"course_version": course_info.version,
"course_id": course_info.course_id,
}
def sub(filename: str) -> str:
return VariableSubstituter(filename).substitute(fields)
return OutputInfo(
student_dir=student_dir,
instructor_dir=instructor_dir,
student_dbc=sub(student_dbc),
instructor_dbc=sub(instructor_dbc),
)
def parse_profiles(contents: Dict[str, Any]) -> Set[master_parse.Profile]:
profiles = contents.get("profiles")
use_profiles = bool_field(contents, "use_profiles", False)
if profiles and use_profiles:
raise BuildConfigError(
'You cannot specify both "use_profiles" and "profiles".'
)
if profiles:
res = set()
for thing in profiles:
if isinstance(thing, dict):
if len(list(thing.keys())) != 1:
raise BuildConfigError(f"Malformed profile: {thing}")
n = list(thing.keys())[0]
v = thing[n]
if not isinstance(v, str):
raise BuildConfigError(
f'The value of profile "{n}" ("{v}") is not ' + "a string."
)
res.add(master_parse.Profile(name=n, value=v))
continue
if isinstance(thing, str):
res.add(master_parse.Profile(name=thing, value=thing))
continue
raise BuildConfigError(
f'Profile "{thing}" is neither a simple string nor a '
+ '"name: value"'
)
elif use_profiles:
warn('"use_profiles" is deprecated. Use explicit profiles.')
res = {
master_parse.Profile(name="amazon", value="Amazon"),
master_parse.Profile(name="azure", value="azure"),
}
else:
res = set()
return res
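    # Illustrative "profiles" section (hypothetical YAML): each entry is either
    # a bare profile name or a one-entry "name: value" mapping.
    #
    #   profiles:
    #     - amazon
    #     - azure: Azure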
# Main function logic
verbose(f"Loading {yaml_file}...")
contents = read_yaml_file(yaml_file)
bdc_min_version = parse_min_version(
"bdc_min_version", required(contents, "bdc_min_version", "build")
)
cur_major_minor = parse_version_string(VERSION)[0:2]
if bdc_min_version > cur_major_minor:
version_str = ".".join(map(str, bdc_min_version))
raise BuildConfigError(
f"This build requires bdc version {version_str}.x or greater, "
+ f"but you're using bdc version {VERSION}."
)
profiles = parse_profiles(contents)
variables = contents.get("variables", {})
notebooks_cfg = required(contents, "notebooks", "build")
slides_cfg = contents.get("slides", [])
misc_files_cfg = contents.get("misc_files", [])
datasets_cfg = contents.get("datasets", [])
course_info_cfg = required(contents, "course_info", "build")
course_info = parse_course_info(course_info_cfg, "course_info")
src_base = required(contents, "src_base", "build")
build_yaml_full = path.abspath(yaml_file)
build_yaml_dir = path.dirname(build_yaml_full)
src_base = path.abspath(joinpath(build_yaml_dir, src_base))
notebook_defaults = parse_notebook_defaults(
contents, "notebook_defaults", build_yaml_dir
)
if slides_cfg:
slides = parse_file_section(slides_cfg, parse_slide, variables)
else:
slides = None
if datasets_cfg:
datasets = parse_file_section(
datasets_cfg, parse_dataset, variables, build_yaml_dir
)
else:
datasets = None
if misc_files_cfg:
misc_files = parse_file_section(misc_files_cfg, parse_misc_file, variables)
else:
misc_files = None
if notebooks_cfg:
notebooks = parse_file_section(
notebooks_cfg,
parse_notebook,
notebook_defaults,
variables,
profiles,
build_yaml_dir,
)
else:
notebooks = None
need_master = any([n.master.enabled for n in notebooks])
if need_master:
required_master_min_version = parse_min_version(
"master_parse_min_version",
required(
contents,
"master_parse_min_version",
"build",
error='"master_parse_min_version" is required if any '
+ "notebooks use the master parser.",
),
)
master_version = parse_version_string(master_parse.VERSION)[0:2]
if required_master_min_version > master_version:
version_str = ".".join(map(str, required_master_min_version))
raise BuildConfigError(
f"This build requires master_parse version {version_str}.x "
+ "or greater, but you're using master_parse version "
+ f"{master_parse.VERSION}."
)
output_info = parse_output_info(contents, course_info)
bundle_info = parse_bundle(
contents.get("bundle"), output_info, course_info, variables
)
data = BuildData(
build_file_path=build_yaml_full,
top_dbc_folder_name=contents.get("top_dbc_folder_name"),
course_info=course_info,
output_info=output_info,
notebooks=notebooks,
slides=slides,
datasets=datasets,
source_base=src_base,
misc_files=misc_files,
keep_lab_dirs=bool_field(contents, "keep_lab_dirs"),
markdown_cfg=parse_markdown(contents.get("markdown")),
notebook_type_map=parse_notebook_types(contents),
variables=variables,
profiles=profiles,
bundle_info=bundle_info,
)
return data
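# Illustrative minimal build file accepted by load_build_yaml() (hypothetical
# values; required fields only, with the master parser left disabled):
#
#   bdc_min_version: "1.0"
#   course_info:
#     name: Example-Course
#     version: 1.0.0
#     type: self-paced
#   src_base: ../modules
#   notebooks:
#     - src: 01-Intro.py
#       dest: $basename.$extension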
def parse_args() -> Dict[str, Any]:
"""
Parse the command line parameters.
"""
from docopt import docopt
return docopt(USAGE, version=VERSION)
def expand_template(
src_template_file: str,
build: BuildData,
tempdir: str,
profile: Optional[master_parse.Profile],
):
import pystache
variables = {}
if build.variables:
variables["variables"] = build.variables
if profile:
for p in build.profiles:
if profile == p:
variables[p.name] = p.value
else:
variables[p.name] = ""
course_info_vars = {}
for k, v in list(build.course_info.__dict__.items()):
if v is None:
continue
if isinstance(v, Enum):
v = v.value
course_info_vars[k] = str(v)
variables["course_info"] = course_info_vars
output = joinpath(tempdir, path.basename(src_template_file))
with codecs.open(src_template_file, mode="r", encoding="utf8") as i:
with codecs.open(output, mode="w", encoding="utf8") as o:
o.write(pystache.render(i.read(), variables))
return output
# For copy_info_files and related logic:
#
# This is a table of special source file type to target file type
# processors. If the source type has a key in this table, then it
# is processed specially, and there MUST be an entry for the target type,
# or an error occurs. If the source type has no key in this table, then
# it is just copied as is. See _get_type().
INFO_PROCESSORS = {
# Table that maps a source type and a target type to a consistent
# three-arg lambda (src, dest, build) for generating the target.
# src_type -> target_type -> lambda
# The type is also the extension
"md": {
"html": lambda src, dest, build: markdown_to_html(
src, dest, stylesheet=build.markdown.html_stylesheet
),
"pdf": lambda src, dest, build: markdown_to_pdf(
src, dest, stylesheet=build.markdown.html_stylesheet
),
"md": lambda src, dest, build: copy(src, dest),
},
"html": {
"pdf": lambda src, dest, build: html_to_pdf(src, dest),
"html": lambda src, dest, build: copy(src, dest),
},
}
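# Illustrative lookups (hypothetical file names): copying "notes.md" to
# "notes.pdf" selects INFO_PROCESSORS["md"]["pdf"] (markdown_to_pdf), while
# copying "notes.html" to "notes.html" selects the plain copy entry.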
def _get_type(f: str) -> Optional[str]:
if is_markdown(f):
return "md"
if is_pdf(f):
return "pdf"
if is_html(f):
return "html"
return None
def _convert_and_copy_info_file(src: str, dest: str, build: BuildData) -> NoReturn:
"""
Workhorse function: Takes the source and target, looks up how to process
them, and processes them.
:param src: the source file
:param dest: the destination file (not directory)
:param build: the parsed build information
"""
src_type = _get_type(src)
dest_type = _get_type(dest)
if src_type is None:
# Not a special type that we have to convert. Just copy.
copy(src, dest)
elif dest_type is None:
# Source type is a special type (Markdown, HTML), and the destination
# (a) isn't marked as a directory, and (b) isn't a type we understand.
# Treat it as a straight copy.
shutil.copy(src, dest)
else:
proc = INFO_PROCESSORS.get(src_type, {}).get(dest_type, None)
if proc is None:
raise Exception(f'(BUG): No processor. "{src}" -> "{dest}".')
proc(src, dest, build)
def copy_info_file(
src_file: str,
target: str,
is_template: bool,
build: BuildData,
profile: Optional[master_parse.Profile],
) -> NoReturn:
"""
Copy a file that contains some kind of readable information (e.g., a
Markdown file, a PDF, etc.). If the file is a Markdown file, it is also
converted to HTML and copied.
"""
with TemporaryDirectory() as tempdir:
if is_template:
real_src = expand_template(src_file, build, tempdir, profile)
else:
real_src = src_file
# Okay to check for directory here. It should've been created already.
if not path.isdir(target):
# Copy and/or generate one file.
_convert_and_copy_info_file(real_src, target, build)
else:
# Is a directory. What we generate depends on the input.
# By this point, it has to exist.
src_type = _get_type(src_file)
if src_type is None:
# Just a copy.
base = path.basename(src_file)
copy(real_src, joinpath(target, base))
else:
dest_map = INFO_PROCESSORS.get(src_type)
if dest_map is None:
raise BuildError(
f'(BUG): Processor mismatch. "{src_file}" -> "{target}".'
)
for dest_type in list(dest_map.keys()):
(base, _) = path.splitext(path.basename(src_file))
out = joinpath(target, base + "." + dest_type)
_convert_and_copy_info_file(real_src, out, build)
def process_master_notebook(
dest_root: str,
notebook: NotebookData,
src_path: str,
build: BuildData,
profile: Optional[master_parse.Profile],
) -> NoReturn:
"""
Process a master notebook.
:param dest_root: top-level target directory for build
:param notebook: the notebook data from the build YAML
:param src_path: the pre-calculated path to the source notebook
    :param build: the parsed build data
:param profile: build profile, or None
:return: None
"""
verbose(f"notebook={notebook}\ndest_root={dest_root}")
notebook_type_map = build.notebook_type_map
student_labs_subdir = build.output_info.student_labs_subdir
instructor_labs_subdir = build.output_info.instructor_labs_subdir
student_dir = joinpath(dest_root, student_labs_subdir)
instructor_dir = joinpath(dest_root, instructor_labs_subdir)
def move_master_notebooks(master, temp_output_dir):
"""
Move master-parsed notebooks.
:param master: the master notebook configuration data
:param temp_output_dir: the temporary output directory
"""
# See if we have to move the notebooks to other paths.
for lang in set(EXT_LANG.values()):
lc_lang = lang.lower()
if not master.lang_is_enabled(lc_lang):
continue
# This language is desired.
# Get the file name extension for the language. Note that this
# extension INCLUDES the ".".
lang_ext = LANG_EXT[lc_lang]
# The master parse tool created <notebook-basename>/<lang>/*
# in the temporary directory. The following recursive glob pattern
# will make finding the files simple. In this glob pattern, {0} is
# the notebook type (e.g., "_answers"), and {1} is the file
# extension (e.g., ".py")
glob_template = "**/*{0}*{1}"
# Copy all answers notebooks and exercises notebooks to the student
# labs directory. Copy all instructor notebooks to the instructor
# labs directory.
types_and_targets = []
if master.exercises:
types_and_targets.append((NotebookType.EXERCISES, student_dir))
if master.instructor:
types_and_targets.append((NotebookType.INSTRUCTOR, instructor_dir))
if master.answers:
types_and_targets.append((NotebookType.ANSWERS, student_dir))
base, _ = path.splitext(path.basename(notebook.src))
mp_notebook_dir = joinpath(temp_output_dir, base, lc_lang)
lang_dir = lc_lang.capitalize()
for notebook_type, target_dir in types_and_targets:
# Use a recursive glob pattern to find all matching notebooks.
# Note that eglob returns a generator.
copied = 0
suffix = NotebookType.suffix_for(notebook_type)
glob_pattern = glob_template.format(suffix, lang_ext)
matches = eglob(glob_pattern, mp_notebook_dir)
ext = LANG_EXT[lc_lang]
fields = merge_dicts(
notebook.variables,
{
TARGET_LANG: lang_dir,
TARGET_EXTENSION: ext[1:] if ext.startswith(".") else ext,
NOTEBOOK_TYPE: notebook_type_map.get(notebook_type, ""),
},
)
dest_subst = VariableSubstituter(notebook.dest).safe_substitute(fields)
if dest_subst.startswith(os.path.sep):
dest_subst = dest_subst[len(os.path.sep) :]
dest_base, _ = os.path.splitext(os.path.basename(dest_subst))
fields["target_basename"] = dest_base
for f in matches:
target = path.normpath(joinpath(target_dir, dest_subst))
copy(f, target)
copied += 1
if copied == 0:
raise BuildError(
f"Found no generated {lang} {notebook_type.value} "
f'notebooks for "{notebook.src}"!'
)
def copy_generated_instructor_notes(temp_file: str, final_dest: str) -> NoReturn:
# Need to do some substitution here. We need to get a fully-substituted
# destination, from which we can then extract the base file name.
lang = list(EXT_LANG.values())[0] # Just choose one. It doesn't matter.
ext = LANG_EXT[lang.lower()]
nb_dest_subst = VariableSubstituter(notebook.dest).safe_substitute(
merge_dicts(
notebook.variables,
{
TARGET_LANG: lang,
TARGET_EXTENSION: ext[1:] if ext.startswith(".") else ext,
NOTEBOOK_TYPE: notebook_type_map.get(NotebookType.EXERCISES, ""),
},
)
)
target_basename, _ = os.path.splitext(os.path.basename(nb_dest_subst))
# Now we can do substitution on the instructor notes target.
final_dest = VariableSubstituter(final_dest).safe_substitute(
merge_dicts(notebook.variables, {"target_basename": target_basename})
)
# Copy the generated Markdown file to the target destination.
parent = os.path.dirname(final_dest)
if not os.path.exists(parent):
mkdirp(parent)
verbose(f"+ cp {temp_file} {final_dest}")
_convert_and_copy_info_file(temp_file, final_dest, build)
# Convert to HTML.
no_ext_path, _ = os.path.splitext(final_dest)
html_path = f"{no_ext_path}.html"
pdf_path = f"{no_ext_path}.pdf"
markdown_to_html(
final_dest, html_path, stylesheet=build.markdown.html_stylesheet
)
html_to_pdf(html_path, pdf_path)
verbose(f"Running master parse on {src_path}")
master = notebook.master
extra_template_vars = {}
extra_template_vars.update(build.variables)
extra_template_vars.update(notebook.variables)
with TemporaryDirectory() as tempdir:
try:
if notebook.master.instructor_notes:
temp_instructor_notes = os.path.join(tempdir, "notes.md")
else:
temp_instructor_notes = None
params = master_parse.Params(
path=src_path,
output_dir=tempdir,
databricks=True,
ipython=False,
scala=master.scala,
python=master.python,
r=master.r,
sql=master.sql,
instructor=True,
exercises=True,
answers=master.answers,
notebook_heading_path=master.heading.path,
add_heading=master.heading.enabled,
notebook_footer_path=master.footer.path,
add_footer=master.footer.enabled,
encoding_in=master.encoding_in,
encoding_out=master.encoding_out,
enable_verbosity=verbosity_is_enabled(),
copyright_year=build.course_info.copyright_year,
active_profile=profile,
all_profiles=build.profiles,
course_type=build.course_info.course_type,
enable_debug=master.debug,
enable_templates=master.enable_templates,
instructor_notes_file=temp_instructor_notes,
extra_template_vars=extra_template_vars,
)
master_parse.process_notebooks(params)
move_master_notebooks(master, tempdir)
if temp_instructor_notes and os.path.exists(temp_instructor_notes):
copy_generated_instructor_notes(
temp_instructor_notes,
os.path.join(dest_root, notebook.master.instructor_notes),
)
except Exception as e:
e_cls = e.__class__.__name__
error(f"Failed to process {src_path}\n {e_cls}: {e}")
raise
def create_consolidated_inst_notes_index(build: BuildData, dest_root: str) -> NoReturn:
"""
Creates an "index.html" for all HTML files in the consolidated instructor
notes output directory.
:param build: The build data
:param dest_root: The root output directory, which is assumed to be
appropriate for the current build profile
"""
from glob import glob
# We can safely assume that there's only one instructor notes output
# directory per build profile. Find it, if it's configured.
for nb in build.notebooks:
if nb.master and nb.master.instructor_notes:
instructor_notes_dir = os.path.dirname(nb.master.instructor_notes)
break
else:
instructor_notes_dir = None
if not instructor_notes_dir:
return
full_path = os.path.join(dest_root, instructor_notes_dir)
if not os.path.exists(full_path):
verbose(
f'Instructor notes directory "{full_path}" does not exist. '
f"Skipping index generation."
)
return
with TemporaryDirectory() as tempdir:
with working_directory(full_path):
index_md = os.path.join(tempdir, "index.md")
html_files = glob("*.html")
if len(html_files) == 0:
return
with codecs.open(index_md, mode="w", encoding="utf-8") as f:
print(
"# Instructor Notes for " f"{build.course_info.course_title}\n",
file=f,
)
print(f"Version: {build.course_info.version}\n", file=f)
for file in sorted(html_files):
name, _ = os.path.splitext(file)
file = file.replace(" ", "%20")
print(f"- [{name}]({file})", file=f)
markdown_to_html(index_md, "index.html")
def copy_notebooks(
build: BuildData,
labs_dir: str,
dest_root: str,
profile: Optional[master_parse.Profile],
) -> NoReturn:
"""
Copy the notebooks to the destination directory.
"""
os.makedirs(labs_dir)
for notebook in build.notebooks:
src_path = joinpath(build.source_base, notebook.src)
if (
profile
and notebook.only_in_profile
and notebook.only_in_profile != profile.name
):
info(
f'Suppressing notebook "{src_path}", which is ' f"{profile.name}-only."
)
continue
if not notebook.include_in_build:
info(
f'Suppressing notebook "{src_path}", because include_in_build '
'is set to "false".'
)
continue
if notebook.master_enabled():
process_master_notebook(
dest_root=dest_root,
notebook=notebook,
src_path=src_path,
build=build,
profile=profile,
)
else:
dest_path = joinpath(labs_dir, notebook.dest)
copy(src_path, dest_path)
remove_empty_subdirectories(dest_root)
create_consolidated_inst_notes_index(build, dest_root)
def copy_instructor_notes(
build: BuildData, dest_root: str, profile: Optional[master_parse.Profile]
) -> NoReturn:
# Starting at build.source_base, look for instructor notes and course
# guides. Only keep the ones for the labs and slides we're using.
#
# NOTE: This function handles explicit instructor notes files in the source
# tree. Handling of automatically generated instructor notes is in
# process_master_notebook().
if build.notebooks:
notebook_dirs = set([path.dirname(n.src) for n in build.notebooks])
else:
notebook_dirs = set()
if build.slides:
slide_dirs = set([path.dirname(s.src) for s in build.slides])
else:
slide_dirs = set()
def lives_under_one_of(dirs, to_match):
for d in dirs:
if d.startswith(to_match):
return True
return False
notes_re = re.compile(r"^instructor[-_]?notes[-._]", re.IGNORECASE)
guide_re = re.compile(r"^guide\.", re.IGNORECASE)
full_source_base = path.abspath(build.source_base)
for (dirpath, _, filenames) in os.walk(build.source_base):
for f in filenames:
# Get the file path relative to the source file. With labs
# (notebooks), if the file matches the instructor notes regex
# AND anywhere under one of the notebook directories, copy it.
#
# For instructor guides, the guide must live in one of the
# slides directories.
rel_dir = path.abspath(dirpath)[len(full_source_base) + 1 :]
keep = False
if notes_re.match(f) and lives_under_one_of(notebook_dirs, rel_dir):
keep = True
elif guide_re.match(f) and (rel_dir in slide_dirs):
keep = True
if keep:
s = joinpath(dirpath, f)
t = joinpath(
dest_root,
build.output_info.instructor_dir,
INSTRUCTOR_NOTES_SUBDIR,
rel_dir,
f,
)
(base, _) = path.splitext(path.basename(f))
verbose(f"Copying {s} to {t}")
copy_info_file(s, t, False, build, profile)
if is_html(s):
html = s
else:
html = None
if is_markdown(s):
t = joinpath(
dest_root,
build.output_info.instructor_dir,
INSTRUCTOR_NOTES_SUBDIR,
rel_dir,
f"{base}.html",
)
html = t
markdown_to_html(s, t, stylesheet=build.markdown.html_stylesheet)
if html:
t = joinpath(
dest_root,
build.output_info.instructor_dir,
INSTRUCTOR_NOTES_SUBDIR,
rel_dir,
f"{base}.pdf",
)
html_to_pdf(html, t)
continue
def make_dbc(build: BuildData, labs_dir: str, dbc_path: str) -> NoReturn:
"""
Create a DBC file from the labs.
"""
try:
gendbc(
source_dir=labs_dir,
encoding="utf-8",
dbc_path=dbc_path,
dbc_folder=build.top_dbc_folder_name,
flatten=False,
verbose=verbosity_is_enabled(),
debugging=False,
)
finally:
pass
def copy_slides(build: BuildData, dest_root: str) -> NoReturn:
"""
Copy the slides (if any).
"""
if build.slides:
for f in build.slides:
src = joinpath(build.source_base, f.src)
dest = joinpath(
dest_root, build.output_info.instructor_dir, SLIDES_SUBDIR, f.dest
)
copy(src, dest)
def copy_misc_files(
build: BuildData, dest_root: str, profile: Optional[master_parse.Profile]
) -> NoReturn:
"""
Copy the miscellaneous files (if any).
"""
if build.misc_files:
for f in build.misc_files:
            if f.only_in_profile and (profile is None or f.only_in_profile != profile.name):
continue
s = joinpath(build.course_directory, f.src)
dest = f.dest
if dest == ".":
dest = dest_root
t = joinpath(dest_root, dest)
if f.dest_is_dir and (not path.isdir(t)):
os.mkdir(t)
copy_info_file(s, t, f.is_template, build, profile)
def copy_datasets(build: BuildData, dest_root: str) -> NoReturn:
"""
Copy the datasets (if any).
"""
if build.datasets:
def target_for(file, dest):
return joinpath(
dest_root, build.output_info.student_dir, DATASETS_SUBDIR, dest, file
)
for ds in build.datasets:
source = joinpath(build.course_directory, ds.src)
copy(source, target_for(path.basename(source), ds.dest))
css = build.markdown.html_stylesheet
for i in (ds.license, ds.readme):
source = joinpath(build.course_directory, i)
(base, _) = path.splitext(path.basename(i))
pdf = target_for(f"{base}.pdf", ds.dest)
html = target_for(f"{base}.html", ds.dest)
markdown_to_html(source, html, stylesheet=css)
html_to_pdf(html, pdf)
def remove_empty_subdirectories(directory: str) -> NoReturn:
for dirpath, _, _ in os.walk(directory, topdown=False):
if len(os.listdir(dirpath)) == 0:
verbose(f"Deleting empty directory {dirpath}")
os.rmdir(dirpath)
def write_version_notebook(dir: str, notebook_contents: str, version: str) -> NoReturn:
nb_path = joinpath(dir, VERSION_NOTEBOOK_FILE.format(version))
ensure_parent_dir_exists(nb_path)
with codecs.open(nb_path, "w", encoding="UTF-8") as out:
out.write(notebook_contents)
def bundle_course(
build: BuildData, dest_dir: str, profile: Optional[master_parse.Profile]
) -> NoReturn:
from zipfile import ZipFile
# Expand any run-time variables in zipfile and dest.
if profile:
vars = {PROFILE_VAR: profile.name}
else:
vars = {PROFILE_VAR: ""}
t = StringTemplate(joinpath(dest_dir, build.bundle_info.zipfile))
zip_path = t.safe_substitute(vars)
verbose(f"Writing bundle {zip_path}")
with ZipFile(zip_path, "w") as z:
for file in build.bundle_info.files:
src = joinpath(dest_dir, file.src)
if not (path.exists(src)):
raise BuildError(f'While building bundle, cannot find "{src}".')
if path.isdir(src):
raise BuildError(f'Cannot make bundle: Source "{src}" is a directory')
dest = StringTemplate(file.dest).safe_substitute(vars)
z.write(src, dest)
def do_build(
build: BuildData, base_dest_dir: str, profile: Optional[master_parse.Profile] = None
) -> NoReturn:
if profile:
dest_dir = joinpath(base_dest_dir, profile.name)
else:
dest_dir = base_dest_dir
for d in (build.output_info.instructor_dir, build.output_info.student_dir):
mkdirp(joinpath(dest_dir, d))
try:
# Get a Git Repo object. Since we don't really know where the
# root of the repo is, let GitPython figure it out from the
# directory containing the build file.
repo = git.Repo(
os.path.dirname(build.build_file_path), search_parent_directories=True
)
# This will get the latest commit on the current head (which will
# be the top of whatever branch is currently selected).
git_commit = repo.head.reference.commit.hexsha
except Exception as e:
error(f"Unable to get Git commit info: {e}")
git_commit = "Unknown"
version = build.course_info.version
fields = merge_dicts(
build.variables,
{
"course_name": build.course_info.name,
"version": version,
"build_timestamp": datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC"),
"year": build.course_info.copyright_year,
"git_commit": git_commit,
},
)
version_notebook = VariableSubstituter(VERSION_NOTEBOOK_TEMPLATE).substitute(fields)
labs_full_path = joinpath(dest_dir, build.output_info.student_labs_subdir)
copy_notebooks(build, labs_full_path, dest_dir, profile)
copy_instructor_notes(build, dest_dir, profile)
write_version_notebook(labs_full_path, version_notebook, version)
student_dbc = joinpath(
dest_dir, build.output_info.student_dir, build.output_info.student_dbc
)
make_dbc(build=build, labs_dir=labs_full_path, dbc_path=student_dbc)
instructor_labs = joinpath(dest_dir, build.output_info.instructor_labs_subdir)
if os.path.exists(instructor_labs):
instructor_dbc = joinpath(
dest_dir, build.output_info.instructor_dir, build.output_info.instructor_dbc
)
write_version_notebook(instructor_labs, version_notebook, version)
make_dbc(build, instructor_labs, instructor_dbc)
copy_slides(build, dest_dir)
copy_misc_files(build, dest_dir, profile)
copy_datasets(build, dest_dir)
if build.bundle_info:
bundle_course(build, dest_dir, profile)
# Finally, remove the instructor labs folder and the student labs
# folder.
if not build.keep_lab_dirs:
rm_rf(labs_full_path)
rm_rf(instructor_labs)
def build_course(build: BuildData, dest_dir: str, overwrite: bool) -> NoReturn:
"""
:param build:
:param dest_dir:
:param overwrite:
:return:
"""
if build.course_info.deprecated:
raise BuildError(f"{build.course_info.name} is deprecated and cannot be built.")
verbose(f'Publishing to "{dest_dir}"')
if path.isdir(dest_dir):
if not overwrite:
raise BuildError(
f'Directory "{dest_dir}" already exists, and you did not '
"specify overwrite."
)
rm_rf(dest_dir)
if not build.profiles:
do_build(build, dest_dir, profile=None)
else:
for profile in build.profiles:
info("")
msg = f"Building profile {profile.name}"
info("-" * len(msg))
info(msg)
info("-" * len(msg))
do_build(build, dest_dir, profile)
if errors > 0:
raise BuildError(f"{errors} error(s).")
print(
f"\nPublished {build.course_info.name}, "
f"version {build.course_info.version} to {dest_dir}\n"
)
def ensure_shard_path_exists(shard_path: str, db_profile: Optional[str]) -> NoReturn:
try:
w = databricks.Workspace(db_profile)
w.ls(shard_path)
except DatabricksError as e:
if e.code == databricks.StatusCode.NOT_FOUND:
raise BDCError(f'Shard path "{shard_path}" does not exist.')
elif e.code == databricks.StatusCode.CONFIG_ERROR:
raise BDCError(f'"databricks" configuration error: {e.message}')
else:
raise BDCError(f'Unexpected error with "databricks": {e}')
def ensure_shard_path_does_not_exist(
shard_path: str, db_profile: Optional[str]
) -> NoReturn:
try:
w = databricks.Workspace(db_profile)
w.ls(shard_path)
raise BDCError(f'Shard path "{shard_path}" already exists.')
except DatabricksError as e:
if e.code == databricks.StatusCode.NOT_FOUND:
pass
elif e.code == databricks.StatusCode.CONFIG_ERROR:
raise BDCError(f'"databricks" configuration error: {e.message}')
else:
raise BDCError(f'Unexpected error with "databricks": {e}')
def notebook_is_transferrable(nb: NotebookData, build: BuildData) -> bool:
nb_full_path = path.abspath(joinpath(build.source_base, nb.src))
if not nb.upload_download:
info(f'Skipping notebook "{nb_full_path}": upload_download is disabled.')
return False
# if nb.skip:
# info(
# f'Skipping notebook "{nb_full_path}" (marked "skip")'
# )
# return False
return True
def get_sources_and_targets(build: BuildData) -> Sequence[UploadDownloadMapping]:
"""
Get the list of source notebooks to be uploaded/downloaded and map them
to their target names on the shard.
:param build: the build
    :return: A list of UploadDownloadMapping objects, one per source notebook.
             Each source notebook can map to multiple remote targets.
"""
template_data = {
TARGET_LANG: "",
NOTEBOOK_TYPE: "",
}
profile_subst_pattern = re.compile(r"^(\d*-?)(.*)$")
def map_notebook_dest(nb):
template_data2 = {}
template_data2.update(template_data)
_, ext = path.splitext(nb.src)
if ext:
ext = ext[1:] # skip leading '.'
template_data2[TARGET_EXTENSION] = ext
p = path.normpath(
leading_slashes.sub(
"", VariableSubstituter(nb.dest).safe_substitute(template_data2)
)
)
if nb.only_in_profile:
(dir, file) = (path.dirname(p), path.basename(p))
m = profile_subst_pattern.match(file)
if not m:
new_file = "{}-{}".format(
PROFILE_ABBREVIATIONS[nb.only_in_profile], file
)
else:
new_file = "{}{}-{}".format(
m.group(1), PROFILE_ABBREVIATIONS[nb.only_in_profile], m.group(2)
)
p = joinpath(dir, new_file)
return p
buf = {}
notebooks = [
nb for nb in build.all_notebooks if notebook_is_transferrable(nb, build)
]
leading_slashes = re.compile(r"^/+")
target_dirs = {}
for nb in notebooks:
dest = map_notebook_dest(nb)
if nb.master.enabled:
# The destination might be a directory. Count how many notebooks
# end up in each directory.
target_dirs[dest] = target_dirs.get(dest, 0) + 1
for nb in notebooks:
nb_full_path = path.abspath(joinpath(build.source_base, nb.src))
# Construct partial path from target path.
base_with_ext = path.basename(nb_full_path)
(base_no_ext, ext) = path.splitext(base_with_ext)
if len(ext) > 0:
ext = ext[1:] # remove the leading "."
dest = map_notebook_dest(nb)
mapping = buf.get(
nb_full_path, UploadDownloadMapping(notebook=nb, source_path=nb_full_path)
)
mapping.remote_targets = mapping.remote_targets + [dest]
buf[nb_full_path] = mapping
return list(buf.values())
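# Illustrative note (added here; the notebook names are made up, not from any real
# build): each UploadDownloadMapping in the returned list pairs one absolute source
# path with one or more remote target names, e.g. a mapping whose source_path ends in
# "01-Intro.py" might carry remote_targets == ["Module1/01-Intro", "Solutions/01-Intro"]
# when the same notebook is published to two destinations.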
def check_for_extra_up_down_mappings(
mappings: Sequence[UploadDownloadMapping],
) -> Dict[str, Sequence[str]]:
"""
Check the result returned by get_sources_and_targets() for sources that
map to multiple targets.
:param mappings: the result of get_sources_and_targets()
:return: A dict mapping each source path that has multiple targets to its remote targets.
"""
res = {}
for mapping in list(mappings):
if len(mapping.remote_targets) == 1:
continue
res[mapping.source_path] = mapping.remote_targets
return res
def upload_notebooks(
build: BuildData, shard_path: str, db_profile: Optional[str]
) -> NoReturn:
mappings = get_sources_and_targets(build)
def do_upload(notebooks: Sequence[UploadDownloadMapping]) -> NoReturn:
ensure_shard_path_does_not_exist(shard_path, db_profile)
with TemporaryDirectory() as tempdir:
info("Copying notebooks to temporary directory.")
for mapping in notebooks:
nb_full_path = mapping.source_path
partial_paths = mapping.remote_targets
if not path.exists(nb_full_path):
warn(f'Skipping nonexistent notebook "{nb_full_path}".')
continue
if mapping.notebook.skip:
info(f'Skipping "{nb_full_path}".')
continue
for partial_path in partial_paths:
temp_path = joinpath(tempdir, partial_path)
dir = path.dirname(temp_path)
mkdirp(dir)
verbose(f'Copying "{nb_full_path}" to "{temp_path}"')
copy(nb_full_path, temp_path)
with working_directory(tempdir):
info(f"Uploading notebooks to {shard_path}")
try:
w = databricks.Workspace(profile=db_profile)
w.import_dir(".", shard_path)
info(f"Uploaded {len(notebooks)} notebooks to " f"{shard_path}.")
except DatabricksError as e:
raise UploadDownloadError(f"Upload failed: {e}")
try:
do_upload(mappings)
except UploadDownloadError as e:
w = databricks.Workspace(profile=db_profile)
w.rm(shard_path)
raise
multiple_mappings = check_for_extra_up_down_mappings(mappings)
if len(multiple_mappings) > 0:
wrap2stdout("\n********")
wrap2stdout("CAUTION:")
wrap2stdout("********\n")
wrap2stdout("Some source files have been copied to multiple destinations!\n")
for src, targets in multiple_mappings.items():
wrap2stdout(
f'\n"{src}" has been uploaded to multiple places. Only edits '
+ f'to "{shard_path}/{targets[0]}" will be applied on download.'
)
wrap2stdout(
"\nIf you edit the build file before you run --download, "
+ "you might lose any edits to those files!"
)
def download_notebooks(
build: BuildData, shard_path: str, db_profile: Optional[str]
) -> NoReturn:
notebooks = get_sources_and_targets(build)
def do_download(notebooks: Sequence[UploadDownloadMapping]) -> NoReturn:
# remote_to_local is assumed to be a 1-1 mapping of remote paths to
# local paths.
ensure_shard_path_exists(shard_path, db_profile)
with TemporaryDirectory() as tempdir:
info("Downloading notebooks to temporary directory")
with working_directory(tempdir):
try:
w = databricks.Workspace(profile=db_profile)
w.export_dir(shard_path, ".")
except DatabricksError as e:
raise UploadDownloadError(f"Download failed: {e}")
info(f"Download complete.\n")
for mapping in notebooks:
# We only ever download the first one.
remote = mapping.remote_targets[0]
local = mapping.source_path
if not path.exists(remote):
warn(
"Cannot find downloaded version of course "
+ f'notebook "{local}".'
)
continue
if mapping.notebook.skip:
print(f'"{remote}" -> SKIPPED')
os.unlink(remote)
continue
print(f'"{remote}" -> {local}')
# Make sure there's a newline at the end of each file.
move(remote, local, ensure_final_newline=True)
# Remove any others, so they're not treated as leftovers.
for r in mapping.remote_targets[1:]:
if path.exists(r):
os.unlink(r)
# Are there any leftovers?
leftover_files = []
for root, dirs, files in os.walk("."):
for f in files:
leftover_files.append(path.relpath(joinpath(root, f)))
if len(leftover_files) > 0:
warn(
f"These files from {shard_path} aren't in the build "
+ "file and were not copied."
)
for f in leftover_files:
print(f" {f}")
# get_sources_and_targets() returns a list of UploadDownloadMapping objects.
# If there are duplicate remotes, keep the first one.
remote_to_local = {}
for mapping in notebooks:
# We only ever download the first one.
remote = mapping.remote_targets[0]
if remote in remote_to_local:
raise BDCError(f'(BUG): Found multiple instances of remote path "{remote}"')
remote_to_local[remote] = mapping.source_path
do_download(notebooks)
multiple_mappings = check_for_extra_up_down_mappings(notebooks)
if len(multiple_mappings) > 0:
wrap2stdout("\n********")
wrap2stdout("CAUTION:")
wrap2stdout("********\n")
wrap2stdout("Some source files exist more than once in the build file!")
for src, targets in multiple_mappings.items():
wrap2stdout(
f'\n"{src}" has ONLY been downloaded from '
+ f"{shard_path}/{targets[0]}."
)
def print_info(build: BuildData, shell: bool) -> NoReturn:
if shell:
print(
f'COURSE_NAME="{build.name}"; '
+ f'COURSE_VERSION="{build.course_info.version}"'
)
else:
print(f"Course name: {build.name}")
print(f"Course version: {build.course_info.version}")
def validate_build(build: BuildData) -> BuildData:
"""
:param build: the parsed build data to validate
:return: the validated build data (empty notebooks are dropped)
:raises BuildConfigError: validation failed, and errors were printed
"""
# TODO: Path joins here duplicate logic elsewhere. Consolidate.
errors = 0
error_prefix = "ERROR: "
wrapper = EnhancedTextWrapper(subsequent_indent=" " * len(error_prefix))
build_file_dir = path.dirname(path.abspath(build.build_file_path))
def complain(msg):
print(wrapper.fill(error_prefix + msg))
def rel_to_build(src):
return joinpath(build_file_dir, src)
def rel_to_src_base(src):
return joinpath(build.source_base, src)
if not path.exists(build.source_base):
complain(f'src_base "{path.abspath(build.source_base)}" does not exist.')
errors += 1
headings = set()
footers = set()
new_notebooks = []
# If there are any profiles in the notebooks or misc. files, but no
# profiles in the build, abort.
if len(build.profiles) == 0:
nb_profiles = {n.only_in_profile for n in build.notebooks if n.only_in_profile}
if len(nb_profiles) > 0:
raise BuildConfigError(
'At least one notebook has "only_in_profile" set, but the '
"build does not specify any profiles."
)
misc_profiles = {
m.only_in_profile for m in build.misc_files if m.only_in_profile
}
if len(misc_profiles) > 0:
raise BuildConfigError(
'At least one miscellaneous file has "only_in_profile" set, '
"but the build does not specify any profiles."
)
instructor_note_dirs = set()
for nb in build.all_notebooks:
if nb.skip:
# Keep, but don't validate.
new_notebooks.append(nb)
continue
src_path = rel_to_src_base(nb.src)
if not path.exists(src_path):
complain(f'Notebook "{src_path}" does not exist.')
errors += 1
continue
if os.stat(src_path).st_size == 0:
complain(f'Notebook "{src_path}" is an empty file. Ignoring it.')
continue
# Attempt to parse the notebook. If it has no cells, ignore it.
try:
parsed_nb = parse_source_notebook(src_path, encoding="UTF-8")
if len(parsed_nb.cells) == 0:
complain(f'Notebook "{src_path}" has no cells. Ignoring it.')
continue
except NotebookError as e:
complain(f'Notebook "{src_path}" cannot be parsed: {e}')
errors += 1
continue
new_notebooks.append(nb)
master = nb.master
if master and master.enabled:
if master.heading.enabled and (master.heading.path is not None):
headings.add(rel_to_build(master.heading.path))
if master.footer.enabled and (master.footer.path is not None):
footers.add(rel_to_build(master.footer.path))
if master.instructor_notes:
instructor_note_dirs.add(os.path.dirname(master.instructor_notes))
if len(instructor_note_dirs) > 1:
quoted_dirs = ", ".join([f'"{d}"' for d in instructor_note_dirs])
complain(
"Notebooks are using different instructor_notes directories "
f"({quoted_dirs}). Cannot generate an index.html unless all "
"instructor note files are written to the same output "
"directory."
)
errors += 1
build.notebooks = new_notebooks
for h in headings:
if not path.exists(h):
complain(f'Notebook heading "{h}" does not exist.')
errors += 1
for f in footers:
if not path.exists(f):
complain(f'Notebook footer "{f}" does not exist.')
errors += 1
for misc in build.misc_files:
src_path = rel_to_build(misc.src)
if not path.exists(src_path):
complain(f'misc_file "{src_path}" does not exist.')
errors += 1
if misc.only_in_profile and (not build.profiles):
complain(
f'misc file "{src_path}" specifies only_in_profile, but '
+ "profiles are not enabled."
)
errors += 1
if build.slides:
for slide in build.slides:
src_path = rel_to_src_base(slide.src)
if not path.exists(src_path):
complain(f'Slide "{src_path}" does not exist.')
errors += 1
if build.datasets:
for dataset in build.datasets:
src_path = joinpath(build.course_directory, dataset.src)
if not path.exists(src_path):
complain(f'Dataset "{src_path}" does not exist.')
errors += 1
if build.markdown and build.markdown.html_stylesheet:
if not path.exists(build.markdown.html_stylesheet):
complain(
f'markdown.html_stylesheet "{build.markdown.html_stylesheet}" '
+ "does not exist."
)
errors += 1
if errors == 1:
print("\n*** One error.")
elif errors > 1:
print(f"\n*** {errors} errors.")
if errors > 0:
raise BuildConfigError("Build file validation failure.")
return build
def load_and_validate(build_file: str) -> BuildData:
build = load_build_yaml(build_file)
return validate_build(build)
def init_verbosity(verbose: bool) -> NoReturn:
if verbose:
set_verbosity(True, verbose_prefix="bdc: ")
else:
set_verbosity(False, verbose_prefix="")
def default_output_directory_for_build(build: BuildData) -> str:
return joinpath(os.getenv("HOME"), "tmp", "curriculum", build.course_id)
# ---------------------------------------------------------------------------
# Exported functions
# ---------------------------------------------------------------------------
def bdc_check_build(build_file: str, verbose: bool = False) -> NoReturn:
"""
:param build_file: the path to the build file to check
:param verbose: whether or not to display verbose messages
:return: Nothing
"""
init_verbosity(verbose)
_ = load_and_validate(build_file)
if errors == 0:
print("\nNo errors.")
else:
# Error messages already printed.
raise BuildError(f'There are problems with "{build_file}".')
def bdc_get_notebook_paths(build_file: str) -> Sequence[str]:
"""
Get the paths of all source notebooks in a build file. Notebooks that
are used multiple times are only listed once.
:param build_file: the build file to load
:return: the notebook paths, as absolute paths
"""
build = load_and_validate(build_file)
return sorted(
list(
set(
[
joinpath(build.source_base, notebook.src)
for notebook in build.notebooks
]
)
)
)
def bdc_list_notebooks(build_file: str) -> NoReturn:
"""
Print the paths of notebooks in a build file to standard output. Notebooks
that appear multiple times in a build are only listed once.
:param build_file: the path to the build file
:return: Nothing
"""
for p in bdc_get_notebook_paths(build_file):
print(p)
def bdc_print_info(build_file: str, shell_format: bool = False) -> NoReturn:
"""
Display information about the build file to standard output.
:param build_file: the path to the build file
:param shell_format: whether to print the info as shell assignments (True)
or in human-readable form (False).
:return: Nothing
"""
build = load_and_validate(build_file)
print_info(build, shell_format)
def bdc_upload(
build_file: str,
shard_path: str,
databricks_profile: Optional[str] = None,
verbose: bool = False,
) -> NoReturn:
"""
Upload a course's source notebooks to Databricks.
:param build_file: the path to the build file for the course
:param shard_path: the Databricks path to which to upload them
:param databricks_profile: the Databricks authentication profile to use
:param verbose: whether or not to display verbose messages
:return: Nothing
"""
init_verbosity(verbose)
build = load_and_validate(build_file)
upload_notebooks(build, shard_path, databricks_profile)
def bdc_download(
build_file: str,
shard_path: str,
databricks_profile: Optional[str] = None,
verbose: bool = False,
) -> NoReturn:
"""
Download a course's source notebooks from Databricks and copy them back
over top of the notebooks on the local disk.
:param build_file: the path to the build file for the course
:param shard_path: the Databricks path from which to download them
:param databricks_profile: the Databricks authentication profile to use
:param verbose: whether or not to display verbose messages
:return: Nothing
"""
init_verbosity(verbose)
build = load_and_validate(build_file)
download_notebooks(build, shard_path, databricks_profile)
def bdc_create_git_tag(build_file: str) -> NoReturn:
"""
Fashion a git tag from the course name and version in the build file, and
create the tag in the repository containing the build file. The tag will
point to the top-most revision on the current branch.
:param build_file: path to the build file
"""
# Allow load_and_validate() to bail with an exception on error.
build = load_and_validate(build_file)
tag = build.course_id.replace(" ", "-")
try:
repo = git.Repo(os.path.abspath(build_file), search_parent_directories=True)
head = repo.head.reference
branch = head.path.replace("refs/heads/", "")
existing_tags = {t.path.replace("refs/tags/", ""): t for t in repo.tags}
existing = existing_tags.get(tag)
if existing:
error(
f"Repo {repo.working_dir}: Tag {tag} already exists on "
f"branch {branch}. It points to commit "
f"{existing.commit.hexsha}."
)
else:
commit = head.commit.hexsha
repo.create_tag(tag, head)
info(
f"Repo {repo.working_dir}: Created tag {tag} on branch "
f"{branch}, pointing to commit {commit}."
)
except Exception as e:
error(f"Failed to create git tag {tag}: {e}")
raise
def bdc_output_directory_for_build(build_file: str) -> str:
"""
Determine the default output directory for a particular course.
:param build_file: the build file for the course
:return: the path to the output directory
"""
build = load_and_validate(build_file)
return default_output_directory_for_build(build)
def bdc_build_course(
build_file: str, dest_dir: str, overwrite: bool, verbose: bool = False
) -> NoReturn:
"""
Build a course.
:param build_file: the path to the build file
:param dest_dir: the destination directory for the build, or None to use
the default
:param overwrite: If the destination directory exists already, and this
parameter is True, then the destination directory will
be recursively removed before the build is run. If the
directory is there and this parameter is False, the
function will raise an exception.
:param verbose: whether or not to display verbose messages
:return: Nothing
"""
init_verbosity(verbose)
build = load_and_validate(build_file)
if not dest_dir:
dest_dir = joinpath(os.getenv("HOME"), "tmp", "curriculum", build.course_id)
build_course(build, dest_dir, overwrite)
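# Hedged usage sketch (not part of the original module; the file name and destination
# below are placeholders): a typical programmatic build might look like
#
#   bdc_build_course(
#       build_file="build.yaml",
#       dest_dir="/tmp/curriculum/my-course",
#       overwrite=True,
#       verbose=True,
#   )
#
# Passing dest_dir=None falls back to the default location under $HOME/tmp/curriculum.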
def bdc_load_build(build_file: str) -> BuildData:
"""
Load the build file and return the BuildData object. Useful for tools
outside bdc (e.g., course) that need information from the build.
:param build_file: path to the build file
:return: The parsed build data
:raises BuildConfigError: something is wrong with the build file
"""
return load_and_validate(build_file)
# ---------------------------------------------------------------------------
# Main program
# ---------------------------------------------------------------------------
def main():
opts = parse_args()
course_config = opts["BUILD_YAML"] or DEFAULT_BUILD_FILE
if not os.path.exists(course_config):
die(f"{course_config} does not exist.")
try:
if opts["--check"]:
bdc_check_build(course_config)
elif opts["--info"]:
bdc_print_info(course_config, opts["--shell"])
elif opts["--list-notebooks"]:
bdc_list_notebooks(course_config)
elif opts["--tag"]:
bdc_create_git_tag(course_config)
elif opts["--upload"]:
bdc_upload(
course_config, opts["SHARD_PATH"], opts["--dprofile"], opts["--verbose"]
)
elif opts["--download"]:
bdc_download(
course_config, opts["SHARD_PATH"], opts["--dprofile"], opts["--verbose"]
)
else:
bdc_build_course(
course_config, opts["--dest"], opts["--overwrite"], opts["--verbose"]
)
except BuildConfigError as e:
die(f'Error in "{course_config}": {e}')
except BDCError as e:
die(str(e))
except KeyboardInterrupt:
die(f"\n*** Interrupted.")
if __name__ == "__main__":
main()
```
#### File: tests/bdcutil/test_flatten.py
```python
from bdc.bdcutil import flatten
from typing import Sequence, Any
def test_flatten():
data = [
(
["foobar", range(1, 3), ["a", "b", range(4, 6)], "xyz"],
["foobar", 1, 2, "a", "b", 4, 5, "xyz"],
),
([(1, 2, (3, 4), 5), [6, 7, [[8, 9]], 10]], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
(
[(1, 2, (3, 4), 5), [6, 7, [[8, 9]], 10], range(11, 20)],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
),
]
for input, expected in data:
assert list(flatten(input)) == list(expected)
```
#### File: db_edu_util/tests/test_all_pred.py
```python
from db_edu_util import all_pred
from string import ascii_lowercase, ascii_uppercase
def test_all_pred():
assert all_pred(lambda x: x > 0, [10, 20, 30, 40]) == True
assert all_pred(lambda x: x > 0, [0, 20, 30, 40]) == False
assert all_pred(lambda x: x > 0, [20, 30, 0, 40]) == False
assert all_pred(lambda c: c in ascii_uppercase, ascii_uppercase) == True
assert all_pred(lambda c: c in ascii_uppercase, ascii_lowercase) == False
assert all_pred(lambda c: c in ascii_uppercase, "SADLFKJaLKJSDF") == False
``` |
{
"source": "joshuacortez/data-matching-workflow",
"score": 2
} |
#### File: data-matching-workflow/python_scripts/dm_utils.py
```python
import networkx as nx
import csv
import pandas as pd
import itertools
import json
import dedupe
from itertools import combinations,product
import sys
import os
import numpy as np
from affinegap import normalizedAffineGapDistance
import simplejson
from tqdm import tqdm
import tempfile
from dedupe.clustering import cluster as dedupe_cluster
import dm_file_checker
def get_deduper_probs_and_threshold(deduper, unlabeled_data, blocked_data = None, recall_weight = 1):
if blocked_data is None:
pairs = deduper.pairs(unlabeled_data)
else:
pairs = itertools.chain.from_iterable(get_blocked_pairs(deduper, blocked_data))
probs = dedupe.core.scoreDuplicates(pairs,
deduper.data_model,
deduper.classifier,
deduper.num_cores)['score']
# the memory mapped file location of the scored records
temp_filename = probs.filename
probs = probs.copy()
probs.sort()
probs = probs[::-1]
# delete the memory mapped file so it won't clog the disk
os.remove(temp_filename)
expected_dupes = np.cumsum(probs)
recall = expected_dupes / expected_dupes[-1]
precision = expected_dupes / np.arange(1, len(expected_dupes) + 1)
score = recall * precision / (recall + recall_weight ** 2 * precision)
i = np.argmax(score)
print('Maximum expected recall and precision')
print('recall: {:.2f}%'.format(recall[i]*100))
print('precision: {:.2f}%'.format(precision[i]*100))
print('With threshold: {:.2f}%'.format(probs[i]*100))
return probs, probs[i]
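# Note (added for clarity; the numbers below are purely illustrative): the score above
# is an F-beta-style trade-off between expected precision and recall, with
# recall_weight acting like beta. For instance, with recall == precision == 0.8 and
# recall_weight == 1, score = 0.8 * 0.8 / (0.8 + 1 * 0.8) = 0.4, and the returned
# threshold is simply the pair probability at the index where this score peaks.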
def get_linker_probs_and_threshold(linker, unlabeled_data_1, unlabeled_data_2, blocked_data = None, recall_weight = 1):
if blocked_data is None:
pairs = linker.pairs(unlabeled_data_1, unlabeled_data_2)
else:
pairs = itertools.chain.from_iterable(get_blocked_pairs(linker, blocked_data))
probs = dedupe.core.scoreDuplicates(pairs,
linker.data_model,
linker.classifier,
linker.num_cores)['score']
# the memory mapped file location of the scored records
temp_filename = probs.filename
probs = probs.copy()
probs.sort()
probs = probs[::-1]
# delete the memory mapped file so it won't clog the disk
os.remove(temp_filename)
expected_dupes = np.cumsum(probs)
recall = expected_dupes / expected_dupes[-1]
precision = expected_dupes / np.arange(1, len(expected_dupes) + 1)
score = recall * precision / (recall + recall_weight ** 2 * precision)
i = np.argmax(score)
print('Maximum expected recall and precision')
print('recall: {:.2f}%'.format(recall[i]*100))
print('precision: {:.2f}%'.format(precision[i]*100))
print('With threshold: {:.2f}%'.format(probs[i]*100))
return probs, probs[i]
def get_model_weights(deduper_or_linker):
fields = [field.name for field in deduper_or_linker.data_model._variables]
model_weights = sorted(list(zip(fields, deduper_or_linker.classifier.weights)), key = lambda x: x[1], reverse = False)
model_weights = pd.DataFrame(model_weights, columns = ["variable", "logistic_reg_weight"])
return model_weights
def map_cluster_ids(deduper, unlabeled_data, threshold, hard_threshold = 0.0,
blocked_data = None, canonicalize = True, numeric_fields = None,
cluster_id_tag = None,
mapped_records_filepath = None,
cluster_canonical_filepath = None):
# BADLY NEED TO REFACTOR THIS
"""
Function that maps record ids to cluster ids
Parameters
----------
deduper : dedupe.Deduper
A trained instance of dedupe.
unlabeled_data : dict
The dedupe formatted data dictionary.
threshold : float
The score threshold used for clustering.
hard_threshold : float
Only record pair scores above this value are included in the clustering.
canonicalize : bool, default True
If True, also return the canonical (representative) record for each cluster.
numeric_fields: list of str, default None
Specify which fields are numeric
cluster_id_tag: str, default None
Additional tag for distinguishing the cluster id of different datasets
Returns
-------
mapped_records
A dataframe storing the mapping from cluster_id to record_id
cluster_canonicals
A dataframe storing the canonical representation per cluster_id
"""
assert (hard_threshold < 1) and (hard_threshold >= 0), "hard_threshold should be at least 0.0 and less than 1"
if mapped_records_filepath is not None:
with open(mapped_records_filepath, "w", newline = "") as f:
mapped_records_header = ["record id", "cluster id", "confidence score", "cluster type"]
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
writer.writeheader()
if canonicalize:
if cluster_canonical_filepath is not None:
with open(cluster_canonical_filepath, "w", newline = "") as f:
cluster_canonical_header = [field.field for field in deduper.data_model.primary_fields]
cluster_canonical_header.append("cluster id")
writer = csv.DictWriter(f, fieldnames = cluster_canonical_header, quoting = csv.QUOTE_ALL)
writer.writeheader()
else:
assert cluster_canonical_filepath is None, "can't have canonicalize be False if cluster_canonical_filepath exists"
# ## Clustering
if blocked_data is None:
pairs = deduper.pairs(unlabeled_data)
else:
pairs = itertools.chain.from_iterable(get_blocked_pairs(deduper, blocked_data))
pair_scores = deduper.score(pairs)
pair_scores = pair_scores[pair_scores["score"] > hard_threshold]
clustered_dupes = deduper.cluster(pair_scores, threshold)
if numeric_fields is not None:
assert isinstance(numeric_fields, list)
mapped_records = []
cluster_canonicals = []
record_ids_in_clusters = []
# assign cluster ids to record ids
i = 0
print("Mapping cluster ids...")
for cluster in tqdm(clustered_dupes):
i += 1
cluster_id = "cl-{}".format(i)
if cluster_id_tag is not None:
cluster_id = "{}-{}".format(cluster_id_tag, cluster_id)
id_set, scores = cluster
if canonicalize:
cluster_data = [unlabeled_data[i] for i in id_set]
canonical_rep = get_canonical_rep(cluster_data, numeric_fields = numeric_fields)
canonical_rep["cluster id"] = cluster_id
if cluster_canonical_filepath is not None:
with open(cluster_canonical_filepath, "a") as f:
writer = csv.DictWriter(f, fieldnames = cluster_canonical_header, quoting = csv.QUOTE_ALL)
writer.writerow(canonical_rep)
else:
cluster_canonicals.append(canonical_rep)
for record_id, score in zip(id_set, scores):
record_dict = {
"record id": record_id,
"cluster id": cluster_id,
"confidence score": score,
"cluster type":'dup'
}
if mapped_records_filepath is not None:
with open(mapped_records_filepath, "a", newline = "") as f:
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
writer.writerow(record_dict)
else:
mapped_records.append(record_dict)
record_ids_in_clusters.append(record_id)
record_ids_in_clusters = set(record_ids_in_clusters)
solo_ids = list(set(unlabeled_data.keys()).difference(record_ids_in_clusters))
# assign solo ids to record ids
print("Mapping solo record ids...")
for record_id in tqdm(solo_ids):
i += 1
cluster_id = "cl-{}".format(i)
if cluster_id_tag is not None:
cluster_id = "{}-{}".format(cluster_id_tag, cluster_id)
record_dict = {
"record id":record_id,
"cluster id":cluster_id,
"confidence score":None,
"cluster type":'solo'
}
mapped_records.append(record_dict)
if mapped_records_filepath is None:
mapped_records = pd.DataFrame(mapped_records)
else:
with open(mapped_records_filepath, "a", newline = "") as f:
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
writer.writerows(mapped_records)
mapped_records = None
if cluster_canonical_filepath is None:
cluster_canonicals = pd.DataFrame(cluster_canonicals)
else:
cluster_canonicals = None
# delete temporary file generated for pair_scores
try:
mmap_file = pair_scores.filename
del pair_scores
os.remove(mmap_file)
except AttributeError:
pass
if canonicalize:
return mapped_records, cluster_canonicals
else:
return mapped_records
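# Hedged usage sketch (names and values are placeholders, not project defaults):
#
#   mapped_df, canonical_df = map_cluster_ids(
#       deduper=my_deduper,
#       unlabeled_data=my_unlabeled_data,
#       threshold=clustering_threshold,
#       canonicalize=True,
#       numeric_fields=["amount"],
#   )
#
# When mapped_records_filepath / cluster_canonical_filepath are supplied instead, the
# results are streamed to CSV and the corresponding return values are None.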
def abs_distance(x,y):
return np.abs(x-y)
def get_canonical_rep(record_cluster, numeric_fields = None):
"""
Given a list of records within a duplicate cluster, constructs a
canonical representation of the cluster by finding canonical
values for each field
"""
canonical_rep = {}
keys = record_cluster[0].keys()
if numeric_fields is None:
numeric_fields = []
for key in keys:
key_values = []
# difference distance functions for numeric and non-numeric fields
if key in numeric_fields:
comparator = abs_distance
else:
comparator = normalizedAffineGapDistance
for record in record_cluster:
# assume non-empty values always better than empty value
# for canonical record
if record[key]:
key_values.append(record[key])
if key_values:
canonical_rep[key] = dedupe.canonical.getCentroid(key_values, comparator)
else:
canonical_rep[key] = ''
return canonical_rep
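# Illustrative example (made-up records): for a cluster like
#   [{"name": "ACME Corp", "amount": 10.0}, {"name": "ACME Corporation", "amount": 12.0}]
# with numeric_fields=["amount"], the "amount" centroid is chosen under absolute
# distance while "name" is chosen under the normalized affine-gap string comparator;
# empty values never win over non-empty ones.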
def get_linked_ids(linker, unlabeled_data_1, unlabeled_data_2, threshold, hard_threshold = 0.0, blocked_data = None,
mapped_records_filepath = None, constraint = "one-to-one"):
# BADLY NEED TO REFACTOR THIS
"""
constraint: What type of constraint to put on a join.
'one-to-one'
Every record in data_1 can match at most
one record from data_2 and every record
from data_2 can match at most one record
from data_1. This is good for when both
data_1 and data_2 are from different
sources and you are interested in
matching across the sources. If,
individually, data_1 or data_2 have many
duplicates you will not get good
matches.
'many-to-one'
Every record in data_1 can match at most
one record from data_2, but more than
one record from data_1 can match to the
same record in data_2. This is good for
when data_2 is a lookup table and data_1
is messy, such as geocoding or matching
against golden records.
'many-to-many'
Every record in data_1 can match
multiple records in data_2 and vice
versa. This is like a SQL inner join.
"""
if mapped_records_filepath is not None:
with open(mapped_records_filepath, "w", newline = "") as f:
mapped_records_header = ["record id 1", "record id 2", "confidence score", "link type"]
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
writer.writeheader()
## link matching
if blocked_data is None:
pairs = linker.pairs(unlabeled_data_1, unlabeled_data_2)
else:
pairs = itertools.chain.from_iterable(get_blocked_pairs(linker, blocked_data))
pair_scores = linker.score(pairs)
pair_scores = pair_scores[pair_scores["score"] > hard_threshold]
assert constraint in {'one-to-one', 'many-to-one', 'many-to-many'}, (
'%s is an invalid constraint option. Valid options include '
'one-to-one, many-to-one, or many-to-many' % constraint)
if constraint == 'one-to-one':
links = linker.one_to_one(pair_scores, threshold)
elif constraint == 'many-to-one':
links = linker.many_to_one(pair_scores, threshold)
elif constraint == 'many-to-many':
links = pair_scores[pair_scores['score'] > threshold]
links = list(links)
# delete temporary file generated for pair_scores
try:
mmap_file = pair_scores.filename
del pair_scores
os.remove(mmap_file)
except AttributeError:
pass
mapped_records = []
ids_with_links_1 = []
ids_with_links_2 = []
print("Mapping linked pairs...")
for record_pair in tqdm(links):
record_ids, score = record_pair
pair_dict = {
"record id 1":record_ids[0],
"record id 2":record_ids[1],
"confidence score":score,
"link type":"dup",
}
if mapped_records_filepath is not None:
with open(mapped_records_filepath, "a", newline = "") as f:
mapped_records_header = ["record id 1", "record id 2", "confidence score", "link type"]
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
writer.writerow(pair_dict)
else:
mapped_records.append(pair_dict)
ids_with_links_1.append(record_ids[0])
ids_with_links_2.append(record_ids[1])
ids_with_links_1 = set(ids_with_links_1)
ids_with_links_2 = set(ids_with_links_2)
# include the records without found links
ids_without_links_1 = list(set(unlabeled_data_1.keys()).difference(ids_with_links_1))
ids_without_links_2 = list(set(unlabeled_data_2.keys()).difference(ids_with_links_2))
print("Mapping unlinked records in dataset 1...")
for record_id in tqdm(ids_without_links_1):
pair_dict = {
"record id 1":record_id,
"record id 2":None,
"confidence score":None,
"link type":"solo",
}
mapped_records.append(pair_dict)
print("Mapping unlinked records in dataset 2...")
for record_id in tqdm(ids_without_links_2):
pair_dict = {
"record id 1":None,
"record id 2":record_id,
"confidence score":None,
"link type":"solo",
}
mapped_records.append(pair_dict)
if mapped_records_filepath is None:
mapped_records = pd.DataFrame(mapped_records)
else:
with open(mapped_records_filepath, "a", newline = "") as f:
mapped_records_header = ["record id 1", "record id 2", "confidence score", "link type"]
writer = csv.DictWriter(f, fieldnames = mapped_records_header, quoting = csv.QUOTE_ALL)
writer.writerows(mapped_records)
mapped_records = None
return mapped_records
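# Hedged usage sketch (names are placeholders): linking two deduped datasets with a
# strict one-to-one constraint might look like
#
#   links_df = get_linked_ids(
#       linker=my_linker,
#       unlabeled_data_1=deduped_data_1,
#       unlabeled_data_2=deduped_data_2,
#       threshold=link_threshold,
#       constraint="one-to-one",
#   )
#
# Records left unmatched in either dataset come back with link type "solo".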
def get_uncertain_clusters(mapped_records_df, threshold = 0.9):
cluster_means_df = mapped_records_df\
.groupby("cluster id")\
.mean()\
.sort_values(by = "confidence score", ascending = True)
cluster_means_bool = (cluster_means_df["confidence score"] < threshold)
print("There are {} clusters with mean confidence score lower than {:.1f}% threshold".format(cluster_means_bool.sum(), threshold*100))
uncertain_clusters_dict = cluster_means_df.loc[cluster_means_bool,:].to_dict()["confidence score"]
return uncertain_clusters_dict
def get_pairs_from_uncertain_clusters(mapped_records_df, labeled_id_pairs, threshold = 0.9):
assert isinstance(labeled_id_pairs, list)
uncertain_clusters = get_uncertain_clusters(mapped_records_df, threshold = threshold)
n_uncertain_clusters = len(uncertain_clusters)
nth_cluster = 0
for cluster_id, mean_conf_score in uncertain_clusters.items():
nth_cluster += 1
pairs_in_cluster = []
# get record ids in cluster
ids_in_cluster = mapped_records_df.loc[mapped_records_df["cluster id"] == cluster_id,"record id"].values.tolist()
# generating record pairs from cluster
for id_1, id_2 in combinations(ids_in_cluster, 2):
id_pair = tuple(sorted((id_1,id_2)))
# if pair is not already tagged, grab data of records
if id_pair not in labeled_id_pairs:
pairs_in_cluster.append(id_pair)
yield ids_in_cluster, pairs_in_cluster, nth_cluster, n_uncertain_clusters, mean_conf_score
def find_ids_of_labeled_data(labeled_data, unlabeled_data):
labeled_pair_ids = []
for label in labeled_data.keys():
assert label in ["distinct", "match"]
print("Finding ids for {} pairs".format(label))
data_pairs_list = labeled_data[label]
for data_pair in tqdm(data_pairs_list):
try:
# for backwards compatibility
record_1, record_2 = data_pair["__value__"]
except:
record_1, record_2 = data_pair
record_1_id = [key for key,val in unlabeled_data.items() if unlabeled_data[key] == record_1]
record_2_id = [key for key,val in unlabeled_data.items() if unlabeled_data[key] == record_2]
if len(record_1_id) > 1:
print("Multiple record ids ({}) found for {}".format(len(record_1_id),record_1))
record_1_id = record_1_id[0]
if len(record_2_id) > 1:
print("Multiple record ids ({}) found for {}".format(len(record_2_id),record_2))
record_2_id = record_2_id[0]
labeled_pair = {"record id 1":record_1_id, "record id 2":record_2_id, "label":label}
labeled_pair_ids.append(labeled_pair)
labeled_pair_ids = pd.DataFrame(labeled_pair_ids, dtype = "str")
return labeled_pair_ids
def find_ids_of_labeled_data_rl(labeled_data, unlabeled_data_1, unlabeled_data_2):
labeled_pair_ids = []
for label in labeled_data.keys():
assert label in ["distinct", "match"]
print("Finding ids for {} pairs".format(label))
data_pairs_list = labeled_data[label]
for data_pair in tqdm(data_pairs_list):
record_1, record_2 = data_pair
record_1_id = [key for key,val in unlabeled_data_1.items() if unlabeled_data_1[key] == record_1]
record_2_id = [key for key,val in unlabeled_data_2.items() if unlabeled_data_2[key] == record_2]
if len(record_1_id) > 1:
print("Multiple record ids ({}) found for {}".format(len(record_1_id),record_1))
record_1_id = record_1_id[0]
if len(record_2_id) > 1:
print("Multiple record ids ({}) found for {}".format(len(record_2_id),record_2))
record_2_id = record_2_id[0]
labeled_pair = {"record id 1":record_1_id, "record id 2":record_2_id, "label":label}
labeled_pair_ids.append(labeled_pair)
labeled_pair_ids = pd.DataFrame(labeled_pair_ids, dtype = "str")
return labeled_pair_ids
def consoleLabel_cluster_old(deduper, mapped_records_df, labeled_id_pairs, unlabeled_data, threshold = 0.9):
'''
Command line interface for presenting and labeling uncertain clusters by the user
Argument :
A deduper object
'''
finished = False
fields = [field.field for field in deduper.data_model.primary_fields]
assert len(fields) == len(list(set(fields)))
labeled_pairs = {"distinct":[], "match":[]}
uncertain_pair_generator = get_pairs_from_uncertain_clusters(mapped_records_df,
labeled_id_pairs,
threshold = threshold)
while not finished:
try:
ids_in_cluster, pairs_in_cluster, nth_cluster, n_uncertain_clusters, mean_conf_score = next(uncertain_pair_generator)
records_in_cluster = {i:unlabeled_data[i] for i in ids_in_cluster}
except StopIteration:
print("Already tagged all {} uncertain clusters.".format(n_uncertain_clusters))
print("Finished labeling")
break
print("Viewing {} out of {} uncertain clusters".format(nth_cluster, n_uncertain_clusters), file = sys.stderr)
print("Cluster contains {} records".format(len(ids_in_cluster)))
print("Mean Cluster Score {:.1f}%\n".format(mean_conf_score*100), file = sys.stderr)
for record_id, record in records_in_cluster.items():
print("Record {}".format(record_id), file=sys.stderr)
for field in fields:
line = "{} : {}".format(field, record[field])
print(line, file=sys.stderr)
print(file=sys.stderr)
user_input = _prompt_records_same()
if user_input == "y":
for id_1, id_2 in pairs_in_cluster:
record_pair = (unlabeled_data[id_1], unlabeled_data[id_2])
labeled_pairs["match"].append(record_pair)
elif user_input == "n":
print("Reviewing pairs in cluster", file=sys.stderr)
for id_1, id_2 in pairs_in_cluster:
record_pair = (unlabeled_data[id_1], unlabeled_data[id_2])
for record in record_pair:
for field in fields:
line = "{} : {}".format(field, record[field])
print(line, file=sys.stderr)
print(file=sys.stderr)
user_input = _prompt_records_same()
if user_input == "y":
labeled_pairs["match"].append(record_pair)
elif user_input == "n":
labeled_pairs["distinct"].append(record_pair)
elif user_input == "f":
print("Finished labeling", file=sys.stderr)
finished = True
break
elif user_input == "f":
print("Finished labeling", file=sys.stderr)
finished = True
deduper.markPairs(labeled_pairs)
def consoleLabel_cluster(deduper, mapped_records_df, labeled_id_pairs, unlabeled_data,
recall = 1.0, threshold = 0.9):
'''
Command line interface for presenting and labeling uncertain clusters by the user
Argument :
A deduper object
'''
finished = False
fields = [field.field for field in deduper.data_model.primary_fields]
assert len(fields) == len(list(set(fields)))
labeled_pairs = {"distinct":[], "match":[]}
uncertain_pair_generator = get_pairs_from_uncertain_clusters(mapped_records_df,
labeled_id_pairs,
threshold = threshold)
while not finished:
try:
ids_in_cluster, pairs_in_cluster, nth_cluster, n_uncertain_clusters, mean_conf_score = next(uncertain_pair_generator)
records_in_cluster = {i:unlabeled_data[i] for i in ids_in_cluster}
except StopIteration:
print("Already tagged all {} uncertain clusters.".format(n_uncertain_clusters))
print("Finished labeling")
break
print("Viewing {} out of {} uncertain clusters".format(nth_cluster, n_uncertain_clusters), file = sys.stderr)
print("Cluster contains {} records".format(len(ids_in_cluster)), file = sys.stderr)
print("Mean Cluster Score {:.1f}%\n".format(mean_conf_score*100), file = sys.stderr)
for record_id, record in records_in_cluster.items():
print("Record {}".format(record_id), file=sys.stderr)
for field in fields:
line = "{} : {}".format(field, record[field])
print(line, file=sys.stderr)
print(file=sys.stderr)
user_input = _prompt_records_same()
if user_input == "y":
for id_1, id_2 in pairs_in_cluster:
record_pair = (unlabeled_data[id_1], unlabeled_data[id_2])
labeled_pairs["match"].append(record_pair)
labeled_id_pairs.append((id_1, id_2))
elif user_input == "n":
print("Reviewing pairs in cluster", file=sys.stderr)
for id_1, id_2 in pairs_in_cluster:
record_pair = (unlabeled_data[id_1], unlabeled_data[id_2])
for record in record_pair:
for field in fields:
line = "{} : {}".format(field, record[field])
print(line, file=sys.stderr)
print(file=sys.stderr)
pair_user_input = _prompt_records_same()
if pair_user_input == "y":
labeled_pairs["match"].append(record_pair)
labeled_id_pairs.append((id_1,id_2))
elif pair_user_input == "n":
labeled_pairs["distinct"].append(record_pair)
labeled_id_pairs.append((id_1,id_2))
elif pair_user_input == "f":
print("Finished labeling", file=sys.stderr)
finished = True
break
elif user_input == "f":
print("Finished labeling", file=sys.stderr)
finished = True
if (user_input == "y") or (user_input == "n"):
deduper.markPairs(labeled_pairs)
deduper.train(recall = recall)
clustering_threshold = deduper.threshold(unlabeled_data, recall_weight=1)
mapped_records_df = map_cluster_ids(deduper, unlabeled_data, clustering_threshold, canonicalize=False)
print("Resampling uncertain clusters based on retrained model", file=sys.stderr)
labeled_pairs = {"distinct":[], "match":[]}
uncertain_pair_generator = get_pairs_from_uncertain_clusters(mapped_records_df, labeled_id_pairs, threshold = threshold)
def _prompt_records_same():
print("Do these records refer to the same thing?", file = sys.stderr)
valid_response = False
user_input = ""
valid_responses = {"y", "n", "u", "f"}
while not valid_response:
prompt = "(y)es / (n)o / (u)nsure / (f)inished"
print(prompt, file=sys.stderr)
user_input = input()
if user_input in valid_responses:
valid_response = True
return user_input
def get_clusters_from_links(links, solo_records):
assert isinstance(links, pd.Index)
assert isinstance(solo_records, pd.Index)
clusters = nx.Graph(links.tolist())
clusters = list(nx.connected_components(clusters))
clusters.extend(solo_records.tolist())
return clusters
def get_deduper_candidate_pairs(deduper, unlabeled_data):
# gets candidate pairs after indexing
candidate_records = deduper.pairs(unlabeled_data)
candidate_records = [(candidate[0][0], candidate[1][0]) for candidate in candidate_records]
candidate_records = pd.MultiIndex.from_tuples(candidate_records)
# some candidate records can be placed in more than 1 block, so let's drop duplicates
candidate_records = candidate_records.drop_duplicates()
return candidate_records
def get_linker_candidate_pairs(linker, unlabeled_data_1, unlabeled_data_2):
# gets candidate pairs after indexing
candidate_records = linker.pairs(unlabeled_data_1, unlabeled_data_2)
candidate_records = [(candidate[0][0], candidate[1][0]) for candidate in candidate_records]
candidate_records = pd.MultiIndex.from_tuples(candidate_records)
# some candidate records can be placed in more than 1 block, so let's drop duplicates
candidate_records = candidate_records.drop_duplicates()
return candidate_records
# converts multindex to format preferred by dedupe method
def convert_rl_to_dedupe_candidate_pair(candidate_pairs, unlabeled_data):
assert isinstance(candidate_pairs, pd.Index)
output = []
for rec_id_1, rec_id_2 in candidate_pairs:
# dedupe candidate pairs must be in the format (record_id, record)
candidate_1 = (rec_id_1, unlabeled_data[rec_id_1])
candidate_2 = (rec_id_2, unlabeled_data[rec_id_2])
candidate_pair = (candidate_1, candidate_2)
output.append(candidate_pair)
return output
# converts multiindex to format preferred by linker method
def convert_rl_to_linker_candidate_pair(candidate_pairs, unlabeled_data_1, unlabeled_data_2):
assert isinstance(candidate_pairs, pd.Index)
output = []
for rec_id_1, rec_id_2 in candidate_pairs:
if rec_id_1 in unlabeled_data_1.keys():
rec_data_1 = unlabeled_data_1[rec_id_1]
rec_data_2 = unlabeled_data_2[rec_id_2]
assert rec_id_1 not in unlabeled_data_2.keys(), "{} key found in both datasets. Keys must be unique".format(rec_id_1)
assert rec_id_2 not in unlabeled_data_1.keys(), "{} key found in both datasets. Keys must be unique".format(rec_id_2)
else:
rec_data_1 = unlabeled_data_2[rec_id_1]
rec_data_2 = unlabeled_data_1[rec_id_2]
assert rec_id_2 not in unlabeled_data_2.keys(), "{} found in both datasets. Keys must be unique".format(rec_id_2)
# record linker candidate pairs must be in the format (record_id, record)
candidate_1 = (rec_id_1, rec_data_1)
candidate_2 = (rec_id_2, rec_data_2)
candidate_pair = (candidate_1, candidate_2)
output.append(candidate_pair)
return output
def read_unlabeled_data_json(unlabeled_data_filepath, empty_str_to_none = True, numeric_fields = None):
with open(unlabeled_data_filepath, "r") as json_file:
unlabeled_data = json.load(json_file)
unlabeled_data = pd.DataFrame.from_dict(unlabeled_data, orient = "index")
if numeric_fields is not None:
assert isinstance(numeric_fields, list)
for col in numeric_fields:
unlabeled_data[col] = unlabeled_data[col].apply(lambda x: x if x == "" else float(x))
if empty_str_to_none:
for col in unlabeled_data.columns.tolist():
empty_str_bool = (unlabeled_data[col] == "")
print("converting {} empty string values of column {} to None".format(empty_str_bool.sum(), col))
unlabeled_data.loc[empty_str_bool,col] = None
# converting NaNs of numeric columns (NaNs introduced because of the previous line) to None
if numeric_fields is not None:
for col in numeric_fields:
not_nan_bool = unlabeled_data[col].notnull()
print("converting {} NaN values of column {} to None".format((~not_nan_bool).sum(), col))
unlabeled_data[col] = unlabeled_data[col].where((not_nan_bool), None)
unlabeled_data = unlabeled_data.to_dict(orient = "index")
return unlabeled_data
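# Illustrative input shape (made-up records): the JSON file is expected to map each
# record id to a flat dictionary of fields, e.g.
#
#   {"rec-1": {"name": "Jane Doe", "amount": "10.5"},
#    "rec-2": {"name": "", "amount": ""}}
#
# With empty_str_to_none=True and numeric_fields=["amount"], the empty strings above
# come back as None and "10.5" is cast to the float 10.5.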
def write_canonical_w_solo_unlabeled_data(canonicals_df, mapped_records_df, unlabeled_data,
canonical_w_solo_unlabeled_filepath):
# will be used for post cluster review, specifically on matching solos to clusters and merging clusters
# those two steps are based on the cluster canonicals
# remember to read in this written file using read_unlabeled_data_json later on
canonical_w_solo_data = canonicals_df.set_index("cluster id")\
.to_dict(orient = "index")
mapped_records_df = mapped_records_df.set_index("record id")
solo_records = mapped_records_df.loc[mapped_records_df["cluster type"] == "solo",:]\
.index.tolist()
for record_id in solo_records:
record = unlabeled_data[record_id]
cluster_id = mapped_records_df.loc[record_id,"cluster id"]
canonical_w_solo_data[cluster_id] = record
with open(canonical_w_solo_unlabeled_filepath, 'w') as outfile:
json.dump(canonical_w_solo_data, outfile)
def prepare_training_deduper(deduper, unlabeled_data, labeled_data_filepath, blocked_proportion = 0.5, sample_size = 15_000):
# If we have training data saved from a previous run of dedupe,
# look for it and load it in.
# __Note:__ if you want to train from scratch, delete the labeled_data_filepath
if os.path.exists(labeled_data_filepath):
print('reading labeled examples from ', labeled_data_filepath)
with open(labeled_data_filepath, 'rb') as labeled_data:
deduper.prepare_training(data = unlabeled_data, training_file = labeled_data,
blocked_proportion = blocked_proportion,
sample_size = sample_size)
else:
deduper.prepare_training(data = unlabeled_data, blocked_proportion = blocked_proportion,
sample_size = sample_size)
def save_trained_deduper(deduper, labeled_data_filepath, settings_filepath):
# When finished, save our training to disk
with open(labeled_data_filepath, 'w') as tf:
deduper.write_training(tf)
# Save our weights and predicates to disk. If the settings file
# exists, we will skip all the training and learning next time we run
# this file.
with open(settings_filepath, 'wb') as sf:
deduper.write_settings(sf)
def prepare_training_linker(linker, unlabeled_data_1, unlabeled_data_2, labeled_data_filepath, blocked_proportion = 0.5, sample_size = 15_000):
# If we have training data saved from a previous run of linker,
# look for it and load it in.
# __Note:__ if you want to train from scratch, delete the labeled_data_filepath
if os.path.exists(labeled_data_filepath):
print('reading labeled examples from ', labeled_data_filepath)
with open(labeled_data_filepath, 'rb') as labeled_data:
linker.prepare_training(data_1 = unlabeled_data_1, data_2 = unlabeled_data_2,
training_file = labeled_data,
blocked_proportion = blocked_proportion,
sample_size = sample_size)
else:
linker.prepare_training(data_1 = unlabeled_data_1, data_2 = unlabeled_data_2,
blocked_proportion = blocked_proportion,
sample_size = sample_size)
def save_trained_linker(linker, labeled_data_filepath, settings_filepath):
# When finished, save our training to disk
with open(labeled_data_filepath, 'w') as tf:
linker.write_training(tf)
# Save our weights and predicates to disk. If the settings file
# exists, we will skip all the training and learning next time we run
# this file.
with open(settings_filepath, 'wb') as sf:
linker.write_settings(sf)
def get_data_of_labeled_pairs(labeled_pairs_df, unlabeled_data):
df = pd.DataFrame.from_dict(unlabeled_data, orient = "index")
df_left = df.loc[labeled_pairs_df["record id 1"],:]
df_left.columns = ["{}_1".format(col) for col in df_left.columns]
df_left.index.name = "record id 1"
df_left = df_left.reset_index()
df_right = df.loc[labeled_pairs_df["record id 2"],:]
df_right.columns = ["{}_2".format(col) for col in df_right.columns]
df_right.index.name = "record id 2"
df_right = df_right.reset_index()
output = pd.concat([df_left, df_right], axis = 1, sort = False)
# sort columns
output = output.sort_index(axis = 1)
output = output.set_index(["record id 1", "record id 2"])
label_df = labeled_pairs_df.set_index(["record id 1", "record id 2"])
output = pd.merge(left = label_df, right = output, left_index = True, right_index = True, how = "inner")
return output
def get_deduped_data(mapped_records_df, canonicals_df, unlabeled_data, none_to_empty_str = True):
mapped_records_df = mapped_records_df.set_index("record id")
solo_record_ids = mapped_records_df.loc[mapped_records_df["cluster type"] == "solo","cluster id"].to_dict()
deduped_data = {cluster_id:unlabeled_data[record_id] for record_id,cluster_id in solo_record_ids.items()}
deduped_data = pd.DataFrame.from_dict(deduped_data, orient = "index")
deduped_data.index.name = "cluster id"
canonicals_df = canonicals_df.set_index("cluster id")
# appending the canonicalized cluster representations to the solo records
deduped_data = deduped_data.append(canonicals_df)
if none_to_empty_str:
deduped_data = deduped_data.where((deduped_data.notnull()), "")
return deduped_data
def write_deduper_blocks(deduper, unlabeled_data, blocks_filepath):
"""
simplify blocks by not writing the record entries, only the ids
"""
blocks = deduper.pairs(unlabeled_data)
with open(blocks_filepath, "w", newline = "") as csv_file:
writer = csv.writer(csv_file, quoting = csv.QUOTE_ALL)
header = ["block_id", "record_id"]
writer.writerow(header)
block_id = 1
for block in blocks:
"""
from dedupe source code:
Each item in a block must be a sequence of record_id, record and the
records also must be dictionaries
but we're only keeping the record_id, not the record here
"""
for record in block:
record_id, _, = record
block_entry = [block_id, record_id]
writer.writerow(block_entry)
block_id += 1
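# Illustrative file contents (example values only): the CSV written above holds one row
# per (block, record) membership, for example
#
#   "block_id","record_id"
#   "1","rec-10"
#   "1","rec-42"
#   "2","rec-7"
#
# read_deduper_blocks() below relies on these rows being grouped by block_id, which is
# exactly how this writer emits them.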
def read_deduper_blocks(unlabeled_data, blocks_filepath):
# assumes that the records are sorted by block number
current_block_id = None
block_records = []
"""
from dedupe source code:
Each item in a block must be a sequence of record_id, record, and the
records also must be dictionaries
"""
with open(blocks_filepath, "r") as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
block_id, record_id = row["block_id"], row["record_id"]
if current_block_id == block_id:
block_records.append((record_id, unlabeled_data[record_id]))
else:
if current_block_id is not None:
yield block_records
current_block_id = block_id
block_records = [(record_id, unlabeled_data[record_id])]
yield block_records
def write_linker_blocks(linker, unlabeled_data_1, unlabeled_data_2, blocks_filepath):
"""
simplify blocks by not writing the record entries, only the ids
"""
blocks = linker.pairs(unlabeled_data_1, unlabeled_data_2)
block_id = 1
with open(blocks_filepath, "w", newline = "") as csv_file:
writer = csv.writer(csv_file, quoting = csv.QUOTE_ALL)
header = ["record_set_num", "block_id", "record_id"]
writer.writerow(header)
for block in blocks:
rec_1, rec_2 = block
rec_1_id, _ = rec_1
block_entry = ["1", block_id, rec_1_id]
writer.writerow(block_entry)
rec_2_id, _ = rec_2
block_entry = ["2", block_id, rec_2_id]
writer.writerow(block_entry)
block_id += 1
def read_linker_blocks(unlabeled_data_1, unlabeled_data_2, blocks_filepath):
# assumes that the records are sorted by block number
block_records = ()
block_set_1 = []
block_set_2 = []
current_block_id = None
"""
from dedupe source code:
Each block must be a made up of two sequences, (base_sequence, target_sequence)
"""
with open(blocks_filepath, "r") as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
record_set_num, block_id, record_id = row["record_set_num"], row["block_id"], row["record_id"]
if current_block_id == block_id:
if record_set_num == "1":
block_set_1.append((record_id, unlabeled_data_1[record_id]))
elif record_set_num == "2":
block_set_2.append((record_id, unlabeled_data_2[record_id]))
else:
raise ValueError("record_set_num should only be 1 or 2, but got {}".format(record_set_num))
else:
if current_block_id is not None:
block_records = (block_set_1, block_set_2)
yield block_records
current_block_id = block_id
if record_set_num == "1":
block_set_1 = [(record_id, unlabeled_data_1[record_id])]
block_set_2 = []
elif record_set_num == "2":
block_set_1 = []
block_set_2 = [(record_id, unlabeled_data_2[record_id])]
else:
raise ValueError("record_set_num should only be 1 or 2, but got {}".format(record_set_num))
block_records = (block_set_1, block_set_2)
yield block_records
def get_blocked_pairs(deduper_or_linker, blocked_data):
if isinstance(deduper_or_linker, dedupe.api.DedupeMatching):
pairs = (combinations(sorted(block), 2) for block in blocked_data)
elif isinstance(deduper_or_linker, dedupe.api.RecordLinkMatching):
pairs = (product(base, target) for base, target in blocked_data)
else:
raise ValueError("Passed not of class DedupeMatching or of RecordLinkMatching!")
return pairs
def count_blocked_pairs(deduper_or_linker, blocked_data):
candidate_records = itertools.chain.from_iterable(get_blocked_pairs(deduper_or_linker, blocked_data))
i = 0
for _ in candidate_records:
i += 1
return i
def write_training_set_from_pairs(labeled_pair_ids_df, labeled_data_filepath, unlabeled_data, unlabeled_data_2 = None):
# create a labeled training set directly for dedupe's consumption
labeled_data_train = {"distinct":[], "match":[]}
for _, row in labeled_pair_ids_df.iterrows():
rec_id_1 = row["record id 1"]
rec_id_2 = row["record id 2"]
rec_data_1 = unlabeled_data[rec_id_1]
if unlabeled_data_2 is None:
rec_data_2 = unlabeled_data[rec_id_2]
else:
rec_data_2 = unlabeled_data_2[rec_id_2]
label = row["label"]
data_entry = {
"__class__":"tuple",
"__value__":[rec_data_1, rec_data_2]
}
labeled_data_train[label].append(data_entry)
with open(labeled_data_filepath, "w") as json_file:
simplejson.dump(labeled_data_train,
json_file,
default=dedupe.serializer._to_json,
tuple_as_array=False,
ensure_ascii=True)
def get_deduped_data_for_rl(task_name, saved_files_path):
# gets deduped dataset from respective deduping for rl
dataset_name = task_name.split("-")[1]
dataset_1_name, dataset_2_name = dataset_name.split("_")
dedup_task_1 = "dedup-{}".format(dataset_1_name)
dedup_task_2 = "dedup-{}".format(dataset_2_name)
# get all filepaths
unlabeled_data_1_filepath, unlabeled_data_2_filepath = dm_file_checker.get_proper_unlabeled_data_filepath(task_name, saved_files_path)
numeric_fields_1, numeric_fields_2 = dm_file_checker.get_dataset_info(task_name, "numeric_fields", saved_files_path)
print("Numeric fields 1 are {}".format(numeric_fields_1))
print("Numeric fields 2 are {}".format(numeric_fields_2))
canonicals_1_filepath = dm_file_checker.get_filepath(dedup_task_1, "cluster_canonical", saved_files_path)
canonicals_2_filepath = dm_file_checker.get_filepath(dedup_task_2, "cluster_canonical", saved_files_path)
mapped_records_1_filepath = dm_file_checker.get_filepath(dedup_task_1, "mapped_records", saved_files_path)
mapped_records_2_filepath = dm_file_checker.get_filepath(dedup_task_2, "mapped_records", saved_files_path)
# read in data from filepaths
unlabeled_data_1 = read_unlabeled_data_json(unlabeled_data_1_filepath, empty_str_to_none = False,
numeric_fields = numeric_fields_1)
unlabeled_data_2 = read_unlabeled_data_json(unlabeled_data_2_filepath, empty_str_to_none = False,
numeric_fields = numeric_fields_2)
canonicals_1_df = pd.read_csv(canonicals_1_filepath, keep_default_na = False, low_memory = False)
canonicals_2_df = pd.read_csv(canonicals_2_filepath, keep_default_na = False, low_memory = False)
mapped_records_1_df = pd.read_csv(mapped_records_1_filepath, keep_default_na = False)
mapped_records_2_df = pd.read_csv(mapped_records_2_filepath, keep_default_na = False)
# get deduped data in dictionary form
deduped_data_1 = get_deduped_data(mapped_records_1_df, canonicals_1_df, unlabeled_data_1, none_to_empty_str = False)
deduped_data_2 = get_deduped_data(mapped_records_2_df, canonicals_2_df, unlabeled_data_2, none_to_empty_str = False)
if numeric_fields_1 is not None:
for col in numeric_fields_1:
deduped_data_1[col] = deduped_data_1[col].apply(lambda x: x if x == "" else float(x))
if numeric_fields_2 is not None:
for col in numeric_fields_2:
deduped_data_2[col] = deduped_data_2[col].apply(lambda x: x if x == "" else float(x))
for col in deduped_data_1.columns:
empty_str_bool = (deduped_data_1[col] == "")
print("in deduped data 1, converting {} empty string values of column {} to None".format(empty_str_bool.sum(), col))
deduped_data_1.loc[empty_str_bool,col] = None
for col in deduped_data_2.columns:
empty_str_bool = (deduped_data_2[col] == "")
print("in deduped data 2, converting {} empty string values of column {} to None".format(empty_str_bool.sum(), col))
deduped_data_2.loc[empty_str_bool,col] = None
# converting NaNs of numeric columns (NaNs introduced because of the previous line) to None
if numeric_fields_1 is not None:
for col in numeric_fields_1:
not_nan_bool = deduped_data_1[col].notnull()
print("in deduped data 1, converting {} NaN values of {} to None".format((~not_nan_bool).sum(), col))
deduped_data_1[col] = deduped_data_1[col].where((not_nan_bool), None)
if numeric_fields_2 is not None:
for col in numeric_fields_2:
not_nan_bool = deduped_data_2[col].notnull()
print("in deduped data 2, converting {} NaN values of {} to None".format((~not_nan_bool).sum(), col))
deduped_data_2[col] = deduped_data_2[col].where((not_nan_bool), None)
deduped_data_1 = deduped_data_1.to_dict(orient = "index")
deduped_data_2 = deduped_data_2.to_dict(orient = "index")
return deduped_data_1, deduped_data_2
# function to make sure the all record ids are prepended with the name of the dataset
def verify_rec_id_format(record_id, data_name):
if pd.isnull(record_id):
is_ok = True
else:
is_ok = (record_id.split("-")[0] == data_name)
return is_ok
# function to return all results from all record linkage results
def get_all_rl_results(rl_task_names, saved_files_path):
dupe_records = pd.DataFrame(columns = ["record id 1", "record id 2", "confidence score"])
all_records = set()
# iterate over each rl mapped file
for rl_task in rl_task_names:
data_name_1, data_name_2 = rl_task.split("-")[1].split("_")
mapped_records_filepath = dm_file_checker.get_filepath(rl_task, "mapped_records", saved_files_path)
print("Getting mapped record links from {}".format(rl_task))
mapped_records_df = pd.read_csv(mapped_records_filepath)
# make sure all record ids are prepended with the name of the dataset
ok_records_1 = mapped_records_df["record id 1"].apply(lambda x: verify_rec_id_format(x, data_name_1)).all()
ok_records_2 = mapped_records_df["record id 2"].apply(lambda x: verify_rec_id_format(x, data_name_2)).all()
assert (ok_records_1 and ok_records_2), "Record ids aren't prepended with the dataset name!"
append_dupe_records = mapped_records_df.loc[mapped_records_df["link type"] == "dup",\
["record id 1", "record id 2","confidence score"]]
dupe_records = dupe_records.append(append_dupe_records, ignore_index = True)
append_all_records = mapped_records_df.loc[:,["record id 1","record id 2"]]
append_all_records = append_all_records["record id 1"].dropna().unique().tolist() \
+ append_all_records["record id 2"].dropna().unique().tolist()
append_all_records = set(append_all_records)
all_records.update(append_all_records)
all_records = list(all_records)
pairs = dupe_records.loc[:,["record id 1", "record id 2"]]\
.apply(lambda row: (row["record id 1"], row["record id 2"]), axis = 1)\
.tolist()
n_pairs = len(pairs)
id_type = (str, 265)
pairs = np.array(pairs, dtype = id_type)
scores = dupe_records.loc[:,["confidence score"]].to_numpy(dtype = float).reshape(-1)
dtype = np.dtype([("pairs", id_type, 2),
("score", "f4", 1)])
temp_file, file_path = tempfile.mkstemp()
os.close(temp_file)
scored_pairs = np.memmap(file_path,
shape = n_pairs,
dtype = dtype)
scored_pairs["pairs"] = pairs
scored_pairs["score"] = scores
return scored_pairs, all_records
def get_fusion_probs_and_threshold(scored_pairs, recall_weight = 1):
probs = scored_pairs['score']
probs = probs.copy()
probs.sort()
probs = probs[::-1]
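    # Treating each score as the probability that its pair is a true duplicate, the
    # cumulative sum over the descending scores gives the expected number of true
    # duplicates among the top-k pairs; expected recall and precision then follow for
    # every candidate threshold, and the threshold maximizing an F-beta-style score
    # (with beta = recall_weight) is returned.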
expected_dupes = np.cumsum(probs)
recall = expected_dupes / expected_dupes[-1]
precision = expected_dupes / np.arange(1, len(expected_dupes) + 1)
score = recall * precision / (recall + recall_weight ** 2 * precision)
i = np.argmax(score)
print('Maximum expected recall and precision')
print('recall: {:.2f}%'.format(recall[i]*100))
print('precision: {:.2f}%'.format(precision[i]*100))
print('With threshold: {:.2f}%'.format(probs[i]*100))
return probs, probs[i]
def map_cluster_fusion_ids(scored_pairs, all_records, threshold):
clustered_dupes = dedupe_cluster(scored_pairs, threshold)
mapped_records = []
record_ids_in_clusters = []
# assign cluster ids to record ids
i = 0
print("Mapping cluster ids...")
for cluster in tqdm(clustered_dupes):
i += 1
cluster_id = "fs-{}".format(i)
id_set, scores = cluster
for record_id, score in zip(id_set, scores):
record_dict = {
"record id": record_id,
"cluster id": cluster_id,
"confidence score": score,
"cluster type":'link'
}
mapped_records.append(record_dict)
record_ids_in_clusters.append(record_id)
record_ids_in_clusters = set(record_ids_in_clusters)
solo_ids = [key for key in all_records if key not in record_ids_in_clusters]
# assign solo ids to record ids
print("Mapping solo record ids...")
for record_id in tqdm(solo_ids):
i += 1
cluster_id = "fs-{}".format(i)
record_dict = {
"record id":record_id,
"cluster id":cluster_id,
"confidence score":None,
"cluster type":'solo'
}
mapped_records.append(record_dict)
mapped_records = pd.DataFrame(mapped_records)
return mapped_records
def get_all_dedup_results(rl_task_names, saved_files_path, remove_data_name_prefix = True):
all_dedup_mapped_records = pd.DataFrame()
dedup_datasets = set()
for rl_task in rl_task_names:
data_name_1, data_name_2 = rl_task.split("-")[1].split("_")
for data_name in [data_name_1, data_name_2]:
dedup_task = "dedup-{}".format(data_name)
mapped_records_filepath = dm_file_checker.get_filepath(dedup_task, "mapped_records", saved_files_path)
# replace IDs only of datasets that have undergone deduplication
if os.path.exists(mapped_records_filepath) & (data_name not in dedup_datasets):
dedup_datasets.add(data_name)
dedup_mapped_records = pd.read_csv(mapped_records_filepath)
dedup_mapped_records = dedup_mapped_records.rename(columns = {"confidence score":"dedup confidence score",
"cluster type":"dedup cluster type"})
if remove_data_name_prefix:
dedup_mapped_records["record id"] = dedup_mapped_records["record id"]\
.apply(lambda x: x.replace("{}-".format(data_name), ""))
all_dedup_mapped_records = all_dedup_mapped_records.append(dedup_mapped_records, ignore_index = True)
return all_dedup_mapped_records
def check_block_sizes(blocks):
block_sizes = []
for block in blocks:
block_size = len(block)
block_sizes.append(block_size)
block_sizes = sorted(block_sizes, reverse = True)
print("Sizes of top 10 biggest blocks are: {}".format(block_sizes[:10]))
record_pair_contributions = [int(size*(size-1)/2) for size in block_sizes[:10]]
print("Record pair contributions from top 10 biggest blocks are : {}".format(record_pair_contributions))
```
#### File: data-matching-workflow/python_scripts/init_files_folders.py
```python
import os
import json
from shutil import copyfile
def make_subfolders(parent_folder, folder_names, subfolder_names):
# make sure each folder has every subfolder
assert os.path.exists(parent_folder), "Parent folder {} doesn't exist".format(parent_folder)
for name in folder_names:
folder_path = os.path.join(parent_folder, name)
if not os.path.exists(folder_path):
print("Creating {}".format(folder_path))
os.mkdir(folder_path)
else:
print("{} already exists".format(folder_path))
for subfolder_name in subfolder_names:
subfolder_path = os.path.join(folder_path, subfolder_name)
if not os.path.exists(subfolder_path):
print("Creating {}".format(subfolder_path))
os.mkdir(subfolder_path)
else:
print("{} already exists".format(subfolder_path))
def check_notebook_filenames(foldername):
base_foldername = os.path.basename(foldername)
# check filenames of all jupyter notebooks
filenames = [i for i in os.listdir(foldername) if ".ipynb_checkpoints" not in i]
for filename in filenames:
filename_identifier = "-".join(filename.split("-")[:2])
assert base_foldername == filename_identifier, "Filename {} doesn't align with foldername {}".format(filename, base_foldername)
def check_notebook_filenames_all(parent_folder, folder_names, subfolder_names):
# make sure each folder has every subfolder
assert os.path.exists(parent_folder), "Parent folder {} doesn't exist".format(parent_folder)
for name in folder_names:
folder_path = os.path.join(parent_folder, name)
for subfolder_name in subfolder_names:
subfolder_path = os.path.join(folder_path, subfolder_name)
check_notebook_filenames(subfolder_path)
def make_directories(parent_directory = ".."):
# parent_directory contains all the data matching files
config_filepath = os.path.join(parent_directory, "saved_files/config.json")
assert os.path.exists(config_filepath), "Need to create config file first!"
with open(config_filepath, "r") as f:
config = json.load(f)
dataset_names = list(config["datasets"].keys())
task_names = list(config["tasks"].keys())
dedup_names = [name for name in task_names if "dedup-" in name]
rl_names = [name for name in task_names if "rl-" in name]
print("Making subfolders for saved_files...")
saved_files_folderpath = os.path.join(parent_directory, "saved_files")
make_subfolders(parent_folder = saved_files_folderpath,
folder_names = dataset_names,
subfolder_names = ["value_counts_checks", "cleaned_strings_checks"]
)
make_subfolders(parent_folder = saved_files_folderpath,
folder_names = dedup_names,
subfolder_names = ["training_output", "blocking_output", "output", "test_split_output"]
)
make_subfolders(parent_folder = saved_files_folderpath,
folder_names = rl_names,
subfolder_names = ["training_output", "blocking_output", "output", "test_split_output"]
)
make_subfolders(parent_folder = saved_files_folderpath,
folder_names = ["fusion"],
subfolder_names = []
)
print("Making subfolders for notebooks...")
notebooks_folderpath = os.path.join(parent_directory, "notebooks")
if not os.path.exists(notebooks_folderpath):
os.mkdir(notebooks_folderpath)
make_subfolders(parent_folder = notebooks_folderpath,
folder_names = ["preprocess"],
subfolder_names = ["preprocess-{}".format(i) for i in dataset_names]
)
make_subfolders(parent_folder = notebooks_folderpath,
folder_names = ["dedup"],
subfolder_names = dedup_names
)
make_subfolders(parent_folder = notebooks_folderpath,
folder_names = ["rl"],
subfolder_names = rl_names
)
make_subfolders(parent_folder = notebooks_folderpath,
folder_names = ["fusion"],
subfolder_names = []
)
def copy_prototype_notebooks(parent_directory = ".."):
# parent_directory contains all the data matching files
proto_notebooks_path = os.path.join(parent_directory,"prototype_notebooks")
assert os.path.exists(proto_notebooks_path), "Need to copy prototype folders first!"
config_filepath = os.path.join(parent_directory, "saved_files/config.json")
assert os.path.exists(config_filepath), "Need to create config file first!"
with open(config_filepath, "r") as f:
config = json.load(f)
dataset_names = list(config["datasets"].keys())
task_names = list(config["tasks"].keys())
dedup_names = [name for name in task_names if "dedup-" in name]
rl_names = [name for name in task_names if "rl-" in name]
# get all prototype filepaths first
proto_preprocess_path = os.path.join(proto_notebooks_path, "preprocess/preprocess-febrl3")
proto_preprocess_ntbks = os.listdir(proto_preprocess_path)
proto_preprocess_ntbks = [x for x in proto_preprocess_ntbks if (x != ".ipynb_checkpoints") and ("TEST" not in x)]
proto_dedup_path = os.path.join(proto_notebooks_path, "dedup/dedup-febrl3")
proto_dedup_ntbks = os.listdir(proto_dedup_path)
proto_dedup_ntbks = [x for x in proto_dedup_ntbks if (x != ".ipynb_checkpoints") and ("TEST" not in x)]
proto_rl_path = os.path.join(proto_notebooks_path, "rl/rl-febrl4a_febrl4b")
proto_rl_ntbks = os.listdir(proto_rl_path)
proto_rl_ntbks = [x for x in proto_rl_ntbks if (x != ".ipynb_checkpoints") and ("TEST" not in x)]
proto_fusion_path = os.path.join(proto_notebooks_path, "fusion")
proto_fusion_ntbks = os.listdir(proto_fusion_path)
proto_fusion_ntbks = [x for x in proto_fusion_ntbks if (x != ".ipynb_checkpoints") and ("TEST" not in x)]
# copy all prototype notebooks to corresponding notebooks
for dataset_name in dataset_names:
for source_ntbk in proto_preprocess_ntbks:
target_ntbk = source_ntbk.replace("febrl3", dataset_name)
source_path = os.path.join(proto_preprocess_path, source_ntbk)
target_path = os.path.join(parent_directory, "notebooks/preprocess/preprocess-{}".format(dataset_name),
target_ntbk)
if os.path.exists(target_path):
print("{} already exists".format(target_path))
else:
print("Copying prototype preprocessing ntbk {}".format(source_path))
copyfile(source_path, target_path)
for dedup_name in dedup_names:
for source_ntbk in proto_dedup_ntbks:
target_ntbk = source_ntbk.replace("dedup-febrl3", dedup_name)
source_path = os.path.join(proto_dedup_path, source_ntbk)
target_path = os.path.join(parent_directory, "notebooks/dedup/{}".format(dedup_name), target_ntbk)
if os.path.exists(target_path):
print("{} already exists".format(target_path))
else:
print("Copying prototype deduping ntbk {}".format(source_path))
copyfile(source_path, target_path)
for rl_name in rl_names:
for source_ntbk in proto_rl_ntbks:
target_ntbk = source_ntbk.replace("rl-febrl4a_febrl4b", rl_name)
source_path = os.path.join(proto_rl_path, source_ntbk)
target_path = os.path.join(parent_directory, "notebooks/rl/{}".format(rl_name), target_ntbk)
if os.path.exists(target_path):
print("{} already exists".format(target_path))
else:
print("Copying prototype rl ntbk {}".format(source_path))
copyfile(source_path, target_path)
for source_ntbk in proto_fusion_ntbks:
target_ntbk = source_ntbk
source_path = os.path.join(proto_fusion_path, source_ntbk)
target_path = os.path.join(parent_directory, "notebooks/fusion", target_ntbk)
if os.path.exists(target_path):
print("{} already exists".format(target_path))
else:
print("Copying prototype fusion ntbk {}".format(source_path))
copyfile(source_path, target_path)
def check_notebook_files(parent_directory = ".."):
# parent_directory contains all the data matching files
config_filepath = os.path.join(parent_directory, "saved_files/config.json")
assert os.path.exists(config_filepath), "Need to create config file first!"
with open(config_filepath, "r") as f:
config = json.load(f)
dataset_names = list(config["datasets"].keys())
task_names = list(config["tasks"].keys())
dedup_names = [name for name in task_names if "dedup-" in name]
rl_names = [name for name in task_names if "rl-" in name]
print("Checking filenames in notebook subfolders...")
notebooks_folderpath = os.path.join(parent_directory, "notebooks")
assert os.path.exists(notebooks_folderpath), "no notebooks folder found!"
check_notebook_filenames_all(
parent_folder = notebooks_folderpath,
folder_names = ["preprocess"],
subfolder_names = ["preprocess-{}".format(i) for i in dataset_names]
)
check_notebook_filenames_all(
parent_folder = notebooks_folderpath,
folder_names = ["dedup"],
subfolder_names = dedup_names
)
check_notebook_filenames_all(
parent_folder = notebooks_folderpath,
folder_names = ["rl"],
subfolder_names = rl_names
)
print("Notebook filenames all correct!")
def rename_and_make_notebooks_dir(parent_directory = ".."):
# rename notebooks to prototype_notebooks
notebooks_folderpath = os.path.join(parent_directory, "notebooks")
prototype_notebooks_folderpath = os.path.join(parent_directory, "prototype_notebooks")
os.rename(src = notebooks_folderpath, dst = prototype_notebooks_folderpath)
# make empty notebooks folder that will be filled later
os.mkdir(notebooks_folderpath)
if __name__ == "__main__":
notice = """
Run this script only after doing the following in the main project folder:
1. Have a python_scripts folder with the .py files (this file itself should be inside).
2. Have a saved_files folder with a filled out config.json file.
3. Have a notebooks folder.
4. Have a prototype_notebooks folder filled with prototype notebooks that use the febrl dataset.
Have you done the steps above already? (y/n)
"""
user_input = ""
valid_response = False
while not valid_response:
print(notice)
user_input = input("Answer:")
if user_input in ["y", "n"]:
valid_response = True
if user_input == "y":
rename_and_make_notebooks_dir()
make_directories()
copy_prototype_notebooks()
check_notebook_files()
elif user_input == "n":
print("Run this script again after doing the above mentioned steps.")
else:
print("Incorrect input. Only type y or n.")
``` |
{
"source": "joshuacortez/grape",
"score": 2
} |
#### File: grape/grape/hyperparameter_optimizer.py
```python
import pandas as pd
import xgboost as xgb
import lightgbm as lgb
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import ElasticNet, ElasticNetCV
import hyperopt
import numpy as np
from numpy.random import RandomState
from sklearn.linear_model import ElasticNet, ElasticNetCV
from sklearn.model_selection import cross_val_score, KFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_absolute_error, mean_squared_error
from timeit import default_timer as timer
from copy import copy
from regression_model import RegressionModel
from param_space import parameter_space, int_params_dict
from utils import get_elastic_net_l1_ratio, _huber_approx_obj, eval_func_is_higher_better, prepare_folds
# todo: hyperparameter optimization that depends on time, or can stop midway
class HyperParameterOptimizer:
"""
Hands-free hyperparameter optimization powered by Hyperopt
Traditional methods use grid search and random search.
However, this instead uses Sequential Model Based Optimization (SMBO)
In SMBO, trials of hyperparams are informed by earlier trials of hyperparams
Parameters
----------
eval_func_name : str, optional (default="mse")
The evaluation function used to measure cross validation error
The hyperparameters that maximize/minimize the evaluation function
are chosen
verbosity : int, optional (default=0)
If verbosity >= 2, print after every iteration of hyperparameter optimization
Attributes
----------
Attributes which are saved initialization parameters:
eval_func_name : str
verbosity : int
Derived attributes after calling tune_hyperparams or tune_and_fit:
hyperopt_summary : dataframe of shape [total_n_iterations, 5]
Contains the following information per iteration of HPO:
loss : mean of cross validation error
loss_variance : variance of cross validation error
params : hyperparameters used in iteration
n_iterations : counter variable for the number of iterations
iteration_run_time : runtime in seconds
best_params : dict
Contains the hyperparameters under the minimum loss
model : a GRAPE RegresssionModel
Tuned with the best hyperparameters
Example
-------
see hyperparameter_optimizer_test in tests.py
References
----------
https://github.com/WillKoehrsen/hyperparameter-optimization/blob/master/Bayesian%20Hyperparameter%20Optimization%20of%20Gradient%20Boosting%20Machine.ipynb
"""
def __init__(self, eval_func_name = "mse", verbosity = 0):
self.verbosity = verbosity
self.eval_func_name = eval_func_name
# keep track of all random_states
def tune_hyperparams(self,
model,
train_valid_folds = 5,
total_n_iterations = 100,
use_model_copy = False):
"""
Finds the best hyperparameters for a GRAPE RegressionModel
Parameters
----------
model : a GRAPE RegressionModel
train_valid_folds : int, list of lists of ints, or cross validation generator
Optional (default=5)
The folds used for cross validation
If int, the number of folds used in sklearn's KFold
If list of list of indices, each sublist should contain the indices per fold
total_n_iterations : int
The number of iterations to run hyperparameter optimization until it stops
The hyperparameter with lowest loss within total_n_iterations is chosen
use_model_copy : bool
If False, pass only a reference to the GRAPE RegressionModel, saving memory
If True, makes a copy of the model, ensuring the original instance doesn't get overwritten
"""
self.hyperopt_summary = []
bayes_trials = hyperopt.Trials()
if use_model_copy:
model = copy(model)
self.model = model
train_valid_folds = prepare_folds(train_valid_folds,
self.model.X_train,
self.model.random_seed)
self._train_valid_folds = train_valid_folds
model_params = copy(parameter_space[self.model.model_type])
if self.model.model_type == "lightgbm":
self._d_train = lgb.Dataset(
data = self.model.X_train,
label = self.model.y_train,
weight = self.model.sample_weight
)
# TODO: make lightgbm silent pls
elif self.model.model_type == "xgboost":
self._d_train = xgb.DMatrix(
data = self.model.X_train,
label = self.model.y_train,
weight = self.model.sample_weight
)
self._n_iterations = 0
random_state = RandomState(self.model.random_seed)
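        # hyperopt's fmin drives the search: the TPE (Tree-structured Parzen
        # Estimator) algorithm proposes new hyperparameter settings informed by the
        # losses of earlier trials, and _objective_callback scores each proposal via
        # cross validation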
_ = hyperopt.fmin(
fn = self._objective_callback,
space = model_params,
algo = hyperopt.tpe.suggest,
max_evals = total_n_iterations,
trials = bayes_trials,
rstate = random_state,
)
self.hyperopt_summary = pd.DataFrame(self.hyperopt_summary)
min_loss_idx = self.hyperopt_summary["loss"].idxmin()
self.best_params = self.hyperopt_summary.loc[min_loss_idx,"params"]
def fit_best_model(self, override_params = None):
"""
Fits a GRAPE RegressionModel with the best found hyperparameters
Parameters
----------
override_params : dict or None, optional (default=None)
If dict, contains parameters to add to best_params or to override
Example why this could be used:
In random_forest, higher values n_estimators (i.e. number of decision trees)
can only make predictive performance better. But it comes at the
expense of computational time. During hyperparameter tuning,
it's wise to keep it at a relatively low number (e.g. n_estimators = 100)
for good runtime. After tuning, it's a good idea to increase n_estimators
(e.g. n_estimators = 400) to increase predictive performance
"""
assert hasattr(self, "best_params"), "Need to tune_hyperparams first"
if override_params is None:
override_params = {}
model_params = copy(self.best_params)
print(model_params)
model_params.update(override_params)
self.model.fit(model_params = model_params)
# shorthand function
def tune_and_fit(self,
model,
train_valid_folds = None,
total_n_iterations = 100,
use_model_copy = False,
override_params = None):
"""
Convenience function for both finding and fitting the best hyperparams
Parameters
----------
see documentation of tune_hyperparams and fit_best_model
"""
self.tune_hyperparams(model,
train_valid_folds,
total_n_iterations,
use_model_copy)
self.fit_best_model(override_params)
def _objective_callback(self, model_params):
"""
Callback function that Hyperopt's fmin function attempts to minimize
More specifically, this computes the cross validation error of the model
Hyperopt's fmin function is incentivized to minimize cross validation error
"""
start_time = timer()
model_params = copy(model_params)
model_params = self._prepare_params(model_params,
model_type = self.model.model_type)
cv_scores = self.model.cross_validate(self._train_valid_folds,
self.eval_func_name,
model_params,
include_rf_oob = False)
loss = cv_scores["cv-{}-mean".format(self.eval_func_name)]
loss_std = cv_scores["cv-{}-std".format(self.eval_func_name)]
if eval_func_is_higher_better[self.eval_func_name]:
loss = loss*(-1)
# xgboost and lightgbm have the additional intricacy of having boosting rounds
if self.model.model_type in ["xgboost", "lightgbm"]:
best_idx_loss = np.argmin(loss)
loss = loss[best_idx_loss]
loss_std = loss_std[best_idx_loss]
num_boost_round = best_idx_loss + 1
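            # record the CV-optimal number of boosting rounds in the params so that
            # refitting with best_params later trains for exactly that many rounds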
if self.model.model_type == "lightgbm":
model_params["estimators"] = num_boost_round
elif self.model.model_type == "xgboost":
model_params["num_boost_round"] = num_boost_round
self._n_iterations += 1
self._print_iter()
run_time = timer() - start_time
output = {
'loss': loss,
'loss_variance':loss_std**2,
'params': model_params,
"n_iterations": self._n_iterations,
"iteration_run_time": run_time,
}
self.hyperopt_summary.append(output.copy())
output["status"] = hyperopt.STATUS_OK
return output
def _print_iter(self):
if self.verbosity >= 2:
print("Iteration:", self._n_iterations)
@staticmethod
def _prepare_params(model_params, model_type):
"""
Convenience function for conditional sampling (if applicable),
and for type conversions (converting to int) of Hyperopt's parameters
"""
if model_type == "elastic_net":
model_params["l1_ratio"] = get_elastic_net_l1_ratio(model_params)
if model_type == "lightgbm":
# conditional sampling from bayesian domain for the goss bossting type
if "boosting_type" in model_params.keys():
subsample = model_params['boosting_type'].get('subsample', 1.0)
model_params['boosting_type'] = model_params['boosting_type']['boosting_type']
model_params['subsample'] = subsample
# converting to int
for param in int_params_dict[model_type]:
if param in model_params.keys():
model_params[param] = int(model_params[param])
return model_params
```
#### File: grape/grape/preprocessor.py
```python
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
import pandas as pd
# todo: other feature engineering tasks
# automatically removing columns that have only 1 unique value
# automatically assigning a new column that says whether or not a feature had missing values
class FeaturePreprocessor:
"""
Preprocessing steps for features
Currently supported is categorical encoding
Parameters
----------
cat_encoding_type : str
Either label_encoder (generates a label per category) or label_binarizer (dummy variables)
Attributes
----------
Attributes which are saved initialization parameters:
cat_encoding_type : str
Derived attributes:
col_encoder_dict : dict
Keys correspond to column names in X_df
Values correspond to LabelEncoder or LabelBinarizer objects
"""
def __init__(self, cat_encoding_type):
        assert cat_encoding_type in ["label_encoder", "label_binarizer"], "{} cat_encoding_type is not supported".format(cat_encoding_type)
self.cat_encoding_type = cat_encoding_type
def fit(self, X_df, cat_cols_list):
"""
Fits LabelEncoder or LabelBinarizer objects for all categorical variables in a dataframe
Parameters
----------
X_df : dataframe
cat_cols_list : list of str
Contains the column names in X_df that are categorical and should be transformed
"""
X_df = X_df.copy()
        assert len(cat_cols_list) > 0, "Need to specify at least one categorical variable"
for col in cat_cols_list:
assert col in X_df.columns, "{} not found among columns in X_df".format(col)
self._cat_cols_list = cat_cols_list
self.col_encoder_dict = {}
if self.cat_encoding_type == "label_encoder":
# tree based methods only need to preprocess categorical variables
if self._cat_cols_list:
# label encoding on categorical variables
for col in self._cat_cols_list:
labelencoder = LabelEncoder()
labelencoder.fit(X_df[col])
self.col_encoder_dict[col] = labelencoder
if self.cat_encoding_type == "label_binarizer":
# one hot encoding for categorical variables
for col in self._cat_cols_list:
# Label Encode those with 2 labels only
if X_df[col].nunique() == 2:
labelencoder = LabelEncoder()
labelencoder.fit(X_df[col])
self.col_encoder_dict[col] = labelencoder
else:
onehotencoder = LabelBinarizer()
onehotencoder.fit(X_df[col])
self.col_encoder_dict[col] = onehotencoder
# need to pre-emptively transform the one hot encoded variables before scaling
# https://stats.stackexchange.com/questions/69568/whether-to-rescale-indicator-binary-dummy-predictors-for-lasso
X_df = self._encode_categorical_cols(X_df)
def transform(self, X_df):
"""
Transforms the categorical variables
Parameters
----------
X_df : dataframe
Returns
-------
X_df : dataframe
Transformed after categorical encoding
"""
X_df = self._encode_categorical_cols(X_df)
return X_df
def fit_transform(self, X_df, cat_cols_list):
"""
Convenience function for fitting and transforming in one go
See fit and transform methods
Parameters:
-----------
X_df : dataframe
cat_cols_list : list of str
Returns:
--------
X_df : dataframe
Transformed after categorical encoding
"""
self.fit(X_df, cat_cols_list)
X_df = self.transform(X_df)
return X_df
def _encode_categorical_cols(self, X_df):
# not sure if copy is really needed
X_df = X_df.copy()
for col in self.col_encoder_dict.keys():
encoder = self.col_encoder_dict[col]
# first check if labels in X_df[col] are among the labels in the encoder
unique_labels = X_df[col].unique().tolist()
is_subset = set(unique_labels).issubset(set(encoder.classes_.tolist()))
assert is_subset, "Found labels in {} that are not found among the classes in the encoder {}".format(col, set(unique_labels) - set(encoder.classes_.tolist()) )
if isinstance(encoder, LabelBinarizer):
onehot_df = encoder.transform(X_df[col])
onehot_cols = ["{}_{}".format(col, label) for label in encoder.classes_]
onehot_df = pd.DataFrame(data = onehot_df, columns = onehot_cols, index = X_df.index)
# drop the first class
onehot_cols = onehot_cols[1:]
onehot_df = onehot_df.loc[:,onehot_cols]
# append the one hot encoded columns to X_df
X_df = pd.concat([X_df, onehot_df], axis = 1)
# drop the original column
X_df = X_df.drop(columns = col)
if isinstance(encoder, LabelEncoder):
X_df[col] = encoder.transform(X_df[col])
return X_df
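# Illustrative usage sketch (not part of the original module); the toy dataframe,
# column names, and values below are invented purely for demonstration.
if __name__ == "__main__":
    toy = pd.DataFrame({
        "color": ["red", "blue", "red", "green"],
        "is_member": ["yes", "no", "no", "yes"],
        "amount": [1.0, 2.5, 3.0, 4.2],
    })
    preprocessor = FeaturePreprocessor(cat_encoding_type="label_binarizer")
    # "is_member" has two labels so it is label encoded; "color" has three so it is
    # one-hot encoded with the first class dropped
    encoded = preprocessor.fit_transform(toy, cat_cols_list=["color", "is_member"])
    print(encoded)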
``` |
{
"source": "joshuacwnewton/portalocker",
"score": 3
} |
#### File: portalocker/portalocker/exceptions.py
```python
import typing
class BaseLockException(Exception):
# Error codes:
LOCK_FAILED = 1
def __init__(
self,
*args: typing.Any,
fh: typing.Optional[typing.IO] = None,
**kwargs: typing.Any,
) -> None:
self.fh = fh
Exception.__init__(self, *args)
class LockException(BaseLockException):
pass
class AlreadyLocked(BaseLockException):
pass
class FileToLarge(BaseLockException):
pass
``` |
{
"source": "joshuadamanik/CCBBA-Delivery",
"score": 3
} |
#### File: CCBBA-Delivery/models/task.py
```python
import numpy as np
class Task:
def __init__(self, task_id, init_pos, target_pos, reward):
self.id = task_id
self.pos = init_pos
self.target = target_pos
self.reward = reward
self.start_time = -1e+10
self.deadline = 1e+10
``` |
{
"source": "JoshuaDavid/Neighbor_Joining",
"score": 3
} |
#### File: Neighbor_Joining/src/distancetree.py
```python
head = lambda arr: arr[0]
tail = lambda arr: arr[1:]
class Tree:
parent = None
    def __init__(self, left = None, right = None, dleft = 1, dright = 1):
        self.left = left
        self.right = right
        self.dleft = dleft
        self.dright = dright
        left.parent = self
        left.dparent = dleft
        right.parent = self
        right.dparent = dright
def __repr__(self):
s = "┬" + ("─" * int(self.dleft))
if self.left:
left = repr(self.left).splitlines()
s += "" + head(left) + "\n"
for line in tail(left):
s += "│" + (" " * int(self.dleft))
s += line + "\n"
if self.right:
right = repr(self.right).splitlines()
s += "└" + ("─" * int(self.dright))
s += head(right) + "\n"
for line in tail(right):
s += " " + (" " * int(self.dright))
s += line + "\n"
return s
def __eq__(self, other):
if other.__class__.__name__ != "Tree":
return False
if self.left == other.left:
return self.left == other.left and \
self.right == other.right and \
self.dleft == other.dleft and \
self.dright == other.dright
else:
return self.left == other.right and \
self.right == other.left and \
self.dleft == other.dright and \
self.dright == other.dleft
def contains(self, other):
if id(self) == id(other):
return True
else:
if other.parent == None:
return False
else:
return self.contains(other.parent)
def distanceTo(self, other):
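        # Accumulate branch lengths recursively: if one node is an ancestor of the
        # other, walk the descendant upward; otherwise move both nodes up one level
        # at a time until a common ancestor is reached.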
if id(self) == id(other):
return 0
elif self.contains(other):
return other.dparent + self.distanceTo(other.parent)
elif other.contains(self):
return self.dparent + other.distanceTo(self.parent)
elif self.parent and other.parent:
return self.dparent + other.dparent + self.parent.distanceTo(other.parent)
else:
raise LookupError("Nodes are not in the same tree!")
class Leaf(Tree):
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
def __eq__(self, other):
return self.name == other.name
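# Illustrative usage sketch (not part of the original module); taxa names and branch
# lengths below are made up for demonstration.
if __name__ == "__main__":
    a, b, c = Leaf("A"), Leaf("B"), Leaf("C")
    inner = Tree(left=a, right=b, dleft=2, dright=3)
    root = Tree(left=inner, right=c, dleft=1, dright=4)
    print(root)
    print(a.distanceTo(b))  # 2 + 3 = 5
    print(a.distanceTo(c))  # 2 + 1 + 4 = 7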
``` |
{
"source": "JoshuaDavid/omega-bot",
"score": 3
} |
#### File: JoshuaDavid/omega-bot/main.py
```python
import os
import random
import discord
import requests
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv("DISCORD_TOKEN")
GUILD = os.getenv("DISCORD_GUILD")
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
OMEGA = commands.Bot(command_prefix="!o ", intents=discord.Intents.all())
def random_line(filename):
    with open(filename) as f:
        lines = f.read().splitlines()
    return random.choice(lines)
def ssc_search_query(search):
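    # Build a Google Custom Search JSON API request URL; `cx` identifies the
    # pre-configured programmable search engine and `q` carries the query string.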
api_endpoint = "https://www.googleapis.com/customsearch/v1"
return f"{api_endpoint}?key={GOOGLE_API_KEY}&cx=7e281d64bc7d22cb7&q={search}"
def scott_post_helper(args):
response = random_line("scott_links.txt")
if args:
query = ""
for item in args:
if " " in item:
query += f'"{item}" '
else:
query += f"{item} "
try:
response = requests.get(ssc_search_query(query)).json()["items"][0]["link"]
except KeyError:
response = "No matches found."
return response
@OMEGA.event
async def on_ready():
print(f"{OMEGA.user.name} is connected to the following servers:")
for guild in OMEGA.guilds:
print(f"{guild.name}(id: {guild.id})")
guild = discord.utils.get(OMEGA.guilds, name=GUILD)
print(f"Currently selected server: {guild.name}")
members = "\n - ".join([member.name for member in guild.members])
print(f"Guild Members:\n - {members}")
@OMEGA.command(
name="scott",
help="Responds with a Scott article (based on the arguments provided or random otherwise)",
)
async def scott_post(ctx, *args):
print("scott command invocation:")
print(scott_post_helper(args))
await ctx.send(scott_post_helper(args))
if __name__ == "__main__":
OMEGA.run(TOKEN)
``` |
{
"source": "joshuadavidsakharny/pythonProjects",
"score": 4
} |
#### File: joshuadavidsakharny/pythonProjects/Joshua David Sakharny - Interest On Investment.py
```python
initInvestAmount = float(input("Initial Investment Amount: "))
numYearsList = list(range(1, (int(input("Number Of Years: ")) + 1)))
numYears = len(numYearsList)
intRate = float(input("Interest Rate: "))
totalIntEarned = 0
# calculation
# :: calculate interest multiplier for simpler variable manipulation
intMulti = (intRate / 100) + 1
# :: calculate yearly interest earned with yearly end balance
def cal_yearly_int_earned_end_balance(init_invest, num_years, int_rate):
for year in num_years:
yearly_int_earned = (init_invest * int_rate) - init_invest
investment_inc = init_invest + yearly_int_earned
yearly_end_balance = yearly_int_earned + init_invest
init_invest = investment_inc
print("Year {} Interest Earned: ${:0.2f}".format(year, yearly_int_earned))
print("Year {} End Balance: ${:0.2f}".format(year, yearly_end_balance) + "\n")
# :: calculate total end balance
def cal_total_int_earned_end_balance(init_invest, num_years, int_rate, total_int_earned):
while num_years > 0:
yearly_int_earned = (init_invest * int_rate) - init_invest
investment_inc = init_invest + yearly_int_earned
yearly_end_balance = yearly_int_earned + init_invest
init_invest = investment_inc
total_int_earned += yearly_int_earned
total_end_balance = yearly_end_balance
num_years -= 1
if num_years == 0:
print("Total Interest Earned: $" + str(format(total_int_earned, '.2f')))
print("Total End Balance: $" + str(format(total_end_balance, '.2f')))
# output
print("\n")
print("Initial Investment Amount: " + str(format(initInvestAmount, '.2f')) + "\n")
cal_yearly_int_earned_end_balance(initInvestAmount, numYearsList, intMulti)
cal_total_int_earned_end_balance(initInvestAmount, numYears, intMulti, totalIntEarned)
```
#### File: joshuadavidsakharny/pythonProjects/Joshua David Sakharny - Ticket Discount Calculator (Python GUI).py
```python
from breezypythongui import EasyFrame
class ButtonDemo(EasyFrame):
"""Illustrates command buttons and user events."""
def __init__(self):
"""Sets up the window, label, and buttons."""
EasyFrame.__init__(self)
# title and description labels
self.label = self.addLabel(text="Ticket Discount Calculator",
row=0, column=0,
columnspan=2, sticky="NSEW")
self.label2 = self.addLabel(text="Original Ticket Price = $25.00",
row=1, column=0,
columnspan=2, sticky="NSEW")
self.label3 = self.addLabel(text="Children (16 & Younger) = 10% Discount",
row=2, column=0,
columnspan=2, sticky="W")
self.label4 = self.addLabel(text="Adults (17 - 64) = 2% (of Age) Discount",
row=3, column=0,
columnspan=2, sticky="W")
self.label5 = self.addLabel(text="Seniors (65 & Older) = 20% Discount",
row=4, column=0,
columnspan=2, sticky="W")
# clears all field entries for re-use of program / gui
self.clearBtn = self.addButton(text="Clear",
row=5, column=0,
command=self.clear)
# Label and field for the input (age)
self.addLabel(text="Input Age",
row=6, column=0)
self.inputField = self.addTextField(text="",
row=6,
column=1)
# Label and field for the discount section
self.addLabel(text="Discount",
row=7, column=0)
self.outputField = self.addTextField(text="",
row=7,
column=1,
state="readonly")
# label and field for the discounted total section
self.addLabel(text="Discounted Total",
row=8, column=0)
self.outputField2 = self.addTextField(text="",
row=8,
column=1,
state="readonly")
# command button / calculates discount and discounted totals
self.button = self.addButton(text="Calculate Discount",
row=5, column=1,
columnspan=2,
command=self.calculate)
# event handling method for the button
def calculate(self):
"""."""
int1 = self.inputField.getText()
age = int(int1)
discount = int(0)
total = int(25)
# comparison operations to determine discount per age limits
if 16 < age < 65:
discount += age * .02
elif age >= 65:
discount += 25 * .20
elif age <= 16:
discount = 25 * .10
self.outputField.setText("- $" + str(format(discount, '.2f')))
self.outputField2.setText("$" + str(format(total - discount, '.2f')))
# methods to handle user events
def clear(self):
"""Resets the label to the empty string and
the button states."""
self.inputField.setValue("")
self.outputField.setValue("")
self.outputField2.setValue("")
# instantiates and pops up the window
if __name__ == "__main__":
ButtonDemo().mainloop()
``` |
{
"source": "joshuadavidthomas/django_coverage_plugin",
"score": 2
} |
#### File: django_coverage_plugin/django_coverage_plugin/__init__.py
```python
from .plugin import DjangoTemplatePluginException # noqa
from .plugin import DjangoTemplatePlugin
def coverage_init(reg, options):
reg.add_file_tracer(DjangoTemplatePlugin(options))
```
#### File: django_coverage_plugin/django_coverage_plugin/plugin.py
```python
from __future__ import print_function
import os.path
import re
import coverage.plugin
import django
import django.template
from django.template.base import Lexer, NodeList, Template, TextNode
from django.template.defaulttags import VerbatimNode
from django.templatetags.i18n import BlockTranslateNode
from six.moves import range
try:
from django.template.base import TokenType
def _token_name(token_type):
        return token_type.name.capitalize()
except ImportError:
# Django <2.1 uses separate constants for token types
from django.template.base import (
TOKEN_BLOCK,
TOKEN_MAPPING,
TOKEN_TEXT,
TOKEN_VAR,
)
class TokenType:
TEXT = TOKEN_TEXT
VAR = TOKEN_VAR
BLOCK = TOKEN_BLOCK
def _token_name(token_type):
return TOKEN_MAPPING[token_type]
class DjangoTemplatePluginException(Exception):
"""Used for any errors from the plugin itself."""
pass
# For debugging the plugin itself.
SHOW_PARSING = False
SHOW_TRACING = False
def check_debug():
"""Check that Django's template debugging is enabled.
Django's built-in "template debugging" records information the plugin needs
to do its work. Check that the setting is correct, and raise an exception
if it is not.
Returns True if the debug check was performed, False otherwise
"""
from django.conf import settings
if not settings.configured:
return False
# I _think_ this check is all that's needed and the 3 "hasattr" checks
# below can be removed, but it's not clear how to verify that
from django.apps import apps
if not apps.ready:
return False
# django.template.backends.django gets loaded lazily, so return false
# until they've been loaded
if not hasattr(django.template, "backends"):
return False
if not hasattr(django.template.backends, "django"):
return False
if not hasattr(django.template.backends.django, "DjangoTemplates"):
raise DjangoTemplatePluginException("Can't use non-Django templates.")
if not django.template.engines._engines:
return False
for engine in django.template.engines.all():
if not isinstance(engine, django.template.backends.django.DjangoTemplates):
raise DjangoTemplatePluginException(
"Can't use non-Django templates."
)
if not engine.engine.debug:
raise DjangoTemplatePluginException(
"Template debugging must be enabled in settings."
)
return True
if django.VERSION < (1, 8):
raise RuntimeError("Django Coverage Plugin requires Django 1.8 or higher")
if django.VERSION >= (1, 9):
# Since we are grabbing at internal details, we have to adapt as they
# change over versions.
def filename_for_frame(frame):
try:
return frame.f_locals["self"].origin.name
except (KeyError, AttributeError):
return None
def position_for_node(node):
try:
return node.token.position
except AttributeError:
return None
def position_for_token(token):
return token.position
else:
def filename_for_frame(frame):
try:
return frame.f_locals["self"].source[0].name
except (KeyError, AttributeError, IndexError):
return None
def position_for_node(node):
return node.source[1]
def position_for_token(token):
return token.source[1]
def read_template_source(filename):
"""Read the source of a Django template, returning the Unicode text."""
# Import this late to be sure we don't trigger settings machinery too
# early.
from django.conf import settings
if not settings.configured:
settings.configure()
with open(filename, "rb") as f:
# The FILE_CHARSET setting will be removed in 3.1:
# https://docs.djangoproject.com/en/3.0/ref/settings/#file-charset
if django.VERSION >= (3, 1):
charset = 'utf-8'
else:
charset = settings.FILE_CHARSET
text = f.read().decode(charset)
return text
class DjangoTemplatePlugin(
coverage.plugin.CoveragePlugin,
coverage.plugin.FileTracer,
):
def __init__(self, options):
extensions = options.get("template_extensions", "html,htm,txt")
self.extensions = [e.strip() for e in extensions.split(",")]
self.debug_checked = False
self.django_template_dir = os.path.normcase(os.path.realpath(
os.path.dirname(django.template.__file__)
))
self.source_map = {}
# --- CoveragePlugin methods
def sys_info(self):
return [
("django_template_dir", self.django_template_dir),
("environment", sorted(
("%s = %s" % (k, v))
for k, v in os.environ.items()
if "DJANGO" in k
)),
]
def file_tracer(self, filename):
if os.path.normcase(filename).startswith(self.django_template_dir):
if not self.debug_checked:
# Keep calling check_debug until it returns True, which it
# will only do after settings have been configured
self.debug_checked = check_debug()
return self
return None
def file_reporter(self, filename):
return FileReporter(filename)
def find_executable_files(self, src_dir):
# We're only interested in files that look like reasonable HTML
# files: Must end with one of our extensions, and must not have
# funny characters that probably mean they are editor junk.
rx = r"^[^.#~!$@%^&*()+=,]+\.(" + "|".join(self.extensions) + r")$"
for (dirpath, dirnames, filenames) in os.walk(src_dir):
for filename in filenames:
if re.search(rx, filename):
yield os.path.join(dirpath, filename)
# --- FileTracer methods
def has_dynamic_source_filename(self):
return True
# "render" is the public method, but "render_annotated" is an internal
# method sometimes implemented directly on nodes.
RENDER_METHODS = {"render", "render_annotated"}
def dynamic_source_filename(self, filename, frame):
if frame.f_code.co_name not in self.RENDER_METHODS:
return None
if 0:
dump_frame(frame, label="dynamic_source_filename")
filename = filename_for_frame(frame)
if filename is not None:
if filename.startswith("<"):
# String templates have a filename of "<unknown source>", and
# can't be reported on later, so ignore them.
return None
return filename
return None
def line_number_range(self, frame):
assert frame.f_code.co_name in self.RENDER_METHODS
if 0:
dump_frame(frame, label="line_number_range")
render_self = frame.f_locals['self']
if isinstance(render_self, (NodeList, Template)):
return -1, -1
position = position_for_node(render_self)
if position is None:
return -1, -1
if SHOW_TRACING:
print("{!r}: {}".format(render_self, position))
s_start, s_end = position
if isinstance(render_self, TextNode):
first_line = render_self.s.splitlines(True)[0]
if first_line.isspace():
s_start += len(first_line)
elif VerbatimNode and isinstance(render_self, VerbatimNode):
# VerbatimNode doesn't track source the same way. s_end only points
# to the end of the {% verbatim %} opening tag, not the entire
# content. Adjust it to cover all of it.
s_end += len(render_self.content)
elif isinstance(render_self, BlockTranslateNode):
# BlockTranslateNode has a list of text and variable tokens.
# Get the end of the contents by looking at the last token,
# and use its endpoint.
last_tokens = render_self.plural or render_self.singular
s_end = position_for_token(last_tokens[-1])[1]
filename = filename_for_frame(frame)
line_map = self.get_line_map(filename)
start = get_line_number(line_map, s_start)
end = get_line_number(line_map, s_end-1)
if start < 0 or end < 0:
start, end = -1, -1
if SHOW_TRACING:
print("line_number_range({}) -> {}".format(
filename, (start, end)
))
return start, end
# --- FileTracer helpers
def get_line_map(self, filename):
"""The line map for `filename`.
A line map is a list of character offsets, indicating where each line
in the text begins. For example, a line map like this::
[13, 19, 30]
means that line 2 starts at character 13, line 3 starts at 19, etc.
Line 1 always starts at character 0.
"""
if filename not in self.source_map:
template_source = read_template_source(filename)
if 0: # change to see the template text
for i in range(0, len(template_source), 10):
print("%3d: %r" % (i, template_source[i:i+10]))
self.source_map[filename] = make_line_map(template_source)
return self.source_map[filename]
class FileReporter(coverage.plugin.FileReporter):
def __init__(self, filename):
super(FileReporter, self).__init__(filename)
# TODO: html filenames are absolute.
self._source = None
def source(self):
if self._source is None:
self._source = read_template_source(self.filename)
return self._source
def lines(self):
source_lines = set()
if SHOW_PARSING:
print("-------------- {}".format(self.filename))
if django.VERSION >= (1, 9):
lexer = Lexer(self.source())
else:
lexer = Lexer(self.source(), self.filename)
tokens = lexer.tokenize()
# Are we inside a comment?
comment = False
# Is this a template that extends another template?
extends = False
# Are we inside a block?
inblock = False
for token in tokens:
if SHOW_PARSING:
print(
"%10s %2d: %r" % (
_token_name(token.token_type),
token.lineno,
token.contents,
)
)
if token.token_type == TokenType.BLOCK:
if token.contents == "endcomment":
comment = False
continue
if comment:
continue
if token.token_type == TokenType.BLOCK:
if token.contents.startswith("endblock"):
inblock = False
elif token.contents.startswith("block"):
inblock = True
if extends:
continue
if extends and not inblock:
                # In an inheriting template, ignore all tags outside of
# blocks.
continue
if token.contents == "comment":
comment = True
if token.contents.startswith("end"):
continue
elif token.contents in ("else", "empty"):
continue
elif token.contents.startswith("elif"):
# NOTE: I don't like this, I want to be able to trace elif
# nodes, but the Django template engine doesn't track them
# in a way that we can get useful information from them.
continue
elif token.contents.startswith("extends"):
extends = True
source_lines.add(token.lineno)
elif token.token_type == TokenType.VAR:
source_lines.add(token.lineno)
elif token.token_type == TokenType.TEXT:
if extends and not inblock:
continue
# Text nodes often start with newlines, but we don't want to
# consider that first line to be part of the text.
lineno = token.lineno
lines = token.contents.splitlines(True)
num_lines = len(lines)
if lines[0].isspace():
lineno += 1
num_lines -= 1
source_lines.update(range(lineno, lineno+num_lines))
if SHOW_PARSING:
print("\t\t\tNow source_lines is: {!r}".format(source_lines))
return source_lines
def running_sum(seq):
total = 0
for num in seq:
total += num
yield total
def make_line_map(text):
line_lengths = [len(line) for line in text.splitlines(True)]
line_map = list(running_sum(line_lengths))
return line_map
def get_line_number(line_map, offset):
"""Find a line number, given a line map and a character offset."""
for lineno, line_offset in enumerate(line_map, start=1):
if line_offset > offset:
return lineno
return -1
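# Example (illustrative only): make_line_map("ab\ncd\n") returns [3, 6], and
# get_line_number([3, 6], 4) returns 2 because character offset 4 falls on line 2.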
def dump_frame(frame, label=""):
"""Dump interesting information about this frame."""
locals = dict(frame.f_locals)
self = locals.get('self', None)
context = locals.get('context', None)
if "__builtins__" in locals:
del locals["__builtins__"]
if label:
label = " ( %s ) " % label
print("-- frame --%s---------------------" % label)
print("{}:{}:{}".format(
os.path.basename(frame.f_code.co_filename),
frame.f_lineno,
type(self),
))
print(locals)
if self:
print("self:", self.__dict__)
if context:
print("context:", context.__dict__)
print("\\--")
```
#### File: django_coverage_plugin/tests/__init__.py
```python
try:
from django.urls import re_path
except ImportError:
from django.conf.urls import url as re_path
def index(request):
"""A bogus view to use in the urls below."""
pass
urlpatterns = [
re_path(r'^home$', index, name='index'),
]
``` |
{
"source": "joshuadavidthomas/django-extensions",
"score": 2
} |
#### File: management/commands/drop_test_database.py
```python
from itertools import count
import os
import logging
import warnings
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.creation import TEST_DATABASE_PREFIX
from django_extensions.settings import SQLITE_ENGINES, POSTGRESQL_ENGINES, MYSQL_ENGINES
from django_extensions.management.mysql import parse_mysql_cnf
from django_extensions.management.utils import signalcommand
from django_extensions.utils.deprecation import RemovedInNextVersionWarning
class Command(BaseCommand):
help = "Drops test database for this project."
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
'--noinput', action='store_false', dest='interactive',
default=True, help='Tells Django to NOT prompt the user for input of any kind.'
)
parser.add_argument(
'-U', '--user', action='store', dest='user', default=None,
help='Use another user for the database then defined in settings.py'
)
parser.add_argument(
'-P', '--password', action='store', dest='password', default=None,
help='Use another password for the database then defined in settings.py'
)
parser.add_argument(
'-D', '--dbname', action='store', dest='dbname', default=None,
help='Use another database name then defined in settings.py'
)
parser.add_argument(
'-R', '--router', action='store', dest='router', default=DEFAULT_DB_ALIAS,
help='Use this router-database other then defined in settings.py'
)
parser.add_argument(
'--database', default=DEFAULT_DB_ALIAS,
help='Nominates a database to run command for. Defaults to the "%s" database.' % DEFAULT_DB_ALIAS,
)
@signalcommand
def handle(self, *args, **options):
"""Drop test database for this project."""
database = options['database']
if options['router'] != DEFAULT_DB_ALIAS:
warnings.warn("--router is deprecated. You should use --database.", RemovedInNextVersionWarning, stacklevel=2)
database = options['router']
dbinfo = settings.DATABASES.get(database)
if dbinfo is None:
raise CommandError("Unknown database %s" % database)
engine = dbinfo.get('ENGINE')
user = password = database_name = database_host = database_port = ''
if engine == 'mysql':
(user, password, database_name, database_host, database_port) = parse_mysql_cnf(dbinfo)
user = options['user'] or dbinfo.get('USER') or user
password = options['password'] or dbinfo.get('PASSWORD') or password
try:
database_name = dbinfo['TEST']['NAME']
except KeyError:
database_name = None
if database_name is None:
database_name = TEST_DATABASE_PREFIX + (options['dbname'] or dbinfo.get('NAME'))
if database_name is None or database_name == '':
raise CommandError("You need to specify DATABASE_NAME in your Django settings file.")
database_host = dbinfo.get('HOST') or database_host
database_port = dbinfo.get('PORT') or database_port
verbosity = options["verbosity"]
if options['interactive']:
confirm = input("""
You have requested to drop all test databases.
This will IRREVERSIBLY DESTROY
ALL data in the database "{db_name}"
and all cloned test databases generated via
the "--parallel" flag (these are sequentially
named "{db_name}_1", "{db_name}_2", etc.).
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """.format(db_name=database_name))
else:
confirm = 'yes'
if confirm != 'yes':
print("Reset cancelled.")
return
def get_database_names(formatter):
"""
Return a generator of all possible test database names.
            e.g., 'test_foo', 'test_foo_1', 'test_foo_2', etc.
formatter: func returning a clone db name given the primary db name
and the clone's number, e.g., 'test_foo_1' for mysql/postgres, and
'test_foo_1..sqlite3' for sqlite (re: double dots, see comments).
"""
yield database_name
yield from (formatter(database_name, n) for n in count(1))
if engine in SQLITE_ENGINES:
# By default all sqlite test databases are created in memory.
# There will only be database files to delete if the developer has
# specified a test database name, which forces files to be written
# to disk.
logging.info("Unlinking %s databases" % engine)
def format_filename(name, number):
filename, ext = os.path.splitext(name)
# Since splitext() includes the dot in 'ext', the inclusion of
# the dot in the format string below is incorrect and creates a
# double dot. Django makes this mistake, so it must be
# replicated here. If fixed in Django, this code should be
# updated accordingly.
# Reference: https://code.djangoproject.com/ticket/32582
return '{}_{}.{}'.format(filename, number, ext)
try:
for db_name in get_database_names(format_filename):
if not os.path.isfile(db_name):
break
logging.info('Unlinking database named "%s"' % db_name)
os.unlink(db_name)
except OSError:
return
elif engine in MYSQL_ENGINES:
import MySQLdb as Database
kwargs = {
'user': user,
'passwd': password,
}
if database_host.startswith('/'):
kwargs['unix_socket'] = database_host
else:
kwargs['host'] = database_host
if database_port:
kwargs['port'] = int(database_port)
connection = Database.connect(**kwargs)
cursor = connection.cursor()
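            # probe test_<name>, test_<name>_1, test_<name>_2, ... and stop at the
            # first candidate database that does not exist on the server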
for db_name in get_database_names('{}_{}'.format):
exists_query = \
"SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME='%s';" \
% db_name
row_count = cursor.execute(exists_query)
if row_count < 1:
break
drop_query = 'DROP DATABASE IF EXISTS `%s`' % db_name
logging.info('Executing: "' + drop_query + '"')
cursor.execute(drop_query)
elif engine in POSTGRESQL_ENGINES:
import psycopg2 as Database # NOQA
conn_params = {'database': 'template1'}
if user:
conn_params['user'] = user
if password:
conn_params['password'] = password
if database_host:
conn_params['host'] = database_host
if database_port:
conn_params['port'] = database_port
connection = Database.connect(**conn_params)
            connection.set_isolation_level(0)  # autocommit, so DROP DATABASE runs outside a transaction block
cursor = connection.cursor()
for db_name in get_database_names('{}_{}'.format):
exists_query = "SELECT datname FROM pg_catalog.pg_database WHERE datname='%s';" \
% db_name
try:
cursor.execute(exists_query)
# NOTE: Unlike MySQLdb, the psycopg2 cursor does not return the row count
# however both cursors provide it as a property
if cursor.rowcount < 1:
break
drop_query = "DROP DATABASE IF EXISTS \"%s\";" % db_name
logging.info('Executing: "' + drop_query + '"')
cursor.execute(drop_query)
except Database.ProgrammingError as e:
logging.exception("Error: %s" % str(e))
return
else:
raise CommandError("Unknown database engine %s" % engine)
if verbosity >= 2 or options['interactive']:
print("Reset successful.")
``` |
{
"source": "joshuadavidthomas/pytest-rich-reporter",
"score": 2
} |
#### File: src/pytest_rich_reporter/plugin.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from .console import console
from .reporter import RichReporter
if TYPE_CHECKING:
from _pytest.config import Config
from _pytest.config.argparsing import Parser
def pytest_addoption(parser: Parser):
"""
Add options to the pytest command line for the rich plugin
:param parser: The pytest command line parser
"""
group = parser.getgroup("rich")
group.addoption(
"--rich",
action="store_true",
dest="rich",
default=False,
help="Enable rich output for the terminal reporter (default: False)",
)
@pytest.hookimpl(trylast=True)
def pytest_configure(config: Config):
"""
Configure the rich plugin
:param config: The pytest config object
"""
use_rich = getattr(config.option, "rich", False)
if use_rich:
standard_reporter = config.pluginmanager.get_plugin("terminalreporter")
rich_reporter = RichReporter(config, console)
config.pluginmanager.unregister(standard_reporter)
config.pluginmanager.register(rich_reporter, "terminalreporter")
``` |
{
"source": "joshuadavidthomas/shot-scraper",
"score": 3
} |
#### File: shot-scraper/shot_scraper/cli.py
```python
import click
from click_default_group import DefaultGroup
from playwright.sync_api import sync_playwright
from runpy import run_module
import sys
import time
import yaml
@click.group(
cls=DefaultGroup,
default="shot",
default_if_no_args=True,
context_settings=dict(help_option_names=["-h", "--help"]),
)
@click.version_option()
def cli():
"Tools for taking automated screenshots"
pass
@cli.command()
@click.argument("url") # TODO: validate with custom type
@click.option(
"-w",
"--width",
type=int,
help="Width of browser window, defaults to 1280",
default=1280,
)
@click.option(
"-h",
"--height",
type=int,
help="Height of browser window and shot - defaults to the full height of the page",
)
@click.option(
"-o",
"--output",
type=click.Path(file_okay=True, writable=True, dir_okay=False, allow_dash=True),
default="-",
)
@click.option(
"-s", "--selector", help="Take shot of first element matching this CSS selector"
)
@click.option(
"-j", "--javascript", help="Execute this JavaScript prior to taking the shot"
)
@click.option("--quality", type=int, help="Save as JPEG with this quality, e.g. 80")
@click.option(
"--wait", type=int, help="Wait this many milliseconds before taking screenshot"
)
def shot(url, output, width, height, selector, javascript, quality, wait):
"""
Take a single screenshot of a page or portion of a page.
Usage:
shot-scraper http://www.example.com/ -o example.png
Use -s to take a screenshot of one area of the page, identified using a CSS selector:
shot-scraper https://simonwillison.net -o bighead.png -s '#bighead'
"""
shot = {
"url": url,
"selector": selector,
"javascript": javascript,
"width": width,
"height": height,
"quality": quality,
"wait": wait,
}
with sync_playwright() as p:
browser = p.chromium.launch()
if output == "-":
shot = take_shot(browser, shot, return_bytes=True)
sys.stdout.buffer.write(shot)
else:
shot["output"] = str(output)
shot = take_shot(browser, shot)
browser.close()
@cli.command()
@click.argument("config", type=click.File(mode="r"))
def multi(config):
"""
Take multiple screenshots, defined by a YAML file
Usage:
shot-scraper config.yml
Where config.yml contains configuration like this:
\b
- output: example.png
url: http://www.example.com/
"""
shots = yaml.safe_load(config)
with sync_playwright() as p:
browser = p.chromium.launch()
for shot in shots:
take_shot(browser, shot)
browser.close()
@cli.command()
def install():
"""
Install Playwright browser needed by this tool.
Usage:
shot-scraper install
"""
sys.argv = ["playwright", "install", "chromium"]
run_module("playwright", run_name="__main__")
def take_shot(browser, shot, return_bytes=False):
url = shot.get("url") or ""
if not (url.startswith("http://") or url.startswith("https://")):
raise click.ClickException(
"'url' must start http:// or https:// - got: \n{}".format(url)
)
output = shot.get("output", "").strip()
if not output and not return_bytes:
        raise click.ClickException(
            "'output' filename is required, missing for url:\n {}".format(url)
        )
quality = shot.get("quality")
wait = shot.get("wait")
page = browser.new_page()
viewport = {}
full_page = True
if shot.get("width") or shot.get("height"):
viewport = {
"width": shot.get("width") or 1280,
"height": shot.get("height") or 720,
}
page.set_viewport_size(viewport)
if shot.get("height"):
full_page = False
page.goto(url)
if wait:
time.sleep(wait / 1000)
message = ""
selector = shot.get("selector")
javascript = shot.get("javascript")
if javascript:
page.evaluate(javascript)
screenshot_args = {}
if quality:
screenshot_args.update({"quality": quality, "type": "jpeg"})
if not return_bytes:
screenshot_args["path"] = output
if not selector:
screenshot_args["full_page"] = full_page
if selector:
if return_bytes:
return page.locator(selector).screenshot(**screenshot_args)
else:
page.locator(selector).screenshot(**screenshot_args)
message = "Screenshot of '{}' on '{}' written to '{}'".format(
selector, url, output
)
else:
# Whole page
if return_bytes:
return page.screenshot(**screenshot_args)
else:
page.screenshot(**screenshot_args)
message = "Screenshot of '{}' written to '{}'".format(url, output)
click.echo(message, err=True)
``` |
{
"source": "joshuadavidthomas/Wagtail-Pipit",
"score": 2
} |
#### File: src/customimage/serializers.py
```python
from rest_framework import serializers
from wagtail.images.shortcuts import get_rendition_or_not_found
from customimage.models import CustomImage
class CustomImageSerializer(serializers.ModelSerializer):
renditions = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
id = serializers.SerializerMethodField()
focal = serializers.SerializerMethodField()
def get_id(self, obj):
if hasattr(self, "_mocked_id"):
return self._mocked_id
return obj.pk
def get_url(self, obj):
if hasattr(self, "_mocked_url"):
return self._mocked_url
return obj.file.url if obj.file else None
def get_renditions(self, obj):
if hasattr(self, "_mocked_renditions"):
return self._mocked_renditions
if not hasattr(self, "_renditions"):
return None
renditions = {}
for (name, spec) in self._renditions:
rendition = get_rendition_or_not_found(obj, spec)
renditions[name] = rendition.attrs_dict
return renditions
def get_focal(self, obj):
# Default focal point values
background_x = 0.5
background_y = 0.5
if obj.focal_point_width:
# Get point relative to image size, make sure it isn't more than 1
background_x = min(round(obj.focal_point_x / obj.width, 4), 1)
background_y = min(round(obj.focal_point_y / obj.height, 4), 1)
return {"x": "{:.2%}".format(background_x), "y": "{:.2%}".format(background_y)}
class Meta:
model = CustomImage
fields = [
"title",
"file",
"width",
"height",
"file_size",
"focal",
"renditions",
]
def get_image_serializer(renditions=None):
renditions = renditions if renditions else []
"""
:param renditions: [(renditionName, wagtailspec,)]
:return: Monkey patched CustomImageSerializer
For docs regarding supported rendition-params (wagtailspec), see:
http://docs.wagtail.io/en/v1.13.1/topics/images.html#using-images-in-templates
example:
src = get_image_serializer([
('rend1', 'fill-200x200',),
('rend2', 'min-200x1000',),
])(img_instance).data['renditions'].get('rend1')
"""
class PatchedSerializer(CustomImageSerializer):
_renditions = renditions
return PatchedSerializer
```
#### File: main/tests/test_page_not_found.py
```python
from django.test import TestCase
class PageNotFoundTest(TestCase):
def test_that_404_contains_no_errors(self):
response = self.client.get("/a-404-url/")
self.assertEqual(response.status_code, 404)
def test_that_404_view_uses_proper_serializer(self):
response = self.client.get("/a-404-url/")
content = response.content.decode("utf-8")
self.assertTrue("component_name" in content)
```
#### File: main/views/page_not_found.py
```python
from django.template.response import TemplateResponse
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
from main.mixins import ReactViewMixin
from main.serializers import NotFoundPageSerializer
class PageNotFoundView(ReactViewMixin, TemplateView):
component_name = "NotFoundPage"
serializer_class = NotFoundPageSerializer
def render_to_response(self, context, **response_kwargs):
response = super().render_to_response(context, **response_kwargs)
response.status_code = 404
return response
def get_component_props(self):
return {"exception": _("Page not found")}
``` |
{
"source": "joshuadeng/torchrec",
"score": 2
} |
#### File: torchrec/distributed/comm_ops.py
```python
from dataclasses import dataclass, field
from typing import List, Optional, Tuple, TypeVar, Any
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
# OSS
try:
import fbgemm_gpu # @manual # noqa
except ImportError:
pass
W = TypeVar("W")
# TODO: T96382816, NE Parity Backward compatibility
GRADIENT_DIVISION: bool = True
def set_gradient_division(val: bool) -> None:
global GRADIENT_DIVISION
GRADIENT_DIVISION = val
# Some commonly used notations for comm ops:
# B - batch size
# T - number of embedding tables
# D - embedding dimension
class Request(Awaitable[W]):
"""
Defines a collective operation request for a process group on a tensor.
Constructor Args:
pg (dist.ProcessGroup): The process group the request is for.
"""
# pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
def __init__(self, pg: dist.ProcessGroup) -> None:
super().__init__()
self.pg: dist.ProcessGroup = pg
# pyre-fixme[11]: Annotation dist.Work is not defined as a type.
self.req: Optional[dist.Work] = None
self.tensor: Optional[W] = None
self.a2ai = None # type: ignore
self.rsi = None # type: ignore
self.wait_function = None # type: ignore
def _wait_impl(self) -> W:
"""
Calls the wait function for this request.
"""
ret = self.wait_function.apply(self.pg, self, self.tensor)
self.req = None
self.tensor = None
return ret
@dataclass
class All2AllPooledInfo(object):
"""
The data class that collects the attributes when calling the alltoall_pooled
operation.
Attributes:
dim_sum_per_rank (List[int]): number of features (sum of dimensions) of the
embedding in each rank.
dim_sum_per_rank_tensor (Optional[Tensor]): the tensor version of
`dim_sum_per_rank`, this is only used by the fast kernel of
`_recat_pooled_embedding_grad_out`.
cumsum_dim_sum_per_rank_tensor (Optional[Tensor]): cumulative sum of
dim_sum_per_rank, this is only used by the fast kernel of
`_recat_pooled_embedding_grad_out`.
mixed_dim (bool): the flag whether the input is mixed
dimensioned or not.
D (int): embedding dimension of the embedding table.
B_local (int): local batch size before scattering.
"""
dim_sum_per_rank: List[int]
dim_sum_per_rank_tensor: Optional[Tensor]
cumsum_dim_sum_per_rank_tensor: Optional[Tensor]
mixed_dim: bool
    D: int = -1  # -1 means not used
B_local: int = -1
@dataclass
class All2AllSequenceInfo(object):
"""
The data class that collects the attributes when calling the alltoall_sequence
operation.
Attributes:
embedding_dim (int): embedding dimension.
lengths_after_sparse_data_all2all (Tensor): lengths of sparse features after
AlltoAll.
forward_recat_tensor (Tensor): recat tensor for forward.
backward_recat_tensor (Tensor): recat tensor for backward.
input_splits (List[int]): input splits.
output_splits (List[int]): output splits.
lengths_sparse_before_features_all2all (Optional[Tensor]): lengths of sparse
features before AlltoAll.
"""
embedding_dim: int
lengths_after_sparse_data_all2all: Tensor
forward_recat_tensor: Tensor
backward_recat_tensor: Tensor
input_splits: List[int]
output_splits: List[int]
permuted_lengths_after_sparse_data_all2all: Optional[Tensor] = None
@dataclass
class All2AllVInfo(object):
"""
The data class that collects the attributes when calling the alltoallv operation.
Attributes:
dim_sum_per_rank (List[int]): number of features (sum of dimensions) of the
embedding in each rank.
B_global (int): global batch size for each rank.
B_local (int): local batch size before scattering.
        B_local_list (List[int]): local batch sizes for each embedding table locally
(in my current rank).
D_local_list (List[int]): embedding dimension of each embedding table locally
(in my current rank).
input_split_sizes (List[int]): The input split sizes for each rank, this
remembers how to split the input when doing the all_to_all_single operation.
output_split_sizes (List[int]): The output split sizes for each rank, this
remembers how to fill the output when doing the all_to_all_single operation.
"""
dims_sum_per_rank: List[int]
B_global: int
B_local: int
B_local_list: List[int]
D_local_list: List[int]
input_split_sizes: List[int] = field(default_factory=list)
output_split_sizes: List[int] = field(default_factory=list)
@dataclass
class ReduceScatterInfo(object):
"""
The data class that collects the attributes when calling the reduce_scatter_pooled
operation.
Attributes:
input_sizes (List[int]): the sizes of the input tensors. This remembers the
sizes of the input tensors when running the backward pass and producing the
gradient.
"""
input_sizes: List[int]
def _get_split_lengths_by_len(
world_size: int, my_rank: int, n: int
) -> Tuple[int, List[int]]:
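    # Worked example (assumed values): world_size=4, my_rank=1, n=10 gives
    # divmod(10, 4) == (2, 2), so the first two ranks get k + 1 items and the
    # rest get k, i.e. splits == [3, 3, 2, 2] and my_len == splits[1] == 3.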
k, m = divmod(n, world_size)
if m == 0:
splits = [k] * world_size
my_len = k
else:
splits = [(k + 1) if i < m else k for i in range(world_size)]
my_len = splits[my_rank]
return (my_len, splits)
def alltoall_pooled(
a2a_pooled_embs_tensor: Tensor,
dim_sum_per_rank: List[int],
mixed_dim: bool = False,
dim_sum_per_rank_tensor: Optional[Tensor] = None,
cumsum_dim_sum_per_rank_tensor: Optional[Tensor] = None,
group: Optional[dist.ProcessGroup] = None,
) -> Awaitable[Tensor]:
"""
Performs AlltoAll operation for a single pooled embedding tensor. Each process
splits the input pooled embeddings tensor based on the world size, and then scatters
the split list to all processes in the group. Then concatenates the received tensors
from all processes in the group and returns a single output tensor.
Args:
a2a_pooled_embs_tensor (Tensor): input pooled embeddings. Must be pooled
together before passing into this function. Usually with the shape of
B x T x D, where B - batch size, T - number of embedding tables,
D - embedding dimension. When `mixed_dim=True`, the input shape should be
B x D_local_sum, where D_local_sum is the dimension sum of all the local
embedding tables.
dim_sum_per_rank (List[int]): number of features (sum of dimensions) of the
embedding in each rank.
mixed_dim (bool): the flag whether the input is mixed dimensioned or not.
dim_sum_per_rank_tensor (Optional[Tensor]): the tensor version of
`dim_sum_per_rank`, this is only used by the fast kernel of
`_recat_pooled_embedding_grad_out`.
cumsum_dim_sum_per_rank_tensor (Optional[Tensor]): cumulative sum of
dim_sum_per_rank, this is only used by the fast kernel of
`_recat_pooled_embedding_grad_out`.
group (Optional[dist.ProcessGroup]): The process group to work on. If None, the
default process group will be used.
Returns:
Async work handle (Awaitable), which can be `wait()` later to get the resulting
tensor.
.. warning::
`alltoall_pooled` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
if dist.get_world_size(group) <= 1:
return NoWait(a2a_pooled_embs_tensor)
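    # Shape sketch (assumed values): with world_size=2, B_global=4 and
    # dim_sum_per_rank=[8, 4] (rank 0 holds two D=4 tables, rank 1 holds one),
    # each rank sends a (4, T_local, 4) input and, after wait(), receives a
    # (B_local=2, T_global=3, D=4) tensor holding its two batch rows for every
    # table in the model.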
myreq = Request(group)
a2ai = All2AllPooledInfo(
dim_sum_per_rank=dim_sum_per_rank,
dim_sum_per_rank_tensor=dim_sum_per_rank_tensor,
cumsum_dim_sum_per_rank_tensor=cumsum_dim_sum_per_rank_tensor,
mixed_dim=mixed_dim,
)
# pyre-fixme[16]: `All2All_Pooled_Req` has no attribute `apply`.
All2All_Pooled_Req.apply(group, myreq, a2ai, a2a_pooled_embs_tensor)
return myreq
def alltoall_sequence(
# (T, B, L_i * D) flattened
a2a_sequence_embs_tensor: Tensor,
forward_recat_tensor: Tensor,
backward_recat_tensor: Tensor,
lengths_after_sparse_data_all2all: Tensor,
input_splits: List[int],
output_splits: List[int],
group: Optional[dist.ProcessGroup] = None,
) -> Awaitable[Tensor]:
"""
Performs AlltoAll operation for sequence embeddings. Each process splits the input
tensor based on the world size, and then scatters the split list to all processes in
the group. Then concatenates the received tensors from all processes in the group
and returns a single output tensor.
Note:
AlltoAll operator for (T * B * L_i, D) tensors.
Does not support mixed dimensions.
Args:
a2a_sequence_embs_tensor (Tensor): input embeddings. Usually with the shape of
(T * B * L_i, D), where B - batch size, T - number of embedding tables,
D - embedding dimension.
forward_recat_tensor (Tensor): recat tensor for forward.
backward_recat_tensor (Tensor): recat tensor for backward.
lengths_after_sparse_data_all2all (Tensor): lengths of sparse features after
AlltoAll.
        input_splits (List[int]): input splits.
        output_splits (List[int]): output splits.
group (Optional[dist.ProcessGroup]): The process group to work on. If None, the
default process group will be used.
Returns:
Async work handle (Awaitable), which can be `wait()` later to get the resulting
tensor.
.. warning::
`alltoall_sequence` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
if dist.get_world_size(group) <= 1:
return NoWait(a2a_sequence_embs_tensor)
myreq = Request(group)
a2ai = All2AllSequenceInfo(
embedding_dim=a2a_sequence_embs_tensor.shape[1],
lengths_after_sparse_data_all2all=lengths_after_sparse_data_all2all,
forward_recat_tensor=forward_recat_tensor,
backward_recat_tensor=backward_recat_tensor,
input_splits=input_splits,
output_splits=output_splits,
)
# sequence of embeddings, bags are definitely non-uniform
# pyre-fixme[16]: `All2All_Seq_Req` has no attribute `apply`.
All2All_Seq_Req.apply(group, myreq, a2ai, a2a_sequence_embs_tensor)
return myreq
def alltoallv(
inputs: List[Tensor],
out_split: Optional[List[int]] = None,
per_rank_split_lengths: Optional[List[int]] = None,
group: Optional[dist.ProcessGroup] = None,
) -> Awaitable[List[Tensor]]:
"""
Performs alltoallv operation for a list of input embeddings. Each process scatters
the list to all processes in the group.
Args:
input (List[Tensor]): list of tensors to scatter, one per rank. The tensors in
the list usually have different lengths.
out_split (Optional[List[int]]): output split sizes (or dim_sum_per_rank), if
not specified, we will use `per_rank_split_lengths` to construct a output
split with the assumption that all the embs have the same dimension.
per_rank_split_lengths (Optional[List[int]]): split lengths per rank. If not
specified, the `out_split` must be specified.
group (Optional[dist.ProcessGroup]): The process group to work on. If None, the
default process group will be used.
Returns:
Async work handle (Awaitable), which can be `wait()` later to get the resulting
list of tensors.
.. warning::
`alltoallv` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
world_size = dist.get_world_size(group)
my_rank = dist.get_rank(group)
myreq = Request(group)
B_global, _ = inputs[0].size()
D_local_list = [e.size()[1] for e in inputs]
B_local, B_local_list = _get_split_lengths_by_len(world_size, my_rank, B_global)
if out_split is not None:
dims_sum_per_rank = out_split
elif per_rank_split_lengths is not None:
# all the embs have the same dimension
dims_sum_per_rank = [s * D_local_list[0] for s in per_rank_split_lengths]
else:
raise RuntimeError("Need to specify either out_split or per_rank_split_lengths")
a2ai = All2AllVInfo(
dims_sum_per_rank=dims_sum_per_rank,
B_local=B_local,
B_local_list=B_local_list,
D_local_list=D_local_list,
B_global=B_global,
)
# pyre-fixme[16]: `All2Allv_Req` has no attribute `apply`.
All2Allv_Req.apply(group, myreq, a2ai, inputs)
return myreq
def reduce_scatter_pooled(
inputs: List[Tensor],
group: Optional[dist.ProcessGroup] = None,
) -> Awaitable[Tensor]:
"""
Performs reduce-scatter operation for a pooled embeddings tensor split into world
size number of chunks. The result of the reduce operation gets scattered to all
processes in the group. Then concatenates the received tensors from all processes in
the group and returns a single output tensor.
Args:
inputs (List[Tensor]): list of tensors to scatter, one per rank.
group (Optional[dist.ProcessGroup]): The process group to work on. If None, the
default process group will be used.
Returns:
Async work handle (Awaitable), which can be `wait()` later to get the resulting
tensor.
.. warning::
`reduce_scatter_pooled` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
if dist.get_world_size(group) <= 1:
return NoWait(inputs[dist.get_rank(group)])
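    # Semantics sketch (assumed values): with world_size=2 each rank passes
    # inputs=[chunk0, chunk1]; after wait(), rank 0 holds the elementwise sum of
    # both ranks' chunk0 and rank 1 holds the sum of both ranks' chunk1.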
myreq = Request(group)
rsi = ReduceScatterInfo(input_sizes=[tensor.size() for tensor in inputs])
# pyre-fixme[16]: `ReduceScatter_Req` has no attribute `apply`.
ReduceScatter_Req.apply(group, myreq, rsi, *inputs)
return myreq
# TODO: improve performance of _recat_pooled_embedding_grad_out and
# recat_pooled_embedding_mixed_dim_grad_out, see T87591139
def _recat_pooled_embedding_grad_out(
grad_output: Tensor, num_features_per_rank: List[int]
) -> Tensor:
"""
TODO: improve performance of _recat_pooled_embedding_grad_out in an
efficient fashion (the .contiguous() calls are extremely expensive).
see T87591139
"""
grad_outputs_by_rank = grad_output.split(num_features_per_rank, dim=1)
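    # Worked example (assumed values): a (B_local=2, D_global_sum=12) grad_output
    # with num_features_per_rank=[8, 4] is split into (2, 8) and (2, 4) column
    # blocks, each flattened and concatenated into a rank-major 1-D tensor of 24
    # elements, matching the backward all_to_all_single input split sizes.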
return torch.cat(
[
grad_output_by_rank.contiguous().view(-1)
for grad_output_by_rank in grad_outputs_by_rank
],
dim=0,
)
def _recat_seq_embedding(
input_embeddings: Tensor,
split_sizes: List[int],
T_local: int,
my_size: int,
forward: bool,
) -> Tensor:
seq_embeddings_by_rank = input_embeddings.split(split_sizes)
if forward:
return torch.cat(
[
seq_embeddings_by_rank[t * my_size + i]
# .contiguous().view(-1)
for i in range(my_size)
for t in range(T_local)
],
dim=0,
)
else:
return torch.cat(
[
seq_embeddings_by_rank[i * T_local + t]
# .contiguous()
# .view(-1)
for t in range(T_local)
for i in range(my_size)
],
dim=0,
)
class All2All_Pooled_Req(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
a2ai: All2AllPooledInfo,
input_embeddings: Tensor,
) -> Tensor:
world_size = dist.get_world_size(pg)
if a2ai.mixed_dim:
(B_global, D_local_sum) = input_embeddings.shape
else:
(B_global, T_local, D) = input_embeddings.shape
D_local_sum = T_local * D
a2ai.D = D
dim_sum_per_rank = a2ai.dim_sum_per_rank
B_local = B_global // world_size
a2ai.B_local = B_local
assert (
B_global % world_size == 0
), f"num of ranks {world_size} doesn't divide global batch size {B_global}"
sharded_input_embeddings = input_embeddings.view(
world_size, B_local, D_local_sum
)
D_global_sum = sum(dim_sum_per_rank)
sharded_output_embeddings = torch.empty(
B_local * D_global_sum,
dtype=input_embeddings.dtype,
device=input_embeddings.device,
)
with record_function("## alltoall_fwd_single ##"):
req = dist.all_to_all_single(
output=sharded_output_embeddings,
input=sharded_input_embeddings,
output_split_sizes=[
B_local * D_rank_sum for D_rank_sum in dim_sum_per_rank
],
input_split_sizes=None,
group=pg,
async_op=True,
)
assert (
sum(B_local * D_rank_sum for D_rank_sum in dim_sum_per_rank)
== B_local * D_global_sum
)
myreq.req = req
myreq.tensor = sharded_output_embeddings
myreq.a2ai = a2ai
myreq.wait_function = All2All_Pooled_Wait
ctx.myreq = myreq
ctx.pg = pg
ctx.mixed_dim = a2ai.mixed_dim
return sharded_output_embeddings
@staticmethod
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *unused) -> Tuple[None, None, None, Tensor]:
myreq = ctx.myreq
myreq.req.wait()
myreq.req = None
grad_output = myreq.tensor
if ctx.mixed_dim:
(W, B_local, D_local_sum) = grad_output.shape
grad_input = grad_output.view(W * B_local, D_local_sum)
else:
(W, B_local, T_local, D) = grad_output.shape
grad_input = grad_output.view(W * B_local, T_local, D)
if GRADIENT_DIVISION:
grad_input.div_(dist.get_world_size(ctx.pg))
myreq.tensor = None
return (None, None, None, grad_input)
class All2All_Pooled_Wait(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
sharded_output_embeddings: Tensor,
) -> Tensor:
a2ai = myreq.a2ai
ctx.a2ai = a2ai
myreq.req.wait()
myreq.req = None
myreq.tensor = None
ctx.pg = pg
ctx.myreq = myreq
dim_sum_per_rank = a2ai.dim_sum_per_rank
B_local = a2ai.B_local
mixed_dim = a2ai.mixed_dim
outputs_by_rank = sharded_output_embeddings.split(
[B_local * D_rank_sum for D_rank_sum in dim_sum_per_rank]
)
if mixed_dim:
result = torch.cat(
[output.view(B_local, -1) for output in outputs_by_rank], dim=1
)
else:
D = a2ai.D
result = torch.cat(
[output.view(B_local, -1, D) for output in outputs_by_rank], dim=1
)
return result
@staticmethod
# pyre-fixme[14]: `backward` overrides method defined in `Function` inconsistently.
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, grad_output: Tensor) -> Tuple[None, None, Tensor]:
myreq = ctx.myreq
a2ai = ctx.a2ai
pg = ctx.pg
world_size = dist.get_world_size(pg)
my_rank = dist.get_rank(pg)
dim_sum_per_rank = a2ai.dim_sum_per_rank
D_local_sum = dim_sum_per_rank[my_rank]
if a2ai.mixed_dim:
(B_local, D_global_sum) = grad_output.shape
sharded_grad_input_sizes = (world_size, B_local, D_local_sum)
else:
(B_local, T_global, D) = grad_output.shape
D_global_sum = T_global * D
grad_output = grad_output.view(B_local, -1)
T_local = D_local_sum // D
sharded_grad_input_sizes = (world_size, B_local, T_local, D)
assert sum(dim_sum_per_rank) == D_global_sum
sharded_grad_output = _recat_pooled_embedding_grad_out(
grad_output.contiguous(),
dim_sum_per_rank,
)
sharded_grad_input = torch.empty(
sharded_grad_input_sizes, device=grad_output.device, dtype=grad_output.dtype
)
with record_function("## alltoall_bwd_single ##"):
req = dist.all_to_all_single(
output=sharded_grad_input,
input=sharded_grad_output,
output_split_sizes=None,
input_split_sizes=[
B_local * D_rank_sum for D_rank_sum in dim_sum_per_rank
],
group=pg,
async_op=True,
)
myreq.req = req
myreq.tensor = sharded_grad_input
# Note - this mismatch is by design! We return sharded_grad_output to allow PyTorch shape matching to proceed correctly.
return (None, None, sharded_grad_output)
class All2All_Seq_Req(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
a2ai: All2AllSequenceInfo,
sharded_input_embeddings: Tensor,
) -> Tensor:
world_size = dist.get_world_size(pg)
my_rank = dist.get_rank(pg)
D = a2ai.embedding_dim
forward_recat_tensor = a2ai.forward_recat_tensor
lengths_after_sparse_data_all2all = a2ai.lengths_after_sparse_data_all2all * D
input_splits = [i * D for i in a2ai.output_splits]
output_splits = [i * D for i in a2ai.input_splits]
local_T = lengths_after_sparse_data_all2all.shape[0]
if local_T > 0:
with record_function("## alltoall_seq_embedding_fwd_permute ##"):
(
permuted_lengths_after_sparse_data_all2all,
sharded_input_embeddings,
_,
) = torch.ops.fbgemm.permute_sparse_data(
forward_recat_tensor,
lengths_after_sparse_data_all2all.view(local_T * world_size, -1),
sharded_input_embeddings.view(-1),
None,
sharded_input_embeddings.numel(),
)
else:
permuted_lengths_after_sparse_data_all2all = None
sharded_output_embeddings = torch.empty(
sum(output_splits),
dtype=sharded_input_embeddings.dtype,
device=sharded_input_embeddings.device,
)
with record_function("## alltoall_seq_embedding_fwd_single ##"):
req = dist.all_to_all_single(
output=sharded_output_embeddings,
input=sharded_input_embeddings,
output_split_sizes=output_splits,
input_split_sizes=input_splits,
group=pg,
async_op=True,
)
a2ai.permuted_lengths_after_sparse_data_all2all = (
permuted_lengths_after_sparse_data_all2all
)
a2ai.input_splits = input_splits
a2ai.output_splits = output_splits
myreq.req = req
myreq.tensor = sharded_output_embeddings
myreq.a2ai = a2ai
myreq.wait_function = All2All_Seq_Req_Wait
ctx.myreq = myreq
ctx.pg = pg
ctx.my_rank = my_rank
ctx.world_size = world_size
return sharded_output_embeddings
@staticmethod
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *unused) -> Tuple[None, None, None, Tensor]:
myreq = ctx.myreq
a2ai = myreq.a2ai
D = a2ai.embedding_dim
backward_recat_tensor = a2ai.backward_recat_tensor
permuted_lengths_after_sparse_data_all2all = (
a2ai.permuted_lengths_after_sparse_data_all2all
)
myreq.req.wait()
sharded_grad_input = myreq.tensor
myreq.req = None
myreq.tensor = None
if permuted_lengths_after_sparse_data_all2all is not None:
with record_function("## alltoall_seq_embedding_bwd_permute ##"):
_, sharded_grad_input, _ = torch.ops.fbgemm.permute_sparse_data(
backward_recat_tensor,
permuted_lengths_after_sparse_data_all2all,
sharded_grad_input,
None,
sharded_grad_input.numel(),
)
return (None, None, None, sharded_grad_input.view(-1, D))
class All2All_Seq_Req_Wait(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
sharded_output_embeddings: Tensor,
) -> Tensor:
a2ai = myreq.a2ai
D = a2ai.embedding_dim
ctx.a2ai = a2ai
myreq.req.wait()
myreq.req = None
myreq.tensor = None
ctx.pg = pg
ctx.myreq = myreq
return sharded_output_embeddings.view(-1, D)
@staticmethod
# pyre-fixme[14]: `backward` overrides method defined in `Function` inconsistently.
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, sharded_grad_output: Tensor) -> Tuple[None, None, Tensor]:
myreq = ctx.myreq
a2ai = ctx.a2ai
pg = ctx.pg
input_splits = a2ai.output_splits
output_splits = a2ai.input_splits
sharded_grad_input = torch.empty(
sum(output_splits),
device=sharded_grad_output.device,
dtype=sharded_grad_output.dtype,
)
with record_function("## alltoall_seq_embedding_bwd_single ##"):
req = dist.all_to_all_single(
output=sharded_grad_input,
input=sharded_grad_output.view(-1),
output_split_sizes=output_splits,
input_split_sizes=input_splits,
group=pg,
async_op=True,
)
myreq.req = req
myreq.tensor = sharded_grad_input
# Note - this mismatch is by design! We return sharded_grad_output
# to allow PyTorch shape matching to proceed correctly.
return (None, None, sharded_grad_output.view(-1))
class All2Allv_Req(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
a2ai: All2AllVInfo,
inputs: List[Tensor],
) -> Tensor:
input_split_sizes = [m * sum(a2ai.D_local_list) for m in a2ai.B_local_list]
output_split_sizes = [a2ai.B_local * e for e in a2ai.dims_sum_per_rank]
input = torch.cat(inputs, dim=1).view([-1])
output = input.new_empty(sum(output_split_sizes))
with record_function("## alltoallv_bwd_single ##"):
req = dist.all_to_all_single(
output,
input,
output_split_sizes,
input_split_sizes,
group=pg,
async_op=True,
)
myreq.req = req
myreq.tensor = output
myreq.wait_function = All2Allv_Wait
a2ai.input_split_sizes = input_split_sizes
a2ai.output_split_sizes = output_split_sizes
myreq.a2ai = a2ai
ctx.a2ai = a2ai
ctx.myreq = myreq
return output
@staticmethod
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *grad_output):
a2ai = ctx.a2ai
myreq = ctx.myreq
myreq.req.wait()
myreq.req = None
grad_input = myreq.tensor
grad_inputs = grad_input.view([a2ai.B_global, -1]).split(
a2ai.D_local_list, dim=1
)
grad_inputs = [gin.contiguous() for gin in grad_inputs]
myreq.tensor = None
return (None, None, None, *grad_inputs)
class All2Allv_Wait(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
# pyre-fixme[2]: Parameter must be annotated.
def forward(ctx, pg: dist.ProcessGroup, myreq, output) -> Tuple[Tensor]:
a2ai = myreq.a2ai
ctx.a2ai = a2ai
myreq.req.wait()
myreq.req = None
myreq.tensor = None
ctx.pg = pg
ctx.myreq = myreq
outputs = tuple(
[
out.view([a2ai.B_local, -1])
for out in output.split(a2ai.output_split_sizes)
]
)
return outputs
@staticmethod
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *grad_outputs) -> Tuple[None, None, Tensor]:
pg = ctx.pg
myreq = ctx.myreq
a2ai = ctx.a2ai
grad_outputs = [gout.contiguous().view([-1]) for gout in grad_outputs]
grad_output = torch.cat(grad_outputs)
grad_input = grad_output.new_empty([a2ai.B_global * sum(a2ai.D_local_list)])
with record_function("## alltoall_bwd_single ##"):
req = dist.all_to_all_single(
grad_input,
grad_output,
a2ai.input_split_sizes,
a2ai.output_split_sizes,
group=pg,
async_op=True,
)
myreq.req = req
myreq.tensor = grad_input
return (None, None, grad_output)
class ReduceScatter_Req(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
rsi: ReduceScatterInfo,
*inputs: Any,
) -> Tensor:
my_rank = dist.get_rank(pg)
output = inputs[my_rank].new_empty(
inputs[my_rank].size(),
dtype=inputs[my_rank].dtype,
device=inputs[my_rank].device,
)
with record_function("## reduce_scatter ##"):
req = dist.reduce_scatter(output, list(inputs), group=pg, async_op=True)
myreq.req = req
myreq.tensor = output
myreq.wait_function = ReduceScatter_Wait
myreq.rsi = rsi
ctx.myreq = myreq
ctx.pg = pg
return output
@staticmethod
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *unused: Tensor) -> Tuple[Optional[Tensor], ...]:
myreq = ctx.myreq
myreq.req.wait()
myreq.req = None
grad_inputs = list(myreq.tensor)
# Make it equivalent to running on a single rank.
if GRADIENT_DIVISION:
for grad_input in grad_inputs:
grad_input.div_(dist.get_world_size(ctx.pg))
myreq.tensor = None
return (None, None, None, *grad_inputs)
class ReduceScatter_Wait(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
output: Tensor,
) -> Tensor:
myreq.req.wait()
myreq.req = None
myreq.tensor = None
ctx.myreq = myreq
ctx.pg = pg
return output
@staticmethod
# pyre-fixme[14]: `backward` overrides method defined in `Function` inconsistently.
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, grad_output: Tensor) -> Tuple[None, None, Tensor]:
myreq = ctx.myreq
rsi = myreq.rsi
grad_inputs = [
grad_output.new_empty(
in_size,
dtype=grad_output.dtype,
device=grad_output.device,
)
for in_size in rsi.input_sizes
]
with record_function("## reduce_scatter_bw (all_gather) ##"):
req = dist.all_gather(
grad_inputs,
grad_output.contiguous(),
group=ctx.pg,
async_op=True,
)
myreq.req = req
myreq.tensor = grad_inputs
return (None, None, grad_output)
```
#### File: distributed/planner/enumerators.py
```python
import math
from typing import Tuple, Optional, Dict, List, Union
import torch
from torch import nn
from torchrec.distributed.planner.constants import (
MIN_CW_DIM,
POOLING_FACTOR,
)
from torchrec.distributed.planner.shard_estimators import (
EmbeddingPerfEstimator,
EmbeddingStorageEstimator,
)
from torchrec.distributed.planner.types import (
ParameterConstraints,
Enumerator,
ShardingOption,
Shard,
Topology,
PartitionByType,
ShardEstimator,
)
from torchrec.distributed.planner.utils import sharder_name
from torchrec.distributed.types import ModuleSharder, ShardingType
class EmbeddingEnumerator(Enumerator):
"""
Generates embedding sharding options for given nn.Module, considering user provided
constraints.
Constructor Args:
topology (Topology): device topology.
constraints (Optional[Dict[str, ParameterConstraints]]): dict of parameter names
to provided ParameterConstraints.
"""
def __init__(
self,
topology: Topology,
constraints: Optional[Dict[str, ParameterConstraints]] = None,
estimator: Optional[Union[ShardEstimator, List[ShardEstimator]]] = None,
) -> None:
self._compute_device: str = topology.compute_device
self._world_size: int = topology.world_size
self._local_world_size: int = topology.local_world_size
self._constraints = constraints
self._batch_size: int = topology.batch_size
if estimator:
self._estimators: List[ShardEstimator] = (
[estimator] if not isinstance(estimator, list) else estimator
)
else:
self._estimators: List[ShardEstimator] = [
EmbeddingPerfEstimator(topology=topology, constraints=constraints),
EmbeddingStorageEstimator(topology=topology, constraints=constraints),
]
def enumerate(
self, module: nn.Module, sharders: List[ModuleSharder[nn.Module]]
) -> List[ShardingOption]:
"""
Generates relevant sharding options given module and sharders.
Args:
module (nn.Module): module to be sharded.
sharders (List[ModuleSharder[nn.Module]]): provided sharders for module.
Returns:
List[ShardingOption]: valid sharding options with values populated.
"""
sharder_map: Dict[str, ModuleSharder[nn.Module]] = {
sharder_name(sharder.module_type): sharder for sharder in sharders
}
sharding_options: List[ShardingOption] = []
for child_path, child_module in module.named_modules():
sharder_key = sharder_name(type(child_module))
sharder = sharder_map.get(sharder_key, None)
if not sharder:
continue
for name, param in sharder.shardable_parameters(child_module).items():
for sharding_type in self._filter_sharding_types(
name, sharder.sharding_types(self._compute_device)
):
for compute_kernel in self._filter_compute_kernels(
name,
sharder.compute_kernels(sharding_type, self._compute_device),
):
input_lengths = (
self._constraints[name].pooling_factors
if self._constraints and self._constraints.get(name)
else [POOLING_FACTOR]
)
col_wise_shard_dim = (
self._constraints[name].min_partition
if self._constraints and self._constraints.get(name)
else None
)
(
shard_sizes,
shard_offsets,
) = calculate_shard_sizes_and_offsets(
tensor=param,
world_size=self._world_size,
local_world_size=self._local_world_size,
sharding_type=sharding_type,
col_wise_shard_dim=col_wise_shard_dim,
)
sharding_options.append(
ShardingOption(
name=name,
tensor=param,
module=(child_path, child_module),
upstream_modules=[],
downstream_modules=[],
input_lengths=input_lengths,
batch_size=self._batch_size,
compute_kernel=compute_kernel,
sharding_type=sharding_type,
partition_by=get_partition_by_type(sharding_type),
shards=[
Shard(size=size, offset=offset)
for size, offset in zip(shard_sizes, shard_offsets)
],
)
)
for estimator in self._estimators:
estimator.estimate(sharding_options, sharder_map)
return sharding_options
def _filter_sharding_types(self, name: str, sharding_types: List[str]) -> List[str]:
if not self._constraints or not self._constraints.get(name):
return sharding_types
constraints: ParameterConstraints = self._constraints[name]
if not constraints.sharding_types:
return sharding_types
constrained_sharding_types: List[str] = constraints.sharding_types
sharding_types = list(set(constrained_sharding_types) & set(sharding_types))
if not sharding_types:
raise RuntimeError(
f"No available sharding types after applying user provided constraints for {name}"
)
return sharding_types
def _filter_compute_kernels(
self, name: str, compute_kernels: List[str]
) -> List[str]:
if not self._constraints or not self._constraints.get(name):
return compute_kernels
constraints: ParameterConstraints = self._constraints[name]
if not constraints.compute_kernels:
return compute_kernels
constrained_compute_kernels: List[str] = constraints.compute_kernels
compute_kernels = list(set(constrained_compute_kernels) & set(compute_kernels))
if not compute_kernels:
raise RuntimeError(
f"No available compute kernels after applying user provided constraints for {name}"
)
return compute_kernels
def get_partition_by_type(sharding_type: str) -> str:
"""
Gets corresponding partition by type for provided sharding type.
Args:
sharding_type (str): sharding type string.
Returns:
str: the corresponding PartitionByType value.
"""
device_sharding_types = {
ShardingType.TABLE_WISE.value,
ShardingType.COLUMN_WISE.value,
}
host_sharding_types = {ShardingType.TABLE_ROW_WISE.value}
uniform_sharding_types = {
ShardingType.ROW_WISE.value,
ShardingType.DATA_PARALLEL.value,
}
if sharding_type in device_sharding_types:
return PartitionByType.DEVICE.value
elif sharding_type in host_sharding_types:
return PartitionByType.HOST.value
elif sharding_type in uniform_sharding_types:
return PartitionByType.UNIFORM.value
raise ValueError(f"Unrecognized sharding type provided: {sharding_type}")
def calculate_shard_sizes_and_offsets(
tensor: torch.Tensor,
world_size: int,
local_world_size: int,
sharding_type: str,
col_wise_shard_dim: Optional[int] = None,
) -> Tuple[List[List[int]], List[List[int]]]:
"""
Calculates sizes and offsets for tensor sharded according to provided sharding type.
Args:
tensor (torch.Tensor): tensor to be sharded.
world_size (int): total number of devices in topology.
local_world_size (int): total number of devices in host group topology.
sharding_type (str): provided ShardingType value.
col_wise_shard_dim (Optional[int]): dimension for column wise sharding split.
Returns:
Tuple[List[List[int]], List[List[int]]]: shard sizes, represented as a list of
the dimensions of the sharded tensor on each device, and shard offsets,
represented as a list of coordinates of placement on each device.
Raises:
ValueError: If `sharding_type` is not a valid ShardingType.
"""
(rows, columns) = tensor.shape
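    # Worked example (assumed values): a (100, 64) table with world_size=4 and
    # ROW_WISE sharding yields shard sizes [[25, 64]] * 4 and shard offsets
    # [[0, 0], [25, 0], [50, 0], [75, 0]].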
if sharding_type == ShardingType.DATA_PARALLEL.value:
return [[rows, columns]] * world_size, [[0, 0]] * world_size
elif sharding_type == ShardingType.TABLE_WISE.value:
return [[rows, columns]], [[0, 0]]
elif sharding_type == ShardingType.COLUMN_WISE.value:
return _calculate_cw_shard_sizes_and_offsets(columns, rows, col_wise_shard_dim)
elif sharding_type == ShardingType.ROW_WISE.value:
return _calculate_rw_shard_sizes_and_offsets(rows, world_size, columns)
elif sharding_type == ShardingType.TABLE_ROW_WISE.value:
return _calculate_rw_shard_sizes_and_offsets(rows, local_world_size, columns)
raise ValueError(f"Unrecognized sharding type provided: {sharding_type}")
def _calculate_rw_shard_sizes_and_offsets(
hash_size: int, num_devices: int, columns: int
) -> Tuple[List[List[int]], List[List[int]]]:
"""
Sets prefix of shard_sizes to be ceil(hash_size/num_devices).
    For example if hash_size = 10, num_devices = 4, we will allocate the rows as 3,3,3,1
(rather than 3,3,2,2).
This is due to implementation in RW sharding that sets block_size_lists to be ceil.
The balanced way is harder to support on GPU. For more details see
https://fb.quip.com/xbgbAchCTOL0
Also consider the example of hash_size = 5, num_devices = 4. The expected rows per
rank is [2,2,1,0].
"""
block_size: int = math.ceil(hash_size / num_devices)
last_rank: int = hash_size // block_size
last_block_size: int = hash_size - block_size * last_rank
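    # Tracing the docstring example above (hash_size=10, num_devices=4):
    # block_size = ceil(10 / 4) = 3, last_rank = 10 // 3 = 3 and
    # last_block_size = 10 - 3 * 3 = 1, producing shard rows [3, 3, 3, 1] with
    # offsets [[0, 0], [3, 0], [6, 0], [9, 0]].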
shard_sizes: List[List[int]] = []
for rank in range(num_devices):
if rank < last_rank:
local_row: int = block_size
elif rank == last_rank:
local_row: int = last_block_size
else:
local_row: int = 0
shard_sizes.append([local_row, columns])
shard_offsets = [[0, 0]]
for i in range(num_devices - 1):
shard_offsets.append([shard_sizes[i][0] + shard_offsets[i][0], 0])
return shard_sizes, shard_offsets
def _calculate_cw_shard_sizes_and_offsets(
hash_size: int,
rows: int,
col_wise_shard_dim: Optional[int] = None,
) -> Tuple[List[List[int]], List[List[int]]]:
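    # Worked example (assumed values): hash_size=100 columns, rows=1000 and
    # col_wise_shard_dim=32 gives block_size=32 and divmod(100, 32) == (3, 4),
    # so the shard sizes are [[1000, 32], [1000, 32], [1000, 36]] with offsets
    # [[0, 0], [0, 32], [0, 64]]; the residual columns land in the last shard.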
block_size: int = min(
col_wise_shard_dim if col_wise_shard_dim else MIN_CW_DIM, hash_size
)
num_col_wise_shards, residual = divmod(hash_size, block_size)
shard_sizes: List[List[int]] = [[rows, block_size]] * (num_col_wise_shards - 1)
shard_sizes.append([rows, block_size + residual])
shard_offsets: List[List[int]] = [
[0, block_size * rank] for rank in range(num_col_wise_shards)
]
return shard_sizes, shard_offsets
```
#### File: planner/tests/test_proposers.py
```python
import unittest
from typing import List, cast
import torch
from torchrec.distributed.embeddingbag import (
EmbeddingBagCollectionSharder,
)
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer
from torchrec.distributed.planner.types import Topology, ShardingOption
from torchrec.distributed.tests.test_model import TestSparseNN
from torchrec.modules.embedding_configs import EmbeddingBagConfig
class TestProposers(unittest.TestCase):
def setUp(self) -> None:
topology = Topology(world_size=2, compute_device="cuda")
self.enumerator = EmbeddingEnumerator(topology=topology)
self.greedy_proposer = GreedyProposer()
self.uniform_proposer = UniformProposer()
def test_greedy_two_table_perf(self) -> None:
tables = [
EmbeddingBagConfig(
num_embeddings=100,
embedding_dim=10,
name="table_0",
feature_names=["feature_0"],
),
EmbeddingBagConfig(
num_embeddings=100,
embedding_dim=10,
name="table_1",
feature_names=["feature_1"],
),
]
model = TestSparseNN(tables=tables, sparse_device=torch.device("meta"))
search_space = self.enumerator.enumerate(
module=model, sharders=[EmbeddingBagCollectionSharder()]
)
self.greedy_proposer.load(search_space)
# simulate first five iterations:
output = []
for _ in range(5):
proposal = cast(List[ShardingOption], self.greedy_proposer.propose())
proposal.sort(
key=lambda sharding_option: (
max([shard.perf for shard in sharding_option.shards]),
sharding_option.name,
)
)
output.append(
[
(
candidate.name,
candidate.sharding_type,
candidate.compute_kernel,
)
for candidate in proposal
]
)
self.greedy_proposer.feedback(partitionable=True)
expected_output = [
[
(
"table_0",
"data_parallel",
"batched_dense",
),
(
"table_1",
"data_parallel",
"batched_dense",
),
],
[
(
"table_1",
"data_parallel",
"batched_dense",
),
(
"table_0",
"data_parallel",
"dense",
),
],
[
(
"table_1",
"data_parallel",
"batched_dense",
),
(
"table_0",
"row_wise",
"batched_fused",
),
],
[
(
"table_1",
"data_parallel",
"batched_dense",
),
(
"table_0",
"table_wise",
"batched_fused",
),
],
[
(
"table_1",
"data_parallel",
"batched_dense",
),
(
"table_0",
"row_wise",
"batched_dense",
),
],
]
self.assertEqual(expected_output, output)
def test_uniform_three_table_perf(self) -> None:
tables = [
EmbeddingBagConfig(
num_embeddings=100 * i,
embedding_dim=10 * i,
name="table_" + str(i),
feature_names=["feature_" + str(i)],
)
for i in range(1, 4)
]
model = TestSparseNN(tables=tables, sparse_device=torch.device("meta"))
search_space = self.enumerator.enumerate(
module=model, sharders=[EmbeddingBagCollectionSharder()]
)
self.uniform_proposer.load(search_space)
output = []
proposal = self.uniform_proposer.propose()
while proposal:
proposal.sort(
key=lambda sharding_option: (
max([shard.perf for shard in sharding_option.shards]),
sharding_option.name,
)
)
output.append(
[
(
candidate.name,
candidate.sharding_type,
candidate.compute_kernel,
)
for candidate in proposal
]
)
self.uniform_proposer.feedback(partitionable=True)
proposal = self.uniform_proposer.propose()
expected_output = [
[
(
"table_1",
"data_parallel",
"batched_dense",
),
(
"table_2",
"data_parallel",
"batched_dense",
),
(
"table_3",
"data_parallel",
"batched_dense",
),
],
[
(
"table_1",
"table_wise",
"batched_fused",
),
(
"table_2",
"table_wise",
"batched_fused",
),
(
"table_3",
"table_wise",
"batched_fused",
),
],
[
(
"table_1",
"row_wise",
"batched_fused",
),
(
"table_2",
"row_wise",
"batched_fused",
),
(
"table_3",
"row_wise",
"batched_fused",
),
],
[
(
"table_1",
"table_row_wise",
"batched_fused",
),
(
"table_2",
"table_row_wise",
"batched_fused",
),
(
"table_3",
"table_row_wise",
"batched_fused",
),
],
]
self.assertEqual(expected_output, output)
``` |
{
"source": "joshua-dias/Python---Estudos-Gerais",
"score": 3
} |
#### File: joshua-dias/Python---Estudos-Gerais/botwpp.py
```python
from selenium import webdriver
import time
class BotWhatsapp:
def __init__ (self):
        # Fill in the message to be sent
self.mensagem = "Mensagem de teste"
        # Fill in the name of the person or group below (no limit on the number of recipients)
self.destinatario = ['João']
options = webdriver.ChromeOptions()
options.add_argument('lang=pt-br')
        # Fill in the path to your chromedriver below.
self.driver = webdriver.Chrome(executable_path =r'C:\Users\~stan twice\Desktop\Rep Josh\chromedriver.exe')
def EnviarMensagem(self):
        # Accessing WhatsApp
self.driver.get('https://web.whatsapp.com')
time.sleep(20)
        # A for loop is used so the message can be sent to as many people and/or groups as needed.
for grupo in self.destinatario:
grupo = self.driver.find_element_by_xpath(f"//span[@title = '{grupo}']")
time.sleep(3)
grupo.click()
            # Click on the text box | the class name below needs to be validated:
            caixadetexto = self.driver.find_element_by_class_name('_3uMse')
            time.sleep(3)
            caixadetexto.click()
            caixadetexto.send_keys(self.mensagem)
            # Click the send button.
enviar = self.driver.find_element_by_xpath(f"//span[@data-icon = 'send']")
time.sleep(3)
enviar.click()
time.sleep(5)
# Calling the method.
bot = BotWhatsapp()
bot.EnviarMensagem()
``` |
{
"source": "JoshuaDiazAtencia/OOP-1-1",
"score": 4
} |
#### File: JoshuaDiazAtencia/OOP-1-1/Simple calculator.py
```python
from tkinter import *
window = Tk()
window.title("Super simple calculator")
window.geometry("400x300+20+10")
class MyWindow:
def __init__(self, window):
self.lbl1 = Label(window, text="Super standard calculator", fg="black")
self.lbl1.place(relx=0.50, y=50, anchor="center")
self.lbl2 = Label(window, text="Input 1st number")
self.lbl2.place(x=50, y=80)
self.lbl3 = Entry(window, bd=3)
self.lbl3.place(x=180, y=80)
self.lbl4 = Label(window, text="Input 2nd number")
self.lbl4.place(x=50, y=120)
self.lbl5 = Entry(window, bd=3)
self.lbl5.place(x=180, y=120)
self.btn1 = Button(window, text="addition", command=self.add)
self.btn1.place(x=50, y=150)
self.btn1.bind("<Button-1>", self.add)
self.btn2 = Button(window, text="subtraction", command=self.sub)
self.btn2.place(x=120, y=150)
self.btn3 = Button(window, text="multiplication", command=self.mult)
self.btn3.place(x=200, y=150)
self.btn4 = Button(window, text="division", command=self.divd)
self.btn4.place(x=300, y=150)
self.lbl6 = Label(window, text="Result:")
self.lbl6.place(x=50, y=200)
self.lbl7 = Entry(window, bd=3)
self.lbl7.place(x=100, y=200)
    def add(self, event=None):  # optional event arg so the <Button-1> binding above can call this too
self.lbl7.delete("0", END)
num1 = int(self.lbl3.get())
num2 = int(self.lbl5.get())
answer = num1+num2
self.lbl7.insert(END, str(answer))
def sub(self):
self.lbl7.delete("0", END)
num1 = int(self.lbl3.get())
num2 = int(self.lbl5.get())
answer = num1-num2
self.lbl7.insert(END, str(answer))
def mult(self):
self.lbl7.delete("0", END)
num1 = int(self.lbl3.get())
num2 = int(self.lbl5.get())
answer = num1*num2
self.lbl7.insert(END, str(answer))
def divd(self):
self.lbl7.delete("0", END)
num1 = int(self.lbl3.get())
num2 = int(self.lbl5.get())
answer = num1/num2
self.lbl7.insert(END, str(answer))
mywin = MyWindow(window)
window.mainloop()
``` |
{
"source": "joshua-d-miller/admin-scripts",
"score": 2
} |
#### File: admin-scripts/jamf Pro Extension Attributes/TeamViewer ID.py
```python
from __future__ import print_function
from Foundation import CFPreferencesCopyAppValue
# ---------------------------------------------------------
def get_teamviewer_id():
'''Retreives the TeamViewer ID of the current client'''
    # The preference domain for TeamViewer; its plist normally lives in
    # /Library/Preferences
tv_plist = "com.teamviewer.teamviewer.preferences"
    tv_id = CFPreferencesCopyAppValue(
        'ClientID', '/Library/Preferences/{0:}.plist'.format(
            tv_plist
        ))
    # CFPreferencesCopyAppValue returns None rather than raising when the
    # preference (or the whole plist) is missing, so test for None explicitly.
    if tv_id is None:
        print('<result>Not Installed</result>')
    else:
        print('<result>{0:}</result>'.format(tv_id))
        exit(0)
if __name__ == '__main__':
get_teamviewer_id()
```
#### File: admin-scripts/jamf Pro Policies/Unload LaunchAgents.py
```python
from SystemConfiguration import SCDynamicStoreCopyConsoleUser
from subprocess import CalledProcessError, check_output, PIPE
def unload_launchAgents(agent_to_unload):
'''Function to unload the specified launchAgents'''
# Create Agent Path
la_path = '/Library/LaunchAgents/{0:}.plist'.format(agent_to_unload)
# Determine Status
try:
la_status = check_output(
['/bin/launchctl', 'asuser', str(current_user[1]), '/bin/launchctl',
'list', agent_to_unload], stderr=PIPE)
except CalledProcessError:
la_status = ""
# Disable the LaunchAgent
try:
check_output(
['/bin/launchctl', 'disable', 'user/{0:}/{1:}'.format(
str(current_user[1]), agent_to_unload)],
stderr=PIPE)
except CalledProcessError:
print("Agent {0:} Already Disabled. Continuing...".format(
agent_to_unload))
# Unload the LaunchAgent
while la_status != "":
try:
check_output(
['/bin/launchctl', 'bootout', 'gui/{0:}'.format(
str(current_user[1])), la_path], stderr=PIPE)
la_status = check_output(
['/bin/launchctl', 'asuser', str(current_user[1]),
'/bin/launchctl', 'list', agent_to_unload],
stderr=PIPE)
except CalledProcessError:
la_status = ""
if __name__ == '__main__':
# Get Current User
current_user = SCDynamicStoreCopyConsoleUser(None, None, None)
# LaunchAgents to unload (Create your list of LaunchAgents here)
# Use just their name without .plist
la_list = [
''
]
for agent in la_list:
unload_launchAgents(agent)
```
#### File: admin-scripts/munki/Remove Duplicates.py
```python
from __future__ import print_function
from os import devnull, remove, walk
from sys import argv
import subprocess
def remove_duplicates_from_repo():
'''Removes duplicates from the munki repo'''
# This for loop will search your Munki Repository for duplications
count = 1
try:
while count != 10:
DUPE_TO_REMOVE = '__' + str(count)
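            # munki appends "__<n>" to repeated imports (for example
            # "Firefox__1.dmg"), so a filename containing '__1' through '__9'
            # is treated as a likely duplicate.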
for dirname, dirnames, filenames in walk(argv[1]):
for filename in filenames:
# Is the current file a duplication in the Munki Repo?
if DUPE_TO_REMOVE in filename:
try:
# Remove the File
remove(dirname + '/' + filename)
print(' ✓ {0:} has been removed.'.format(filename))
except StandardError as error:
print(' ✖ Could not remove the file {0:}'.format(filename))
print(' {0:}'.format(error))
else:
continue
count = count + 1
except Exception as error:
print(' ✖ Could not traverse directory or other unspecified error. \
Please make sure Munki Repo is mounted.')
def update_catalogs():
'''Updates munki catalogs'''
try:
print('• Updating munki catalogs')
# Suppress the output of makecatalogs
our_null = open(devnull, 'w')
subprocess.check_call(
['/usr/local/munki/makecatalogs'],
stdout=our_null, stderr=our_null)
print(' ✓ Completed')
except Exception as error:
print('Could not update the Munki catalogs. Please run makecatalogs')
print(error)
if __name__ == '__main__':
print('', end='\n')
print('------------------------', end='\n')
print(' Penn State MacAdmins ', end='\n')
print('------------------------', end='\n')
if argv[1] == "":
print('No munki repo specified! Exiting...')
else:
print('• Remove Duplicates from munki repo')
remove_duplicates_from_repo()
update_catalogs()
``` |
{
"source": "JoshuaDownes/s2n",
"score": 2
} |
#### File: tests/integration/s2n_sslyze_test.py
```python
import argparse
import os
import sys
import subprocess
import itertools
import multiprocessing
import json
import time
from pprint import pprint
from os import environ
from multiprocessing.pool import ThreadPool
from s2n_test_constants import *
def cleanup_processes(*processes):
for p in processes:
p.kill()
p.wait()
def run_sslyze_scan(endpoint, port, scan_output_location, enter_fips_mode=False):
"""
Run SSLyze scan against s2nd listening on `endpoint` and `port`
:param int endpoint: endpoint for s2nd to listen on
:param int port: port for s2nd to listen on
:param str scan_output_location: Path and Filename of where to output JSON Results file
:param bool enter_fips_mode: True if s2nd should enter libcrypto's FIPS mode. Libcrypto must be built with a FIPS module to enter FIPS mode.
:return: 0 on successfully negotiation(s), -1 on failure
"""
s2nd_cmd = ["../../bin/s2nd"]
s2nd_cmd.extend([str(endpoint), str(port), "-n", "-s", "--parallelize"])
s2nd_ciphers = "test_all"
if enter_fips_mode == True:
s2nd_ciphers = "test_all_fips"
s2nd_cmd.append("--enter-fips-mode")
s2nd_cmd.append("--ciphers")
s2nd_cmd.append(s2nd_ciphers)
# Run s2nd in the background
s2nd = subprocess.Popen(s2nd_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
sslyze_cmd = ["sslyze"]
sslyze_cmd.extend(["--robot", str(str(endpoint) + ":" + str(port)), str("--json_out=" + scan_output_location)])
sslyze = subprocess.Popen(sslyze_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
sslyze.wait(timeout=(300 * 1000))
#cleanup
cleanup_processes(s2nd, sslyze)
return 0
def print_result(result_prefix, return_code):
suffix = ""
if return_code == 0:
if sys.stdout.isatty():
suffix = "\033[32;1mPASSED\033[0m"
else:
suffix = "PASSED"
else:
if sys.stdout.isatty():
suffix = "\033[31;1mFAILED\033[0m"
else:
            suffix = "FAILED"
print(result_prefix + suffix)
def check_sslyze_results(scan_output_location):
json_obj = json.load(open(scan_output_location))
scan_time = json_obj["total_scan_time"]
robot_result = json_obj["accepted_targets"][0]["commands_results"]["robot"]
if("error_message" in robot_result):
print_result("SSLyze Error: " + robot_result["error_message"] + " ", 1)
return 1
failures = 0
robot_attack_failure = 0
if(robot_result["robot_result_enum"] != "NOT_VULNERABLE_NO_ORACLE"):
robot_attack_failure = 1
failures += 1
print_result("ROBOT Attack Regression Test... ", robot_attack_failure)
print("\nSSLyze Results Location: " + scan_output_location)
print("SSLyze Scan Time: %0.2f seconds\n" % float(scan_time))
return failures
def run_sslyze_test(host, port, fips_mode):
seconds_since_epoch = str(int(time.time()))
scan_output_location = "/tmp/sslyze_output_%s.json" % seconds_since_epoch
run_sslyze_scan(host, port, scan_output_location, fips_mode)
failed = check_sslyze_results(scan_output_location)
os.remove(scan_output_location)
return failed
def main():
parser = argparse.ArgumentParser(description='Runs SSLyze scan against s2nd')
parser.add_argument('host', help='The host for s2nd to bind to')
parser.add_argument('port', type=int, help='The port for s2nd to bind to')
parser.add_argument('--libcrypto', default='openssl-1.1.0', choices=['openssl-1.0.2', 'openssl-1.0.2-fips', 'openssl-1.1.0', 'openssl-1.1.x-master', 'libressl'],
help="""The Libcrypto that s2n was built with. s2n supports different cipher suites depending on
libcrypto version. Defaults to openssl-1.1.0.""")
args = parser.parse_args()
# Retrieve the test ciphers to use based on the libcrypto version s2n was built with
host = args.host
port = args.port
fips_mode = False
if environ.get("S2N_TEST_IN_FIPS_MODE") is not None:
fips_mode = True
print("\n\tRunning s2nd in FIPS mode.")
print("\n\tRunning SSLyze tests with: " + os.popen('openssl version').read())
return run_sslyze_test(host, port, fips_mode)
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "JoshuaDriesman/sample-technical-interview-problems",
"score": 3
} |
#### File: sample-technical-interview-problems/Anagrams/sorting_solution.py
```python
def is_annagram(s1, s2):
return sorted(s1) == sorted(s2)
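# Illustrative usage: is_annagram("listen", "silent") -> True (sorting both strings costs O(n log n))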
``` |
{
"source": "joshuadugie/scripts",
"score": 2
} |
#### File: joshuadugie/scripts/msdn_subscriber_downloads.py
```python
import glob
import httplib
import itertools
import json
import os
import re
import sys
import threading
import time
from collections import OrderedDict
from multiprocessing.pool import ThreadPool
from threading import Lock
# program configuration
num_attempts_per_id = 10
num_threads = 64
max_id = 72500
min_id = 0
idlist = range(max_id, min_id, -1)
output_filename = 'msdn_subscriber_downloads'
remove_attrs = ['DownloadProvider', 'NotAuthorizedReasonId',
'IsAuthorization', 'BenefitLevels']
js_date_pattern = re.compile('/Date\(([0-9]+)\)/')
iso8601_date_pattern = '%FT%TZ'
# program strings
existing_results = 'Pulled in %d existing results'
new_results_length = '\nTotal results: %d'
status_str = '\r<Thread %02d> downloading FileId %05d'
failure_msg = '\n<Thread %02d> FAILED TO download FileId %05d'
exception_str = '\n<Thread %02d> EXCEPTION(%05d): %s'
uncaught_exception_str = '\n<Thread %02d> non-Exception EXCEPTION(%05d)'
# HTTP data
domain = 'msdn.microsoft.com'
path = '/en-us/subscriptions/securejson/GetFileSearchResult'
cookie_str = "MY_MS_COOKIE"
headers = {
"Cookie": cookie_str,
"Origin": "https://"+domain,
"Accept-Encoding": "",
"Accept-Language": "en-US,en;q=0.8",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36" + \
" (KHTML, like Gecko) Chrome/40.0.2214.93" + \
" Safari/537.36",
"Content-Type": "application/json; charset=UTF-8",
"Accept": "application/json, text/javascript, */*; q=0.01",
"Referer": "https://"+domain+"/subscriptions/securedownloads/?",
"X-Requested-With": "XMLHttpRequest",
"Connection": "keep-alive"
}
post_data = '{"Languages":"en","Architectures":"",' + \
'"ProductFamilyIds":"","FileExtensions":"",' + \
'"MyProducts":false,"ProductFamilyId":0,' + \
'"SearchTerm":"","Brand":"MSDN","PageIndex":0,' + \
'"PageSize":10000,"FileId":%d}'
# locks
sysout_lock = Lock()
idlist_lock = Lock()
results_lock = Lock()
def replace_old_date(old_date):
if js_date_pattern.match(old_date):
date_int = int(js_date_pattern.match(old_date).group(1))
date_int /= 1000
new_date_str = time.strftime(iso8601_date_pattern,
time.gmtime(date_int))
return new_date_str.decode()
return old_date
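# Illustrative example (assumes the platform strftime supports the %F/%T shortcuts used above):
#   replace_old_date('/Date(1422748800000)/') -> u'2015-02-01T00:00:00Z'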
def worker_thread(n):
global idlist
global results
tls = threading.local()
# open an SSL keep-alive connection
tls.conn = httplib.HTTPSConnection(domain)
# loop until none are left to download
while len(idlist):
# get an ID to retrieve
idlist_lock.acquire()
if len(idlist):
tls.current_id = idlist.pop()
idlist_lock.release()
else:
idlist_lock.release()
break
# print status
sysout_lock.acquire()
tls.status_str = status_str[:]
sys.stdout.write(tls.status_str % (n, tls.current_id))
sys.stdout.flush()
sysout_lock.release()
# attempt to get the ID num_attempts_per_id times
tls.k = 0
tls.success = False
while tls.k < num_attempts_per_id and not tls.success:
try:
tls.k += 1
tls.conn.request('POST', path, post_data % tls.current_id,
headers)
r1 = tls.conn.getresponse()
response = r1.read()
if response and len(response) > 0:
json_str = json.loads(response)
if json_str and 'TotalResults' in json_str:
if json_str['TotalResults'] > 0:
ordered_json = json \
.JSONDecoder(object_pairs_hook=OrderedDict) \
.decode(response)
new_files = ordered_json['Files']
for new_file in new_files:
for attr in remove_attrs:
if attr in new_file:
new_file.pop(attr)
if 'PostedDate' in new_file:
new_file['PostedDate'] = replace_old_date(new_file['PostedDate'])
if 'Sha1Hash' in new_file:
new_file['Sha1Hash'] = new_file['Sha1Hash'].lower()
results_lock.acquire()
for new_file in new_files:
if new_file not in results:
results.append(new_file)
results_lock.release()
tls.success = True
break
# abort attempt on any exception
except Exception as e:
sysout_lock.acquire()
print exception_str % (n, tls.current_id, repr(e))
sysout_lock.release()
break
except:
sysout_lock.acquire()
print uncaught_exception_str % (n, tls.current_id)
sysout_lock.release()
break
# if failed to get after num_attempts_per_id times
if tls.k == num_attempts_per_id and not tls.success:
sysout_lock.acquire()
print failure_msg % (n, tls.current_id)
sysout_lock.release()
# close connection
tls.conn.close()
return
if __name__ == '__main__':
global results
results = []
# read existing results
for fn in glob.glob(output_filename+'*.json'):
f = open(fn, 'rb')
d = f.read()
f.close()
#os.remove(fn)
j = json \
.JSONDecoder(object_pairs_hook=OrderedDict) \
.decode(d)
j = j['Files']
results += j
print existing_results % len(results)
# Replace old dates
for r in results:
if 'PostedDate' in r:
r['PostedDate'] = replace_old_date(r['PostedDate'])
# kick off the threads to retrieve all FileIds
threads = []
for i in range(num_threads):
t = threading.Thread(target=worker_thread, args=(i,))
threads.append(t)
t.start()
# wait for all threads to finish
for i in range(num_threads):
threads[i].join()
# sort results, prettify, output
results = sorted(results, key=lambda x: x['FileId'])
print new_results_length % len(results)
for i in range(0, len(results)):
results[i]['Sha1Hash'] = results[i]['Sha1Hash'].lower()
for i in range(min_id, max_id, 500):
f = open(output_filename + ('.%d-%d.json'%(i,i+499)), 'wb')
t = filter(lambda x: x['FileId'] >= i and x['FileId'] < (i+500), results)
f.write(json.dumps(OrderedDict([('Files', t)]),
indent=2, separators=(',', ': ')))
f.close()
``` |
{
"source": "JoshuaEbenezer/ChipQA-0",
"score": 2
} |
#### File: JoshuaEbenezer/ChipQA-0/chipqa0.py
```python
import time
from joblib import Parallel,delayed
import numpy as np
import cv2
import queue
import glob
import os
import time
import scipy.ndimage
import joblib
import sys
import matplotlib.pyplot as plt
import niqe
import save_stats
from numba import jit,prange
import argparse
parser = argparse.ArgumentParser(description='Generate ChipQA-0 features from a folder of videos and store them')
parser.add_argument('input_folder',help='Folder containing input videos')
parser.add_argument('results_folder',help='Folder where features are stored')
args = parser.parse_args()
C=1
def gen_gauss_window(lw, sigma):
sd = np.float32(sigma)
lw = int(lw)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd *= sd
for ii in range(1, lw + 1):
tmp = np.exp(-0.5 * np.float32(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
return weights
def compute_image_mscn_transform(image, C=1, avg_window=None, extend_mode='constant'):
if avg_window is None:
avg_window = gen_gauss_window(3, 7.0/6.0)
assert len(np.shape(image)) == 2
h, w = np.shape(image)
mu_image = np.zeros((h, w), dtype=np.float32)
var_image = np.zeros((h, w), dtype=np.float32)
image = np.array(image).astype('float32')
scipy.ndimage.correlate1d(image, avg_window, 0, mu_image, mode=extend_mode)
scipy.ndimage.correlate1d(mu_image, avg_window, 1, mu_image, mode=extend_mode)
scipy.ndimage.correlate1d(image**2, avg_window, 0, var_image, mode=extend_mode)
scipy.ndimage.correlate1d(var_image, avg_window, 1, var_image, mode=extend_mode)
var_image = np.sqrt(np.abs(var_image - mu_image**2))
return (image - mu_image)/(var_image + C), var_image, mu_image
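# Illustrative sanity check (shapes and values below are arbitrary, not from the paper's data):
#   frame = (np.random.rand(64, 64) * 255).astype(np.float32)
#   mscn, sigma, mu = compute_image_mscn_transform(frame)   # mscn is roughly zero-mean, unit-variance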
@jit(nopython=True)
def find_sts_locs(sts_slope,cy,cx,step,h,w):
if(np.abs(sts_slope)<1):
x_sts = np.arange(cx-int((step-1)/2),cx+int((step-1)/2)+1)
y = (cy-(x_sts-cx)*sts_slope).astype(np.int64)
y_sts = np.asarray([y[j] if y[j]<h else h-1 for j in range(step)])
else:
# print(np.abs(sts_slope))
y_sts = np.arange(cy-int((step-1)/2),cy+int((step-1)/2)+1)
x= ((-y_sts+cy)/sts_slope+cx).astype(np.int64)
x_sts = np.asarray([x[j] if x[j]<w else w-1 for j in range(step)])
return x_sts,y_sts
@jit(nopython=True)
def find_sts_polar(cos_theta,sin_theta,cy,cx,r):
x = np.asarray(r*cos_theta,dtype=np.int32)
y = np.asarray(r*sin_theta,dtype=np.int32)
x_sts = cx+x
y_sts = cy+y
return x_sts,y_sts
@jit(nopython=True)
def lut_find(theta,fy,fx,rst,rct):
st_theta = np.pi/2+np.arctan2(fy, fx)
indices = np.searchsorted(theta,st_theta)
rsin_theta = rst[:,indices]
rcos_theta =rct[:,indices]
return rcos_theta,rsin_theta
def lut_find_sts(img_buffer,grad_img_buffer,step,cy,cx,fx,fy,rst,rct,theta):
h, w = img_buffer[step-1].shape[:2]
sts =np.zeros((h,w))
grad_sts = np.zeros((h,w)) #grad_img_buffer[step-1]
start = time.time()
rcos_theta,rsin_theta = lut_find(theta,fy,fx,rst,rct) #cx[None,:]+rcos_theta,cy[None,:]+rsin_theta
x_sts,y_sts = cx[None,:]+rcos_theta,cy[None,:]+rsin_theta
end = time.time()
print(end-start,' time for LUT')
start = time.time()
for i in range(len(cy)):
sts[cy[i]-2:cy[i]+3,cx[i]-2:cx[i]+3] = img_buffer[:,y_sts[:,i],x_sts[:,i]]
grad_sts[cy[i]-2:cy[i]+3,cx[i]-2:cx[i]+3] = grad_img_buffer[:,y_sts[:,i],x_sts[:,i]]
end = time.time()
print(end-start,' is the time for array indexing')
return sts,grad_sts
def find_sts(img_buffer,grad_img_buffer,step,cy,cx,fx,fy):
h, w = img_buffer[step-1].shape[:2]
sts_slope =np.tan(np.pi/2+np.arctan2(fy, fx)) #the direction along the STS plane which lies in the spatial image
sts =np.zeros((h,w))
grad_sts =np.zeros((h,w))
r = np.arange(-2,3)
for i in range(len(cy)):
x_sts,y_sts = find_sts_locs(sts_slope[i],cy[i],cx[i],step,h,w)
sts[cy[i]-2:cy[i]+3,cx[i]-2:cx[i]+3] = img_buffer[:,y_sts,x_sts]
grad_sts[cy[i]-2:cy[i]+3,cx[i]-2:cx[i]+3] = grad_img_buffer[:,y_sts,x_sts]
return sts,grad_sts
def sts_fromfilename(i,filenames,results_folder):
filename = filenames[i]
st_time_length = 5
name = os.path.basename(filename)
print(name)
cap = cv2.VideoCapture(filename)
count=1
ret, prev = cap.read()
print(ret)
scale_percent = 0.5
theta = np.arange(-np.pi/2,3*np.pi/2+0.1,0.3)
ct = np.cos(theta)
st = np.sin(theta)
r = np.arange(-2,3)
rct = np.round(np.outer(r,ct))
rst = np.round(np.outer(r,st))
rct = rct.astype(np.int32)
rst = rst.astype(np.int32)
prevY = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
prevY = prevY.astype(np.float32)
gradient_x = cv2.Sobel(prevY,ddepth=-1,dx=1,dy=0)
gradient_y = cv2.Sobel(prevY,ddepth=-1,dx=0,dy=1)
gradient_mag = np.sqrt(gradient_x**2+gradient_y**2)
prev_grad_mscn, _, _ = compute_image_mscn_transform(gradient_mag)
prev_mscn, _, _ = compute_image_mscn_transform(prevY)
img_buffer =queue.Queue(maxsize=st_time_length)
img_grad_buffer =queue.Queue(maxsize=st_time_length)
img_buffer.put(prev_mscn.astype(np.float32))
img_grad_buffer.put(prev_grad_mscn.astype(np.float32))
step = st_time_length
h,w = prev.shape[0],prev.shape[1]
# dsize
dsize = (int(scale_percent*h),int(scale_percent*w))
cy, cx = np.mgrid[step:h-step:step, step:w-step:step].reshape(2,-1).astype(int) # these will be the centers of each block
dcy, dcx = np.mgrid[step:dsize[0]-step:step, step:dsize[1]-step:step].reshape(2,-1).astype(int) # these will be the centers of each block
prevY_down = cv2.resize(prevY,(dsize[1],dsize[0]),interpolation=cv2.INTER_CUBIC)
gradient_x = cv2.Sobel(prevY_down,ddepth=-1,dx=1,dy=0)
gradient_y = cv2.Sobel(prevY_down,ddepth=-1,dx=0,dy=1)
gradient_mag = np.sqrt(gradient_x**2+gradient_y**2)
prev_grad_mscn, _, _ = compute_image_mscn_transform(gradient_mag)
prev_mscn, _, _ = compute_image_mscn_transform(prevY_down)
head, tail = os.path.split(filename)
spat_list = []
X_list = []
down_img_buffer =queue.Queue(maxsize=st_time_length)
down_img_grad_buffer =queue.Queue(maxsize=st_time_length)
down_img_buffer.put(prev_mscn.astype(np.float32))
down_img_grad_buffer.put(prev_grad_mscn.astype(np.float32))
j=0
total_time = 0
while(True):
# try:
#
j = j+1
# print('Frame ',j)
ret,bgr = cap.read()
count=count+1
print(count)
if(ret==False):
count=count-1
break
Y = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
Y = Y.astype(np.float32)
spat_feats = niqe.compute_niqe_features(Y)
spat_list.append(spat_feats)
gradient_x = cv2.Sobel(Y,ddepth=-1,dx=1,dy=0)
gradient_y = cv2.Sobel(Y,ddepth=-1,dx=0,dy=1)
gradient_mag = np.sqrt(gradient_x**2+gradient_y**2)
Y_grad_mscn,_,_ = compute_image_mscn_transform(gradient_mag)
Y_mscn,_,_ = compute_image_mscn_transform(Y,C=C)
Y_down = cv2.resize(Y,(dsize[1],dsize[0]),interpolation=cv2.INTER_CUBIC)
gradient_x = cv2.Sobel(Y_down,ddepth=-1,dx=1,dy=0)
gradient_y = cv2.Sobel(Y_down,ddepth=-1,dx=0,dy=1)
gradient_mag_down = np.sqrt(gradient_x**2+gradient_y**2)
Ydown_grad_mscn,_,_ = compute_image_mscn_transform(gradient_mag_down)
Ydown_mscn,_,_ = compute_image_mscn_transform(Y_down)
flow =cv2.calcOpticalFlowFarneback(prevY,Y, None, 0.5, 3, 15, 3, 5, 1.2, 0)
down_flow=cv2.calcOpticalFlowFarneback(prevY_down,Y_down, None, 0.5, 3, 15, 3, 5, 1.2, 0)
prevY = Y
prevY_down = Y_down
img_buffer.put(Y_mscn.astype(np.float32))
img_grad_buffer.put(Y_grad_mscn.astype(np.float32))
down_img_buffer.put(Ydown_mscn.astype(np.float32))
down_img_grad_buffer.put(Ydown_grad_mscn.astype(np.float32))
if (down_img_buffer.qsize()>=st_time_length):
med_flow = cv2.medianBlur(flow,5)
fy, fx =med_flow[cy,cx].T# flow[:,:,0].flatten(),flow[:,:,1].flatten() #
sts,sts_grad = lut_find_sts(np.array(img_buffer.queue),np.array(img_grad_buffer.queue),st_time_length,cy,cx,fx.astype(np.float32),fy.astype(np.float32),rst,rct,theta)
feats = save_stats.brisque(sts)
grad_feats = save_stats.brisque(sts_grad)
down_med_flow = cv2.medianBlur(down_flow,5)
dfy, dfx =down_med_flow[dcy,dcx].T# flow[:,:,0].flatten(),flow[:,:,1].flatten() #
dsts,dsts_grad = lut_find_sts(np.array(down_img_buffer.queue),np.array(down_img_grad_buffer.queue),st_time_length,dcy,dcx,dfx.astype(np.float32),dfy.astype(np.float32),rst,rct,theta)
dfeats = save_stats.brisque(dsts)
dgrad_feats = save_stats.brisque(dsts_grad)
allfeats = np.concatenate((feats,dfeats,grad_feats,dgrad_feats))
X_list.append(allfeats)
img_buffer.get()
img_grad_buffer.get()
down_img_buffer.get()
down_img_grad_buffer.get()
X = np.concatenate((np.average(spat_list,axis=0),np.average(X_list,axis=0)))
train_dict = {"features":X}
    filename = os.path.splitext(name)[0] + '.z'
joblib.dump(train_dict,os.path.join(results_folder,filename))
return
def sts_fromvid(args):
filenames = glob.glob(os.path.join(args.input_folder,'*.mp4'))
filenames = sorted(filenames)
flag = 0
Parallel(n_jobs=-10)(delayed(sts_fromfilename)(i,filenames,args.results_folder) for i in range(len(filenames)))
return
def main():
args = parser.parse_args()
sts_fromvid(args)
if __name__ == '__main__':
# print(__doc__)
main()
``` |
{
"source": "joshuaeh/redditSearch",
"score": 4
} |
#### File: joshuaeh/redditSearch/redditSearch.py
```python
import pandas as pd
import requests
import json
import csv
import time
import datetime
def RedditSearch(query, before='', after='', search_type='hybrid'):
'''
query (string)
after (UTC Timestamp) *** Note that these must be integers ***
DEFAULT: 7 Days before now
before (UTC Timestamp)
DEFAULT: now
search_type (string)
'comment' -> only search comments
'submission' -> only search submissions
'hybrid' -> search both comments and submissions
'''
# Defaults
    now_utc = datetime.datetime.now(datetime.timezone.utc).timestamp()
    delta_time = datetime.timedelta(days=7)
    if not after or not before:
        after = int(now_utc - delta_time.total_seconds())
        before = int(now_utc)
print('UTC Before:', before)
print('UTC After:', after)
search_type = search_type.lower()
if search_type not in ['comment', 'submission', 'hybrid']:
print('Unknown search_type, defaulting to hybrid')
search_type = 'hybrid'
subCount = 0 # data counter
commCount = 0 # data counter
subStats = {} # data for storage
commStats = {} #data storage
subList = []
commList = []
# subfunctions
def getPushshiftData_Submission(query, after, before):
'''
query(String) string to search that
after (Timestamp)
before (Timestamp)
'''
url = 'https://api.pushshift.io/reddit/search/submission/?q='+str(query)+\
'&size=1000&after='+str(after)+'&before='+str(before)
# url params well documented at https://github.com/pushshift/api for both comments and submissions
r = requests.get(url)
data = json.loads(r.text)
return data['data']
def getPushshiftData_Comments(query, after, before):
'''
query(String) string to search that
after (Timestamp)
before (Timestamp)
'''
url = 'https://api.pushshift.io/reddit/search/comment/?q='+str(query)+\
            '&size=1000&after='+str(after)+'&before='+str(before)
# url params well documented at https://github.com/pushshift/api for both comments and submissions
r = requests.get(url)
data = json.loads(r.text)
return data['data']
try:
# Collect Submissions
# Get initial Submissions that fit query
if search_type != 'comment':
print('Beginning Submission Query')
data = getPushshiftData_Submission(query, after, before)
# Will run until all posts have been gathered i.e. When the length of data variable = 0
# from the 'after' date up until before date
while len(data) > 0:
after_ = int(data[-1]['created_utc'])
for submission in data:
submission['created_utc'] = datetime.datetime.fromtimestamp(submission['created_utc'])
subCount+=1
subList.append(submission)
# Calls getPushshiftData() with the created date of the last submission
print('Oldest Post Date:' + str(data[-1]['created_utc']))
#update after variable to last created date of submission
#data has changed due to the new after variable provided by above code
data = getPushshiftData_Submission(query, after_, before)
print('Submission Query Finished')
# Collect Comments
if search_type != 'submission':
print('Beginning Comment Query')
data = getPushshiftData_Comments(query, after, before)
# Will run until all posts have been gathered i.e. When the length of data variable = 0
# from the 'after' date up until before date
while len(data) > 0:
after_ = int(data[-1]['created_utc'])
for comment in data:
comment['created_utc'] = datetime.datetime.fromtimestamp(comment['created_utc'])
commCount+=1
commList.append(comment)
# Calls getPushshiftData() with the created date of the last submission
print('Oldest Comment Date:' + str((data[-1]['created_utc'])))
#update after variable to last created date of submission
#data has changed due to the new after variable provided by above code
data = getPushshiftData_Comments(query, after_, before)
print('Comment Query Finished')
    except Exception as e:
        print('Error while processing:', e)
# Convert to dfs (sub_id,created,sub,title,text,url,author,score,nsfw,numComms,permalink,flair
print('Building Output')
subDf = pd.DataFrame(subList)
# subDf = subDf.set_index('created_utc')
commDf = pd.DataFrame(commList)
# commDf = commDf.set_index('created_utc')
print('Number of Submissions Collected:', subCount)
print('Number of Comments Collected:', commCount)
return subDf, commDf
submissions, comments = RedditSearch('gummy bears')
submissions.to_csv('submissions.csv')
comments.to_csv('comments.csv')
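# Illustrative alternative (timestamps below are made up): restrict the search to an explicit UTC window
# and collect comments only.
#   window_start = int(datetime.datetime(2021, 1, 1).timestamp())
#   window_end = int(datetime.datetime(2021, 1, 2).timestamp())
#   subs, comms = RedditSearch('gummy bears', before=window_end, after=window_start, search_type='comment')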
``` |
{
"source": "joshuaeitan/misra_gries",
"score": 3
} |
#### File: misra_gries/misra_gries/misra_gries.py
```python
from collections import Counter
def misra_gries(stream, k):
counters = Counter()
for item in stream:
## case 1: item already has counter or there are empty counters
if item in counters or len(counters) < k:
counters[item] += 1
        ## case 2: item doesn't have a counter and there are no empty counters,
        ## so decrement every counter and drop any that reach zero
        else:
            for key in list(counters.keys()):
                counters[key] -= 1
                if counters[key] == 0:
                    del counters[key]
return counters
## yield one item to misra-gries at a time to simulate a data stream
def get_items(data):
for item in data:
yield item
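# Illustrative usage (the stream below is made up): with k=2, frequent items survive with
# approximate (under-)counts, e.g. this prints a Counter dominated by "a".
if __name__ == "__main__":
    sample = ["a", "b", "a", "c", "a", "b", "a"]
    print(misra_gries(get_items(sample), 2))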
``` |
{
"source": "joshuaellinger/corona19-data-pipeline",
"score": 2
} |
#### File: src/sources/url_manager.py
```python
from typing import Tuple
from loguru import logger
import time
from shared.util import fetch_with_requests
from capture.captive_browser import CaptiveBrowser
class UrlManager:
def __init__(self, headless=True, browser="requests"):
self.history = {}
self.size = 0
self.browser = browser
self.headless = headless
self._captive = None
def is_repeat(self, url: str) -> bool:
return url in self.history
def reset(self):
self.history = {}
self.size = 0
def shutdown(self):
if self._captive != None:
self._captive.close()
self._captive = None
def fetch_with_captive(self, url: str) -> Tuple[bytes, int]:
if self._captive == None:
self._captive = CaptiveBrowser(self.headless, self.browser)
self._captive.navigate(url)
if self._captive.has_slow_elements():
logger.debug(f" found slow elements, wait for 5 seconds")
time.sleep(5)
return self._captive.page_source(), self._captive.status_code()
def fetch(self, url: str) -> Tuple[bytes, int]:
if url in self.history:
return self.history[url]
if self.browser == "requests":
content, status = fetch_with_requests(url)
else:
content, status = self.fetch_with_captive(url)
self.history[url] = (content, status)
if content != None:
self.size += len(content)
return content, status
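# Illustrative usage (URL is hypothetical): repeated fetches of the same URL hit the in-memory history.
#   um = UrlManager(browser="requests")
#   content, status = um.fetch("https://example.com/data.json")
#   um.shutdown()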
```
#### File: src/sources/url_source_manager.py
```python
from loguru import logger
from sources.url_source import UrlSource, UrlSources
from sources.url_source_parsers import sources_config
from sources.url_source_validator import UrlSourceValidator
from shared.directory_cache import DirectoryCache
from transform.change_list import ChangeList
class UrlSourceManager():
def __init__(self, cache: DirectoryCache):
self.cache = cache
self.change_list = None
def update_sources(self, mode: str) -> UrlSources:
self.change_list = ChangeList(self.cache)
self.change_list.load()
self.change_list.start_run()
sources = UrlSources()
sources.scan(sources_config)
sources.read(self.cache, "sources.txt")
logger.info(f" found {len(sources.items)} sources")
validator = UrlSourceValidator()
for src in sources.items:
if not src.check_mode(mode):
continue
src.update_from_remote()
src.write_parsed(src.name, self.cache)
if validator.validate(src):
src.status = "valid"
logger.info(f" {src.name}: save")
src.write(src.name, self.cache, self.change_list)
logger.info(f" {src.name}: updated from remote")
else:
src.status = "invalid"
validator.display_status()
if src.read(src.name, self.cache):
logger.warning(f" {src.name}: use local cache")
else:
self.change_list.record_failed(src.name, "source", src.endpoint, "no local cache")
sources.update_status()
sources.write(self.cache, "sources.txt")
self.change_list.finish_run()
return sources
```
#### File: src/sources/url_source_parsers.py
```python
import io
import pandas as pd
import urllib.parse
import json
from lxml import html
from loguru import logger
from shared.google_sheet import GoogleSheet
def clean_google_url(s: str) -> str:
"extract dest from a google query link"
if s == None or s == "": return None
if type(s) != str: return None
idx = s.find("?q=")
if idx < 0: return s
idx += 3
eidx = s.find("&", idx)
if eidx < 0: eidx = len(s)
s = s[idx:eidx]
s = urllib.parse.unquote_plus(s)
return s
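# Illustrative example (URL is made up):
#   clean_google_url("https://www.google.com/url?q=https%3A%2F%2Fexample.com&sa=D") -> "https://example.com"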
# ------------------------------------------
def parse_google_csv(content: bytes) -> pd.DataFrame:
df = pd.read_csv(io.StringIO(content.decode('utf-8')))
print(df.columns)
df["location"] = df["state"]
df["main_page"] = df["covid19Site"].apply(clean_google_url)
df["data_page"] = df["covid19SiteSecondary"].apply(clean_google_url)
return df
# ------------------------------------------
state_abrrevs = {
"Alabama": "AL",
"Alaska": "AK",
"Arizona": "AZ",
"Arkansas": "AR",
"California": "CA",
"Colorado": "CO",
"Connecticut": "CT",
"Delaware": "DE",
"Florida": "FL",
"Georgia": "GA",
"Hawaii": "HI",
"Idaho": "ID",
"Illinois": "IL",
"Indiana": "IN",
"Iowa": "IA",
"Kansas": "KS",
"Kentucky": "KY",
"Louisiana": "LA",
"Maine": "ME",
"Maryland": "MD",
"Massachusetts": "MA",
"Michigan": "MI",
"Minnesota": "MN",
"Mississippi": "MS",
"Missouri": "MO",
"Montana": "MT",
"Nebraska": "NE",
"Nevada": "NV",
"New Hampshire": "NH",
"New Jersey": "NJ",
"New Mexico": "NM",
"New York": "NY",
"North Carolina": "NC",
"North Dakota": "ND",
"Ohio": "OH",
"Oklahoma": "OK",
"Oregon": "OR",
"Pennsylvania": "PA",
"Rhode Island": "RI",
"South Carolina": "SC",
"South Dakota": "SD",
"Tennessee": "TN",
"Texas": "TX",
"Utah": "UT",
"Vermont": "VT",
"Virginia": "VA",
"Washington": "WA",
"West Virginia": "WV",
"Wisconsin": "WI",
"Wyoming": "WY",
"District of Columbia": "DC",
"Marshall Islands": "MH",
"Armed Forces Africa": "AE",
"Armed Forces Americas": "AA",
"Armed Forces Canada": "AE",
"Armed Forces Europe": "AE",
"Armed Forces Middle East": "AE",
"Armed Forces Pacific": "AP",
# special cases
"Washington DC": "DC",
"Commonwealth of the Northern Mariana Islands": "MP",
"Guam": "GU",
"Puerto Rico": "PR",
"Virgin Islands": "VI"
}
def parse_urlwatch(content: bytes) -> pd.DataFrame:
recs = json.loads(content)
df = pd.DataFrame(recs)
df.index = df.name
df["location"] = df.name
df["main_page"] = df.url.apply(clean_google_url)
df["data_page"] = ""
df["error_msg"] = ""
# assign 2nd link to data page so we get only one record instead of two
    # for multiple links, treat it as a variant.
names = {}
    for pos, x in enumerate(df.itertuples()):
        cnt = names.get(x.name)
        if cnt == None: cnt = 0
        names[x.name] = cnt + 1
        if cnt == 1:
            df.iloc[pos, df.columns.get_loc("data_page")] = x.main_page
            df.iloc[pos, df.columns.get_loc("main_page")] = ""
            df.iloc[pos, df.columns.get_loc("location")] += "_data"
        elif cnt > 1:
            df.iloc[pos, df.columns.get_loc("main_page")] = ""
            df.iloc[pos, df.columns.get_loc("location")] += f"_{cnt}"
# apply state abbreviations
df["abbrev"] = df.name.map(state_abrrevs)
missing = pd.isnull(df.abbrev)
df.loc[~missing, "location"] = df.abbrev
df.loc[missing, "error_msg"] = "bad abbrev"
df_new = pd.DataFrame({
"location": df["location"],
"main_page": df["main_page"],
"data_page": df["data_page"],
"error_msg": df["error_msg"],
})
return df_new
# ------------------------------------------
def parse_states(content: bytes) -> pd.DataFrame:
sheet = GoogleSheet(content)
df = sheet.get_tab("States")
#print(f"columns = \n{df.columns}")
df_new = pd.DataFrame({
"location": df["State"],
"main_page": df["COVID-19 site"].apply(clean_google_url),
"data_page": df["COVID-19 site (secondary)"].apply(clean_google_url),
})
return df_new
# ------------------------------------------
def parse_community_counties(content: bytes) -> pd.DataFrame:
try:
doc = html.fromstring(content)
table = doc.find(".//table")
# data the columns out of an HTML table
num_cols = 14 # outcome
names = ["row"]
cols = [[]]
def text(x: html.Element):
t = x.text
if t == None: t = ""
if len(x) > 0:
for ch in x:
t += " " + text(ch)
if x.tail != None:
t += " " + x.tail
return t.strip()
def extract(x: html.Element, row_num: int) -> int:
if x.tag == "tr":
row_num += 1
if row_num == 1: return row_num
if row_num > 2:
cols[0].append(row_num-2)
i = 0
for ch in x:
t = text(ch)
if row_num == 2:
names.append(t)
cols.append([])
elif i < len(names):
cols[i+1].append(t)
if i == num_cols: break
i += 1
return row_num
elif x.tag in ["table", "thead", "tbody"]:
for ch in x:
row_num = extract(ch, row_num)
return row_num
else:
print(f"bad tag {x.tag}")
extract(table, 0)
df = pd.DataFrame(index = cols[0])
for i in range(len(names)):
n = names[i]
df[n] = cols[i]
df = df[df.Country == "USA"]
df = df[~pd.isnull(df["Abbr."])]
df = df[df["Abbr."].str.strip() != ""]
df_new = pd.DataFrame({
"location": df["Abbr."],
"main_page": df["Source"],
})
return df_new
except Exception as ex:
logger.exception(ex)
exit(-1)
# ------------------------------------------
def parse_cds(content: bytes) -> pd.DataFrame:
recs = json.loads(content)
df = pd.DataFrame(recs)
df.reset_index(inplace=True)
df = df[df.country == "USA"]
df = df[~pd.isnull(df.county)]
def clean_name(s: str) -> str:
if s == None or s == "": return ""
s = s.replace(" County", "")
s = s.replace(". ", "_")
s = s.replace(".", "_")
s = s.replace(" ", "_")
s = s.replace("'", "")
s = s.replace(",", "_")
return s
df["location"] = df.state + "." + df.county.apply(clean_name)
#TODO: add population as a comment
df_new = pd.DataFrame({
"location": df.location,
"main_page": df.url
})
return df_new
# ------------------------------------------
sources_config = [
{
"name": "google-states-csv",
"subfolder": "",
"endpoint": "https://covid.cape.io/states/info.csv",
"content_type": "csv",
"parser": parse_google_csv,
"action": "enabled",
"display_dups": False
},
{
"name": "google-states",
"subfolder": "",
"endpoint": "https://docs.google.com/spreadsheets/d/18oVRrHj3c183mHmq3m89_163yuYltLNlOmPerQ18E8w/htmlview?sle=true#",
"parser": parse_states,
"content_type": "html",
"action": "disabled",
"display_dups": False
},
{
"name": "urlwatch",
"subfolder": "",
"endpoint": "https://covidtracking.com/api/urls",
"content_type": "json",
"parser": parse_urlwatch,
"action": "enabled",
"display_dups": False
},
{
"name": "cds",
"subfolder": "",
"endpoint": "http://blog.lazd.net/coronadatascraper/data.json",
"parser": parse_cds,
"content_type": "json",
"action": "enabled",
"display_dups": False
},
{
"name": "community-data-counties",
"subfolder": "counties",
"endpoint": "https://docs.google.com/spreadsheets/d/1T2cSvWvUvurnOuNFj2AMPGLpuR2yVs3-jdd_urfWU4c/edit#gid=1477768381",
"content_type": "html",
"parser": parse_community_counties,
"action": "test",
"display_dups": False
}
]
```
#### File: src/transform/html_formater.py
```python
from lxml import html
from loguru import logger
padding = ["\n" + " "*n for n in range(50)]
padding.insert(0, "")
class HtmlFormater():
def __init__(self):
pass
def _indent_text(self, text: str, depth: int) -> str:
if text == None: return padding[depth]
text = text.strip()
if text == "": return padding[depth]
parts = text.split("\n")
if len(parts) == 1: return text + padding[depth]
parts.insert(0, "")
parts.append("")
return padding[depth+2].join(parts)
def _indent_elem(self, elem: html.Element, depth: int):
if len(elem) > 0:
elem.text = self._indent_text(elem.text, depth+1)
for ch in elem:
self._indent_elem(ch, depth+1)
elem[-1].tail = self._indent_text(elem.tail, depth)
elem.tail = self._indent_text(elem.tail, depth)
else:
elem.text = self._indent_text(elem.text, 0)
elem.tail = self._indent_text(elem.tail, depth)
def _inject_extra_elements(self, tree: html.Element, xurl: str):
if xurl == None: return
if len(tree) == 0 or tree[0].tag != "head":
return
base = tree.findall("base")
if len(base) > 0: return
base = html.Element("base")
base.attrib["ref"] = xurl
tree.insert(0, base)
def format(self, xurl: str, content: bytes) -> bytes:
tree = html.fromstring(content)
self._inject_extra_elements(tree, xurl)
self._indent_elem(tree, 1)
return html.tostring(tree)
```
#### File: src/transform/html_helpers.py
```python
from lxml import html
def make_source_link(kind: str, stage: str, name: str) -> html.Element:
d = html.Element("span")
if kind != stage and kind != "source":
a = html.Element("a")
# "http://covid19-api.exemplartech.com/github-data/raw/AZ.html
a.attrib["href"] = f"../{stage}/{name}"
a.text = stage
d.append(a)
else:
d.text = stage
d.tail = " < "
return d
def make_source_links(kind: str, name: str, source: str):
div = html.Element("div")
div.attrib["class"] = "source"
kind = kind.lower()
if not kind in ["extract", "clean", "raw", "source"]:
raise Exception("Invalid kind: " + kind)
d = make_source_link(kind, "extract", name)
div.append(d)
d = make_source_link(kind, "clean", name)
div.append(d)
d = make_source_link(kind, "raw", name)
div.append(d)
d = make_source_link(kind, source, name)
d.tail = ""
div.append(d)
return div
```
#### File: corona19-data-pipeline/src/x_html_compare.py
```python
import re
from shared.directory_cache import DirectoryCache
def clean_content(content: bytes) -> bytes:
if content == None: return None
content = re.sub(b'<input type="hidden" .* />', b'', content)
#CA
content = re.sub(b'formDigestElement.value = .*', b'', content)
#CO
content = re.sub(b'"beacon":"bam.nr-data.net".*}', b'}', content)
#CO_data
content = re.sub(b'nonce=".*"', b'', content)
#CT
content = re.sub(b'<meta name="VIcurrentDateTime" content="637193341564631315"', b'<meta name="VIcurrentDateTime" content=""', content)
#NJ
content = re.sub(b'<script async type="text/javascript" src="/_Incapsula_Resource?SWJIYLWA=.*">', b'<script>', content)
#WA
content = re.sub(b'gomenu[0-9a-z]+', b'gomenu1234', content)
content = re.sub(b'megamenu[0-9a-z]+', b'megamenu1234', content)
# content = re.sub(b'<script.*>.*?</script>', b'', content, count=1000, flags=re.DOTALL)
return content
class HTMLCompare:
def __init__(self, cache: DirectoryCache):
self.cache = cache
self.content_a = b''
self.content_b = b''
self.is_identical = False
self.is_re = False
def load_saved_versions(self, key: str):
" loads A/B version, returns true if they are identical "
self.content_a = self.cache.load(key, "A")
self.content_b = self.cache.load(key, "B")
self.is_identical = False
self.is_re = False
if self.content_a == None or self.content_b == None:
return
if self.content_a == self.content_b:
self.is_identical = True
else:
self.is_re = clean_content(self.content_a) == clean_content(self.content_b)
def is_different(self, new_content: bytes, old_content: bytes) -> bool:
if self.is_identical:
return new_content != old_content
if self.is_re:
return clean_content(new_content) != clean_content(old_content)
raise Exception("Cannot compare because clean fails to make content the same")
```
#### File: corona19-data-pipeline/tests/test_capture.py
```python
import sys
import os
from capture.captive_browser import CaptiveBrowser
def test_auto_resize():
browser = CaptiveBrowser(headless=False, browser="firefox")
browser.navigate("https://coronavirus.dc.gov/page/coronavirus-data")
browser.screenshot("c:\\temp\\test.png", full_page=True)
if __name__ == "__main__":
test_auto_resize()
```
#### File: corona19-data-pipeline/tests/test_git.py
```python
import sys
import os
from src import check_path
check_path()
from shared.util_git import pull, push, isbehind
def test_status():
xdir = sys.path[0]
x = isbehind(xdir)
print(f"isbehind = {x}")
if __name__ == "__main__":
test_status()
``` |
{
"source": "Joshua-Elms/algorithms",
"score": 4
} |
#### File: Regression/Linear_Regression/helper_functions.py
```python
import numpy as np
def SS(data:np.ndarray, eq_params:tuple|list) -> float:
"""
    Calculate the sum of squared error for a given dataset and line.
Args:
data: Row major numpy array with target variable in last column
eq_params: coefficients for slope-intercept form of line, ex: if eq is y = 1 + 2x -> eq_params is (1, 2)
Importantly, order of the coefficients in eq_params must be the same as the variables in data
Returns:
sum_squared_diffs: Float value, sum of squared error from data to prediction line
"""
# calculate the predicted values of the target variable using the equation parameters and all non-target dimensions
y_intercept, *dim_coefficients = eq_params
coefficients_applied_to_data = data[:, :-1] * dim_coefficients
predicted_target_values = np.sum(coefficients_applied_to_data, axis=1) + y_intercept
# calculate, square, and sum the distances from each point to its position predicted by the line
diffs = predicted_target_values - data[:, -1]
squared_diffs = np.square(diffs)
sum_squared_diffs = np.sum(squared_diffs)
return sum_squared_diffs
# def determine_line_of_best_fit_for_vis():
# pass
def determine_LBF(data: np.array) -> tuple:
"""
Iteratively calculate the line of best fit (LBF) for given data
Args:
data: Row major representation of data in np.array w/ target variable in last column
precision: # of decimal places to calculate the paramaters for LBF to
Returns:
line_params: tuple of parameters for LBF, ex: if line is y = 1 + 2x -> line_params is (1, 2)
"""
pass
if __name__ == "__main__":
points = np.array([[0, 0], [1, 2], [2, 4], [3, 6]])
line_params = [0, 2]
result = SS(points, line_params)
print(result)
points_t = np.transpose(points)
print(np.matmul(points_t,points))
``` |
{
"source": "Joshua-Elms/CSCI-B365",
"score": 4
} |
#### File: Meteorology_Modeling_Project/functions/gen_superset.py
```python
from itertools import combinations, chain
def power_set(iterable):
"""
Generate the power set (without the empty set) for any given iterator
"""
pset = chain.from_iterable(combinations(iterable, r) for r in range(len(iterable)+1))
return list(list(combo) for combo in pset if len(combo) > 0)
if __name__ == "__main__":
print(power_set([1, 2, 3]))
```
#### File: CSCI-B365/Miscellaneous/kmeans.py
```python
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
from math import dist
import imageio
def set_params(k=2, half_of_points=20, dims=2, means=(5, 15), stdevs=(1, 1)):
"""
Determine the parameters of the algorithm
Args:
None
Returns:
k: int, determines # of centroids that will be chosen (final # of clusters)
"""
means = (5, 15)
stdevs = (2, 4)
half_of_points = 3
k=2
return k, half_of_points, dims, means, stdevs
def generate_data(k, half_of_points, dims, distr_means, stdevs):
"""
Uses random generator from NumPy to create two clusters for us to play with
Based on the min and max of each cluster, determine the initial conditions for our centroids
Args:
k: num of centroids/clusters to be found
half_of_points: Multiply this by 2 to find the number of clusterable data points from which we will make a data frame, 1/2 for each cluster
dims: How many dimensions the data will span (each will be a column in final df); separate from clusters, as this will only generate 3 clusters
distr_means: Where the clusters will be centered around (mean for normal distr.)
stdevs: Measure of spread for each cluster (stdev for normal distr.)
Returns:
init_data_matrix: nd.array containing our points for clustering
means: initial values for each of the k centroids, "dim" dimensional random values from ranges of init_data_matrix
"""
rng = np.random.default_rng()
clust1 = rng.normal(loc=distr_means[0], scale=stdevs[0], size=(half_of_points, dims))
clust2 = rng.normal(loc=distr_means[1], scale=stdevs[1], size=(half_of_points, dims))
data = np.concatenate((clust1, clust2)) # create nd.array from data
columns = [data[:, i] for i in range(dims)] # get list of all columns
dim_ranges = [(column.min(), column.max()) for column in columns] # get min and max of each column into tuple
means = [rng.uniform(*dim_range, size=k).tolist() for dim_range in dim_ranges] # create dim sized, randomly selected points to represent centroids
return data, means
def get_column_major(arr):
"""
Internal function for switching nested list from row major to column major
"""
cm_arr = []
for c in range(len(arr[0])):
new_col = []
for row in arr:
new_col.append(row[c])
cm_arr.append(new_col)
return cm_arr
def build_df(matrix, rm_means, dims, k_num):
"""
Create dataframe containing all points, centroids, and type / cluster labels for each
Args:
matrix: matrix containing points to cluster
rm_means: k_num points (repr'ed by tpls) that fall randomly in the range of each dimension
dims: int, # of numeric dimensions for data
k_num: # of cluster to form
Returns:
df: Dataframe containing all necessary data for clustering
"""
# Initialize df containing only generated points for clustering
df1 = pd.DataFrame(matrix)
col_names = list(f"Dim_{i+1}" for i in range(dims))
df1.columns = col_names # set names for all of our points dimensions
df1["Type"] = "data" # Type will be either data or centroid
df1["Cluster"] = "None" # Cluster will be one of the centroids (k1, k2, ...)
# Initialize df containing all centroids
cm_means = get_column_major(rm_means)
k_dict = {col_names[d]:rm_means[d] for d in range(dims) for i in range(k_num)} # Maps Dim1: initial dim1 values for each k, etc
k_dict["Type"] = "centroid"
k_dict["Cluster"] = [f"C{i+1}" for i in range(k_num)] # Assigns a cluster to each centroid
# Concatenate the two df's above
df_k = pd.DataFrame(k_dict)
df = pd.concat([df1, df_k], ignore_index=True)
df.index += 1
return df
def Assignment(df_in, k_num):
"""
Given a dataframe containing points to be clustered and some k centroids,
assign each point to the nearest (euc. distance) centroid's cluster using math.dist()
Args:
df_in: dataframe
Returns:
df_out: reassigned points to new dataframe
same: Bool, whether or not df_in == df_out
"""
centroids = df_in[df_in.Type == "centroid"]
centroids.index = tuple(range(k_num))
points = df_in[df_in.Type != "centroid"]
df_point_dists = [points.copy(deep=True) for k in range(k_num)] # must be able to calculate distance from points for each centroid
for i in range(k_num): # Calculate distance between some k and some point for each k and point
df_point_dists[i]["Dist"] = 0
for index, row in df_point_dists[i].iterrows():
df_point_dists[i].iloc[index-1, 4] = dist((row["Dim_1"], row["Dim_2"]), (centroids.iloc[i]["Dim_1"], centroids.iloc[i]["Dim_2"]))
df_out = df_in.copy(deep=True) # create output df
for i in range(len(points)): # for each set of distances, choose the minimum one and assign the cluster for the point it was closest to
distances = [df_point_dists[v].iloc[i]["Dist"] for v in range(k_num)]
min_index = distances.index(min(distances))
df_out.iloc[i, 3] = centroids.iloc[min_index]["Cluster"]
return df_out, df_in
def Update(df, k_num):
"""
Given a df with clusters assigned, change centroids to reflect the actual centers of their clusters
Args:
df
Returns:
df
"""
l1 = list(df[df["Type"] == "centroid"]["Dim_1"])
l2 = list(df[df["Type"] == "centroid"]["Dim_2"])
k_origs = get_column_major(((l1), (l2)))
Dim_1_mean = [df.iloc[1:-k_num][df["Cluster"] == f"C{i+1}"]["Dim_1"].mean() for i in range(k_num)]
Dim_2_mean = [df.iloc[1:-k_num][df["Cluster"] == f"C{i+1}"]["Dim_2"].mean() for i in range(k_num)]
seq = tuple(range(-1, -k_num-1, -1))
for i in range(k_num): # (i - 1)
df.iloc[seq[i], 0] = Dim_1_mean[seq[i]]
df.iloc[seq[i], 1] = Dim_2_mean[seq[i]]
l3 = list(df[df["Type"] == "centroid"]["Dim_1"])
l4 = list(df[df["Type"] == "centroid"]["Dim_2"])
k_modded = get_column_major(((l3), (l4)))
largest_move = max([dist((k_origs[i][0], k_origs[i][1]), (k_modded[i][0], k_modded[i][1])) for i in range(k_num)])
return df, largest_move
def generate_graph(df, save_path, step, k_num, plines=None, plabels=None):
"""
Generate plot of all points and centroids
Args:
df: data to be graphed, must have "Dim_1", "Dim_2", "Cluster", and "Type" attrs
save_path: relative / absolute path to folder where plot should be saved
step: which iteration of the algorithm you are on
"""
all_color_lst = ["#FF0000", "#0000FF", "#00FF00", "#D030C9", "#D09D30"]
all_color_dict = {"None":"#000000", "C1":"#FF0000", "C2":"#0000FF", "C3":"#00FF00", "C4":"#D030C9", "C5":"#D09D30"}
color_dict = {f"C{i+1}":str(all_color_lst[i]) for i in range(k_num)}
color_dict["None"] = "#000000"
color_lst = all_color_lst[:k_num+1]
if step > 0:
sns.set_theme()
sp = sns.scatterplot(data=df, x="Dim_1", y="Dim_2", hue="Cluster", palette=all_color_dict, style="Type", size="Type", sizes=[25,80]).set(title=f"K-Means Algorithm: Step {step}")
plt.legend([],[], frameon=False)
sp[0].figure.legend(plines, plabels, bbox_to_anchor=(0.915, 0.88), loc='upper left', borderaxespad=0)
plt.savefig(f"{save_path}/kmeans_step{step}.jpeg", bbox_inches='tight')
plt.clf()
pass
else:
sns.set_theme()
sp = sns.scatterplot(data=df, x="Dim_1", y="Dim_2", hue="Cluster", palette=all_color_dict, style="Type", size="Type", sizes=[25,80]).set(title=f"K-Means Algorithm: Step {step}")
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.savefig(f"{save_path}/kmeans_step{step}.jpeg", bbox_inches='tight')
lines_labels = [ax.get_legend_handles_labels() for ax in sp[0].figure.axes]
lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]
plt.clf()
return lines, labels
def controller(df, k_num, path):
"""
Puts data through steps and calls other function until it is determined that the clusters are complete
"""
changing = True
step = 0
lines, labels = generate_graph(df, path, step, k_num)
while changing:
df, compare = Assignment(df, k_num)
df, change = Update(df, k_num)
changing = False if change < 0.1 else True
step += 1
generate_graph(df, path, step, k_num, lines, labels)
return step
def make_gif(path_in, iters):
"""
Uses imageio library to convert a series of images into a gif
Args:
path_in: str filepath for folder storing jpegs
"""
filenames = [f"{path_in}/kmeans_step{i}.jpeg" for i in range(iters)]
movie_path = f"{path_in}/movie.gif"
with imageio.get_writer(movie_path, mode='I', duration=3) as writer:
for filename in filenames:
image = imageio.imread(filename)
writer.append_data(image)
pass
def main():
path = "/Users/joshuaelms/Desktop/github_repos/CSCI-B365/Miscellaneous/jpgs"
k_num, half_of_points, dims, distr_means, stdevs = set_params()
init_matrix, init_means = generate_data(k_num, half_of_points, dims, distr_means, stdevs)
df = build_df(init_matrix, init_means, dims, k_num)
iters = controller(df, k_num, path)
make_gif(path, iters)
def classwork():
path = "/Users/joshuaelms/Desktop/github_repos/CSCI-B365/Miscellaneous/jpgs"
k_num, half_of_points, dims, distr_means, stdevs = set_params()
data = np.asarray([[4, 5], [2, 4], [1, 3], [3, 3], [6, 2], [8, 3], [7, 1]])
dim_ranges = [(column.min(), column.max()) for column in data] # get min and max of each column into tuple
rng = np.random.default_rng()
d_means1 = [rng.uniform(*dim_range, size=k_num).tolist() for dim_range in dim_ranges] # create dim sized, randomly selected points to represent centroids
d_means2 = [[1, 7, 4], [3, 1, 5]]
df = build_df(data, d_means1, dims, k_num)
iters = controller(df, k_num, path)
make_gif(path, iters)
if __name__ == "__main__":
main()
```
#### File: Joshua-Elms/CSCI-B365/practice.py
```python
import matplotlib.pyplot as plt
# import pandas as pd
# import sys
# sys.path.insert(0, "/Users/joshuaelms/Desktop/github_repos/CSCI-B365_repo/CSCI-B365/Miscellaneous")
# import homemade_stats as hstat
# t_points = [(0,0), (2,2), (4,4)]
# df = pd.DataFrame(t_points, columns=("X", "Y"))
# print(df.describe())
# fig, ax = plt.subplots()
# ax.plot(t_points, "ro", linewidth=0)
# print(hstat.std(df["X"]))
# print(hstat.std(df["Y"]))
# plt.show()
def get_column_major(arr):
cm_arr = []
for c in range(len(arr[0])):
new_col = []
for row in arr:
new_col.append(row[c])
cm_arr.append(new_col)
return cm_arr
def multiply_matrices(arr1, arr2):
arr_out = []
cm_arr2 = get_column_major(arr2)
for r, row_arr1 in enumerate(arr1):
new_row = []
for col_arr2 in cm_arr2:
dp = 0
for i in range(len(col_arr2)):
dp += row_arr1[i]*col_arr2[i]
new_row.append(dp)
arr_out.append(new_row)
return arr_out
# for n in range(9):
# if n == 10:
# print("- "*10)
# print(q5(n))
arr1 = [[2, -1], [4, 1], [5, -3]]
arr2 = [[3, 1], [-2, -1]]
for row in multiply_matrices(arr1, arr2):
print(row)
# for row in get_column_major(arr2):
# print(row)
``` |
{
"source": "JoshuaEng/FLINNG",
"score": 3
} |
#### File: FLINNG/analysis/recall_precision.py
```python
import matplotlib
import numpy as np
import argparse
from get_data import get_data_colored, get_dataset_title
import traceback
parser = argparse.ArgumentParser()
parser.add_argument("dataset")
parser.add_argument("directory")
parser.add_argument('-save', action='store_true')
parser.add_argument("topk")
args = parser.parse_args()
directory = args.directory
save = args.save
dataset = args.dataset
compare_by = args.topk
if save:
matplotlib.use('agg')
import matplotlib.pyplot as plt
def get_pareto(record):
record.sort(key = lambda x: x[3])
record = record[::-1]
result = [record[0]]
for i in range(1, len(record)):
if result[-1][2] < record[i][2]:
result.append(record[i])
return result
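# Records are assumed (from the usage below) to look like (label, time_ms, recall, precision);
# get_pareto keeps the precision/recall Pareto frontier, e.g. (illustrative values):
#   get_pareto([("R1@x", 5, 0.2, 0.9), ("R1@y", 5, 0.5, 0.6), ("R1@z", 5, 0.4, 0.3)])
#   -> [("R1@x", 5, 0.2, 0.9), ("R1@y", 5, 0.5, 0.6)]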
titlefontsize = 22
axisfontsize = 18
labelfontsize = 12
# markers = ["s", "x"]
# times = [2, 20]
# linestyles = ["--", "-."]
times = [20]
linestyles = ["--"]
all_data = get_data_colored(dataset)
for method, data, c, mark, marksize in all_data:
for ls, t in list(zip(linestyles, times)):
try:
filtered = get_pareto([d for d in data if d[1] < t and d[0].startswith("R" + compare_by + "@")])
plt.plot([d[2] for d in filtered], [d[3] for d in filtered], color = c, linestyle = ls, marker = mark, label = method.upper()+" ("+str(t)+"ms)", alpha = 0.8, markersize = marksize)
except:
print(method, "failed in recall precision on", dataset)
pass
plt.legend(fontsize = labelfontsize, loc = 'lower left')
plt.title(f"{get_dataset_title(dataset)}: Top-{compare_by} Nearest Neighbours",fontsize = titlefontsize)
plt.xlabel("Recall",fontsize = axisfontsize)
plt.ylabel("Precision",fontsize = axisfontsize)
if save:
plt.savefig(f"{dataset}-{compare_by}.png", bbox_inches='tight')
else:
plt.show()
``` |
{
"source": "JoshuaEN/restler-fuzzer-testing",
"score": 2
} |
#### File: Server/restler-fuzzer/run.py
```python
# Modified from https://github.com/microsoft/restler-fuzzer/blob/main/restler-quick-start.py
# Original copyright:
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import argparse
import contextlib
import os
import subprocess
import requests
import json
import time
import shutil
from jsonpointer import resolve_pointer, set_pointer, JsonPointer
from pathlib import Path
RESTLER_TEMP_DIR = 'restler_working_dir'
@contextlib.contextmanager
def usedir(dir):
""" Helper for 'with' statements that changes the current directory to
@dir and then changes the directory back to its original once the 'with' ends.
Can be thought of like pushd with an auto popd after the 'with' scope ends
"""
curr = os.getcwd()
os.chdir(dir)
try:
yield
finally:
os.chdir(curr)
def download_spec(url, api_spec_path):
print(f'> Downloading OpenAPI Specification from {url!s}')
swagger = requests.get(url)
print(f'> Saving OpenAPI Specification to {api_spec_path!s}')
Path(api_spec_path).write_bytes(swagger.content)
def transform_template_config(output_config_path, inputs_path, results_path, compile_path):
config = json.loads(inputs_path.joinpath('config.template.json').read_bytes())
if "SwaggerSpecFilePath" in config:
config["SwaggerSpecFilePath"] = [str(results_path.joinpath('swagger.json'))]
if "GrammarOutputDirectoryPath" in config:
config["GrammarOutputDirectoryPath"] = str(compile_path)
if "CustomDictionaryFilePath" in config:
config["CustomDictionaryFilePath"] = str(inputs_path.joinpath('dict.json'))
if "AnnotationFilePath" in config:
config["AnnotationFilePath"] = str(inputs_path.joinpath('annotations.json'))
if "EngineSettingsFilePath" in config:
config["EngineSettingsFilePath"] = str(results_path.joinpath('engine_settings.json'))
if "GrammarInputFilePath" in config:
config["GrammarInputFilePath"] = str(inputs_path.joinpath('grammar.json'))
print(f'> Writing config to {output_config_path!s}')
output_config_path.write_text(json.dumps(config,sort_keys=True, indent=4))
def transform_post_compile_config(output_config_path, compile_path, output_grammar_path):
config = json.loads(compile_path.joinpath('config.json').read_bytes())
config["GrammarInputFilePath"] = str(output_grammar_path)
print(f'> Writing config to {output_config_path!s}')
output_config_path.write_text(json.dumps(config,sort_keys=True, indent=4))
def transform_grammar_json(output_grammar_path, grammar_transforms_path, compile_path):
grammar_transforms = json.loads(grammar_transforms_path.read_bytes())['transforms']
grammar_path = compile_path.joinpath('grammar.json')
grammar = json.loads(grammar_path.read_bytes())
resolve_pointer_default = {}
for grammar_transform in grammar_transforms:
endpoint_grammar = next((e for e in grammar['Requests'] if e['id']['endpoint'] == grammar_transform['endpoint'] and e['id']['method'].lower() == grammar_transform['method'].lower()), None)
grammar_transform_str = f"""{grammar_transform['method'].upper()} {grammar_transform['endpoint']}"""
if endpoint_grammar is None:
raise Exception(f'Transform {grammar_transform_str} not found in {str(grammar_path)}')
transforms = grammar_transform['transforms']
for transform in transforms:
pointer = transform['pointer']
val = resolve_pointer(endpoint_grammar, pointer, resolve_pointer_default)
command = transform['action'][0]
args = transform['action'][1:]
if command == 'replace':
if val is resolve_pointer_default:
raise Exception(f'Transform {grammar_transform_str} {pointer} path not found in {str(grammar_path)}')
set_pointer(endpoint_grammar, pointer, args[0])
elif command == 'remove':
if val is resolve_pointer_default:
raise Exception(f'Transform {grammar_transform_str} {pointer} path not found in {str(grammar_path)}')
if isinstance(val, list):
if not isinstance(args[0], int):
raise Exception(f'Transform {grammar_transform_str} {pointer} expects to be given an int for removing the index of an Array')
                    if args[0] >= len(val):
raise Exception(f'Transform {grammar_transform_str} {pointer} the index to remove is {args[0]}, but the array\'s max index is {len(val)-1}')
del val[args[0]]
elif isinstance(val, dict):
if args[0] not in val:
raise Exception(f'Transform {grammar_transform_str} {pointer} the key {args[0]} did not appear in the object')
del val[args[0]]
else:
raise Exception(f'Transform {grammar_transform_str} {pointer} cannot remove from {val!s}')
else:
raise Exception(f'Transform {grammar_transform_str} {pointer} unknown command: {command}')
output_grammar_path.write_text(json.dumps(grammar,sort_keys=False, indent=2))
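# Illustrative only: the transforms file this function expects has roughly this shape; the
# endpoint, method, pointer, and replacement values below are hypothetical, not from a real grammar.json:
# {
#   "transforms": [
#     {
#       "endpoint": "/api/v1/widgets",
#       "method": "post",
#       "transforms": [
#         { "pointer": "/definition/requestParameters/body/0", "action": ["replace", {"fuzzable": false}] },
#         { "pointer": "/definition/requestParameters/query", "action": ["remove", 1] }
#       ]
#     }
#   ]
# }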
def transform_template_engine_settings(fuzz, output_engine_settings_path, inputs_path, results_path):
settings = json.loads(inputs_path.joinpath('engine_settings.template.json').read_bytes())
if "per_resource_settings" in settings:
for path in settings["per_resource_settings"]:
resource_settings = settings["per_resource_settings"][path]
if "custom_dictionary" in resource_settings:
relative_custom_dictionary_path = resource_settings["custom_dictionary"]
current_path = inputs_path.joinpath(relative_custom_dictionary_path).resolve()
dest_path = results_path.joinpath(relative_custom_dictionary_path).resolve()
shutil.copyfile(current_path, dest_path)
resource_settings["custom_dictionary"] = str(dest_path)
if "token_refresh_cmd" in settings:
token_cmd_path = str(inputs_path.joinpath("..", "gettoken.py").resolve())
settings["token_refresh_cmd"] = f"python {token_cmd_path} --token_url http://localhost:5000/api/v1/authenticate"
# if "fuzzing_mode" in settings:
# if settings["fuzzing_mode"] == "directed-smoke-test" and fuzz:
# settings.pop("fuzzing_mode")
# if not fuzz:
# settings["fuzzing_mode"] = "directed-smoke-test"
print(f'> Writing engine settings to {output_engine_settings_path!s}')
output_engine_settings_path.write_text(json.dumps(settings,sort_keys=True, indent=4))
def compile_spec(config_path, results_path, restler_dll_path):
""" Compiles a specified api spec
@param restler_dll_path: The absolute path to the RESTler driver's dll
@type restler_dll_path: Str
@return: None
@rtype : None
"""
if not os.path.exists(results_path):
os.makedirs(results_path)
with usedir(results_path):
command = f"dotnet {restler_dll_path} compile {config_path}"
print(f'> {command}')
subprocess.run(command, shell=True, check=True)
def test_spec(fuzz, ip, port, host, use_ssl, fuzz_args, inputs_path, results_path, compile_path, restler_dll_path):
""" Runs RESTler's test mode on a specified Compile directory
@param ip: The IP of the service to test
@type ip: Str
@param port: The port of the service to test
@type port: Str
@param host: The hostname of the service to test
@type host: Str
@param use_ssl: If False, set the --no_ssl parameter when executing RESTler
@type use_ssl: Boolean
@param restler_dll_path: The absolute path to the RESTler driver's dll
@type restler_dll_path: Str
@return: None
@rtype : None
"""
command = (
f"dotnet {restler_dll_path} {'fuzz' if fuzz else 'test'} --grammar_file {compile_path.joinpath('grammar.py')} --dictionary_file {compile_path.joinpath('dict.json')}"
f" --settings {compile_path.joinpath('engine_settings.json')} {fuzz_args if fuzz_args is not None else ''}"
)
if not use_ssl:
command = f"{command} --no_ssl"
if ip is not None:
command = f"{command} --target_ip {ip}"
if port is not None:
command = f"{command} --target_port {port}"
if host is not None:
command = f"{command} --host {host}"
with usedir(results_path):
print(f'> {command}')
subprocess.run(command, shell=True, check=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--ip',
help='The IP of the service to test',
type=str, required=False, default='localhost')
parser.add_argument('--port',
help='The port of the service to test',
type=str, required=False, default='5000')
parser.add_argument('--restler_drop_dir',
help="The path to the RESTler drop",
type=str, required=True)
parser.add_argument('--use_ssl',
help='Set this flag if you want to use SSL validation for the socket',
action='store_true')
parser.add_argument('--host',
help='The hostname of the service to test',
type=str, required=False, default='localhost:5000')
parser.add_argument('--skip_download',
help='Skip downloading the OpenAPI Specification from the server',
action='store_true')
parser.add_argument('--skip_transform_config',
help='Skip transforming the config.template.json',
action='store_true')
parser.add_argument('--skip_compile',
help='Skip compiling the grammar',
action='store_true')
parser.add_argument('--fuzz',
help='Enables fuzzing (vs. testing)',
action='store_true')
parser.add_argument('--skip_fuzzing',
help='Skip running the fuzzer',
action='store_true')
parser.add_argument('--fuzz_only',
help='Only run the fuzzer; skipping all other steps',
action='store_true')
parser.add_argument('--grammar_transforms',
help='JSON file with transforms to perform on the grammar',
type=str, required=False, default=None)
parser.add_argument('--fuzz_args',
help='Args to pass to the fuzzer',
type=str, required=False, default=None)
args = parser.parse_args()
if args.fuzz_only is True:
if args.skip_fuzzing is True:
raise Exception('Both --fuzz_only and --skip_fuzzing cannot be set to True')
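        # Fuzz-only reuses the artifacts of a previous run, so the earlier pipeline stages are skipped.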
args.skip_download = args.skip_transform_config = args.skip_compile = True
# Generate paths
restler_dll_path = Path(os.path.abspath(args.restler_drop_dir)).joinpath('restler', 'Restler.dll')
base_path = Path(os.path.abspath(os.path.dirname(__file__)))
inputs_path = base_path.joinpath('inputs')
results_path = base_path.joinpath('results') #, time.strftime("%Y-%m-%d-%H-%M-%S"))
api_spec_path = results_path.joinpath('swagger.json')
compile_path = results_path.joinpath('Compile')
output_config_path = results_path.joinpath('config.json')
output_config_with_grammar_path = results_path.joinpath('config-with-grammar.json')
output_engine_settings_path = results_path.joinpath('engine_settings.json')
if not os.path.exists(results_path):
os.makedirs(results_path)
# Get swagger.json
if args.skip_download is not True:
download_spec('http://localhost:5000/swagger/v1/swagger.json', api_spec_path)
# Get a config with the abs paths filled in
if args.skip_transform_config is not True:
transform_template_config(output_config_path, inputs_path, results_path, compile_path)
transform_template_engine_settings(args.fuzz, output_engine_settings_path, inputs_path, results_path)
# Compile
if args.skip_compile is not True:
compile_spec(output_config_path, results_path, restler_dll_path.absolute())
# We have to compile again after we transform the grammar
if args.grammar_transforms is not None:
first_pass_compile_path = results_path.joinpath('Compile-First-Pass')
output_grammar_path = results_path.joinpath('transformed-grammar.json')
# Remove anything left from previous run
if os.path.exists(first_pass_compile_path):
shutil.rmtree(str(first_pass_compile_path))
# Move the first compile result someplace else
os.rename(compile_path, first_pass_compile_path)
# Actually transform the grammar
transform_grammar_json(output_grammar_path, Path(args.grammar_transforms), first_pass_compile_path)
# Make a new config.json pointing to the grammar
transform_post_compile_config(output_config_with_grammar_path, first_pass_compile_path, output_grammar_path)
# Compile again
compile_spec(output_config_with_grammar_path, results_path, restler_dll_path.absolute())
# Copy some files from the first pass that are not automatically added during the second pass
shutil.copyfile(first_pass_compile_path.joinpath('dict.json'), compile_path.joinpath('dict.json'))
shutil.copyfile(first_pass_compile_path.joinpath('engine_settings.json'), compile_path.joinpath('engine_settings.json'))
shutil.copyfile(output_grammar_path, compile_path.joinpath('grammar.json'))
# Test
if args.skip_fuzzing is not True:
test_spec(args.fuzz, args.ip, args.port, args.host, args.use_ssl, args.fuzz_args, inputs_path, results_path, compile_path, restler_dll_path.absolute())
print(f"Run complete.\nSee {results_path} for results.")
``` |
{
"source": "Joshua-Enrico/AirBnB_clone_v4",
"score": 3
} |
#### File: tests/test_api/test_pep8.py
```python
from datetime import datetime
import inspect
import models
import os
from models import state
from models.base_model import BaseModel
import pep8
import unittest
from api.v1 import app
from api.v1.views import states as test_state
from api.v1.views import amenities
from api.v1.views import cities
from api.v1.views import index
from api.v1.views import places_reviews
from api.v1.views import places_amenities
from api.v1.views import places
from api.v1.views import users
State = state.State
class TestStateDocs(unittest.TestCase):
"""Tests to check the documentation for all api files"""
@classmethod
def setUpClass(cls):
"""Set up for the doc tests"""
cls.state_f = inspect.getmembers(State, inspect.isfunction)
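        # Cache State's methods once; test_state_func_docstrings iterates over them below.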
def test_pep8_conformance_app(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/app.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_states(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/states.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_amenities(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/amenities.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_cities(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/cities.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_places_rev(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/places_reviews.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_places(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/places.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_users(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/users.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_index(self):
"""Test that models/state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/index.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_test_pep8(self):
"""Test that tests/test_models/test_state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['tests/test_api/test_pep8.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_places_amenities(self):
"""Test that tests/test_models/test_state.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['api/v1/views/places_amenities.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
# file docstring
    def test_state_module_docstring_app(self):
        """Test for the api/v1/app.py module docstring"""
        self.assertIsNot(app.__doc__, None,
                         "app.py needs a docstring")
        self.assertTrue(len(app.__doc__) >= 1,
                        "app.py needs a docstring")
    def test_state_class_docstring_state(self):
        """Test for the states view module docstring"""
        self.assertIsNot(test_state.__doc__, None,
                         "states.py needs a docstring")
        self.assertTrue(len(test_state.__doc__) >= 1,
                        "states.py needs a docstring")
    def test_state_class_docstring_amenities(self):
        """Test for the amenities view module docstring"""
        self.assertIsNot(amenities.__doc__, None,
                         "amenities.py needs a docstring")
        self.assertTrue(len(amenities.__doc__) >= 1,
                        "amenities.py needs a docstring")
    def test_state_class_docstring_cities(self):
        """Test for the cities view module docstring"""
        self.assertIsNot(cities.__doc__, None,
                         "cities.py needs a docstring")
        self.assertTrue(len(cities.__doc__) >= 1,
                        "cities.py needs a docstring")
    def test_state_class_docstring_index(self):
        """Test for the index view module docstring"""
        self.assertIsNot(index.__doc__, None,
                         "index.py needs a docstring")
        self.assertTrue(len(index.__doc__) >= 1,
                        "index.py needs a docstring")
    def test_state_class_docstring_rev(self):
        """Test for the places_reviews view module docstring"""
        self.assertIsNot(places_reviews.__doc__, None,
                         "places_reviews.py needs a docstring")
        self.assertTrue(len(places_reviews.__doc__) >= 1,
                        "places_reviews.py needs a docstring")
    def test_state_class_docstring_places(self):
        """Test for the places view module docstring"""
        self.assertIsNot(places.__doc__, None,
                         "places.py needs a docstring")
        self.assertTrue(len(places.__doc__) >= 1,
                        "places.py needs a docstring")
    def test_state_class_docstring_users(self):
        """Test for the users view module docstring"""
        self.assertIsNot(users.__doc__, None,
                         "users.py needs a docstring")
        self.assertTrue(len(users.__doc__) >= 1,
                        "users.py needs a docstring")
    def test_state_class_docstring_amenities_rev(self):
        """Test for the places_amenities view module docstring"""
        self.assertIsNot(places_amenities.__doc__, None,
                         "places_amenities.py needs a docstring")
        self.assertTrue(len(places_amenities.__doc__) >= 1,
                        "places_amenities.py needs a docstring")
    # docstring tests
def test_state_func_docstrings(self):
"""Test for the presence of docstrings in State methods"""
for func in self.state_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_index(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(index, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_user(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(users, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_places(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(places, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_states(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(test_state, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_place_rev(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(places_reviews, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_amenity_rev(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(places_amenities, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_cities(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(cities, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
def test_state_func_docstrings_amenities(self):
"""Test for the presence of docstrings in State methods"""
index_f = inspect.getmembers(amenities, inspect.isfunction)
for func in index_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
```
#### File: AirBnB_clone_v4/tests/test_console.py
```python
import unittest
from unittest.mock import patch
from io import StringIO
import pep8
import pep8 as pycodestyle
import os
import json
import console
import tests
import inspect
import models
from console import HBNBCommand
from models.base_model import BaseModel
from models.user import User
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from models.review import Review
from models.engine.file_storage import FileStorage
from models.engine.db_storage import DBStorage
Model = HBNBCommand
City = HBNBCommand
module_doc = HBNBCommand.__doc__
path1 = "console.py"
path2 = "tests/test_console.py"
HBNBCommand = console.HBNBCommand
class TestConsoleDocs(unittest.TestCase):
"""Class for testing documentation of the console"""
def test_pep8_conformance_console(self):
"""Test that console.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['console.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_test_console(self):
"""Test that tests/test_console.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['tests/test_console.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_console_module_docstring(self):
"""Test for the console.py module docstring"""
self.assertIsNot(console.__doc__, None,
"console.py needs a docstring")
self.assertTrue(len(console.__doc__) >= 1,
"console.py needs a docstring")
def test_HBNBCommand_class_docstring(self):
"""Test for the HBNBCommand class docstring"""
self.assertIsNot(HBNBCommand.__doc__, None,
"HBNBCommand class needs a docstring")
self.assertTrue(len(HBNBCommand.__doc__) >= 1,
"HBNBCommand class needs a docstring")
class TestBaseModelDocs(unittest.TestCase):
"""Test to check behaviors"""
@classmethod
def setUpClass(self):
"""setting up tests"""
self.base_funcs = inspect.getmembers(BaseModel, inspect.isfunction)
def test_pep8(self):
"""Testing pep8"""
for path in [path1,
path2]:
with self.subTest(path=path):
errors = pycodestyle.Checker(path).check_all()
self.assertEqual(errors, 0)
def test_module_docstring(self):
"""Test module docstring"""
self.assertIsNot(module_doc, None,
"base_model.py needs a docstring")
self.assertTrue(len(module_doc) > 1,
"base_model.py needs a docstring")
def test_class_docstring(self):
"""Test classes doctring"""
self.assertIsNot(BaseModel.__doc__, None,
"BaseModel class needs a docstring")
self.assertTrue(len(BaseModel.__doc__) >= 1,
"BaseModel class needs a docstring")
def test_func_docstrings(self):
"""test func dostrings"""
for func in self.base_funcs:
with self.subTest(function=func):
self.assertIsNot(
func[1].__doc__,
None,
"{:s} method needs a docstring".format(func[0])
)
self.assertTrue(
len(func[1].__doc__) > 1,
"{:s} method needs a docstring".format(func[0])
)
class ConsoleTest(unittest.TestCase):
"""testing console"""
@classmethod
def setUpClass(self):
"""setting class up"""
self.console = HBNBCommand()
def test_docstrings(self):
"""testing docstings"""
self.assertIsNotNone(console.__doc__)
self.assertIsNotNone(HBNBCommand.emptyline.__doc__)
self.assertIsNotNone(HBNBCommand.do_quit.__doc__)
self.assertIsNotNone(HBNBCommand.do_EOF.__doc__)
self.assertIsNotNone(HBNBCommand.do_create.__doc__)
self.assertIsNotNone(HBNBCommand.do_show.__doc__)
self.assertIsNotNone(HBNBCommand.do_destroy.__doc__)
self.assertIsNotNone(HBNBCommand.do_all.__doc__)
self.assertIsNotNone(HBNBCommand.do_update.__doc__)
self.assertIsNotNone(HBNBCommand.default.__doc__)
def test_non_exist_command(self):
"""testing a command that doesn't exist like goku"""
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("goku")
            self.assertEqual('*** Unknown syntax: goku\n',
                             f.getvalue())
def test_empty_line(self):
"""testing empty input"""
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("\n")
self.assertEqual('', f.getvalue())
@unittest.skipIf(type(models.storage) == DBStorage, "Testing DBStorage")
class CreateTest(unittest.TestCase):
"""testing command test in console"""
@classmethod
def setUpClass(self):
"""setting class up"""
self.console = HBNBCommand()
def test_create(self):
"""testing creat input"""
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("create")
self.assertEqual("** class name missing **\n",
f.getvalue())
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("create holbieees")
self.assertEqual("** class doesn't exist **\n",
f.getvalue())
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("create BaseModel")
HBNBCommand().onecmd("create User")
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("all User")
self.assertEqual(
'[[User]', f.getvalue()[:7])
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("create BaseModel")
self.assertRegex(f.getvalue(), '^[0-9a-f]{8}-[0-9a-f]{4}-[1-5]'
'[0-9a-f]{3}-[89ab][0-9a-f]{3}-'
'[0-9a-f]{12}$')
with patch('sys.stdout', new=StringIO()) as f:
HBNBCommand().onecmd("create User")
self.assertRegex(f.getvalue(), '^[0-9a-f]{8}-[0-9a-f]{4}-[1-5]'
'[0-9a-f]{3}-[89ab][0-9a-f]{3}-'
'[0-9a-f]{12}$')
```
#### File: tests/test_models/test_city.py
```python
from datetime import datetime
import inspect
from models import city
from models.base_model import BaseModel
import os
import pep8
import unittest
from sqlalchemy.orm.attributes import InstrumentedAttribute
City = city.City
class TestCityDocs(unittest.TestCase):
"""Tests to check the documentation and style of City class"""
@classmethod
def setUpClass(cls):
"""Set up for the doc tests"""
cls.city_f = inspect.getmembers(City, inspect.isfunction)
def test_pep8_conformance_city(self):
"""Test that models/city.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['models/city.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_pep8_conformance_test_city(self):
"""Test that tests/test_models/test_city.py conforms to PEP8."""
pep8s = pep8.StyleGuide(quiet=True)
result = pep8s.check_files(['tests/test_models/test_city.py'])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warnings).")
def test_city_module_docstring(self):
"""Test for the city.py module docstring"""
self.assertIsNot(city.__doc__, None,
"city.py needs a docstring")
self.assertTrue(len(city.__doc__) >= 1,
"city.py needs a docstring")
def test_city_class_docstring(self):
"""Test for the City class docstring"""
self.assertIsNot(City.__doc__, None,
"City class needs a docstring")
self.assertTrue(len(City.__doc__) >= 1,
"City class needs a docstring")
def test_city_func_docstrings(self):
"""Test for the presence of docstrings in City methods"""
for func in self.city_f:
self.assertIsNot(func[1].__doc__, None,
"{:s} method needs a docstring".format(func[0]))
self.assertTrue(len(func[1].__doc__) >= 1,
"{:s} method needs a docstring".format(func[0]))
class TestCity(unittest.TestCase):
"""Test the City class"""
def test_is_subclass(self):
"""Test that City is a subclass of BaseModel"""
city = City()
self.assertIsInstance(city, BaseModel)
self.assertTrue(hasattr(city, "id"))
self.assertTrue(hasattr(city, "created_at"))
self.assertTrue(hasattr(city, "updated_at"))
@unittest.skipIf(os.getenv('HBNB_TYPE_STORAGE') == 'db',
"Testing DBStorage")
def test_name_attr(self):
"""Test that City has attribute name, and it's an empty string"""
city = City()
self.assertTrue(hasattr(city, "name"))
self.assertEqual(city.name, "")
@unittest.skipIf(os.getenv('HBNB_TYPE_STORAGE') != 'db',
"Testing FileStorage")
def test_name_attr_db(self):
"""Test for DBStorage name attribute"""
city = City()
self.assertTrue(hasattr(City, "name"))
self.assertIsInstance(City.name, InstrumentedAttribute)
@unittest.skipIf(os.getenv('HBNB_TYPE_STORAGE') == 'db',
"Testing DBStorage")
def test_state_id_attr(self):
"""Test that City has attribute state_id, and it's an empty string"""
city = City()
self.assertTrue(hasattr(city, "state_id"))
self.assertEqual(city.state_id, "")
@unittest.skipIf(os.getenv('HBNB_TYPE_STORAGE') != 'db',
"Testing FileStorage")
def test_state_id_attr_db(self):
"""Test for DBStorage state_id attribute"""
city = City()
self.assertTrue(hasattr(City, "state_id"))
self.assertIsInstance(City.state_id, InstrumentedAttribute)
def test_to_dict_creates_dict(self):
"""test to_dict method creates a dictionary with proper attrs"""
c = City()
new_d = c.to_dict()
self.assertEqual(type(new_d), dict)
for attr in c.__dict__:
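            # Skip SQLAlchemy's internal bookkeeping attribute that DBStorage adds to instances.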
if attr is not "_sa_instance_state":
with self.subTest(attr=attr):
self.assertTrue(attr in new_d)
self.assertTrue("__class__" in new_d)
def test_to_dict_values(self):
"""test that values in dict returned from to_dict are correct"""
t_format = "%Y-%m-%dT%H:%M:%S.%f"
c = City()
new_d = c.to_dict()
self.assertEqual(new_d["__class__"], "City")
self.assertEqual(type(new_d["created_at"]), str)
self.assertEqual(type(new_d["updated_at"]), str)
self.assertEqual(new_d["created_at"], c.created_at.strftime(t_format))
self.assertEqual(new_d["updated_at"], c.updated_at.strftime(t_format))
def test_str(self):
"""test that the str method has the correct output"""
city = City()
string = "[City] ({}) {}".format(city.id, city.__dict__)
self.assertEqual(string, str(city))
``` |