metadata (dict) | text (string, 60–3.49M chars)
---|---
{
"source": "JKafka97/PythonForBeginners",
"score": 3
} |
#### File: 01-02_Loops/Homework/zviratka.py
```python
from random import randint

print('''Welcome to the fun and ever-popular game COWS AND BULLS!
Your task is to guess the four-digit number generated by the computer. ''')

dobytek = {"kráva": 0, "býk": 0}

def main():
    hodnota_pc = cislo_pc()
    while dobytek["býk"] != 4:
        # reset the counts for each new guess
        dobytek["kráva"] = 0
        dobytek["býk"] = 0
        odhad = typni_si()
        if hodnota_pc == odhad:
            print("You won.")
            break
        else:
            i = 0
            for jednotlivy_clen in odhad:
                if jednotlivy_clen == hodnota_pc[i]:
                    dobytek["býk"] += 1
                elif jednotlivy_clen in hodnota_pc:
                    dobytek["kráva"] += 1
                i += 1
            print("You have", dobytek["kráva"], "cows and", dobytek["býk"], "bulls")

def typni_si():
    cislo_uzivatel = ""
    while len(cislo_uzivatel) < 4:
        cislo = input("What is your guess? Enter one digit at a time: ")
        if len(cislo) > 1:
            print("You're a moron")
        else:
            if cislo in cislo_uzivatel:
                print('Fuck you! Z<NAME>')
            elif cislo not in cislo_uzivatel:
                cislo_uzivatel += cislo
    return cislo_uzivatel

def cislo_pc():
    cislo_pc = set()
    while len(cislo_pc) < 4:
        cislo_pc.add(randint(0, 9))
    cislo_str = ""
    for jednotliva_cisla in cislo_pc:
        cislo_str += str(jednotliva_cisla)
    return cislo_str

main()
```
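The script above implements the classic "cows and bulls" guessing game: a guessed digit in the correct position counts as a bull, while a digit that occurs in the secret number but in a different position counts as a cow. For example, if the computer picks 1234 and the player guesses 1352, the score is one bull (the leading 1) and two cows (3 and 2).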
#### File: JKafka97/PythonForBeginners/obsah_ctverce.py
```python
def main():
    try:
        strana = int(otazka())
        obsah = strana * strana
        print('The area of your square is', obsah, 'cm2')
    except ValueError:
        # not a whole number -- ask again
        main()

def otazka():
    return input('Enter the side length of the square in centimeters: ')

main()
``` |
{
"source": "jkahn/charticle",
"score": 3
} |
#### File: src/charticle/venn.py
```python
import attr
import matplotlib.pyplot as plt
import matplotlib_venn
from charticle import _validators
@attr.s(slots=True)
class FontSizes(object):
"""Utility class for font size tracking."""
title = attr.ib(default=20, validator=_validators.positive_int)
sets = attr.ib(default=14, validator=_validators.positive_int)
intersections = attr.ib(default=12, validator=_validators.positive_int)
@attr.s(slots=True)
class Venn2(object):
"""Object for a 2-circle Venn. Set attributes at init or by assignment.
:param str a_name:
:param str b_name: Label text for outside the A & B circles.
:param str a:
:param str b: Label text for the 1-member crescents.
:param str ab: Label text for the lenticular intersection of A & B.
:param str title: Text for the title of the plot.
:param palette: a color palette for the A & B sets.
:type palette: Venn2.Palette
:param fontsizes: the font sizes for various labels.
:type fontsizes: FontSizes
"""
@attr.s(repr_ns="Venn2", slots=True)
class Palette(object):
"""Container of color palette for both sets.
:param `a,b`: color names for the two sets.
:type `a,b`: legal html colornames or hex codes
:param alpha: color combination alpha for intersection.
:type alpha: float in [0,1]
TODO: add some default "constant" palettes.
"""
a, b = [attr.ib(default=n, validator=_validators.legal_color)
for n in ('red', 'green')]
alpha = attr.ib(default=0.4, validator=_validators.zero_to_one)
@attr.s(repr_ns="Venn2", slots=True)
class Sizes(object):
"""Utility class for shaping the Venn2."""
a, b, c, ab, normalize = [
attr.ib(default=1.0, validator=_validators.non_negative)
for _ in range(5)]
def to_dict(self):
return {
'10': self.a, '01': self.b, '11': self.ab,
}
a_name, b_name = [attr.ib(default=None,
validator=_validators.optional_string)
for n in ('A', 'B')]
a, b, ab = [attr.ib(default=None, validator=_validators.optional_string)
for n in ('a', 'b', 'ab')]
title = attr.ib(default=None, validator=_validators.optional_string)
sizes = attr.ib(default=attr.Factory(Sizes))
fontsizes = attr.ib(default=attr.Factory(FontSizes))
palette = attr.ib(default=attr.Factory(Palette))
def plot(self, ax=None):
"""Produce a plot on the specified axes.
Puts label strings in the right places and produces the figure.
ax: the axis on which to plot this diagram. Defaults to current axes.
"""
if ax is None:
ax = plt.axes()
attr.validate(self)
attr.validate(self.sizes)
attr.validate(self.palette)
attr.validate(self.fontsizes)
# Adjust the relative size of the areas so that there is more
# space in the outer ones.
v = matplotlib_venn.venn2(
# region sizes,
subsets=self.sizes.to_dict(), normalize_to=self.sizes.normalize,
# region colors,
set_colors=(self.palette.a, self.palette.b),
alpha=self.palette.alpha,
ax=ax)
# Strings 'A' and 'B' are the outer set label names declared
# by matplotlib_venn.
for label, val in (('A', self.a_name), ('B', self.b_name)):
t = v.get_label_by_id(label)
t.set_text("" if val is None else val)
t.set_fontsize(self.fontsizes.sets)
# Numeric strings are the labels for the intersecting regions
# declared by matplotlib_venn
for label, val in (
('10', self.a), ('01', self.b), ('11', self.ab)):
t = v.get_label_by_id(label)
if t is None:
continue
t.set_text("" if val is None else val)
t.set_fontsize(self.fontsizes.intersections)
if self.title:
ax.set_title(self.title, size=self.fontsizes.title)
return v
@attr.s(slots=True)
class Venn3(object):
"""Object for a 3-label venn. Set attributes at init or by assignment.
:param str a_name:
:param str b_name:
:param str c_name: Label text for the outer circles.
:param str a:
:param str b:
:param str c: Label text for the 1-member patches.
:param str ab:
:param str ac:
:param str bc: Label text for the 2-set-intersection patches.
:param str abc: Label text for the full 3-set intersection.
:param str title: Text for the title of the plot.
:param palette: a color palette for the sets.
:type palette: Venn3.Palette
:param sizes: the region sizes (relative to 1.0).
:type sizes: Venn3.Sizes
:param fontsizes: the font sizes for various labels.
:type fontsizes: FontSizes
"""
@attr.s(repr_ns="Venn3", slots=True)
class Sizes(object):
"""Utility class for shaping the Venn3."""
a, b, c, ab, ac, bc, abc, normalize = [
attr.ib(default=1.0, validator=_validators.non_negative)
for _ in range(8)]
def set_double_weight(self, weight):
self.bc = self.ac = self.ab = weight
return self
def set_single_weight(self, weight):
self.a = self.b = self.c = weight
return self
def to_dict(self):
return {
'100': self.a, '010': self.b, '001': self.c,
'011': self.bc, '101': self.ac, '110': self.ab,
'111': self.abc
}
@attr.s(repr_ns="Venn3", slots=True)
class Palette(object):
"""Container of color palette for all 3 items.
:param `a,b,c`: color names for the three sets.
:type `a,b,c`: legal html colornames or hex codes
:param alpha: color combination alpha for intersections.
:type alpha: float in [0,1]
TODO: add some default "constant" palettes.
"""
a, b, c = [attr.ib(default=n, validator=_validators.legal_color)
for n in ('red', 'green', 'blue')]
alpha = attr.ib(default=0.4, validator=_validators.zero_to_one)
a_name, b_name, c_name = [attr.ib(default=None,
validator=_validators.optional_string)
for n in ('A', 'B', 'C')]
a, b, c = [attr.ib(default=None, validator=_validators.optional_string)
for n in ('a', 'b', 'c')]
ab, bc, ac = [attr.ib(default=None, validator=_validators.optional_string)
for n in ('a & b', 'b & c', 'a & c')]
abc = attr.ib(default=None,
validator=_validators.optional_string)
title = attr.ib(default=None, validator=_validators.optional_string)
sizes = attr.ib(default=attr.Factory(Sizes))
fontsizes = attr.ib(default=attr.Factory(FontSizes))
palette = attr.ib(default=attr.Factory(Palette))
def plot(self, ax=None):
"""Produce a plot on the specified axes.
Puts label strings in the right places and produces the figure.
ax: the axis on which to plot this diagram. Defaults to current axes.
"""
if ax is None:
ax = plt.axes()
attr.validate(self)
attr.validate(self.sizes)
attr.validate(self.palette)
attr.validate(self.fontsizes)
# Adjust the relative size of the areas so that there is more
# space in the outer ones.
v = matplotlib_venn.venn3(
# region sizes,
subsets=self.sizes.to_dict(), normalize_to=self.sizes.normalize,
# region colors,
set_colors=(self.palette.a, self.palette.b, self.palette.c),
alpha=self.palette.alpha,
ax=ax)
# String 'A', 'B', 'C', are the outer set label names declared
# by matplotlib_venn.
for label, val in (('A', self.a_name), ('B', self.b_name),
('C', self.c_name)):
t = v.get_label_by_id(label)
t.set_text("" if val is None else val)
t.set_fontsize(self.fontsizes.sets)
# Numeric strings are the labels for the intersecting regions
# declared by matplotlib_venn
for label, val in (
('100', self.a), ('010', self.b), ('001', self.c),
('110', self.ab), ('011', self.bc), ('101', self.ac),
('111', self.abc)):
t = v.get_label_by_id(label)
if t is None:
continue # no such region.
t.set_text("" if val is None else val)
t.set_fontsize(self.fontsizes.intersections)
if self.title:
ax.set_title(self.title, size=self.fontsizes.title)
return v
``` |
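A minimal usage sketch for the classes above, assuming `charticle` is installed and the module is importable as `charticle.venn` (attrs generates the keyword constructor from the attribute definitions shown):
```python
import matplotlib.pyplot as plt
from charticle.venn import Venn3

# Label the three sets and their full intersection, then draw on the current axes.
v = Venn3(a_name="Python", b_name="Statistics", c_name="Domain expertise",
          abc="Data science", title="Overlapping skills")
v.plot()
plt.show()
```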
{
"source": "J-Kahn/PSID",
"score": 3
} |
#### File: J-Kahn/PSID/download.py
```python
import requests
import lxml.html
import os
import os.path
import zipfile
import sas
import gc

# Keep track of starting directory for convenience
start_dir = os.getcwd()
target_dir = '/home/PSID'  # Enter the directory you want the PSID files in
user = ''      # Enter your user name; you must register at psidonline.isr.umich.edu
password = ''  # Enter the associated password
login_url = 'http://simba.isr.umich.edu/u/Login.aspx'  # Login URL

# Make target directory if it does not exist
if not os.path.exists(target_dir):
    os.makedirs(target_dir)

# Go to target directory
os.chdir(target_dir)

# Start request session and go to login URL
session = requests.session()

# Pull the VIEWSTATE and EVENTVALIDATION variables out of the login form
start = session.get(login_url)
html = start.text
root = lxml.html.fromstring(html)
EVENTVALIDATION = root.xpath('//input[@name="__EVENTVALIDATION"]')[0].attrib['value']
VIEWSTATE = root.xpath('//input[@name="__VIEWSTATE"]')[0].attrib['value']

acc_pwd = {'ctl00$ContentPlaceHolder1$Login1$UserName': user,
           'ctl00$ContentPlaceHolder1$Login1$Password': password,
           'ctl00$ContentPlaceHolder1$Login1$LoginButton': 'Log In',
           '__EVENTTARGET': '',
           '__EVENTARGUMENT': '',
           '__VIEWSTATE': VIEWSTATE,
           '__EVENTVALIDATION': EVENTVALIDATION
           }

# Send login message to PSID site
session.post(login_url, data=acc_pwd)

# Check for login
z = session.get('http://simba.isr.umich.edu/data/data.aspx')
tf2 = 'Logout' in z.content
print 'Successful login: ' + str(tf2)

# File years, numbers and labels
file_year = range(1968, 1998) + range(1999, 2012, 2)
request_numbers = [1056] + range(1058, 1083) + range(1047, 1052) + [1040, 1052, 1132, 1139, 1152, 1156]
request_start = 'http://simba.isr.umich.edu/Zips/GetFile.aspx?file='
file_start = 'FAM'

# Function to download a PSID zip file
def download_psid(number, local_filename, sessions):
    # Get the file using requests
    r = sessions.get(request_start + number, stream=True)
    with open(local_filename, 'wb') as f:
        # Write it out in chunks in case it's big
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                f.flush()
    return local_filename

# Extract PSID archives using psid_unzip. extractall=True will also extract STATA .do files, etc.
def psid_unzip(filename, extractall=False):
    zfile = zipfile.ZipFile(filename)
    for name in zfile.namelist():
        # Only take out the files we want
        if '.sas' in name or '.txt' in name or '.pdf' in name or extractall == True:
            (dirname, filename) = os.path.split(name)
            if '.pdf' in name:
                dirname = dirname + "Codebooks"  # Different directory for codebooks
            if '.txt' in name:
                nascii = name  # Keep track of ascii name
            if '.sas' in name:
                nsas = name  # Keep track of sas name
            print "Decompressing " + filename + " on " + dirname
            if not dirname == '':
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
            zfile.extract(name, dirname)  # Extract file
    return (nsas, nascii)

for i in range(0, len(request_numbers)):
    print 'PSID year: ' + str(file_year[i])
    file_name = file_start + str(file_year[i]) + '.zip'
    # Grab file
    x = download_psid(str(request_numbers[i]), file_name, session)
    print "Downloaded"
    # Unzip
    nsas, nascii = psid_unzip(file_name)
    print "Unzipped"
    # Turn it into a .csv
    sas.sas_to_csv(nsas, nascii, file_start + str(file_year[i]) + '.csv')
    print "Converted to CSV"
    # Cleanup
    os.remove(nsas)
    os.remove(nascii)
    os.remove(file_name)
    print "Deleted"
    gc.collect()

os.chdir(start_dir)
``` |
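Since `file_year` and `request_numbers` are parallel lists that are always indexed together, the download loop can also be written with `zip` to make the pairing explicit; a sketch of the equivalent loop head, keeping the script's Python 2 style:
```python
for year, number in zip(file_year, request_numbers):
    print 'PSID year: ' + str(year)
    file_name = file_start + str(year) + '.zip'
    download_psid(str(number), file_name, session)
    # ... unzip, convert and clean up exactly as in the loop above
```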
{
"source": "jkain88/django-wordcount",
"score": 3
} |
#### File: django-wordcount/wordcount/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
import operator

def home(request):
    return render(request, 'home.html')

def count(request):
    fulltext = request.GET['fulltext']
    wordlist = fulltext.split()
    wordcheck = {}
    for word in wordlist:
        if word.isalpha():
            if word in wordcheck:
                wordcheck[word] += 1
            else:
                wordcheck[word] = 1
    sorted_words = sorted(wordcheck.items(), key=operator.itemgetter(1), reverse=True)
    return render(request, 'count.html',
                  {'fulltext': fulltext, 'count': len(wordlist), 'wordcheck': sorted_words})

def about(request):
    return render(request, 'about.html')
``` |
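For context, these views would typically be exposed through a URLconf along these lines; the actual `urls.py` is not part of this snippet, so the routes and names below are assumptions:
```python
from django.urls import path
from wordcount import views

urlpatterns = [
    path('', views.home, name='home'),
    path('count/', views.count, name='count'),
    path('about/', views.about, name='about'),
]
```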
{
"source": "JKakaofanatiker/ChickennuggetBot",
"score": 3
} |
#### File: JKakaofanatiker/ChickennuggetBot/main.py
```python
import discord

print("Welcome!")
client = discord.Client()

@client.event
async def on_ready():
    print('We have logged in as {0.user}'.format(client))
    await client.change_presence(activity=discord.Game(name="Sending Nuggets"))

@client.event
async def on_message(message):
    if message.author == client.user:
        return
    if message.content.startswith('Chickennugget'):
        await message.channel.send('C h i c k e n n u g g e t')
        await message.channel.send('https://cdn.discordapp.com/emojis/833678092282101780.png?v=1')
        print("Nuggets sent!")
    if message.content.startswith('Kfc'):
        await message.channel.send('KFC is delicious!')

client.run('Your token here')
``` |
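The bot above targets the pre-2.0 discord.py API, where `discord.Client()` takes no arguments. On discord.py 2.x the same bot needs the message-content intent enabled; a sketch of the equivalent setup under that version assumption:
```python
import discord

intents = discord.Intents.default()
intents.message_content = True  # required to read message.content on discord.py 2.x
client = discord.Client(intents=intents)
```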
{
"source": "jkalish14/AlgoTrading_Colab",
"score": 3
} |
#### File: algotradingcolab/db/populate_trading_days.py
```python
from database import DataBase
import config
import alpaca_trade_api as tradeapi
from datetime import date, timedelta

def days_in_range(d1: date, d2: date):
    return [d1 + timedelta(days=x) for x in range((d2 - d1).days + 1)]

# Initialize the alpaca API
API_ENV = "Paper"
alpaca_api = config.API_SETTINGS["Alpaca"]
api_settings = alpaca_api[API_ENV]
api = tradeapi.REST(api_settings["KEY"], alpaca_api["Secret_Key"], api_settings["URL"], api_version='v2')

# Get the market calendar from Alpaca
rv = api.get_calendar()
dates_dict = {day.date.date(): {"open": day.open,
                                "session_close": day.session_close,
                                "close": day.close,
                                "session_open": day.session_open}
              for day in rv}
open_date_list = list(dates_dict.keys())
all_dates = days_in_range(open_date_list[0], open_date_list[-1])

vals = []
for day in all_dates:
    is_trading_day = day in open_date_list
    if is_trading_day:
        day_obj = dates_dict[day]
        val = (day, is_trading_day, day_obj["close"], day_obj["open"], day_obj["session_close"], day_obj["session_open"])
    else:
        val = (day, is_trading_day, None, None, None, None)
    vals.append(val)

# Initialize the DB and write to it
db = DataBase(config.DB_ACCESS[config.DB_LOCATION])
sql_cmd = """
    INSERT INTO trading_days
    (date, market_open, close_time, open_time, session_close, session_open)
    VALUES %s
    ON CONFLICT (date) DO NOTHING
    """
db.execute_values(sql_cmd, vals)
db.close_cursor()
db.commit()
db.close_connection()
```
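Note that `days_in_range` is inclusive of both endpoints, so the gap-filling loop above sees every calendar day between the first and last entry of the market calendar, for example:
```python
from datetime import date

days_in_range(date(2021, 1, 1), date(2021, 1, 3))
# -> [date(2021, 1, 1), date(2021, 1, 2), date(2021, 1, 3)]
```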
#### File: AlgoTrading_Colab/algotradingcolab/stockScreener.py
```python
from datetime import date
import importlib
from pandas.core.indexes import period
import talib
# Standard Python packages
import numpy as np
import pandas as pd
# # Standard plotly imports
from plotly.subplots import make_subplots
import plotly.graph_objs as go
import dash
from dash import dcc
from dash import html
from dash.dependencies import Input, Output
# Our Libraries
from db import config
from db.database import DataBase
## Initialize the DB
db = DataBase(config.DB_ACCESS[config.DB_LOCATION])
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Create the dropdown lists
db.execute("SELECT id, name, symbol from stocks")
rv = db.cursor.fetchall()
ticker_dict = { entry[2] : entry for entry in rv}
ticker_dropdown_options = [{"label": ticker, "value" : ticker} for ticker in list(ticker_dict)]
allowable_time_frames = ["1Min"]
time_frame_dropdown_options = [{"label": time_frame, "value" : time_frame} for time_frame in allowable_time_frames]
technical_periods_dropdown_options = [{"label" : i, "value" : i} for i in range(5,500)]
app.layout = html.Div(children=[
## Top Section
html.Div([
html.H1("Stock Screener "),
html.H3(id="company-name")
], style = {"textAlign" : "center"} ),
html.Div([
html.Div([
html.Label("Ticker:"),
dcc.Dropdown(
id='ticker-dropdown',
options=ticker_dropdown_options,
value="AAPL"
)], style={"width" : 100, "margin-right": "30px"}),
html.Div([
html.Label("Time-Frame:"),
dcc.Dropdown(
id='time-frame-dropdown',
options=time_frame_dropdown_options,
value= "1Min",
)], style={"width" : 100, "margin-right": "30px"}),
html.Div([
html.Label("Number Of Periods:"),
dcc.Input(
id='num-periods-input',
type='number',
value=200,
debounce=True
)], style={"width" : 200})
],
style={"columnCount" : 3, "display" : "flex", "justify-content" : "center"}),
## Graph
html.Div(
id="graph-container",
children=dcc.Graph(id='quant-chart',
style={'height' : 650}),
style={'backgroundColor' : 'rgba(250,250,250,1)'}
),
## Bottom Technical Analysis settings
html.Div([
html.Center([html.H3("Technical Analysis Settings")]),
html.Div(
[
html.Div([
html.Div([
html.Label("SMA-Periods:", style={"height":40}),
html.Label("EMA-Periods:", style={"height":40}),
html.Label("Bollinger Bands Period:", style={"height":40})
], style={"width" : 180}),
html.Div([
dcc.Dropdown(
id='sma-periods-dropdown',
multi = True,
options = technical_periods_dropdown_options,
value = [100]
),
dcc.Dropdown(
id="ema-periods-dropdown",
multi = True,
options = technical_periods_dropdown_options,
value = [20]
),
dcc.Input(
id='bb-bands-period-dropdown',
type='number',
value = 20,
debounce=True,
),
], style={"width" : 210})
],
style={"columnCount":2, "display" : "flex", "height" : 600, "margin-right": "30px"}),
html.Div([
html.Div([
html.Label("MACD periods:", style={"height":40}),
html.Label("MACD Signal Period:", style={"height":40})
], style={"width" : 150}),
html.Div([
dcc.Dropdown(
id="macd-periods-dropdown",
multi = True,
options = technical_periods_dropdown_options,
value = [20, 12]
),
dcc.Input(
id='macd-signal-period',
type='number',
value=9,
debounce=True
)
], style={"width" : 210})
], style={"columnCount":2, "display" : "flex", "height" : 400 })
], style={"columnCount" : 2, "display" : "flex", "justify-content" : "center"})
])
])
@app.callback(
[Output('quant-chart','figure'),
Output('company-name', 'children')],
[Input('ticker-dropdown', 'value'),
Input('time-frame-dropdown', 'value'),
Input('num-periods-input', 'value'),
Input('sma-periods-dropdown', 'value'),
Input('ema-periods-dropdown', 'value'),
Input('bb-bands-period-dropdown', 'value'),
Input('macd-periods-dropdown', 'value'),
Input('macd-signal-period', 'value')])
def update_plot(ticker : str,
time_frame : str,
periods : int,
sma_periods : list[int] ,
ema_periods : list[int] ,
bb_band_periods: int ,
macd_periods: list[int] ,
macd_signal_period : int,
):
if ticker is None: ticker = "AAPL"
if time_frame is None : time_frame = "1Day"
if periods is None: periods = 200
if not sma_periods: sma_periods = [100]
if not ema_periods: ema_periods = [20]
if bb_band_periods is None: bb_band_periods = 20
if not macd_periods or len(macd_periods) < 2: macd_periods = [20, 12]
if macd_signal_period is None : macd_signal_period = 9
max_ta_periods = max(sma_periods + ema_periods + [periods, bb_band_periods, macd_signal_period] + macd_periods)
# Get requested num of points
sql_cmd = f"""
SELECT * FROM price_minute
WHERE stock_id = {ticker_dict[ticker][0]}
ORDER BY date_time DESC
LIMIT {periods + max_ta_periods }
"""
db.execute(sql_cmd)
rv = db.cursor.fetchall()
data = { pd.Timestamp(p[1]) : {"o" : p[2], "h" : p[3], "l" : p[4], "c" : p[5], "v" : p[6]} for p in rv}
df = pd.DataFrame(data.values(), data.keys())
df.sort_index(inplace = True)
# # Create the plot with all of the traces
fig = make_subplots(rows=4, row_heights=[0.2, 0.6, 0.2, 0.2], vertical_spacing=0, horizontal_spacing=0, shared_xaxes=True)
# Add to the top subplot
dates = df.index
rsi = talib.RSI(df.c)
fig.add_traces(data=[go.Scatter(x=dates, y=rsi, name="RSI", line_width=0.7, showlegend=False),
go.Scatter(x=dates, y= [70]*len(dates), line=dict(color='black', width=0.5), showlegend=False, hoverinfo='skip'),
go.Scatter(x=dates, y= [30]*len(dates), line=dict(color='black', width=0.5), showlegend=False, hoverinfo='skip'),
go.Scatter(x=dates, y= [50]*len(dates), line=dict(color='black', width=0.5 , dash='dash'), showlegend=False, hoverinfo='skip')],
rows=1,
cols=1
)
# # Add to the middle subplot
bb_high, bb_mid, bb_low = talib.BBANDS(df.c, timeperiod=bb_band_periods)
trace_data=[go.Candlestick(x=dates,open=df.o, high=df.h, low=df.l, close=df.c, name=ticker),
go.Scatter(x=dates, y=bb_high, name="Bollinger Bands", line_width=0.5, line_color='rgba(164, 224, 248, 1)', legendgroup="bbands"),
go.Scatter(x=dates, y=bb_low, name="low", fill='tonexty', line_width=0.5, line_color='rgba(164, 224, 248, 1)', fillcolor='rgba(164, 224, 248, 0.3)', legendgroup="bbands", showlegend=False, hoverinfo='skip'),
go.Scatter(x=dates, y=bb_mid, name="mean", line_width = 0.5, line_color = 'rgba(164, 224, 255, 1)', legendgroup="bbands",showlegend=False, hoverinfo='skip')
]
trace_data.extend([go.Scatter(x=dates, y=talib.SMA(df.c, per), name=f"SMA{per}", line_width=0.7) for per in sma_periods])
trace_data.extend([go.Scatter(x=dates, y=talib.EMA(df.c, per), name=f"EMA{per}", line_width=0.7) for per in ema_periods])
fig.add_traces(data=trace_data, rows=2, cols=1)
# # Add Volume plot
volume_data = [go.Bar(x=dates, y=df.v, name="Volume", showlegend=False),
go.Scatter(x=dates, y=talib.OBV(df.c, df.v), name="OBV", line_width=0.5)]
fig.add_traces(data=volume_data, rows=3, cols=1)
# # # Add to the bottom subplot
macd, macdsignal, macdhist = talib.MACD(df.c, fastperiod = min(macd_periods), slowperiod = max(macd_periods), signalperiod = macd_signal_period)
gtz_mask = (macdhist > 0).to_numpy()
ltz_mask = (macdhist <= 0).to_numpy()
fig.add_traces(data=[go.Scatter(x=dates, y=macd, name=f"MACD({max(macd_periods)},{min(macd_periods)}, {macd_signal_period})", line_width=0.7, line_color="black", legendgroup="macd"),
go.Scatter(x=dates, y=macdsignal, name=f"Signal({macd_signal_period})", line_width=0.7, line_color="red", showlegend=False, legendgroup="macd"),
go.Bar(x=dates[gtz_mask], y=macdhist[gtz_mask], marker=dict(color='green'), showlegend=False, hoverinfo='skip'),
go.Bar(x=dates[ltz_mask], y=macdhist[ltz_mask], marker=dict(color='red'), showlegend=False, hoverinfo='skip')],
rows=4,
cols=1)
fig.update_layout(
plot_bgcolor='rgba(250,250,250,1)',
paper_bgcolor='rgba(250,250,250,1)',
hovermode='x unified',
legend=dict(orientation="h", xanchor="center", y=1.1, x=0.5),
)
fig.update_xaxes(rangeslider_visible=False, visible=True, range = (dates[max_ta_periods], dates[-1]))
fig.update_yaxes(row=1, col=1, title="RSI", tickvals = [30, 50, 70])
fig.update_yaxes(row=2, col=1, title="Share Price")
fig.update_yaxes(row=3, col=1, title="Volume")
fig.update_yaxes(row=4, col=1, title="MACD")
return [fig, ticker_dict[ticker][1]]
if __name__ == '__main__':
app.run_server(debug=True)
```
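The callback expects the `price_minute` rows to unpack into an OHLCV frame indexed by timestamp. A small synthetic frame of the same shape (column names `o, h, l, c, v`, as used above) is enough to exercise the TA-Lib calls outside the Dash app; the values below are made up for illustration:
```python
import numpy as np
import pandas as pd
import talib

idx = pd.date_range("2021-01-04 09:30", periods=300, freq="1min")
close = 100 + np.cumsum(np.random.randn(300))
df = pd.DataFrame({"o": close, "h": close + 0.5, "l": close - 0.5,
                   "c": close, "v": np.random.randint(1_000, 5_000, 300)},
                  index=idx)

rsi = talib.RSI(df.c)  # 14-period RSI by default
macd, signal, hist = talib.MACD(df.c, fastperiod=12, slowperiod=26, signalperiod=9)
```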
#### File: AlgoTrading_Colab/tests/test_api.py
```python
from algotradingcolab.db import config
from algotradingcolab.helpers.decorators import time_func_execution
import alpaca_trade_api as tradeapi

@time_func_execution
def get_all_stocks(api: tradeapi.REST):
    return api.list_assets()

def init_alapca_api() -> tradeapi.REST:
    # Initialize the alpaca API
    API_ENV = "Paper"
    alpaca_api = config.API_SETTINGS["Alpaca"]
    api_settings = alpaca_api[API_ENV]
    api = tradeapi.REST(api_settings["KEY"], alpaca_api["Secret_Key"], api_settings["URL"], api_version='v2')
    return api

def test_init_alapca_api():
    api = init_alapca_api()
    account = api.get_account()
    assert account is not None
``` |
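`time_func_execution` comes from `algotradingcolab.helpers.decorators`, which is not included in this snippet. A plausible minimal implementation of such a timing decorator, offered as a sketch rather than the repo's actual code:
```python
import time
from functools import wraps

def time_func_execution(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        print(f"{func.__name__} executed in {time.perf_counter() - start:.3f}s")
        return result
    return wrapper
```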
{
"source": "jkalish14/UserInputParser",
"score": 4
} |
#### File: UserInputParser/inputparser/inputparser.py
```python
from typing import Any, Tuple
from inputparser.validators import *
class UserInputError(Exception):
"""
Error that gets raised when a user input is invalid
"""
def __init__(self, message: str = ""):
"""
Create an instance of the error
:param message: message describing the error
"""
self.message = message
super().__init__(message)
class InputParser:
"""
InputParser class enables the checking of user's inputs while constructing helpful
error messages in the case the input is invalid.
"""
def __init__(self, default_val: Any, allowable_types: Union[type, Tuple], constraint_func: Callable = None,
constraint_args=None):
"""
Create a new User Input Checker to ensure inputs from the user are valid.
:param default_val: the value that the value field falls back to if there is an error with the user's input
:param allowable_types: the expected type(s) of the user's input
:param constraint_func: function handle that will be used to validate the user's input
:param constraint_args: dictionary of arguments to be passed to the constraint_func function
.. note::
dictionary keys must match constraint_func function keyword arguments.
"""
if constraint_args is None:
constraint_args = {}
self.__default_arg = default_val
self.__allowable_types = allowable_types
self.__check_callback: Callable = constraint_func
self.__callback_args: dict = constraint_args
self.__error_str: str = ""
self.__user_arg = None
self.__valid: bool = False
self.__value: Any = None
@property
def value(self) -> Any:
"""
When the user's input is valid, this field returns the user's argument.
When the user's input is invalid, it returns the default argument
:return: user's input if valid, default value if invalid
"""
return self.__value
@property
def default(self) -> Any:
"""
Get the default argument provided
:return: specified default argument
"""
return self.__default_arg
@property
def valid(self) -> bool:
"""
Get the boolean flag indicating if the user's input was valid.
.. note::
This field is None until is_valid() is called
:return: boolean flag indicating valid user argument
"""
return self.__valid
@property
def user_arg(self) -> Any:
"""
Get the original argument provided by the user
:return: user's input argument
"""
return self.__user_arg
@property
def error_str(self) -> str:
"""
Get the error string that is constructed when a user's input is invalid
:return: error string
"""
return self.__error_str
def __validate_type(self, user_arg: Union[Any, list]) -> bool:
# Check each element of a list
if isinstance(user_arg, list):
does_pass = are_valid_elements(user_arg, isinstance, [self.__allowable_types])
else:
does_pass = isinstance(user_arg, self.__allowable_types)
return does_pass
def __print_error(self, supress):
if not supress:
raise UserInputError(self.__error_str)
def is_valid(self, user_arg: Any, supress_error: bool = False) -> bool:
"""
Validate the user's argument. If invalid create the error string and
optionally raise an error.
:param user_arg: argument from the user
:param supress_error: boolean flag to supress the raising of an error on invalid input
"""
# Save the provided user arg in case it is needed for error messages
self.__user_arg = user_arg
self.__value = user_arg
self.__valid = True
# Compare the user_arg with the list of acceptable types
# If no check callback is provided, the type verification
# is sufficient
if self.__validate_type(user_arg) is False:
warning_string = [f"Provided value of \'{user_arg}\' is of type \'{type(user_arg)}\'. \n"]
warning_string += ["\t" * 1 + f"Value for this field must be one of the following types: \n"]
warning_string += ["\t" * 2 + f"- {t} \n" for t in self.__allowable_types] if \
isinstance(self.__allowable_types, list) else \
["\t" * 5 + f"- {self.__allowable_types} \n"]
self.__error_str = "".join(warning_string)
self.__valid = False
self.__value = self.__default_arg
self.__print_error(supress_error)
elif self.__check_callback is not None:
self.__valid = self.__check_callback(user_arg, **self.__callback_args)
if self.__valid is False:
# Create the warning
warning_string = [
f"Provided value of \'{user_arg}\' did not meet the constraints enforced by: "
f"{self.__check_callback.__name__}(). \n"]
if self.__callback_args is not None:
warning_string += ["\t" * 1 + "Arguments passed to constraint function: \n"]
warning_string += ["\t" * 2 + f"- {k} : {v.__name__ if isinstance(v, Callable) else v} \n" for k, v
in self.__callback_args.items()]
# Join the lists of messages together and assign
self.__error_str = "".join(warning_string)
self.__value = self.__default_arg
self.__print_error(supress_error)
# We only get here if everything is okay
return self.__valid
``` |
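A short usage sketch for `InputParser`, assuming the class is importable from the package root (the exact import path may differ) and using a plain constraint function instead of the bundled validators:
```python
from inputparser import InputParser  # import path is an assumption

def within(value, low, high):
    return low <= value <= high

parser = InputParser(default_val=10, allowable_types=(int, float),
                     constraint_func=within,
                     constraint_args={"low": 0, "high": 100})

parser.is_valid(150, supress_error=True)  # constraint fails, no exception raised
print(parser.valid)      # False
print(parser.value)      # 10 -- falls back to default_val
print(parser.error_str)  # names the constraint function that rejected the input
```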
{
"source": "jkalkhof/MediaServer",
"score": 3
} |
#### File: media_server/lib/file_utils.py
```python
import os
import glob
import sys
import re
import time
import subprocess
import datetime
import operator
import requests, json
from sys import stdout, exit
from os import listdir, rename, _exit, path, makedirs, sep, walk
from os.path import isfile, join, isdir, basename

def ext(fname):
    ext = fname.split('.')[-1:][0]
    return ext

def name(fname):
    name = '.'.join(fname.split('.')[0:-1])
    return name

def list_files(folder):
    list_of_files = []
    stdout.write("list_files: Indexing " + folder + "\n")
    for root, dirs, files in walk(folder):
        path = root.split(sep)
        for file in files:
            if file[0] != '.':
                list_of_files.append({
                    'path': '/'.join(path) + '/' + file,
                    'file': file,
                    'name': name(file)
                })
    stdout.write('\n')
    list_of_files.sort(key=operator.itemgetter('name'))
    return list_of_files

def rescan_base_dir(file_path=None, filename=None):
    if file_path is not None:
        if filename is not None:
            files_list = []
            files_list.append({
                'path': file_path + '/' + filename,
                'file': filename,
                'name': filename
            })
        else:
            files_list = list_files(file_path)
        return files_list
``` |
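Usage sketch for the indexing helpers above; the module path is inferred from the file layout and the directory is only an example:
```python
from media_server.lib.file_utils import rescan_base_dir

# Index every non-hidden file under a media root and print name -> path pairs.
for entry in rescan_base_dir("/srv/media"):
    print(entry["name"], "->", entry["path"])
```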
{
"source": "jkalleberg/NEAT",
"score": 2
} |
#### File: NEAT/source/SequenceContainer.py
```python
import random
import copy
import pathlib
import bisect
import pickle
import sys
import numpy as np
from Bio.Seq import Seq
from source.neat_cigar import CigarString
from source.probability import DiscreteDistribution, poisson_list
# TODO This whole file is in desperate need of refactoring
"""
Constants needed for analysis
"""
MAX_ATTEMPTS = 100 # max attempts to insert a mutation into a valid position
MAX_MUTFRAC = 0.3 # the maximum percentage of a window that can contain mutations
NUCL = ['A', 'C', 'G', 'T']
TRI_IND = {'AA': 0, 'AC': 1, 'AG': 2, 'AT': 3, 'CA': 4, 'CC': 5, 'CG': 6, 'CT': 7,
'GA': 8, 'GC': 9, 'GG': 10, 'GT': 11, 'TA': 12, 'TC': 13, 'TG': 14, 'TT': 15}
NUC_IND = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
ALL_TRI = [NUCL[i] + NUCL[j] + NUCL[k] for i in range(len(NUCL)) for j in range(len(NUCL)) for k in range(len(NUCL))]
ALL_IND = {ALL_TRI[i]: i for i in range(len(ALL_TRI))}
# DEBUG
IGNORE_TRINUC = False
# percentile resolution used for fraglen quantizing
COV_FRAGLEN_PERCENTILE = 10.
LARGE_NUMBER = 9999999999
"""
DEFAULT MUTATION MODELS
"""
DEFAULT_1_OVERALL_MUT_RATE = 0.001
DEFAULT_1_HOMOZYGOUS_FREQ = 0.010
DEFAULT_1_INDEL_FRACTION = 0.05
DEFAULT_1_INS_VS_DEL = 0.6
DEFAULT_1_INS_LENGTH_VALUES = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
DEFAULT_1_INS_LENGTH_WEIGHTS = [0.4, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05, 0.034, 0.033, 0.033]
DEFAULT_1_DEL_LENGTH_VALUES = [1, 2, 3, 4, 5]
DEFAULT_1_DEL_LENGTH_WEIGHTS = [0.3, 0.2, 0.2, 0.2, 0.1]
example_matrix_1 = [[0.0, 0.15, 0.7, 0.15],
[0.15, 0.0, 0.15, 0.7],
[0.7, 0.15, 0.0, 0.15],
[0.15, 0.7, 0.15, 0.0]]
DEFAULT_1_TRI_FREQS = [copy.deepcopy(example_matrix_1) for _ in range(16)]
DEFAULT_1_TRINUC_BIAS = [1. / float(len(ALL_TRI)) for _ in ALL_TRI]
DEFAULT_MODEL_1 = [DEFAULT_1_OVERALL_MUT_RATE,
DEFAULT_1_HOMOZYGOUS_FREQ,
DEFAULT_1_INDEL_FRACTION,
DEFAULT_1_INS_VS_DEL,
DEFAULT_1_INS_LENGTH_VALUES,
DEFAULT_1_INS_LENGTH_WEIGHTS,
DEFAULT_1_DEL_LENGTH_VALUES,
DEFAULT_1_DEL_LENGTH_WEIGHTS,
DEFAULT_1_TRI_FREQS,
DEFAULT_1_TRINUC_BIAS]
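# Index layout of the DEFAULT_MODEL_* lists (this matches how update_mut_models
# unpacks them below): [0] overall mutation rate, [1] homozygous frequency,
# [2] indel fraction, [3] insertion-vs-deletion probability,
# [4]/[5] insertion length values/weights, [6]/[7] deletion length values/weights,
# [8] trinucleotide transition matrices, [9] trinucleotide bias.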
DEFAULT_2_OVERALL_MUT_RATE = 0.002
DEFAULT_2_HOMOZYGOUS_FREQ = 0.200
DEFAULT_2_INDEL_FRACTION = 0.1
DEFAULT_2_INS_VS_DEL = 0.3
DEFAULT_2_INS_LENGTH_VALUES = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
DEFAULT_2_INS_LENGTH_WEIGHTS = [0.1, 0.1, 0.2, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
# noinspection DuplicatedCode
DEFAULT_2_DEL_LENGTH_VALUES = [1, 2, 3, 4, 5]
DEFAULT_2_DEL_LENGTH_WEIGHTS = [0.3, 0.2, 0.2, 0.2, 0.1]
example_matrix_2 = [[0.0, 0.15, 0.7, 0.15],
[0.15, 0.0, 0.15, 0.7],
[0.7, 0.15, 0.0, 0.15],
[0.15, 0.7, 0.15, 0.0]]
DEFAULT_2_TRI_FREQS = [copy.deepcopy(example_matrix_2) for _ in range(16)]
DEFAULT_2_TRINUC_BIAS = [1. / float(len(ALL_TRI)) for _ in ALL_TRI]
DEFAULT_MODEL_2 = [DEFAULT_2_OVERALL_MUT_RATE,
DEFAULT_2_HOMOZYGOUS_FREQ,
DEFAULT_2_INDEL_FRACTION,
DEFAULT_2_INS_VS_DEL,
DEFAULT_2_INS_LENGTH_VALUES,
DEFAULT_2_INS_LENGTH_WEIGHTS,
DEFAULT_2_DEL_LENGTH_VALUES,
DEFAULT_2_DEL_LENGTH_WEIGHTS,
DEFAULT_2_TRI_FREQS,
DEFAULT_2_TRINUC_BIAS]
class SequenceContainer:
"""
Container for reference sequences, applies mutations
"""
def __init__(self, x_offset, sequence, ploidy, window_overlap, read_len, mut_models=None, mut_rate=None,
only_vcf=False):
# initialize basic variables
self.only_vcf = only_vcf
self.x = x_offset
self.ploidy = ploidy
self.read_len = read_len
self.sequences = [Seq(str(sequence)) for _ in range(self.ploidy)]
self.seq_len = len(sequence)
self.indel_list = [[] for _ in range(self.ploidy)]
self.snp_list = [[] for _ in range(self.ploidy)]
self.all_cigar = [[] for _ in range(self.ploidy)]
self.fm_pos = [[] for _ in range(self.ploidy)]
self.fm_span = [[] for _ in range(self.ploidy)]
# Blacklist explanation:
# black_list[ploid][pos] = 0 safe to insert variant here
# black_list[ploid][pos] = 1 indel inserted here
# black_list[ploid][pos] = 2 snp inserted here
# black_list[ploid][pos] = 3 invalid position for various processing reasons
self.black_list = [np.zeros(self.seq_len, dtype='<i4') for _ in range(self.ploidy)]
# disallow mutations to occur on window overlap points
self.win_buffer = window_overlap
for p in range(self.ploidy):
self.black_list[p][-self.win_buffer] = 3
self.black_list[p][-self.win_buffer - 1] = 3
# initialize mutation models
if not mut_models:
default_model = [copy.deepcopy(DEFAULT_MODEL_1) for _ in range(self.ploidy)]
self.model_data = default_model[:self.ploidy]
else:
if len(mut_models) != self.ploidy:
print('\nError: Number of mutation models received is not equal to specified ploidy\n')
sys.exit(1)
self.model_data = copy.deepcopy(mut_models)
# do we need to rescale mutation frequencies?
mut_rate_sum = sum([n[0] for n in self.model_data])
self.mut_rescale = mut_rate
if self.mut_rescale is None:
self.mut_scalar = 1.0
else:
self.mut_scalar = float(self.mut_rescale) // (mut_rate_sum / float(len(self.model_data)))
# how are mutations spread to each ploid, based on their specified mut rates?
self.ploid_mut_frac = [float(n[0]) / mut_rate_sum for n in self.model_data]
self.ploid_mut_prior = DiscreteDistribution(self.ploid_mut_frac, range(self.ploidy))
# init mutation models
#
# self.models[ploid][0] = average mutation rate
# self.models[ploid][1] = p(mut is homozygous | mutation occurs)
# self.models[ploid][2] = p(mut is indel | mut occurs)
# self.models[ploid][3] = p(insertion | indel occurs)
# self.models[ploid][4] = distribution of insertion lengths
# self.models[ploid][5] = distribution of deletion lengths
# self.models[ploid][6] = distribution of trinucleotide SNP transitions
# self.models[ploid][7] = p(trinuc mutates)
self.models = []
for n in self.model_data:
self.models.append([self.mut_scalar * n[0], n[1], n[2], n[3], DiscreteDistribution(n[5], n[4]),
DiscreteDistribution(n[7], n[6]), []])
for m in n[8]:
# noinspection PyTypeChecker
self.models[-1][6].append([DiscreteDistribution(m[0], NUCL), DiscreteDistribution(m[1], NUCL),
DiscreteDistribution(m[2], NUCL), DiscreteDistribution(m[3], NUCL)])
self.models[-1].append([m for m in n[9]])
# initialize poisson attributes
self.indel_poisson, self.snp_poisson = self.init_poisson()
# sample the number of variants that will be inserted into each ploid
self.indels_to_add = [n.sample() for n in self.indel_poisson]
self.snps_to_add = [n.sample() for n in self.snp_poisson]
# initialize trinuc snp bias
# compute mutation positional bias given trinucleotide strings of the sequence (ONLY AFFECTS SNPs)
#
# note: since indels are added before snps, it's possible these positional biases aren't correctly utilized
# at positions affected by indels. At the moment I'm going to consider this negligible.
trinuc_snp_bias = [[0. for _ in range(self.seq_len)] for _ in range(self.ploidy)]
self.trinuc_bias = [None for _ in range(self.ploidy)]
for p in range(self.ploidy):
for i in range(self.win_buffer + 1, self.seq_len - 1):
trinuc_snp_bias[p][i] = self.models[p][7][ALL_IND[str(self.sequences[p][i - 1:i + 2])]]
self.trinuc_bias[p] = DiscreteDistribution(trinuc_snp_bias[p][self.win_buffer + 1:self.seq_len - 1],
range(self.win_buffer + 1, self.seq_len - 1))
# initialize coverage attributes
self.window_size = None
self.coverage_distribution = None
self.fraglen_ind_map = None
def update_basic_vars(self, x_offset, sequence, ploidy, window_overlap, read_len):
self.x = x_offset
self.ploidy = ploidy
self.read_len = read_len
self.sequences = [Seq(str(sequence)) for _ in range(self.ploidy)]
self.seq_len = len(sequence)
self.indel_list = [[] for _ in range(self.ploidy)]
self.snp_list = [[] for _ in range(self.ploidy)]
self.all_cigar = [[] for _ in range(self.ploidy)]
self.fm_pos = [[] for _ in range(self.ploidy)]
self.fm_span = [[] for _ in range(self.ploidy)]
self.black_list = [np.zeros(self.seq_len, dtype='<i4') for _ in range(self.ploidy)]
# disallow mutations to occur on window overlap points
self.win_buffer = window_overlap
for p in range(self.ploidy):
self.black_list[p][-self.win_buffer] = 3
self.black_list[p][-self.win_buffer - 1] = 3
def update_mut_models(self, mut_models, mut_rate):
if not mut_models:
default_model = [copy.deepcopy(DEFAULT_MODEL_1) for _ in range(self.ploidy)]
self.model_data = default_model[:self.ploidy]
else:
if len(mut_models) != self.ploidy:
print('\nError: Number of mutation models received is not equal to specified ploidy\n')
sys.exit(1)
self.model_data = copy.deepcopy(mut_models)
# do we need to rescale mutation frequencies?
mut_rate_sum = sum([n[0] for n in self.model_data])
self.mut_rescale = mut_rate
if self.mut_rescale is None:
self.mut_scalar = 1.0
else:
self.mut_scalar = float(self.mut_rescale) // (mut_rate_sum / float(len(self.model_data)))
# how are mutations spread to each ploid, based on their specified mut rates?
self.ploid_mut_frac = [float(n[0]) / mut_rate_sum for n in self.model_data]
self.ploid_mut_prior = DiscreteDistribution(self.ploid_mut_frac, range(self.ploidy))
self.models = []
for n in self.model_data:
self.models.append([self.mut_scalar * n[0], n[1], n[2], n[3], DiscreteDistribution(n[5], n[4]),
DiscreteDistribution(n[7], n[6]), []])
for m in n[8]:
# noinspection PyTypeChecker
self.models[-1][6].append([DiscreteDistribution(m[0], NUCL), DiscreteDistribution(m[1], NUCL),
DiscreteDistribution(m[2], NUCL), DiscreteDistribution(m[3], NUCL)])
self.models[-1].append([m for m in n[9]])
def update_trinuc_bias(self):
trinuc_snp_bias = [[0. for _ in range(self.seq_len)] for _ in range(self.ploidy)]
self.trinuc_bias = [None for _ in range(self.ploidy)]
for p in range(self.ploidy):
for i in range(self.win_buffer + 1, self.seq_len - 1):
trinuc_snp_bias[p][i] = self.models[p][7][ALL_IND[str(self.sequences[p][i - 1:i + 2])]]
self.trinuc_bias[p] = DiscreteDistribution(trinuc_snp_bias[p][self.win_buffer + 1:self.seq_len - 1],
range(self.win_buffer + 1, self.seq_len - 1))
def init_coverage(self, coverage_data, frag_dist=None):
"""
Initializes coverage for the sequence container. Only makes changes if we are not in vcf-only mode.
:param coverage_data: A tuple containing the window size, gc scalars and target coverage values.
:param frag_dist: A probability distribution of the fragment size.
:return: Mean coverage value
"""
# TODO this section is also quite slow and will need further investigation
# If we're only creating a vcf, skip some expensive initialization related to coverage depth
if not self.only_vcf:
(self.window_size, gc_scalars, target_cov_vals) = coverage_data
gc_cov_vals = [[] for _ in self.sequences]
tr_cov_vals = [[] for _ in self.sequences]
avg_out = []
self.coverage_distribution = []
for i in range(len(self.sequences)):
# Zach implemented a change here but I can't remember if I changed it back for some reason.
# If second line below doesn't work, reactivate the first line.
# max_coord = min([len(self.sequences[i]) - self.read_len, len(self.all_cigar[i]) - self.read_len])
max_coord = min([len(self.sequences[i]) - self.read_len, len(self.all_cigar[i]) - 1])
# Trying to fix a problem wherein the above line gives a negative answer
if max_coord <= 0:
max_coord = min([len(self.sequences[i]), len(self.all_cigar[i])])
# compute gc-bias
j = 0
while j + self.window_size < len(self.sequences[i]):
gc_c = self.sequences[i][j:j + self.window_size].count('G') + \
self.sequences[i][j:j + self.window_size].count('C')
gc_cov_vals[i].extend([gc_scalars[gc_c]] * self.window_size)
j += self.window_size
gc_c = self.sequences[i][-self.window_size:].count('G') + \
self.sequences[i][-self.window_size:].count('C')
gc_cov_vals[i].extend([gc_scalars[gc_c]] * (len(self.sequences[i]) - len(gc_cov_vals[i])))
# Targeted values
tr_cov_vals[i].append(target_cov_vals[0])
prev_val = self.fm_pos[i][0]
for j in range(1, max_coord):
if self.fm_pos[i][j] is None:
tr_cov_vals[i].append(target_cov_vals[prev_val])
elif self.fm_span[i][j] - self.fm_pos[i][j] <= 1:
tr_cov_vals[i].append(target_cov_vals[prev_val])
else:
tr_cov_vals[i].append(sum(target_cov_vals[self.fm_pos[i][j]:self.fm_span[i][j]]) / float(
self.fm_span[i][j] - self.fm_pos[i][j]))
prev_val = self.fm_pos[i][j]
# Debug statement
# print(f'({i, j}), {self.all_cigar[i][j]}, {self.fm_pos[i][j]}, {self.fm_span[i][j]}')
# shift by half of read length
if len(tr_cov_vals[i]) > int(self.read_len / 2.):
tr_cov_vals[i] = [0.0] * int(self.read_len // 2) + tr_cov_vals[i][:-int(self.read_len / 2.)]
# fill in missing indices
tr_cov_vals[i].extend([0.0] * (len(self.sequences[i]) - len(tr_cov_vals[i])))
#
coverage_vector = np.cumsum([tr_cov_vals[i][nnn] *
gc_cov_vals[i][nnn] for nnn in range(len(tr_cov_vals[i]))])
coverage_vals = []
# TODO if max_coord is <=0, this is a problem
for j in range(0, max_coord):
coverage_vals.append(coverage_vector[j + self.read_len] - coverage_vector[j])
# Below is Zach's attempt to fix this. The commented out line is the original
# avg_out.append(np.mean(coverage_vals) / float(self.read_len))
avg_out.append(np.mean(coverage_vals)/float(min([self.read_len, max_coord])))
# Debug statement
# print(f'{avg_out}, {np.mean(avg_out)}')
if frag_dist is None:
# Debug statement
# print(f'++++, {max_coord}, {len(self.sequences[i])}, '
# f'{len(self.all_cigar[i])}, {len(coverage_vals)}')
self.coverage_distribution.append(DiscreteDistribution(coverage_vals, range(len(coverage_vals))))
# fragment length nightmare
else:
current_thresh = 0.
index_list = [0]
for j in range(len(frag_dist.cum_prob)):
if frag_dist.cum_prob[j] >= current_thresh + COV_FRAGLEN_PERCENTILE / 100.0:
current_thresh = frag_dist.cum_prob[j]
index_list.append(j)
flq = [frag_dist.values[nnn] for nnn in index_list]
if frag_dist.values[-1] not in flq:
flq.append(frag_dist.values[-1])
flq.append(LARGE_NUMBER)
self.fraglen_ind_map = {}
for j in frag_dist.values:
b_ind = bisect.bisect(flq, j)
if abs(flq[b_ind - 1] - j) <= abs(flq[b_ind] - j):
self.fraglen_ind_map[j] = flq[b_ind - 1]
else:
self.fraglen_ind_map[j] = flq[b_ind]
self.coverage_distribution.append({})
for flv in sorted(list(set(self.fraglen_ind_map.values()))):
buffer_val = self.read_len
for j in frag_dist.values:
if self.fraglen_ind_map[j] == flv and j > buffer_val:
buffer_val = j
max_coord = min([len(self.sequences[i]) - buffer_val - 1,
len(self.all_cigar[i]) - buffer_val + self.read_len - 2])
# print 'BEFORE:', len(self.sequences[i])-buffer_val
# print 'AFTER: ', len(self.all_cigar[i])-buffer_val+self.read_len-2
# print 'AFTER2:', max_coord
coverage_vals = []
for j in range(0, max_coord):
coverage_vals.append(
coverage_vector[j + self.read_len] - coverage_vector[j] + coverage_vector[j + flv] -
coverage_vector[
j + flv - self.read_len])
# EXPERIMENTAL
# quantized_cov_vals = quantize_list(coverage_vals)
# self.coverage_distribution[i][flv] = \
# DiscreteDistribution([n[2] for n in quantized_cov_vals],
# [(n[0], n[1]) for n in quantized_cov_vals])
# TESTING
# import matplotlib.pyplot as mpl
# print len(coverage_vals),'-->',len(quantized_cov_vals)
# mpl.figure(0)
# mpl.plot(range(len(coverage_vals)), coverage_vals)
# for qcv in quantized_cov_vals:
# mpl.plot([qcv[0], qcv[1]+1], [qcv[2],qcv[2]], 'r')
# mpl.show()
# sys.exit(1)
self.coverage_distribution[i][flv] = DiscreteDistribution(coverage_vals,
range(len(coverage_vals)))
return np.mean(avg_out)
def init_poisson(self):
ind_l_list = [self.seq_len * self.models[i][0] * self.models[i][2] * self.ploid_mut_frac[i] for i in
range(len(self.models))]
snp_l_list = [self.seq_len * self.models[i][0] * (1. - self.models[i][2]) * self.ploid_mut_frac[i] for i in
range(len(self.models))]
k_range = range(int(self.seq_len * MAX_MUTFRAC))
# return (indel_poisson, snp_poisson)
# TODO These next two lines are really slow. Maybe there's a better way
return [poisson_list(k_range, ind_l_list[n]) for n in range(len(self.models))], \
[poisson_list(k_range, snp_l_list[n]) for n in range(len(self.models))]
def update(self, x_offset, sequence, ploidy, window_overlap, read_len, mut_models=None, mut_rate=None):
# if mutation model is changed, we have to reinitialize it...
if ploidy != self.ploidy or mut_rate != self.mut_rescale or mut_models is not None:
self.ploidy = ploidy
self.mut_rescale = mut_rate
self.update_mut_models(mut_models, mut_rate)
# if sequence length is different than previous window, we have to redo snp/indel poissons
if len(sequence) != self.seq_len:
self.seq_len = len(sequence)
self.indel_poisson, self.snp_poisson = self.init_poisson()
# basic vars
self.update_basic_vars(x_offset, sequence, ploidy, window_overlap, read_len)
self.indels_to_add = [n.sample() for n in self.indel_poisson]
self.snps_to_add = [n.sample() for n in self.snp_poisson]
# initialize trinuc snp bias
if not IGNORE_TRINUC:
self.update_trinuc_bias()
def insert_mutations(self, input_list):
for input_variable in input_list:
which_ploid = []
wps = input_variable[4][0]
# if no genotype given, assume heterozygous and choose a single ploid based on their mut rates
if wps is None:
which_ploid.append(self.ploid_mut_prior.sample())
which_alt = [0]
else:
if '/' in wps or '|' in wps:
if '/' in wps:
splt = wps.split('/')
else:
splt = wps.split('|')
which_ploid = []
for i in range(len(splt)):
if splt[i] == '1':
which_ploid.append(i)
# assume we're just using first alt for inserted variants?
which_alt = [0 for _ in which_ploid]
# otherwise assume monoploidy
else:
which_ploid = [0]
which_alt = [0]
# ignore invalid ploids
for i in range(len(which_ploid) - 1, -1, -1):
if which_ploid[i] >= self.ploidy:
del which_ploid[i]
for i in range(len(which_ploid)):
p = which_ploid[i]
my_alt = input_variable[2][which_alt[i]]
my_var = (input_variable[0] - self.x, input_variable[1], my_alt)
# This is a potential fix implemented by Zach in a previous commit. He left the next line in.
# in_len = max([len(input_variable[1]), len(my_alt)])
in_len = len(input_variable[1])
if my_var[0] < 0 or my_var[0] >= len(self.black_list[p]):
print('\nError: Attempting to insert variant out of window bounds:')
print(my_var, '--> blackList[0:' + str(len(self.black_list[p])) + ']\n')
sys.exit(1)
if len(input_variable[1]) == 1 and len(my_alt) == 1:
if self.black_list[p][my_var[0]]:
continue
self.snp_list[p].append(my_var)
self.black_list[p][my_var[0]] = 2
else:
indel_failed = False
for k in range(my_var[0], my_var[0] + in_len):
if k >= len(self.black_list[p]):
indel_failed = True
continue
if self.black_list[p][k]:
indel_failed = True
continue
if indel_failed:
continue
for k in range(my_var[0], my_var[0] + in_len):
self.black_list[p][k] = 1
self.indel_list[p].append(my_var)
def random_mutations(self):
# add random indels
all_indels = [[] for _ in self.sequences]
for i in range(self.ploidy):
for j in range(self.indels_to_add[i]):
# insert homozygous indel
if random.random() <= self.models[i][1]:
which_ploid = range(self.ploidy)
# insert heterozygous indel
else:
which_ploid = [self.ploid_mut_prior.sample()]
# try to find suitable places to insert indels
event_pos = -1
for attempt in range(MAX_ATTEMPTS):
event_pos = random.randint(self.win_buffer, self.seq_len - 1)
for p in which_ploid:
if self.black_list[p][event_pos]:
event_pos = -1
if event_pos != -1:
break
if event_pos == -1:
continue
# insertion
if random.random() <= self.models[i][3]:
in_len = self.models[i][4].sample()
# sequence content of random insertions is uniformly random (change this later, maybe)
in_seq = ''.join([random.choice(NUCL) for _ in range(in_len)])
ref_nucl = self.sequences[i][event_pos]
my_indel = (event_pos, ref_nucl, ref_nucl + in_seq)
# deletion
else:
in_len = self.models[i][5].sample()
# skip if deletion too close to boundary
if event_pos + in_len + 1 >= len(self.sequences[i]):
continue
if in_len == 1:
in_seq = self.sequences[i][event_pos + 1]
else:
in_seq = str(self.sequences[i][event_pos + 1:event_pos + in_len + 1])
ref_nucl = self.sequences[i][event_pos]
my_indel = (event_pos, ref_nucl + in_seq, ref_nucl)
# if event too close to boundary, skip. if event conflicts with other indel, skip.
skip_event = False
if event_pos + len(my_indel[1]) >= self.seq_len - self.win_buffer - 1:
skip_event = True
if skip_event:
continue
for p in which_ploid:
for k in range(event_pos, event_pos + in_len + 1):
if self.black_list[p][k]:
skip_event = True
if skip_event:
continue
for p in which_ploid:
for k in range(event_pos, event_pos + in_len + 1):
self.black_list[p][k] = 1
all_indels[p].append(my_indel)
# add random snps
all_snps = [[] for _ in self.sequences]
for i in range(self.ploidy):
for j in range(self.snps_to_add[i]):
# insert homozygous SNP
if random.random() <= self.models[i][1]:
which_ploid = range(self.ploidy)
# insert heterozygous SNP
else:
which_ploid = [self.ploid_mut_prior.sample()]
# try to find suitable places to insert snps
event_pos = -1
for attempt in range(MAX_ATTEMPTS):
# based on the mutation model for the specified ploid, choose a SNP location based on trinuc bias
# (if there are multiple ploids, choose one at random)
if IGNORE_TRINUC:
event_pos = random.randint(self.win_buffer + 1, self.seq_len - 2)
else:
ploid_to_use = which_ploid[random.randint(0, len(which_ploid) - 1)]
event_pos = self.trinuc_bias[ploid_to_use].sample()
for p in which_ploid:
if self.black_list[p][event_pos]:
event_pos = -1
if event_pos != -1:
break
if event_pos == -1:
continue
ref_nucl = self.sequences[i][event_pos]
context = str(self.sequences[i][event_pos - 1]) + str(self.sequences[i][event_pos + 1])
# sample from tri-nucleotide substitution matrices to get SNP alt allele
new_nucl = self.models[i][6][TRI_IND[context]][NUC_IND[ref_nucl]].sample()
my_snp = (event_pos, ref_nucl, new_nucl)
for p in which_ploid:
all_snps[p].append(my_snp)
self.black_list[p][my_snp[0]] = 2
# combine random snps with inserted snps, remove any snps that overlap indels
for p in range(len(all_snps)):
all_snps[p].extend(self.snp_list[p])
all_snps[p] = [n for n in all_snps[p] if self.black_list[p][n[0]] != 1]
# MODIFY REFERENCE STRING: SNPS
for i in range(len(all_snps)):
temp = self.sequences[i].tomutable()
for j in range(len(all_snps[i])):
v_pos = all_snps[i][j][0]
if all_snps[i][j][1] != temp[v_pos]:
print('\nError: Something went wrong!\n', all_snps[i][j], temp[v_pos], '\n')
print(all_snps[i][j])
print(self.sequences[i][v_pos])
sys.exit(1)
else:
temp[v_pos] = all_snps[i][j][2]
self.sequences[i] = temp.toseq()
# organize the indels we want to insert
for i in range(len(all_indels)):
all_indels[i].extend(self.indel_list[i])
all_indels_ins = [sorted([list(m) for m in n]) for n in all_indels]
# MODIFY REFERENCE STRING: INDELS
for i in range(len(all_indels_ins)):
rolling_adj = 0
temp_symbol_list = CigarString.string_to_list(str(len(self.sequences[i])) + "M")
for j in range(len(all_indels_ins[i])):
v_pos = all_indels_ins[i][j][0] + rolling_adj
v_pos2 = v_pos + len(all_indels_ins[i][j][1])
indel_length = len(all_indels_ins[i][j][2]) - len(all_indels_ins[i][j][1])
rolling_adj += indel_length
if all_indels_ins[i][j][1] != str(self.sequences[i][v_pos:v_pos2]):
print('\nError: Something went wrong!\n', all_indels_ins[i][j], [v_pos, v_pos2],
str(self.sequences[i][v_pos:v_pos2]), '\n')
sys.exit(1)
else:
# alter reference sequence
self.sequences[i] = self.sequences[i][:v_pos] + Seq(all_indels_ins[i][j][2]) + \
self.sequences[i][v_pos2:]
# notate indel positions for cigar computation
if indel_length > 0:
temp_symbol_list = temp_symbol_list[:v_pos + 1] + ['I'] * indel_length \
+ temp_symbol_list[v_pos2 + 1:]
elif indel_length < 0:
temp_symbol_list[v_pos + 1] = "D" * abs(indel_length) + "M"
# pre-compute cigar strings
for j in range(len(temp_symbol_list) - self.read_len):
self.all_cigar[i].append(temp_symbol_list[j:j + self.read_len])
# create some data structures we will need later:
# --- self.fm_pos[ploid][pos]: position of the left-most matching base (IN REFERENCE COORDINATES, i.e.
# corresponding to the unmodified reference genome)
# --- self.fm_span[ploid][pos]: number of reference positions spanned by a read originating from
# this coordinate
md_so_far = 0
for j in range(len(temp_symbol_list)):
self.fm_pos[i].append(md_so_far)
# fix an edge case with deletions
if 'D' in temp_symbol_list[j]:
self.fm_pos[i][-1] += temp_symbol_list[j].count('D')
# compute number of ref matches for each read
# This line gets hit a lot and is relatively slow. Might look for an improvement
span_dif = len([n for n in temp_symbol_list[j: j + self.read_len] if 'M' in n])
self.fm_span[i].append(self.fm_pos[i][-1] + span_dif)
md_so_far += temp_symbol_list[j].count('M') + temp_symbol_list[j].count('D')
# tally up all the variants we handled...
count_dict = {}
all_variants = [sorted(all_snps[i] + all_indels[i]) for i in range(self.ploidy)]
for i in range(len(all_variants)):
for j in range(len(all_variants[i])):
all_variants[i][j] = tuple([all_variants[i][j][0] + self.x]) + all_variants[i][j][1:]
t = tuple(all_variants[i][j])
if t not in count_dict:
count_dict[t] = []
count_dict[t].append(i)
# TODO: combine multiple variants that happened to occur at same position into single vcf entry?
output_variants = []
for k in sorted(count_dict.keys()):
output_variants.append(k + tuple([len(count_dict[k]) / float(self.ploidy)]))
ploid_string = ['0' for _ in range(self.ploidy)]
for k2 in [n for n in count_dict[k]]:
ploid_string[k2] = '1'
output_variants[-1] += tuple(['WP=' + '/'.join(ploid_string)])
return output_variants
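# Each returned variant is a tuple of the form
#   (absolute position, ref allele, alt allele, fraction of ploids carrying it, 'WP=<ploid string>')
# e.g. a heterozygous SNP in a diploid container comes out roughly as (1234, 'A', 'G', 0.5, 'WP=0/1').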
def sample_read(self, sequencing_model, frag_len=None):
# choose a ploid
my_ploid = random.randint(0, self.ploidy - 1)
# stop attempting to find a valid position if we fail enough times
MAX_READPOS_ATTEMPTS = 100
attempts_thus_far = 0
# choose a random position within the ploid, and generate quality scores / sequencing errors
reads_to_sample = []
if frag_len is None:
r_pos = self.coverage_distribution[my_ploid].sample()
# sample read position and call function to compute quality scores / sequencing errors
r_dat = self.sequences[my_ploid][r_pos:r_pos + self.read_len]
(my_qual, my_errors) = sequencing_model.get_sequencing_errors(r_dat)
reads_to_sample.append([r_pos, my_qual, my_errors, r_dat])
else:
r_pos1 = self.coverage_distribution[my_ploid][self.fraglen_ind_map[frag_len]].sample()
# EXPERIMENTAL
# coords_to_select_from = self.coverage_distribution[my_ploid][self.fraglens_ind_map[frag_len]].sample()
# r_pos1 = random.randint(coords_to_select_from[0],coords_to_select_from[1])
r_pos2 = r_pos1 + frag_len - self.read_len
r_dat1 = self.sequences[my_ploid][r_pos1:r_pos1 + self.read_len]
r_dat2 = self.sequences[my_ploid][r_pos2:r_pos2 + self.read_len]
(my_qual1, my_errors1) = sequencing_model.get_sequencing_errors(r_dat1)
(my_qual2, my_errors2) = sequencing_model.get_sequencing_errors(r_dat2, is_reverse_strand=True)
reads_to_sample.append([r_pos1, my_qual1, my_errors1, r_dat1])
reads_to_sample.append([r_pos2, my_qual2, my_errors2, r_dat2])
# error format:
# myError[i] = (type, len, pos, ref, alt)
"""
examine sequencing errors to-be-inserted.
- remove deletions that don't have enough bordering sequence content to "fill in"
if error is valid, make the changes to the read data
"""
read_out = []
for read in reads_to_sample:
try:
my_cigar = self.all_cigar[my_ploid][read[0]]
except IndexError:
print('Index error when attempting to find cigar string.')
print(my_ploid, len(self.all_cigar[my_ploid]), read[0])
if frag_len is not None:
print((r_pos1, r_pos2))
print(frag_len, self.fraglen_ind_map[frag_len])
sys.exit(1)
total_d = sum([error[1] for error in read[2] if error[0] == 'D'])
total_i = sum([error[1] for error in read[2] if error[0] == 'I'])
avail_b = len(self.sequences[my_ploid]) - read[0] - self.read_len - 1
# add buffer sequence to fill in positions that get deleted
read[3] += self.sequences[my_ploid][read[0] + self.read_len:read[0] + self.read_len + total_d]
# this is leftover code and a patch for a method that isn't used. There is probably a better
# way to structure this than with a boolean
first_time = True
adj = 0
sse_adj = [0 for _ in range(self.read_len + max(sequencing_model.err_p[3]))]
any_indel_err = False
# sort by letter (D > I > S) such that we introduce all indel errors before substitution errors
# secondarily, sort by index
arranged_errors = {'D': [], 'I': [], 'S': []}
for error in read[2]:
arranged_errors[error[0]].append((error[2], error))
sorted_errors = []
for k in sorted(arranged_errors.keys()):
sorted_errors.extend([n[1] for n in sorted(arranged_errors[k])])
skip_indels = False
# FIXED TdB 05JUN2018
# Moved this outside the for error loop, since it messes up the CIGAR string when
# more than one deletion is in the same read
extra_cigar_val = []
# END FIXED TdB
for error in sorted_errors:
e_len = error[1]
e_pos = error[2]
if error[0] == 'D' or error[0] == 'I':
any_indel_err = True
# FIXED TdB 05JUN2018
# Moved this OUTSIDE the for error loop, since it messes up the CIGAR string
# when more than one deletion is in the same read
# extra_cigar_val = []
# END FIXED TdB
if total_d > avail_b: # if not enough bases to fill-in deletions, skip all indel errors
continue
if first_time:
# Again, this whole first time thing is a workaround for the previous
# code, which is simplified. May need to fix this all at some point
first_time = False
fill_to_go = total_d - total_i + 1
if fill_to_go > 0:
try:
extra_cigar_val = self.all_cigar[my_ploid][read[0] + fill_to_go][-fill_to_go:]
except IndexError:
# Applying the deletions we want requires going beyond region boundaries.
# Skip all indel errors
skip_indels = True
if skip_indels:
continue
# insert deletion error into read and update cigar string accordingly
if error[0] == 'D':
my_adj = sse_adj[e_pos]
pi = e_pos + my_adj
pf = e_pos + my_adj + e_len + 1
if str(read[3][pi:pf]) == str(error[3]):
read[3] = read[3][:pi + 1] + read[3][pf:]
my_cigar = my_cigar[:pi + 1] + my_cigar[pf:]
# weird edge case with del at very end of region. Make a guess and add a "M"
if pi + 1 == len(my_cigar):
my_cigar.append('M')
try:
my_cigar[pi + 1] = 'D' * e_len + my_cigar[pi + 1]
except IndexError:
print("Bug!! Index error on expanded cigar")
sys.exit(1)
else:
print('\nError, ref does not match alt while attempting to insert deletion error!\n')
sys.exit(1)
adj -= e_len
for i in range(e_pos, len(sse_adj)):
sse_adj[i] -= e_len
# insert insertion error into read and update cigar string accordingly
else:
my_adj = sse_adj[e_pos]
if str(read[3][e_pos + my_adj]) == error[3]:
read[3] = read[3][:e_pos + my_adj] + error[4] + read[3][e_pos + my_adj + 1:]
my_cigar = my_cigar[:e_pos + my_adj] + ['I'] * e_len + my_cigar[e_pos + my_adj:]
else:
print('\nError, ref does not match alt while attempting to insert insertion error!\n')
print('---', str(read[3][e_pos + my_adj]), '!=', error[3])
sys.exit(1)
adj += e_len
for i in range(e_pos, len(sse_adj)):
sse_adj[i] += e_len
else: # substitution errors, much easier by comparison...
if str(read[3][e_pos + sse_adj[e_pos]]) == error[3]:
temp = read[3].tomutable()
temp[e_pos + sse_adj[e_pos]] = error[4]
read[3] = temp.toseq()
else:
print('\nError, ref does not match alt while attempting to insert substitution error!\n')
sys.exit(1)
if any_indel_err:
if len(my_cigar):
my_cigar = (my_cigar + extra_cigar_val)[:self.read_len]
read[3] = read[3][:self.read_len]
read_out.append([self.fm_pos[my_ploid][read[0]], my_cigar, read[3], str(read[1])])
# read_out[i] = (pos, cigar, read_string, qual_string)
return read_out
class ReadContainer:
"""
Container for read data: computes quality scores and positions to insert errors
"""
def __init__(self, read_len, error_model, rescaled_error, rescale_qual=False):
self.read_len = read_len
self.rescale_qual = rescale_qual
model_path = pathlib.Path(error_model)
try:
error_dat = pickle.load(open(model_path, 'rb'), encoding="bytes")
except IOError:
print("\nProblem opening the sequencing error model.\n")
sys.exit(1)
self.uniform = False
# uniform-error SE reads (e.g., PacBio)
if len(error_dat) == 4:
self.uniform = True
[q_scores, off_q, avg_error, error_params] = error_dat
self.uniform_q_score = min([max(q_scores), int(-10. * np.log10(avg_error) + 0.5)])
print('Reading in uniform sequencing error model... (q=' + str(self.uniform_q_score) + '+' + str(
off_q) + ', p(err)={0:0.2f}%)'.format(100. * avg_error))
# only 1 q-score model present, use same model for both strands
elif len(error_dat) == 6:
[init_q1, prob_q1, q_scores, off_q, avg_error, error_params] = error_dat
self.pe_models = False
# found a q-score model for both forward and reverse strands
elif len(error_dat) == 8:
[init_q1, prob_q1, init_q2, prob_q2, q_scores, off_q, avg_error, error_params] = error_dat
self.pe_models = True
if len(init_q1) != len(init_q2) or len(prob_q1) != len(prob_q2):
print('\nError: R1 and R2 quality score models are of different length.\n')
sys.exit(1)
# This serves as a sanity check for the input model
else:
print('\nError: Something wrong with error model.\n')
sys.exit(1)
self.q_err_rate = [0.] * (max(q_scores) + 1)
for q in q_scores:
self.q_err_rate[q] = 10. ** (-q / 10.)
self.off_q = off_q
self.err_p = error_params
# Selects a new nucleotide based on the error model
self.err_sse = [DiscreteDistribution(n, NUCL) for n in self.err_p[0]]
# allows for selection of indel length based on the parameters of the model
self.err_sie = DiscreteDistribution(self.err_p[2], self.err_p[3])
# allows for indel insertion based on the length above and the probability from the model
self.err_sin = DiscreteDistribution(self.err_p[5], NUCL)
# adjust sequencing error frequency to match desired rate
if rescaled_error is None:
self.error_scale = 1.0
else:
self.error_scale = rescaled_error / avg_error
if not self.rescale_qual:
print('Warning: Quality scores no longer exactly representative of error probability. '
'Error model scaled by {0:.3f} to match desired rate...'.format(self.error_scale))
if self.uniform:
if rescaled_error <= 0.:
self.uniform_q_score = max(q_scores)
else:
self.uniform_q_score = min([max(q_scores), int(-10. * np.log10(rescaled_error) + 0.5)])
print(' - Uniform quality score scaled to match specified error rate (q=' + str(
self.uniform_q_score) + '+' + str(self.off_q) + ', p(err)={0:0.2f}%)'.format(100. * rescaled_error))
if not self.uniform:
# adjust length to match desired read length
if self.read_len == len(init_q1):
self.q_ind_remap = range(self.read_len)
else:
print('Warning: Read length of error model (' + str(len(init_q1)) + ') does not match -R value (' + str(
self.read_len) + '), rescaling model...')
self.q_ind_remap = [max([1, len(init_q1) * n // read_len]) for n in range(read_len)]
# initialize probability distributions
self.init_dist_by_pos_1 = [DiscreteDistribution(init_q1[i], q_scores) for i in range(len(init_q1))]
self.prob_dist_by_pos_by_prev_q1 = [None]
for i in range(1, len(init_q1)):
self.prob_dist_by_pos_by_prev_q1.append([])
for j in range(len(init_q1[0])):
# if we don't have sufficient data for a transition, use the previous quality score
if np.sum(prob_q1[i][j]) <= 0.:
self.prob_dist_by_pos_by_prev_q1[-1].append(
DiscreteDistribution([1], [q_scores[j]], degenerate_val=q_scores[j]))
else:
self.prob_dist_by_pos_by_prev_q1[-1].append(DiscreteDistribution(prob_q1[i][j], q_scores))
# If paired-end, initialize probability distributions for the other strand
if self.pe_models:
self.init_dist_by_pos_2 = [DiscreteDistribution(init_q2[i], q_scores) for i in range(len(init_q2))]
self.prob_dist_by_pos_by_prev_q2 = [None]
for i in range(1, len(init_q2)):
self.prob_dist_by_pos_by_prev_q2.append([])
for j in range(len(init_q2[0])):
if np.sum(prob_q2[i][j]) <= 0.:  # if we don't have sufficient data for a transition, use the previous quality score
self.prob_dist_by_pos_by_prev_q2[-1].append(
DiscreteDistribution([1], [q_scores[j]], degenerate_val=q_scores[j]))
else:
self.prob_dist_by_pos_by_prev_q2[-1].append(DiscreteDistribution(prob_q2[i][j], q_scores))
def get_sequencing_errors(self, read_data, is_reverse_strand=False):
"""
Inserts errors of type substitution, insertion, or deletion into read_data, and assigns a quality score
based on the container model.
:param read_data: sequence to insert errors into
:param is_reverse_strand: whether to treat this as the reverse strand or not
:return: modified sequence and associate quality scores
"""
# TODO this is one of the slowest methods in the code. Need to investigate how to speed this up.
q_out = [0] * self.read_len
s_err = []
if self.uniform:
my_q = [self.uniform_q_score + self.off_q] * self.read_len
q_out = ''.join([chr(n) for n in my_q])
for i in range(self.read_len):
if random.random() < self.error_scale * self.q_err_rate[self.uniform_q_score]:
s_err.append(i)
else:
if self.pe_models and is_reverse_strand:
my_q = self.init_dist_by_pos_2[0].sample()
else:
my_q = self.init_dist_by_pos_1[0].sample()
q_out[0] = my_q
# Every time this is hit, we loop the entire read length twice. I feel like these two loops
# could be combined into one fairly easily. The culprit seems to be too many hits to the sample() method.
for i in range(1, self.read_len):
if self.pe_models and is_reverse_strand:
my_q = self.prob_dist_by_pos_by_prev_q2[self.q_ind_remap[i]][my_q].sample()
else:
my_q = self.prob_dist_by_pos_by_prev_q1[self.q_ind_remap[i]][my_q].sample()
q_out[i] = my_q
if is_reverse_strand:
q_out = q_out[::-1]
for i in range(self.read_len):
if random.random() < self.error_scale * self.q_err_rate[q_out[i]]:
s_err.append(i)
if self.rescale_qual: # do we want to rescale qual scores to match rescaled error?
q_out = [max([0, int(-10. * np.log10(self.error_scale * self.q_err_rate[n]) + 0.5)]) for n in q_out]
q_out = [min([int(self.q_err_rate[-1]), n]) for n in q_out]
q_out = ''.join([chr(n + self.off_q) for n in q_out])
else:
q_out = ''.join([chr(n + self.off_q) for n in q_out])
if self.error_scale == 0.0:
return q_out, []
s_out = []
n_del_so_far = 0
# don't allow indel errors to occur on subsequent positions
prev_indel = -2
# don't allow other sequencing errors to occur on bases removed by deletion errors
del_blacklist = []
# Need to check into this loop, to make sure it isn't slowing us down.
# The culprit seems to be too many hits to the sample() method. This has a few of those calls.
for ind in s_err[::-1]: # for each error that we're going to insert...
# determine error type
is_sub = True
if ind != 0 and ind != self.read_len - 1 - max(self.err_p[3]) and abs(ind - prev_indel) > 1:
if random.random() < self.err_p[1]:
is_sub = False
# insert substitution error
if is_sub:
my_nucl = str(read_data[ind])
new_nucl = self.err_sse[NUC_IND[my_nucl]].sample()
s_out.append(('S', 1, ind, my_nucl, new_nucl))
# insert indel error
else:
indel_len = self.err_sie.sample()
# insertion error
if random.random() < self.err_p[4]:
my_nucl = str(read_data[ind])
new_nucl = my_nucl + ''.join([self.err_sin.sample() for n in range(indel_len)])
s_out.append(('I', len(new_nucl) - 1, ind, my_nucl, new_nucl))
# deletion error (prevent too many of them from stacking up)
elif ind < self.read_len - 2 - n_del_so_far:
my_nucl = str(read_data[ind:ind + indel_len + 1])
new_nucl = str(read_data[ind])
n_del_so_far += len(my_nucl) - 1
s_out.append(('D', len(my_nucl) - 1, ind, my_nucl, new_nucl))
for i in range(ind + 1, ind + indel_len + 1):
del_blacklist.append(i)
prev_indel = ind
# remove blacklisted errors
for i in range(len(s_out) - 1, -1, -1):
if s_out[i][2] in del_blacklist:
del s_out[i]
return q_out, s_out
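# Editor's usage sketch (illustrative values; the error-model path below is hypothetical):
# se_model = ReadContainer(read_len=101, error_model='models/errorModel_default.p',
#                          rescaled_error=0.01, rescale_qual=True)
# qual_string, seq_errors = se_model.get_sequencing_errors(read_sequence)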
# parse mutation model pickle file
def parse_input_mutation_model(model=None, which_default=1):
if which_default == 1:
out_model = [copy.deepcopy(n) for n in DEFAULT_MODEL_1]
elif which_default == 2:
out_model = [copy.deepcopy(n) for n in DEFAULT_MODEL_2]
else:
print('\nError: Unknown default mutation model specified\n')
sys.exit(1)
if model is not None:
pickle_dict = pickle.load(open(model, "rb"))
out_model[0] = pickle_dict['AVG_MUT_RATE']
out_model[2] = 1. - pickle_dict['SNP_FREQ']
ins_list = pickle_dict['INDEL_FREQ']
if len(ins_list):
ins_count = sum([ins_list[k] for k in ins_list.keys() if k >= 1])
del_count = sum([ins_list[k] for k in ins_list.keys() if k <= -1])
ins_vals = [k for k in sorted(ins_list.keys()) if k >= 1]
ins_weight = [ins_list[k] / float(ins_count) for k in ins_vals]
del_vals = [k for k in sorted([abs(k) for k in ins_list.keys() if k <= -1])]
del_weight = [ins_list[-k] / float(del_count) for k in del_vals]
else: # degenerate case where no indel stats are provided
ins_count = 1
del_count = 1
ins_vals = [1]
ins_weight = [1.0]
del_vals = [1]
del_weight = [1.0]
out_model[3] = ins_count / float(ins_count + del_count)
out_model[4] = ins_vals
out_model[5] = ins_weight
out_model[6] = del_vals
out_model[7] = del_weight
trinuc_trans_prob = pickle_dict['TRINUC_TRANS_PROBS']
for k in sorted(trinuc_trans_prob.keys()):
my_ind = TRI_IND[k[0][0] + k[0][2]]
(k1, k2) = (NUC_IND[k[0][1]], NUC_IND[k[1][1]])
out_model[8][my_ind][k1][k2] = trinuc_trans_prob[k]
for i in range(len(out_model[8])):
for j in range(len(out_model[8][i])):
for l in range(len(out_model[8][i][j])):
# if trinuc not present in input mutation model, assign it uniform probability
if float(sum(out_model[8][i][j])) < 1e-12:
out_model[8][i][j] = [0.25, 0.25, 0.25, 0.25]
else:
out_model[8][i][j][l] /= float(sum(out_model[8][i][j]))
trinuc_mut_prob = pickle_dict['TRINUC_MUT_PROB']
which_have_we_seen = {n: False for n in ALL_TRI}
trinuc_mean = np.mean(list(trinuc_mut_prob.values()))
for trinuc in trinuc_mut_prob.keys():
out_model[9][ALL_IND[trinuc]] = trinuc_mut_prob[trinuc]
which_have_we_seen[trinuc] = True
for trinuc in which_have_we_seen.keys():
if not which_have_we_seen[trinuc]:
out_model[9][ALL_IND[trinuc]] = trinuc_mean
return out_model
``` |
{
"source": "Jkallehauge/DCE-DSC-MRI_CodeCollection",
"score": 3
} |
#### File: LEK_UoEdinburghUK/PharmacokineticModelling/models.py
```python
import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import cumtrapz
# Inputs:
# params numpy array of model parameters, explained for each individual model
# t numpy array of time points in seconds
# AIF numpy array containing the PLASMA AIF as delta R1 in s^-1 (i.e. no relaxivity usually involved but it doesn't matter because it cancels - just be consistent)
# toff offset time in seconds between the AIF and the uptake curve, default is zero
# Output:
#G numpy array of model curve as delta R1 in s^-1 unless the AIF included relaxivity
#####################
# Patlak model
#
# params: [Ktrans, vp]
# Units: Ktrans ml (ml tissue)^-1 s^-1
# vp no units (value is between 0 and 1)
#####################
def Patlak(params,t,AIF,toff=0):
# Assign parameter names
Ktrans, vp = params
# Shift the AIF by the toff (if not zero)
if toff !=0:
f=interp1d(t,AIF,kind='linear',bounds_error=False,fill_value=0)
AIF = (t>toff)*f(t-toff)
#Use trapezoidal integration for the AIF
CpIntegral=Ktrans*cumtrapz(AIF,x=t,initial=0)
#Add Cpvp term
G=CpIntegral+(AIF*vp)
return G
#####################
# Kety (Tofts) model
#
# params: [Ktrans, ve]
# Units: Ktrans ml (ml tissue)^-1 s^-1
# ve no units (value is between 0 and 1)
#####################
def Kety(params,t,AIF,toff=0):
# Assign parameter names
Ktrans, ve = params
# Shift the AIF by the toff (if not zero)
if toff !=0:
f=interp1d(t,AIF,kind='linear',bounds_error=False,fill_value=0)
AIF = (t>toff)*f(t-toff)
# Calculate the impulse response function
imp=Ktrans*np.exp(-1*Ktrans*t/ve)
# Convolve impulse response with AIF
convolution=np.convolve(AIF,imp)
# Discard unwanted points and make sure timespacing is correct
G=convolution[0:len(t)]*t[1]
return G
#####################
# Extended Kety (Tofts) model
#
# params: [Ktrans, ve, vp]
# Units: Ktrans ml (ml tissue)^-1 s^-1
# ve no units (value is between 0 and 1)
# vp no units (value is between 0 and 1)
#####################
def ExtKety(params,t,AIF,toff=0):
# Assign parameter names
Ktrans, ve, vp = params
# Shift the AIF by the toff (if not zero)
if toff !=0:
f=interp1d(t,AIF,kind='linear',bounds_error=False,fill_value=0)
AIF = (t>toff)*f(t-toff)
# Calculate the impulse response function
imp=Ktrans*np.exp(-1*Ktrans*t/ve)
# Convolve impulse response with AIF
convolution=np.convolve(AIF,imp)
# Discard unwanted points, make sure timespacing is correct and add Cpvp term
G=convolution[0:len(t)]*t[1]+(vp*AIF)
return G
#####################
# Two compartment uptake model
#
# params: [E, Fp, vp]
# Units: E no units (value is between 0 and 1)
# Fp ml (ml tissue)^-1 s^-1
# vp no units (value is between 0 and 1)
#####################
def TwoCUM(params,t,AIF,toff=0):
# Assign parameter names
E, Fp, vp = params
# Shift the AIF by the toff (if not zero)
if toff !=0:
f=interp1d(t,AIF,kind='linear',bounds_error=False,fill_value=0)
AIF = (t>toff)*f(t-toff)
#First calculate the parameter Tp
Tp=(vp/Fp)*(1-E)
#Calculate the impulse response function
exptTp=np.exp(-1*t/Tp)
imp=exptTp*(1-E) + E
# Convolve impulse response with AIF and make sure time spacing is correct
convolution=np.convolve(AIF,imp)*t[1]
#Discard unwanted points and multiply by Fp
G=Fp*convolution[0:len(t)]
return G
#####################
# Two compartment exchange model
#
# params: [E, Fp, ve, vp]
# Units: E no units (value is between 0 and 1)
# Fp ml (ml tissue)^-1 s^-1
# ve no units (value is between 0 and 1)
# vp no units (value is between 0 and 1)
#####################
def TwoCXM(params,t,AIF,toff=0):
# Assign parameter names
E, Fp, ve, vp = params
# Shift the AIF by the toff (if not zero)
if toff !=0:
f=interp1d(t,AIF,kind='linear',bounds_error=False,fill_value=0)
AIF = (t>toff)*f(t-toff)
#First calculate the parameters TB, TE and Tp
Tp=(vp/Fp)*(1-E)
TE=ve*(1-E)/(E*Fp)
TB=vp/Fp
#And then the impulse response function parameters A, Kplus, Kminus
Kplus=0.5*((1/Tp) + (1/TE) + np.sqrt(((1/Tp) + (1/TE))**2 - (4/(TE*TB))))
Kminus=0.5*((1/Tp) + (1/TE) - np.sqrt(((1/Tp) + (1/TE))**2 - (4/(TE*TB))))
A=(Kplus - (1/TB))/(Kplus - Kminus)
#Calculate the impulse response function
expKplus=np.exp(-1*t*Kplus)
expKminus=np.exp(-1*t*Kminus)
imp=expKplus + A*(expKminus - expKplus)
#Calculate the convolution and make sure time spacing is correct
convolution=np.convolve(AIF,imp)*t[1]
#Discard unwanted points and multiply by Fp
G=Fp*convolution[0:len(t)]
return G
#####################
#Adiabatic approximation to the Tissue Homogeneity (AATH) model
#
# params: [E, Fp, ve, vp]
# Units: E no units (value is between 0 and 1)
# Fp ml (ml tissue)^-1 s^-1
# ve no units (value is between 0 and 1)
# vp no units (value is between 0 and 1)
#####################
def AATH(params,t,AIF,toff=0):
# Assign parameter names
E, Fp, ve, vp = params
Tc=vp/Fp
# Shift the AIF by the toff (if not zero)
if toff !=0:
f=interp1d(t,AIF,kind='linear',bounds_error=False,fill_value=0)
AIF = (t>toff)*f(t-toff)
#Calculate the impulse response function
imp=E*Fp*np.exp(-1*E*Fp*(t-Tc)/ve)
if np.round(Tc/t[1])!=0:
imp[0:(round(Tc/t[1]))]=Fp
#Calculate the convolution and make sure time spacing is correct
convolution=np.convolve(AIF,imp)*t[1]
#Discard unwanted points
G=convolution[0:len(t)]
return G
```
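Editor's note: a minimal usage sketch for the models above, assuming `models.py` is importable and using a synthetic plasma AIF; the parameter values are illustrative only.
```python
import numpy as np
import models  # assumes models.py is on the Python path

t = np.arange(0, 300, 1.0)                      # time points in seconds
aif = 0.05 * np.exp(-t / 120.0) * (t > 10)      # crude synthetic plasma AIF (delta R1, s^-1)
params = [0.005, 0.2, 0.05]                     # [Ktrans (s^-1), ve, vp]
curve = models.ExtKety(params, t, aif, toff=0)
print(curve.shape)                              # (300,)
```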
#### File: LEK_UoEdinburghUK/SignalToConcentration/SI2Conc.py
```python
import numpy as np
#####################
# FLASH Signal intensity curve to concentration curve
######################
# Inputs:
# SIcurve numpy array of SI values
# TR TR for FLAHS sequence, in seconds
# flip Flip angle for FLASH sequence in degrees
# T1base Native T1 corresponding to the baseline signal intensity, in seconds
# baselinepts Number of data points before the arrival of contrast agent
# S0 Equilibrium signal, if known. Default is to calculate it here
# Output:
#H numpy array of curve as delta R1 in s^-1
def SI2Conc(SIcurve,TR,flip,T1base,baselinepts,S0=None):
# Convert flip angle to radians
rflip=flip*np.pi/180
# Convert baseline T1 to baseline R1
R1base=1/T1base
# If S0 isn't specified, calculate from baseline
if S0 is None:
SIbase=np.mean(SIcurve[1:baselinepts])
S0=CalcM0(SIbase,TR,flip,T1base)
# Now calculate the R1 curve
R1=np.log(((S0*np.sin(rflip))-SIcurve)/(S0*np.sin(rflip)-(SIcurve*np.cos(rflip))))*(-1/TR)
# And finally the delta R1 curve
H=R1-R1base
#return H
return H
#####################
# Concentration curve (as deltaR1) to FLASH Signal intensity curve
######################
# Inputs:
# deltaR1 numpy array of delta R1 values in s^-1
# TR TR for FLASH sequence, in seconds
# flip Flip angle for FLASH sequence in degrees
# T1base Native T1 corresponding to the baseline signal intensity, in seconds
# S0 Equilibrium signal
# Output:
# SI numpy array of SI curve
def Conc2SI(deltaR1,TR,flip,T1base,S0):
# Convert flip angle to radians
rflip=flip*np.pi/180
# Convert T1 base to R1 base
R1base=1/T1base
# Convert deltaR1 curve to R1 curve
R1curve=deltaR1+R1base
# Convert to SI
SI=FLASH(S0,TR,flip,1/R1curve)
return SI
######################
# Helper functions to calculate S0 and the FLASH signal intensity equation
######################
def CalcM0(SI, TR, flip, T1):
#convert flip angle to radians
rflip=flip*np.pi/180
#Calculate M0
S0=SI*(1-np.cos(rflip)*np.exp(-TR/T1))/(np.sin(rflip)*(1-np.exp(-TR/T1)))
return S0
def FLASH(S0, TR, flip, T1):
rflip=flip*np.pi/180
SI=S0*np.sin(rflip)*(1-np.exp(-TR/T1))/(1-np.cos(rflip)*np.exp(-TR/T1))
return SI
```
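Editor's note: a round-trip sketch for the helpers above, assuming `SI2Conc.py` is importable; a flat baseline signal maps to a zero delta-R1 curve and back to the same signal.
```python
import numpy as np
from SI2Conc import SI2Conc, Conc2SI, FLASH  # assumes SI2Conc.py is on the path

TR, flip, T1base = 0.005, 20.0, 1.4            # seconds, degrees, seconds
S0 = 1000.0
si = FLASH(S0, TR, flip, np.full(50, T1base))  # flat baseline signal intensity
delta_r1 = SI2Conc(si, TR, flip, T1base, baselinepts=10, S0=S0)
si_back = Conc2SI(delta_r1, TR, flip, T1base, S0)
print(np.allclose(si, si_back))                # True
```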
#### File: original/ST_USydAUS_DCE/InputFunctions.py
```python
import math
import numpy as np
# Shifts array to the right by n elements
# and inserts n zeros at the beginning of the array
def arr_shift(A,n):
shift = np.zeros(n)
A_shifted = np.insert(A,0,shift)
A_new = A_shifted[0:len(A)]
return(A_new)
# Population AIF from Parker MRM 2006
def AIF(t0,t):
# parameter values defined in table 1 (Parker MRM 2006)
A1 = 0.809
A2 = 0.330
T1 = 0.17046
T2 = 0.365
sigma1 = 0.0563
sigma2 = 0.132
alpha = 1.050
beta = 0.1685
s = 38.078
tau = 0.483
# Eq. 1 (Parker 2006)
Ca = [(A1/sigma1)*(1/math.sqrt(2*math.pi))*math.exp(-(i/60-T1)**2/(2*sigma1**2)) +
(A2/sigma2)*(1/math.sqrt(2*math.pi))*math.exp(-(i/60-T2)**2/(2*sigma2**2)) +
alpha*math.exp(-beta*i/60)/(1+math.exp(-s*(i/60-tau))) for i in t]
# baseline shift
Ca = arr_shift(Ca,int(t0/t[1])-1)
return(Ca)
# Population AIF from Parker MRM 2006 - modified for a longer injection time
def variableAIF(inj_time,t,t0):
# Standard AIF (Parker MRM 2006)
# Injection rate of 3ml/s of a dose of 0.1mmol/kg of CA of concentration 0.5mmol/ml
# Assuming a standard body weight of 70kg, the injection time comes to
I = 70*(1/5)*(1/3) #seconds
Ca = AIF(t0,t) # standard AIF
# Number of times the standard AIF must be shifted by I to match the required injection time
n = int(round(inj_time/I))
# Calculate AIF for each n
shift = int(I/t[1])
Ca_sup = np.zeros(shape=(n+1,len(Ca)))
Ca_sup[0] = Ca
for i in range(1,n+1):
Ca_sup[i] = arr_shift(Ca,shift*i)
Ca_new = (1/n)*np.sum(Ca_sup,axis=0)
inj_time = I*n # Calculate actual injection time
return(Ca_new)
# Population AIF for a preclinical case - from McGrath MRM 2009
def preclinicalAIF(t0,t):
# Model B - parameter values defined in table 1 (McGrath MRM 2009)
A1 = 3.4
A2 = 1.81
k1 = 0.045
k2 = 0.0015
t1 = 7
# Eq. 5 (McGrath MRM 2009)
Ca = [A1*(i/t1)+A2*(i/t1) if i<=t1 else A1*np.exp(-k1*(i-t1))+A2*np.exp(-k2*(i-t1)) for i in t]
# baseline shift
Ca = arr_shift(Ca,int(t0/t[1])-1)
return(Ca)
```
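Editor's note: a sketch that samples the Parker population AIF on a one-second grid, assuming `InputFunctions.py` is importable; the bolus-arrival time is illustrative.
```python
import numpy as np
import InputFunctions as inpf  # assumes InputFunctions.py is on the path

t = np.arange(0, 360, 1.0)     # seconds
ca = inpf.AIF(t0=10, t=t)      # population AIF, bolus arrival shifted to ~10 s
print(len(ca), float(np.max(ca)) > 0)
```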
#### File: original/ST_USydAUS_DCE/ModelDictionary.py
```python
import Tools as tools
######################################
# conc = vp x ca + ktrans x exp(-t(ktrans/ve))*ca
def ExtendedTofts(X, vp, ve, ktrans):
t = X[:,0]
ca = X[:,1]
Tc = ve/ktrans
# expconv calculates convolution of ca and (1/Tc)exp(-t/Tc)
conc = vp*ca + ve*tools.expconv(Tc, t, ca)
return(conc)
######################################
# conc = Fp x exp(-t(Fp/vp))*ca
def OneCompartment(X, vp, Fp):
t = X[:,0]
ca = X[:,1]
Tc = vp/Fp
# expconv calculates convolution of ca and (1/Tc)exp(-t/Tc)
conc = vp*tools.expconv(Tc,t,ca)
return(conc)
######################################
def PatlakModel(X,vp,ki):
t = X[:,0]
ca = X[:,1]
conc = ki*tools.integrate(ca,t) + vp*ca
return(conc)
######################################
def conc_HF2CGM(X, ve, kce, kbc):
t = X[:,0]
ca = X[:,1]
Tc = (1-ve)/kbc
# expconv calculates convolution of ca and (1/Tc)exp(-t/Tc)
conc = ve*ca + kce*Tc*tools.expconv(Tc, t, ca)
return(conc)
######################################
def DualInletExtendedTofts(X, fa, fv, vp, ve, ktrans):
# If vif is not given, fv = 0 and fa = 1, and this model reverts to the
# single inlet Ext. Tofts model above.
t = X[:,0]
ca = X[:,1]
cv = X[:,2]
Tc = ve/ktrans
# expconv calculates convolution of input function and (1/Tc)exp(-t/Tc)
c_if = fa*ca + fv*cv
conc = vp*c_if + ve*tools.expconv(Tc, t, c_if)
return(conc)
######################################
def DualInletOneCompartment(X, fa, fv, vp, Fp):
# If vif is not given, fv = 0 and fa = 1, and this model reverts to the
# single inlet One Compartment model above.
t = X[:,0]
ca = X[:,1]
cv = X[:,2]
Tc = vp/Fp
# expconv calculates convolution of input function and (1/Tc)exp(-t/Tc)
c_if = fa*ca + fv*cv
conc = vp*tools.expconv(Tc,t,c_if)
return(conc)
######################################
def DualInletconc_HF2CGM(X, fa, fv, ve, kce, kbc):
# If vif is not given, fv = 0 and fa = 1, and this model reverts to the
# single inlet High Flow 2-Compartment Gadoxetate model above.
t = X[:,0]
ca = X[:,1]
cv = X[:,2]
Tc = (1-ve)/kbc
# expconv calculates convolution of ca and (1/Tc)exp(-t/Tc)
c_if = fa*ca + fv*cv
conc = ve*c_if + kce*Tc*tools.expconv(Tc, t, c_if)
return(conc)
```
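Editor's note: a sketch that evaluates the Extended Tofts model on a stacked `[t, ca]` array, assuming `ModelDictionary.py` and `Tools.py` are both importable; parameter values are illustrative.
```python
import numpy as np
import ModelDictionary as md   # assumes ModelDictionary.py and Tools.py are on the path

t = np.arange(0, 300, 1.0)                    # seconds
ca = 0.05 * np.exp(-t / 120.0) * (t > 10)     # synthetic AIF
X = np.column_stack((t, ca))                  # stacked [time, AIF] input
conc = md.ExtendedTofts(X, vp=0.05, ve=0.2, ktrans=0.005)
print(conc.shape)                             # (300,)
```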
#### File: original/ST_USydAUS_DCE/Tools.py
```python
import numpy as np
#####################################
# Shifts array to the right by n elements
# and inserts n zeros at the beginning of the array
def arr_shift(A,n):
shift = np.zeros(n)
A_shifted = np.insert(A,0,shift)
A_new = A_shifted[0:len(A)]
return(A_new)
#####################################
# Performs convolution of (1/T)exp(-t/T) with a
def expconv(T, t, a):
if T==0:
return(a)
n = len(t)
f = np.zeros((n,))
x = (t[1:n-1] - t[0:n-2])/T
da = (a[1:n-1] - a[0:n-2])/x
E = np.exp(-x)
E0 = 1-E
E1 = x-E0
add = a[0:n-2]*E0 + da*E1
for i in range(0,n-2):
f[i+1] = E[i]*f[i] + add[i]
f[n-1] = f[n-2]
return (f)
#####################################
# Performs deconvolution of C and ca_time where
# ca_time = ca times dt
def deconvolve(C,ca,dt):
# Build matrix from given AIF
ca_time = ca*dt
A = np.zeros(shape=(len(ca),len(ca)))
for i in np.arange(0,len(ca)):
A[:,i] = arr_shift(ca_time,i)
# SVD of A
U,S,Vt = np.linalg.svd(A,full_matrices=True)
#Inverse of A
cutoff = 0.01
S[S<cutoff*np.max(S)]=0
nz = S!=0
S[nz]=1/S[nz]
invA = np.matmul(np.transpose(Vt),np.matmul(np.diag(S),np.transpose(U)))
# Solution
X = np.matmul(invA,C)
return(X)
#####################################
# Performs discrete integration of ca over time t
def integrate(ca,t):
f = np.zeros(len(ca))
dt = t[1]-t[0]
f[0] = 0
for n in np.arange(1,len(t)):
f[n] = dt*ca[n]+f[n-1]
return(f)
#####################################
# Calculates the SPGR signal using given
# flip angle (FA in degrees), repetition time (TR in seconds),
# equilibrium signal (S0) and longitudinal
# relaxation rate (R1 in Hz)
def spgress(FA, TR, S0, R1):
E = np.exp(-TR*R1)
c = np.cos(np.array(FA)*np.pi/180)
s = np.sin(np.array(FA)*np.pi/180)
Mz = np.absolute(S0*s*(1-E)/(1-c*E))
return(Mz)
#####################################
# Calculates the post-contrast longitudinal relaxation rate (R1) from the
# dynamic SPGR signal S, given the flip angle (FA in degrees),
# repetition time (TR in seconds),
# equilibrium signal (S0), precontrast longitudinal
# relaxation rate (R10 in Hz)
def spgress_inv(S, FA, TR, S0, R10):
E = np.exp(-TR*R10)
c = np.cos(np.array(FA)*np.pi/180)
Sn = (S/S0)*(1-E)/(1-c*E) # normalised signal
R1 = -np.log((1-Sn)/(1-c*Sn))/TR # Relaxation rate in 1/s
return(R1)
```
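Editor's note: a sketch of the SPGR helpers above; passing the simulated pre-contrast signal as the normalisation argument of `spgress_inv` recovers the post-contrast R1 exactly for these synthetic inputs (assumes `Tools.py` is importable).
```python
import numpy as np
import Tools as tools          # assumes Tools.py is on the path

FA, TR, S0, R10, R1 = 15.0, 0.005, 1000.0, 1.0, 2.5
s_base = tools.spgress(FA, TR, S0, R10)            # pre-contrast signal
s_post = tools.spgress(FA, TR, S0, R1)             # post-contrast signal
r1_est = tools.spgress_inv(s_post, FA, TR, s_base, R10)
print(np.isclose(r1_est, R1))                      # True
```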
#### File: DCE-DSC-MRI_CodeCollection/test/helpers.py
```python
import pytest
def osipi_parametrize(arg_names, test_data, xf_labels=None):
"""
Generate parametrize decorator with XFail marks.
Adds XFail mark to any test case whose label is contained in xf_labels.
Parameters
----------
arg_names: string
Comma-delimited string of parameter names for the test function.
test_data : list of tuples
Input formatted as input for the pytest parametrize decorator.
Each tuple contains the parameters corresponding to a single test case.
Test case labels must be stored in the first tuple element.
xf_labels : list of strings, optional
Each member should correspond to a test case label that is expected to
fail. These cases will be marked as such in the parametrize decorator.
The default is None.
Returns
-------
p : pytest.mark.parametrize
Decorator for parametrizing test function.
"""
if xf_labels is None:
xf_labels = []
data = [ case if case[0] not in xf_labels
else pytest.param(*case, marks=pytest.mark.xfail)
for case in test_data ]
p = pytest.mark.parametrize(arg_names, data)
return p
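# Editor's usage sketch (names below are illustrative, not part of the test suite):
# test_data = [('case_a', 1, 2), ('case_b', 5, 4)]
#
# @osipi_parametrize('label, x, y', test_data, xf_labels=['case_b'])
# def test_less_than(label, x, y):
#     assert x < y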
``` |
{
"source": "jkalloor3/bqskit",
"score": 3
} |
#### File: bqskit/compiler/compiler.py
```python
from __future__ import annotations
import logging
import uuid
from typing import Any
from dask.distributed import Client
from dask.distributed import Future
from bqskit.compiler.executor import Executor
from bqskit.compiler.task import CompilationTask
from bqskit.ir.circuit import Circuit
_logger = logging.getLogger(__name__)
class Compiler:
"""
The BQSKit compiler class.
A compiler is responsible for accepting and managing compilation tasks.
The compiler class spins up a Dask execution environment, which
compilation tasks can then access to parallelize their operations.
The compiler is implemented as a context manager and it is recommended
to use it as one.
Examples:
>>> with Compiler() as compiler:
... circuit = compiler.compile(task)
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""
Construct a Compiler object.
Notes:
All arguments are passed directly to Dask. You can use
these to connect to and configure a Dask cluster.
"""
if 'silence_logs' not in kwargs:
kwargs['silence_logs'] = logging.getLogger('bqskit').level
self.client = Client(*args, **kwargs)
self.tasks: dict[uuid.UUID, Future] = {}
_logger.info('Started compiler process.')
def __enter__(self) -> Compiler:
"""Enter a context for this compiler."""
return self
def __exit__(self, type: Any, value: Any, traceback: Any) -> None:
"""Shutdown compiler."""
self.close()
def __del__(self) -> None:
"""Shutdown compiler."""
self.close()
def close(self) -> None:
"""Shutdown compiler."""
try:
self.client.close()
self.tasks = {}
_logger.info('Stopped compiler process.')
except (AttributeError, TypeError):
pass
def submit(self, task: CompilationTask) -> None:
"""Submit a CompilationTask to the Compiler."""
executor = self.client.scatter(Executor(task))
future = self.client.submit(Executor.run, executor, pure=False)
self.tasks[task.task_id] = future
_logger.info('Submitted task: %s' % task.task_id)
def status(self, task: CompilationTask) -> str:
"""Retrieve the status of the specified CompilationTask."""
return self.tasks[task.task_id].status
def result(self, task: CompilationTask) -> Circuit:
"""Block until the CompilationTask is finished, return its result."""
circ = self.tasks[task.task_id].result()[0]
return circ
def cancel(self, task: CompilationTask) -> None:
"""Remove a task from the compiler's workqueue."""
self.client.cancel(self.tasks[task.task_id])
_logger.info('Cancelled task: %s' % task.task_id)
def compile(self, task: CompilationTask) -> Circuit:
"""Submit and execute the CompilationTask, block until its done."""
_logger.info('Compiling task: %s' % task.task_id)
self.submit(task)
result = self.result(task)
return result
def analyze(self, task: CompilationTask, key: str) -> Any:
"""Gather the value associated with `key` in the task's data."""
if task.task_id not in self.tasks:
self.submit(task)
return self.tasks[task.task_id].result()[1][key]
```
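Editor's note: a sketch of the asynchronous submit/status/result flow using only the methods defined above; constructing the `CompilationTask` itself is assumed.
```python
from bqskit.compiler.compiler import Compiler

def compile_async(task):
    """Submit `task`, poll its status once, then block for the result."""
    with Compiler() as compiler:
        compiler.submit(task)
        print(compiler.status(task))   # Dask future status, e.g. 'pending'
        return compiler.result(task)   # blocks until the task is finished
```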
#### File: bqskit/compiler/machine.py
```python
from __future__ import annotations
import itertools as it
from functools import lru_cache
from typing import Iterable
from bqskit.ir.location import CircuitLocation
from bqskit.ir.location import CircuitLocationLike
from bqskit.utils.typing import is_integer
from bqskit.utils.typing import is_valid_coupling_graph
class MachineModel:
"""A model of a quantum processing unit's connectivity."""
def __init__(
self,
num_qudits: int,
coupling_graph: Iterable[tuple[int, int]] | None = None,
) -> None:
"""
MachineModel Constructor.
Args:
num_qudits (int): The total number of qudits in the machine.
coupling_graph (Iterable[tuple[int, int]] | None): A coupling
graph describing which pairs of qudits can interact.
Given as an undirected edge set. If left as None, then
an all-to-all coupling graph is used as a default.
(Default: None)
Raises:
ValueError: If `num_qudits` is nonpositive.
"""
if not is_integer(num_qudits):
raise TypeError(
f'Expected integer num_qudits, got {type(num_qudits)}.',
)
if num_qudits <= 0:
raise ValueError(f'Expected positive num_qudits, got {num_qudits}.')
if coupling_graph is None:
coupling_graph = set(it.combinations(range(num_qudits), 2))
if not is_valid_coupling_graph(coupling_graph, num_qudits):
raise TypeError('Invalid coupling graph.')
self.coupling_graph = set(coupling_graph)
self.num_qudits = num_qudits
self._adjacency_list: list[list[int]] = [[] for _ in range(num_qudits)]
for q0, q1 in self.coupling_graph:
self._adjacency_list[q0].append(q1)
self._adjacency_list[q1].append(q0)
@lru_cache(maxsize=None)
def get_locations(self, block_size: int) -> list[CircuitLocation]:
"""
Returns a list of locations that complies with the machine.
Each location describes a valid spot for a `block_size`-sized gate,
so the number of qudits in each location is `block_size`.
A location is only included if each pair of qudits is directly
connected or connected through other qudits in the location.
Args:
block_size (int): The size of each location in the final list.
Returns:
list[CircuitLocation]: The locations compliant with the machine.
Raises:
ValueError: If `block_size` is nonpositive or too large.
Notes:
Does a breadth first search on all pairs of qudits, keeps paths
that have length equal to block_size. Note that the coupling map
is assumed to be undirected.
"""
if not is_integer(block_size):
raise TypeError(
f'Expected integer for block_size, got {type(block_size)}',
)
if block_size > self.num_qudits:
raise ValueError(
'The block_size is too large; '
f'expected <= {self.num_qudits}, got {block_size}.',
)
if block_size <= 0:
raise ValueError(f'Nonpositive block_size; got {block_size}.')
locations: set[CircuitLocation] = set()
for qudit in range(self.num_qudits):
# Get every valid set containing `qudit` with size == block_size
self._location_search(locations, set(), qudit, block_size)
return list(locations)
def _location_search(
self,
locations: set[CircuitLocation],
path: set[int],
vertex: int,
limit: int,
) -> None:
"""
Add paths with length equal to limit to the `locations` set.
Args:
locations (set[CircuitLocation]): A list that contains all paths
found so far of length equal to `limit`.
path (set[int]): The qudit vertices currently included in
the path.
vertex (int): The vertex in the graph currently being examined.
limit (int): The desired length of paths in the `locations`
list.
"""
if vertex in path:
return
curr_path = path.copy()
curr_path.add(vertex)
if len(curr_path) == limit:
locations.add(CircuitLocation(list(curr_path)))
return
frontier: set[int] = {
qudit
for node in curr_path
for qudit in self._adjacency_list[node]
if qudit not in curr_path
}
for neighbor in frontier:
self._location_search(locations, curr_path, neighbor, limit)
def get_subgraph(
self,
location: CircuitLocationLike,
renumbering: dict[int, int] | None = None,
) -> list[tuple[int, int]]:
"""Returns the sub_coupling_graph with qudits in `location`."""
if not CircuitLocation.is_location(location, self.num_qudits):
raise TypeError('Invalid location.')
location = CircuitLocation(location)
if renumbering is None:
renumbering = {x: x for x in range(self.num_qudits)}
subgraph = []
for q0, q1 in self.coupling_graph:
if q0 in location and q1 in location:
subgraph.append((renumbering[q0], renumbering[q1]))
return subgraph
```
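Editor's note: a sketch of a four-qudit line topology and the connected three-qudit blocks it admits.
```python
from bqskit.compiler.machine import MachineModel

model = MachineModel(4, [(0, 1), (1, 2), (2, 3)])
for loc in model.get_locations(3):
    print(loc)   # the two connected triples: (0, 1, 2) and (1, 2, 3)
```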
#### File: exec/runners/ibmq.py
```python
from __future__ import annotations
from qiskit import QuantumCircuit
from qiskit.providers.ibmq import IBMQBackend
from bqskit.exec.results import RunnerResults
from bqskit.exec.runner import CircuitRunner
from bqskit.ir.circuit import Circuit
class IBMQRunner(CircuitRunner):
"""Simulate a circuit."""
def __init__(self, backend: IBMQBackend) -> None:
"""Setup an IBMQRunner to execute circuits on `backend`."""
self.backend = backend
def run(self, circuit: Circuit) -> RunnerResults:
"""Execute the circuit, see CircuitRunner.run for more info."""
# 1. Check circuit and self.backend are compatible
# TODO
# 2. Convert to Qiskit IR
qiskit_circ = QuantumCircuit.from_qasm_str(circuit.to('qasm'))
qiskit_circ.measure_all()
# 3. Run circuit
result = self.backend.run(qiskit_circ).result()
shots = result.results[0].shots
probs = [0.0 for i in range(2 ** circuit.num_qudits)]
for bit_str, count in result.get_counts().items():
probs[int(bit_str, 2)] = count / shots
return RunnerResults(circuit.num_qudits, circuit.radixes, probs)
```
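Editor's note: a hedged sketch of wiring the runner to a backend; it requires saved IBMQ credentials, the backend name is illustrative, and `circuit` is assumed to be an existing bqskit `Circuit`.
```python
from qiskit import IBMQ
from bqskit.exec.runners.ibmq import IBMQRunner

provider = IBMQ.load_account()                          # needs a saved IBMQ account
backend = provider.get_backend('ibmq_qasm_simulator')   # illustrative backend name
results = IBMQRunner(backend).run(circuit)              # `circuit`: a bqskit Circuit
print(results)
```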
#### File: gates/parameterized/crz.py
```python
from __future__ import annotations
import numpy as np
import numpy.typing as npt
from bqskit.ir.gates.qubitgate import QubitGate
from bqskit.qis.unitary.differentiable import DifferentiableUnitary
from bqskit.qis.unitary.unitary import RealVector
from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix
from bqskit.utils.cachedclass import CachedClass
class CRZGate(
QubitGate,
DifferentiableUnitary,
CachedClass,
):
"""
A gate representing a controlled Z rotation.
It is given by the following parameterized unitary:
.. math::
\\begin{pmatrix}
1 & 0 & 0 & 0 \\\\
0 & 1 & 0 & 0 \\\\
0 & 0 & \\exp({-i\\frac{\\theta}{2}}) & 0 \\\\
0 & 0 & 0 & \\exp({i\\frac{\\theta}{2}}) \\\\
\\end{pmatrix}
"""
_num_qudits = 2
_num_params = 1
_qasm_name = 'crz'
def get_unitary(self, params: RealVector = []) -> UnitaryMatrix:
"""Return the unitary for this gate, see :class:`Unitary` for more."""
self.check_parameters(params)
pos = np.exp(1j * params[0] / 2)
neg = np.exp(-1j * params[0] / 2)
return UnitaryMatrix(
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, neg, 0],
[0, 0, 0, pos],
],
)
def get_grad(self, params: RealVector = []) -> npt.NDArray[np.complex128]:
"""
Return the gradient for this gate.
See :class:`DifferentiableUnitary` for more info.
"""
self.check_parameters(params)
dpos = 1j / 2 * np.exp(1j * params[0] / 2)
dneg = -1j / 2 * np.exp(-1j * params[0] / 2)
return np.array(
[
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, dneg, 0],
[0, 0, 0, dpos],
],
], dtype=np.complex128,
)
```
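Editor's note: a quick numerical check of the unitary above; at theta = pi the controlled diagonal entries pick up phases of -i and +i.
```python
import numpy as np
from bqskit.ir.gates.parameterized.crz import CRZGate

u = CRZGate().get_unitary([np.pi])
print(np.round(np.array(u), 3))   # diag(1, 1, -1j, 1j)
```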
#### File: bqskit/ir/interval.py
```python
from __future__ import annotations
import logging
from typing import Any
from typing import Iterator
from typing import Tuple
from typing import Union
from typing_extensions import TypeGuard
from bqskit.utils.typing import is_integer
_logger = logging.getLogger(__name__)
class CycleInterval(Tuple[int, int]):
"""
The CycleInterval class.
Represents an inclusive range of cycles in a given circuit.
"""
def __new__(
cls,
lower_or_tuple: int | tuple[int, int],
upper: int | None = None,
) -> CycleInterval:
"""
CycleInterval Constructor.
Allows constructing a CycleInterval with either a tuple of ints
or two ints.
Args:
lower_or_tuple (int | tuple[int, int]): Either the lower
bound for the interval or the tuple of lower and upper
bounds.
upper (int | None): The upper bound for the interval. If a
tuple is passed in for `lower_or_tuple` then this should
be None.
Returns:
(CycleInterval): The constructed CycleInterval.
Raises:
ValueError: If `lower_or_tuple` is a tuple and 'upper' is
not None, or if `lower_or_tuple` is an integer and
`upper` is None.
ValueError: If the lower bound is greater than the upper bound.
ValueError: If either bound is negative.
Notes:
The lower and upper bounds are inclusive.
"""
if upper is not None and not is_integer(upper):
raise TypeError(
f'Expected int or None for upper, got {type(upper)}.',
)
if isinstance(lower_or_tuple, tuple):
if not CycleInterval.is_interval(lower_or_tuple):
raise TypeError('Expected two integer arguments.')
if upper is not None:
raise ValueError('Unable to handle extra argument.')
lower = lower_or_tuple[0]
upper = lower_or_tuple[1]
elif is_integer(lower_or_tuple) and is_integer(upper):
lower = lower_or_tuple
elif is_integer(lower_or_tuple) and upper is None:
raise ValueError('Expected two integer arguments.')
else:
raise TypeError('Expected two integer arguments.')
if lower > upper:
raise ValueError(
f'Expected lower to be <= upper, got {lower} <= {upper}.',
)
if lower < 0:
raise ValueError(
f'Expected non-negative integers, got {lower} and {upper}.',
)
return super().__new__(cls, (lower, upper)) # type: ignore
@property
def lower(self) -> int:
"""The interval's inclusive lower bound."""
return self[0]
@property
def upper(self) -> int:
"""The interval's inclusive upper bound."""
return self[1]
@property
def indices(self) -> list[int]:
"""The indices contained within the interval."""
return list(range(self.lower, self.upper + 1))
def __contains__(self, cycle_index: object) -> bool:
"""Return true if `cycle_index` is inside this interval."""
return self.lower <= cycle_index <= self.upper # type: ignore
def __iter__(self) -> Iterator[int]:
"""Return an iterator for all indices contained in the interval."""
return range(self.lower, self.upper + 1).__iter__()
def __len__(self) -> int:
"""The length of the interval."""
return self.upper - self.lower + 1
def overlaps(self, other: IntervalLike) -> bool:
"""Return true if `other` overlaps with this interval."""
if not CycleInterval.is_interval(other):
raise TypeError(f'Expected CycleInterval, got {type(other)}.')
other = CycleInterval(other)
return self.lower <= other.upper and self.upper >= other.lower
def intersection(self, other: IntervalLike) -> CycleInterval:
"""Return the range defined by both `self` and `other` interval."""
if not CycleInterval.is_interval(other):
raise TypeError(f'Expected CycleInterval, got {type(other)}.')
other = CycleInterval(other)
if not self.overlaps(other):
raise ValueError('Empty intersection in interval.')
return CycleInterval(
max(self.lower, other.lower),
min(self.upper, other.upper),
)
def union(self, other: IntervalLike) -> CycleInterval:
"""Return the range defined by `self` or `other` interval."""
if not CycleInterval.is_interval(other):
raise TypeError(f'Expected CycleInterval, got {type(other)}.')
other = CycleInterval(other)
if not self.overlaps(other) and (
self.upper + 1 != other[0]
and self.lower - 1 != other[1]
):
raise ValueError('Union would lead to invalid interval.')
return CycleInterval(
min(self.lower, other.lower),
max(self.upper, other.upper),
)
def __lt__(self, other: tuple[int, ...]) -> bool:
"""
Return true if `self` comes before `other`.
The less than operator defines a partial ordering.
"""
if CycleInterval.is_interval(other):
return self.upper < other[0]
return NotImplemented
@staticmethod
def is_interval(interval: Any) -> TypeGuard[IntervalLike]:
"""Return true if `interval` is a IntervalLike."""
if isinstance(interval, CycleInterval):
return True
if not isinstance(interval, tuple):
_logger.debug('Bounds is not a tuple.')
return False
if len(interval) != 2:
_logger.debug(
'Expected interval to contain two values'
f', got {len(interval)}.',
)
return False
if not is_integer(interval[0]):
_logger.debug(
'Expected integer values in interval'
f', got {type(interval[0])}.',
)
return False
if not is_integer(interval[1]):
_logger.debug(
'Expected integer values in interval'
f', got {type(interval[1])}.',
)
return False
return True
def __repr__(self) -> str:
"""Return a string representation of the interval."""
return f'Interval(lower={self.lower}, upper={self.upper})'
IntervalLike = Union[Tuple[int, int], CycleInterval]
```
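Editor's note: a sketch of the interval algebra defined above, using only the constructors and methods shown.
```python
from bqskit.ir.interval import CycleInterval

a = CycleInterval(2, 5)
b = CycleInterval((4, 8))
print(a.overlaps(b))        # True
print(a.intersection(b))    # Interval(lower=4, upper=5)
print(a.union(b))           # Interval(lower=2, upper=8)
print(3 in a, len(b))       # True 5
```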
#### File: ir/lang/language.py
```python
from __future__ import annotations
import abc
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from bqskit.ir.circuit import Circuit
class LangException(Exception):
"""Exceptions related to language encoding and decoding."""
class Language(abc.ABC):
"""The Language base class."""
@abc.abstractmethod
def encode(self, circuit: Circuit) -> str:
"""Write `circuit` in this language."""
@abc.abstractmethod
def decode(self, source: str) -> Circuit:
"""Parse `source` into a circuit."""
```
#### File: lang/qasm2/qasm2.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING
from bqskit.ir.lang.language import LangException
from bqskit.ir.lang.language import Language
from bqskit.ir.lang.qasm2.parser import parse
from bqskit.ir.lang.qasm2.visitor import OPENQASMVisitor # type: ignore
if TYPE_CHECKING:
from bqskit.ir.circuit import Circuit
class OPENQASM2Language(Language):
"""The OPENQASM2Language class."""
def encode(self, circuit: Circuit) -> str:
"""Write `circuit` in this language."""
if not circuit.is_qubit_only():
raise LangException('Only qubit circuits can be written to qasm.')
source = "OPENQASM 2.0;\ninclude \"qelib1.inc\";\n"
source += f'qreg q[{circuit.num_qudits}];\n'
for gate in circuit.gate_set:
source += gate.get_qasm_gate_def()
for op in circuit:
source += op.get_qasm()
return source
def decode(self, source: str) -> Circuit:
"""Parse `source` into a circuit."""
tree = parse(source)
visitor = OPENQASMVisitor()
visitor.visit(tree)
return visitor.get_circuit()
```
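Editor's note: an encode/decode round-trip sketch; the `CNOTGate` import path and `append_gate` call are assumed from the wider bqskit package.
```python
from bqskit.ir.circuit import Circuit
from bqskit.ir.gates import CNOTGate        # import path assumed
from bqskit.ir.lang.qasm2.qasm2 import OPENQASM2Language

circ = Circuit(2)
circ.append_gate(CNOTGate(), [0, 1])
qasm = OPENQASM2Language().encode(circ)
print(qasm)
recovered = OPENQASM2Language().decode(qasm)
print(recovered.num_qudits)                 # 2
```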
#### File: passes/control/dowhileloop.py
```python
from __future__ import annotations
import logging
from typing import Any
from typing import Sequence
from bqskit.compiler.basepass import BasePass
from bqskit.ir.circuit import Circuit
from bqskit.passes.control.predicate import PassPredicate
from bqskit.utils.typing import is_sequence
_logger = logging.getLogger(__name__)
class DoWhileLoopPass(BasePass):
"""
The DoWhileLoopPass class.
This is a control pass that executes a pass and then conditionally executes
it again in a loop.
"""
def __init__(
self,
condition: PassPredicate,
loop_body: BasePass | Sequence[BasePass],
) -> None:
"""
Construct a DoWhileLoopPass.
Args:
condition (PassPredicate): The condition checked.
loop_body (BasePass | Sequence[BasePass]): The pass or passes
to execute while `condition` is true.
Raises:
ValueError: If a Sequence[BasePass] is given, but it is empty.
"""
if not isinstance(condition, PassPredicate):
raise TypeError('Expected PassPredicate, got %s.' % type(condition))
if not is_sequence(loop_body) and not isinstance(loop_body, BasePass):
raise TypeError(
'Expected Pass or sequence of Passes, got %s.'
% type(loop_body),
)
if is_sequence(loop_body):
truth_list = [isinstance(elem, BasePass) for elem in loop_body]
if not all(truth_list):
raise TypeError(
'Expected Pass or sequence of Passes, got %s.'
% type(loop_body[truth_list.index(False)]),
)
if len(loop_body) == 0:
raise ValueError('Expected at least one pass.')
self.condition = condition
self.loop_body = loop_body if is_sequence(loop_body) else [loop_body]
def run(self, circuit: Circuit, data: dict[str, Any] = {}) -> None:
"""Perform the pass's operation, see :class:`BasePass` for more."""
# Perform Work
_logger.debug('Loop body executing...')
for loop_pass in self.loop_body:
loop_pass.run(circuit, data)
while self.condition(circuit, data):
_logger.debug('Loop body executing...')
for loop_pass in self.loop_body:
loop_pass.run(circuit, data)
```
#### File: passes/control/predicate.py
```python
from __future__ import annotations
import abc
from typing import Any
from bqskit.ir.circuit import Circuit
class PassPredicate(abc.ABC):
"""
The PassPredicate abstract base class.
A PassPredicate implements the :func:`get_truth_value` method, which is
called from control passes to determine the flow of execution.
"""
@abc.abstractmethod
def get_truth_value(self, circuit: Circuit, data: dict[str, Any]) -> bool:
"""Call this predicate and retrieve the truth value result."""
def __call__(self, circuit: Circuit, data: dict[str, Any]) -> bool:
"""Call this predicate and retrieve the truth value result."""
if not isinstance(circuit, Circuit):
raise TypeError('Expected Circuit, got %s.' % type(circuit))
if not isinstance(data, dict):
raise TypeError('Expected dictionary, got %s.' % type(data))
return self.get_truth_value(circuit, data)
```
#### File: control/predicates/notpredicate.py
```python
from __future__ import annotations
import logging
from typing import Any
from bqskit.ir.circuit import Circuit
from bqskit.passes.control.predicate import PassPredicate
_logger = logging.getLogger(__name__)
class NotPredicate(PassPredicate):
"""
The NotPredicate class.
The NotPredicate takes a predicate and always returns the opposite truth
value.
"""
def __init__(self, predicate: PassPredicate) -> None:
"""
Construct a NotPredicate.
Args:
predicate (PassPredicate): The predicate to invert.
"""
if not isinstance(predicate, PassPredicate):
raise TypeError('Expected PassPredicate, got %s.' % type(predicate))
self.predicate = predicate
def get_truth_value(self, circuit: Circuit, data: dict[str, Any]) -> bool:
"""Call this predicate, see :class:`PassPredicate` for more info."""
return not self.predicate(circuit, data)
```
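Editor's note: a sketch wiring the control classes above together; `WidthPredicate` and `NoOpPass` are illustrative names defined here, not bqskit classes.
```python
from typing import Any
from bqskit.compiler.basepass import BasePass
from bqskit.ir.circuit import Circuit
from bqskit.passes.control.dowhileloop import DoWhileLoopPass
from bqskit.passes.control.predicate import PassPredicate
from bqskit.passes.control.predicates.notpredicate import NotPredicate

class WidthPredicate(PassPredicate):
    """True when the circuit acts on fewer than four qudits."""
    def get_truth_value(self, circuit: Circuit, data: dict[str, Any]) -> bool:
        return circuit.num_qudits < 4

class NoOpPass(BasePass):
    """A body pass that leaves the circuit unchanged."""
    def run(self, circuit: Circuit, data: dict[str, Any] = {}) -> None:
        pass

# Run the body at least once, then repeat while the circuit is *not* narrow.
loop = DoWhileLoopPass(NotPredicate(WidthPredicate()), NoOpPass())
```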
#### File: passes/partitioning/quick.py
```python
from __future__ import annotations
import heapq
import logging
from typing import Any
from bqskit.compiler.basepass import BasePass
from bqskit.ir.circuit import Circuit
from bqskit.ir.gates.circuitgate import CircuitGate
from bqskit.ir.region import CircuitRegion
from bqskit.utils.typing import is_integer
_logger = logging.getLogger(__name__)
class QuickPartitioner(BasePass):
"""
The QuickPartitioner Pass.
This pass forms partitions in the circuit by iterating over the operations
in a topological order and binning them into blocks.
"""
def __init__(
self,
block_size: int = 3,
) -> None:
"""
Construct a QuickPartitioner.
Args:
block_size (int): Maximum size of partitioned blocks.
(Default: 3)
Raises:
ValueError: If `block_size` is less than 2.
"""
if not is_integer(block_size):
raise TypeError(
f'Expected integer for block_size, got {type(block_size)}.',
)
if block_size < 2:
raise ValueError(
f'Expected block_size to be at least 2, got {block_size}.',
)
self.block_size = block_size
def run(self, circuit: Circuit, data: dict[str, Any] = {}) -> None:
"""
Partition gates in a circuit into a series of CircuitGates.
Args:
circuit (Circuit): Circuit to be partitioned.
data (dict[str,Any]): Optional data unique to specific run.
"""
# Number of qudits in the circuit
num_qudits = circuit.num_qudits
# If block size > circuit size, return the circuit as a block
if self.block_size > num_qudits:
_logger.warning(
'Configured block size is greater than circuit size; '
'blocking entire circuit.',
)
circuit.fold({
qudit_index: (0, circuit.num_cycles)
for qudit_index in range(circuit.num_qudits)
})
return
# List to hold the active blocks
active_blocks: Any = []
# List to hold the finished blocks
finished_blocks: Any = {}
block_id = 0
# Active qudit cycles and block-qudit dependencies
qudit_actives: Any = [{} for _ in range(num_qudits)]
qudit_dependencies: Any = [{} for _ in range(num_qudits)]
# The partitioned circuit
partitioned_circuit = Circuit(num_qudits, circuit.radixes)
# For each cycle, operation in topological order
for cycle, op in circuit.operations_with_cycles():
# Get the qudits of the operation
qudits = op.location._location
# Update the active locations of the qudits
for qudit in qudits:
qudit_actives[qudit][cycle] = None
# Compile a list of admissible blocks out of the
# active blocks for the operation
admissible_blocks = []
for index, block in enumerate(active_blocks):
if all([qudit not in block[-1] for qudit in qudits]):
admissible_blocks.append(index)
# Boolean indicator to capture if an active block
# has been found for the operation
found = False
# For all admissible blocks, check if all operation
# qudits are in the block. If such a block is found,
# update the upper region bound for the corresponding
# qudits, and raise the found boolean
for block in [active_blocks[index] for index in admissible_blocks]:
if all([qudit in block for qudit in qudits]):
for qudit in qudits:
block[qudit][1] = cycle
found = True
break
updated_qudits = set()
# If such a block is not found
if not found:
# For all admissible blocks, check if the operation
# qudits can be added to the block without breaching
# the size limit. If such a block is found, add the
# new qudits, update the region bounds, check if any
# blocks are finished, and raise the found boolean
for block in [
active_blocks[index]
for index in admissible_blocks
]:
if len(set(list(qudits) + list(block.keys()))) - \
1 <= self.block_size:
for qudit in qudits:
if qudit not in block:
block[qudit] = [cycle, cycle]
else:
block[qudit][1] = cycle
block_id, updated_qudits = self.compute_finished_blocks(
block, qudits, active_blocks,
finished_blocks, block_id, qudit_dependencies, cycle, num_qudits, # noqa
)
found = True
break
# If a block is still not found, check if any blocks are finished
# with the new operation qudits, create a new block, and add it
# to the list of active blocks
if not found:
block_id, updated_qudits = self.compute_finished_blocks(
None, qudits, active_blocks,
finished_blocks, block_id, qudit_dependencies, cycle, num_qudits, # noqa
)
block = {qudit: [cycle, cycle] for qudit in qudits}
block[-1] = set()
active_blocks.append(block)
# Where the active qudit cycles keep getting updated
while updated_qudits:
# Check if any blocks corresponding to updated qudits
# are eligible to be added to the circuit. If eligible,
# update actives, dependencies, and updated qudits.
final_regions = []
new_updated_qudits = set()
for qudit in updated_qudits:
blk_ids = list(qudit_dependencies[qudit].keys())
for blk_id in blk_ids:
num_passed = 0
for qdt, bounds in finished_blocks[blk_id].items():
if len(qudit_actives[qdt]) == 0:
num_passed += 1
elif next(iter(qudit_actives[qdt])) == bounds[0]:
num_passed += 1
if num_passed == len(finished_blocks[blk_id]):
for qdt, bounds in finished_blocks[blk_id].items():
for cycle in range(bounds[0], bounds[1] + 1):
if cycle in qudit_actives[qdt]:
del qudit_actives[qdt][cycle]
del qudit_dependencies[qdt][blk_id]
new_updated_qudits.add(qdt)
final_regions.append(
CircuitRegion(
{qdt: (bounds[0], bounds[1]) for qdt, bounds in finished_blocks[blk_id].items()}, # noqa
),
)
del finished_blocks[blk_id]
# If there are any regions
if final_regions:
# Sort the regions if multiple exist
if len(final_regions) > 1:
final_regions = self.topo_sort(final_regions)
# Fold the final regions into a partitioned circuit
for region in final_regions:
region = circuit.downsize_region(region)
cgc = circuit.get_slice(region.points)
partitioned_circuit.append_gate(CircuitGate(cgc, True), sorted(region.keys()), list(cgc.params))
updated_qudits = new_updated_qudits
# Convert all remaining finished blocks and active blocks
# into circuit regions
final_regions = []
for block in finished_blocks.values():
final_regions.append(
CircuitRegion(
{qdt: (bounds[0], bounds[1]) for qdt, bounds in block.items()}, # noqa
),
)
for block in active_blocks:
del block[-1]
final_regions.append(
CircuitRegion(
{qdt: (bounds[0], bounds[1]) for qdt, bounds in block.items()}, # noqa
),
)
# If there are any regions
if final_regions:
# Sort the regions if multiple exist
if len(final_regions) > 1:
final_regions = self.topo_sort(final_regions)
# Fold the final regions into a partitioned circuit
for region in final_regions:
region = circuit.downsize_region(region)
cgc = circuit.get_slice(region.points)
partitioned_circuit.append_gate(CircuitGate(cgc, True), sorted(region.keys()), list(cgc.params))
# Copy the partitioned circuit to the original circuit
circuit.become(partitioned_circuit)
def compute_finished_blocks( # type: ignore
self, block, qudits, active_blocks, finished_blocks,
block_id, qudit_dependencies, cycle, num_qudits,
):
"""Add blocks with all inactive qudits to the finished_blocks list and
remove them from the active_blocks list."""
# Compile the qudits from the new operation,
# the active qudits of the block being updated,
# and the qudits in the block's inadmissible list
qudits = set(qudits)
if block:
qudits.update([qudit for qudit in block if qudit != -1])
qudits.update(block[-1])
remove_blocks = []
# For all active blocks
for active_block in active_blocks:
# If the active block is different than the block being updated
if active_block != block:
# If any of the qudits are in the active block or its
# inadmissible list, then add those qudits to the
# inadmissible list of the active block
if any([
qudit in active_block or qudit in active_block[-1]
for qudit in qudits
]):
active_block[-1].update(qudits)
# If the active block has reached its maximum size
# and/or all of its qudits are inadmissible,
# then add it to the remove list
if (
len(active_block) - 1 == self.block_size and # noqa
all([
qudit in active_block[-1]
for qudit in active_block if qudit != -1
])
) or (
cycle - max(
active_block[qudit][1]
for qudit in active_block
if qudit != -1
) > 200
) or len(active_block[-1]) == num_qudits:
remove_blocks.append(active_block)
# Remove all blocks in the remove list from the active list
# and add them to the finished blocks list after deleting
# their inadmissible list and update qudit dependencies
updated_qudits = set()
for remove_block in remove_blocks:
del remove_block[-1]
finished_blocks[block_id] = remove_block
for qudit in remove_block:
qudit_dependencies[qudit][block_id] = None
updated_qudits.add(qudit)
active_blocks.remove(remove_block)
block_id += 1
return block_id, updated_qudits
def topo_sort(self, regions): # type: ignore
"""Topologically sort circuit regions."""
# Number of regions in the circuit
num_regions = len(regions)
# For each region, generate the number of in edges
# and the list of all out edges
in_edges = [0] * num_regions
out_edges: Any = [[] for _ in range(num_regions)]
for i in range(num_regions - 1):
for j in range(i + 1, num_regions):
dependency = regions[i].dependency(regions[j])
if dependency == 1:
in_edges[i] += 1
out_edges[j].append(i)
elif dependency == -1:
in_edges[j] += 1
out_edges[i].append(j)
# Convert the list of number of in edges in to a min-heap
in_edges = [[num_edges, i] for i, num_edges in enumerate(in_edges)]
heapq.heapify(in_edges)
index = 0
sorted_regions = []
# While there are regions remaining to be sorted
while index < num_regions:
# Select the regions with zero remaining in edges
selections = []
while in_edges and not in_edges[0][0]:
selections.append(heapq.heappop(in_edges))
if not selections:
raise RuntimeError('Unable to topologically sort regions.')
# Add the regions to the sorted list
for region in selections:
sorted_regions.append(regions[region[1]])
index += 1
# Remove the regions from all other regions' in edges counts
for i in range(len(in_edges)):
in_edges[i][0] -= sum(
in_edges[i][1] in out_edges[region[1]]
for region in selections
)
# Convert in edges into a min-heap
heapq.heapify(in_edges)
return sorted_regions
```
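The `topo_sort` helper above is a Kahn-style topological sort driven by a min-heap of remaining in-degrees. Below is a minimal standalone sketch of the same idea; the `dependency` callable stands in for `CircuitRegion.dependency`, and the interval data is invented purely for illustration.
```python
from __future__ import annotations

import heapq


def topo_sort_items(items, dependency):
    """Order items so that everything an item depends on comes first.

    `dependency(a, b)` returns 1 if a depends on b, -1 if b depends on a,
    and 0 if the two are independent (mirroring CircuitRegion.dependency).
    """
    n = len(items)
    in_edges = [0] * n
    out_edges = [[] for _ in range(n)]
    for i in range(n - 1):
        for j in range(i + 1, n):
            dep = dependency(items[i], items[j])
            if dep == 1:  # items[i] depends on items[j]
                in_edges[i] += 1
                out_edges[j].append(i)
            elif dep == -1:  # items[j] depends on items[i]
                in_edges[j] += 1
                out_edges[i].append(j)
    heap = [[count, i] for i, count in enumerate(in_edges)]
    heapq.heapify(heap)
    order = []
    while heap:
        # Pop every item whose remaining in-degree is zero.
        selections = []
        while heap and heap[0][0] == 0:
            selections.append(heapq.heappop(heap))
        if not selections:
            raise RuntimeError('Cycle detected; cannot topologically sort.')
        for _, idx in selections:
            order.append(items[idx])
        # Decrement in-degrees of items that depended on the selections.
        for entry in heap:
            entry[0] -= sum(entry[1] in out_edges[idx] for _, idx in selections)
        heapq.heapify(heap)
    return order


def interval_dep(a, b):
    # Later intervals depend on earlier ones finishing.
    if a[0] > b[1]:
        return 1
    if b[0] > a[1]:
        return -1
    return 0


intervals = [(4, 6), (0, 1), (2, 3)]
print(topo_sort_items(intervals, interval_dep))  # [(0, 1), (2, 3), (4, 6)]
```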
#### File: search/heuristics/greedy.py
```python
from __future__ import annotations
from bqskit.ir.circuit import Circuit
from bqskit.ir.opt.cost import CostFunctionGenerator
from bqskit.ir.opt.cost import HilbertSchmidtCostGenerator
from bqskit.passes.search.heuristic import HeuristicFunction
from bqskit.qis.state.state import StateVector
from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix
class GreedyHeuristic(HeuristicFunction):
"""
The GreedyHeuristic HeuristicFunction class.
Defines a heuristic that results in greedy search. This function only looks
at the current distance of the circuit from the target. This will create a
behavior similar to depth-first search.
"""
def __init__(
self,
cost_gen: CostFunctionGenerator = HilbertSchmidtCostGenerator(),
) -> None:
"""
Construct a GreedyHeuristic Function.
Args:
cost_gen (CostFunctionGenerator): This is used to generate
cost functions used during evaluations.
"""
if not isinstance(cost_gen, CostFunctionGenerator):
raise TypeError(
'Expected CostFunctionGenerator for cost_gen, got %s.'
% type(cost_gen),
)
self.cost_gen = cost_gen
def get_value(
self,
circuit: Circuit,
target: UnitaryMatrix | StateVector,
) -> float:
"""Return the heuristic's value, see HeuristicFunction for more info."""
return self.cost_gen.calc_cost(circuit, target)
```
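As a quick sanity check of the heuristic above: scoring a circuit against its own unitary should give a value near zero, since `GreedyHeuristic` just reports the current cost. This is an illustrative sketch, and the import path is assumed from the file layout above.
```python
import numpy as np

from bqskit.ir.circuit import Circuit
from bqskit.ir.gates import CNOTGate
from bqskit.ir.gates import U3Gate
from bqskit.passes.search.heuristics.greedy import GreedyHeuristic

# Build a small parameterized circuit and give it random parameters.
circuit = Circuit(2)
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(CNOTGate(), [0, 1])
circuit.set_params(np.random.random(circuit.num_params))

# Scoring the circuit against its own unitary yields (nearly) zero.
target = circuit.get_unitary()
print(GreedyHeuristic().get_value(circuit, target))  # ~0.0
```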
#### File: passes/synthesis/synthesis.py
```python
from __future__ import annotations
import logging
from abc import abstractmethod
from typing import Any
from typing import Callable
from dask.distributed import as_completed
from dask.distributed import Client
from dask.distributed import Future
from dask.distributed import rejoin
from dask.distributed import secede
from bqskit.compiler.basepass import BasePass
from bqskit.ir.circuit import Circuit
from bqskit.qis.state.state import StateLike
from bqskit.qis.unitary.unitarymatrix import UnitaryLike
from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix
_logger = logging.getLogger(__name__)
class SynthesisPass(BasePass):
"""
SynthesisPass class.
The SynthesisPass is a base class that exposes an abstract
synthesize function. Inherit from this class and implement the
synthesize function to create a synthesis tool.
A SynthesisPass will synthesize a new circuit targeting the input
circuit's unitary.
"""
@abstractmethod
def synthesize(self, utry: UnitaryMatrix, data: dict[str, Any]) -> Circuit:
"""
Synthesis abstract method to synthesize a UnitaryMatrix into a Circuit.
Args:
utry (UnitaryMatrix): The unitary to synthesize.
data (Dict[str, Any]): Associated data for the pass.
Can be used to provide auxiliary information from
previous passes. This function should never error based
on what is in this dictionary.
Note:
This function should be self-contained and have no side effects.
"""
def run(self, circuit: Circuit, data: dict[str, Any] = {}) -> None:
"""Perform the pass's operation, see :class:`BasePass` for more."""
target_utry = circuit.get_unitary()
circuit.become(self.synthesize(target_utry, data))
def batched_instantiate(
self,
circuits: list[Circuit],
target: UnitaryLike | StateLike,
client: Client,
**kwargs: Any,
) -> list[list[Future]]:
"""
Batch instantiate `circuits`.
Args:
circuits (list[Circuit]): The circuit batch to instantiate.
target (UnitaryLike | StateLike): The instantiation target.
client (Client): The Dask client used to submit jobs.
kwargs (Any): Other keyword arguments are passed directly to
instantiate calls.
Returns:
(list[list[Future]]): The Dask Futures corresponding to the
instantiate jobs submitted. This returns a double list,
where the outer list is indexed by input circuits, and the
inner list is indexed by multistart.
"""
multistarts = 1
if 'multistarts' in kwargs:
multistarts = kwargs['multistarts']
kwargs['multistarts'] = 1
futures: list[list[Future]] = []
client.scatter(circuits)
for circuit in circuits:
futures.append([])
for i in range(multistarts):
futures[-1].append(
client.submit(
Circuit.instantiate,
circuit,
pure=False,
target=target,
**kwargs,
),
)
return futures
def gather_best_results(
self,
futures: list[list[Future]],
client: Client,
fn: Callable[..., float],
*args: Any,
**kwargs: Any,
) -> list[Circuit]:
"""
Gather best results from a `batched_instantiate` call.
Args:
futures (list[list[Future]]): The futures returned from a
`batched_instantiate` call.
client (Client): The Dask client used to submit the jobs.
fn (Callable[..., float]): The function used to sort
instantiated circuits. This should take a circuit as
the first parameter.
args (Any): Arguments passed directly to fn.
kwargs (Any): Keyword arguments passed directly to fn.
Returns:
(list[Circuit]): The resulting circuits. There is one circuit
for each future list, i.e., `len(output) == len(futures)`.
"""
score_futures: list[list[Future]] = []
for future_list in futures:
score_futures.append([])
for future in future_list:
score_futures[-1].append(
client.submit(
fn,
future,
*args,
**kwargs,
),
)
flat_score_list = []
for future_list in score_futures:
flat_score_list.extend(future_list)
secede()
client.gather(flat_score_list)
rejoin()
best_circuit_futures = []
for i, score_list in enumerate(score_futures):
scores = client.gather(score_list)
best_index = scores.index(min(scores))
best_circuit_futures.append(futures[i][best_index])
return client.gather(best_circuit_futures)
def gather_first_results(
self,
futures: list[list[Future]],
client: Client,
) -> list[Circuit]:
"""
Gather first results from a `batched_instantiate` call.
Args:
futures (list[list[Future]]): The futures returned from a
`batched_instantiate` call.
client (Client): The Dask client used to submit the jobs.
Returns:
(list[Circuit]): The resulting circuits. There is one circuit
for each future list, i.e., `len(output) == len(futures)`.
This call returns, for each circuit, the result of the first
multistart instantiate job that finishes; the remaining jobs are cancelled.
"""
all_futures = []
future_to_index_map = {}
for index, future_list in enumerate(futures):
all_futures.extend(future_list)
for future in future_list:
future_to_index_map[future] = index
secede()
circuits = []
indices_seen = set()
for future, circuit in as_completed(all_futures, with_results=True):
index = future_to_index_map[future]
if index not in indices_seen:
indices_seen.add(index)
circuits.append((index, circuit))
for other_future in futures[index]:
if not other_future.done():
client.cancel(other_future)
rejoin()
return [c for _, c in sorted(circuits, key=lambda x: x[0])]
```
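To make the abstract interface above concrete, here is a minimal sketch of a `SynthesisPass` subclass. It "synthesizes" the target as a single `VariableUnitaryGate` block and instantiates it. The class name is made up, and the assumption that `Circuit.instantiate` accepts a `target` and yields the instantiated circuit is taken from the `batched_instantiate` helper above.
```python
from __future__ import annotations

from typing import Any

from bqskit.ir.circuit import Circuit
from bqskit.ir.gates import VariableUnitaryGate
from bqskit.passes.synthesis.synthesis import SynthesisPass
from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix


class SingleBlockSynthesisPass(SynthesisPass):
    """Illustrative pass: synthesize the target as one variable block."""

    def synthesize(self, utry: UnitaryMatrix, data: dict[str, Any]) -> Circuit:
        # One VariableUnitaryGate spanning every qudit of the target.
        circuit = Circuit(utry.num_qudits, utry.radixes)
        circuit.append_gate(
            VariableUnitaryGate(utry.num_qudits, utry.radixes),
            list(range(utry.num_qudits)),
        )
        # Fit the block's parameters to the target unitary.
        return circuit.instantiate(target=utry)
```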
#### File: passes/util/conversion.py
```python
from __future__ import annotations
import logging
from typing import Any
from bqskit.compiler.basepass import BasePass
from bqskit.ir.circuit import Circuit
from bqskit.ir.gates import CircuitGate
from bqskit.ir.gates import ConstantUnitaryGate
from bqskit.ir.gates import VariableUnitaryGate
from bqskit.ir.point import CircuitPoint
_logger = logging.getLogger(__name__)
class BlockConversionPass(BasePass):
"""
Converts blocks of one type to another type.
Blocks are either described by a constant or variable unitary gate or as a
circuit gate. Often during the flow of compilation we will need to convert
them from one form to another for a future pass.
"""
def __init__(
self,
convert_target: str,
convert_variable: bool = True,
convert_constant: bool = True,
convert_circuitgates: bool = True,
):
"""
Construct a BlockConversionPass.
Args:
convert_target (str): Either `variable` or `constant`.
Blocks will be converted to the form described here. If
this is `variable`, all gates will be converted to
`VariableUnitaryGate`s. If this is `constant`, blocks
will be converted to `ConstantUnitaryGate`s. Blocks
cannot be converted to circuit gates; that can be carried
out by synthesis.
convert_variable (bool): If this is true, this will replace
VariableUnitaryGates in the circuit with ones specified
in convert_target.
convert_constant (bool): If this is true, this will replace
ConstantUnitaryGates in the circuit with ones specified
in convert_target.
convert_circuitgates (bool): If this is true, this will replace
CircuitGates in the circuit with ones specified
in convert_target. The subcircuit information captured
in the circuit gate will be lost.
"""
self.convert_variable = convert_variable
self.convert_constant = convert_constant
self.convert_circuitgates = convert_circuitgates
if convert_target == 'variable':
self.convert_target = 'variable'
self.convert_variable = False
elif convert_target == 'constant':
self.convert_target = 'constant'
self.convert_constant = False
else:
raise ValueError('Unexpected input for conversion target.')
def run(self, circuit: Circuit, data: dict[str, Any] = {}) -> None:
"""Perform the pass's operation, see :class:`BasePass` for more."""
# Variable -> Constant
if self.convert_variable and self.convert_target == 'constant':
_logger.debug('Converting variable gates to constant gates.')
for cycle, op in circuit.operations_with_cycles():
if isinstance(op.gate, VariableUnitaryGate):
cgate = ConstantUnitaryGate(op.get_unitary(), op.radixes)
point = CircuitPoint(cycle, op.location[0])
circuit.replace_gate(point, cgate, op.location)
# CircuitGates -> Constant
if self.convert_circuitgates and self.convert_target == 'constant':
_logger.debug('Converting circuit gates to constant gates.')
for cycle, op in circuit.operations_with_cycles():
if isinstance(op.gate, CircuitGate):
cgate = ConstantUnitaryGate(op.get_unitary(), op.radixes)
point = CircuitPoint(cycle, op.location[0])
circuit.replace_gate(point, cgate, op.location)
# Constant -> Variable
if self.convert_constant and self.convert_target == 'variable':
_logger.debug('Converting constant gates to variable gates.')
for cycle, op in circuit.operations_with_cycles():
if isinstance(op.gate, ConstantUnitaryGate):
params = VariableUnitaryGate.get_params(op.get_unitary())
vgate = VariableUnitaryGate(op.num_qudits, op.radixes)
point = CircuitPoint(cycle, op.location[0])
circuit.replace_gate(point, vgate, op.location, params)
# CircuitGates -> Variable
if self.convert_circuitgates and self.convert_target == 'variable':
_logger.debug('Converting circuit gates to variable gates.')
for cycle, op in circuit.operations_with_cycles():
if isinstance(op.gate, CircuitGate):
params = VariableUnitaryGate.get_params(op.get_unitary())
vgate = VariableUnitaryGate(op.num_qudits, op.radixes)
point = CircuitPoint(cycle, op.location[0])
circuit.replace_gate(point, vgate, op.location, params)
```
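A small usage sketch for `BlockConversionPass`: convert a constant block into a `VariableUnitaryGate` so a later optimization pass can tune its parameters. The import path is assumed from the file layout above.
```python
from scipy.stats import unitary_group

from bqskit.ir.circuit import Circuit
from bqskit.ir.gates import ConstantUnitaryGate
from bqskit.ir.gates import VariableUnitaryGate
from bqskit.passes.util.conversion import BlockConversionPass

# A two-qubit constant block...
circuit = Circuit(2)
circuit.append_gate(ConstantUnitaryGate(unitary_group.rvs(4)), [0, 1])

# ...rewritten as a tunable VariableUnitaryGate.
BlockConversionPass('variable').run(circuit)
assert any(isinstance(g, VariableUnitaryGate) for g in circuit.gate_set)
```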
#### File: passes/util/converttou3.py
```python
from __future__ import annotations
import logging
from typing import Any
from bqskit.compiler.basepass import BasePass
from bqskit.ir.circuit import Circuit
from bqskit.ir.gates.parameterized.pauli import PauliGate
from bqskit.ir.gates.parameterized.u3 import U3Gate
from bqskit.ir.gates.parameterized.unitary import VariableUnitaryGate
from bqskit.ir.point import CircuitPoint
_logger = logging.getLogger(__name__)
class ToU3Pass(BasePass):
"""Converts single-qubit general unitary gates to U3 Gates."""
def run(self, circuit: Circuit, data: dict[str, Any] = {}) -> None:
"""Perform the pass's operation, see :class:`BasePass` for more."""
_logger.debug('Converting single-qubit general gates to U3Gates.')
for cycle, op in circuit.operations_with_cycles():
if (
(
isinstance(op.gate, VariableUnitaryGate)
or isinstance(op.gate, PauliGate)
)
and len(op.location) == 1
and op.radixes == (2,)
):
params = U3Gate.calc_params(op.get_unitary())
point = CircuitPoint(cycle, op.location[0])
circuit.replace_gate(point, U3Gate(), op.location, params)
```
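The pass above rewrites single-qubit general blocks as `U3Gate`s. The sketch below seeds a `VariableUnitaryGate` with parameters derived from a random unitary and then runs `ToU3Pass`; the import path is assumed from the file layout above.
```python
from scipy.stats import unitary_group

from bqskit.ir.circuit import Circuit
from bqskit.ir.gates import U3Gate
from bqskit.ir.gates.parameterized.unitary import VariableUnitaryGate
from bqskit.passes.util.converttou3 import ToU3Pass

# Seed a single-qubit variable block with parameters for a random unitary.
utry = unitary_group.rvs(2)
params = VariableUnitaryGate.get_params(utry)
circuit = Circuit(1)
circuit.append_gate(VariableUnitaryGate(1), [0], params)

# The pass replaces the block with an equivalent U3Gate.
ToU3Pass().run(circuit)
assert U3Gate() in circuit.gate_set
```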
#### File: passes/util/update.py
```python
from __future__ import annotations
import logging
from typing import Any
from bqskit.compiler.basepass import BasePass
from bqskit.ir.circuit import Circuit
_logger = logging.getLogger(__name__)
class UpdateDataPass(BasePass):
"""
The UpdateDataPass class.
The UpdateDataPass adds a key-value pair to the data dictionary.
"""
def __init__(self, key: str, val: Any) -> None:
"""
Construct an UpdateDataPass.
Args:
key (str): The key to add.
val (Any): The value to associate with the key.
"""
if not isinstance(key, str):
raise TypeError('Expected string for key, got %s.' % type(key))
self.key = key
self.val = val
def run(self, circuit: Circuit, data: dict[str, Any] = {}) -> None:
"""Perform the pass's operation, see :class:`BasePass` for more."""
_logger.debug(f'Injecting {self.key}:{self.val} into the data dict.')
data[self.key] = self.val
```
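A minimal sketch showing how `UpdateDataPass` threads a value through the shared data dictionary of a pass pipeline; the import path is assumed from the file layout above.
```python
from __future__ import annotations

from typing import Any

from bqskit.ir.circuit import Circuit
from bqskit.passes.util.update import UpdateDataPass

data: dict[str, Any] = {}
circuit = Circuit(1)

# Inject a value that downstream passes can read from the data dict.
UpdateDataPass('seed', 1234).run(circuit, data)
assert data['seed'] == 1234
```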
#### File: qis/unitary/meta.py
```python
from __future__ import annotations
import abc
from typing import Any
class UnitaryMeta(abc.ABCMeta):
"""
The UnitaryMeta metaclass.
Necessary to provide isinstance checks for composed classes.
"""
def __instancecheck__(cls, instance: Any) -> bool:
"""
Check if an instance is a `Unitary` instance.
Additional checks for DifferentiableUnitary and
LocallyOptimizableUnitary. We check if the object has
the is_differentiable or is_locally_optimizable callable, an
instance method that takes no arguments and returns a bool. If the object has
the method, then it must return True for isinstance to pass.
This can be used with composed classes to implement
conditional inheritance.
"""
if cls.__name__ == 'DifferentiableUnitary':
if hasattr(instance, 'is_differentiable'):
if not instance.is_differentiable():
return False
if cls.__name__ == 'LocallyOptimizableUnitary':
if hasattr(instance, 'is_locally_optimizable'):
if not instance.is_locally_optimizable():
return False
return super().__instancecheck__(instance)
```
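The conditional-`isinstance` pattern implemented by `UnitaryMeta` can be demonstrated in isolation. The toy classes below are not bqskit classes; they only mirror the predicate-gated `__instancecheck__` idea.
```python
from __future__ import annotations

import abc
from typing import Any


class ConditionalMeta(abc.ABCMeta):
    """Toy metaclass: isinstance succeeds only if the instance agrees."""

    def __instancecheck__(cls, instance: Any) -> bool:
        if cls.__name__ == 'Differentiable':
            if hasattr(instance, 'is_differentiable'):
                if not instance.is_differentiable():
                    return False
        return super().__instancecheck__(instance)


class Unitary(metaclass=ConditionalMeta):
    pass


class Differentiable(Unitary):
    pass


class SometimesDifferentiable(Differentiable):
    def __init__(self, flag: bool) -> None:
        self.flag = flag

    def is_differentiable(self) -> bool:
        return self.flag


print(isinstance(SometimesDifferentiable(True), Differentiable))   # True
print(isinstance(SometimesDifferentiable(False), Differentiable))  # False
```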
#### File: bqskit/utils/docs.py
```python
from __future__ import annotations
import os
def building_docs() -> bool:
"""Return true if currently building documentations."""
if 'READTHEDOCS' in os.environ:
return True
if '__SPHINX_BUILD__' in os.environ:
return True
return False
```
#### File: tests/exec/test_exec.py
```python
from __future__ import annotations
from bqskit.exec.runners.sim import SimulationRunner
from bqskit.ir.circuit import Circuit
from bqskit.ir.gates import CNOTGate
from bqskit.ir.gates import HGate
def test_sim() -> None:
circuit = Circuit(2)
circuit.append_gate(HGate(), 0)
circuit.append_gate(CNOTGate(), (0, 1))
results = SimulationRunner().run(circuit)
counts = results.get_counts(1024)
assert counts[0] == 512
assert counts[1] == 0
assert counts[2] == 0
assert counts[3] == 512
```
#### File: ir/circuit/test_properties.py
```python
from __future__ import annotations
from typing import Any
import numpy as np
import pytest
from bqskit.ir.circuit import Circuit
from bqskit.ir.gate import Gate
from bqskit.ir.gates import CNOTGate
from bqskit.ir.gates import ConstantUnitaryGate
from bqskit.ir.gates import CSUMGate
from bqskit.ir.gates import HGate
from bqskit.ir.gates import TdgGate
from bqskit.ir.gates import TGate
from bqskit.ir.gates import U3Gate
from bqskit.ir.gates import XGate
from bqskit.ir.gates import ZGate
from bqskit.qis.unitary.differentiable import DifferentiableUnitary
from bqskit.utils.typing import is_integer
from bqskit.utils.typing import is_numeric
from bqskit.utils.typing import is_valid_coupling_graph
from bqskit.utils.typing import is_valid_radixes
class TestSimpleCircuit:
"""This set of tests will ensure that all circuit properties are correct for
a simple circuit."""
def test_num_params(self, simple_circuit: Circuit) -> None:
assert simple_circuit.num_params == 0
def test_radixes(self, simple_circuit: Circuit) -> None:
assert len(simple_circuit.radixes) == simple_circuit.num_qudits
assert isinstance(simple_circuit.radixes, tuple)
assert all(r == 2 for r in simple_circuit.radixes)
def test_num_qudits(self, simple_circuit: Circuit) -> None:
assert simple_circuit.num_qudits == 2
def test_dim(self, simple_circuit: Circuit) -> None:
assert simple_circuit.dim == 4
def test_is_qubit_only(self, simple_circuit: Circuit) -> None:
assert simple_circuit.is_qubit_only()
def test_is_qutrit_only(self, simple_circuit: Circuit) -> None:
assert not simple_circuit.is_qutrit_only()
def test_is_parameterized(self, simple_circuit: Circuit) -> None:
assert not simple_circuit.is_parameterized()
def test_is_constant(self, simple_circuit: Circuit) -> None:
assert simple_circuit.is_constant()
def test_num_operations(self, simple_circuit: Circuit) -> None:
assert simple_circuit.num_operations == 4
def test_num_cycles(self, simple_circuit: Circuit) -> None:
assert simple_circuit.num_cycles == 4
def test_params(self, simple_circuit: Circuit) -> None:
assert len(simple_circuit.params) == 0
assert isinstance(simple_circuit.params, np.ndarray)
def test_depth(self, simple_circuit: Circuit) -> None:
assert simple_circuit.depth == 4
def test_parallelism(self, simple_circuit: Circuit) -> None:
assert simple_circuit.parallelism == 1.5
def test_coupling_graph(self, simple_circuit: Circuit) -> None:
cgraph = simple_circuit.coupling_graph
assert isinstance(cgraph, set)
assert is_valid_coupling_graph(cgraph, 2)
assert len(cgraph) == 1
assert (0, 1) in cgraph
def test_gate_set(self, simple_circuit: Circuit) -> None:
gate_set = simple_circuit.gate_set
assert isinstance(gate_set, set)
assert len(gate_set) == 2
assert XGate() in gate_set
assert CNOTGate() in gate_set
def test_active_qudits(self, simple_circuit: Circuit) -> None:
qudits = simple_circuit.active_qudits
assert len(qudits) == simple_circuit.num_qudits
assert isinstance(qudits, list)
assert all(x in qudits for x in range(simple_circuit.num_qudits))
class TestSwapCircuit:
"""This set of tests will ensure that all circuit properties are correct for
a swap circuit."""
def test_num_params(self, swap_circuit: Circuit) -> None:
assert swap_circuit.num_params == 0
def test_radixes(self, swap_circuit: Circuit) -> None:
assert len(swap_circuit.radixes) == swap_circuit.num_qudits
assert isinstance(swap_circuit.radixes, tuple)
assert all(r == 2 for r in swap_circuit.radixes)
def test_num_qudits(self, swap_circuit: Circuit) -> None:
assert swap_circuit.num_qudits == 2
def test_dim(self, swap_circuit: Circuit) -> None:
assert swap_circuit.dim == 4
def test_is_qubit_only(self, swap_circuit: Circuit) -> None:
assert swap_circuit.is_qubit_only()
def test_is_qutrit_only(self, swap_circuit: Circuit) -> None:
assert not swap_circuit.is_qutrit_only()
def test_is_parameterized(self, swap_circuit: Circuit) -> None:
assert not swap_circuit.is_parameterized()
def test_is_constant(self, swap_circuit: Circuit) -> None:
assert swap_circuit.is_constant()
def test_num_operations(self, swap_circuit: Circuit) -> None:
assert swap_circuit.num_operations == 3
def test_num_cycles(self, swap_circuit: Circuit) -> None:
assert swap_circuit.num_cycles == 3
def test_params(self, swap_circuit: Circuit) -> None:
assert len(swap_circuit.params) == 0
assert isinstance(swap_circuit.params, np.ndarray)
def test_depth(self, swap_circuit: Circuit) -> None:
assert swap_circuit.depth == 3
def test_parallelism(self, swap_circuit: Circuit) -> None:
assert swap_circuit.parallelism == 2
def test_coupling_graph(self, swap_circuit: Circuit) -> None:
cgraph = swap_circuit.coupling_graph
assert isinstance(cgraph, set)
assert is_valid_coupling_graph(cgraph, 2)
assert len(cgraph) == 1
assert (0, 1) in cgraph
def test_gate_set(self, swap_circuit: Circuit) -> None:
gate_set = swap_circuit.gate_set
assert isinstance(gate_set, set)
assert len(gate_set) == 1
assert CNOTGate() in gate_set
def test_active_qudits(self, swap_circuit: Circuit) -> None:
qudits = swap_circuit.active_qudits
assert len(qudits) == swap_circuit.num_qudits
assert isinstance(qudits, list)
assert all(x in qudits for x in range(swap_circuit.num_qudits))
class TestToffoliCircuit:
"""This set of tests will ensure that all circuit properties are correct for
a toffoli circuit."""
def test_num_params(self, toffoli_circuit: Circuit) -> None:
assert toffoli_circuit.num_params == 0
def test_radixes(self, toffoli_circuit: Circuit) -> None:
assert len(toffoli_circuit.radixes) == toffoli_circuit.num_qudits
assert isinstance(toffoli_circuit.radixes, tuple)
assert all(r == 2 for r in toffoli_circuit.radixes)
def test_num_qudits(self, toffoli_circuit: Circuit) -> None:
assert toffoli_circuit.num_qudits == 3
def test_dim(self, toffoli_circuit: Circuit) -> None:
assert toffoli_circuit.dim == 8
def test_is_qubit_only(self, toffoli_circuit: Circuit) -> None:
assert toffoli_circuit.is_qubit_only()
def test_is_qutrit_only(self, toffoli_circuit: Circuit) -> None:
assert not toffoli_circuit.is_qutrit_only()
def test_is_parameterized(self, toffoli_circuit: Circuit) -> None:
assert not toffoli_circuit.is_parameterized()
def test_is_constant(self, toffoli_circuit: Circuit) -> None:
assert toffoli_circuit.is_constant()
def test_num_operations(self, toffoli_circuit: Circuit) -> None:
assert toffoli_circuit.num_operations == 15
def test_num_cycles(self, toffoli_circuit: Circuit) -> None:
assert toffoli_circuit.num_cycles == 11
def test_params(self, toffoli_circuit: Circuit) -> None:
assert len(toffoli_circuit.params) == 0
assert isinstance(toffoli_circuit.params, np.ndarray)
def test_depth(self, toffoli_circuit: Circuit) -> None:
assert toffoli_circuit.depth == 11
def test_parallelism(self, toffoli_circuit: Circuit) -> None:
assert toffoli_circuit.parallelism == 21 / 11
def test_coupling_graph(self, toffoli_circuit: Circuit) -> None:
cgraph = toffoli_circuit.coupling_graph
assert isinstance(cgraph, set)
assert is_valid_coupling_graph(cgraph, 3)
assert len(cgraph) == 3
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (0, 2) in cgraph
def test_gate_set(self, toffoli_circuit: Circuit) -> None:
gate_set = toffoli_circuit.gate_set
assert isinstance(gate_set, set)
assert len(gate_set) == 4
assert CNOTGate() in gate_set
assert HGate() in gate_set
assert TdgGate() in gate_set
assert TGate() in gate_set
def test_active_qudits(self, toffoli_circuit: Circuit) -> None:
qudits = toffoli_circuit.active_qudits
assert len(qudits) == toffoli_circuit.num_qudits
assert isinstance(qudits, list)
assert all(x in qudits for x in range(toffoli_circuit.num_qudits))
class TestGetNumParams:
"""This tests `circuit.num_params`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.num_params, int)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.num_params >= 0
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.num_params == 0
circuit = Circuit(4)
assert circuit.num_params == 0
circuit = Circuit(4, [2, 3, 4, 5])
assert circuit.num_params == 0
def test_adding_gate(self) -> None:
circuit = Circuit(1)
assert circuit.num_params == 0
circuit.append_gate(U3Gate(), [0])
assert circuit.num_params == 3
circuit.append_gate(U3Gate(), [0])
assert circuit.num_params == 6
circuit.append_gate(U3Gate(), [0])
assert circuit.num_params == 9
def test_inserting_gate(self) -> None:
circuit = Circuit(1)
assert circuit.num_params == 0
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_params == 3
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_params == 6
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_params == 9
def test_removing_gate(self) -> None:
circuit = Circuit(1)
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
assert circuit.num_params == 9
circuit.remove(U3Gate())
assert circuit.num_params == 6
circuit.remove(U3Gate())
assert circuit.num_params == 3
circuit.remove(U3Gate())
assert circuit.num_params == 0
def test_freezing_param(self) -> None:
circuit = Circuit(1)
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
assert circuit.num_params == 9
circuit.freeze_param(0)
assert circuit.num_params == 8
circuit.freeze_param(0)
assert circuit.num_params == 7
circuit.freeze_param(0)
assert circuit.num_params == 6
circuit.freeze_param(0)
def test_r1(self, r3_qubit_circuit: Circuit) -> None:
start = r3_qubit_circuit.num_params
r3_qubit_circuit.append_gate(U3Gate(), [0])
assert r3_qubit_circuit.num_params == start + 3
r3_qubit_circuit.insert_gate(0, U3Gate(), [1])
assert r3_qubit_circuit.num_params == start + 6
r3_qubit_circuit.insert_gate(0, CNOTGate(), [0, 2])
assert r3_qubit_circuit.num_params == start + 6
r3_qubit_circuit.remove(U3Gate())
assert r3_qubit_circuit.num_params == start + 3
class TestGetRadixes:
"""This tests `circuit.radixes`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.radixes, tuple)
assert all(is_integer(r) for r in r6_qudit_circuit.radixes)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert is_valid_radixes(r6_qudit_circuit.radixes, 6)
def test_empty(self) -> None:
circuit = Circuit(1)
assert len(circuit.radixes) == 1
assert circuit.radixes[0] == 2
circuit = Circuit(4)
assert len(circuit.radixes) == 4
assert circuit.radixes[0] == 2
assert circuit.radixes[1] == 2
assert circuit.radixes[2] == 2
assert circuit.radixes[3] == 2
circuit = Circuit(4, [2, 2, 3, 3])
assert len(circuit.radixes) == 4
assert circuit.radixes[0] == 2
assert circuit.radixes[1] == 2
assert circuit.radixes[2] == 3
assert circuit.radixes[3] == 3
class TestGetSize:
"""This tests `circuit.num_qudits`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.num_qudits, int)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.num_qudits == 6
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.num_qudits == 1
circuit = Circuit(4)
assert circuit.num_qudits == 4
circuit = Circuit(4, [2, 2, 3, 3])
assert circuit.num_qudits == 4
class TestGetDim:
"""This tests `circuit.dim`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.dim, int)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.dim >= 64
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.dim == 2
circuit = Circuit(4)
assert circuit.dim == 16
circuit = Circuit(4, [2, 2, 3, 3])
assert circuit.dim == 36
class TestIsQubitOnly:
"""This tests `circuit.is_qubit_only`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.is_qubit_only(), bool)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
if r6_qudit_circuit.radixes.count(2) == 6:
assert r6_qudit_circuit.is_qubit_only()
else:
assert not r6_qudit_circuit.is_qubit_only()
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.is_qubit_only()
circuit = Circuit(4)
assert circuit.is_qubit_only()
circuit = Circuit(4, [2, 2, 3, 3])
assert not circuit.is_qubit_only()
class TestIsQutritOnly:
"""This tests `circuit.is_qutrit_only`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.is_qutrit_only(), bool)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
if r6_qudit_circuit.radixes.count(3) == 6:
assert r6_qudit_circuit.is_qutrit_only()
else:
assert not r6_qudit_circuit.is_qutrit_only()
def test_empty(self) -> None:
circuit = Circuit(1)
assert not circuit.is_qutrit_only()
circuit = Circuit(4)
assert not circuit.is_qutrit_only()
circuit = Circuit(4, [3, 3, 3, 3])
assert circuit.is_qutrit_only()
circuit = Circuit(4, [2, 2, 3, 3])
assert not circuit.is_qutrit_only()
class TestIsParameterized:
"""This tests `circuit.is_parameterized`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.is_parameterized(), bool)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert (
r6_qudit_circuit.is_parameterized()
!= r6_qudit_circuit.is_constant()
)
if any(g.is_parameterized() for g in r6_qudit_circuit.gate_set):
assert r6_qudit_circuit.is_parameterized()
else:
assert not r6_qudit_circuit.is_parameterized()
def test_empty(self) -> None:
circuit = Circuit(1)
assert not circuit.is_parameterized()
circuit = Circuit(4)
assert not circuit.is_parameterized()
circuit = Circuit(4, [2, 2, 3, 3])
assert not circuit.is_parameterized()
class TestIsConstant:
"""This tests `circuit.is_constant`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.is_constant(), bool)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert (
r6_qudit_circuit.is_parameterized()
!= r6_qudit_circuit.is_constant()
)
if all(g.is_constant() for g in r6_qudit_circuit.gate_set):
assert r6_qudit_circuit.is_constant()
else:
assert not r6_qudit_circuit.is_constant()
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.is_constant()
circuit = Circuit(4)
assert circuit.is_constant()
circuit = Circuit(4, [2, 2, 3, 3])
assert circuit.is_constant()
class TestGetNumOperations:
"""This tests `circuit.num_operations`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.num_operations, int)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.num_operations >= 0
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.num_operations == 0
circuit = Circuit(4)
assert circuit.num_operations == 0
circuit = Circuit(4, [2, 3, 4, 5])
assert circuit.num_operations == 0
def test_adding_gate(self) -> None:
circuit = Circuit(1)
assert circuit.num_operations == 0
circuit.append_gate(U3Gate(), [0])
assert circuit.num_operations == 1
circuit.append_gate(U3Gate(), [0])
assert circuit.num_operations == 2
circuit.append_gate(U3Gate(), [0])
assert circuit.num_operations == 3
def test_inserting_gate(self) -> None:
circuit = Circuit(1)
assert circuit.num_operations == 0
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_operations == 1
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_operations == 2
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_operations == 3
def test_removing_gate(self) -> None:
circuit = Circuit(1)
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
assert circuit.num_operations == 3
circuit.remove(U3Gate())
assert circuit.num_operations == 2
circuit.remove(U3Gate())
assert circuit.num_operations == 1
circuit.remove(U3Gate())
assert circuit.num_operations == 0
def test_r1(self, r3_qubit_circuit: Circuit) -> None:
assert r3_qubit_circuit.num_operations == 10
r3_qubit_circuit.append_gate(U3Gate(), [0])
assert r3_qubit_circuit.num_operations == 11
r3_qubit_circuit.insert_gate(0, U3Gate(), [1])
assert r3_qubit_circuit.num_operations == 12
r3_qubit_circuit.insert_gate(0, CNOTGate(), [0, 2])
assert r3_qubit_circuit.num_operations == 13
r3_qubit_circuit.remove(U3Gate())
assert r3_qubit_circuit.num_operations == 12
r3_qubit_circuit.remove(CNOTGate())
assert r3_qubit_circuit.num_operations == 11
class TestGetNumCycles:
"""This tests `circuit.num_cycles`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.num_cycles, int)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.num_cycles >= 0
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.num_cycles == 0
circuit = Circuit(4)
assert circuit.num_cycles == 0
circuit = Circuit(4, [2, 3, 4, 5])
assert circuit.num_cycles == 0
def test_adding_gate(self) -> None:
circuit = Circuit(1)
assert circuit.num_cycles == 0
circuit.append_gate(U3Gate(), [0])
assert circuit.num_cycles == 1
circuit.append_gate(U3Gate(), [0])
assert circuit.num_cycles == 2
circuit.append_gate(U3Gate(), [0])
assert circuit.num_cycles == 3
def test_inserting_gate(self) -> None:
circuit = Circuit(1)
assert circuit.num_cycles == 0
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_cycles == 1
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_cycles == 2
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.num_cycles == 3
def test_removing_gate1(self) -> None:
circuit = Circuit(1)
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
assert circuit.num_cycles == 3
circuit.remove(U3Gate())
assert circuit.num_cycles == 2
circuit.remove(U3Gate())
assert circuit.num_cycles == 1
circuit.remove(U3Gate())
assert circuit.num_cycles == 0
def test_removing_gate2(self) -> None:
circuit = Circuit(2)
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(CNOTGate(), [0, 1])
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [1])
assert circuit.num_cycles == 3
circuit.remove(U3Gate())
assert circuit.num_cycles == 2
circuit.remove(CNOTGate())
assert circuit.num_cycles == 1
circuit.remove(U3Gate())
assert circuit.num_cycles == 1
circuit.remove(U3Gate())
assert circuit.num_cycles == 0
class TestGetParams:
"""This tests `circuit.params`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
params = r6_qudit_circuit.params
assert isinstance(params, np.ndarray)
assert all(is_numeric(param) for param in params)
def test_count(self, r6_qudit_circuit: Circuit) -> None:
num_params = r6_qudit_circuit.num_params
params = r6_qudit_circuit.params
assert len(params) == num_params
def test_no_modify(self, r6_qudit_circuit: Circuit) -> None:
params = r6_qudit_circuit.params
if len(params) == 0:
return
params[0] = -params[0] + 1
assert params[0] != r6_qudit_circuit.params[0]
def test_empty(self) -> None:
circuit = Circuit(1)
assert len(circuit.params) == 0
circuit = Circuit(4)
assert len(circuit.params) == 0
circuit = Circuit(4, [2, 3, 4, 5])
assert len(circuit.params) == 0
class TestGetDepth:
"""This tests `circuit.depth`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.depth, int)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.depth >= 0
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.depth == 0
circuit = Circuit(4)
assert circuit.depth == 0
circuit = Circuit(4, [2, 3, 4, 5])
assert circuit.depth == 0
def test_adding_gate(self) -> None:
circuit = Circuit(1)
assert circuit.depth == 0
circuit.append_gate(U3Gate(), [0])
assert circuit.depth == 1
circuit.append_gate(U3Gate(), [0])
assert circuit.depth == 2
circuit.append_gate(U3Gate(), [0])
assert circuit.depth == 3
def test_inserting_gate(self) -> None:
circuit = Circuit(1)
assert circuit.depth == 0
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.depth == 1
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.depth == 2
circuit.insert_gate(0, U3Gate(), [0])
assert circuit.depth == 3
def test_removing_gate1(self) -> None:
circuit = Circuit(1)
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [0])
assert circuit.depth == 3
circuit.remove(U3Gate())
assert circuit.depth == 2
circuit.remove(U3Gate())
assert circuit.depth == 1
circuit.remove(U3Gate())
assert circuit.depth == 0
def test_removing_gate2(self) -> None:
circuit = Circuit(2)
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(CNOTGate(), [0, 1])
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(U3Gate(), [1])
assert circuit.depth == 3
circuit.remove(U3Gate())
assert circuit.depth == 2
circuit.remove(CNOTGate())
assert circuit.depth == 1
circuit.remove(U3Gate())
assert circuit.depth == 1
circuit.remove(U3Gate())
assert circuit.depth == 0
def test_vs_cycles(self, r6_qudit_circuit: Circuit) -> None:
assert (
r6_qudit_circuit.depth
<= r6_qudit_circuit.num_cycles
)
class TestGetParallelism:
"""This tests `circuit.parallelism`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.parallelism, float)
def test_value(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.parallelism > 0
def test_empty(self) -> None:
circuit = Circuit(1)
assert circuit.parallelism == 0
circuit = Circuit(4)
assert circuit.parallelism == 0
circuit = Circuit(4, [2, 3, 4, 5])
assert circuit.parallelism == 0
def test_adding_gate(self) -> None:
circuit = Circuit(1)
assert circuit.parallelism == 0
circuit.append_gate(U3Gate(), [0])
assert circuit.parallelism == 1
circuit.append_gate(U3Gate(), [0])
assert circuit.parallelism == 1
circuit.append_gate(U3Gate(), [0])
assert circuit.parallelism == 1
def test_adding_gate_2(self) -> None:
circuit = Circuit(2)
assert circuit.parallelism == 0
circuit.append_gate(U3Gate(), [0])
assert circuit.parallelism == 1
circuit.append_gate(U3Gate(), [1])
assert circuit.parallelism == 2
circuit.append_gate(U3Gate(), [0])
assert circuit.parallelism == 1.5
circuit.append_gate(U3Gate(), [1])
assert circuit.parallelism == 2
circuit.append_gate(U3Gate(), [0])
assert abs(circuit.parallelism - 5 / 3) < 1e-12
circuit.append_gate(U3Gate(), [1])
assert circuit.parallelism == 2
def test_adding_gate_3(self) -> None:
circuit = Circuit(2)
assert circuit.parallelism == 0
circuit.append_gate(U3Gate(), [0])
assert circuit.parallelism == 1
circuit.append_gate(U3Gate(), [1])
assert circuit.parallelism == 2
circuit.append_gate(CNOTGate(), [0, 1])
assert circuit.parallelism == 2
class TestGetCouplingGraph:
"""This tests `circuit.coupling_graph`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert is_valid_coupling_graph(
r6_qudit_circuit.coupling_graph, 6,
)
def test_empty(self) -> None:
circuit = Circuit(4)
assert len(circuit.coupling_graph) == 0
assert isinstance(circuit.coupling_graph, set)
def test_single_qubit_1(self) -> None:
circuit = Circuit(1)
assert len(circuit.coupling_graph) == 0
circuit.append_gate(U3Gate(), [0])
assert len(circuit.coupling_graph) == 0
circuit.append_gate(U3Gate(), [0])
assert len(circuit.coupling_graph) == 0
circuit.append_gate(U3Gate(), [0])
assert len(circuit.coupling_graph) == 0
def test_single_qubit_2(self) -> None:
circuit = Circuit(4)
assert len(circuit.coupling_graph) == 0
for i in range(4):
circuit.append_gate(U3Gate(), [i])
assert len(circuit.coupling_graph) == 0
for j in range(4):
for i in range(4):
circuit.append_gate(U3Gate(), [i])
assert len(circuit.coupling_graph) == 0
def test_two_qubit_1(self) -> None:
circuit = Circuit(2)
assert len(circuit.coupling_graph) == 0
circuit.append_gate(CNOTGate(), [0, 1])
cgraph = circuit.coupling_graph
assert len(cgraph) == 1
assert (0, 1) in cgraph
circuit.append_gate(CNOTGate(), [1, 0])
cgraph = circuit.coupling_graph
assert len(cgraph) == 1
assert (0, 1) in cgraph
circuit.remove(CNOTGate())
circuit.remove(CNOTGate())
assert len(circuit.coupling_graph) == 0
def test_two_qubit_2(self) -> None:
circuit = Circuit(4)
assert len(circuit.coupling_graph) == 0
circuit.append_gate(CNOTGate(), [0, 1])
circuit.append_gate(CNOTGate(), [1, 2])
circuit.append_gate(CNOTGate(), [2, 3])
cgraph = circuit.coupling_graph
assert len(cgraph) == 3
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (2, 3) in cgraph
circuit.append_gate(CNOTGate(), [2, 3])
circuit.append_gate(CNOTGate(), [1, 2])
circuit.append_gate(CNOTGate(), [0, 1])
cgraph = circuit.coupling_graph
assert len(cgraph) == 3
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (2, 3) in cgraph
circuit.append_gate(CNOTGate(), [0, 2])
circuit.append_gate(CNOTGate(), [3, 0])
cgraph = circuit.coupling_graph
assert len(cgraph) == 5
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (2, 3) in cgraph
assert (0, 2) in cgraph
assert (0, 3) in cgraph
def test_multi_qubit_1(self, gen_random_utry_np: Any) -> None:
circuit = Circuit(6)
assert len(circuit.coupling_graph) == 0
three_qubit_gate = ConstantUnitaryGate(gen_random_utry_np(8))
circuit.append_gate(three_qubit_gate, [0, 1, 2])
cgraph = circuit.coupling_graph
assert len(cgraph) == 3
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (0, 2) in cgraph
circuit.append_gate(three_qubit_gate, [0, 1, 2])
cgraph = circuit.coupling_graph
assert len(cgraph) == 3
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (0, 2) in cgraph
circuit.append_gate(three_qubit_gate, [1, 2, 3])
cgraph = circuit.coupling_graph
assert len(cgraph) == 5
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (0, 2) in cgraph
assert (1, 3) in cgraph
assert (2, 3) in cgraph
circuit.append_gate(three_qubit_gate, [3, 4, 5])
cgraph = circuit.coupling_graph
assert len(cgraph) == 8
assert (0, 1) in cgraph
assert (1, 2) in cgraph
assert (0, 2) in cgraph
assert (1, 3) in cgraph
assert (2, 3) in cgraph
assert (3, 4) in cgraph
assert (3, 5) in cgraph
assert (4, 5) in cgraph
def test_multi_qudit_2(self, gen_random_utry_np: Any) -> None:
circuit = Circuit(6, [2, 2, 2, 3, 3, 3])
assert len(circuit.coupling_graph) == 0
three_qubit_gate = ConstantUnitaryGate(
gen_random_utry_np(12), [2, 2, 3],
)
circuit.append_gate(three_qubit_gate, [0, 1, 3])
cgraph = circuit.coupling_graph
assert len(cgraph) == 3
assert (0, 1) in cgraph
assert (1, 3) in cgraph
assert (0, 3) in cgraph
circuit.append_gate(CNOTGate(), [1, 2])
cgraph = circuit.coupling_graph
assert len(cgraph) == 4
assert (0, 1) in cgraph
assert (1, 3) in cgraph
assert (0, 3) in cgraph
assert (1, 2) in cgraph
circuit.append_gate(CSUMGate(), [4, 5])
cgraph = circuit.coupling_graph
assert len(cgraph) == 5
assert (0, 1) in cgraph
assert (1, 3) in cgraph
assert (0, 3) in cgraph
assert (1, 2) in cgraph
assert (4, 5) in cgraph
class TestGetGateSet:
"""This tests `circuit.gate_set`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.gate_set, set)
assert all(
isinstance(gate, Gate)
for gate in r6_qudit_circuit.gate_set
)
def test_empty(self) -> None:
circuit = Circuit(4)
assert len(circuit.gate_set) == 0
assert isinstance(circuit.gate_set, set)
def test_adding_gate(self) -> None:
circuit = Circuit(1)
assert len(circuit.gate_set) == 0
circuit.append_gate(U3Gate(), [0])
assert len(circuit.gate_set) == 1
assert U3Gate() in circuit.gate_set
circuit.append_gate(XGate(), [0])
assert len(circuit.gate_set) == 2
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
circuit.append_gate(ZGate(), [0])
assert len(circuit.gate_set) == 3
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
circuit.append_gate(TGate(), [0])
assert len(circuit.gate_set) == 4
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
assert TGate() in circuit.gate_set
def test_removing_gate(self) -> None:
circuit = Circuit(1)
assert len(circuit.gate_set) == 0
circuit.append_gate(U3Gate(), [0])
circuit.append_gate(XGate(), [0])
circuit.append_gate(ZGate(), [0])
circuit.append_gate(TGate(), [0])
assert len(circuit.gate_set) == 4
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
assert TGate() in circuit.gate_set
circuit.remove(TGate())
assert len(circuit.gate_set) == 3
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
circuit.remove(XGate())
assert len(circuit.gate_set) == 2
assert U3Gate() in circuit.gate_set
assert ZGate() in circuit.gate_set
circuit.remove(ZGate())
assert len(circuit.gate_set) == 1
assert U3Gate() in circuit.gate_set
circuit.remove(U3Gate())
assert len(circuit.gate_set) == 0
def test_qudit(self) -> None:
circuit = Circuit(3, [2, 3, 3])
assert len(circuit.gate_set) == 0
circuit.append_gate(U3Gate(), [0])
assert len(circuit.gate_set) == 1
assert U3Gate() in circuit.gate_set
circuit.append_gate(XGate(), [0])
assert len(circuit.gate_set) == 2
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
circuit.append_gate(ZGate(), [0])
assert len(circuit.gate_set) == 3
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
circuit.append_gate(TGate(), [0])
assert len(circuit.gate_set) == 4
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
assert TGate() in circuit.gate_set
circuit.append_gate(CSUMGate(), [1, 2])
assert len(circuit.gate_set) == 5
assert U3Gate() in circuit.gate_set
assert XGate() in circuit.gate_set
assert ZGate() in circuit.gate_set
assert TGate() in circuit.gate_set
assert CSUMGate() in circuit.gate_set
class TestIsDifferentiable:
"""This tests `circuit.is_differentiable`."""
def test_type(self, r6_qudit_circuit: Circuit) -> None:
assert isinstance(r6_qudit_circuit.is_differentiable(), bool)
def test_value(self, gate: Gate) -> None:
circuit = Circuit(gate.num_qudits, gate.radixes)
assert circuit.is_differentiable()
circuit.append_gate(gate, list(range(gate.num_qudits)))
if isinstance(gate, DifferentiableUnitary):
assert circuit.is_differentiable()
else:
assert not circuit.is_differentiable()
@pytest.mark.parametrize(
'circuit', [
Circuit(1),
Circuit(4),
Circuit(4, [2, 3, 4, 5]),
],
)
def test_empty(self, circuit: Circuit) -> None:
assert circuit.is_differentiable()
```
#### File: gates/constant/test_identity.py
```python
from __future__ import annotations
import numpy as np
from hypothesis import given
from bqskit.ir.gates import IdentityGate
from bqskit.utils.test.strategies import num_qudits_and_radixes
@given(num_qudits_and_radixes())
def test_identity(pair: tuple[int, tuple[int, ...]]) -> None:
num_qudits, radixes = pair
i = IdentityGate(num_qudits, radixes)
assert i.num_qudits == num_qudits
assert i.num_params == 0
assert i.get_unitary() == np.identity(int(np.prod(radixes)))
```
#### File: gates/constant/test_unitary.py
```python
from __future__ import annotations
from hypothesis import given
from bqskit.ir.gates import ConstantUnitaryGate
from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix
from bqskit.utils.test.strategies import unitaries
@given(unitaries())
def test_constant_unitary(utry: UnitaryMatrix) -> None:
u = ConstantUnitaryGate(utry)
assert u.num_qudits == utry.num_qudits
assert u.radixes == utry.radixes
assert u.num_params == 0
assert u.get_unitary() == u
@given(unitaries())
def test_constant_unitary_like(utry: UnitaryMatrix) -> None:
u = ConstantUnitaryGate(utry.numpy, utry.radixes)
assert u.num_qudits == utry.num_qudits
assert u.radixes == utry.radixes
assert u.num_params == 0
assert u.get_unitary() == u
```
#### File: gates/parameterized/test_rxx.py
```python
from __future__ import annotations
import numpy as np
from bqskit.ir.gates import RXXGate
from bqskit.ir.gates import XXGate
def test_get_unitary() -> None:
g = RXXGate()
u = XXGate().get_unitary()
assert g.get_unitary([np.pi / 2]).get_distance_from(u) < 1e-7
```
#### File: gates/parameterized/test_ry.py
```python
from __future__ import annotations
import numpy as np
from bqskit.ir.gates import RYGate
from bqskit.ir.gates import YGate
def test_get_unitary() -> None:
g = RYGate()
u = YGate().get_unitary()
assert g.get_unitary([np.pi]).get_distance_from(u) < 1e-7
```
#### File: gates/parameterized/test_rz.py
```python
from __future__ import annotations
import numpy as np
from bqskit.ir.gates import RZGate
from bqskit.ir.gates import ZGate
def test_get_unitary() -> None:
g = RZGate()
u = ZGate().get_unitary()
assert g.get_unitary([np.pi]).get_distance_from(u) < 1e-7
```
#### File: gates/parameterized/test_u2.py
```python
from __future__ import annotations
import numpy as np
from hypothesis import given
from hypothesis.strategies import floats
from bqskit.ir.gates import RYGate
from bqskit.ir.gates import RZGate
from bqskit.ir.gates import U2Gate
@given(
floats(allow_nan=False, allow_infinity=False, width=32),
floats(allow_nan=False, allow_infinity=False, width=32),
)
def test_get_unitary(angle1: float, angle2: float) -> None:
u = U2Gate().get_unitary([angle1, angle2])
z1 = RZGate().get_unitary([angle1])
y = RYGate().get_unitary([np.pi / 2])
z2 = RZGate().get_unitary([angle2])
assert u.get_distance_from(z1 @ y @ z2) < 1e-7
```
#### File: ir/gates/test_circuitgate.py
```python
from __future__ import annotations
import pickle
from hypothesis import given
from bqskit.ir.circuit import Circuit
from bqskit.ir.gates import CircuitGate
from bqskit.utils.test.strategies import circuits
class TestPickle:
@given(circuits([2, 2], max_gates=3), circuits([2, 2], max_gates=3))
def test_pickle_individual(self, c1: Circuit, c2: Circuit) -> None:
gate1 = CircuitGate(c1)
gate2 = CircuitGate(c2)
utry1 = gate1.get_unitary()
utry2 = gate2.get_unitary()
pickled_utry1 = pickle.loads(pickle.dumps(gate1)).get_unitary()
pickled_utry2 = pickle.loads(pickle.dumps(gate2)).get_unitary()
assert utry1 == pickled_utry1
assert utry2 == pickled_utry2
@given(circuits([2, 2], max_gates=3), circuits([2, 2], max_gates=3))
def test_pickle_list(self, c1: Circuit, c2: Circuit) -> None:
gate1 = CircuitGate(c1)
gate2 = CircuitGate(c2)
utry1 = gate1.get_unitary()
utry2 = gate2.get_unitary()
pickled = pickle.loads(pickle.dumps([gate1, gate2]))
assert utry1 == pickled[0].get_unitary()
assert utry2 == pickled[1].get_unitary()
@given(circuits([2, 2], max_gates=5), circuits([2, 2], max_gates=5))
def test_pickle_circuit(self, c1: Circuit, c2: Circuit) -> None:
gate1 = CircuitGate(c1)
gate2 = CircuitGate(c2)
circuit = Circuit(2)
circuit.append_gate(gate1, [0, 1])
circuit.append_gate(gate2, [0, 1])
utry = circuit.get_unitary()
pickled = pickle.loads(pickle.dumps(circuit))
assert utry == pickled.get_unitary()
```
#### File: opt/cost/test_residuals.py
```python
from __future__ import annotations
import numpy as np
from bqskit.ir.circuit import Circuit
from bqskit.ir.opt import HilbertSchmidtResidualsGenerator
def test_hilbert_schmidt_residuals(r3_qubit_circuit: Circuit) -> None:
x0 = np.random.random((r3_qubit_circuit.num_params,))
cost = HilbertSchmidtResidualsGenerator().gen_cost(
r3_qubit_circuit, r3_qubit_circuit.get_unitary(x0),
)
assert cost.get_cost(x0) < 1e-10
```
#### File: opt/instantiaters/test_qfactor.py
```python
from __future__ import annotations
import numpy as np
from scipy.stats import unitary_group
from bqskit.ir.circuit import Circuit
from bqskit.ir.gates.parameterized import RXGate
from bqskit.ir.gates.parameterized.unitary import VariableUnitaryGate
from bqskit.ir.opt.instantiaters.qfactor import QFactor
class TestQFactorEndToEnd:
def test_no_change(self) -> None:
u1 = unitary_group.rvs(8)
g1 = VariableUnitaryGate(3)
circuit = Circuit(3)
circuit.append_gate(g1, [0, 1, 2])
utry_before = circuit.get_unitary()
# The following call should not make any changes in circuit
QFactor().instantiate(circuit, u1, circuit.params)
utry_after = circuit.get_unitary()
assert np.allclose(
utry_before,
utry_after,
)
def test_1_gate(self) -> None:
u1 = unitary_group.rvs(8)
g1 = VariableUnitaryGate(3)
circuit = Circuit(3)
circuit.append_gate(g1, [0, 1, 2])
params = QFactor().instantiate(circuit, u1, circuit.params)
circuit.set_params(params)
g1_params = list(np.reshape(u1, (64,)))
g1_params = list(np.real(g1_params)) + list(np.imag(g1_params))
assert np.allclose(
circuit.get_unitary(),
g1.get_unitary(g1_params),
)
def test_2_gate(self) -> None:
g1 = VariableUnitaryGate(2)
g2 = VariableUnitaryGate(3)
g3 = RXGate()
circuit = Circuit(4)
circuit.append_gate(g1, [0, 1])
circuit.append_gate(g2, [1, 2, 3])
circuit.append_gate(g3, [1])
utry = circuit.get_unitary(np.random.random(circuit.num_params))
params = QFactor().instantiate(circuit, utry, circuit.params)
circuit.set_params(params)
assert np.allclose(
circuit.get_unitary(),
utry,
)
```
#### File: tests/ir/test_operation.py
```python
from __future__ import annotations
import numpy as np
from hypothesis import assume
from hypothesis import given
from bqskit.ir.gates import CXGate
from bqskit.ir.gates import CYGate
from bqskit.ir.gates import U3Gate
from bqskit.ir.operation import Operation
from bqskit.utils.test.strategies import operations
@given(operations())
def test_init(op: Operation) -> None:
new_op = Operation(op.gate, op.location, op.params)
assert new_op.gate == op.gate
assert new_op.location == op.location
assert new_op.params == op.params
assert new_op.radixes == op.radixes
assert new_op.get_unitary() == op.get_unitary()
class TestGetQasm:
def test_cx(self) -> None:
op = Operation(CXGate(), (0, 1))
assert op.get_qasm() == 'cx q[0], q[1];\n'
def test_cy(self) -> None:
op = Operation(CYGate(), (3, 0))
assert op.get_qasm() == 'cy q[3], q[0];\n'
def test_u3(self) -> None:
op = Operation(U3Gate(), 0, [0, 1, 2])
assert op.get_qasm() == 'u3(0, 1, 2) q[0];\n'
@given(operations())
def test_get_unitary(op: Operation) -> None:
assert op.get_unitary() == op.gate.get_unitary(op.params)
new_params = [1] * op.num_params
assert op.get_unitary(new_params) == op.get_unitary(new_params)
@given(operations())
def test_get_grad(op: Operation) -> None:
assume(op.is_differentiable())
assert np.allclose(op.get_grad(), op.gate.get_grad(op.params)) # type: ignore # noqa
new_params = [1] * op.num_params
assert np.allclose(op.get_grad(new_params), op.gate.get_grad(new_params)) # type: ignore # noqa
@given(operations())
def test_get_unitary_and_grad(op: Operation) -> None:
assume(op.is_differentiable())
utry, grads = op.get_unitary_and_grad()
exp_utry, exp_grads = op.gate.get_unitary_and_grad(op.params) # type: ignore # noqa
assert utry == exp_utry
assert np.allclose(grads, exp_grads)
new_params = [1] * op.num_params
utry, grads = op.get_unitary_and_grad(new_params)
exp_utry, exp_grads = op.gate.get_unitary_and_grad(new_params) # type: ignore # noqa
assert utry == exp_utry
assert np.allclose(grads, exp_grads)
```
#### File: bqskit/tests/test_conftest.py
```python
from __future__ import annotations
from typing import Any
import numpy as np
from bqskit.ir.circuit import Circuit
from bqskit.qis.unitary import UnitaryMatrix
from bqskit.utils.typing import is_complex
from bqskit.utils.typing import is_integer
from bqskit.utils.typing import is_numeric
from bqskit.utils.typing import is_sequence
class TestGenRandomUtryNp:
"""Ensure random utry generator behaves as expected."""
def test_invalid_1(self, gen_random_utry_np: Any) -> None:
try:
utry = gen_random_utry_np('a') # noqa
except TypeError:
return
except BaseException:
assert False, 'Unexpected error.'
def test_invalid_2(self, gen_random_utry_np: Any) -> None:
try:
utry = gen_random_utry_np(['a', 3]) # noqa
except TypeError:
return
except BaseException:
assert False, 'Unexpected error.'
def test_valid_single_dim(self, gen_random_utry_np: Any) -> None:
utry = gen_random_utry_np(8)
assert isinstance(utry, np.ndarray)
assert utry.shape == (8, 8)
assert UnitaryMatrix.is_unitary(utry)
def test_valid_multi_dim(self, gen_random_utry_np: Any) -> None:
utry = gen_random_utry_np([4, 8])
assert isinstance(utry, np.ndarray)
assert utry.shape == (8, 8) or utry.shape == (4, 4)
assert UnitaryMatrix.is_unitary(utry)
class TestGenInvalidUtryNp:
"""Ensure invalid utry generator behaves as expected."""
def test_invalid_1(self, gen_invalid_utry_np: Any) -> None:
try:
iutry = gen_invalid_utry_np('a') # noqa
except TypeError:
return
except BaseException:
assert False, 'Unexpected error.'
def test_invalid_2(self, gen_invalid_utry_np: Any) -> None:
try:
iutry = gen_invalid_utry_np(['a', 3]) # noqa
except TypeError:
return
except BaseException:
assert False, 'Unexpected error.'
def test_valid_single_dim(self, gen_invalid_utry_np: Any) -> None:
iutry = gen_invalid_utry_np(8)
assert isinstance(iutry, np.ndarray)
assert iutry.shape == (8, 8)
assert not UnitaryMatrix.is_unitary(iutry)
def test_valid_multi_dim(self, gen_invalid_utry_np: Any) -> None:
iutry = gen_invalid_utry_np([4, 8])
assert isinstance(iutry, np.ndarray)
assert iutry.shape == (8, 8) or iutry.shape == (4, 4)
assert not UnitaryMatrix.is_unitary(iutry)
class TestGenRandomCircuit:
"""Ensure random circuit generator behaves as expected."""
def test_invalid_type_1(self, gen_random_circuit: Any) -> None:
try:
circuit = gen_random_circuit('a') # noqa
except TypeError:
return
except BaseException:
assert False, 'Unexpected error.'
def test_invalid_type_2(self, gen_random_circuit: Any) -> None:
try:
circuit = gen_random_circuit(3, 'a') # noqa
except TypeError:
return
except BaseException:
assert False, 'Unexpected error.'
def test_invalid_type_3(self, gen_random_circuit: Any) -> None:
try:
circuit = gen_random_circuit(3, [2, 2, 2], 'a') # noqa
except TypeError:
return
except BaseException:
assert False, 'Unexpected error.'
def test_invalid_type_4(self, gen_random_circuit: Any) -> None:
try:
circuit = gen_random_circuit(3, [2, 2, 2], 5, 'a') # noqa
except TypeError:
return
except BaseException:
assert False, 'Unexpected error.'
def test_invalid_value_1(self, gen_random_circuit: Any) -> None:
try:
circuit = gen_random_circuit(3, [2, 2]) # noqa
except ValueError:
return
except BaseException:
assert False, 'Unexpected error.'
# r3_qubit_circuit
def test_r3_size(self, r3_qubit_circuit: Circuit) -> None:
assert r3_qubit_circuit.num_qudits == 3
def test_r3_radix(self, r3_qubit_circuit: Circuit) -> None:
assert r3_qubit_circuit.is_qubit_only()
def test_r3_depth(self, r3_qubit_circuit: Circuit) -> None:
assert r3_qubit_circuit.num_operations == 10
# r3_qubit_constant_circuit
def test_r3_con_size(self, r3_qubit_constant_circuit: Circuit) -> None:
assert r3_qubit_constant_circuit.num_qudits == 3
def test_r3_con_radix(self, r3_qubit_constant_circuit: Circuit) -> None:
assert r3_qubit_constant_circuit.is_qubit_only()
def test_r3_con_depth(self, r3_qubit_constant_circuit: Circuit) -> None:
assert r3_qubit_constant_circuit.num_operations == 25
def test_r3_con_constant(self, r3_qubit_constant_circuit: Circuit) -> None:
assert r3_qubit_constant_circuit.is_constant()
# r3_qutrit_circuit
def test_r3_qutrit_size(self, r3_qutrit_circuit: Circuit) -> None:
assert r3_qutrit_circuit.num_qudits == 3
def test_r3_qutrit_radix(self, r3_qutrit_circuit: Circuit) -> None:
assert r3_qutrit_circuit.is_qutrit_only()
def test_r3_qutrit_depth(self, r3_qutrit_circuit: Circuit) -> None:
assert r3_qutrit_circuit.num_operations == 10
# r6_qudit_circuit
def test_r6_size(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.num_qudits == 6
def test_r6_radix(self, r6_qudit_circuit: Circuit) -> None:
count = r6_qudit_circuit.radixes.count(2)
count += r6_qudit_circuit.radixes.count(3)
assert count == r6_qudit_circuit.num_qudits
def test_r6_depth(self, r6_qudit_circuit: Circuit) -> None:
assert r6_qudit_circuit.num_operations == 10
class TestTypedValues:
"""This tests the type-categorized value fixtures."""
def test_a_str(self, a_str: Any) -> None:
assert isinstance(a_str, str)
def test_not_a_str(self, not_a_str: Any) -> None:
assert not isinstance(not_a_str, str)
def test_an_int(self, an_int: Any) -> None:
assert is_integer(an_int)
def test_not_an_int(self, not_an_int: Any) -> None:
assert not is_integer(not_an_int)
def test_a_float(self, a_float: Any) -> None:
assert (
is_numeric(a_float)
and not is_integer(a_float)
)
def test_not_a_float(self, not_a_float: Any) -> None:
assert (
not is_numeric(not_a_float)
or is_integer(not_a_float)
or is_complex(not_a_float)
)
def test_a_complex(self, a_complex: Any) -> None:
assert is_complex(a_complex)
def test_a_bool(self, a_bool: Any) -> None:
assert isinstance(a_bool, (bool, np.bool_))
def test_not_a_bool(self, not_a_bool: Any) -> None:
assert not isinstance(not_a_bool, (bool, np.bool_))
def test_a_seq_str(self, a_seq_str: Any) -> None:
assert is_sequence(a_seq_str)
assert len(a_seq_str) >= 0
assert all(isinstance(s, str) for s in a_seq_str)
def test_not_a_seq_str(self, not_a_seq_str: Any) -> None:
assert (
not is_sequence(not_a_seq_str)
or isinstance(not_a_seq_str, str)
or any(not isinstance(s, str) for s in not_a_seq_str)
)
def test_a_seq_int(self, a_seq_int: Any) -> None:
assert is_sequence(a_seq_int)
assert len(a_seq_int) >= 0
assert all(is_integer(i) for i in a_seq_int)
def test_not_a_seq_int(self, not_a_seq_int: Any) -> None:
assert (
not is_sequence(not_a_seq_int)
or isinstance(not_a_seq_int, str)
or any(not is_integer(i) for i in not_a_seq_int)
)
def test_not_a_seq_float(self, not_a_seq_float: Any) -> None:
assert (
not is_sequence(not_a_seq_float)
or isinstance(not_a_seq_float, str)
or any(
not is_numeric(f)
or is_integer(f)
or is_complex(f)
for f in not_a_seq_float
)
)
def test_a_seq_complex(self, a_seq_complex: Any) -> None:
assert is_sequence(a_seq_complex)
assert len(a_seq_complex) >= 0
assert all(is_complex(c) for c in a_seq_complex)
def test_a_seq_bool(self, a_seq_bool: Any) -> None:
assert is_sequence(a_seq_bool)
assert len(a_seq_bool) >= 0
assert all(isinstance(b, (bool, np.bool_)) for b in a_seq_bool)
def test_not_a_seq_bool(self, not_a_seq_bool: Any) -> None:
assert (
not is_sequence(not_a_seq_bool)
or isinstance(not_a_seq_bool, str)
or any(not isinstance(b, (bool, np.bool_)) for b in not_a_seq_bool)
)
``` |
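These tests exercise factory fixtures (`gen_random_utry_np`, `gen_invalid_utry_np`, `gen_random_circuit`, ...) defined in bqskit's `conftest.py`, which is not included in this dump. A minimal sketch of what such a factory fixture could look like follows; it is an assumption for illustration only (not bqskit's actual conftest) and draws Haar-random unitaries with `scipy.stats.unitary_group`:
```python
import numpy as np
import pytest
from scipy.stats import unitary_group


@pytest.fixture
def gen_random_utry_np():
    """Return a factory that builds a random unitary for one of the given dimensions."""
    def factory(dims):
        if isinstance(dims, int):
            dims = [dims]
        if not isinstance(dims, (list, tuple)) or not all(isinstance(d, int) for d in dims):
            raise TypeError('dims must be an int or a sequence of ints.')
        # Pick one of the allowed dimensions and sample a Haar-random unitary of that size.
        return unitary_group.rvs(int(np.random.choice(dims)))
    return factory
```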
{
"source": "jkalmar/PythonDays",
"score": 4
} |
#### File: code/basicInput/input.py
```python
def one():
""" One """
print("Zavolana 1")
def two():
""" Two """
print("Zavolana 2")
def three():
""" Three """
print("Zavolana 3")
def main():
""" Main func """
name = input("Tvoje meno: ")
print("Tvoje meno je: %s" % name)
count = input("Kolko krat chces vypisat tvoje meno: ")
print("%s\n" % name * int(count))
while True:
func = input("Ktoru funkciu chces vykonat? ")
if func == "1":
one()
elif func == "2":
two()
elif func == "3":
three()
elif func.lower() == "ziadnu":
break
else:
print("Zle cislo")
if __name__ == '__main__':
main()
```
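The menu above dispatches with an if/elif chain; the same dispatch is often written with a dictionary of callables. A small self-contained sketch of that alternative (prompts translated to English, handler names illustrative):
```python
def one():
    print("Called 1")


def two():
    print("Called 2")


def main():
    """Dispatch menu choices through a dict instead of an if/elif chain."""
    handlers = {"1": one, "2": two}
    while True:
        choice = input("Which function do you want to run? ")
        if choice.lower() == "none":
            break
        # dict.get() returns a fallback callable for unknown input.
        handlers.get(choice, lambda: print("Wrong number"))()


if __name__ == "__main__":
    main()
```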
#### File: language_vs/dictionaries/dictionaries.py
```python
def main():
"""
    A dictionary is an associative container that associates keys with values and allows
    fast lookups, inserts and erases.
They are usually implemented as some kind of a hashtable
"""
# dictionary in python is symbolized by curly brackets {}
di1 = {}
# we can easily init dictionary with a pair of key : value elements
di2 = {1 : "one", 2 : "two", 3 : "three", 4 : "four", 5 : "five"}
di3 = {6 : "six", 7 : "seven", 8 : "eight", 9 : "nine", 10 : "ten"}
print("Type of di1: %s" % type(di1))
print("Type of di2: %s" % type(di2))
print("Type of di3: %s" % type(di3))
# we can add value to a key into dictionary using [] operator
di1["key"] = "value"
# printing of a dictionary is native in python
print(di1)
# removing a key from a dictionary is easy, however removing by value needs a bit of work
del di1["key"]
print(di1)
# dictionaries can be "merged" together
    # in a dictionary each key is unique, so if di2 and di3 had any keys in common the value would get overwritten
    # by the element from the latter dictionary; here di4 is first initialized to di2 and then di3 is added,
# so di3 elements with the same key would overwrite elements from di2
di4 = di2.copy()
di4.update(di3)
print("di4: " + str(di4))
# true power is when it comes to searching
di5 = {"jedna" : 1, "dva" : 2, "tri" : 3, "styri" : 4}
    # this prints the word "two": first it looks up the value of the key "dva" in di5, which is 2, and then it
    # looks up the key 2 in di4, which is "two"
print(di4[di5["dva"]])
# python supports dictionary comprehension
di6 = {k: di4[v] for (k, v) in di5.items()}
print(di6)
# switching key with values in python's dictionaries is very easy
di7 = {v: k for (k, v) in di4.items()}
print(di7)
if __name__ == '__main__':
main()
```
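The comments above note that removing an entry by value "needs a bit of work"; a minimal sketch of one way to do it (the helper name is illustrative):
```python
def remove_by_value(d, value):
    """Remove every key whose value equals `value`; return how many keys were removed."""
    keys_to_remove = [k for k, v in d.items() if v == value]
    for k in keys_to_remove:
        del d[k]
    return len(keys_to_remove)


if __name__ == "__main__":
    numbers = {1: "one", 2: "two", 3: "two"}
    removed = remove_by_value(numbers, "two")
    print(numbers, removed)  # {1: 'one'} 2
```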
#### File: workshop/checkout/checkout.py
```python
from .discount_exception import ItemPriceIsBad
class Checkout:
"""
Class representing checkout
"""
def __init__(self):
self.items = []
self.discounts = []
self.total_price = 0
def add_discount(self, discount):
"""
Adds discount to one shopping list
"""
self.discounts.append(discount)
def add_item(self, item):
"""
Adds item to list
"""
if item.price > 0:
self.items.append(item)
else:
raise ItemPriceIsBad("zla cena")
def calculate_total(self):
"""
Calculates the total price of items with discounts
"""
if self.total_price == 0:
for discount in self.discounts:
for item in self.items:
item.add_discount(discount)
for item in self.items:
self.total_price += item.final_price()
return self.total_price
```
#### File: workshop/checkout/item.py
```python
from .discount_exception import ItemIsNowFreeException
class Item:
"""
Class representing one item that can be added to checkout
"""
def __init__(self, name, price, kind):
self.name = name
self.price = price
self.price_final = price
self.kind = kind
self.discount = []
def add_discount(self, discount):
"""
Adds discount to Item
"""
discount.add_to(self)
def final_price(self):
"""
        Calculates the price of this Item after all discounts are applied
"""
for discount in self.discount:
discount.apply(self)
return self.price_final
```
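Taken together, `Item` and `Checkout` are used roughly as in the sketch below. `Discount` and `Flavor` live in the repo's `checkout/discount.py`, which is not shown here, so the discount line is modelled on how `tests/test_checkout.py` below uses them and should be read as an assumption:
```python
from checkout.checkout import Checkout
from checkout.discount import Discount, Flavor  # module not included in this dump
from checkout.item import Item

checkout = Checkout()
checkout.add_item(Item("Milk", 1.2, 0))
checkout.add_item(Item("Sugar", 0.8, 0))

# A 50% multiple-use discount, mirroring the usage in tests/test_checkout.py below.
checkout.add_discount(Discount(Flavor.Multiple, 50))

print(checkout.calculate_total())
```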
#### File: workshop/tests/test_checkout.py
```python
import pytest
from checkout.checkout import Checkout
from checkout.item import Item
from checkout.discount import Discount
from checkout.discount import Flavor
from checkout.discount_exception import DiscountNotApplicableException
from checkout.discount_exception import ItemPriceIsBad
@pytest.fixture()
def items():
"""
Fixture that creates a list of items
"""
milk = Item("Milk", 1.2, 0)
sugar = Item("Sugar", 0.8, 0)
candy = Item("Candy", 1.0, 0)
return [milk, sugar, candy]
@pytest.fixture
def checkout_with_items(items):
"""
    Fixture that creates a checkout with some items
"""
checkout = Checkout()
for item in items:
checkout.add_item(item)
return checkout
def test_basic_add(checkout_with_items):
"""
    Basic test that totals 3 items without any discount
"""
checkout = checkout_with_items
total = checkout.calculate_total()
total = checkout.calculate_total()
total = checkout.calculate_total()
assert total == 3
def test_basic_add_negative():
"""
    Basic test that adding items with a negative price raises ItemPriceIsBad
"""
milk = Item("Milk", -1.2, 0)
sugar = Item("Sugar", -0.8, 0)
candy = Item("Candy", -1.0, 0)
checkout = Checkout()
with pytest.raises(ItemPriceIsBad):
checkout.add_item(milk)
checkout.add_item(sugar)
checkout.add_item(candy)
total = checkout.calculate_total()
assert total == 0
def test_with_discount(checkout_with_items):
"""
Test that checks discount
"""
checkout = checkout_with_items
mega_discount = Discount(Flavor.Multiple, 50)
checkout.add_discount(mega_discount)
checkout.add_discount(mega_discount)
assert checkout.calculate_total() == 0.75
def test_with_abs_discount(checkout_with_items):
"""
Test that checks discount
"""
checkout = checkout_with_items
mega_discount = Discount(Flavor.Multiple, 5)
mega_discount.set_absolute()
checkout.add_discount(mega_discount)
assert checkout.calculate_total() == 0.75
def test_exception(checkout_with_items):
"""
Test that checks discount
"""
checkout = checkout_with_items
mega_discount = Discount(Flavor.Exclusive, 50)
checkout.add_discount(mega_discount)
checkout.add_discount(mega_discount)
with pytest.raises(DiscountNotApplicableException):
checkout.calculate_total()
``` |
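The assertions above compare float totals with `==`, which happens to hold for these particular values; `pytest.approx` is the usual, more robust pattern for money-like float sums. A minimal sketch:
```python
import pytest


def test_total_with_float_tolerance():
    total = 0.1 + 0.2  # a classic case where exact float equality fails
    assert total == pytest.approx(0.3)
```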
{
"source": "jkalmos/AMSZ-Python-CalcGUI",
"score": 3
} |
#### File: AMSZ-Python-CalcGUI/CalcGui/PlotFunctions.py
```python
import tkinter as tk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.patches as patches
import numpy as np
# PLOT SHAPE --------------------------------------------------------------------------------------------------------------------------------------------------------
def plot(parent, shape, coordinate_on, dimension_lines_on, transformed_coordinate_on, thickness_on, colors, a = 1.6, b = 0.8, d = 0.8):
if parent.plotted == True:
parent.canvas._tkcanvas.destroy()
circ = False
fig = Figure()
parent.canvas = FigureCanvasTkAgg(fig, master = parent)
parent.canvas.get_tk_widget().pack()
parent.canvas._tkcanvas.pack(side="top", fill="both", expand=1,padx = (10,20), pady = 20)
parent.plotted = True
parent.ax = fig.add_subplot(111)
parent.ax.set_aspect("equal")
fig.patch.set_facecolor(colors["secondary_color"])
parent.ax.xaxis.set_visible(False)
parent.ax.yaxis.set_visible(False)
parent.ax.set_frame_on(False)
if shape == "Rectangle":
x, y, proportional = set_dimensions(a, b)
rect_x = [-x/2, -x/2, x/2, x/2, -x/2]
rect_y = [y/2, -y/2, -y/2, y/2, y/2]
rect_x_th = [-x/2+0.1, -x/2+0.1, x/2-0.1, x/2-0.1, -x/2+0.1]
rect_y_th = [y/2-0.1, -y/2+0.1, -y/2+0.1, y/2-0.1, y/2-0.1]
parent.ax.plot(rect_x, rect_y, colors["draw_main"], lw=2)
parent.ax.fill(rect_x,rect_y,color=colors["draw_main"],alpha=0.9)
if thickness_on == True:
parent.ax.plot(rect_x_th, rect_y_th, colors["draw_main"], lw=2)
parent.ax.fill(rect_x_th,rect_y_th,color=colors["secondary_color"])
coordinate_displacement = 0
elif shape == "Ellipse":
x, y, proportional = set_dimensions(a, b)
t = np.linspace(0, 2*np.pi, 100)
ell_x = x/2*np.cos(t)
ell_y = y/2*np.sin(t)
ell_x_th = (x/2-0.1)*np.cos(t)
ell_y_th = (y/2-0.1)*np.sin(t)
parent.ax.plot(ell_x, ell_y, colors["draw_main"], lw=2)
parent.ax.fill(ell_x,ell_y,color=colors["draw_main"],alpha=0.9)
if thickness_on == True:
parent.ax.plot(ell_x_th, ell_y_th, colors["draw_main"], lw=2)
parent.ax.fill(ell_x_th,ell_y_th,color=colors["secondary_color"])
coordinate_displacement = 0
elif shape == "Circle":
t = np.linspace(0, 2*np.pi, 100)
x = y = d = 2
proportional = True
circ_x = d/2*np.cos(t)
circ_y = d/2*np.sin(t)
circ_x_th = (d/2-0.1)*np.cos(t)
circ_y_th = (d/2-0.1)*np.sin(t)
circ = True
parent.ax.plot(circ_x, circ_y, colors["draw_main"], lw=2)
parent.ax.fill(circ_x,circ_y,color=colors["draw_main"],alpha=0.9)
if thickness_on == True:
parent.ax.plot(circ_x_th, circ_y_th, colors["draw_main"], lw=2)
parent.ax.fill(circ_x_th,circ_y_th,color=colors["secondary_color"])
coordinate_displacement = 0
elif shape == "Isosceles_triangle":
x, y, proportional = set_dimensions(a, b)
tri_x = [-x/2, x/2, 0, -x/2]
tri_y = [-y/3, -y/3, y/3*2, -y/3]
tri_x_th = [-x/2+0.175, x/2-0.175, 0, -x/2+0.175]
tri_y_th = [-y/3+0.075, -y/3+0.075, y/3*2-0.1, -y/3+0.075]
parent.ax.plot(tri_x, tri_y, colors["draw_main"], lw=2)
parent.ax.fill(tri_x,tri_y,color=colors["draw_main"],alpha=0.9)
if thickness_on == True:
parent.ax.plot(tri_x_th, tri_y_th, colors["draw_main"], lw=2)
parent.ax.fill(tri_x_th,tri_y_th,color=colors["secondary_color"])
coordinate_displacement = y/6
elif shape == "Right_triangle":
x, y, proportional = set_dimensions(a, b)
tri_x = [-x/2, x/2, -x/2, -x/2]
tri_y = [-y/3, -y/3, y/3*2, -y/3]
tri_x_th = [-x/2+0.1, x/2-0.4, -x/2+0.1, -x/2+0.1]
tri_y_th = [-y/3+0.1, -y/3+0.1, y/3*2-0.175, -y/3+0.1]
parent.ax.plot(tri_x, tri_y, colors["draw_main"], lw=2)
parent.ax.fill(tri_x,tri_y,color=colors["draw_main"],alpha=0.9)
if thickness_on == True:
parent.ax.plot(tri_x_th, tri_y_th, colors["draw_main"], lw=2)
parent.ax.fill(tri_x_th,tri_y_th,color=colors["secondary_color"])
coordinate_displacement = y/6
    elif shape is None:
        pass
if coordinate_on == True:
coordinate_system(x, y, parent.ax, coordinate_displacement, colors)
if dimension_lines_on == True:
dimension_lines(x, y, parent.ax, r"$a$", r"$b$", coordinate_displacement, colors, circ)
if transformed_coordinate_on == True:
transformed_coordinate_system(x, y, parent.ax, 15, colors)
transformation_dimensions(x, y, parent.ax, colors)
    if shape is not None:
if proportional == False:
parent.ax.text(-x, -y, "NEM arányos!!!", verticalalignment='center', size='large', color = colors["text_color"])
print(x,y)
parent.canvas.draw()
# USEFUL FUNCTIONS --------------------------------------------------------------------------------------------------------------------------------------------------------
def set_dimensions(a, b):
ab_rate = a/b
if ab_rate > 3:
x = 3
y = 1
proportional = False
elif ab_rate < 0.33:
x = 1
y = 3
proportional = False
else:
x = a
y = b
proportional = True
return x, y, proportional
def dimension_lines(x, y, ax, t1, t2, e, colors, circ = False):
transparency = 1
color = colors['draw_tertiary']
hw = 0.015*max(x,y)
hl = 2*hw
if circ == False:
line1_x = [-x/2-max(x,y)/4, 0]
line1_y = [y/2+e, y/2+e]
line2_x = [-x/2-max(x,y)/4, 0]
line2_y = [-y/2+e, -y/2+e]
line3_x = [-x/2, -x/2]
line3_y = [-y/2-max(x,y)/4+e, -2*e]
line4_x = [x/2, x/2]
line4_y = [-y/2-max(x,y)/4+e, -2*e]
ax.arrow(line1_x[0]+x/32, line2_y[0], 0, y, head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True)
ax.arrow(line1_x[0]+x/32, line1_y[0], 0, -y, head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True)
ax.arrow(line3_x[0], line3_y[0]+x/32, x, 0, head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True)
ax.arrow(line4_x[0], line3_y[0]+x/32, -x, 0, head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True)
ax.plot(line1_x, line1_y, color,zorder=0)
ax.plot(line2_x, line2_y, color,zorder=0)
ax.plot(line3_x, line3_y, color,zorder=0)
ax.plot(line4_x, line4_y, color,zorder=0)
ax.text(
0, -y/2-max(x,y)/16*5+e,
t1,
horizontalalignment='center',
verticalalignment='center',
size='large',
color = color,
alpha=transparency)
ax.text(
-x/2-max(x,y)/16*5, e,
t2,
horizontalalignment='center',
verticalalignment='center',
size='large',
color = color,
alpha=transparency)
elif circ == True:
line1_x = [-1, 1]
line1_y = [1.732, -1.732]
ax.plot(line1_x, line1_y, color,zorder=3)
ax.arrow(line1_x[0], line1_y[0], 0.5, -0.866, head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True)
ax.arrow(line1_x[1], line1_y[1], -0.5, 0.866, head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True)
ax.text(
1.1, -1.4,
"Ød",
horizontalalignment='center',
verticalalignment='center',
size='large',
color = color,
alpha=transparency)
ax.text(
-x/2-x*y/16*5, e,
t2,
horizontalalignment='center',
verticalalignment='center',
size='large',
color = color,
alpha=transparency)
def coordinate_system(x, y, ax, e, colors):
# if plot.shape == "Right_triangle":
# color = colors['draw_secondary']
# transparency = 1
# hw = 0.015*max(x,y)
# hl = 2*hw
# ax.arrow(
# -x/2-x*y/8, 0, x+x*y/3, 0,
# head_width=hw,
# head_length=hl,
# fc=color, ec=color,
# length_includes_head = True,
# alpha=transparency,
# zorder=3)
# ax.arrow(
# -x/3/2, -y/2-x*y/8+e, 0, y+x*y/3,
# head_width=hw,
# head_length=hl,
# fc=color, ec=color,
# length_includes_head = True,
# alpha=transparency,
# zorder=3)
# ax.text(
# x/2+x*y/5, -x*y/20,
# r"$x$",
# horizontalalignment='center',
# verticalalignment='center',
# size='large',
# color = color,
# alpha=transparency)
# ax.text(
# -x*y/20, y/2+x*y/5+e,
# r"$y$",
# horizontalalignment='center',
# verticalalignment='center',
# size='large',
# color = color,
# alpha=transparency)
# else:
color = colors['draw_secondary']
transparency = 1
hw = 0.015*max(x,y)
hl = 2*hw
ax.arrow(
-x/2-max(x,y)/8, 0, x+max(x,y)/3, 0,
head_width=hw,
head_length=hl,
fc=color, ec=color,
length_includes_head = True,
alpha=transparency,
zorder=3)
ax.arrow(
0, -y/2-max(x,y)/8+e, 0, y+max(x,y)/3,
head_width=hw,
head_length=hl,
fc=color, ec=color,
length_includes_head = True,
alpha=transparency,
zorder=3)
ax.text(
x/2+max(x,y)/5, -max(x,y)/20,
r"$x$",
horizontalalignment='center',
verticalalignment='center',
size='large',
color = color,
alpha=transparency)
ax.text(
-max(x,y)/20, y/2+max(x,y)/5+e,
r"$y$",
horizontalalignment='center',
verticalalignment='center',
size='large',
color = color,
alpha=transparency)
def transformed_coordinate_system(x, y, ax, phi, colors):
color = colors['draw_tertiary']
hw = 0.015*max(x,y)
hl = 2*hw
phi = phi/180*np.pi
ar1_x = (-x*3/4)*np.cos(phi)+x/5
ar1_y = -x*3/4*np.sin(phi)+y/5
ar1_dx = (x*3/2)*np.cos(phi)
ar1_dy = x*3/2*np.sin(phi)
ar2_x = y*3/4*np.sin(phi)+x/5
ar2_y = -y*3/4*np.cos(phi)+y/5
ar2_dx = (-y*3/2)*np.sin(phi)
ar2_dy = y*3/2*np.cos(phi)
ax.arrow(ar1_x, ar1_y, ar1_dx, ar1_dy,
head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, zorder=3)
ax.arrow(ar2_x, ar2_y, ar2_dx, ar2_dy,
head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, zorder=3)
ax.text(ar1_x+ar1_dx+x/20, ar1_y+ar1_dy+y/20, r"$x_1$", horizontalalignment='center', color = color,
verticalalignment='center', size='large')
ax.text(ar2_x+ar2_dx+x/20, ar2_y+ar2_dy+y/20, r"$y_1$", horizontalalignment='center', color = color,
verticalalignment='center', size='large')
def transformation_dimensions(x, y, ax, colors):
color = colors['draw_tertiary']
transparency = 1 #0.7
hw = 0.015*max(x,y)
hl = 2*hw
y_disp_x = [x/5, x]
y_disp_y = [y/5, y/5]
ax.plot(y_disp_x, y_disp_y, color, lw=1, zorder=5, alpha=transparency)
ax.arrow(x/2+x/8, 0, 0, y/5,
head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, alpha=transparency)
ax.arrow(x/2+x/8, y/5, 0, -y/5,
head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, alpha=transparency)
ax.text(x/2+x/6, y/8, r"$v$", horizontalalignment='center', color = color,
verticalalignment='center', alpha=transparency)
x_disp_x = [x/5, x/5]
x_disp_y = [y/5, -y/5]
ax.plot(x_disp_x, x_disp_y, color, lw=1, zorder=5, alpha=transparency)
ax.arrow(0, -y/8, x/5, 0,
head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, alpha=transparency)
ax.arrow(x/5, -y/8, -x/5, 0,
head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, alpha=transparency)
ax.text(x/8, -y/12, r"$u$", horizontalalignment='center', color = color,
verticalalignment='center', alpha=transparency)
style = "Simple, tail_width=0.2, head_width=4, head_length=8"
kw = dict(arrowstyle=style, color=color)
a3 = patches.FancyArrowPatch((x/2+x/3, y/5), (x/2+x/5+x/20, y/5+x*3/20),
connectionstyle="arc3,rad=.2", **kw, alpha=transparency)
ax.add_patch(a3)
ax.text(x/2+x/4+x/8, y/4+y/12, r"$\varphi$", horizontalalignment='center', color = color,
verticalalignment='center', alpha=transparency)
def sign_evaluation(alpha, angle_unit, beta = False):
if angle_unit == "deg":
alpha = alpha/180*np.pi
if beta == True:
alpha = alpha+np.pi/2
print('deg')
else:
if beta == True:
alpha = alpha+np.pi/2
print('rad')
if alpha >= 0 and alpha < np.pi/2:
signx = 1
signy = 1
if beta == True:
signx = -1
signy = 1
elif alpha >= np.pi/2 and alpha < np.pi:
signx = -1
signy = 1
if beta == True:
signx = 1
signy = 1
elif alpha >= np.pi and alpha < np.pi*3/2:
signx = -1
signy = -1
if beta == True:
signx = -1
signy = 1
elif alpha >= np.pi*3/2 and alpha < np.pi*2:
signx = 1
signy = -1
if beta == True:
signx = -1
signy = -1
else:
signx = 1
signy = 1
if beta == True:
signx = -1
signy = 1
return signx, signy, alpha
def plot_principal_axes(parent, colors, ax, alpha, angle_unit, transformed_coordinate_on,shape, a = 1.6, b = 0.8, d = 0.8):
principal_x = True
a_init, b_init, proportional = set_dimensions(a, b)
try:
parent.principal_axis1.remove()
parent.principal_axis2.remove()
parent.principal_axis1_text.remove()
parent.principal_axis2_text.remove()
    except Exception:
        # Nothing to remove on the first call, when the principal-axis artists do not exist yet.
        pass
if transformed_coordinate_on == False:
color = colors['draw_principal']
# evaluate orientation signs of the principal axis
signx1, signy1, beta1 = sign_evaluation(alpha, angle_unit)
signx2, signy2, beta2 = sign_evaluation(alpha, angle_unit, True)
if shape == "Rectangle":
x = (a_init+max(a_init,b_init)/4)/2
y = (b_init+max(a_init,b_init)/4)/2
x_offset = 0
y_offset = 0
elif shape == "Circle":
x = 2*d*3/4
y = 2*d*3/4
x_offset = 0
y_offset = 0
elif shape == "Ellipse":
x = (a_init+max(a_init,b_init)/4)/2
y = (b_init+max(a_init,b_init)/4)/2
x_offset = 0
y_offset = 0
elif shape == "Isosceles_triangle":
x = (a_init+max(a_init,b_init)/4)/2
y = (b_init+max(a_init,b_init)/4)/2
if x>y:
principal_x = True
else:
principal_x = False
x_offset = 0
y_offset = b_init/5
hw = 0.03*max(x,y)
hl = 2*hw
arrow_length = (x**2+y**2)**0.5
# first principal axis
x_val1 = arrow_length*np.cos(beta1)
y_val1 = arrow_length*np.sin(beta1)
sign_x1 = np.sign(x_val1)
sign_y1 = np.sign(y_val1)
if abs(x_val1) >= x:
x_val1 = sign_x1*x
y_val1 = x_val1*np.tan(beta1)
elif abs(y_val1) >= y:
y_val1 = sign_y1*y
x_val1 = y_val1/np.tan(beta1)
ar1_x1 = -signx1*x_val1
ar1_y1 = -signy1*y_val1
ar1_x2 = signx1*x_val1
ar1_y2 = signy1*y_val1
ar1_dx = ar1_x2-ar1_x1
ar1_dy = ar1_y2-ar1_y1
# second principal axis
x_val2 = arrow_length*np.cos(beta2)
y_val2 = arrow_length*np.sin(beta2)
sign_x2 = np.sign(x_val2)
sign_y2 = np.sign(y_val2)
if abs(x_val2) >= x:
x_val2 = sign_x2*x
y_val2 = x_val2*np.tan(beta2)
elif abs(y_val2) >= y:
y_val2 = sign_y2*y
x_val2 = y_val2/np.tan(beta2)
ar2_x1 = -signx2*x_val2
ar2_y1 = -signy2*y_val2
ar2_x2 = signx2*x_val2
ar2_y2 = signy2*y_val2
ar2_dx = ar2_x2-ar2_x1
ar2_dy = ar2_y2-ar2_y1
if principal_x == False:
parent.principal_axis1 = ax.arrow(ar1_x1+x_offset, ar1_y1, ar1_dx, ar1_dy,
head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, zorder=5)
parent.principal_axis2 = ax.arrow(ar2_x1, ar2_y1+y_offset, ar2_dx, ar2_dy,
head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, zorder=5)
parent.principal_axis1_text = ax.text(ar1_dx/2+0.06*max(ar1_dx,ar1_dy)+x_offset, ar1_dy/2+0.06*max(ar1_dx,ar1_dy), r"$I_1$", horizontalalignment='center', color = color,
verticalalignment='center')
parent.principal_axis2_text = ax.text(ar2_dx/2+0.06*max(ar1_dx,ar1_dy), ar2_dy/2+0.06*max(ar1_dx,ar1_dy)+y_offset, r"$I_2$", horizontalalignment='center', color = color,
verticalalignment='center')
else:
parent.principal_axis1 = ax.arrow(ar1_x1, ar1_y1+y_offset, ar1_dx, ar1_dy,
head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, zorder=5)
parent.principal_axis2 = ax.arrow(ar2_x1+x_offset, ar2_y1, ar2_dx, ar2_dy,
head_width=hw, head_length=hl, fc=color, ec=color,length_includes_head = True, zorder=5)
parent.principal_axis1_text = ax.text(ar1_dx/2+0.06*max(ar1_dx,ar1_dy), ar1_dy/2+0.06*max(ar1_dx,ar1_dy)+y_offset, r"$I_1$", horizontalalignment='center', color = color,
verticalalignment='center')
parent.principal_axis2_text = ax.text(ar2_dx/2+0.06*max(ar1_dx,ar1_dy)+x_offset, ar2_dy/2+0.06*max(ar1_dx,ar1_dy), r"$I_2$", horizontalalignment='center', color = color,
verticalalignment='center')
parent.canvas.draw()
``` |
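`plot()` expects `parent` to be a Tk widget that already carries a `plotted` attribute and can host a matplotlib canvas, and `colors` to be a dict with the keys referenced above (`secondary_color`, `draw_main`, `draw_secondary`, `draw_tertiary`, `draw_principal`, `text_color`). A minimal standalone sketch of calling it outside the full GUI; the import path and the concrete color values are assumptions:
```python
import tkinter as tk

from PlotFunctions import plot  # assumed import path; the module lives in CalcGui/

colors = {
    "secondary_color": "#f5f5f5",
    "draw_main": "#1f77b4",
    "draw_secondary": "#ff7f0e",
    "draw_tertiary": "#2ca02c",
    "draw_principal": "#d62728",
    "text_color": "#000000",
}

root = tk.Tk()
frame = tk.Frame(root)
frame.pack(fill="both", expand=True)
frame.plotted = False  # plot() checks this flag before destroying a previous canvas

plot(frame, "Rectangle", coordinate_on=True, dimension_lines_on=True,
     transformed_coordinate_on=False, thickness_on=False, colors=colors, a=1.6, b=0.8)
root.mainloop()
```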
{
"source": "JKalnins/Fourier_MLOOP",
"score": 2
} |
#### File: Fourier_MLOOP/test/test_floop.py
```python
import matplotlib.pyplot as plt
import numpy as np
from floop import floop
repeats = 3
n_ab = 3
savename = "test" # ignored because save==False
max_allowed_runs = 100
tcost = 0.0
y_targets = None # if not none, get from FourierFromParams
noise_type = "add"
noise_scale = 0.0
sleep_time = 0.0
save = False
# Run Once
def test_once():
np.random.seed(777) # Acer picked this seed
n_ab = 3
max_allowed_runs = 100
tcost = 0.0
noise_type = "add"
noise_scale = 0.0
a_t = np.random.random(n_ab) * 2 - 1
b_t = np.random.random(n_ab) * 2 - 1
y_target = floop.FourierFromParams(a_t, b_t)
costs, num_runs = floop.RunOnce(
max_allowed_runs, tcost, n_ab, y_target, noise_type, noise_scale
)
print(a_t, b_t)
print("mean cost:\n", np.average(costs))
print("runs:\n", num_runs)
runs = np.arange(num_runs)
mins = floop._MinCosts(costs)
plt.scatter(runs, costs)
plt.scatter(runs, mins)
plt.yscale("log")
plt.show()
runs_correct = bool(num_runs == 100)
if runs_correct:
print("passed test_once (Still requires manual sense check of graph)")
else:
print("failed test_once")
# Run Repeats
# outputs
def test_repeat():
repeats = 3
n_ab = 3
savename = "test" # ignored because save==False
max_allowed_runs = 100
tcost = 0.0
y_targets = None # if not none, get from FourierFromParams
noise_type = "add"
noise_scale = 0.0
sleep_time = 0.0
save = False
(
_, # start_times
max_runs,
_, # costs_arr
_, # min_costs_arr
min_costs_mean,
min_costs_stderr,
) = floop.RepeatRuns( # repeat function
repeats,
n_ab,
savename,
max_allowed_runs,
tcost,
y_targets,
noise_type,
noise_scale,
sleep_time,
save,
)
runs = np.arange(max_runs)
plt.errorbar(
runs,
min_costs_mean,
yerr=min_costs_stderr,
fmt=".",
c="black",
ms=5,
capsize=2,
capthick=1,
)
plt.show()
if __name__ == "__main__":
# test_once()
test_repeat()
``` |
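`floop._MinCosts(costs)` in `test_once` is plotted as a second scatter and, judging from its name, is presumably the running minimum of the cost trace; with NumPy that quantity can be computed directly:
```python
import numpy as np

costs = np.array([5.0, 3.2, 4.1, 2.8, 2.9])
running_min = np.minimum.accumulate(costs)
print(running_min)  # [5.  3.2 3.2 2.8 2.8]
```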
{
"source": "JKalnins/M-LOOP",
"score": 3
} |
#### File: M-LOOP/mloop/visualizations.py
```python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import mloop.utilities as mlu
import mloop.learners as mll
import mloop.controllers as mlc
import numpy as np
import logging
import matplotlib.pyplot as plt
import matplotlib as mpl
import warnings
figure_counter = 0
cmap = plt.get_cmap('hsv')
run_label = 'Run number'
fit_label = 'Fit number'
cost_label = 'Cost'
generation_label = 'Generation number'
scale_param_label = 'Min (0) to max (1) parameters'
param_label = 'Parameter'
log_length_scale_label = 'Log of length scale'
noise_label = 'Noise level'
_DEFAULT_LEGEND_LOC = 2
legend_loc = _DEFAULT_LEGEND_LOC
def set_legend_location(loc=None):
'''
Set the location of the legend in future figures.
Note that this function doesn't change the location of legends in existing
figures. It only changes where legends will appear in figures generated
after the call to this function. If called without arguments, the legend
location for future figures will revert to its default value.
Keyword Args:
loc (Optional str, int, or pair of floats): The value to use for loc in
the calls to matplotlib's legend(). Can be e.g. 2, 'upper right',
(1, 0). See matplotlib's documentation for more options and
additional information. If set to None then the legend location will
be set back to its default value. Default None.
'''
# Set default value for loc if necessary.
if loc is None:
loc = _DEFAULT_LEGEND_LOC
# Update the global used for setting the legend location.
global legend_loc
legend_loc = loc
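# A short usage sketch for set_legend_location() (hypothetical call site, kept as
# comments so the module still imports cleanly):
#
#     import mloop.visualizations as mlv
#     mlv.set_legend_location('upper right')  # affects figures created after this call
#     mlv.set_legend_location()               # revert to the default location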
def show_all_default_visualizations(controller,
show_plots=True,
max_parameters_per_plot=None):
'''
Plots all visualizations available for a controller, and it's internal learners.
Args:
controller (Controller): The controller to extract plots from
Keyword Args:
show_plots (Optional, bool): Determine whether to run plt.show() at the
end or not. For debugging. Default True.
max_parameters_per_plot (Optional, int): The maximum number of
parameters to include in plots that display the values of
parameters. If the number of parameters is larger than
parameters_per_plot, then the parameters will be divided into groups
and each group will be plotted in its own figure. If set to None,
then all parameters will be included in the same plot regardless of
how many there are. Default None.
'''
log = logging.getLogger(__name__)
configure_plots()
log.debug('Creating controller visualizations.')
create_controller_visualizations(
controller.total_archive_filename,
max_parameters_per_plot=max_parameters_per_plot,
)
# For machine learning controllers, the controller.learner is actually the
# learner for the trainer while controller.ml_learner is the machine
# learning controller. For other controllers, controller.learner is the
# actual learner.
try:
learner_archive_filename = controller.ml_learner.total_archive_filename
except AttributeError:
learner_archive_filename = controller.learner.total_archive_filename
log.debug('Creating learner visualizations.')
create_learner_visualizations(
learner_archive_filename,
max_parameters_per_plot=max_parameters_per_plot,
)
log.info('Showing visualizations, close all to end M-LOOP.')
if show_plots:
plt.show()
def show_all_default_visualizations_from_archive(controller_filename,
learner_filename,
controller_type=None,
show_plots=True,
max_parameters_per_plot=None,
controller_visualization_kwargs=None,
learner_visualization_kwargs=None,
learner_visualizer_init_kwargs=None):
'''
Plots all visualizations available for a controller and its learner from their archives.
Args:
controller_filename (str): The filename, including path, of the
controller archive.
learner_filename (str): The filename, including path, of the learner
archive.
Keyword Args:
controller_type (str): The value of controller_type type used in the
optimization corresponding to the learner learner archive, e.g.
'gaussian_process', 'neural_net', or 'differential_evolution'. If
set to None then controller_type will be determined automatically.
Default None.
show_plots (bool): Determine whether to run plt.show() at the end or
not. For debugging. Default True.
max_parameters_per_plot (Optional [int]): The maximum number of
parameters to include in plots that display the values of
parameters. If the number of parameters is larger than
parameters_per_plot, then the parameters will be divided into groups
and each group will be plotted in its own figure. If set to None,
then all parameters will be included in the same plot regardless of
how many there are. If a value for max_parameters_per_plot is
included in controller_visualization_kwargs, then the value in that
dictionary will override this setting. The same applies to
learner_visualization_kwargs. Default None.
controller_visualization_kwargs (dict): Keyword arguments to pass to the
controller visualizer's create_visualizations() method. If set to
None, no additional keyword arguments will be passed. Default None.
learner_visualization_kwargs (dict): Keyword arguments to pass to the
learner visualizer's create_visualizations() method. If set to
None, no additional keyword arguments will be passed. Default None.
learner_visualizer_init_kwargs (dict): Keyword arguments to pass to the
learner visualizer's __init__() method. If set to None, no
additional keyword arguments will be passed. Default None.
'''
# Set default value for controller_visualization_kwargs if necessary.
if controller_visualization_kwargs is None:
controller_visualization_kwargs = {}
# Update controller_visualization_kwargs with max_parameters_per_plot if
# necessary.
if 'max_parameters_per_plot' not in controller_visualization_kwargs:
controller_visualization_kwargs['max_parameters_per_plot'] = max_parameters_per_plot
log = logging.getLogger(__name__)
configure_plots()
# Create visualizations for the controller archive.
log.debug('Creating controller visualizations.')
create_controller_visualizations(
controller_filename,
**controller_visualization_kwargs,
)
# Create visualizations for the learner archive.
create_learner_visualizations(
learner_filename,
max_parameters_per_plot=max_parameters_per_plot,
learner_visualization_kwargs=learner_visualization_kwargs,
learner_visualizer_init_kwargs=learner_visualizer_init_kwargs,
)
log.info('Showing visualizations, close all to end M-LOOP.')
if show_plots:
plt.show()
def create_learner_visualizer_from_archive(filename, controller_type=None, **kwargs):
'''
Create an instance of the appropriate visualizer class for a learner archive.
Args:
filename (String): Filename of the learner archive.
Keyword Args:
controller_type (String): The type of controller used during the
optimization that created the provided learner archive. Options
include 'gaussian_process', 'neural_net', and
'differential_evolution'. If set to None, then controller_type will
be determined automatically from the archive. Default None.
**kwargs: Additional keyword arguments are passed to the visualizer's
__init__() method.
Returns:
visualizer: An instance of the appropriate visualizer class for plotting
data from filename.
'''
# Automatically determine controller_type if necessary.
if controller_type is None:
controller_type = mlu.get_controller_type_from_learner_archive(filename)
# Create an instance of the appropriate visualizer class for the archive.
log = logging.getLogger(__name__)
if controller_type == 'neural_net':
log.debug('Creating neural net visualizer.')
visualizer = NeuralNetVisualizer(filename, **kwargs)
elif controller_type == 'gaussian_process':
log.debug('Creating gaussian process visualizer.')
visualizer = GaussianProcessVisualizer(filename, **kwargs)
elif controller_type == 'differential_evolution':
log.debug('Creating differential evolution visualizer.')
visualizer = DifferentialEvolutionVisualizer(filename, **kwargs)
else:
message = ('create_learner_visualizer_from_archive() not implemented '
'for type: {type_}.').format(type_=controller_type)
log.error(message)
raise ValueError(message)
return visualizer
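# Usage sketch (the filename is a placeholder; controller_type is inferred from the
# archive when omitted):
#
#     visualizer = create_learner_visualizer_from_archive('learner_archive.txt')
#     visualizer.create_visualizations()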
def create_learner_visualizations(filename,
max_parameters_per_plot=None,
learner_visualization_kwargs=None,
learner_visualizer_init_kwargs=None):
'''
Runs the plots for a learner archive file.
Args:
filename (str): Filename for the learner archive.
Keyword Args:
max_parameters_per_plot (Optional [int]): The maximum number of
parameters to include in plots that display the values of
parameters. If the number of parameters is larger than
parameters_per_plot, then the parameters will be divided into groups
and each group will be plotted in its own figure. If set to None,
then all parameters will be included in the same plot regardless of
how many there are. If a value for max_parameters_per_plot is
included in learner_visualization_kwargs, then the value in that
dictionary will override this setting. Default None.
learner_visualization_kwargs (dict): Keyword arguments to pass to the
learner visualizer's create_visualizations() method. If set to
None, no additional keyword arguments will be passed. Default None.
learner_visualizer_init_kwargs (dict): Keyword arguments to pass to the
learner visualizer's __init__() method. If set to None, no
additional keyword arguments will be passed. Default None.
'''
# Set default values as necessary.
if learner_visualization_kwargs is None:
learner_visualization_kwargs = {}
if learner_visualizer_init_kwargs is None:
learner_visualizer_init_kwargs = {}
# Update controller_visualization_kwargs with max_parameters_per_plot if
# necessary.
if 'max_parameters_per_plot' not in learner_visualization_kwargs:
learner_visualization_kwargs['max_parameters_per_plot'] = max_parameters_per_plot
# Create a visualizer and have it make the plots.
visualizer = create_learner_visualizer_from_archive(
filename,
**learner_visualizer_init_kwargs,
)
visualizer.create_visualizations(**learner_visualization_kwargs)
def _color_from_controller_name(controller_name):
'''
    Gives a color (as a number between zero and one) corresponding to each controller name string.
'''
global cmap
return cmap(float(mlc.controller_dict[controller_name])/float(mlc.number_of_controllers))
def _color_list_from_num_of_params(num_of_params):
'''
Gives a list of colors based on the number of parameters.
'''
global cmap
return [cmap(float(x)/num_of_params) for x in range(num_of_params)]
def _ensure_parameter_subset_valid(visualizer, parameter_subset):
'''
Make sure indices in parameter_subset are acceptable.
Args:
visualizer (ControllerVisualizer-like): An instance of one of the
visualization classes defined in this module, which should have the
attributes param_numbers and log.
parameter_subset (list-like): The indices corresponding to a subset of
the optimization parameters. The indices should be 0-based, i.e. the
first parameter is identified with index 0. Generally the values of
the indices in parameter_subset should be integers between 0 and the
number of parameters minus one, inclusively.
'''
for ind in parameter_subset:
if ind not in visualizer.param_numbers:
message = '{ind} is not a valid parameter index.'.format(ind=ind)
visualizer.log.error(message)
raise ValueError(message)
def configure_plots():
'''
Configure the setting for the plots.
'''
mpl.rcParams['lines.linewidth'] = 2.0
mpl.rcParams['lines.markersize'] = 6.0
mpl.rcParams['font.size'] = 16.0
mpl.rcParams['savefig.format'] = 'pdf'
mpl.rcParams['legend.framealpha'] = 0.5
mpl.rcParams['legend.numpoints'] = 1
mpl.rcParams['legend.scatterpoints'] = 1
mpl.rcParams['legend.fontsize']= 'medium'
def create_controller_visualizations(filename,
file_type=None,
**kwargs):
'''
Runs the plots for a controller file.
Args:
filename (String): Filename of the controller archive.
Keyword Args:
file_type (String): Can be 'mat' for matlab, 'pkl' for pickle or 'txt'
for text. If set to None, then the type will be determined from the
extension in filename. Default None.
**kwargs: Additional keyword arguments are passed to the visualizer's
create_visualizations() method.
'''
visualization = ControllerVisualizer(filename,file_type=file_type)
visualization.create_visualizations(**kwargs)
class ControllerVisualizer():
'''
ControllerVisualizer creates figures from a Controller Archive.
Note that the data from the training archive, if one was provided to the
learner at the beginning of the optimization, is NOT included in the
controller archive generated during the optimization. Therefore any data
from the training archive is not included in the plots generated by this
class. This is in contrast to some of the learner visualizer classes.
Args:
filename (String): Filename of the controller archive.
Keyword Args:
file_type (String): Can be 'mat' for matlab, 'pkl' for pickle or 'txt'
for text. If set to None, then the type will be determined from the
extension in filename. Default None.
'''
def __init__(self, filename,
file_type=None,
**kwargs):
self.log = logging.getLogger(__name__)
self.filename = str(filename)
# Automatically determine file_type if necessary.
if file_type is None:
file_type = mlu.get_file_type(self.filename)
self.file_type = str(file_type)
if not mlu.check_file_type_supported(self.file_type):
self.log.error('File type not supported: ' + repr(self.file_type))
controller_dict = mlu.get_dict_from_file(self.filename, self.file_type)
        if 'archive_type' in controller_dict and not (controller_dict['archive_type'] == 'controller'):
            self.log.error('The archive appears to be the wrong type.')
            raise ValueError
        self.archive_type = controller_dict['archive_type']
self.num_in_costs = int(controller_dict['num_in_costs'])
self.num_out_params = int(controller_dict['num_out_params'])
self.out_params = np.array(controller_dict['out_params'])
self.out_type = [x.strip() for x in list(controller_dict['out_type'])]
self.in_costs = np.squeeze(np.array(controller_dict['in_costs']))
self.in_uncers = np.squeeze(np.array(controller_dict['in_uncers']))
self.in_bads = np.squeeze(list(controller_dict['in_bads']))
self.best_index = int(controller_dict['best_index'])
self.num_params = int(controller_dict['num_params'])
self.min_boundary = np.squeeze(np.array(controller_dict['min_boundary']))
self.max_boundary = np.squeeze(np.array(controller_dict['max_boundary']))
self.param_names = mlu._param_names_from_file_dict(controller_dict)
if np.all(np.isfinite(self.min_boundary)) and np.all(np.isfinite(self.max_boundary)):
self.finite_flag = True
self.param_scaler = lambda p: (p-self.min_boundary)/(self.max_boundary - self.min_boundary)
self.scaled_params = np.array([self.param_scaler(self.out_params[ind,:]) for ind in range(self.num_out_params)])
else:
self.finite_flag = False
self.unique_types = set(self.out_type)
self.cost_colors = [_color_from_controller_name(x) for x in self.out_type]
self.in_numbers = np.arange(1,self.num_in_costs+1)
self.out_numbers = np.arange(1,self.num_out_params+1)
self.param_numbers = np.arange(self.num_params)
def create_visualizations(self,
plot_cost_vs_run=True,
plot_parameters_vs_run=True,
plot_parameters_vs_cost=True,
max_parameters_per_plot=None):
'''
Runs the plots for a controller file.
Keyword Args:
plot_cost_vs_run (Optional [bool]): If True plot cost versus run
number, else do not. Default True.
plot_parameters_vs_run (Optional [bool]): If True plot parameters
versus run number, else do not. Default True.
plot_parameters_vs_cost (Optional [bool]): If True plot parameters
versus cost number, else do not. Default True.
max_parameters_per_plot (Optional [int]): The maximum number of
parameters to include in plots that display the values of
parameters. If the number of parameters is larger than
parameters_per_plot, then the parameters will be divided into
groups and each group will be plotted in its own figure. If set
to None, then all parameters will be included in the same plot
regardless of how many there are. Default None.
'''
parameter_chunks = mlu.chunk_list(
self.param_numbers,
max_parameters_per_plot,
)
if plot_cost_vs_run:
self.plot_cost_vs_run()
if plot_parameters_vs_run:
for parameter_chunk in parameter_chunks:
self.plot_parameters_vs_run(parameter_subset=parameter_chunk)
if plot_parameters_vs_cost:
for parameter_chunk in parameter_chunks:
self.plot_parameters_vs_cost(parameter_subset=parameter_chunk)
def plot_cost_vs_run(self):
'''
Create a plot of the costs versus run number.
Note that the data from the training archive, if one was provided to the
learner at the beginning of the optimization, will NOT be plotted here.
'''
global figure_counter, run_label, cost_label, legend_loc
figure_counter += 1
plt.figure(figure_counter)
# Only plot points for which a cost was actually measured. This may not
# be the case for all parameter sets if the optimization is still in
        # progress, or ended by a keyboard interrupt, etc.
in_numbers = self.in_numbers[:self.num_in_costs]
in_costs = self.in_costs[:self.num_in_costs]
in_uncers = self.in_uncers[:self.num_in_costs]
cost_colors = self.cost_colors[:self.num_in_costs]
plt.scatter(in_numbers, in_costs+in_uncers, marker='_', color='k')
plt.scatter(in_numbers, in_costs-in_uncers, marker='_', color='k')
plt.scatter(in_numbers, in_costs,marker='o', c=cost_colors, s=5*mpl.rcParams['lines.markersize'])
plt.xlabel(run_label)
plt.ylabel(cost_label)
plt.title('Controller: Cost vs run number.')
artists = []
for ut in self.unique_types:
artists.append(plt.Line2D((0,1),(0,0), color=_color_from_controller_name(ut), marker='o', linestyle=''))
plt.legend(artists,self.unique_types,loc=legend_loc)
def _ensure_parameter_subset_valid(self, parameter_subset):
_ensure_parameter_subset_valid(self, parameter_subset)
def plot_parameters_vs_run(self, parameter_subset=None):
'''
Create a plot of the parameters versus run number.
Note that the data from the training archive, if one was provided to the
learner at the beginning of the optimization, will NOT be plotted here.
Args:
parameter_subset (list-like): The indices of parameters to plot. The
indices should be 0-based, i.e. the first parameter is
identified with index 0. Generally the values of the indices in
parameter_subset should be between 0 and the number of
parameters minus one, inclusively. If set to `None`, then all
parameters will be plotted. Default None.
'''
# Get default value for parameter_subset if necessary.
if parameter_subset is None:
parameter_subset = self.param_numbers
# Make sure that the provided parameter_subset is acceptable.
self._ensure_parameter_subset_valid(parameter_subset)
# Generate set of distinct colors for plotting.
num_params = len(parameter_subset)
param_colors = _color_list_from_num_of_params(num_params)
        global figure_counter, run_label, param_label, scale_param_label, legend_loc
figure_counter += 1
plt.figure(figure_counter)
if self.finite_flag:
for ind in range(num_params):
param_index = parameter_subset[ind]
color = param_colors[ind]
plt.plot(self.out_numbers,self.scaled_params[:,param_index],'o',color=color)
plt.ylabel(scale_param_label)
plt.ylim((0,1))
else:
for ind in range(num_params):
param_index = parameter_subset[ind]
color = param_colors[ind]
plt.plot(self.out_numbers,self.out_params[:,param_index],'o',color=color)
            plt.ylabel(param_label)
plt.xlabel(run_label)
plt.title('Controller: Parameters vs run number.')
artists=[]
for ind in range(num_params):
color = param_colors[ind]
artists.append(plt.Line2D((0,1),(0,0), color=color,marker='o',linestyle=''))
legend_labels = mlu._generate_legend_labels(
parameter_subset,
self.param_names,
)
plt.legend(artists, legend_labels ,loc=legend_loc)
def plot_parameters_vs_cost(self, parameter_subset=None):
'''
        Create a plot of the cost versus parameter values.
Note that the data from the training archive, if one was provided to the
learner at the beginning of the optimization, will NOT be plotted here.
Args:
parameter_subset (list-like): The indices of parameters to plot. The
indices should be 0-based, i.e. the first parameter is
identified with index 0. Generally the values of the indices in
parameter_subset should be between 0 and the number of
parameters minus one, inclusively. If set to `None`, then all
parameters will be plotted. Default None.
'''
# Only plot points for which a cost was actually measured. This may not
# be the case for all parameter sets if the optimization is still in
        # progress, or ended by a keyboard interrupt, etc.
in_costs = self.in_costs[:self.num_in_costs]
in_uncers = self.in_uncers[:self.num_in_costs]
# Get default value for parameter_subset if necessary.
if parameter_subset is None:
parameter_subset = self.param_numbers
# Make sure that the provided parameter_subset is acceptable.
self._ensure_parameter_subset_valid(parameter_subset)
# Generate set of distinct colors for plotting.
num_params = len(parameter_subset)
param_colors = _color_list_from_num_of_params(num_params)
        global figure_counter, param_label, cost_label, scale_param_label, legend_loc
figure_counter += 1
plt.figure(figure_counter)
if self.finite_flag:
scaled_params = self.scaled_params[:self.num_in_costs,:]
for ind in range(num_params):
param_index = parameter_subset[ind]
color = param_colors[ind]
plt.plot(scaled_params[:,param_index], in_costs + in_uncers,'_',color=color)
plt.plot(scaled_params[:,param_index], in_costs - in_uncers,'_',color=color)
plt.plot(scaled_params[:,param_index], in_costs,'o',color=color)
plt.xlabel(scale_param_label)
plt.xlim((0,1))
else:
out_params = self.out_params[:self.num_in_costs, :]
for ind in range(num_params):
param_index = parameter_subset[ind]
color = param_colors[ind]
plt.plot(out_params[:,param_index], in_costs + in_uncers,'_',color=color)
plt.plot(out_params[:,param_index], in_costs - in_uncers,'_',color=color)
plt.plot(out_params[:,param_index], in_costs,'o',color=color)
            plt.xlabel(param_label)
plt.ylabel(cost_label)
plt.title('Controller: Cost vs parameters.')
artists=[]
for ind in range(num_params):
color = param_colors[ind]
artists.append(plt.Line2D((0,1),(0,0), color=color,marker='o',linestyle=''))
legend_labels = mlu._generate_legend_labels(
parameter_subset,
self.param_names,
)
plt.legend(artists, legend_labels ,loc=legend_loc)
def create_differential_evolution_learner_visualizations(filename,
file_type=None,
**kwargs):
'''
Runs the plots from a differential evolution learner file.
Args:
filename (String): Filename for the differential evolution learner
archive.
Keyword Args:
file_type (String): Can be 'mat' for matlab, 'pkl' for pickle or 'txt'
for text. If set to None, then the type will be determined from the
extension in filename. Default None.
**kwargs: Additional keyword arguments are passed to the visualizer's
create_visualizations() method.
'''
visualization = DifferentialEvolutionVisualizer(filename, file_type=file_type)
visualization.create_visualizations(**kwargs)
class DifferentialEvolutionVisualizer():
'''
DifferentialEvolutionVisualizer creates figures from a differential evolution archive.
Args:
filename (String): Filename of the DifferentialEvolutionVisualizer archive.
Keyword Args:
file_type (String): Can be 'mat' for matlab, 'pkl' for pickle or 'txt'
for text. If set to None, then the type will be determined from the
extension in filename. Default None.
'''
def __init__(self, filename,
file_type=None,
**kwargs):
self.log = logging.getLogger(__name__)
self.filename = str(filename)
# Automatically determine file_type if necessary.
if file_type is None:
file_type = mlu.get_file_type(self.filename)
self.file_type = str(file_type)
if not mlu.check_file_type_supported(self.file_type):
self.log.error('File type not supported: ' + repr(self.file_type))
learner_dict = mlu.get_dict_from_file(self.filename, self.file_type)
if 'archive_type' in learner_dict and not (learner_dict['archive_type'] == 'differential_evolution'):
self.log.error('The archive appears to be the wrong type.' + repr(learner_dict['archive_type']))
raise ValueError
self.archive_type = learner_dict['archive_type']
self.num_generations = int(learner_dict['generation_count'])
self.num_population_members = int(learner_dict['num_population_members'])
self.num_params = int(learner_dict['num_params'])
self.min_boundary = np.squeeze(np.array(learner_dict['min_boundary']))
self.max_boundary = np.squeeze(np.array(learner_dict['max_boundary']))
self.param_names = mlu._param_names_from_file_dict(learner_dict)
self.params_generations = np.array(learner_dict['params_generations'])
self.costs_generations = np.array(learner_dict['costs_generations'])
self.finite_flag = True
self.param_scaler = lambda p: (p-self.min_boundary)/(self.max_boundary - self.min_boundary)
self.scaled_params_generations = np.array([[self.param_scaler(self.params_generations[inda,indb,:]) for indb in range(self.num_population_members)] for inda in range(self.num_generations)])
self.param_numbers = np.arange(self.num_params)
self.gen_numbers = np.arange(1,self.num_generations+1)
self.param_colors = _color_list_from_num_of_params(self.num_params)
self.gen_plot = np.array([np.full(self.num_population_members, ind, dtype=int) for ind in self.gen_numbers]).flatten()
def create_visualizations(self,
plot_params_vs_generations=True,
plot_costs_vs_generations=True,
max_parameters_per_plot=None):
'''
Runs the plots from a differential evolution learner file.
Keyword Args:
plot_params_generations (Optional [bool]): If True plot parameters
vs generations, else do not. Default True.
plot_costs_generations (Optional [bool]): If True plot costs vs
generations, else do not. Default True.
max_parameters_per_plot (Optional [int]): The maximum number of
parameters to include in plots that display the values of
parameters. If the number of parameters is larger than
parameters_per_plot, then the parameters will be divided into
groups and each group will be plotted in its own figure. If set
to None, then all parameters will be included in the same plot
regardless of how many there are. Default None.
'''
parameter_chunks = mlu.chunk_list(
self.param_numbers,
max_parameters_per_plot,
)
if plot_params_vs_generations:
for parameter_chunk in parameter_chunks:
self.plot_params_vs_generations(
parameter_subset=parameter_chunk,
)
if plot_costs_vs_generations:
self.plot_costs_vs_generations()
def plot_costs_vs_generations(self):
'''
        Create a plot of the costs versus generation number.
'''
if self.costs_generations.size == 0:
self.log.warning('Unable to plot DE: costs vs generations as the initial generation did not complete.')
return
global figure_counter, cost_label, generation_label
figure_counter += 1
plt.figure(figure_counter)
plt.plot(self.gen_plot,self.costs_generations.flatten(),marker='o',linestyle='',color='k')
plt.xlabel(generation_label)
plt.ylabel(cost_label)
plt.title('Differential evolution: Cost vs generation number.')
def _ensure_parameter_subset_valid(self, parameter_subset):
_ensure_parameter_subset_valid(self, parameter_subset)
def plot_params_vs_generations(self, parameter_subset=None):
'''
        Create a plot of the parameters versus generation number.
Args:
parameter_subset (list-like): The indices of parameters to plot. The
indices should be 0-based, i.e. the first parameter is
identified with index 0. Generally the values of the indices in
parameter_subset should be between 0 and the number of
parameters minus one, inclusively. If set to `None`, then all
parameters will be plotted. Default None.
'''
# Get default value for parameter_subset if necessary.
if parameter_subset is None:
parameter_subset = self.param_numbers
# Make sure that the provided parameter_subset is acceptable.
self._ensure_parameter_subset_valid(parameter_subset)
# Generate set of distinct colors for plotting.
num_params = len(parameter_subset)
param_colors = _color_list_from_num_of_params(num_params)
if self.params_generations.size == 0:
self.log.warning('Unable to plot DE: params vs generations as the initial generation did not complete.')
return
global figure_counter, generation_label, scale_param_label, legend_loc
figure_counter += 1
plt.figure(figure_counter)
artists=[]
for ind in range(num_params):
param_index = parameter_subset[ind]
color = param_colors[ind]
plt.plot(self.gen_plot,self.params_generations[:,:,param_index].flatten(),marker='o',linestyle='',color=color)
artists.append(plt.Line2D((0,1),(0,0), color=color,marker='o',linestyle=''))
plt.ylim((0,1))
plt.title('Differential evolution: Params vs generation number.')
plt.xlabel(generation_label)
plt.ylabel(scale_param_label)
legend_labels = mlu._generate_legend_labels(
parameter_subset,
self.param_names,
)
plt.legend(artists, legend_labels ,loc=legend_loc)
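# Hedged usage sketch, not part of M-LOOP: it assumes the differential evolution
# visualizer class defined earlier in this file is constructed from an archive
# filename (like the GP and neural net visualizers below); the filename here is
# hypothetical.
def _example_de_visualization():  # illustrative only, never called by M-LOOP
    visualizer = DifferentialEvolutionVisualizer('de_learner_archive.pkl',
                                                 file_type='pkl')
    visualizer.create_visualizations(max_parameters_per_plot=4)
    plt.show()  # display the generated figures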
def create_gaussian_process_learner_visualizations(filename,
file_type=None,
**kwargs):
'''
Runs the plots from a gaussian process learner file.
Args:
filename (String): Filename for the gaussian process learner archive.
Keyword Args:
file_type (String): Can be 'mat' for matlab, 'pkl' for pickle or 'txt'
for text. If set to None, then the type will be determined from the
extension in filename. Default None.
**kwargs: Additional keyword arguments are passed to the visualizer's
create_visualizations() method.
'''
visualization = GaussianProcessVisualizer(filename, file_type=file_type)
visualization.create_visualizations(**kwargs)
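# Hedged usage sketch, not part of M-LOOP: the archive filename below is
# hypothetical; any Gaussian process learner archive produced by an M-LOOP run
# can be passed in the same way.
def _example_gp_visualization():  # illustrative only, never called by M-LOOP
    create_gaussian_process_learner_visualizations(
        'gp_learner_archive.txt',
        file_type='txt',
        plot_cross_sections=True,
        plot_hyperparameters_vs_fit=True,
        max_parameters_per_plot=4,
    )
    plt.show()  # display the generated figures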
class GaussianProcessVisualizer(mll.GaussianProcessLearner):
'''
    GaussianProcessVisualizer extends GaussianProcessLearner. It is designed
    not to be used as a learner, but to post-process a GaussianProcessLearner
    archive file and produce useful data for visualization of the state of the
    learner. It fixes the Gaussian process hyperparameters to the values last
    found during the run.
If a training archive was provided at the start of the optimization as
`gp_training_filename` and that training archive was generated by a Gaussian
process optimization, then some of its data is saved in the new learner
archive generated during the optimization. That implies that some of the
data, such as fitted hyperparameter values, from the training archive will
be included in the plots generated by this class.
Args:
filename (String): Filename of the GaussianProcessLearner archive.
Keyword Args:
file_type (String): Can be 'mat' for matlab, 'pkl' for pickle or 'txt'
for text. If set to None, then the type will be determined from the
extension in filename. Default None.
'''
def __init__(self, filename, file_type=None, **kwargs):
super(GaussianProcessVisualizer, self).__init__(gp_training_filename = filename,
gp_training_file_type = file_type,
gp_training_override_kwargs=True,
update_hyperparameters = False,
**kwargs)
self.log = logging.getLogger(__name__)
training_dict = self.training_dict
# Optimization options not loaded by parent class.
self.param_names = mlu._param_names_from_file_dict(training_dict)
self.cost_has_noise = bool(training_dict['cost_has_noise'])
#Trust region
self.has_trust_region = bool(np.array(training_dict['has_trust_region']))
self.trust_region = np.squeeze(np.array(training_dict['trust_region'], dtype=float))
# Try to extract options not present in archives from M-LOOP <= 3.1.1
if 'length_scale_bounds' in training_dict:
self.length_scale_bounds = mlu.safe_cast_to_array(training_dict['length_scale_bounds'])
if 'noise_level_bounds' in training_dict:
self.noise_level_bounds = mlu.safe_cast_to_array(training_dict['noise_level_bounds'])
self.fit_gaussian_process()
self.param_numbers = np.arange(self.num_params)
self.log_length_scale_history = np.log10(np.array(self.length_scale_history, dtype=float))
self.noise_level_history = np.array(self.noise_level_history)
if np.all(np.isfinite(self.min_boundary)) and np.all(np.isfinite(self.max_boundary)):
self.finite_flag = True
self.param_scaler = lambda p: (p-self.min_boundary)/self.diff_boundary
else:
self.finite_flag = False
if self.has_trust_region:
self.scaled_trust_min = self.param_scaler(np.maximum(self.best_params - self.trust_region, self.min_boundary))
self.scaled_trust_max = self.param_scaler(np.minimum(self.best_params + self.trust_region, self.max_boundary))
# Record value of update_hyperparameters used for optimization. Note that
# self.update_hyperparameters is always set to False here above
# regardless of its value during the optimization.
self.used_update_hyperparameters = training_dict['update_hyperparameters']
def run(self):
'''
        Overrides the GaussianProcessLearner multiprocessor run routine. Does nothing but log a warning.
        '''
        self.log.warning('You should not have executed start() from the GaussianProcessVisualizer. It is not intended to be used as an independent process. Ending.')
def return_cross_sections(self, points=100, cross_section_center=None):
'''
        Finds the predicted global minimum, then returns a list of vectors of
        parameter values, costs and uncertainties, corresponding to the 1D cross
        sections along each parameter axis through the predicted global minimum.
Keyword Args:
points (int): the number of points to sample along each cross section. Default value is 100.
cross_section_center (array): parameter array where the centre of the cross section should be taken. If None, the parameters for the best returned cost are used.
Returns:
a tuple (cross_arrays, cost_arrays, uncer_arrays)
cross_parameter_arrays (list): a list of arrays for each cross section, with the values of the varied parameter going from the minimum to maximum value.
cost_arrays (list): a list of arrays for the costs evaluated along each cross section about the minimum.
uncertainty_arrays (list): a list of uncertainties
'''
points = int(points)
if points <= 0:
self.log.error('Points provided must be larger than zero:' + repr(points))
raise ValueError
if cross_section_center is None:
cross_section_center = self.best_params
else:
cross_section_center = np.array(cross_section_center)
if not self.check_in_boundary(cross_section_center):
self.log.error('cross_section_center not in boundaries:' + repr(cross_section_center))
raise ValueError
cross_parameter_arrays = [ np.linspace(min_p, max_p, points) for (min_p,max_p) in zip(self.min_boundary,self.max_boundary)]
scaled_cost_arrays = []
scaled_uncertainty_arrays = []
for ind in range(self.num_params):
sample_parameters = np.array([cross_section_center for _ in range(points)])
sample_parameters[:, ind] = cross_parameter_arrays[ind]
(costs, uncers) = self.gaussian_process.predict(sample_parameters,return_std=True)
scaled_cost_arrays.append(costs)
scaled_uncertainty_arrays.append(uncers)
cross_parameter_arrays = np.array(cross_parameter_arrays)
cost_arrays = self.cost_scaler.inverse_transform(np.array(scaled_cost_arrays))
uncertainty_arrays = np.array(scaled_uncertainty_arrays) * self.cost_scaler.scale_
return (cross_parameter_arrays,cost_arrays,uncertainty_arrays)
def create_visualizations(self,
plot_cross_sections=True,
plot_hyperparameters_vs_fit=True,
plot_noise_level_vs_fit=True,
max_parameters_per_plot=None,
**kwargs):
'''
Runs the plots from a gaussian process learner file.
Keyword Args:
plot_cross_sections (Optional [bool]): If `True` plot predicted
landscape cross sections, else do not. Default `True`.
plot_hyperparameters_vs_fit (Optional [bool]): If `True` plot fitted
hyperparameters as a function of fit number, else do not.
Default `True`.
plot_noise_level_vs_fit (Optional [bool]): If `True` plot the fitted
noise level as a function of fit number, else do not. If there
is no fitted noise level (i.e. `cost_has_noise` was set to
`False`), then this plot will not be made regardless of the
value passed for `plot_noise_level_vs_fit`. Default `True`.
max_parameters_per_plot (Optional [int]): The maximum number of
parameters to include in plots that display the values of
parameters. If the number of parameters is larger than
                `max_parameters_per_plot`, then the parameters will be divided into
groups and each group will be plotted in its own figure. If set
to `None`, then all parameters will be included in the same plot
regardless of how many there are. Default `None`.
'''
# Check for deprecated argument names.
if 'plot_hyperparameters_vs_run' in kwargs:
msg = ("create_visualizations() argument "
"plot_hyperparameters_vs_run is deprecated; "
"use plot_hyperparameters_vs_fit instead.")
warnings.warn(msg)
plot_hyperparameters_vs_fit = kwargs['plot_hyperparameters_vs_run']
if 'plot_noise_level_vs_run' in kwargs:
msg = ("create_visualizations() argument "
"plot_noise_level_vs_run is deprecated; "
"use plot_noise_level_vs_fit instead.")
warnings.warn(msg)
plot_noise_level_vs_fit = kwargs['plot_noise_level_vs_run']
# Determine which parameters belong on plots together.
parameter_chunks = mlu.chunk_list(
self.param_numbers,
max_parameters_per_plot,
)
# Generate the requested plots.
if plot_cross_sections:
for parameter_chunk in parameter_chunks:
self.plot_cross_sections(
parameter_subset=parameter_chunk,
)
if plot_hyperparameters_vs_fit:
for parameter_chunk in parameter_chunks:
self.plot_hyperparameters_vs_fit(
parameter_subset=parameter_chunk,
)
if plot_noise_level_vs_fit:
self.plot_noise_level_vs_fit()
def _ensure_parameter_subset_valid(self, parameter_subset):
_ensure_parameter_subset_valid(self, parameter_subset)
def plot_cross_sections(self, parameter_subset=None):
'''
Produce a figure of the cross section about best cost and parameters.
Args:
parameter_subset (list-like): The indices of parameters to plot. The
indices should be 0-based, i.e. the first parameter is
identified with index 0. Generally the values of the indices in
parameter_subset should be between 0 and the number of
parameters minus one, inclusively. If set to `None`, then all
parameters will be plotted. Default None.
'''
# Get default value for parameter_subset if necessary.
if parameter_subset is None:
parameter_subset = self.param_numbers
# Make sure that the provided parameter_subset is acceptable.
self._ensure_parameter_subset_valid(parameter_subset)
# Generate set of distinct colors for plotting.
num_params = len(parameter_subset)
param_colors = _color_list_from_num_of_params(num_params)
global figure_counter, legend_loc
figure_counter += 1
plt.figure(figure_counter)
points = 100
(_,cost_arrays,uncertainty_arrays) = self.return_cross_sections(points=points)
rel_params = np.linspace(0,1,points)
for ind in range(num_params):
param_index = parameter_subset[ind]
color = param_colors[ind]
plt.plot(rel_params,cost_arrays[param_index,:] + uncertainty_arrays[param_index,:],'--',color=color)
plt.plot(rel_params,cost_arrays[param_index,:] - uncertainty_arrays[param_index,:],'--',color=color)
plt.plot(rel_params,cost_arrays[param_index,:],'-',color=color)
if self.has_trust_region:
axes = plt.gca()
ymin, ymax = axes.get_ylim()
ytrust = ymin + 0.1*(ymax - ymin)
for ind in range(num_params):
param_index = parameter_subset[ind]
color = param_colors[ind]
plt.plot([self.scaled_trust_min[param_index],self.scaled_trust_max[param_index]],[ytrust,ytrust],'s', color=color)
plt.xlabel(scale_param_label)
plt.xlim((0,1))
plt.ylabel(cost_label)
plt.title('GP Learner: Predicted landscape' + (' with trust regions.' if self.has_trust_region else '.'))
artists = []
for ind in range(num_params):
color = param_colors[ind]
artists.append(plt.Line2D((0,1),(0,0), color=color, linestyle='-'))
legend_labels = mlu._generate_legend_labels(
parameter_subset,
self.param_names,
)
plt.legend(artists, legend_labels ,loc=legend_loc)
'''
Method is currently not supported. Of questionable usefulness. Not yet deleted.
def plot_all_minima_vs_cost(self):
#Produce figure of the all the local minima versus cost.
if not self.has_all_minima:
self.find_all_minima()
global figure_counter, legend_loc
figure_counter += 1
plt.figure(figure_counter)
self.minima_num = self.all_minima_costs.size
scaled_minima_params = np.array([self.param_scaler(self.all_minima_parameters[ind,:]) for ind in range(self.minima_num)])
global run_label, run_label, scale_param_label
if self.finite_flag:
for ind in range(self.num_params):
plt.plot(scaled_minima_params[:,ind],self.all_minima_costs+self.all_minima_uncers,'_',color=self.param_colors[ind])
plt.plot(scaled_minima_params[:,ind],self.all_minima_costs-self.all_minima_uncers,'_',color=self.param_colors[ind])
plt.plot(scaled_minima_params[:,ind],self.all_minima_costs,'o',color=self.param_colors[ind])
plt.xlabel(scale_param_label)
else:
for ind in range(self.num_params):
plt.plot(self.all_minima_parameters[:,ind],self.all_minima_costs+self.all_minima_uncers,'_',color=self.param_colors[ind])
plt.plot(self.all_minima_parameters[:,ind],self.all_minima_costs-self.all_minima_uncers,'_',color=self.param_colors[ind])
plt.plot(self.all_minima_parameters[:,ind],self.all_minima_costs,'o',color=self.param_colors[ind])
plt.xlabel(run_label)
plt.xlabel(scale_param_label)
plt.xlim((0,1))
plt.ylabel(cost_label)
plt.title('GP Learner: Cost vs parameters.')
artists = []
for ind in range(self.num_params):
artists.append(plt.Line2D((0,1),(0,0), color=self.param_colors[ind],marker='o',linestyle=''))
plt.legend(artists, [str(x) for x in range(self.num_params)], loc=legend_loc)
'''
def plot_hyperparameters_vs_run(self, *args, **kwargs):
'''
Deprecated. Use `plot_hyperparameters_vs_fit()` instead.
'''
msg = ("plot_hyperparameters_vs_run() is deprecated; "
"use plot_hyperparameters_vs_fit() instead.")
warnings.warn(msg)
self.plot_hyperparameters_vs_fit(*args, **kwargs)
def plot_hyperparameters_vs_fit(self, parameter_subset=None):
'''
Produce a figure of the hyperparameters as a function of fit number.
Only one fit is performed per generation, and multiple parameter sets
are run each generation. Therefore the number of fits is generally less
than the number of runs.
The plot generated will include the data from the training archive if
one was provided as `gp_training_filename` and the training archive was
generated by a Gaussian process optimization.
Args:
parameter_subset (list-like): The indices of parameters to plot. The
indices should be 0-based, i.e. the first parameter is
identified with index 0. Generally the values of the indices in
`parameter_subset` should be between 0 and the number of
parameters minus one, inclusively. If set to `None`, then all
parameters will be plotted. Default `None`.
'''
# Get default value for parameter_subset if necessary.
if parameter_subset is None:
parameter_subset = self.param_numbers
# Make sure that the provided parameter_subset is acceptable.
self._ensure_parameter_subset_valid(parameter_subset)
# Get the indices corresponding to the number of fits. If
# update_hyperparameters was set to False, then we'll say that there
# were zero fits of the hyperparameters.
if self.used_update_hyperparameters:
log_length_scale_history = self.log_length_scale_history
fit_numbers = np.arange(1, len(log_length_scale_history)+1)
else:
fit_numbers = [0]
log_length_scale_history = np.log10(np.array([self.length_scale], dtype=float))
# Generate set of distinct colors for plotting.
num_params = len(parameter_subset)
param_colors = _color_list_from_num_of_params(num_params)
global figure_counter, fit_label, legend_loc, log_length_scale_label
figure_counter += 1
plt.figure(figure_counter)
if type(self.length_scale) is float:
# First treat the case of an isotropic kernel with one length scale
# shared by all parameters.
plt.plot(fit_numbers, log_length_scale_history,'o',color=param_colors[0])
plt.title('GP Learner: Log of length scale vs fit number.')
else:
# Now treat case of non-isotropic kernels with one length scale per
# parameter.
artists=[]
for ind in range(num_params):
param_index = parameter_subset[ind]
color = param_colors[ind]
plt.plot(fit_numbers, log_length_scale_history[:,param_index],'o',color=color)
artists.append(plt.Line2D((0,1),(0,0), color=color,marker='o',linestyle=''))
legend_labels = mlu._generate_legend_labels(
parameter_subset,
self.param_names,
)
plt.legend(artists, legend_labels ,loc=legend_loc)
plt.title('GP Learner: Log of length scales vs fit number.')
plt.xlabel(fit_label)
plt.ylabel(log_length_scale_label)
def plot_noise_level_vs_run(self, *args, **kwargs):
'''
Deprecated. Use `plot_noise_level_vs_fit()` instead.
'''
msg = ("plot_noise_level_vs_run() is deprecated; "
"use plot_noise_level_vs_fit() instead.")
warnings.warn(msg)
self.plot_noise_level_vs_fit(*args, **kwargs)
def plot_noise_level_vs_fit(self):
'''
This method plots the fitted noise level as a function of fit number.
The `noise_level` approximates the variance of values that would be
measured if the cost were repeatedly measured for the same set of
parameters. Note that this is the variance in those costs; not the
standard deviation.
This plot is only relevant to optimizations for which `cost_has_noise`
is `True`. If `cost_has_noise` is `False` then this method does nothing
and silently returns.
Only one fit is performed per generation, and multiple parameter sets
are run each generation. Therefore the number of fits is generally less
than the number of runs.
The plot generated will include the data from the training archive if
one was provided as `gp_training_filename` and the training archive was
generated by a Gaussian process optimization.
'''
# Make plot of noise level vs run number if cost has noise.
if self.cost_has_noise:
global figure_counter, fit_label, noise_label
if self.used_update_hyperparameters:
noise_level_history = self.noise_level_history
fit_numbers = np.arange(1, len(noise_level_history)+1)
else:
# As in self.plot_hyperparameters_vs_run(), if
# update_hyperparameters was set to False, we'll say there were
# zero fits and plot the only value.
fit_numbers = [0]
noise_level_history = [self.noise_level]
figure_counter += 1
plt.figure(figure_counter)
plt.plot(fit_numbers, noise_level_history,'o',color='k')
plt.xlabel(fit_label)
plt.ylabel(noise_label)
plt.title('GP Learner: Noise level vs fit number.')
def create_neural_net_learner_visualizations(filename,
file_type=None,
**kwargs):
'''
Creates plots from a neural net's learner file.
Args:
filename (String): Filename for the neural net learner archive.
Keyword Args:
file_type (String): Can be 'mat' for matlab, 'pkl' for pickle or 'txt'
for text. If set to None, then the type will be determined from the
extension in filename. Default None.
**kwargs: Additional keyword arguments are passed to the visualizer's
create_visualizations() method.
'''
visualization = NeuralNetVisualizer(filename, file_type=file_type)
visualization.create_visualizations(**kwargs)
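# Hedged usage sketch, not part of M-LOOP: the archive filename below is
# hypothetical; any neural net learner archive produced by an M-LOOP run can be
# passed in the same way.
def _example_nn_visualization():  # illustrative only, never called by M-LOOP
    create_neural_net_learner_visualizations(
        'neural_net_learner_archive.pkl',
        file_type='pkl',
        plot_cross_sections=True,
        max_parameters_per_plot=4,
    )
    plt.show()  # display the generated figures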
class NeuralNetVisualizer(mll.NeuralNetLearner):
'''
    NeuralNetVisualizer extends NeuralNetLearner. It is designed not to be used
    as a learner, but to post-process a NeuralNetLearner archive file and
    produce useful data for visualization of the state of the learner.
Args:
filename (String): Filename of the NeuralNetLearner archive.
Keyword Args:
file_type (String): Can be 'mat' for matlab, 'pkl' for pickle or 'txt'
for text. If set to None, then the type will be determined from the
extension in filename. Default None.
'''
def __init__(self, filename, file_type = None, **kwargs):
super(NeuralNetVisualizer, self).__init__(nn_training_filename = filename,
nn_training_file_type = file_type,
update_hyperparameters = False,
**kwargs)
self.log = logging.getLogger(__name__)
training_dict = self.training_dict
# Archive data not loaded by parent class
self.param_names = mlu._param_names_from_file_dict(training_dict)
#Trust region
self.has_trust_region = bool(np.array(training_dict['has_trust_region']))
self.trust_region = np.squeeze(np.array(training_dict['trust_region'], dtype=float))
self.nn_training_file_dir = self.training_file_dir
# Cost scaler
self.cost_scaler_init_index = training_dict['cost_scaler_init_index']
        if self.cost_scaler_init_index is not None:
self._init_cost_scaler()
# update_hyperparameters wasn't used or saved by M-LOOP versions 3.1.1
# and below, but effectively was set to False. Default to that value for
# archives that don't have an entry for it.
update_hyperparameters = training_dict.get(
'update_hyperparameters',
False,
)
self.update_hyperparameters = bool(update_hyperparameters)
self.import_neural_net()
if np.all(np.isfinite(self.min_boundary)) and np.all(np.isfinite(self.max_boundary)):
self.finite_flag = True
self.param_scaler = lambda p: (p-self.min_boundary)/self.diff_boundary
else:
self.finite_flag = False
if self.has_trust_region:
self.scaled_trust_min = self.param_scaler(np.maximum(self.best_params - self.trust_region, self.min_boundary))
self.scaled_trust_max = self.param_scaler(np.minimum(self.best_params + self.trust_region, self.max_boundary))
self.param_numbers = np.arange(self.num_params)
def run(self):
'''
        Overrides the NeuralNetLearner multiprocessor run routine. Does nothing but log a warning.
        '''
        self.log.warning('You should not have executed start() from the NeuralNetVisualizer. It is not intended to be used as an independent process. Ending.')
def create_visualizations(self,
plot_cross_sections=True,
max_parameters_per_plot=None):
'''
Creates plots from a neural net's learner file.
Keyword Args:
plot_cross_sections (Optional [bool]): If True plot predicted
landscape cross sections, else do not. Default True.
max_parameters_per_plot (Optional [int]): The maximum number of
parameters to include in plots that display the values of
parameters. If the number of parameters is larger than
                max_parameters_per_plot, then the parameters will be divided into
groups and each group will be plotted in its own figure. If set
to None, then all parameters will be included in the same plot
regardless of how many there are. Default None.
'''
parameter_chunks = mlu.chunk_list(
self.param_numbers,
max_parameters_per_plot,
)
if plot_cross_sections:
for parameter_chunk in parameter_chunks:
self.do_cross_sections(parameter_subset=parameter_chunk)
self.plot_surface()
self.plot_density_surface()
self.plot_losses()
self.plot_regularization_history()
def return_cross_sections(self, points=100, cross_section_center=None):
'''
        Finds the predicted global minimum, then, for each neural net, returns
        the parameter values and predicted costs along the 1D cross sections
        taken along each parameter axis through the predicted global minimum.
Keyword Args:
points (int): the number of points to sample along each cross section. Default value is 100.
cross_section_center (array): parameter array where the centre of the cross section should be taken. If None, the parameters for the best returned cost are used.
        Returns:
            a list with one entry per neural net; each entry is a tuple
            (cross_parameter_arrays, cost_arrays)
            cross_parameter_arrays (list): a list of arrays for each cross section, with the values of the varied parameter going from the minimum to maximum value.
            cost_arrays (list): a list of arrays for the costs evaluated along each cross section about the minimum.
'''
points = int(points)
if points <= 0:
self.log.error('Points provided must be larger than zero:' + repr(points))
raise ValueError
if cross_section_center is None:
cross_section_center = self.best_params
else:
cross_section_center = np.array(cross_section_center)
if not self.check_in_boundary(cross_section_center):
self.log.error('cross_section_center not in boundaries:' + repr(cross_section_center))
raise ValueError
res = []
for net_index in range(self.num_nets):
cross_parameter_arrays = [ np.linspace(min_p, max_p, points) for (min_p,max_p) in zip(self.min_boundary,self.max_boundary)]
scaled_cost_arrays = []
for ind in range(self.num_params):
sample_parameters = np.array([cross_section_center for _ in range(points)])
sample_parameters[:, ind] = cross_parameter_arrays[ind]
costs = self.predict_costs_from_param_array(sample_parameters, net_index)
scaled_cost_arrays.append(costs)
cross_parameter_arrays = np.array(cross_parameter_arrays)
cost_arrays = self.cost_scaler.inverse_transform(np.array(scaled_cost_arrays))
res.append((cross_parameter_arrays, cost_arrays))
return res
def _ensure_parameter_subset_valid(self, parameter_subset):
_ensure_parameter_subset_valid(self, parameter_subset)
def do_cross_sections(self, parameter_subset=None):
'''
Produce a figure of the cross section about best cost and parameters.
Args:
parameter_subset (list-like): The indices of parameters to plot. The
indices should be 0-based, i.e. the first parameter is
identified with index 0. Generally the values of the indices in
parameter_subset should be between 0 and the number of
parameters minus one, inclusively. If set to `None`, then all
parameters will be plotted. Default None.
'''
# Get default value for parameter_subset if necessary.
if parameter_subset is None:
parameter_subset = self.param_numbers
# Make sure that the provided parameter_subset is acceptable.
self._ensure_parameter_subset_valid(parameter_subset)
# Generate set of distinct colors for plotting.
num_params = len(parameter_subset)
param_colors = _color_list_from_num_of_params(num_params)
# Generate labels for legends.
legend_labels = mlu._generate_legend_labels(
parameter_subset,
self.param_names,
)
points = 100
rel_params = np.linspace(0,1,points)
all_cost_arrays = [a for _,a in self.return_cross_sections(points=points)]
for net_index, cost_arrays in enumerate(all_cost_arrays):
def prepare_plot():
global figure_counter
figure_counter += 1
fig = plt.figure(figure_counter)
axes = plt.gca()
for ind in range(num_params):
param_index = parameter_subset[ind]
color = param_colors[ind]
axes.plot(rel_params,cost_arrays[param_index,:],'-',color=color,label=str(param_index))
if self.has_trust_region:
ymin, ymax = axes.get_ylim()
ytrust = ymin + 0.1*(ymax - ymin)
for ind in range(num_params):
param_index = parameter_subset[ind]
color = param_colors[ind]
axes.plot([self.scaled_trust_min[param_index],self.scaled_trust_max[param_index]],[ytrust,ytrust],'s', color=color)
axes.set_xlabel(scale_param_label)
axes.set_xlim((0,1))
axes.set_ylabel(cost_label)
axes.set_title('NN Learner: Predicted landscape' + (' with trust regions.' if self.has_trust_region else '.') + ' (' + str(net_index) + ')')
return fig
prepare_plot()
artists = []
for ind in range(num_params):
color = param_colors[ind]
artists.append(plt.Line2D((0,1),(0,0), color=color, linestyle='-'))
plt.legend(artists, legend_labels ,loc=legend_loc)
if self.num_nets > 1:
# And now create a plot showing the average, max and min for each cross section.
def prepare_plot():
global figure_counter
figure_counter += 1
fig = plt.figure(figure_counter)
axes = plt.gca()
for ind in range(num_params):
param_index = parameter_subset[ind]
color = param_colors[ind]
this_param_cost_array = np.array(all_cost_arrays)[:,param_index,:]
mn = np.mean(this_param_cost_array, axis=0)
m = np.min(this_param_cost_array, axis=0)
M = np.max(this_param_cost_array, axis=0)
axes.plot(rel_params,mn,'-',color=color,label=str(param_index))
axes.plot(rel_params,m,'--',color=color,label=str(param_index))
axes.plot(rel_params,M,'--',color=color,label=str(param_index))
axes.set_xlabel(scale_param_label)
axes.set_xlim((0,1))
axes.set_ylabel(cost_label)
axes.set_title('NN Learner: Average predicted landscape')
return fig
prepare_plot()
            artists = []
            for ind in range(num_params):
                color = param_colors[ind]
                artists.append(plt.Line2D((0,1),(0,0), color=color, linestyle='-'))
            plt.legend(artists, legend_labels, loc=legend_loc)
def plot_surface(self):
'''
Produce a figure of the cost surface (only works when there are 2 parameters)
'''
if self.num_params != 2:
return
global figure_counter
figure_counter += 1
fig = plt.figure(figure_counter)
from mpl_toolkits.mplot3d import Axes3D
ax = fig.add_subplot(111, projection='3d')
points = 50
param_set = [ np.linspace(min_p, max_p, points) for (min_p,max_p) in zip(self.min_boundary,self.max_boundary)]
params = [(x,y) for x in param_set[0] for y in param_set[1]]
costs = self.predict_costs_from_param_array(params)
ax.scatter([param[0] for param in params], [param[1] for param in params], costs)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('cost')
ax.scatter(self.all_params[:,0], self.all_params[:,1], self.all_costs, c='r')
def plot_density_surface(self):
'''
Produce a density plot of the cost surface (only works when there are 2 parameters)
'''
if self.num_params != 2:
return
global figure_counter
figure_counter += 1
fig = plt.figure(figure_counter)
points = 50
xs, ys = np.meshgrid(
np.linspace(self.min_boundary[0], self.max_boundary[0], points),
np.linspace(self.min_boundary[1], self.max_boundary[1], points))
zs_list = self.predict_costs_from_param_array(list(zip(xs.flatten(),ys.flatten())))
zs = np.array(zs_list).reshape(points,points)
plt.pcolormesh(xs,ys,zs)
plt.scatter(self.all_params[:,0], self.all_params[:,1], c=self.all_costs, vmin=np.min(zs), vmax=np.max(zs), s=100)
plt.colorbar()
plt.xlabel("Param 0")
plt.ylabel("Param 1")
def plot_losses(self):
'''
Produce a figure of the loss as a function of epoch for each net.
The loss is the mean-squared fitting error of the neural net plus the
regularization loss, which is the regularization coefficient times the
mean L2 norm of the neural net weight arrays (without the square root).
Note that the fitting error is calculated after normalizing the data, so
it is in arbitrary units.
As the neural nets are fit, the loss is recorded every 10 epochs. The
number of epochs per fit varies, and may be different for different
        nets. The loss will generally increase at the beginning of each fit as
new data points will have been added.
Also note that a lower loss isn't always better; a loss that is too low
can be a sign of overfitting.
'''
global figure_counter
figure_counter += 1
fig = plt.figure(figure_counter)
all_losses = self.get_losses()
# Generate set of distinct colors for plotting.
num_nets = len(all_losses)
net_colors = _color_list_from_num_of_params(num_nets)
artists=[]
legend_labels=[]
for ind, losses in enumerate(all_losses):
color = net_colors[ind]
epoch_numbers = 10 * np.arange(len(losses))
plt.plot(epoch_numbers, losses, color=color, marker='o', linestyle='')
artists.append(plt.Line2D((0,1),(0,0), color=color,marker='o',linestyle=''))
legend_labels.append('Net {net_index}'.format(net_index=ind))
plt.yscale('log')
plt.xlabel("Epoch")
plt.ylabel("Fitting Loss")
plt.title('Loss vs Epoch')
plt.legend(artists, legend_labels, loc=legend_loc)
def plot_regularization_history(self):
'''
Produces a plot of the regularization coefficient values used.
The neural nets use L2 regularization to smooth their predicted
landscapes in an attempt to avoid overfitting the data. The strength of
the regularization is set by the regularization coefficient, which is a
hyperparameter that is tuned during the optimization if
`update_hyperparameters` is set to `True`. Generally larger
regularization coefficient values force the landscape to be smoother
while smaller values allow it to vary more quickly. A value too large
can lead to underfitting while a value too small can lead to
overfitting. The ideal regularization coefficient value will depend on
many factors, such as the shape of the actual cost landscape, the SNR of
the measured costs, and even the number of measured costs.
This method plots the initial regularization coefficient value and the
optimal values found for the regularization coefficient when performing
the hyperparameter tuning. One curve showing the history of values used
for the regularization coefficient is plotted for each neural net. If
`update_hyperparameters` was set to `False` during the optimization,
then only the initial default value will be plotted.
'''
global figure_counter
figure_counter += 1
fig = plt.figure(figure_counter)
regularization_histories = self.get_regularization_histories()
# Generate set of distinct colors for plotting.
num_nets = len(regularization_histories)
net_colors = _color_list_from_num_of_params(num_nets)
artists=[]
legend_labels=[]
for ind, regularization_history in enumerate(regularization_histories):
color = net_colors[ind]
hyperparameter_fit_numbers = np.arange(len(regularization_history))
plt.plot(hyperparameter_fit_numbers, regularization_history, color=color, marker='o', linestyle='-')
artists.append(plt.Line2D((0,1),(0,0), color=color,marker='o',linestyle=''))
legend_labels.append('Net {net_index}'.format(net_index=ind))
plt.yscale('log')
plt.xlabel("Hyperparameter Fit Number")
plt.ylabel("Regularization Coefficient")
plt.title("Regularization Tuning History")
plt.legend(artists, legend_labels, loc=legend_loc)
``` |
{
"source": "jkalscheuer/runway",
"score": 2
} |
#### File: runway/commands/modules_command.py
```python
from __future__ import print_function
# pylint trips up on this in virtualenv
# https://github.com/PyCQA/pylint/issues/73
from distutils.util import strtobool # noqa pylint: disable=no-name-in-module,import-error
import copy
import glob
import logging
import os
import sys
from builtins import input
import boto3
import six
import yaml
from .runway_command import RunwayCommand, get_env
from ..context import Context
from ..util import (
change_dir, load_object_from_string, merge_dicts,
merge_nested_environment_dicts
)
LOGGER = logging.getLogger('runway')
def assume_role(role_arn, session_name=None, duration_seconds=None,
region='us-east-1', env_vars=None):
"""Assume IAM role."""
if session_name is None:
session_name = 'runway'
assume_role_opts = {'RoleArn': role_arn,
'RoleSessionName': session_name}
if duration_seconds:
assume_role_opts['DurationSeconds'] = int(duration_seconds)
boto_args = {}
if env_vars:
for i in ['aws_access_key_id', 'aws_secret_access_key',
'aws_session_token']:
if env_vars.get(i.upper()):
boto_args[i] = env_vars[i.upper()]
sts_client = boto3.client('sts', region_name=region, **boto_args)
LOGGER.info("Assuming role %s...", role_arn)
response = sts_client.assume_role(**assume_role_opts)
return {'AWS_ACCESS_KEY_ID': response['Credentials']['AccessKeyId'],
'AWS_SECRET_ACCESS_KEY': response['Credentials']['SecretAccessKey'], # noqa
'AWS_SESSION_TOKEN': response['Credentials']['SessionToken']}
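# Hedged usage sketch, not part of runway: the role ARN below is a placeholder.
# assume_role() returns the three temporary-credential environment variables,
# which the deployment code merges into the context's env_vars before running
# module commands.
def _example_assume_role():  # illustrative only, never called by runway
    creds = assume_role(
        role_arn='arn:aws:iam::123456789012:role/deploy-role',  # hypothetical
        session_name='runway-example',
        duration_seconds=3600,
        region='us-east-1',
    )
    return creds['AWS_ACCESS_KEY_ID'], creds['AWS_SESSION_TOKEN']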
def determine_module_class(path, class_path):
"""Determine type of module and return deployment module class."""
if not class_path:
# First check directory name for type-indicating suffix
basename = os.path.basename(path)
if basename.endswith('.sls'):
class_path = 'runway.module.serverless.Serverless'
elif basename.endswith('.tf'):
class_path = 'runway.module.terraform.Terraform'
elif basename.endswith('.cdk'):
class_path = 'runway.module.cdk.CloudDevelopmentKit'
elif basename.endswith('.cfn'):
class_path = 'runway.module.cloudformation.CloudFormation'
if not class_path:
# Fallback to autodetection
if os.path.isfile(os.path.join(path, 'serverless.yml')):
class_path = 'runway.module.serverless.Serverless'
elif glob.glob(os.path.join(path, '*.tf')):
class_path = 'runway.module.terraform.Terraform'
elif os.path.isfile(os.path.join(path, 'cdk.json')) \
and os.path.isfile(os.path.join(path, 'package.json')):
class_path = 'runway.module.cdk.CloudDevelopmentKit'
elif glob.glob(os.path.join(path, '*.env')) or (
glob.glob(os.path.join(path, '*.yaml'))) or (
glob.glob(os.path.join(path, '*.yml'))):
class_path = 'runway.module.cloudformation.CloudFormation'
if not class_path:
LOGGER.error('No module class found for %s', os.path.basename(path))
sys.exit(1)
return load_object_from_string(class_path)
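# Hedged sketch, not part of runway: shows how the suffix- and file-based
# dispatch above resolves a module directory; the path below is hypothetical.
def _example_module_class_lookup():  # illustrative only, never called by runway
    # A directory named "sampleapp.tf" resolves to the Terraform module class
    # via its suffix, before any file-based autodetection is attempted.
    return determine_module_class('/repo/sampleapp.tf', None)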
def path_is_current_dir(path):
    """Determine if defined path is reference to current directory."""
    return path in ['.', '.' + os.sep]
def load_module_opts_from_file(path, module_options):
"""Update module_options with any options defined in module path."""
module_options_file = os.path.join(path,
'runway.module.yml')
if os.path.isfile(module_options_file):
with open(module_options_file, 'r') as stream:
module_options = merge_dicts(module_options,
yaml.safe_load(stream))
return module_options
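# Hedged sketch, not part of runway: options defined in a module directory's
# runway.module.yml are merged over the deployment-level options; the path and
# option values below are hypothetical.
def _example_module_opts():  # illustrative only, never called by runway
    deployment_opts = {'environments': {'dev': {'namespace': 'myapp-dev'}}}
    return load_module_opts_from_file('/repo/sampleapp.cfn', deployment_opts)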
def post_deploy_assume_role(assume_role_config, context):
"""Revert to previous credentials, if necessary."""
if isinstance(assume_role_config, dict):
if assume_role_config.get('post_deploy_env_revert'):
context.restore_existing_iam_env_vars()
def pre_deploy_assume_role(assume_role_config, context):
"""Assume role (prior to deployment)."""
if isinstance(assume_role_config, dict):
assume_role_arn = ''
if assume_role_config.get('post_deploy_env_revert'):
context.save_existing_iam_env_vars()
if assume_role_config.get('arn'):
assume_role_arn = assume_role_config['arn']
assume_role_duration = assume_role_config.get('duration')
elif assume_role_config.get(context.env_name):
if isinstance(assume_role_config[context.env_name], dict):
assume_role_arn = assume_role_config[context.env_name]['arn'] # noqa
assume_role_duration = assume_role_config[context.env_name].get('duration') # noqa pylint: disable=line-too-long
else:
assume_role_arn = assume_role_config[context.env_name]
assume_role_duration = None
else:
LOGGER.info('Skipping assume-role; no role found for '
'environment %s...',
context.env_name)
if assume_role_arn:
context.env_vars = merge_dicts(
context.env_vars,
assume_role(
role_arn=assume_role_arn,
session_name=assume_role_config.get('session_name', None),
duration_seconds=assume_role_duration,
region=context.env_region,
env_vars=context.env_vars
)
)
else:
context.env_vars = merge_dicts(
context.env_vars,
assume_role(role_arn=assume_role_config,
region=context.env_region,
env_vars=context.env_vars)
)
def validate_account_alias(iam_client, account_alias):
"""Exit if list_account_aliases doesn't include account_alias."""
# Super overkill here using pagination when an account can only
# have a single alias, but at least this implementation should be
# future-proof
current_account_aliases = []
paginator = iam_client.get_paginator('list_account_aliases')
response_iterator = paginator.paginate()
for page in response_iterator:
current_account_aliases.extend(page.get('AccountAliases', []))
if account_alias in current_account_aliases:
LOGGER.info('Verified current AWS account alias matches required '
'alias %s.',
account_alias)
else:
LOGGER.error('Current AWS account aliases "%s" do not match '
'required account alias %s in Runway config.',
','.join(current_account_aliases),
account_alias)
sys.exit(1)
def validate_account_id(sts_client, account_id):
"""Exit if get_caller_identity doesn't match account_id."""
resp = sts_client.get_caller_identity()
if 'Account' in resp:
if resp['Account'] == account_id:
LOGGER.info('Verified current AWS account matches required '
'account id %s.',
account_id)
else:
LOGGER.error('Current AWS account %s does not match '
'required account %s in Runway config.',
resp['Account'],
account_id)
sys.exit(1)
else:
LOGGER.error('Error checking current account ID')
sys.exit(1)
def validate_account_credentials(deployment, context):
"""Exit if requested deployment account doesn't match credentials."""
boto_args = {'region_name': context.env_vars['AWS_DEFAULT_REGION']}
for i in ['aws_access_key_id', 'aws_secret_access_key',
'aws_session_token']:
if context.env_vars.get(i.upper()):
boto_args[i] = context.env_vars[i.upper()]
if isinstance(deployment.get('account-id'), (int, six.string_types)):
account_id = str(deployment['account-id'])
elif deployment.get('account-id', {}).get(context.env_name):
account_id = str(deployment['account-id'][context.env_name])
else:
account_id = None
if account_id:
validate_account_id(boto3.client('sts', **boto_args), account_id)
if isinstance(deployment.get('account-alias'), six.string_types):
account_alias = deployment['account-alias']
elif deployment.get('account-alias', {}).get(context.env_name):
account_alias = deployment['account-alias'][context.env_name]
else:
account_alias = None
if account_alias:
validate_account_alias(boto3.client('iam', **boto_args),
account_alias)
def echo_detected_environment(env_name, env_vars):
"""Print a helper note about how the environment was determined."""
env_override_name = 'DEPLOY_ENVIRONMENT'
LOGGER.info("")
if env_override_name in env_vars:
LOGGER.info("Environment \"%s\" was determined from the %s environment variable.",
env_name,
env_override_name)
LOGGER.info("If this is not correct, update "
"the value (or unset it to fall back to the name of "
"the current git branch or parent directory).")
else:
LOGGER.info("Environment \"%s\" was determined from the current "
"git branch or parent directory.",
env_name)
LOGGER.info("If this is not the environment name, update the branch/folder name or "
"set an override value via the %s environment variable",
env_override_name)
LOGGER.info("")
class ModulesCommand(RunwayCommand):
"""Env deployment class."""
def run(self, deployments=None, command='plan'): # noqa pylint: disable=too-many-branches,too-many-statements
"""Execute apps/code command."""
if deployments is None:
deployments = self.runway_config['deployments']
context = Context(env_name=get_env(self.env_root,
self.runway_config.get('ignore_git_branch', False)),
env_region=None,
env_root=self.env_root,
env_vars=os.environ.copy())
echo_detected_environment(context.env_name, context.env_vars)
# set default names if needed
for i, deployment in enumerate(deployments):
if not deployment.get('name'):
deployment['name'] = 'deployment_' + str(i+1)
if command == 'destroy':
LOGGER.info('WARNING!')
LOGGER.info('Runway is running in DESTROY mode.')
if context.env_vars.get('CI', None):
if command == 'destroy':
deployments_to_run = self.reverse_deployments(deployments)
else:
deployments_to_run = deployments
else:
if command == 'destroy':
LOGGER.info('Any/all deployment(s) selected will be '
'irrecoverably DESTROYED.')
deployments_to_run = self.reverse_deployments(
self.select_deployment_to_run(
context.env_name,
deployments,
command=command
)
)
else:
deployments_to_run = self.select_deployment_to_run(
context.env_name,
deployments
)
LOGGER.info("Found %d deployment(s)", len(deployments_to_run))
for i, deployment in enumerate(deployments_to_run):
LOGGER.info("")
LOGGER.info("")
LOGGER.info("======= Processing deployment '%s' ===========================",
deployment.get('name'))
if deployment.get('regions'):
if deployment.get('env_vars'):
deployment_env_vars = merge_nested_environment_dicts(
deployment.get('env_vars'), env_name=context.env_name,
env_root=self.env_root
)
if deployment_env_vars:
LOGGER.info("OS environment variable overrides being "
"applied this deployment: %s",
str(deployment_env_vars))
context.env_vars = merge_dicts(context.env_vars, deployment_env_vars)
LOGGER.info("")
LOGGER.info("Attempting to deploy '%s' to region(s): %s",
context.env_name,
", ".join(deployment['regions']))
for region in deployment['regions']:
LOGGER.info("")
LOGGER.info("======= Processing region %s ================"
"===========", region)
context.env_region = region
context.env_vars = merge_dicts(
context.env_vars,
{'AWS_DEFAULT_REGION': context.env_region,
'AWS_REGION': context.env_region}
)
if deployment.get('assume-role'):
pre_deploy_assume_role(deployment['assume-role'], context)
if deployment.get('account-id') or (deployment.get('account-alias')):
validate_account_credentials(deployment, context)
modules = deployment.get('modules', [])
if deployment.get('current_dir'):
modules.append('.' + os.sep)
for module in modules:
self._deploy_module(module, deployment, context, command)
if deployment.get('assume-role'):
post_deploy_assume_role(deployment['assume-role'], context)
else:
LOGGER.error('No region configured for any deployment')
sys.exit(1)
def _deploy_module(self, module, deployment, context, command):
module_opts = {}
if deployment.get('environments'):
module_opts['environments'] = deployment['environments'].copy() # noqa
if deployment.get('module_options'):
module_opts['options'] = deployment['module_options'].copy() # noqa
if isinstance(module, six.string_types):
module = {'path': module}
if path_is_current_dir(module['path']):
module_root = self.env_root
else:
module_root = os.path.join(self.env_root, module['path'])
module_opts = merge_dicts(module_opts, module)
module_opts = load_module_opts_from_file(module_root, module_opts)
if deployment.get('skip-npm-ci'):
module_opts['skip_npm_ci'] = True
LOGGER.info("")
LOGGER.info("---- Processing module '%s' for '%s' in %s --------------",
module['path'],
context.env_name,
context.env_region)
LOGGER.info("Module options: %s", module_opts)
with change_dir(module_root):
# dynamically load the particular module's class, 'get' the method
# associated with the command, and call the method
module_class = determine_module_class(module_root,
module_opts.get('class_path'))
module_instance = module_class(
context=context,
path=module_root,
options=module_opts
)
if hasattr(module_instance, command):
command_method = getattr(module_instance, command)
command_method()
else:
LOGGER.error("'%s' is missing method '%s'",
module_instance, command)
sys.exit(1)
@staticmethod
def reverse_deployments(deployments=None):
"""Reverse deployments and the modules/regions in them."""
if deployments is None:
deployments = []
reversed_deployments = []
for i in deployments[::-1]:
deployment = copy.deepcopy(i)
for config in ['modules', 'regions']:
if deployment.get(config):
deployment[config] = deployment[config][::-1]
reversed_deployments.append(deployment)
return reversed_deployments
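    # Illustrative note, not in the original: given deployments
    #   [{'name': 'd1', 'modules': ['a', 'b'], 'regions': ['us-east-1', 'us-west-2']},
    #    {'name': 'd2', 'modules': ['c'], 'regions': ['eu-west-1']}]
    # reverse_deployments() returns d2 first, with each deployment's modules and
    # regions lists also reversed, so destroys run in the opposite order of builds.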
@staticmethod
def select_deployment_to_run(env_name, deployments=None, command='build'): # noqa pylint: disable=too-many-branches,too-many-statements,too-many-locals
"""Query user for deployments to run."""
if deployments is None or not deployments:
return []
deployments_to_run = []
num_deployments = len(deployments)
if num_deployments == 1:
selected_deployment_index = 1
else:
print('')
print('Configured deployments:')
for i, deployment in enumerate(deployments):
print(" %d: %s" % (i+1, _deployment_menu_entry(deployment)))
print('')
print('')
if command == 'destroy':
print('(Operating in destroy mode -- "all" will destroy all '
'deployments in reverse order)')
selected_deployment_index = input('Enter number of deployment to run (or "all"): ')
if selected_deployment_index == 'all':
return deployments
if selected_deployment_index == '':
LOGGER.error('Please select a valid number (or "all")')
sys.exit(1)
selected_deployment = deployments[int(selected_deployment_index) - 1]
if selected_deployment.get('current_dir', False):
deployments_to_run.append(selected_deployment)
elif not selected_deployment.get('modules', []):
LOGGER.error('No modules configured in selected deployment')
sys.exit(1)
elif len(selected_deployment['modules']) == 1:
# No need to select a module in the deployment - there's only one
if command == 'destroy':
LOGGER.info('(only one deployment detected; all modules '
'automatically selected for termination)')
if not strtobool(input('Proceed?: ')):
sys.exit(0)
deployments_to_run.append(selected_deployment)
else:
modules = selected_deployment['modules']
print('')
print('Configured modules in deployment \'%s\':' % selected_deployment.get('name'))
for i, module in enumerate(modules):
print(" %s: %s" % (i+1, _module_menu_entry(module, env_name)))
print('')
print('')
if command == 'destroy':
print('(Operating in destroy mode -- "all" will destroy all '
'deployments in reverse order)')
selected_module_index = input('Enter number of module to run (or "all"): ')
if selected_module_index == 'all':
deployments_to_run.append(selected_deployment)
elif selected_module_index == '' or (
not selected_module_index.isdigit() or (
not 0 < int(selected_module_index) <= len(modules))):
LOGGER.error('Please select a valid number (or "all")')
sys.exit(1)
else:
selected_deployment['modules'] = [modules[int(selected_module_index) - 1]]
deployments_to_run.append(selected_deployment)
LOGGER.debug('Selected deployment is %s...', deployments_to_run)
return deployments_to_run
def _module_name_for_display(module):
"""Extract a name for the module."""
if isinstance(module, dict):
return module['path']
return str(module)
def _module_menu_entry(module, environment_name):
"""Build a string to display in the 'select module' menu."""
name = _module_name_for_display(module)
if isinstance(module, dict):
environment_config = module.get('environments', {}).get(environment_name)
if environment_config:
return "%s (%s)" % (name, environment_config)
return "%s" % (name)
def _deployment_menu_entry(deployment):
"""Build a string to display in the 'select deployment' menu."""
paths = ", ".join([_module_name_for_display(module) for module in deployment['modules']])
regions = ", ".join(deployment.get('regions', []))
return "%s - %s (%s)" % (deployment.get('name'), paths, regions)
```
#### File: commands/runway/gen_sample.py
```python
import logging
import os
import shutil
import sys
from ..runway_command import RunwayCommand
from ...tfenv import get_latest_tf_version
LOGGER = logging.getLogger('runway')
ROOT = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
def generate_sample_module(module_dir):
"""Generate skeleton sample module."""
if os.path.isdir(module_dir):
LOGGER.error("Error generating sample module -- directory %s "
"already exists!",
module_dir)
sys.exit(1)
os.mkdir(module_dir)
def generate_sample_sls_module(env_root, module_dir=None):
"""Generate skeleton Serverless sample module."""
if module_dir is None:
module_dir = os.path.join(env_root, 'sampleapp.sls')
generate_sample_module(module_dir)
for i in ['config-dev-us-east-1.json', 'handler.py', 'package.json',
'serverless.yml']:
shutil.copyfile(
os.path.join(ROOT,
'templates',
'serverless',
i),
os.path.join(module_dir, i),
)
LOGGER.info("Sample Serverless module created at %s",
module_dir)
def generate_sample_sls_tsc_module(env_root, module_dir=None):
"""Generate skeleton Serverless TypeScript sample module."""
if module_dir is None:
module_dir = os.path.join(env_root, 'sampleapp.sls')
generate_sample_module(module_dir)
for i in ['package.json', 'serverless.yml', 'tsconfig.json',
'webpack.config.js']:
shutil.copyfile(
os.path.join(ROOT,
'templates',
'sls-tsc',
i),
os.path.join(module_dir, i),
)
os.mkdir(os.path.join(module_dir, 'src'))
for i in ['handler.spec.ts', 'handler.ts']:
shutil.copyfile(
os.path.join(ROOT,
'templates',
'sls-tsc',
'src',
i),
os.path.join(module_dir, 'src', i),
)
LOGGER.info("Sample Serverless TypeScript module created at %s",
module_dir)
def generate_sample_cdk_tsc_module(env_root, module_dir=None):
"""Generate skeleton CDK TS sample module."""
if module_dir is None:
module_dir = os.path.join(env_root, 'sampleapp.cdk')
generate_sample_module(module_dir)
for i in ['.npmignore', 'cdk.json', 'package.json', 'runway.module.yml',
'tsconfig.json', 'README.md']:
shutil.copyfile(
os.path.join(ROOT,
'templates',
'cdk-tsc',
i),
os.path.join(module_dir, i),
)
for i in [['bin', 'sample.ts'], ['lib', 'sample-stack.ts']]:
os.mkdir(os.path.join(module_dir, i[0]))
shutil.copyfile(
os.path.join(ROOT,
'templates',
'cdk-tsc',
i[0],
i[1]),
os.path.join(module_dir, i[0], i[1]),
)
with open(os.path.join(module_dir, '.gitignore'), 'w') as stream:
stream.write('*.js\n')
stream.write('*.d.ts\n')
stream.write('node_modules\n')
LOGGER.info("Sample CDK module created at %s", module_dir)
LOGGER.info('To finish its setup, change to the %s directory and execute '
'"npm install" to generate its lockfile.', module_dir)
def generate_sample_cdk_cs_module(env_root, module_dir=None):
"""Generate skeleton CDK C# sample module."""
if module_dir is None:
module_dir = os.path.join(env_root, 'sampleapp.cdk')
generate_sample_module(module_dir)
for i in ['add-project.hook.d.ts', 'cdk.json', 'package.json',
'runway.module.yml', 'README.md']:
shutil.copyfile(
os.path.join(ROOT,
'templates',
'cdk-csharp',
i),
os.path.join(module_dir, i),
)
shutil.copyfile(
os.path.join(ROOT,
'templates',
'cdk-csharp',
'dot_gitignore'),
os.path.join(module_dir, '.gitignore'),
)
os.mkdir(os.path.join(module_dir, 'src'))
shutil.copyfile(
os.path.join(ROOT,
'templates',
'cdk-csharp',
'src',
'HelloCdk.sln'),
os.path.join(module_dir, 'src', 'HelloCdk.sln'),
)
os.mkdir(os.path.join(module_dir, 'src', 'HelloCdk'))
for i in ['HelloCdk.csproj', 'HelloConstruct.cs', 'HelloStack.cs',
'Program.cs']:
shutil.copyfile(
os.path.join(ROOT,
'templates',
'cdk-csharp',
'src',
'HelloCdk',
i),
os.path.join(module_dir, 'src', 'HelloCdk', i),
)
LOGGER.info("Sample C# CDK module created at %s", module_dir)
LOGGER.info('To finish its setup, change to the %s directory and execute '
'"npm install" to generate its lockfile.', module_dir)
def generate_sample_cdk_py_module(env_root, module_dir=None):
"""Generate skeleton CDK python sample module."""
if module_dir is None:
module_dir = os.path.join(env_root, 'sampleapp.cdk')
generate_sample_module(module_dir)
for i in ['app.py', 'cdk.json', 'lambda-index.py', 'package.json',
'runway.module.yml', 'Pipfile']:
shutil.copyfile(
os.path.join(ROOT,
'templates',
'cdk-py',
i),
os.path.join(module_dir, i),
)
with open(os.path.join(module_dir, '.gitignore'), 'w') as stream:
stream.write('node_modules')
LOGGER.info("Sample CDK module created at %s", module_dir)
LOGGER.info('To finish its setup, change to the %s directory and execute '
'"npm install" and "pipenv update -d --three" to generate its '
'lockfiles.', module_dir)
def generate_sample_cfn_module(env_root, module_dir=None):
"""Generate skeleton CloudFormation sample module."""
if module_dir is None:
module_dir = os.path.join(env_root, 'sampleapp.cfn')
generate_sample_module(module_dir)
for i in ['stacks.yaml', 'dev-us-east-1.env']:
shutil.copyfile(
os.path.join(ROOT,
'templates',
'cfn',
i),
os.path.join(module_dir, i)
)
os.mkdir(os.path.join(module_dir, 'templates'))
shutil.copyfile(
os.path.join(ROOT,
'templates',
'cfn',
'templates',
'tf_state.yml'),
os.path.join(module_dir, 'templates', 'tf_state.yml')
)
LOGGER.info("Sample CloudFormation module created at %s",
module_dir)
def generate_sample_stacker_module(env_root, module_dir=None):
"""Generate skeleton Stacker sample module."""
if module_dir is None:
module_dir = os.path.join(env_root,
'runway-sample-tfstate.cfn')
generate_sample_module(module_dir)
for i in ['stacks.yaml', 'dev-us-east-1.env']:
shutil.copyfile(
os.path.join(ROOT,
'templates',
'stacker',
i),
os.path.join(module_dir, i)
)
os.mkdir(os.path.join(module_dir, 'tfstate_blueprints'))
for i in ['__init__.py', 'tf_state.py']:
shutil.copyfile(
os.path.join(ROOT,
'templates',
'stacker',
'tfstate_blueprints',
i),
os.path.join(module_dir, 'tfstate_blueprints', i)
)
os.chmod( # make blueprint executable
os.path.join(module_dir, 'tfstate_blueprints', 'tf_state.py'),
os.stat(os.path.join(module_dir,
'tfstate_blueprints',
'tf_state.py')).st_mode | 0o0111
)
LOGGER.info("Sample Stacker module created at %s",
module_dir)
def generate_sample_tf_module(env_root, module_dir=None):
"""Generate skeleton Terraform sample module."""
if module_dir is None:
module_dir = os.path.join(env_root, 'sampleapp.tf')
generate_sample_module(module_dir)
for i in ['backend-us-east-1.tfvars', 'dev-us-east-1.tfvars', 'main.tf']:
shutil.copyfile(
os.path.join(ROOT,
'templates',
'terraform',
i),
os.path.join(module_dir, i),
)
tf_ver_template = os.path.join(ROOT,
'templates',
'terraform',
'.terraform-version')
if os.path.isfile(tf_ver_template):
shutil.copyfile(
tf_ver_template,
os.path.join(module_dir, '.terraform-version'),
)
else: # running directly from git
latest_tf_ver = get_latest_tf_version()
with open(os.path.join(module_dir,
'.terraform-version'), 'w') as stream:
stream.write(latest_tf_ver)
LOGGER.info("Sample Terraform app created at %s",
module_dir)
class GenSample(RunwayCommand):
"""Extend Base with execute to run the module generators."""
def execute(self):
"""Run selected module generator."""
if self._cli_arguments['cfn']:
generate_sample_cfn_module(self.env_root)
elif self._cli_arguments['sls']:
generate_sample_sls_module(self.env_root)
elif self._cli_arguments['sls-tsc']:
generate_sample_sls_tsc_module(self.env_root)
elif self._cli_arguments['stacker']:
generate_sample_stacker_module(self.env_root)
elif self._cli_arguments['tf']:
generate_sample_tf_module(self.env_root)
elif self._cli_arguments['cdk-tsc']:
generate_sample_cdk_tsc_module(self.env_root)
elif self._cli_arguments['cdk-py']:
generate_sample_cdk_py_module(self.env_root)
elif self._cli_arguments['cdk-csharp']:
generate_sample_cdk_cs_module(self.env_root)
```
#### File: runway/module/serverless.py
```python
from __future__ import print_function
import logging
import os
import re
import subprocess
import sys
from . import (
RunwayModule, format_npm_command_for_logging, generate_node_command,
run_module_command, run_npm_install, warn_on_boto_env_vars
)
from ..util import change_dir, which
LOGGER = logging.getLogger('runway')
def gen_sls_config_files(stage, region):
"""Generate possible SLS config files names."""
names = []
for ext in ['yml', 'json']:
# Give preference to explicit stage-region files
names.append(
os.path.join('env',
"%s-%s.%s" % (stage, region, ext))
)
names.append("config-%s-%s.%s" % (stage, region, ext))
# Fallback to stage name only
names.append(
os.path.join('env',
"%s.%s" % (stage, ext))
)
names.append("config-%s.%s" % (stage, ext))
return names
def get_sls_config_file(path, stage, region):
"""Determine Serverless config file name."""
for name in gen_sls_config_files(stage, region):
if os.path.isfile(os.path.join(path, name)):
return name
return "config-%s.json" % stage # fallback to generic json name
def run_sls_remove(sls_cmd, env_vars):
"""Run sls remove command."""
sls_process = subprocess.Popen(sls_cmd,
stdout=subprocess.PIPE,
env=env_vars)
stdoutdata, _stderrdata = sls_process.communicate()
sls_return = sls_process.wait()
if int(sys.version[0]) > 2:
stdoutdata = stdoutdata.decode('UTF-8') # bytes -> string
print(stdoutdata)
if sls_return != 0 and (sls_return == 1 and not (
re.search(r"Stack '.*' does not exist", stdoutdata))):
sys.exit(sls_return)
class Serverless(RunwayModule):
"""Serverless Runway Module."""
def run_serverless(self, command='deploy'):
"""Run Serverless."""
response = {'skipped_configs': False}
sls_opts = [command]
if not which('npm'):
LOGGER.error('"npm" not found in path or is not executable; '
'please ensure it is installed correctly.')
sys.exit(1)
if 'CI' in self.context.env_vars and command != 'remove':
sls_opts.append('--conceal') # Hide secrets from serverless output
if 'DEBUG' in self.context.env_vars:
sls_opts.append('-v') # Increase logging if requested
warn_on_boto_env_vars(self.context.env_vars)
sls_opts.extend(['-r', self.context.env_region])
sls_opts.extend(['--stage', self.context.env_name])
sls_env_file = get_sls_config_file(self.path,
self.context.env_name,
self.context.env_region)
sls_cmd = generate_node_command(command='sls',
command_opts=sls_opts,
path=self.path)
if (not self.options.get('environments') and os.path.isfile(os.path.join(self.path, sls_env_file))) or ( # noqa pylint: disable=line-too-long
self.options.get('environments', {}).get(self.context.env_name)): # noqa
if os.path.isfile(os.path.join(self.path, 'package.json')):
with change_dir(self.path):
run_npm_install(self.path, self.options, self.context)
LOGGER.info("Running sls %s on %s (\"%s\")",
command,
os.path.basename(self.path),
format_npm_command_for_logging(sls_cmd))
if command == 'remove':
# Need to account for exit code 1 on any removals after
# the first
run_sls_remove(sls_cmd, self.context.env_vars)
else:
run_module_command(cmd_list=sls_cmd,
env_vars=self.context.env_vars)
else:
LOGGER.warning(
"Skipping serverless %s of %s; no \"package.json\" "
"file was found (need a package file specifying "
"serverless in devDependencies)",
command,
os.path.basename(self.path))
else:
response['skipped_configs'] = True
LOGGER.info(
"Skipping serverless %s of %s; no config file for "
"this stage/region found (looking for one of \"%s\")",
command,
os.path.basename(self.path),
', '.join(gen_sls_config_files(self.context.env_name,
self.context.env_region)))
return response
def plan(self):
"""Skip sls planning."""
LOGGER.info('Planning not currently supported for Serverless')
def deploy(self):
"""Run sls deploy."""
self.run_serverless(command='deploy')
def destroy(self):
"""Run serverless remove."""
self.run_serverless(command='remove')
```
#### File: src/runway/tfenv.py
```python
from distutils.version import LooseVersion # noqa pylint: disable=import-error,no-name-in-module
import glob
import json
import logging
import os
import platform
import re
import shutil
import sys
import tempfile
import zipfile
# Old pylint on py2.7 incorrectly flags these
from six.moves.urllib.request import urlretrieve # noqa pylint: disable=import-error,line-too-long
from six.moves.urllib.error import URLError # noqa pylint: disable=import-error,relative-import,line-too-long
from botocore.vendored import requests
# embedded until this is merged - https://github.com/virtuald/pyhcl/pull/57
from runway.embedded import hcl
from .util import get_hash_for_filename, sha256sum
LOGGER = logging.getLogger('runway')
TF_VERSION_FILENAME = '.terraform-version'
# Branch and local variable count will go down when py2 support is dropped
def download_tf_release(version, # noqa pylint: disable=too-many-locals,too-many-branches
versions_dir, command_suffix, tf_platform=None,
arch=None):
"""Download Terraform archive and return path to it."""
version_dir = os.path.join(versions_dir, version)
if arch is None:
arch = (
os.environ.get('TFENV_ARCH') if os.environ.get('TFENV_ARCH')
else 'amd64')
if tf_platform:
tfver_os = tf_platform + '_' + arch
else:
if platform.system().startswith('Darwin'):
tfver_os = "darwin_%s" % arch
elif platform.system().startswith('Windows') or (
platform.system().startswith('MINGW64') or (
platform.system().startswith('MSYS_NT') or (
platform.system().startswith('CYGWIN_NT')))):
tfver_os = "windows_%s" % arch
else:
tfver_os = "linux_%s" % arch
download_dir = tempfile.mkdtemp()
filename = "terraform_%s_%s.zip" % (version, tfver_os)
shasums_name = "terraform_%s_SHA256SUMS" % version
tf_url = "https://releases.hashicorp.com/terraform/" + version
try:
for i in [filename, shasums_name]:
urlretrieve(tf_url + '/' + i,
os.path.join(download_dir, i))
# IOError in py2; URLError in 3+
except (IOError, URLError) as exc:
if sys.version_info[0] == 2:
url_error_msg = str(exc.strerror)
else:
url_error_msg = str(exc.reason)
if 'CERTIFICATE_VERIFY_FAILED' in url_error_msg:
LOGGER.error('Attempted to download Terraform but was unable to '
'verify the TLS certificate on its download site.')
LOGGER.error("Full TLS error message: %s", url_error_msg)
if platform.system().startswith('Darwin') and (
'unable to get local issuer certificate' in url_error_msg):
LOGGER.error("This is likely caused by your Python "
"installation missing root certificates. Run "
"\"/Applications/Python %s.%s/"
"\"Install Certificates.command\" to fix it "
"(https://stackoverflow.com/a/42334357/2547802)",
sys.version_info[0],
sys.version_info[1])
sys.exit(1)
else:
raise
tf_hash = get_hash_for_filename(filename, os.path.join(download_dir,
shasums_name))
if tf_hash != sha256sum(os.path.join(download_dir, filename)):
LOGGER.error("Downloaded Terraform %s does not match sha256 %s",
filename, tf_hash)
sys.exit(1)
tf_zipfile = zipfile.ZipFile(os.path.join(download_dir, filename))
os.mkdir(version_dir)
tf_zipfile.extractall(version_dir)
tf_zipfile.close()
shutil.rmtree(download_dir)
os.chmod( # ensure it is executable
os.path.join(version_dir,
'terraform' + command_suffix),
os.stat(os.path.join(version_dir,
'terraform' + command_suffix)).st_mode | 0o0111
)
def get_available_tf_versions(include_prerelease=False):
"""Return available Terraform versions."""
tf_releases = json.loads(
requests.get('https://releases.hashicorp.com/index.json').text
)['terraform']
tf_versions = sorted([k # descending
for k, _v in tf_releases['versions'].items()],
key=LooseVersion,
reverse=True)
if include_prerelease:
return tf_versions
return [i for i in tf_versions if '-' not in i]
def get_latest_tf_version(include_prerelease=False):
"""Return latest Terraform version."""
return get_available_tf_versions(include_prerelease)[0]
def find_min_required(path):
"""Inspect terraform files and find minimum version."""
found_min_required = ''
for filename in glob.glob(os.path.join(path, '*.tf')):
with open(filename, 'r') as stream:
tf_config = hcl.load(stream)
if tf_config.get('terraform', {}).get('required_version'):
found_min_required = tf_config.get('terraform',
{}).get('required_version')
break
if found_min_required:
if re.match(r'^!=.+', found_min_required):
LOGGER.error('Min required Terraform version is a negation (%s) '
'- unable to determine required version',
found_min_required)
sys.exit(1)
else:
found_min_required = re.search(r'[0-9]*\.[0-9]*(?:\.[0-9]*)?',
found_min_required).group(0)
LOGGER.debug("Detected minimum terraform version is %s",
found_min_required)
return found_min_required
LOGGER.error('Terraform version specified as min-required, but unable to '
'find a specified version requirement in this module\'s tf '
'files')
sys.exit(1)
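# Illustrative behaviour (comment added, not in the original file): a module whose .tf files contain
#   terraform { required_version = ">= 0.12.24" }
# makes find_min_required() return "0.12.24".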
def get_version_requested(path):
"""Return string listing requested Terraform version."""
tf_version_path = os.path.join(path,
TF_VERSION_FILENAME)
if not os.path.isfile(tf_version_path):
LOGGER.error("Terraform install attempted and no %s file present to "
"dictate the version. Please create it (e.g. write "
"\"0.11.13\" (without quotes) to the file and try again",
TF_VERSION_FILENAME)
sys.exit(1)
with open(tf_version_path, 'r') as stream:
ver = stream.read().rstrip()
return ver
def ensure_versions_dir_exists(tfenv_path):
"""Ensure versions directory is available."""
versions_dir = os.path.join(tfenv_path, 'versions')
if not os.path.isdir(tfenv_path):
os.mkdir(tfenv_path)
if not os.path.isdir(versions_dir):
os.mkdir(versions_dir)
return versions_dir
class TFEnv(object): # pylint: disable=too-few-public-methods
"""Terraform version management.
Designed to be compatible with https://github.com/tfutils/tfenv .
"""
def __init__(self, path=None):
"""Initialize class."""
if path is None:
self.path = os.getcwd()
else:
self.path = path
if platform.system() == 'Windows':
if 'APPDATA' in os.environ:
self.tfenv_dir = os.path.join(os.environ['APPDATA'],
'tfenv')
else:
for i in [['AppData'], ['AppData', 'Roaming']]:
if not os.path.isdir(os.path.join(os.path.expanduser('~'),
*i)):
os.mkdir(os.path.join(os.path.expanduser('~'),
*i))
self.tfenv_dir = os.path.join(os.path.expanduser('~'),
'AppData',
'Roaming',
'tfenv')
else:
self.tfenv_dir = os.path.join(os.path.expanduser('~'),
'.tfenv')
def install(self, version_requested=None):
"""Ensure terraform is available."""
command_suffix = '.exe' if platform.system() == 'Windows' else ''
versions_dir = ensure_versions_dir_exists(self.tfenv_dir)
if not version_requested:
version_requested = get_version_requested(self.path)
if re.match(r'^min-required$', version_requested):
LOGGER.debug('tfenv: detecting minimal required version')
version_requested = find_min_required(self.path)
if re.match(r'^latest:.*$', version_requested):
regex = re.search(r'latest:(.*)', version_requested).group(1)
include_prerelease_versions = False
elif re.match(r'^latest$', version_requested):
regex = r'^[0-9]+\.[0-9]+\.[0-9]+$'
include_prerelease_versions = False
else:
regex = "^%s$" % version_requested
include_prerelease_versions = True
# Return early (i.e before reaching out to the internet) if the
# matching version is already installed
if os.path.isdir(os.path.join(versions_dir,
version_requested)):
LOGGER.info("Terraform version %s already installed; using "
"it...", version_requested)
return os.path.join(versions_dir,
version_requested,
'terraform') + command_suffix
try:
version = next(i
for i in get_available_tf_versions(
include_prerelease_versions)
if re.match(regex, i))
except StopIteration:
LOGGER.error("Unable to find a Terraform version matching regex: %s",
regex)
sys.exit(1)
# Now that a version has been selected, skip downloading if it's
# already been downloaded
if os.path.isdir(os.path.join(versions_dir,
version)):
LOGGER.info("Terraform version %s already installed; using it...",
version)
return os.path.join(versions_dir,
version,
'terraform') + command_suffix
LOGGER.info("Downloading and using Terraform version %s ...",
version)
download_tf_release(version, versions_dir, command_suffix)
LOGGER.info("Downloaded Terraform %s successfully", version)
return os.path.join(versions_dir, version, 'terraform') + command_suffix
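# Minimal usage sketch (hypothetical module path; comment added, not in the original file):
#   tf_bin = TFEnv('/path/to/tf/module').install()  # resolves ".terraform-version",
#                                                   # "min-required" or "latest[:regex]"
#   # tf_bin is the full path to the matching terraform executable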
``` |
{
"source": "jkaluzka/symmetrical-guide",
"score": 2
} |
#### File: src/tests/test_search.py
```python
from selenium.webdriver.common.by import By
class TestSearch:
def test_find_input_success(self, search_page):
selector = "#search_form_input_homepage"
search_input = search_page.find_element(By.CSS_SELECTOR, selector)
assert search_input
def test_find_input_error(self, search_page):
selector = "#search"
search_input = search_page.find_element(By.CSS_SELECTOR, selector)
assert search_input
``` |
{
"source": "jkamalu/POMDPy",
"score": 2
} |
#### File: examples/traffic_light/light_model.py
```python
import os
import json
import numpy as np
from scipy.stats import truncnorm
from .light_action import TrafficLightAction, Acceleration
from .light_state import TrafficLightState
from .light_observation import TrafficLightObservation
from .light_data import TrafficLightData, Belief
from .util import Acceleration, LightColor
from .util import max_distance, state_to_color_index, calculate_trunc_norm_prob
from .util import MIN_DISTANCE_OBS, MAX_DISTANCE_OBS, MIN_WAVELENGTH_OBS, MAX_WAVELENGTH_OBS, INDEX_TO_ACTION
from pomdpy.pomdp import model
from pomdpy.discrete_pomdp import DiscreteActionPool
from pomdpy.discrete_pomdp import DiscreteObservationPool
class TrafficLightModel(model.Model):
def __init__(self, problem_name="TrafficLight"):
super().__init__(problem_name)
self.num_actions = len(Acceleration)
path = os.path.join(*__name__.split('.')[:-1], "config.json")
with open(path, "rt") as fp:
self.config = json.load(fp)
self.init_speed = self.config["init_speed"]
def start_scenario(self):
position = self.config["init_position"]
speed = self.config["init_speed"]
light = self.config["init_light"]
return TrafficLightState(position, speed, light)
''' --------- Abstract Methods --------- '''
def is_terminal(self, state):
        return state.position >= self.config["road_length"] + self.config["intersection_length"]
def sample_an_init_state(self):
random_position = np.random.randint(self.config["road_length"] // 2)
speed = self.init_speed
random_light = np.random.randint(sum(self.config["light_cycle"]))
return TrafficLightState(random_position, speed, random_light)
def create_observation_pool(self, solver):
return DiscreteObservationPool(solver)
def sample_state_uninformed(self):
random_position = np.random.randint(self.config["road_length"] // 2)
random_speed = np.random.randint(self.config["speed_limit"])
random_light = np.random.randint(sum(self.config["light_cycle"]))
return TrafficLightState(random_position, random_speed, random_light)
def sample_state_informed(self, belief):
return belief.sample_particle()
def get_all_states(self):
states = []
        for position in range(self.config["road_length"]):
            for speed in range(self.config["max_speed"]):
                for light in range(sum(self.config["light_cycle"])):
states.append(TrafficLightState(position, speed, light))
return states
def get_all_actions(self):
return [TrafficLightAction(index) for index in INDEX_TO_ACTION]
def get_all_observations(self):
observations = []
for distance_measurement in range(MIN_DISTANCE_OBS, MAX_DISTANCE_OBS + 1):
for wavelength_measurement in range(MIN_WAVELENGTH_OBS, MAX_WAVELENGTH_OBS + 1):
for speed in range(self.config["max_speed"] + 1):
observations.append(TrafficLightObservation((distance_measurement, wavelength_measurement, speed)))
return observations
def get_legal_actions(self, state):
legal_actions = []
for index in INDEX_TO_ACTION:
if state.speed + INDEX_TO_ACTION[index] >= 0 and state.speed + INDEX_TO_ACTION[index] <= self.config["max_speed"]:
legal_actions.append(TrafficLightAction(index))
return legal_actions
def is_valid(self, state):
return state.position >= 0 and state.speed >= 0
def reset_for_simulation(self):
self.start_scenario()
def reset_for_epoch(self):
self.start_scenario()
def update(self, sim_data):
pass
def get_max_undiscounted_return(self):
return 10
    def state_transition(self, state, action):
        """Deterministic successor of (state, action); mirrors make_next_state."""
        speed = state.speed + INDEX_TO_ACTION[action.index]
        position = state.position + speed
        light = (state.light + 1) % sum(self.config["light_cycle"])
        return TrafficLightState(position, speed, light)
    def get_transition_matrix(self):
        """
        |A| x |S| x |S'| matrix, for tiger problem this is 3 x 2 x 2
        :return:
        """
        action_state_state_combos = []
        for action in self.get_all_actions():
            state_state_combos = []
            for state in self.get_all_states():
                transition_state = self.state_transition(state, action)
                state_combos = []
                for next_state in self.get_all_states():
                    value = 1 if next_state == transition_state else 0
                    state_combos.append(value)
                state_state_combos.append(np.array(state_combos))
            action_state_state_combos.append(np.array(state_state_combos))
        return np.array(action_state_state_combos)
    def get_observation_matrix(self):
        """
        |A| x |S| x |O| matrix
        :return:
        """
        observations = []
        for action in self.get_all_actions():
            state_obs_probs = []
            for state in self.get_all_states():
                color = state_to_color_index(state)
                observation_probs = []
                for observation in self.get_all_observations():
                    if state.speed + INDEX_TO_ACTION[action.index] != observation.speed:
                        observation_probs.append(0)
                        continue
                    color_mean = self.config["color_means"][color]
                    color_std = self.config["color_stdev"]
                    color_probab = calculate_trunc_norm_prob(observation.wavelength_observed, color_mean, color_std, MIN_WAVELENGTH_OBS, MAX_WAVELENGTH_OBS)
                    dist_mean = state.position
                    dist_std = self.config["distance_stdev"]
                    distance_probab = calculate_trunc_norm_prob(observation.distance_observed, dist_mean, dist_std, MIN_DISTANCE_OBS, MAX_DISTANCE_OBS)
                    observation_probs.append(color_probab * distance_probab)
                state_obs_probs.append(np.array(observation_probs))
            observations.append(np.array(state_obs_probs))
        return np.array(observations)
def get_reward_matrix(self):
"""
|A| x |S| matrix
:return:
"""
reward_matrix = []
for action in self.get_all_actions():
state_rewards = []
for state in self.get_all_states():
terminal = state.position >= self.config["road_length"] + self.config["intersection_length"]
state_rewards.append(self.make_reward(action, state, terminal))
reward_matrix.append(np.array(state_rewards))
return np.array(reward_matrix)
@staticmethod
def get_initial_belief_state():
return Belief()
''' Factory methods '''
def create_action_pool(self):
return DiscreteActionPool(self)
def create_root_historical_data(self, agent):
return TrafficLightData(self, self.init_speed)
''' --------- BLACK BOX GENERATION --------- '''
def generate_step(self, state, action):
if action is None:
print("ERROR: Tried to generate a step with a null action")
return None
elif not isinstance(action, TrafficLightAction):
action = TrafficLightAction(action)
result = model.StepResult()
result.next_state, result.is_terminal = self.make_next_state(state, action)
result.action = action.copy()
result.observation = self.make_observation(action, result.next_state)
result.reward = self.make_reward(action, result.next_state, result.is_terminal)
return result, self.is_valid(state)
def make_next_state(self, state, action):
max_position = self.config["road_length"] + self.config["intersection_length"]
terminal = state.position >= max_position
new_speed = state.speed + INDEX_TO_ACTION[action.index]
new_position = state.position + new_speed
new_light = (state.light + 1) % sum(self.config["light_cycle"])
new_state = TrafficLightState(new_position, new_speed, new_light)
return new_state, terminal
def make_reward(self, action, state, terminal):
"""
:param action:
:param is_terminal:
:return: reward
"""
if terminal:
return 10
## Penalize for every timestep not at the goal state.
rewards = -1
## Penalize if the car stops outside the buffer.
if state.speed == 0 and (state.position > self.config["road_length"] or state.position < self.config["road_length"] - self.config["buffer_length"]):
rewards -= 5
## Penalize if we're in the intersection on a red light.
if state_to_color_index(state) == 2 and (state.position > self.config["road_length"] and state.position <= self.config["road_length"] + self.config["intersection_length"]):
rewards -= 100
## Penalize for going over the speed limit.
if state.speed > self.config["speed_limit"]:
rewards -= (state.speed - self.config["speed_limit"])
return rewards
def make_observation(self, action, next_state):
"""
:param action:
:return:
"""
color_index = state_to_color_index(next_state)
color_mean = self.config["color_means"][color_index]
color_stdev = self.config["color_stdev"]
sampled_wavelength = truncnorm.rvs((MIN_WAVELENGTH_OBS - color_mean) / color_stdev, (MAX_WAVELENGTH_OBS - color_mean) / color_stdev, loc=color_mean, scale=color_stdev, size=1)
        wl = int(sampled_wavelength[0] + 0.5)
dist_mean = self.config["road_length"] - next_state.position
dist_stdev = self.config["distance_stdev"]
sampled_distance = truncnorm.rvs((MIN_DISTANCE_OBS - dist_mean) / dist_stdev, (MAX_DISTANCE_OBS - dist_mean) / dist_stdev, loc=dist_mean, scale=dist_stdev, size=1)
try:
            dist = int(sampled_distance[0] + 0.5)
except:
print("sampled_distance = -inf")
dist = dist_mean
return TrafficLightObservation((wl, dist, next_state.speed))
def belief_update(self, old_belief, action, observation):
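        # Sketch of the update implemented below (comment added, not in the original file):
        # the distance belief becomes a confidence-weighted average of the prior mean and
        # the observed distance, shifted by the observed speed, while each colour belief is
        # re-weighted by the likelihood of the observed wavelength under that colour's
        # truncated normal.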
if old_belief.dist is not None:
b_dist = (old_belief.dist * old_belief.dist_confidence + observation.distance_observed * self.config["distance_stdev"]) / (old_belief.dist_confidence + self.config["distance_stdev"])
b_dist_stdev = (old_belief.dist_confidence * self.config["distance_stdev"]) / (old_belief.dist_confidence + self.config["distance_stdev"])
else:
b_dist = (observation.distance_observed * self.config["distance_stdev"]) / self.config["distance_stdev"]
b_dist_stdev = self.config["distance_stdev"]
b_dist += observation.speed
b_colors = [old_belief.green, old_belief.yellow, old_belief.red]
for color in LightColor:
color_mean = self.config["color_means"][color.value]
color_stdev = self.config["color_stdev"]
color_probab = calculate_trunc_norm_prob(observation.wavelength_observed, color_mean, color_stdev, MIN_WAVELENGTH_OBS, MAX_WAVELENGTH_OBS)
b_colors[color.value] *= color_probab
new_belief = Belief(p_green=b_colors[0], p_yellow=b_colors[1], p_red=b_colors[2], belief_d=b_dist, confidence_d=b_dist_stdev)
new_belief.normalize()
return new_belief
``` |
{
"source": "jkamalu/trashbots-RL",
"score": 3
} |
#### File: jkamalu/trashbots-RL/Environment.py
```python
import random
import numpy as np
from Motion import Motion
from GaussianTrashSource import GaussianTrashSource
from Agent import Agent
#####################################################
# Environment (class) #
# Models and administrates the environment / states #
# and manages the time discrete updates #
#####################################################
EMPTY_TILE_ID = 0 #Defines the value assigned to a tile in self.agent_grid if there is no agent on a field
class Environment:
"""
    Variables
----------
self.trash_grid_complete : ndarray
Numpy array (dimension is chosen in init function) that stores where trash is
self.trash_grid_visible : ndarray
Numpy array (dimension is chosen in init function) that stores where currently trash AND agents are
self.agent_grid : ndarray
Numpy array (dimension is chosen in init function) that stores where
the different agents are
self.agents : list
Stores all the numbers of actual agents
self.history_agent_grids : list
List of the n last agent_grids
self.history_visible_trash_grids: list
        List of the n last trash_grids
self.dim : tuple
Dimension of the Grids (trash and agent grid use this size)
self.trash_sources : list
        List of trash sources which can generate some trash each round.
----------
"""
def __init__(self, dim, reward_eat_trash=10, reward_invalid_move=0, reward_nothing_happend=0, trash_appearence_prob=0.1, number_trash_sources=1, saved_timesteps=1):
"""Initial function for the environment.
Called to set up all basic things for the environment.
Parameters
----------
dim : int tuple
Dimension of the field.
Returns
-------
"""
#Important Parameter initialization
self.saved_timesteps = saved_timesteps # Number of timesteps saved for the neural network
self.dim = dim # (y,x)
# Constants that will be used all throughout the code
        self.REWARD_EAT_TRASH = reward_eat_trash  # Default is 10
        self.REWARD_INVALID_MOVE = reward_invalid_move  # Default is 0
        self.REWARD_NOTHING_HAPPEND = reward_nothing_happend  # Default is 0
        self.TRASH_APPEARENCE_PROB = trash_appearence_prob  # Default is 0.1
        self.NUMBER_TRASH_SOURCES = number_trash_sources  # Default is 1
# initialize trash grid
self.trash_grid_visible = np.zeros(shape=(self.dim[0], self.dim[1]), dtype=int)
self.trash_grid_complete = np.zeros(shape=(self.dim[0], self.dim[1]), dtype=int)
# initialize robot grid
self.agent_grid = np.zeros(shape=(self.dim[0], self.dim[1]), dtype=int) * EMPTY_TILE_ID
# History is the list of grids seen over time, first element is the oldest one,
# last element in the list is the newest one, for the initialisation they are filled
# up with empty grids
self.history_agent_grids = []
self.history_visible_trash_grids = []
# Create some random trash sources
self.trash_sources = []
for i in range(self.NUMBER_TRASH_SOURCES):
self.trash_sources.append(self.create_random_trash_source())
# Keep track of all agents
self.agents = []
self.number_agents_total = 0 #Number of every created agents (even if they have been deleted)
for timestep_counter in range(0, self.saved_timesteps):
self.history_agent_grids.append(self.agent_grid)
self.history_visible_trash_grids.append(self.trash_grid_visible)
# Getter Methods
def get_agent_position(self, id):
# agents id is equivalent to its index in the agents list
return self.agents[id].pos
def get_agent(self, agent_idx):
return self.agents[agent_idx]
def is_position_free_valid(self, coord):
"""
Checks if a field is free so that an agent can move/appear there.
a field is free and valid if it is inside the grid and there is
no robot on the field.
Parameters
----------
coord : int tuple / None
Coordinates where the new agent should appear (place in the grid
has to be free that the agent appears there) . And has to be valid
return
-------
bool
            True if an agent could appear on the passed coordinates (i.e. the
            coordinates are valid and the tile is free), False otherwise
"""
if((0 <= coord[0] and coord[0] < self.dim[0]) and (0 <= coord[1] and coord[1] < self.dim[1]) and (self.agent_grid[coord] == EMPTY_TILE_ID)):
# Valid and free coordinate
return True
# At coord no agent could appear / move to
return False
def get_complete_trash_grid(self):
return self.trash_grid_complete
def get_agent_grid(self):
return self.agent_grid
def get_rnd_free_position(self):
"""
Returns a coordinate of the grid that is currently not occupied
by any agent.
If no free coordinate was found an exception is raised. To find a free
coordinate it tries randomly for 100 times if there is a free spot.
return
-------
tuple int:
If there is a free position, this is returned as tuple
"""
count_tries = 0
while count_tries < 100:
coord_y = random.randint(0, self.dim[0])
coord_x = random.randint(0, self.dim[1])
if self.is_position_free_valid((coord_y, coord_x)):
return (coord_y, coord_x)
count_tries += 1
raise Exception("No free coordinate found")
def add_agent(self, coord=None, capacity=10):
"""Initial function for the environment.
Called to set up all basic things for the environment.
Parameters
----------
coord : int tuple / None
Coordinates where the new agent should appear (place in the grid has to
            be free for the agent to appear there). Successfully created agents get an id
            that corresponds to the number of already created agents.
capacity: int
Default is 10. Defines how much the agent could carry
Return
-------
bool:
            Returns True if adding the agent was successful, False otherwise.
"""
exception_caught = False
if coord is None:
try:
coord = self.get_rnd_free_position()
except Exception:
print("Handled exception")
exception_caught = True
elif not self.is_position_free_valid(coord):
print("Can not add agent at this position")
exception_caught = True
if exception_caught:
return False
#TODO: see Issue #4
id = self.number_agents_total +1 #Every Agent has the ID of Agents that have been created before +1 (doesn't get reduced if the agents are removed, first Agent ID is 1)
self.number_agents_total += 1 #Update the number of agents which have ever been created
# Add agent to list and grid
self.agents.append(Agent(pos=coord, id=id, capacity=capacity))
self.agent_grid[coord] = id
return True
def move_agent(self, agent_idx, delta_coords):
"""
- Moves agent (if move is valid)
- Eats trash if there is some on the new position.
- Returns the Reward that is collected with this move (e.g. eat trash)
Parameters
----------
        agent_idx : int
            Index of the agent in the agents list.
delta_coords: int tuple
Defines how the y,x coordinates should change
Return
-------
int:
Amount of collected reward with this move
"""
# Check move for validity
my_agent = self.agents[agent_idx]
old_pos = my_agent.pos
new_pos = (old_pos[0] + delta_coords[0], old_pos[1] + delta_coords[1])
wants_to_move = (delta_coords[0] != 0) or (delta_coords[1] != 0)
reward = 0
        # Clear the visible-trash marker on the agent's old position before (possibly) moving
        self.trash_grid_visible[old_pos] = 0
if self.is_position_free_valid(new_pos) or not wants_to_move:
# TODO: See issue #5
# Update the agents position
my_agent.pos = new_pos
self.agent_grid[old_pos] = EMPTY_TILE_ID
self.agent_grid[new_pos] = my_agent.id
# Trash eating
trash_eaten = self.move_agent_in_trash_world(old_pos = old_pos, new_pos = new_pos, my_agent = my_agent)
if trash_eaten:
reward = self.REWARD_EAT_TRASH
else:
# TODO: See issue #6
# Invalid move
reward = self.REWARD_INVALID_MOVE
return reward
def move_agent_in_trash_world(self, old_pos, new_pos, my_agent):
"""
Called from move_agent() to move an agent from old_pos to new_pos.
Applies the agents move (old_pos -> new_pos) to the "trash world".
Updates all trash related attributes, trash_grids etc.
Returns True if the agent eats trash at new_pos
Parameters
----------
old_pos : int tuple
y,x coordinate of the old position.
new_pos: int tuple
y,x coordinate of the agent after the move (could be the same as of old position if the move is invalid or stay)
my_agent: Agent object
The instance of the Agent that should be moved
Return
-------
        bool:
            True if a piece of trash was eaten at new_pos
"""
trash_eaten = False
trash_present = self.trash_grid_complete[new_pos] > 0
# Eat trash if there is some
if trash_present:
# visible only stores whether there is currently an agent collecting trash
self.trash_grid_visible[new_pos] = 1
# complete stores the amount of trash present
self.trash_grid_complete[new_pos] -= 1
my_agent.load += 1
my_agent.totally_collected += 1
trash_eaten = True
else:
self.trash_grid_visible[new_pos] = 0
return trash_eaten
def create_random_trash_source(self):
"""
Creates a Trashsource with a random position on the grid.
The trash source is NOT automatically added somewhere!
Return
-------
GaussianTrashSource:
            Returns the created trash source as an instance of the GaussianTrashSource class
"""
mean_x = random.randint(0, self.dim[1]-1)
mean_y = random.randint(0, self.dim[0]-1)
mean = [mean_y,mean_x]
return GaussianTrashSource(mean=mean, max_y=self.dim[0]-1, max_x=self.dim[1]-1, cov = [[0,0],[0,0]])
def generate_new_trash(self, alpha=None):
"""
Each trashsource of the environment is, with probability alpha,
asked to generate a piece of trash that will then appear on the grid.
New trash will be added to the trash_grid_complete
Parameters
----------
alpha : float / None
Probability for each Trash Source to generate Trash. If alpha is None the
self.TRASH_APPEARENCE_PROB probability is used.
"""
if alpha is None:
alpha = self.TRASH_APPEARENCE_PROB
for source in self.trash_sources:
if random.random() < alpha:
trash_y, trash_x = source.get_trash()
self.trash_grid_complete[trash_y, trash_x] += 1
def move_agents(self, action_list):
"""Updates the environment with the actions in the list.
Conversion from the action into the actual change of coordinate (check
if this action is possible is in self.move_agent)
Returns the current state that is used by the neural net as well as the rewards
collected by the moves of the action_list
Parameters
----------
action_list : list
Containing the actions for each agent (0: up, 1: right, 2: down, 3: left, 4: stay)
Agents are ordered as in the self.agents list.
Dimension: 1 x Number of agents
Return
-------
        ndarray:
            History over the visible trash, one grid per saved timestep. (Dimension: saved_timesteps x grid_height x grid_width)
        ndarray:
            History over the agent positions, one grid per saved timestep. (Dimension: saved_timesteps x grid_height x grid_width)
        ndarray:
            One-hot position grid for each agent. (Dimension: n_agents x grid_height x grid_width)
        list:
            Reward collected by each agent for its move, in the order of action_list.
"""
agent_idx = 0
reward_list = []
for action in action_list:
d_pos = Motion(action).value
reward_list.append(self.move_agent(agent_idx, d_pos))
agent_idx = agent_idx + 1
self.generate_new_trash()
        # Save the current state (like punching a time clock) as the next timestep
self.save_condition_new_timestep()
history_visible_trash, history_agents, current_pos_agent = self.export_known_data()
        # histories: saved_timesteps x grid_height x grid_width; current_pos_agent: n_agents x grid_height x grid_width (one-hot per agent)
return history_visible_trash, history_agents, current_pos_agent, reward_list
def save_condition_new_timestep(self):
"""Adds the current condition to the state space and removes the oldest one
Saves the agent_grid and the trash_grid_visible matrix
"""
#Add the new ones
self.history_agent_grids.append(self.agent_grid.copy())
self.history_visible_trash_grids.append(self.trash_grid_visible.copy()) #Only the visible trash is saved
# remove the oldest appended data
del(self.history_agent_grids[0])
del(self.history_visible_trash_grids[0])
def export_known_data(self):
"""Exports the data (states) to the neural network.
n: number of saved timesteps
Return
-------
history_visible_trash:
Matrix of format n * self.dim[0] * self.dim[1], is 1 where trash is eaten at each timestep, zero elsewhere
history_agent:
Matrix of format n* self.dim[0] * self.dim[1], is 1 where the agents are at one timestep, zero elsewhere
current_pos_agent:
Matrix of format nb_agents * self.dim[0] * self.dim[1], one hot matrix for each agent (in the same order as the agents are in self.agents)
indicating the position of the agent
"""
ret_history_visible_trash_grids = np.array(self.history_visible_trash_grids)
ret_history_visible_trash_grids[ret_history_visible_trash_grids>0] = 1 # 1 indicates trash, 0 elsewhere
ret_history_agents = np.array(self.history_agent_grids)
ret_history_agents[ret_history_agents > 0] = 1 # 1 indicates an agent, 0 if there is no agent
current_pos_agent = np.zeros((len(self.agents), self.dim[0], self.dim[1]), dtype = int)
#Iterating over the list of agents to set the position of each agent in another field to 1
agent_counter = 0
for agent in self.agents:
y, x = agent.pos[0], agent.pos[1]
current_pos_agent[agent_counter][y][x] = 1
agent_counter += 1
return ret_history_visible_trash_grids, ret_history_agents, current_pos_agent
def debug_data_export(self):
"""Exports all data of the current stats for debug reasons. Extends the export_known_data_function with complete_trash_grid
n: number of saved timesteps
Return
-------
history_visible_trash:
Matrix of format n * self.dim[0] * self.dim[1], is 1 where trash is eaten at each timestep, zero elsewhere
history_agent:
Matrix of format n* self.dim[0] * self.dim[1], is 1 where the agents are at one timestep, zero elsewhere
current_pos_agent:
Matrix of format nb_agents * self.dim[0] * self.dim[1], one hot matrix for each agent (in the same order as the agents are in self.agents)
indicating the position of the agent
trash_grid_complete:
Matrix of format self.dim[0] * self.dim[1]. Indicates the complete (partly for the agents unknown) distribution of trash
trash_sources:
            List of all GaussianTrashSource objects that are currently generating trash.
"""
ret_history_visible_trash_grids, ret_history_agents, current_pos_agent = self.export_known_data()
return ret_history_visible_trash_grids, ret_history_agents, current_pos_agent , self.trash_grid_complete ,self.trash_sources
```
#### File: jkamalu/trashbots-RL/GaussianTrashSource.py
```python
from numpy.random import multivariate_normal
class GaussianTrashSource:
def __init__(self, mean, max_y, max_x, cov=[[1,0],[0,1]], id=None):
"""
Creates a trashsource
Parameters
----------
        mean : (int, int), (y, x) trash hotspot, mean of the rnd var.
        max_y : int, limit to the trash coordinates / grid of the environment
        max_x : int, limit to the trash coordinates / grid of the environment
        cov : 2x2 matrix, covariance of the rnd var.
        id : optional identifier of the trash source
Returns
-------
"""
# mean of the gaussian
self.mean = mean
# covariance matrix of the multivariate gaussian
self.cov = cov
# strict limits to the gaussian
self.max_x = max_x
self.max_y = max_y
# Just an id of the trashsource
self.id = id
def draw_sample_in_limits(self):
"""
"""
y, x = multivariate_normal(self.mean, self.cov,1)[0]
        y = int(min(self.max_y, max(0, round(y))))  # clamp into [0, max_y]
        x = int(min(self.max_x, max(0, round(x))))  # clamp into [0, max_x]
return [y, x]
def get_trash(self, n=None):
"""
Returns a list of n coordinates drawn from the distribution
"""
if n:
return [self.draw_sample_in_limits() for i in range(n)]
return self.draw_sample_in_limits()
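# Illustrative usage (hypothetical values; comment added, not in the original file):
#   source = GaussianTrashSource(mean=[5, 5], max_y=9, max_x=9)
#   y, x = source.get_trash()        # a single clamped [y, x] sample
#   samples = source.get_trash(n=3)  # a list of three [y, x] samples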
``` |
{
"source": "jkamelin/open_model_zoo",
"score": 2
} |
#### File: tools/accuracy_checker/setup.py
```python
import importlib
import os
import re
import sys
import warnings
import platform
import subprocess # nosec - disable B404:import-subprocess check
from distutils.version import LooseVersion
from pathlib import Path
from setuptools import find_packages, setup # pylint:disable=W9902
from setuptools.command.test import test as test_command # pylint:disable=W9902
from setuptools.command.install import install as install_command # pylint:disable=W9902
here = Path(__file__).parent
class PyTest(test_command):
user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
def initialize_options(self):
test_command.initialize_options(self)
self.pytest_args = ''
def run_tests(self):
import shlex # pylint:disable=C0415
# import here, cause outside the eggs aren't loaded
import pytest # pylint:disable=C0415
error_code = pytest.main(shlex.split(self.pytest_args))
sys.exit(error_code)
def read(*path):
input_file = os.path.join(here, *path)
with open(str(input_file), encoding='utf-8') as file:
return file.read()
def check_and_update_numpy(min_acceptable='1.15'):
try:
import numpy as np # pylint:disable=C0415
update_required = LooseVersion(np.__version__) < LooseVersion(min_acceptable)
except ImportError:
update_required = True
if update_required:
subprocess.call([sys.executable, '-m', 'pip', 'install', 'numpy>={}'.format(min_acceptable)])
def install_dependencies_with_pip(dependencies):
for dep in dependencies:
if dep.startswith('#'):
continue
subprocess.call([sys.executable, '-m', 'pip', 'install', str(dep)])
class CoreInstall(install_command):
pass
def find_version(*path):
version_file = read(*path)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
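# Illustrative behaviour (comment added, not in the original file): a line such as
#   __version__ = "1.2.3"
# in the given file makes find_version() return "1.2.3"; otherwise a RuntimeError is raised.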
is_arm = platform.processor() == 'aarch64'
long_description = read("README.md")
version = find_version("openvino/tools/accuracy_checker", "__init__.py")
def prepare_requirements():
requirements_core = read('requirements-core.in').split('\n')
if 'install_core' in sys.argv:
return requirements_core
requirements = read("requirements.in").split('\n')
return requirements_core + requirements
_requirements = prepare_requirements()
try:
importlib.import_module('cv2')
except ImportError as opencv_import_error:
if platform.processor() != 'aarch64':
warnings.warn(
"Problem with cv2 import: \n{}\n opencv-python will be added to requirements".format(opencv_import_error)
)
_requirements.append('opencv-python')
else:
warnings.warn(
"Problem with cv2 import: \n{}".format(opencv_import_error)
+ "\n Probably due to unsuitable numpy version, will be updated")
check_and_update_numpy()
if is_arm:
install_dependencies_with_pip(_requirements)
setup(
name="accuracy_checker",
description="Deep Learning Accuracy validation framework",
version=version,
long_description=long_description,
packages=find_packages(),
entry_points={
"console_scripts": [
"accuracy_check=openvino.tools.accuracy_checker.main:main",
"convert_annotation=openvino.tools.accuracy_checker.annotation_converters.convert:main"]},
zip_safe=False,
python_requires='>=3.5',
install_requires=_requirements if not is_arm else '',
tests_require=[read("requirements-test.in")],
cmdclass={'test': PyTest, 'install_core': CoreInstall},
extras_require={'extra': ['pycocotools>=2.0.2', 'torch>=0.4.0', 'torchvision>=0.2.1', 'lpips',
'kenlm @ git+https://github.com/kpu/kenlm.git#egg=kenlm']}
)
``` |
{
"source": "jkamiya5/flask",
"score": 3
} |
#### File: jkamiya5/flask/my_function.py
```python
from pylab import *
from pandas import *
import matplotlib.pyplot as plt
import numpy as np
import pylab
import pandas as pd
from matplotlib.backends.backend_agg import FigureCanvasAgg
import io
import random
import re
import os
from matplotlib.font_manager import FontProperties
class my_function:
@staticmethod
def setData(player):
df = pd.read_csv('./kings/data/Kings_Game_HIST_' + player + '.csv', encoding='shift-jis')
df2 = pd.read_csv('./kings/data/Kings_Game_HISTORY.csv', encoding='shift-jis')
left = pd.DataFrame(df)
right = pd.DataFrame(df2)
output = pd.merge(left, right, how='left', on=['Time'])
return output
@staticmethod
def getData(statsItem, player):
        output = my_function.setData(player)
lose = output.where(output.WinLose == "L")
win = output.where(output.WinLose == "W")
win = win.dropna(subset=['No'])
lose = lose.dropna(subset=['No'])
plt.plot(win[statsItem], "ko--", color="b", label="1")
plt.plot(lose[statsItem], "ko--", color="r", label="2")
plt.legend([u'WIN', u'LOSE'])
plt.title(player + ":" + statsItem + "_HIST")
plt.xlabel("TIME")
plt.ylabel(statsItem)
return plt
@staticmethod
def getCorr(player):
        output = my_function.setData(player)
output = output.replace(re.compile('^W$'), 1)
output = output.replace(re.compile('^L$'), 0)
obj = output.corr()
after = obj.sort_values(by="WinLose", ascending=True)
ax= after.plot.bar(y=['WinLose'])
return ax
@staticmethod
def delFiles(targetPath):
for root, dirs, files in os.walk(targetPath, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
```
#### File: jkamiya5/flask/tempImage.py
```python
from flask import Flask, render_template, session, redirect, url_for, flash, send_file
import matplotlib.pyplot
from matplotlib.backends.backend_agg import FigureCanvasAgg
import random
import string
import os
import my_function as func
class TempImage(object):
def __init__(self, file_name):
self.file_name = file_name
def create_png(self, player, itemName):
fig, ax = matplotlib.pyplot.subplots()
ax.set_title(player + "のスタッツ・・・" + itemName)
obj = func.my_function()
obj.getData(itemName, player)
canvas = FigureCanvasAgg(fig)
canvas.print_figure(self.file_name)
def create_png1(self, player):
fig, ax = matplotlib.pyplot.subplots()
ax.set_title(player + "のスタッツ・・・")
obj = func.my_function()
obj.getCorr(player)
matplotlib.pyplot.savefig(self.file_name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
#os.remove(self.file_name)
print()
def getPng(self, player, itemName):
chars = string.digits + string.ascii_letters
target = ''.join(random.choice(chars) for i in range(64)) + '.png'
img_name = "static/img/" + target
with TempImage(img_name) as img:
img.create_png(player, itemName)
send_file(img_name, mimetype='image/png')
return img_name
def getPng1(self, player):
chars = string.digits + string.ascii_letters
target = ''.join(random.choice(chars) for i in range(64)) + '.png'
img_name = "static/img/" + target
with TempImage(img_name) as img:
img.create_png1(player)
send_file(img_name, mimetype='image/png')
return img_name
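# Illustrative usage from a Flask view (hypothetical player/stat names; comment added, not in the original file):
#   img_path = TempImage("unused").getPng("PlayerName", "PTS")
#   # the returned "static/img/<random>.png" path can then be embedded in a template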
``` |
{
"source": "JKamlah/akf-dbTools",
"score": 3
} |
#### File: akf-dbTools/dblib/kennGetter.py
```python
from sqlalchemy import create_engine, MetaData, select
import configparser
################ START ################
def akf_kenngetter(config):
"""
Main function of the akf-kennGetter!
"""
print("Start kennGetter")
#Connection to SQLite
dbPath = config['DEFAULT']['DBPath']
db_akf = dbPath
engine = create_engine(db_akf)
conn = engine.connect()
# Create a MetaData instance
metadata = MetaData(bind=engine, reflect=True)
# Get all the referenz values
s = select([metadata.tables['Main'].c.referenz])
result = conn.execute(s)
mainresults = result.fetchall()
    # Get all the security identifiers (WKN) which are bound to the referenz
    # and update the Kennnummern column in the Main table
for ref in mainresults:
s = select([metadata.tables['MainRelation']]).where(metadata.tables['MainRelation'].c.referenz == ref[0])
knn = ""
try:
result = conn.execute(s)
mainrelationresults = result.fetchall()
for uids in mainrelationresults:
s = select([metadata.tables['WKN']]).where(
metadata.tables['WKN'].c.unternehmenId == uids[1])
resultwkns = conn.execute(s).fetchall()
for wkn in resultwkns:
if wkn[2] not in knn:
knn += wkn[2]+" "
if wkn[3] not in knn:
knn += wkn[3]+" "
stmt = metadata.tables['Main'].update().values(Kennnummern=knn).where(
metadata.tables['Main'].c.referenz == ref[0])
conn.execute(stmt)
except:
continue
conn.close()
engine.dispose()
return 0
if __name__ == "__main__":
"""
Entrypoint: Searches for the files and parse them into the mainfunction (can be multiprocessed)
"""
# The filespath are stored in the config.ini file.
# And can be changed there.
config = configparser.ConfigParser()
config.sections()
config.read('config.ini')
akf_kenngetter(config)
print("Finished!")
```
#### File: akf-dbTools/dblib/output_analysis.py
```python
from copy import deepcopy
import io
import os
import os.path as path
import shutil
import ntpath
import regex
class OutputAnalysis(object):
def __init__(self, config):
self.config_default = config["BOOKS"]
self.analysis_enabled = False
self.output_path = None
if self.config_default['OutputLogging'] is False:
print("OutputAnalysis: won't do anything OutputAnalysis not enabled in config")
else:
print("OutputAnalysis: analysis is enabled")
self.analysis_enabled = True
self.output_path = self.config_default['OutputRootPath']
self.delete_directory_tree(self.output_path) # clear the output path to prevent rest content there
# add data holder
self.data = {}
self.current_key = None # key for the current file in the data holder
def change_file(self, data, file):
if not self.analysis_enabled: return
# fetch the basic info like path and name from the file
# ( this should contain the origposts which will be subtracted)
self.data[file] = deepcopy(data)
self.current_key = file # use the full file path as current key
# create copy of origdata
for key in self.data[file]:
if "overall_info" in key:
continue
entry = self.data[file][key]
origpost = entry[0]['origpost']
origpost_rest = deepcopy(origpost) # create another copy of origpost where the data is substracted from
self.data[file][key][0]['origpost_rest'] = origpost_rest
self.data[file][key][0]['origpost_rest_wo_sc'] = self.remove_sc_from_text(origpost_rest)
def remove_sc_from_text(self,text):
# rest without special characters
my_text_wo_sc = regex.sub("[^\w]+", "", text)
return my_text_wo_sc
def subtract_entry(self, tag, texts_to_subtract, subtag1= None, subtag2=None ):
if not self.analysis_enabled: return
# current_data = self.data[self.current_key]
# my_subtract = None
if tag not in self.data[self.current_key].keys():
print("subtract_entry-> Tag:", tag, "not in ", self.current_key)
return
my_text = self.data[self.current_key][tag][0]['origpost_rest']
my_orig_text_wo_sc = self.remove_sc_from_text(my_text)
len_bef = len(my_text)
#todo order by length ? texts to subtract
# cast necessary texts
texts_to_sort = []
for text in texts_to_subtract:
if not isinstance(text, str):
text = str(text)
texts_to_sort.append(text)
# sort necessary texts
texts_to_subtract_sorted = sorted(texts_to_sort, key=len, reverse=True)
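        # Rationale (comment added, not in the original file): subtracting the longest
        # strings first avoids removing a short substring out of the middle of a longer
        # match that still needs to be subtracted afterwards.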
# do the actual subtraction
for text in texts_to_subtract_sorted:
my_text = my_text.replace(text, "", 1)
len_post = len(my_text)
#if len_bef != len_post:
# print("asd")
self.data[self.current_key][tag][0]['origpost_rest'] = my_text
## ['origpost_rest'].replace(text, "")
# rest without special characters
#my_text_wo_sc = self.remove_sc_from_text(my_text) # old version
my_text_wo_sc = my_orig_text_wo_sc
for text in texts_to_subtract_sorted:
text_wo_sc = self.remove_sc_from_text(text)
my_text_wo_sc = my_text_wo_sc.replace(text_wo_sc,"",1)
#self.data[self.current_key][tag][0]['origpost_rest_wo_sc'] = my_text_wo_sc
self.data[self.current_key][tag][0]['origpost_rest_wo_sc'] = my_text_wo_sc
# subtract entry('origpost_rest') by tag and subtag
# mind charset and stuff here
def output_result_folder(self, base_folder_name):
if not self.analysis_enabled:
return
acc__statements = {} # overall length results for each file in the folder
output_folder = path.join(self.output_path, base_folder_name)
output_folder = output_folder.replace(".db", "") + "/"
for filekey in self.data:
data_to_print = self.data[filekey]
table_name = ntpath.basename(filekey)
file_statements = {}
for key in data_to_print:
if key == "overall_info":
continue
entry = data_to_print[key]
if not isinstance(entry, list) or len(entry) < 1:
continue
# output_folder_final = output_folder
if "/" in key:
key = key.replace("/", "_") # replace all slashes with underscores to prevent misinterpretation as folder
output_path = path.join(output_folder, key + ".txt")
final_lines, origpost_len, origpost_rest_len, origpost_rest_wo_sc_len = \
self.create_data_for_file(entry[0], self.current_key, table_name)
statement = {
"table_name": table_name,
"origpost_len": origpost_len,
"origpost_rest_len": origpost_rest_len,
"origpost_rest_wo_sc_len": origpost_rest_wo_sc_len
}
file_statements[key] = statement
self.write_array_to_root_simple(output_folder, output_path, final_lines, append_mode=True)
# accumulate all statements for one file
acc__statements = self.accumulate_file_statements(file_statements, acc__statements)
# write statement data to current folder
output_path_folder_res = path.join(output_folder, "aaa_folder_results.txt")
if bool(acc__statements):
folder_report = self.create_report_for_folder(acc__statements, output_path)
self.write_array_to_root_simple(output_folder, output_path_folder_res, folder_report, append_mode=True)
# pass statement data to overall processing
return acc__statements
def log_final_results(self, results):
path = self.output_path
final_report = self.create_report_for_folder(results, path)
self.write_array_to_root_simple(path, path+"final_report.txt", final_report, append_mode=True)
def accumulate_final_results(self, folder_statements, acc_statements):
for key in folder_statements:
entry = folder_statements[key]
#table_name = entry['table_name']
origpost_len = entry['acc_orig_len']
origpost_rest_len = entry['acc_rest_len']
origpost_rest_wo_sc_len = entry['acc_rest_wo_sc_len']
if key not in acc_statements.keys():
acc_statements[key] = {
"acc_orig_len": origpost_len,
"acc_rest_len": origpost_rest_len,
"acc_rest_wo_sc_len": origpost_rest_wo_sc_len
}
else:
acc_statements[key]["acc_orig_len"] += origpost_len
acc_statements[key]["acc_rest_len"] += origpost_rest_len
acc_statements[key]["acc_rest_wo_sc_len"] += origpost_rest_wo_sc_len
return acc_statements
def accumulate_file_statements(self, file_statements, acc_statements):
for key in file_statements:
entry = file_statements[key]
#table_name = entry['table_name']
origpost_len = entry['origpost_len']
origpost_rest_len = entry['origpost_rest_len']
origpost_rest_wo_sc_len = entry['origpost_rest_wo_sc_len']
if key not in acc_statements.keys():
acc_statements[key] = {
"acc_orig_len": origpost_len,
"acc_rest_len": origpost_rest_len,
"acc_rest_wo_sc_len": origpost_rest_wo_sc_len
}
else:
acc_statements[key]["acc_orig_len"] += origpost_len
acc_statements[key]["acc_rest_len"] += origpost_rest_len
acc_statements[key]["acc_rest_wo_sc_len"] += origpost_rest_wo_sc_len
return acc_statements
def create_report_for_folder(self, folder_statements, output_path):
final_report = []
separators = '%-70s%-30s%-30s%-30s%-30s'
final_report.append("Folder:"+ output_path)
headline_to_add = separators % ("category_name", "original_text_length", "rest_text_length", "rest_text_wo_sc_length", "subtracted_chars")
final_report.append(headline_to_add)
final_report.append("----------------------------------------------------------------")
for key in folder_statements:
statement = folder_statements[key]
            subtracted_chars = int(statement['acc_orig_len']) - int(statement['acc_rest_len'])
            text_to_add = separators % (key,
                                        statement['acc_orig_len'],
                                        statement['acc_rest_len'],
                                        statement['acc_rest_wo_sc_len'],
                                        subtracted_chars
                                        )
)
final_report.append(text_to_add)
return final_report
def create_data_for_file(self, data , source_file, table_name):
origpost = data['origpost']
origpost_rest = data['origpost_rest']
origpost_rest_wo_sc = data['origpost_rest_wo_sc']
origpost_len = len(origpost)
origpost_rest_len = len(origpost_rest)
origpost_rest_wo_sc_len = len(origpost_rest_wo_sc)
final_lines = []
separators = '%-30s%-30s'
final_lines.append(table_name + "------------------------")
final_lines.append(
separators % ("origpost: ", origpost))
final_lines.append(
separators % ("origpost_rest: ", origpost_rest))
final_lines.append(
separators % ("origpost_rest_wo_sc: ", origpost_rest_wo_sc))
final_lines.append("") # empty lines for overview
final_lines.append("")
return final_lines, origpost_len, origpost_rest_len, origpost_rest_wo_sc_len
# create a file for each tag in output
# log file info and compared output
# (optional) per file create stats and accumulate them
def write_array_to_root_simple(self, full_dir, full_path, text_lines, append_mode=False):
self.create_directory_tree(full_dir)
# write append or normal
if append_mode is True:
my_file = io.open(full_path, 'a', encoding='utf8')
else:
my_file = io.open(full_path, 'w', encoding='utf8')
for text_line in text_lines:
my_file.write(text_line + "\n")
my_file.close()
def create_directory_tree(self, path):
# abspath = os.path.abspath(path)
path_exists = os.path.exists(path)
if not path_exists:
os.makedirs(path)
def delete_directory_tree(self, path):
if os.path.exists(path):
shutil.rmtree(path)
def change_folder(self):
self.data = {}
```
#### File: akf-dbTools/dblib/refGetter.py
```python
from sqlalchemy import create_engine, MetaData, select
import configparser
################ START ################
def get_pretty_dates(dates):
"""
    It takes all the years
    and pretty-prints them.
E.g. 2001,2002,2003,2004 -> 2001-2004
"""
pretty_date = str(dates[0])+"-"
last = dates[0]
for idx, date in enumerate(dates[1:]):
if (date-1 == last):
if idx == len(dates) - 2:
pretty_date = pretty_date + str(date)
else:
if idx == len(dates) - 2:
pretty_date = pretty_date+str(dates[idx])+", "+str(date)
else:
if int(pretty_date[-5:-1]) == dates[idx]:
pretty_date = pretty_date[:-1]+", " + str(date) + "-"
else:
pretty_date = pretty_date + str(dates[idx]) + ", " + str(date) + "-"
last = date
return pretty_date
def akf_refgetter(config):
"""
Main function of the AKF_RefGetter!
"""
print("Start RefGetter")
#Connection to SQLite
dbPath = config['DEFAULT']['DBPath']
db_akf = dbPath
engine = create_engine(db_akf)
conn = engine.connect()
# Create a MetaData instance
metadata = MetaData(bind=engine, reflect=True)
# Get all the referenz values
s = select([metadata.tables['Main'].c.referenz])
result = conn.execute(s)
mainresults = result.fetchall()
    # Get all the years which are bound to the referenz,
    # collect them and write them as a year span to the "Jahresspanne" column in Main
for ref in mainresults:
s = select([metadata.tables['MainRelation']]).where(metadata.tables['MainRelation'].c.referenz == ref[0])
dates = []
try:
result = conn.execute(s)
mainrelationresults = result.fetchall()
for date in mainrelationresults:
dates.append(int(date[1][:4]))
dates = sorted(list(set(dates)))
if not dates: continue
pretty_dates = dates[0]
if len(dates)>2:
pretty_dates = get_pretty_dates(dates)
elif len(dates) == 2:
pretty_dates = str(dates[0])+"-"+str(dates[1])
print(pretty_dates)
stmt = metadata.tables['Main'].update().values(Jahresspanne=pretty_dates).where(
metadata.tables['Main'].c.referenz == ref[0])
conn.execute(stmt)
except:
continue
conn.close()
engine.dispose()
return 0
if __name__ == "__main__":
"""
Entrypoint: Searches for the files and parse them into the mainfunction (can be multiprocessed)
"""
# The filespath are stored in the config.ini file.
# And can be changed there.
config = configparser.ConfigParser()
config.sections()
config.read('config.ini')
akf_refgetter(config)
print("Finished!")
``` |
{
"source": "JKamlah/AST-PST-Tablerecognizer",
"score": 3
} |
#### File: JKamlah/AST-PST-Tablerecognizer/main.py
```python
import sys
from string import digits
from pathlib import Path
import logging
from tesserocr import PyTessBaseAPI
import numpy as np
import cv2 as cv
from PIL import Image
import pandas as pd
logging.basicConfig(filename='error.log',format='%(asctime)s %(message)s', level=logging.WARNING)
def extract_tableinformation_from_images(img_paths, lang="ASTPST_0.522000_951_6100", whitelist_num=False,
template=1, auto_template=True,
suspicious_threshold=10,
pixel_density_template_threshold=179,
pixel_density_row_threshold=0.95,
ocr_padding=15,
debug=False, visual_debug=False):
"""
This program process AST-PST files, it:
- deskews via object clustering
- analyzes the layout via 1d-projection profile and pixel density and templating
- extract tableinformation via OCR with tesseract
- stores the data as table in a csv-file
    For this special case, magic numbers, thresholds and the templating are hardcoded.
:return:
"""
with PyTessBaseAPI(lang=lang, psm=4) as api:
for img_path in img_paths:
print(f"Processing {img_path}")
try:
# Read the image
img = cv.imread(img_path, 0)
# Binarize the image
img = cv.medianBlur(img, 5)
img = cv.medianBlur(img, 3)
bin = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 5, 2)
# Dilate and erode to glue the borderelements and get a solid border
kernel = np.ones((3, 3), np.uint8)
bin = cv.dilate(bin, kernel, iterations=1)
kernel = np.ones((2, 13), np.uint8)
bin = cv.erode(bin, kernel, iterations=3)
kernel = np.ones((13, 2), np.uint8)
bin = cv.erode(bin, kernel, iterations=3)
kernel = np.ones((5, 5), np.uint8)
bin = cv.dilate(bin, kernel, iterations=8)
# Get a dict with all {area:contours}
areas = find_contour_areas(bin)
# Find the biggest area which ends above the middle of the image
for idx, (area, contour) in enumerate(reversed(sorted(areas.items()))):
x, y, w, h = cv.boundingRect(contour)
if x+h > 0.5*bin.shape[1]:
continue
# Draw the rectangle to see what you've found
#cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Region of interest
roi = bin[y:y+h, x:x+w]
# In each column search for the first black pixel and then calculate the deskew angle (tilt) of the image
y_idxs = []
for x in range(0 + 25, w - 25):
for y in range(0, h):
if roi[y, x] == 0:
y_idxs.append(y)
break
polyfit_value = np.polyfit(range(0, len(y_idxs)), y_idxs, 1)
deskewangle = np.arctan(polyfit_value[0]) * (360 / (2 * np.pi))
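# The slope of the line fitted to the first black pixel per column approximates
# the tilt of the table's top border; arctan converts it to degrees for the rotation below.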
# Deskew the image and bin image
M = cv.getRotationMatrix2D((bin.shape[1] // 2, bin.shape[0] // 2), deskewangle, 1.0)
rotated_bin = cv.warpAffine(bin, M, (bin.shape[1], bin.shape[0]),
flags=cv.INTER_CUBIC, borderMode=cv.BORDER_REPLICATE)
rotated_img = cv.warpAffine(img, M, (bin.shape[1], bin.shape[0]),
flags=cv.INTER_CUBIC, borderMode=cv.BORDER_REPLICATE)
break
else:
print("The roi in the original image could not be found.")
return
# Again find the biggest area which ends above the middle of the image
# (you could also calculate it from the results above.. :)
areas = find_contour_areas(rotated_bin)
for idx, (area, contour) in enumerate(reversed(sorted(areas.items()))):
x, y, w, h = cv.boundingRect(contour)
if x+h > 0.5*bin.shape[1]:
continue
# Region of interest
rotated_roi = bin[y:y + h, x:x + w]
if debug:
cv.rectangle(rotated_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
break
else:
print("The roi in the rotated image could not be found.")
return
# Set rotated img to tessapi
rotated_img = cv.cvtColor(rotated_img, cv.COLOR_BGR2RGB)
rotated_img_pil = Image.fromarray(rotated_img)
api.SetImage(rotated_img_pil)
# Get Land/-kreis approximately w*.2, h*0.6 above x,y
api.SetRectangle(x, int(y-h*0.5), int(w*0.20), int(h*0.4))
BL_LK = api.GetUTF8Text().replace('- ','-').strip().split('\n')
if len(BL_LK) == 2:
bundesland = BL_LK[0]
landkreis = BL_LK[1]
else:
bundesland = ""
landkreis = ' '.join(BL_LK)
# Autodetect template with the pixel density of the roi?
# Measurements of testfiles: 331 -> 182, 332 -> 140, 333 -> 180, 334-> 143
if debug:
print('Pixel density: ',np.sum(rotated_roi)/area)
if auto_template:
if np.sum(rotated_roi)/area > pixel_density_template_threshold:
template = 1
else:
template = 2
# Cols: Now either use fix coordinates or find the columns with the rotated_roi
cols = []
if template == 1:
sep_vals = [580, 1055, 1210, 1395, 1620, 1830, 2035, 2240, 2445, 2610]
elif template == 2:
sep_vals = [440, 580, 705, 875, 1045, 1245, 1445, 1645, 1875, 2080, 2290, 2490, 2690]
for col in sep_vals:
col = int(w*col/sep_vals[-1])
cols.append(col)
if debug:
cv.line(rotated_img, (x+col, y+h), (x+col, img.shape[1]), (255, 0, 0), 3)
# Rows: To get the rows you could use the 1D-projection of the rotated_bin
projection = []
th = 255*(rotated_bin.shape[1]-x-cols[1])*pixel_density_row_threshold
kernel = np.ones((2, 13), np.uint8)
rotated_bin_cols = cv.erode(rotated_bin, kernel, iterations=2)
cv.imwrite("./rotated_cols.png", rotated_bin_cols)
for yy in range(0, rotated_bin.shape[0]):
projection.append(th if sum(rotated_bin_cols[yy,cols[1]+x:]) > th else 0)
projection_smooth = np.convolve(projection, 10, mode='same')
minimas = np.diff(np.sign(np.diff(projection_smooth)))
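# The -1 entries of this second difference mark the edges of the text bands in the
# binarized projection profile; shifted by 'linepad' they are used as row boundaries below.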
skip = False
linepad = 15
rows = []
for idx, minima in enumerate(minimas):
if idx > y + h:
if minima == -1:
if not skip:
rows.append(idx-linepad)
if debug:
cv.line(rotated_img, (0, idx-linepad), (img.shape[1], idx-linepad), (255, 0, 0), 3)
skip = True
else:
skip = False
cv.imwrite("./debug.png", rotated_img)
# Create pandas (pd) dataframe (df) for the templates
if template==1:
tmpl_header = ["Bundesland",
"Landkreis",
"Wirtschaftszweig",
"Sypro",
"Pst_n",
"Beschaeftigte",
"Grundmittel",
"Gebaeude",
"Ausruestungen",
"Grundstueck_Flaeche",
"Gebaeude_Flaeche",
"Gleisanschluss",
"Dateiname",
"Fulltext"]
else:
tmpl_header = ["Bundesland",
"Landkreis",
"Wirtschaftszweig",
"Sypro",
"Pst_n",
"Personal_insg",
"Personal_mehrsch",
"Waren_prod",
"Produktion_flaeche",
"Elektro_verbrauch",
"Waerme_verbrauch",
"Waerme_erzeugung",
"Wasser_verbrauch",
"Wasser_erzeugung",
"Wasser_verwendung",
"Dateiname",
"Fulltext"]
df = pd.DataFrame(columns=tmpl_header)
# OCR: Use tesseract via tesserocr and the roi
suspicious_counter = 0
rows.append(rotated_img.shape[0]-10-ocr_padding)
whitelist = api.GetVariableAsString('tessedit_char_whitelist')
for ridx, row in enumerate(rows[1:]):
print(f"Zeile: {ridx+1}")
if whitelist_num:
api.SetVariable('tessedit_char_whitelist', whitelist)
df_row = []
df_row.append(bundesland)
df_row.append(landkreis)
x_start = x
y_start = rows[ridx+1-1]
y_end = row
# Read the full row
api.SetRectangle(x_start - ocr_padding, y_start - ocr_padding, x_start+w+ocr_padding,
y_end - y_start + ocr_padding)
fullrow_text = api.GetUTF8Text().strip().replace('- ','-')
print('Fulltext: ',fullrow_text)
for cidx, col in enumerate(cols):
print(f"Spalte: {cidx+1}")
if whitelist_num:
# Use for col > 0 only digits as allowed chars
if cidx > 0:
api.SetVariable('tessedit_char_whitelist', digits)
if cidx > 0:
col = col-cols[cidx-1]
api.SetRectangle(x_start-ocr_padding, y_start-ocr_padding,
col+ocr_padding, y_end-y_start+ocr_padding)
if debug and False:
crop = rotated_img_pil.crop([x_start-ocr_padding, y_start-ocr_padding,
col+ocr_padding+(x_start-ocr_padding),
y_end-y_start+ocr_padding+(y_start-ocr_padding)])
crop.save(f"./{ridx}_{cidx}.png")
ocrResult = api.GetUTF8Text().replace('\n', ' ').replace('- ','-').strip()
if ocrResult == "":
# Set psm mode to single character
api.SetPageSegMode(10)
ocrResult = api.GetUTF8Text().replace('\n', ' ').strip()
# Check if the single char is a digit else use empty text
ocrResult = ocrResult if ocrResult.isdigit() else ''
# Reset to psm mode single column
api.SetPageSegMode(4)
# Find suspicious ocr results by checking non-empty ocr results against the
# row fulltext (0 is not included because it often is not recognized in the row fulltext)
if len(cols)-1 > cidx > 0 and ocrResult not in ['', '0'] and (not ocrResult.isdigit() or
ocrResult not in fullrow_text):
ocrResult += " ?!?"
suspicious_counter += 1
elif len(cols)-1==cidx and ocrResult != "" and not ocrResult.isdigit():
ocrResult += " ?!?"
suspicious_counter += 1
print(ocrResult)
x_start += col
df_row.append(ocrResult)
df_row.append(Path(img_path).name)
df_row.append(fullrow_text)
df.loc[len(df)+1] = df_row
# Et voilà: deskewed image, OCR results and the extracted information
df.to_csv(Path(img_path).with_suffix('.csv'), index=False)
df = None
if suspicious_counter > suspicious_threshold:
logging.warning(f'More than {suspicious_threshold} suspicious occurrences in {img_path}')
# Plot the results (for debugging)
if visual_debug:
from matplotlib import pyplot as plt
plt.subplot(1,2,1), plt.imshow(roi, cmap='gray')
plt.subplot(1,2,2), plt.imshow(rotated_img, cmap='gray')
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.show()
except Exception as excp:
# Log all files who could not be processed properly
logging.error(f'Error while processing: {img_path}\n\t\tError:{excp}\n')
def find_contour_areas(bin):
# Find contours
contours, hierachies = cv.findContours(bin.copy(), cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE)
# Find the convex hull object for each contour
areas = {}
size_th = 0.5*bin.size
for idx, contour in enumerate(contours):
cnt_area = cv.contourArea(contour, False)
if cnt_area < size_th:
areas[cnt_area] = contour
return areas
if __name__ == '__main__':
img_list = []
for idx, img_path in enumerate(reversed(sys.argv[1:])):
if Path(img_path).is_dir():
img_list.extend([str(img_path.resolve()) for img_path in Path(img_path).rglob('*.png')])
else:
img_list.append(str(Path(img_path).resolve()))
extract_tableinformation_from_images(list(set(img_list)))
``` |
{
"source": "JKamlah/dinglehopper",
"score": 3
} |
#### File: qurator/dinglehopper/align.py
```python
from .edit_distance import *
def align(t1, t2):
"""Align text."""
s1 = list(grapheme_clusters(unicodedata.normalize('NFC', t1)))
s2 = list(grapheme_clusters(unicodedata.normalize('NFC', t2)))
return seq_align(s1, s2)
def seq_align(s1, s2):
"""Align general sequences."""
s1 = list(s1)
s2 = list(s2)
ops = seq_editops(s1, s2)
i = 0
j = 0
while i < len(s1) or j < len(s2):
o = None
try:
ot = ops[0]
if ot[1] == i and ot[2] == j:
ops = ops[1:]
o = ot
except IndexError:
pass
if o:
if o[0] == 'insert':
yield (None, s2[j])
j += 1
elif o[0] == 'delete':
yield (s1[i], None)
i += 1
elif o[0] == 'replace':
yield (s1[i], s2[j])
i += 1
j += 1
else:
yield (s1[i], s2[j])
i += 1
j += 1
```
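A quick sketch of how the aligner above can be used. The import path follows the package layout shown here (qurator.dinglehopper); treat it as an assumption if the package is laid out differently.
```python
from qurator.dinglehopper.align import align, seq_align

# Character-level alignment of two noisy OCR strings; None marks an
# insertion/deletion on the corresponding side.
for gt_char, ocr_char in align("Schlußbericht", "Schlussbericht"):
    print(gt_char, ocr_char)

# seq_align works on arbitrary sequences, e.g. token lists.
pairs = list(seq_align(["Aufsichtsrat", ":", "Dr.", "Meyer"],
                       ["Aufsichtsrat", "Dr.", "Meyer"]))
```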
#### File: dinglehopper/tests/test_integ_ocrd_cli.py
```python
import os
import re
import shutil
import json
from pathlib import Path
from click.testing import CliRunner
import pytest
from .util import working_directory
from ..ocrd_cli import ocrd_dinglehopper
data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
def test_ocrd_cli(tmp_path):
"""Test OCR-D interface"""
# XXX Path.str() is necessary for Python 3.5
# Copy test workspace
test_workspace_dir_source = Path(data_dir) / 'actevedef_718448162'
test_workspace_dir = tmp_path / 'test_ocrd_cli'
shutil.copytree(str(test_workspace_dir_source), str(test_workspace_dir))
# Run through the OCR-D interface
with working_directory(str(test_workspace_dir)):
runner = CliRunner()
result = runner.invoke(ocrd_dinglehopper, [
'-m', 'mets.xml',
'-I', 'OCR-D-GT-PAGE,OCR-D-OCR-CALAMARI',
'-O', 'OCR-D-OCR-CALAMARI-EVAL'
])
assert result.exit_code == 0
result_json = list((test_workspace_dir / 'OCR-D-OCR-CALAMARI-EVAL').glob('*.json'))
assert json.load(open(str(result_json[0])))['cer'] < 0.03
``` |
{
"source": "JKamlah/mocrin",
"score": 2
} |
#### File: mocrin/mocrinlib/abbyyrunner.py
```python
import shlex
import subprocess
from mocrinlib.common import create_dir
def start_abbyy(file, path_out):
create_dir(path_out)
file_out = path_out + file.split('/')[-1]+".xml"
parameters = f"-if {file} -recc -f XML -xcam Ascii -of {file_out}"
parameters = shlex.split(parameters)
subprocess.Popen(args=['abbyyocr11']+parameters).wait()
return
```
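A minimal sketch of calling the wrapper above; it assumes the ABBYY CLI `abbyyocr11` is installed and on PATH, and the paths are placeholders.
```python
from mocrinlib.abbyyrunner import start_abbyy

# Runs ABBYY on one image and writes <imagename>.xml into the output directory
# (note that path_out is expected to end with a slash).
start_abbyy("/data/scans/page_0001.tif", "/data/abbyy_out/")
```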
#### File: mocrin/mocrinlib/tessapi.py
```python
from tesserocr import PyTessBaseAPI, RIL, iterate_level
import string as string
from skimage.io import imread, imsave
import re
from mocrinlib.common import create_dir
from mocrinlib.imgproc import safe_imread
import lxml.etree as ET
from io import StringIO
import json
########## EXTENDED HOCR FUNCTION ##########
def extend_hocr(file:str, fileout:str, tess_profile:dict=None):
"""
Produces an extended hocr file with character confidences
:param file:
:param fileout:
:param tess_profile:
:return:
"""
parameters = get_param(tess_profile)
with PyTessBaseAPI(**parameters) as api:
set_vars(api, file, tess_profile)
ri = api.GetIterator()
# TODO: Need to fix header ...
#lang = api.GetInitLanguagesAsString()
version = api.Version()
hocrparse = ET.parse(StringIO(
f'''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title>OCR Results</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<meta name='ocr-system' content='tesseract {version}' />
<meta name='ocr-capabilities' content='ocr_page ocr_carea ocr_par ocr_line ocrx_word'/>
</head>
</html>'''))
hocrroot = hocrparse.getroot()
hocrtess = ET.fromstring(api.GetHOCRText(0))
hocrtess.set("title", "image "+file+"; bbox"+hocrtess.get("title").split("bbox")[-1])
allwordinfo = hocrtess.findall('.//div/p/span/span')
level = RIL.SYMBOL
bbinfo = tuple()
conf = ""
charinfo = {}
for r in iterate_level(ri, level):
if bbinfo != r.BoundingBoxInternal(RIL.WORD):
if bbinfo != ():
bbox = "bbox " + " ".join(map(str, bbinfo))
for wordinfo in allwordinfo:
if bbox in wordinfo.get("title"):
wordinfo.set("title", wordinfo.get("title")+";x_confs"+conf)
allwordinfo.remove(wordinfo)
break
conf = ""
bbinfo = r.BoundingBoxInternal(RIL.WORD)
conf += " "+str(r.Confidence(level))
#symbol = r.GetUTF8Text(level)
#if symbol not in charinfo:
# charinfo[symbol]=[r.Confidence(level)]
#else:
# charinfo[symbol].append(r.Confidence(level))
bbox = "bbox " + " ".join(map(str, bbinfo))
for wordinfo in allwordinfo:
if bbox in wordinfo.get("title"):
wordinfo.set("title", wordinfo.get("title") + ";x_confs" + conf)
#with open(fileout+"_charinfo.json", "w") as output:
# json.dump(charinfo, output, indent=4)
hocrbody = ET.SubElement(hocrroot, "body")
hocrbody.append(hocrtess)
hocrparse.write(fileout+".hocr", xml_declaration=True,encoding='UTF-8')
return 0
def get_param(tess_profile:dict):
"""
Read the parameters for the api func call
:param tess_profile:
:return:
"""
# Set Parameters
parameters = {}
if 'parameters' in tess_profile:
for param in tess_profile['parameters']:
if param != "":
if "tessdata-dir" in param:
parameters["path"] = tess_profile['parameters'][param]['value']
elif "l" in param:
parameters["lang"] = tess_profile['parameters'][param]['value']
elif "oem" in param:
parameters["oem"] = int(tess_profile['parameters'][param]['value'])
elif "psm" in param:
parameters["psm"] = int(tess_profile['parameters'][param]['value'])
return parameters
def set_vars(api, file:str, tess_profile:dict):
"""
Reads the user-specific variables from the tess_profile
:param api:
:param file:
:param tess_profile:
:return:
"""
# Set necessary information
api.SetImageFile(file)
# Set Variable
api.SetVariable("save_blob_choices", "T")
if 'variables' in tess_profile:
for var in tess_profile['variables']:
api.SetVariable(var, str(tess_profile['variables'][var]['value']))
api.Recognize()
return 0
########## CUTTER FUNCTION ##########
def cutter(file:str, fileout:str, tess_profile:dict):
"""
Cuts out areas (char, word, line) which contain a user-specific expression
:param file: input file
:param fileout: output filename
:param tess_profile: profile containing user-specific information and options
:return:
"""
try:
cutter = tess_profile["cutter"]
# Init the api
parameters = get_param(tess_profile)
with PyTessBaseAPI(**parameters) as api:
set_vars(api, file, tess_profile)
ri = api.GetIterator()
# The char method is not quite correct,
# it seems that char bboxes get calculated after recognition, which sometimes leads to false cutouts.
level = {"char":RIL.SYMBOL,"word":RIL.WORD,"line":RIL.TEXTLINE}.get(cutter["level"], "char")
expr_finder = init_expr_finder(cutter)
img = safe_imread(file)
count = 0
for r in iterate_level(ri, level):
symbol = r.GetUTF8Text(level) # r == ri
conf = r.Confidence(level)
if cutter["regex"] == "":
expr_result = expr_finder(cutter,symbol)
else:
expr_result = re.search(cutter["regex"],symbol)
if expr_result:
if cutter["min_conf"] < conf < cutter["max_conf"]:
origsymbol = symbol[:]
symbol = re.sub('[^0-9a-zA-Z]+', '_', symbol)
count += 1
bbox = r.BoundingBoxInternal(level)
pad = get_pad(bbox, cutter["pad"], cutter["padprc"])
cutarea = img[bbox[1] - pad[0]:bbox[3] + pad[0], bbox[0] - pad[1]:bbox[2] + pad[1], :]
if tess_profile["cutter"]["gen_traindata"]:
##if origsymbol == "":continue
cutdir = "/".join(fileout.split("/")[:-1]) + "/cut/" + fileout.split("/")[-1].split(".")[0] + "/"
create_dir(cutdir)
imsave(cutdir + '{:06d}'.format(count)+"." + file.split(".")[-1], cutarea)
with open("/".join(fileout.split("/")[:-1]) + "/cut/" + fileout.split("/")[-1].split(".")[0]+".linenr", "a") as cutinfo:
# Information (Number of cut, Line/Word/Char Text, Confidence, BBOX)
cutinfo.write('{:06d}'.format(count)+"\n"*(len(origsymbol.split("\n"))-1))
with open("/".join(fileout.split("/")[:-1]) + "/cut/" + fileout.split("/")[-1].split(".")[0]+".linetxt", "a") as cutinfo:
# Information (Number of cut, Line/Word/Char Text, Confidence, BBOX)
cutinfo.write(origsymbol)
with open(cutdir + '{:06d}'.format(count)+".gt.txt", "a") as cutinfo:
# Information (Number of cut, Line/Word/Char Text, Confidence, BBOX)
cutinfo.write(origsymbol)
else:
cutdir = "/".join(fileout.split("/")[:-1]) + "/cut/" + symbol + "/"
create_dir(cutdir)
fprefix = '{:06d}'.format(count) + "_" + symbol + "_" + '{:.3f}'.format(conf).replace(".", "-")
imsave(cutdir + "_" + fprefix + fileout.split("/")[-1] + "." + file.split(".")[-1], cutarea)
with open("/".join(fileout.split("/")[:-1])+"/cutinfo.txt","a") as cutinfo:
# Information (Number of cut, Line/Word/Char Text, Confidence, BBOX)
cutinfo.write('{:06d}'.format(count)
+"\t"+origsymbol
+"\t"+'{:.3f}'.format(conf)
+"\t"+str(bbox[1] - pad[0])
+"\t" +str(bbox[3] + pad[0])
+"\t" +str(bbox[0] - pad[1])
+"\t" +str(bbox[2] + pad[1]))
except Exception:
print("Something went wrong while cutting.")
return 0
def init_expr_finder(cutter:dict):
"""
Initializes the callback function with an expr dict: 'op' holds the filter operators and 'filter' the user-given filter characters.
:param cutter: dict containing information for cutting; 'filterop(erator)' and 'gr(ou)pop(erator)' are used here.
:return:
"""
expr = {}
expr["op"] = {"and": all, "or": any}
expr["filter"] = get_filter(cutter)
def find_expr(cutter:dict,symbol:str)->bool:
# searches the symbol for the filterexpr with given filteroperator
try:
filterres =[]
for filter in expr["filter"]:
filterres.append(expr["op"].get(cutter["filterop"],"and")([True if s in symbol else False for s in filter]))
result = expr["op"].get(cutter["grpop"],"and")(filterres)
except Exception:
print("Something went wrong while evaluating the filter expression!")
result = False
return result
return find_expr
def get_filter(cutter:dict)->list:
"""
Sets up the filter groups, which are divided by '||'; filters within a group are divided by '|'.
:param cutter:
:return:
"""
filterarrgrps = cutter["filter"].split("||")
filter = []
for filterarrgrp in filterarrgrps:
# do set for unique values
filter.append(set(filterarrgrp.split("|")))
for exgrp in filter:
for starex in ("*ascii_letters","*ascii_lowercase","*ascii_uppercase","*digits","*punctuation"):
if starex in exgrp:
exgrp.discard(starex)
exgrp.update(set(getattr(string, starex[1:])))
return filter
def get_pad(bbox,padval:int=0, padprc:float=0.0)->tuple:
"""
Calculates the padding values for cutting
:param bbox: boundingbox information
:param padval: padding value (pixel)
:param padprc: padding value (percantage)
:return:
"""
pad = [0,0]
try:
if padval != 0:
pad = [p + padval for p in pad]  # add the fixed padding to both dimensions
if padprc != 0.0:
pad[0] = int((pad[0]+abs(bbox[3]-bbox[1]))*padprc)
pad[1] = int((pad[1]+abs(bbox[2]-bbox[0]))*padprc)
except:
print("Padding values are incorrect.")
return tuple(pad)
########## MAIN FUNCTION ##########
def tess_pprocess(file:str,fileout:str,cut:bool, tess_profile:dict=None)->int:
"""
Starts either the cutting or the extended_hocr process
:param file: inputfile
:param fileout: output file name
:param cut: flag for cutting options
:param tess_profile: profile containing user-specific information and options
:return: None
"""
if cut and tess_profile != None:
print("Start cut")
cutter(file, fileout, tess_profile)
else:
print("Start hocr")
extend_hocr(file, fileout, tess_profile)
return 0
########## ENTRYPOINT ##########
if __name__=="__main__":
extend_hocr('','')
``` |
{
"source": "JKamlah/ocromore",
"score": 3
} |
#### File: ocromore/machine_learning_components/special_char_predictor_generate_text_evaluation.py
```python
from random import randint
from pickle import load
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from machine_learning_components.special_char_predictor_create_sequences import config, main_create_aufsichtsrat_sequences, generate_tokens
# load doc into memory
def load_doc(filename):
# open the file as read only
file = open(filename, 'r')
# read all text
text = file.read()
# close the file
file.close()
return text
# generate a sequence from a language model
def generate_seq(model, tokenizer, seq_length, seed_text, n_words):
result = list()
in_text = seed_text
# generate a fixed number of words
for _ in range(n_words):
# encode the text as integer
encoded = tokenizer.texts_to_sequences([in_text])[0]
# truncate sequences to a fixed length
encoded = pad_sequences([encoded], maxlen=seq_length, truncating='pre')
# predict probabilities for each word
yhat = model.predict_classes(encoded, verbose=0)
# map predicted word index to word
out_word = ''
for word, index in tokenizer.word_index.items():
if index == yhat:
out_word = word
break
# append to input
in_text += ' ' + out_word
result.append(out_word)
return ' '.join(result)
aufsichtsrat_sequences,sequences_as_array, af_seq_length= main_create_aufsichtsrat_sequences(config.FILEGLOB_LEARNDICTS, config.FILEPATH_SEQUENCES)
aufsichtsrat_sequences_eval,sequences_as_array_eval, af_seq_length_eval = main_create_aufsichtsrat_sequences(config.FILEGLOB_EVALUATIONDICTS, config.FILEPATH_EVAL_SEQUENCES, sequence_length=af_seq_length)
# load cleaned text sequences
# in_filename = config.FILEPATH_SEQUENCES
# doc = load_doc(in_filename)
# lines = doc.split('\n')
# seq_length = len(lines[0].split()) - 1
lines = aufsichtsrat_sequences
lines_eval = aufsichtsrat_sequences_eval
seq_length = af_seq_length -1
# load the model
model = load_model(config.PREDICTOR_AUFSICHTSRAT_MODEL)
# load the tokenizer
tokenizer = load(open(config.PREDICTOR_AUFSICHTSRAT_TOKENIZER, 'rb'))
# select a seed text
line_index = randint(0,len(lines))
seed_text = lines[line_index]
print(seed_text + '\n')
# check some line indices which predict
# generate new text
# generated = generate_seq(model, tokenizer, seq_length, seed_text, 15)
evaluate_direct = False
if evaluate_direct:
# evaluation with learn data
generated_1 = generate_seq(model, tokenizer, seq_length, lines[89], 3)
generated_2 = generate_seq(model, tokenizer, seq_length, lines[85], 3)
generated_3 = generate_seq(model, tokenizer, seq_length, lines[55], 3)
generated_4 = generate_seq(model, tokenizer, seq_length, lines[249], 3)
# evaluation with evaluation data
generated_e1 = generate_seq(model, tokenizer, seq_length, aufsichtsrat_sequences_eval[18], 3)
generated_e2 = generate_seq(model, tokenizer, seq_length, aufsichtsrat_sequences_eval[45], 3)
generated_e3 = generate_seq(model, tokenizer, seq_length, aufsichtsrat_sequences_eval[108], 3)
generated_e4 = generate_seq(model, tokenizer, seq_length, aufsichtsrat_sequences_eval[166], 3)
print("done")
def is_special_character(text):
if len(text) > 1:
return False
is_alphanumberical = text.isalnum()
if is_alphanumberical:
return False
else:
return True
numchecks = 0
numfaults = 0
numrights = 0
numadds = 0
for seq_eval_index, seq_eval in enumerate(aufsichtsrat_sequences_eval):
if seq_eval_index >= len(aufsichtsrat_sequences_eval)-4:
break
# obtain first token which is the proclaimed result
res_next_tokens = sequences_as_array_eval[seq_eval_index+3][seq_length-2:seq_length+1]
res_next = sequences_as_array_eval[seq_eval_index+1][seq_length]
# generate result for input sequence
res_gen = generate_seq(model, tokenizer, seq_length, seq_eval, 3)
res_gen_tokens = generate_tokens(res_gen)
res_gen_next = res_gen_tokens[0]
res_next_is_specialchar = is_special_character(res_next)
res_gen_next_is_specialchar = is_special_character(res_gen_next)
print("Sequence:", seq_eval)
print("ResultT_act:", res_next_tokens,"is_special_char:", res_next_is_specialchar)
print("ResultT_gen:", res_gen_tokens,"is_special_char:", res_gen_next_is_specialchar)
if not res_next_is_specialchar and res_gen_next_is_specialchar:
print("add special char")
# reduce errors by looking at further generated next tokens
numfaults += 1
numadds += 1
if res_next_is_specialchar and res_gen_next_is_specialchar:
if res_next != res_gen_next:
# similarity or also look at next generated tokens
print("swapping special char")
numfaults += 1
else:
numrights += 1
numchecks +=1
print("Number of runs:", numchecks)
print("Number of faults:", numfaults)
print("Number of rights:", numrights)
print("Number of adds:", numadds)
print("Number of faults without adds:", numfaults-numadds)
```
#### File: ocromore/n_dist_keying/hocr_line_normalizer.py
```python
from n_dist_keying.hocr_bbox_comparator import HocrBBoxComparator
class HocrLineNormalizer(object):
def __init__(self):
self.hocr_comparator = HocrBBoxComparator()
def unify_list_entries(self, ocr_listlist, mode = "OCROPUS"):
final_list = []
for entry in ocr_listlist:
if len(entry) == 1:
final_list.append(entry[0])
else:
text_accu = ""
for line in entry:
if mode is "OCROPUS":
text_accu = text_accu + " " + line._hocr_html.contents[0]
else:
text_accu = text_accu + " " + line.ocr_text
# refactor the first element with the accumulated text (identical for both modes)
entry[0].ocr_text_normalized = text_accu
final_list.append(entry[0])
return final_list
def linify_list(self, ocr_list):
"""
Writes all elements which are in one line to the same line to the same list entry
:param ocr_list:
:return:
"""
final_list = []
for base_line in ocr_list:
if not hasattr(base_line, 'marked'):
base_line.marked = False
if not base_line.marked:
bl_coordinates = base_line.coordinates
list_for_baseline = [] # each baseline gets a list
list_for_baseline.append(base_line)
for comparison_line in ocr_list:
if base_line is comparison_line:
# prevent same lines in array
continue
cl_coordinates = comparison_line.coordinates
match = self.hocr_comparator.compare_coordinates(bl_coordinates, cl_coordinates)
if match:
# line which already has been matched to a cluster can't be baseline anymore
comparison_line.marked = True
list_for_baseline.append(comparison_line)
final_list.append(list_for_baseline)
return final_list
def normalize_ocropus_list(self, ocropus_list):
"""
Normalizes the ocropus list: groups lines and sets ocr_text_normalized
:param ocropus_list:
:return:
"""
ocrolistlist_linified = self.linify_list(ocropus_list)
ocrolist_linified = self.unify_list_entries(ocrolistlist_linified)
return_list = []
# normalize ocr_text property
for line in ocrolist_linified:
text_to_add = line._hocr_html.contents[0]
line.ocr_text_normalized = text_to_add
return_list.append(line)
return return_list
def normalize_abbyy_list(self, abbyy_list):
"""
Normalizes the abbyy list: groups lines and sets ocr_text_normalized
:param abbyy_list:
:return:
"""
abbyylistlist_linified = self.linify_list(abbyy_list)
abbyylist_linified = self.unify_list_entries(abbyylistlist_linified, "ABBYY")
return_list = []
for line in abbyylist_linified:
if line.ocr_text_normalized is None:
line.ocr_text_normalized = line.ocr_text
return_list.append(line)
return return_list
def normalize_tesseract_list(self, tesseract_list):
return_list = []
for line in tesseract_list:
if line.ocr_text_normalized is None:
line.ocr_text_normalized = line.ocr_text
return_list.append(line)
return return_list
```
#### File: ocromore/n_dist_keying/marker.py
```python
class Marker:
@staticmethod
def is_not_marked(element):
"""
This changes element property marked
If a given element hasn't got a marked property - add marked=False and return True
If a given element has the marked property with marked=False, return True
If a given element has the marked property with marked=True, return False
:param element: element to check upon
:return: see description
"""
if not hasattr(element, 'marked'):
element.marked = False
if element.marked is True or element.marked is False:
return not element.marked
else:
raise Exception("THIS SHOULDN'T HAPPEN!")
@staticmethod
def mark_element(element):
"""
Set property marked in element to True
:param element: element to mark
:return:
"""
element.marked = True
@staticmethod
def unmark_element(element):
element.marked = False
@staticmethod
def mark_element_custom_tag(element, tag):
element[tag] = True
@staticmethod
def unmark_element_custom_tag(element, tag):
element[tag] = False
@staticmethod
def is_element_marked_with_custom_tag(element,tag):
if not hasattr(element, tag):
return False
if element[tag] is True or element[tag] is False:
return element[tag]
else:
raise Exception("THIS SHOULDN'T HAPPEN!")
```
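A short usage sketch for the marking helpers above; the Line class is just a stand-in object for illustration.
```python
from n_dist_keying.marker import Marker


class Line:
    """Minimal dummy object that can carry a 'marked' attribute."""
    pass


line = Line()
print(Marker.is_not_marked(line))  # True, and 'marked' is initialized to False
Marker.mark_element(line)
print(Marker.is_not_marked(line))  # False
Marker.unmark_element(line)
```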
#### File: ocromore/n_dist_keying/n_distance_voter.py
```python
from n_dist_keying.distance_storage import DistanceStorage
from n_dist_keying.text_comparator import TextComparator
import numpy as np
class NDistanceVoter(object):
def __init__(self, texts):
self.d_storage = DistanceStorage()
self._texts = texts
def set_texts(self, new_texts):
self._texts = new_texts
def get_texts(self):
return self._texts
def reset(self):
self.d_storage = DistanceStorage()
self._texts = []
def compare_texts(self, take_longest_on_empty_lines=False, vote_without_spaces=False):
"""
Compares an array of texts and gives the n_distance vote
:return: index of the text with the smallest accumulated distance to all others
"""
texts_loc = self.get_texts()
if vote_without_spaces:
for text_index, text in enumerate(texts_loc):
texts_loc[text_index] = text.replace(" ","")
if take_longest_on_empty_lines is True:
texts = self.get_texts()
textlens = []
number_empty = 0
for text in texts:
if text is False or text is True:
text = "" # todo verify this correct j4t 20.02
textlens.append(len(text))
if text.strip(" ") == "":
number_empty += 1
too_less_text = (len(texts)-number_empty) <= 2
if too_less_text:
# if too less strings to compare, just take the longest string as result
selected_index = np.argmax(textlens)
return selected_index
# do a text-wise comparison, which calculates a distance between all texts in this set
for text_index, text in enumerate(texts_loc):
self.compare_with_other_texts(text_index, text)
# calculate the distance from each item in set to all others
for text_index, text in enumerate(texts_loc):
self.d_storage.calculate_accumulated_distance(text_index)
# get the index of the item in set, which has the shortest distance to all others
self.d_storage.calculate_shortest_distance_index()
shortest_dist_index = self.d_storage.get_shortest_distance_index()
return shortest_dist_index
def compare_with_other_texts(self, text_index, text):
for text_index_cmp, text_cmp in enumerate(self.get_texts()):
# if line has the same index, continue
if text_index is text_index_cmp:
continue
existing_distance = self.d_storage.fetch_value(text_index, text_index_cmp)
# if line was already compared, continue
if existing_distance is not None:
continue
distance = self.get_distance(text, text_cmp)
self.d_storage.store_value(text_index, text_index_cmp, distance)
def get_distance(self, text1, text2):
# todo add more possibilities for distance measurement, i.e confidences, edit distance, context weighting
MODE_DIFFLIB = 'difflib' #best bet
MODE_NORMED_LEVENSHTEIN = 'normed_levenshtein' # longest alignment normed levenshtein distance
MODE_SORENSEN = 'sorensen'
MODE_JACCARD = 'jaccard'
MODE_HAMMING = 'hamming'
MODE_MYERS = 'myers' # use myers special difflib sequence matcher
mode = MODE_DIFFLIB # set your mode here
# return a fixed negative value if one of the strings is not defined
if text1 is False and text2 is False or text1 is None and text2 is None:
return 0
# One is false and one is not false
if (text1 is False or text2 is False) or (text1 is None or text2 is None):
return 1
dist = 1
if mode == MODE_DIFFLIB:
dist = TextComparator.compare_ocr_strings_difflib_seqmatch(text1, text2)
elif mode == MODE_NORMED_LEVENSHTEIN:
dist = TextComparator.compare_ocr_strings_levensthein_normed(text1, text2)
elif mode == MODE_HAMMING:
dist = TextComparator.compare_ocr_strings_hamming(text1, text2)
elif mode == MODE_SORENSEN:
dist = TextComparator.compare_ocr_strings_sorensen(text1, text2)
elif mode == MODE_JACCARD:
dist = TextComparator.compare_ocr_strings_jaccard(text1, text2)
elif mode == MODE_MYERS:
dist = TextComparator.compare_ocr_strings_myers(text1, text2)
return dist
```
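A small sketch of the voting step in isolation, assuming the ocromore package is on the path; the candidate strings are made up.
```python
from n_dist_keying.n_distance_voter import NDistanceVoter

candidates = ["Aufsichtsrat: Dr. Meyer",
              "Aufsichtsrat: Dr. Meyer",
              "Aufsiehtsrat: Dr, Meyer"]
voter = NDistanceVoter(candidates)
best_index = voter.compare_texts()
print(candidates[best_index])  # the text with the smallest distance to all others wins
```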
#### File: ocromore/n_dist_keying/text_corrector.py
```python
import re
class TextCorrector(object):
def __init__(self):
print("init corrector")
@staticmethod
def correct_line_text(line_text):
CORRECT_ROMAN_II = True
CORRECT_EXLAMATIONS = False
if CORRECT_ROMAN_II:
line_text = TextCorrector.correct_roman_ii(line_text)
if CORRECT_EXLAMATIONS:
line_text = TextCorrector.correct_exclamation_marks(line_text)
return line_text
@staticmethod
def correct_exclamation_marks(line_text):
if line_text is None or line_text is False or line_text is True:
return line_text
DEFAULT_REPLACEMENT = "\""
possible_exclamation_combos = \
["'\"", "\"'", "''", "\"\""]
def do_correction(line_text, pec):
line_text_new = line_text.replace(pec, DEFAULT_REPLACEMENT)
return line_text_new
for pec in possible_exclamation_combos:
if pec in line_text:
line_text = do_correction(line_text, pec)
return line_text
@staticmethod
def correct_roman_ii(line_text):
"""
Corrects occurrences of roman II letters which get confused with I1 or 11 sometimes
Example:
Groundtruth: 1948/II/49 bis 1954: je 0 %
line_text : 1948/11/49 bis 1954: je 0%
or
Groundtruth: II/1955/42 text text
line_text: 1I/1955/42 text text
:param line_text:
:return:
"""
# check if the possibly erronous line occurs:
# if the first 2 or more characters are 1, or I, or l, or i
# and following 3 or more are decimals, and following 2 or more are decimals
# (seperated by slashes)
rii_regex = re.compile(r'[1Ili]{2,}/\d{3,}/\d{2,}')
rii_match = rii_regex.match(line_text)
rii_regex2 = re.compile(r'\d{4,}/[1Ili]{2}/\d{2,}')
rii_match2 = rii_regex2.match(line_text)
# return the changed text, if the pattern occurs
if rii_match is not None:
subst_string = re.sub(r'[1Ili]{2,}/', "II/", line_text)
# place the corrected roman two in line_text
return subst_string
elif rii_match2 is not None:
# todo verify this once with bp the case definitely exists but not in manyyears
subst_string = re.sub(r'[1Ili]{2}/', "II/", line_text)
# place the corrected roman two in line_text
return subst_string
# return unchanged if pattern doesn't occur
return line_text
```
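The docstring example above, run through the corrector (import path per the file layout; treat it as an assumption).
```python
from n_dist_keying.text_corrector import TextCorrector

print(TextCorrector.correct_roman_ii("1948/11/49 bis 1954: je 0%"))
# -> 1948/II/49 bis 1954: je 0%
print(TextCorrector.correct_line_text("1I/1955/42 text text"))
# -> II/1955/42 text text   (CORRECT_ROMAN_II is enabled by default)
```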
#### File: ocromore/n_dist_keying/text_unspacer.py
```python
from akf_corelib.queues import Ranged_Filo
from akf_corelib.random import Random
from akf_corelib.typecasts import TypeCasts
from n_dist_keying.text_comparator import TextComparator
class TextUnspacer:
"""
Class which helps to filter out spaced text like this ('s p a c e d' with a comparison text like 'spaced')
#todo unspace_text can be improved, sometimes repeating patterns like ultimo ultimo 1998, in comparison to ultimo 1998 produce weird results
#todo ...this can be filtered or the matching parts of both strings can be compared, non matched part acts as a rest
"""
def get_tuples(self, text, size_filo, search_range_filo, non_spaced_only = False):
text_split = list(text)
text_split_size = len(text_split)
# print("Text is ", text)
tuples_list = []
tuples_adjacent_list = []
# pad up the rest, for fifo clearance
for i in range(0, size_filo+2):
text_split.append('¦')
current_chars_filo = Ranged_Filo(size_filo+2, search_range_filo, True)
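# A fixed-size FILO window is slid over the characters; the items around its middle
# position form the tuple that is recorded for every space (or non-space) centre character.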
for index_char, char in enumerate(text_split):
current_chars_filo.push(char)
middle_items = current_chars_filo.get_middle_items_for_range(search_range_filo)
middle_items_adjacent = current_chars_filo.get_middle_items_for_range(search_range_filo+1)
current_size = current_chars_filo.size()
# wait until filo is filled, unless the amount of chars is smaller than filo
if current_size < size_filo and text_split_size >= size_filo:
continue
if non_spaced_only is False:
if middle_items_adjacent[current_chars_filo.get_middle_index()] == ' ':
# print("Index_char:", index_char, "Char: ", char, "Tuple:", middle_items)
tuples_list.append(middle_items)
tuples_adjacent_list.append(middle_items_adjacent)
else:
if middle_items_adjacent[current_chars_filo.get_middle_index()] != ' ':
# print("Index_char:", index_char, "Char: ", char, "Tuple:", middle_items)
tuples_list.append(middle_items)
tuples_adjacent_list.append(middle_items_adjacent)
return tuples_list, tuples_adjacent_list
def add_adjacent_tuple_information(self, tuples, tuples_with_adjacent_info):
final_tuple_list = []
change = False
for tuple_index, tuple in enumerate(tuples):
tuplec = tuple[:]
tuplec_with_adjacent_info = tuples_with_adjacent_info[tuple_index][:]
tuplec_low_end = tuplec[0]
tuplec_high_end = tuple[len(tuple)-1]
tupleca_low_end = tuplec_with_adjacent_info[0]
tupleca_high_end = tuplec_with_adjacent_info[len(tuplec_with_adjacent_info)-1]
if tuplec_low_end == ' ':
if tupleca_low_end is not None and tupleca_low_end != ' ':
tuplec = Random.replace_value_in_tuple(tuplec, tupleca_low_end, 0)
change = True
if tuplec_high_end == ' ':
if tupleca_high_end is not None and tupleca_high_end != ' ':
tuplec = Random.replace_value_in_tuple(tuplec, tupleca_high_end, len(tuplec)-1)
change = True
final_tuple_list.append(tuplec)
return final_tuple_list, change
def unspace_texts(self, text_list, list_index_to_unspace, unspaced_list_index):
PRINT_OUTPUT = False
SEARCH_RANGE_FILO = 1
SIZE_FILO = 3
WILDCARD_MODE = True # possibility to tolerate a certain amount of errors in subtraction of arrays
WILDCARD_COUNT = 2 # amount of errors tolerated with wildcards
if WILDCARD_MODE:
SEARCH_RANGE_FILO = 2
SIZE_FILO = 5
line_to_process = text_list[list_index_to_unspace]
line_unspaced = text_list[unspaced_list_index]
# just return if there is no text for comparison
if not hasattr(line_to_process, 'ocr_text_normalized') \
or not hasattr(line_unspaced, 'ocr_text_normalized'):
return text_list
text = line_to_process.ocr_text_normalized
text_cmp = line_unspaced.ocr_text_normalized
if "Dividenden auf" in text_cmp:
print("im here")
text_tuple, text_tuple_adj = self.get_tuples(text, SIZE_FILO, SEARCH_RANGE_FILO)
text_tuple_cmp, text_tuple_cmp_adj = self.get_tuples(text_cmp, SIZE_FILO, SEARCH_RANGE_FILO)
if WILDCARD_MODE:
text_tuple2, change1 = self.add_adjacent_tuple_information(text_tuple, text_tuple_adj)
text_tuple_cmp2, change2 = self.add_adjacent_tuple_information(text_tuple_cmp, text_tuple_cmp_adj)
# if change1 or change2:
# print("test")
diff1 = self.subtract_arrays(text_tuple, text_tuple_cmp, WILDCARD_MODE, WILDCARD_COUNT, text_tuple2,0)
# if len(text_tuple) != len(text_tuple2):
# print("test")
# put text to process together with the process information
text_unspaced = self.create_non_spaced_string(text, diff1, SIZE_FILO, SEARCH_RANGE_FILO)
if PRINT_OUTPUT:
print("US-> line:", text)
print("US-> cmpr:", text_cmp)
print("US-> uspc:", text_unspaced)
# apply to list again
text_list[list_index_to_unspace].ocr_text_normalized = text_unspaced
return text_list
def refspace_texts(self, text_list, list_index_to_process, list_index_reference):
PRINT_OUTPUT = False
SEARCH_RANGE_FILO = 1
SIZE_FILO = 3
WILDCARD_MODE = True # possibility to tolerate a certain amount of errors in subtraction of arrays
WILDCARD_COUNT = 2 # amount of errors tolerated with wildcards
if WILDCARD_MODE:
SEARCH_RANGE_FILO = 2
SIZE_FILO = 5
line_to_process = text_list[list_index_to_process]
line_reference = text_list[list_index_reference]
# just return if there is no text for comparison
if not hasattr(line_to_process, 'ocr_text_normalized') \
or not hasattr(line_reference, 'ocr_text_normalized'):
return text_list
text = line_to_process.ocr_text_normalized
text_reference = line_reference.ocr_text_normalized
# text = "der A u f s i c h t s ra t hier ist"
# text_reference = "der Aufsichtsrat hier i s t"
#if "Aus den G" in text_cmp:
# print("im here")
text_tuple, text_tuple_adj = self.get_tuples(text, SIZE_FILO, SEARCH_RANGE_FILO)
text_tuple_nonsp, text_tuple_adj_nonsp = self.get_tuples(text, SIZE_FILO, SEARCH_RANGE_FILO, True)
text_tuple_ref, text_tuple_cmp_adj = self.get_tuples(text_reference, SIZE_FILO, SEARCH_RANGE_FILO)
text_tuple_ref_nonsp, text_tuple_cmp_adj_nonsp = self.get_tuples(text, SIZE_FILO, SEARCH_RANGE_FILO, True)
if WILDCARD_MODE:
text_tuple_with_adj, change1 = self.add_adjacent_tuple_information(text_tuple, text_tuple_adj)
text_tuple_ref2, change2 = self.add_adjacent_tuple_information(text_tuple_ref, text_tuple_cmp_adj)
text_tuple2_nonsp, change1_nonsp = self.add_adjacent_tuple_information(text_tuple_nonsp, text_tuple_adj_nonsp)
text_tuple_ref2_nonsp, change2_nonsp = self.add_adjacent_tuple_information(text_tuple_ref_nonsp, text_tuple_cmp_adj_nonsp)
# if change1 or change2:
# print("test")
# print("text", text, "text_cmp", text_reference)
diff1 = self.subtract_arrays(text_tuple, text_tuple_ref, WILDCARD_MODE, WILDCARD_COUNT, text_tuple_with_adj, 0)
diff2 = self.subtract_arrays(text_tuple_ref, text_tuple, WILDCARD_MODE, WILDCARD_COUNT, text_tuple_with_adj, 1)
# if len(text_tuple) != len(text_tuple2):
# print("test")
# put text to process together with the process information
text_unspaced = self.create_non_spaced_string(text, diff1, SIZE_FILO, SEARCH_RANGE_FILO)
text_final = self.create_spaced_string(text_unspaced, diff2, SIZE_FILO, SEARCH_RANGE_FILO)
if PRINT_OUTPUT:
print("RS-> line:", text)
print("RS-> refr:", text_reference)
print("RS-> uspc:", text_unspaced)
print("RS-> rspc:", text_final)
# apply to list again
text_list[list_index_to_process].ocr_text_normalized = text_final
return text_list
def create_spaced_string(self, text, diff_tuples, size_filo, search_range_filo):
PADDING_CHAR = '¦'
MID_FILL_CHAR = '¯'
final_text = text
for current_tuple in diff_tuples:
current_tuple_list = list(current_tuple)
middle_index_list = Random.find_middle(len(current_tuple_list),True)
current_tuple_list[middle_index_list] = MID_FILL_CHAR
stringed_tuple = TypeCasts.list_to_string(current_tuple_list)
stringed_tuple = stringed_tuple.strip() # trim outbound spaces
stringed_tuple = stringed_tuple.replace(PADDING_CHAR, '')
stringed_tuple_final = stringed_tuple.replace(MID_FILL_CHAR, '')
stringed_replacement = stringed_tuple.replace(MID_FILL_CHAR,' ')
# found_in_text = text.find(stringed_tuple_final)
new_text = final_text.replace(stringed_tuple_final, stringed_replacement)
final_text = new_text
return final_text
def create_non_spaced_string(self, text, diff_tuples, size_filo, search_range_filo):
PADDING_CHAR = '¦'
# pad values because of filos
text_padded = Random.append_pad_values(text, size_filo, PADDING_CHAR)
text_split = list(text_padded)
current_chars_filo = Ranged_Filo(size_filo, search_range_filo, True)
filo_mid_index = current_chars_filo.get_middle_index()
final_text = ""
for char_index, char in enumerate(text_split):
current_chars_filo.push(char)
# if current middle char is ' ' and there is a diff tuple for that, don't push it to final string
current_tuple = current_chars_filo.get_middle_items(True, True)
current_middle_char = current_tuple[filo_mid_index]
its_a_diff_tuple = False
for diff_tuple_index, diff_tuple in enumerate(diff_tuples):
if current_tuple == diff_tuple:
diff_tuples[diff_tuple_index] = "done" # mark this tuple as corrected
its_a_diff_tuple = True
break # escape inner loop
if current_middle_char != PADDING_CHAR:  # do not append padded chars
if not its_a_diff_tuple and current_middle_char is not None:
final_text += current_middle_char
return final_text
def subtract_arrays(self, arr1_in, arr2_in, wildcard_mode=False, wildcard_count=0, arr_ref_in_adj=[], non_reference_switch=0):
"""
Subtract arr2 from arr1 (arr1-arr2), same contents get subtracted once
i.e. arr1 = ['a','a','b'], arr2 = ['a'], result is ['a', 'b']
:param arr1_in: input list one
:param arr2_in: input list two
:return: subtracted output
"""
ORDER_CONFUSION_TRESHOLD = 0.4
# create copies of input arrays, which can be modified
arr1 = arr1_in[:]
arr2 = arr2_in[:]
# mark everything which is equal once
for index_a1, entry_a1 in enumerate(arr1):
#if index_a1 == 12:
# print("reachd breaking point")
for index_a2, entry_a2 in enumerate(arr2):
entry_ref_adj = None
#entry_a2_adj = None
if wildcard_mode:
used_ref_index = -1
if non_reference_switch == 0:
used_ref_index = index_a1
else:
used_ref_index = index_a2
#if len(arr_ref_in_adj) <= used_ref_index:
# print("what")
entry_ref_adj = arr_ref_in_adj[used_ref_index]
#entry_a2_adj = arr2_in_adj[index_a2]
if wildcard_mode is False:
if entry_a1 is not None and entry_a1 == entry_a2:
arr1[index_a1] = None
arr2[index_a2] = None
break
else:
if entry_a1 is not None and entry_a2 is not None:
tpldif_ctr, tpldif, order_confusion = TextComparator.compare_tuples(entry_a1, entry_a2)
if tpldif_ctr <= wildcard_count:
if order_confusion > ORDER_CONFUSION_TRESHOLD:
continue
if tpldif_ctr != 0:
# additional condition, the string shouldn't be order confused
# print("1st", entry_a1)
# print("2nd", entry_a2)
#if order_confusion > ORDER_CONFUSION_TRESHOLD:
# continue
if non_reference_switch == 0:
tpldif_ctr2, tpldif2, order_confusion2 = TextComparator.compare_tuples(entry_ref_adj, entry_a2)
else:
tpldif_ctr2, tpldif2, order_confusion2 = TextComparator.compare_tuples(entry_ref_adj, entry_a1)
if tpldif_ctr2 == 0:
# this means the adjusted version is equal and this is a match
arr1[index_a1] = None
arr2[index_a2] = None
break
else:
continue # else don't remove cause no match, search on
arr1[index_a1] = None
arr2[index_a2] = None
break
#res1 = TextComparator.compare_ocr_strings_difflib_seqmatch(entry_a1, entry_a2)
#res2 = TextComparator.compare_ocr_strings_difflib_difftool(entry_a1, entry_a2)
#print("done")
# subtract nones from arr1
arr1_reduced = [value for value in arr1 if value is not None]
return arr1_reduced
```
#### File: ocromore/ocr_validation/isri_handler.py
```python
from subprocess import call
import os
from akf_corelib.conditional_print import ConditionalPrint
from configuration.configuration_handler import ConfigurationHandler
class IsriHandler(object):
def __init__(self):
self.os = os.name.lower()
config_handler = ConfigurationHandler(first_init=False)
self.config = config_handler.get_config()
if 'ExceptionInitializing' in self.config:
print("Exception initializing config, don't print")
self.cpr = ConditionalPrint(False, False, False)
else:
self.cpr = ConditionalPrint(self.config.PRINT_MSA_HANDLER, self.config.PRINT_EXCEPTION_LEVEL,
self.config.PRINT_WARNING_LEVEL)
if self.os != 'linux' and self.os != 'posix':
raise OSError("Untested operating system adapt code and continue at own risk")
def accuracy(self, path_correctfile, path_generatedfile, path_accuracy_report=""):
try:
call(["accuracy", path_correctfile, path_generatedfile, path_accuracy_report])
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
class SynctextConfig(object):
def __init__(self):
self._used_config_acc =[]
def use_T_algorithm(self):
self._used_config_acc.append("-T")
def use_H_algorithm(self):
self._used_config_acc.append("-H")
def use_case_insensitive(self):
self._used_config_acc.append("-i")
def use_display_suspect_markers_in_output(self):
self._used_config_acc.append("-s")
def get_used_config(self):
return self._used_config_acc
def clear_used_config(self):
self._used_config_acc = []
def synctext(self, filepaths, path_generatedfile=None, synctext_config = SynctextConfig()):
try:
flags = synctext_config.get_used_config()
calls = ["synctext"]
calls.extend(flags)
calls.extend(filepaths)
if path_generatedfile is None:
call(calls)
else:
filehandle = self.create_file_if_doesnt_exist(path_generatedfile, True)
call(calls, stdout=filehandle)
filehandle.close()
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
def accsum(self, filepaths_accreports, path_generatedfile=None):
try:
calls = ["accsum"]
calls.extend(filepaths_accreports)
if path_generatedfile is None:
call(calls)
else:
filehandle = self.create_file_if_doesnt_exist(path_generatedfile, True)
call(calls, stdout=filehandle)
filehandle.close()
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
def groupacc(self, path_groupfile, path_accuracy_report, path_groupacc_report = None):
try:
calls = ["groupacc"]
calls.append(path_groupfile)
calls.append(path_accuracy_report)
if path_groupacc_report is None:
call(calls)
else:
filehandle = self.create_file_if_doesnt_exist(path_groupacc_report, True)
filehandle.close()
calls.append(path_groupacc_report)
call(calls)
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
def accdist(self, filepaths_accreports, path_generated_xyfile=None):
try:
calls = ["accdist"]
calls.extend(filepaths_accreports)
if path_generated_xyfile is None:
call(calls)
else:
filehandle = self.create_file_if_doesnt_exist(path_generated_xyfile, True)
call(calls, stdout=filehandle)
filehandle.close()
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
class NGramConfig(object):
def __init__(self):
self._used_config_acc =[]
def set_ngram_size(self, number):
if number>=1 and number <= 3:
self._used_config_acc.append("-n")
self._used_config_acc.append(str(number))
def clear_used_config(self):
self._used_config_acc = []
def get_used_config(self):
return self._used_config_acc
def ngram(self, filepaths, path_generatedfile = None, ngram_config = NGramConfig()):
try:
flags = ngram_config.get_used_config()
calls = ["ngram"]
calls.extend(flags)
calls.extend(filepaths)
if path_generatedfile is None:
call(calls)
else:
filehandle = self.create_file_if_doesnt_exist(path_generatedfile, True)
call(calls, stdout=filehandle)
filehandle.close()
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
class VoteConfig(object):
def __init__(self):
self._used_config_acc =[]
def enable_O_optimization(self):
self._used_config_acc.append("-O")
def set_s(self, fraction_counter, fraction_denominator):
self._used_config_acc.append("-s")
self._used_config_acc.append(fraction_counter+"/"+fraction_denominator)
def set_w(self, fraction_counter, fraction_denominator):
self._used_config_acc.append("-w")
self._used_config_acc.append(fraction_counter+"/"+fraction_denominator)
def set_output_file(self, path_outputfile):
self._used_config_acc.append("-o")
self._used_config_acc.append(path_outputfile) #ok?
def clear_used_config(self):
self._used_config_acc = []
def get_used_config(self):
return self._used_config_acc
def vote(self, filepaths, ngram_config = VoteConfig()):
try:
flags = ngram_config.get_used_config()
calls = ["vote"]
calls.extend(flags)
calls.extend(filepaths)
call(calls)
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
def wordacc(self, path_correctfile, path_comparison_file, path_stopwordfile = None, path_wordacc_report = None):
try:
calls = ["wordacc"]
if path_stopwordfile is not None:
calls.append("-S")
calls.append(path_stopwordfile)
calls.append(path_correctfile)
calls.append(path_comparison_file)
if path_wordacc_report is not None:
calls.append(path_wordacc_report)
call(calls)
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
def wordaccsum(self, filepaths_wordacc_reports, path_accsumreport=None):
try:
calls = ["wordaccsum"]
calls.extend(filepaths_wordacc_reports)
if path_accsumreport is None:
call(calls)
else:
filehandle = self.create_file_if_doesnt_exist(path_accsumreport, True)
call(calls, stdout=filehandle)
filehandle.close()
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
def nonstopacc(self, path_stopwordfile, path_wordacc_report, path_output_xyfile=None):
try:
calls = ["nonstopacc"]
calls.append(path_stopwordfile)
calls.append(path_wordacc_report)
if path_output_xyfile is None:
call(calls)
else:
filehandle = self.create_file_if_doesnt_exist(path_output_xyfile, True)
call(calls, stdout=filehandle)
filehandle.close()
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
def wordaccci(self, filepaths_wordacc_reports, path_outputfile=None):
try:
calls = ["wordaccci"]
calls.extend(filepaths_wordacc_reports)
if path_outputfile is None:
call(calls)
else:
filehandle = self.create_file_if_doesnt_exist(path_outputfile, True)
call(calls, stdout=filehandle)
filehandle.close()
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
def wordaccdist(self, filepaths_wordacc_reports, path_output_xyfile=None):
try:
calls = ["wordaccdist"]
calls.extend(filepaths_wordacc_reports)
if path_output_xyfile is None:
call(calls)
else:
filehandle = self.create_file_if_doesnt_exist(path_output_xyfile, True)
call(calls, stdout=filehandle)
filehandle.close()
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
def wordfreq(self, filepaths_inputtext, path_resultfile=None):
try:
calls = ["wordfreq"]
calls.extend(filepaths_inputtext)
if path_resultfile is None:
call(calls)
else:
filehandle = self.create_file_if_doesnt_exist(path_resultfile, True)
call(calls, stdout=filehandle)
filehandle.close()
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
#todo add the zoning programs some day: point 4 in doc
def editop(self, path_correctfile, path_comparison_file, path_editop_report = None):
try:
calls = ["editop"]
calls.append(path_correctfile)
calls.append(path_comparison_file)
if path_editop_report is not None:
calls.append(path_editop_report)
call(calls)
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
def editopsum(self, filepaths_editopreports, path_summed_report=None):
try:
calls = ["editopsum"]
calls.extend(filepaths_editopreports)
if path_summed_report is None:
call(calls)
else:
filehandle = self.create_file_if_doesnt_exist(path_summed_report, True)
call(calls, stdout=filehandle)
filehandle.close()
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
def editopcost(self, path_editop_report, path_editop_report2=None, path_output_xyfile=None):
try:
calls = ["editopcost"]
calls.append(path_editop_report)
if path_editop_report2 is not None:
calls.append(path_editop_report2)
if path_output_xyfile is None:
call(calls)
else:
filehandle = self.create_file_if_doesnt_exist(path_output_xyfile, True)
call(calls, stdout=filehandle)
filehandle.close()
except Exception as ex:
self.cpr.printex("Exception calling pycharm", ex)
def create_file_if_doesnt_exist(self, filepath, overwrite = False):
file = open(filepath, 'w+')
if overwrite:
self.delete_file_content(file)
return file
def delete_file_content(self, pfile):
pfile.seek(0)
pfile.truncate()
```
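A sketch of driving the wrapper above; it assumes the ISRI OCR evaluation tools (accuracy, wordacc, accsum, ...) are installed and on PATH, that the project configuration is initialised, and that the file paths are placeholders.
```python
from ocr_validation.isri_handler import IsriHandler

isri = IsriHandler()
# character accuracy of one OCR result against its ground truth
isri.accuracy("/data/gt/0001.txt", "/data/ocr/0001.txt", "/data/reports/0001.acc")
# word accuracy, optionally with a stopword list
isri.wordacc("/data/gt/0001.txt", "/data/ocr/0001.txt",
             path_stopwordfile=None, path_wordacc_report="/data/reports/0001.wacc")
# accumulate several accuracy reports into one summary
isri.accsum(["/data/reports/0001.acc", "/data/reports/0002.acc"],
            "/data/reports/all.accsum")
```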
#### File: ocromore/ocr_validation/ocrolib_edist.py
```python
from scipy.ndimage import filters
from numpy import empty, ones, arange, array
import re
class Edist3:
@staticmethod
def normed_levenshtein(a, b):
""" or nlevenshtein (with longest alignment)
Calculates a normalized version of the levenshtein distance.
Divided through the maximum length (which is in levenshtein the length
of the longer string)
:param a: first string
:param b: second string
:return: normed levenshtein distance and levenshtein distance
"""
len_a = len(a)
len_b = len(b)
max_len = max(len_a, len_b)
ldist = Edist3.levenshtein(a, b)
normed_ldist = ldist / max_len
return normed_ldist, ldist
@staticmethod
def levenshtein(a, b):
"""Calculates the Levenshtein distance between a and b.
(Clever compact Pythonic implementation from hetland.org)"""
n, m = len(a), len(b)
if n > m: a,b = b,a; n,m = m,n
current = range(n+1)
for i in range(1,m+1):
previous,current = current,[i]+[0]*n
for j in range(1,n+1):
add,delete = previous[j]+1,current[j-1]+1
change = previous[j-1]
if a[j-1]!=b[i-1]: change = change+1
current[j] = min(add, delete, change)
return current[n]
@staticmethod
def xlevenshtein(a, b, context=1):
Calculates the Levenshtein distance between a and b
and generates a list of differences by context."""
n, m = len(a), len(b)
assert m > 0 # xlevenshtein should only be called with non-empty b string (ground truth)
if a == b: return 0, [] # speed up for the easy case
sources = empty((m+1, n+1), object)
sources[:,:] = None
dists = 99999*ones((m+1, n+1))
dists[0, :] = arange(n+1)
for i in range(1, m+1):
previous = dists[i-1,:]
current = dists[i, :]
current[0] = i
for j in range(1,n+1):
if previous[j]+1 < current[j]:
sources[i, j] = (i-1, j)
dists[i, j] = previous[j]+1
if current[j-1]+1 < current[j]:
sources[i,j] = (i, j-1)
dists[i, j] = current[j-1]+1
delta = 1*(a[j-1] != b[i-1])
if previous[j-1] + delta < current[j]:
sources[i, j] = (i-1, j-1)
dists[i, j] = previous[j-1]+delta
cost = current[n]
# reconstruct the paths and produce two aligned strings
l = sources[i, n]
path = []
while l is not None:
path.append(l)
i, j = l
l = sources[i,j]
al, bl = [], []
path = [(n+2,m+2)]+path
for k in range(len(path)-1):
i, j = path[k]
i0, j0 = path[k+1]
u = "_"
v = "_"
if j != j0 and j0 < n: u = a[j0]
if i != i0 and i0 < m: v = b[i0]
al.append(u)
bl.append(v)
al = "".join(al[::-1])
bl = "".join(bl[::-1])
# now compute a splittable string with the differences
assert len(al) == len(bl)
al = " "*context+al+" "*context
bl = " "*context+bl+" "*context
assert "~" not in al and "~" not in bl
same = array([al[i] == bl[i] for i in range(len(al))], 'i')
same = filters.minimum_filter(same, 1+2*context)
als = "".join([al[i] if not same[i] else "~" for i in range(len(al))])
bls = "".join([bl[i] if not same[i] else "~" for i in range(len(bl))])
# print(als)
# print(bls)
ags = re.split(r'~+', als)
bgs = re.split(r'~+', bls)
confusions = [(a, b) for a, b in zip(ags,bgs) if a != "" or b != ""]
return cost, confusions
```
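A quick demonstration of the distance helpers above (import path per the file layout; the example strings are made up).
```python
from ocr_validation.ocrolib_edist import Edist3

print(Edist3.levenshtein("Aufsichtsrat", "Aufsiehtsrat"))              # 1
normed, dist = Edist3.normed_levenshtein("Aufsichtsrat", "Aufsiehtsrat")
print(normed, dist)                                                     # ~0.083 1

# xlevenshtein additionally returns the confusions with one character of context
cost, confusions = Edist3.xlevenshtein("Aufsiehtsrat", "Aufsichtsrat")
print(cost, confusions)                                                 # 1 [('ieh', 'ich')]
```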
#### File: JKamlah/ocromore/tableparser.py
```python
from akf_corelib.df_objectifier import DFObjectifier
from n_dist_keying.database_handler import DatabaseHandler
from ocr_validation.visualization_handler import VisualizationHandler
from ocr_validation.isri_handler import IsriHandler
from os import listdir
from os.path import isfile, join
import os
import shutil
from vocabulary_checker.vocabulary_checker import VocabularyChecker
class TableParser(object):
def __init__(self, config, voter_mode = True):
self._config = config
# give the last element in split path
if voter_mode is True:
dbpath = config.DB_DIR_VOTER
else:
dbpath = config.DB_DIR_READER
self.vocab_checker = None
if config.KEYING_RESULT_VOCABULARY_CORRECTION_POST or config.KEYING_RESULT_VOCABULARY_CORRECTION_VOTE:
try:
# initialize spellchecker, if one of the vote modes is active
self.vocab_checker = VocabularyChecker()
self.vocab_checker.initialize_lines(config.KEYING_RESULT_VC_DICT_PATH,
config.KEYING_RESULT_VC_DICT_REMOVE_SPECIAL_BORDER_CHARS)
self.vocab_checker.initialize_lines(config.KEYING_RESULT_VC_DICT_PATH_2,
config.KEYING_RESULT_VC_DICT_REMOVE_SPECIAL_BORDER_CHARS)
self.vocab_checker.initialize_spellchecker()
except Exception as e:
config.KEYING_RESULT_VOCABULARY_CORRECTION_POST = False
config.KEYING_RESULT_VOCABULARY_CORRECTION_VOTE = False
self._base_db_dir = os.path.basename(os.path.normpath(dbpath))
def delete_output_dir(self):
        # delete the output directory
if os.path.exists(self._config.OUTPUT_ROOT_PATH):
shutil.rmtree(self._config.OUTPUT_ROOT_PATH)
def create_output_dir(self):
        # create the output directory
os.makedirs(self._config.OUTPUT_ROOT_PATH)
def create_isri_reports_old(self, filestructs, addendum):
acc_reports = []
wacc_reports = []
db_root_path = ""
for db in filestructs:
files = filestructs[db]
file = files[0]
# assume that each db has different root folder, just take first file for path reference
dbpath = 'sqlite:////' + file.dbpath
dbname = file.dbname
db_root_path = self.get_basic_output_directory(dbpath, addendum)
if os.path.exists(db_root_path):
fp_gen_acc_report, fp_gen_wacc_report = \
self.summarize_accuracy_reports(db_root_path, dbname)
acc_reports.append(fp_gen_acc_report)
wacc_reports.append(fp_gen_wacc_report)
# create big accumulated report
output_root_path = os.path.dirname(db_root_path)
self.summarize_accuracy_report_sums(wacc_reports, acc_reports, output_root_path)
def create_isri_reports(self, databases, filestructs, addendum):
acc_reports = []
wacc_reports = []
db_root_path = ""
for db in databases:
temp = os.path.splitext(db)[0]
db_keyname = os.path.basename(temp)
files = filestructs[db_keyname]
# file = files[0]
# assume that each db has different root folder, just take first file for path reference
dbpath = 'sqlite:////' + db
dbname = db_keyname
db_root_path = self.get_basic_output_directory(dbpath, addendum)
if os.path.exists(db_root_path):
fp_gen_acc_report, fp_gen_wacc_report = \
self.summarize_accuracy_reports(db_root_path, dbname)
acc_reports.append(fp_gen_acc_report)
wacc_reports.append(fp_gen_wacc_report)
# create big accumulated report
output_root_path = os.path.dirname(db_root_path)
self.summarize_accuracy_report_sums(wacc_reports, acc_reports, output_root_path)
def get_basic_output_directory(self, dbdir_abs, addendum):
basename_db_ext = os.path.basename(os.path.normpath(dbdir_abs))
basename_db = os.path.splitext(basename_db_ext)[0] # remove extension
basic_output_dir = self._config.OUTPUT_ROOT_PATH + "/" + self._base_db_dir+"_"+addendum + "/" + basename_db
return basic_output_dir
def parse_a_table(self, dbdir_abs, table):
# basename_db_ext = os.path.basename(os.path.normpath(dbdir_abs))
# basename_db = os.path.splitext(basename_db_ext)[0] # remove extension
additional_created_files = []
predictor = None
if self._config.PREDICTOR_AUFSICHTSRAT_ENABLED:
            # note: the import is kept inside this condition because importing it loads keras
from machine_learning_components.special_character_predictor import SpecialCharPredictor
predictor = SpecialCharPredictor()
predictor.load_prediction_model()
dataframe_wrapper = DFObjectifier(dbdir_abs, table)
database_handler = DatabaseHandler(dataframe_wrapper, self._config.NUMBER_OF_INPUTS, predictor, self.vocab_checker)
ocr_comparison = database_handler.create_ocr_comparison(predictor=predictor)
if self._config.KEYING_RESULT_VOCABULARY_CORRECTION_POST or self._config.KEYING_RESULT_VOCABULARY_CORRECTION_VOTE:
# hand over vocabulary checker if spellchecking is enabled
ocr_comparison.set_vocabulary_checker(self.vocab_checker)
ocr_comparison.sort_set()
# print("Print mean||decision||abbyy||tesseract||ocropus|||| without unspacing-------------------")
# ocr_comparison.print_sets(False)
if self._config.SAVE_INPUT_DATASETS_TO_FILE:
output_path_abbyy = self.get_basic_output_directory(dbdir_abs, "abbyy") + "/" + table + "_abbyy.txt"
output_path_tess = self.get_basic_output_directory(dbdir_abs, "tess") + "/" + table + "_tess.txt"
output_path_ocro = self.get_basic_output_directory(dbdir_abs, "ocro") + "/" + table + "_ocro.txt"
ocr_comparison.save_dataset_to_file(output_path_abbyy, 0, mode_add_linebreaks=False)
ocr_comparison.save_dataset_to_file(output_path_tess, 1, mode_add_linebreaks=False)
ocr_comparison.save_dataset_to_file(output_path_ocro, 2, mode_add_linebreaks=False)
if self._config.WRITE_HOCR:
ocr_comparison.save_dataset_to_hocr(output_path_abbyy, 0, mode_add_linebreaks=False)
ocr_comparison.save_dataset_to_hocr(output_path_tess, 1, mode_add_linebreaks=False)
ocr_comparison.save_dataset_to_hocr(output_path_ocro, 2, mode_add_linebreaks=False)
additional_created_files.append(output_path_abbyy)
additional_created_files.append(output_path_tess)
additional_created_files.append(output_path_ocro)
# ocr_comparison.save_dataset_to_file()
if self._config.DO_N_DIST_KEYING:
print("Doing: N_DIST_KEYING, WORDWISE KEYING: ", self._config.NDIST_USE_WORDWISE_KEYING)
ocr_comparison.do_n_distance_keying(self._config.NDIST_USE_WORDWISE_KEYING) # do the keying, which makes the decision which is the best line for each set
ocr_comparison.print_sets(False)
#ocr_comparison.print_n_distance_keying_results() # print keying results
if self._config.KEYING_RESULT_POSTCORRECTION:
ocr_comparison.do_postcorrection(postcorrect_keying=True)
created_path = self.get_basic_output_directory(dbdir_abs,"ndist_keying") + "/" + table + "_ndist.txt"
ocr_comparison.save_dataset_to_file(created_path, 0, self._config.MODE_ADD_LINEBREAKS, "ndist_keying")
return created_path, additional_created_files
if self._config.DO_MSA_BEST:
ocr_comparison.do_msa_best_new(self._config.MSA_BEST_USE_N_DIST_PIVOT,
self._config.MSA_BEST_USE_LONGEST_PIVOT,
self._config.MSA_BEST_USE_CHARCONFS,
self._config.MSA_BEST_USE_WORDWISE_MSA,
self._config.MSA_BEST_USE_SEARCHSPACE,
self._config.KEYING_RESULT_POSTCORRECTION)
if self._config.KEYING_RESULT_VOCABULARY_CORRECTION_POST:
ocr_comparison.do_vocabulary_correction()
if self._config.KEYING_RESULT_POSTCORRECTION:
ocr_comparison.do_postcorrection(postcorrect_keying=True,
postcorrect_msa=self._config.MSA_BEST_POSTCORRECTION,
postcorrect_ndist=self._config.NDIST_KEYING_POSTCORRECTION)
"""
if self._config.MSA_BEST_USE_WORDWISE_MSA:
# this is the new msa best invocation
ocr_comparison.do_msa_best_new(self._config.MSA_BEST_USE_N_DIST_PIVOT, self._config.MSA_BEST_USE_LONGEST_PIVOT, self._config.MSA_BEST_USE_CHARCONFS, \
self._config.MSA_BEST_USE_WORDWISE_MSA, self._config.MSA_BEST_USE_SEARCHSPACE, self._config.KEYING_RESULT_POSTCORRECTION)
else:
#todo refactor this old stuff
if self._config.MSA_BEST_USE_CHARCONFS is False:
if self._config.MSA_BEST_USE_N_DIST_PIVOT:
print("Doing: DO_MSA_BEST with MSA_BEST_USE_N_DIST_PIVOT")
ocr_comparison.do_msa_best_with_ndist_pivot()
else:
print("Doing: DO_MSA_BEST without NDIST_PIVOT")
ocr_comparison.do_msa_best()
else:
if self._config.MSA_BEST_USE_N_DIST_PIVOT:
print("Doing: DO_MSA_BEST with MSA_BEST_USE_N_DIST_PIVOT and CHARCONFS")
ocr_comparison.do_msa_best_with_ndist_pivot_charconf()
else:
print("Doing: DO_MSA_BEST without NDIST_PIVOT and CHARCONFS")
print("This is not implemented yet")
"""
#ocr_comparison.print_msa_best_results()
# created_path = self._config.OUTPUT_ROOT_PATH+"/"+self._base_db_dir+"//"+basename_db+"//"+table+"_msa_best.txt"
created_path = self.get_basic_output_directory(dbdir_abs,"msa_best") + "/" + table + "_msa_best.txt"
ocr_comparison.save_dataset_to_file(created_path, 0, self._config.MODE_ADD_LINEBREAKS, "msa_best")
if self._config.WRITE_HOCR:
ocr_comparison.save_dataset_to_hocr(created_path, 0, self._config.MODE_ADD_LINEBREAKS, "msa_best")
return created_path, additional_created_files
def create_reduced_file(self, filepath, ignore_whitespace, ignore_emptyline, ignore_tabs):
file = open(filepath, 'r')
#read_data = file.read()
final_data = []
for line in file:
linetocheck = line
if ignore_whitespace:
linetocheck = linetocheck.replace(" ","")
if ignore_tabs:
linetocheck = linetocheck.replace("\t", "")
if ignore_emptyline and not linetocheck.isspace():
final_data.append(linetocheck)
new_filepath_table = filepath + ".red"
file_new = open(new_filepath_table, 'w')
file_new.writelines(final_data)
file_new.close()
file.close()
return new_filepath_table
def validate_table_against_gt(self, filepath_table, filepath_groundtruth, ignore_whitespace=True, ignore_emptyline=True, ignore_tabs=True):
if self._config.DO_ISRI_VAL is True:
isri_handler = IsriHandler()
ignore_whitespace = self._config.ISRI_IGNORE_SPACES
ignore_emptyline = self._config.ISRI_IGNORE_EMPTY_LINES
ignore_tabs = self._config.ISRI_IGNORE_TABS
if ignore_whitespace:
filepath_table = self.create_reduced_file(filepath_table, ignore_whitespace, ignore_emptyline, ignore_tabs)
filepath_groundtruth = self.create_reduced_file(filepath_groundtruth, ignore_whitespace, ignore_emptyline, ignore_tabs)
# Test 'accuracy'
isri_handler.accuracy(filepath_groundtruth, filepath_table, filepath_table+".accreport")
# Test 'wordacc'
isri_handler.wordacc(filepath_groundtruth, filepath_table, None, filepath_table+".waccreport")
def summarize_accuracy_report_sums(self, waccreports, accreports, output_root_path):
if self._config.SUMMARIZE_ISRI_REPORTS is False:
return None, None
basename = os.path.basename(output_root_path)
isri_handler = IsriHandler()
isri_handler.accsum(accreports, output_root_path+"/"+basename+"_complete_summarized_report.accsum")
isri_handler.wordaccsum(waccreports, output_root_path+"/"+basename+"_complete_summarized_report.waccsum")
def summarize_accuracy_reports(self, root_folder, dbname):
if self._config.SUMMARIZE_ISRI_REPORTS is False:
return None, None
isri_handler = IsriHandler()
# isri_handler.accsum()
# isri_handler.wordaccsum()
# isri_handler.groupacc()
onlyfiles = [f for f in listdir(root_folder) if isfile(join(root_folder, f))]
files_waccsum = []
files_accsum = []
for file in onlyfiles:
if file.endswith(".waccreport"):
files_waccsum.append(root_folder+"/"+file)
elif file.endswith(".accreport"):
files_accsum.append(root_folder+"/"+file)
generated_acc_report = root_folder+"/"+dbname+"_summarized_report.accsum"
generated_wacc_report = root_folder+"/"+dbname+"_summarized_report.waccsum"
isri_handler.accsum(files_accsum, generated_acc_report )
isri_handler.wordaccsum(files_waccsum, generated_wacc_report)
return generated_acc_report, generated_wacc_report
def display_stuff(self, path_groundtruth, path_file, firstcall):
pyc_handler = VisualizationHandler()
if firstcall is True:
process = pyc_handler.show_file_comparison_meld(path_groundtruth, path_file)
else:
process = pyc_handler.show_file_comparison_meld(path_groundtruth, path_file, just_add_tab=True)
return process
#testing strange wordaccuracy report production
#pyc_handler.show_file_comparison(FILEPATH_NDIST_RESULT, FILEPATH_MSA_BEST_RESULT)
#pyc_handler.show_file_comparison(FILEPATH_WACCURACY_REPORT_NDIST, FILEPATH_WACCURACY_REPORT_MSA)
```
#### File: ocromore/vocabulary_checker/vocabulary_checker.py
```python
from configuration.configuration_handler import ConfigurationHandler
from akf_corelib.conditional_print import ConditionalPrint
import numpy as np
import re
class VocabularyChecker():
def __init__(self):
config_handler = ConfigurationHandler(first_init=False)
self.config = config_handler.get_config()
self.cpr = ConditionalPrint(self.config.PRINT_VOCABULARY_CHECKER, self.config.PRINT_EXCEPTION_LEVEL,
self.config.PRINT_WARNING_LEVEL)
self.dict_lines = []
self.max_edist = None
self.suggenstion_verbosity = None
#self.spellchecker = None
self.special_chars_borders = "!¦1234567890,)(;.:\"-"
self.pattern_start = re.compile(r"^["+self.special_chars_borders+"]+")
self.pattern_trail = re.compile(r"["+self.special_chars_borders+"]+$")
self.pattern_trail_dash = re.compile(r"[-]$")
self.pattern_only_normal_chars = re.compile(r"[a-zA-Z]+")
def _load_doc(self, filename):
# open the file as read only
file = open(filename, 'r')
# read all text
texts = file.readlines()
# close the file
file.close()
return texts
def without_special_chars(self, input_text):
len_text = len(input_text)
input_text_wo_sc = self.pattern_only_normal_chars.findall(input_text)
if len(input_text_wo_sc) >= 1:
len_text_wo_sc = len(input_text_wo_sc[0])
ratio = len_text_wo_sc / len_text
return input_text_wo_sc[0], ratio
else:
# there are only special characters
return input_text, 0
def get_accumulated_confidence_rate(self, word, word_acc_confs, wildcard_char):
word_reduced, word_starting_borders, word_trailing_borders, change = self.remove_and_give_borders(word)
wsplit = list(word)
if change == False:
acc_conf = 0
for i in range(0, len(wsplit)):
acc_conf += word_acc_confs[i]
return acc_conf, acc_conf/len(wsplit), False, word_starting_borders, word_trailing_borders, word
else:
acc_conf = 0
len_start = len(word_starting_borders)
len_trail = len(word_trailing_borders)
for i in range(len_start,len(wsplit)-len_trail):
acc_conf += word_acc_confs[i]
return acc_conf, acc_conf / (len(wsplit)-len_start-len_trail), True, word_starting_borders, word_trailing_borders, word_reduced
def remove_and_give_borders(self, input_text):
start_sc_text = ""
stop_sc_text = ""
if len(input_text) > 2:
start_special_chars = self.pattern_start.findall(input_text)
stop_special_chars = self.pattern_trail.findall(input_text)
if len(start_special_chars) >= 1:
start_sc_text = start_special_chars[0]
if len(stop_special_chars) >= 1:
stop_sc_text = stop_special_chars[0]
            if not start_special_chars and not stop_special_chars:
return input_text, start_sc_text, stop_sc_text, False
else:
input_text_stripped = input_text.strip(self.special_chars_borders)
return input_text_stripped, start_sc_text, stop_sc_text, True
else:
return input_text, start_sc_text, stop_sc_text, False
def word_trails_with_dash(self,input_text):
trail_dash_res = self.pattern_trail_dash.findall(input_text)
if len(trail_dash_res) >= 1:
return True
else:
return False
def initialize_lines(self, dict_file_path, remove_special_border_chars):
# add the lines from a dictionary path to dict_lines
doc = self._load_doc(dict_file_path)
lines_doc = self._get_lines(doc, remove_special_border_chars)
self.dict_lines.extend(lines_doc)
def _get_lines(self, doc, remove_special_border_chars):
lines_doc = []
for line in doc:
if "--------------" in line:
continue
line = line.replace('\n', "")
if remove_special_border_chars:
# print("lbef",line)
line = line.strip(self.special_chars_borders)
# print("laft",line)
linelen = len(line)
if linelen > 2:
if linelen < self.config.KEYING_RESULT_VC_MIN_VOCAB_WORD_LENGTH:
continue # filter out lengths which are shorter than minimum
if self.config.KEYING_RESULT_VC_DOWNCAST_ALL_CASES:
line_low = line.lower()
if line_low != line:
lines_doc.append(line_low)
lines_doc.append(line)
return lines_doc
def initialize_spellchecker(self):
try:
from pysymspell.symspell import SymSpell
            if not self.dict_lines:
self.cpr.printw("can't initialize spellchecker, please first call initialize_lines")
return
# set paramters
self.max_edist = self.config.KEYING_RESULT_VC_EDIT_DISTANCE_LEVEL
self.suggenstion_verbosity = SymSpell.Verbosity.CLOSEST
# initialize symspell as spellchecker
sym_spell = SymSpell(self.max_edist)
# load dictionary to spellchecker
sym_spell.create_dictionary_by_list(self.dict_lines)
self.spellchecker = sym_spell
except:
            print(
                "To use the vocabulary checker you must pull PySymSpell from GitHub into the directory (NOTE: MIT License) "
                "by activating and initializing the submodule (delete the comment symbol: #):\n"
                ".gitmodule at line: 1-3")
    def correct_text_at_certain_indices_only(self, input_text, possible_error_indices):
        # local import, mirrors initialize_spellchecker (SymSpell is otherwise not in scope here)
        from pysymspell.symspell import SymSpell
        replacement_char = "‖"
        return_term, suggestions, first_letter_high = self.correct_text(input_text, suggestion_verbosity=SymSpell.Verbosity.ALL)
if input_text == return_term:
return return_term
#print("asd")
input_text_array = list(input_text)
#if "Vortrag" in input_text or len(suggestions)>=2:
# print("asd")
suggestion_number_error_correction_count = []
num_of_possible_suggestions = 0
for suggestion in suggestions:
input_text_array_c = input_text_array[:] # copy input text array
sug_array = list(suggestion.term)
for char_index_it, char_it in enumerate(input_text_array):
for char_index_sug, char_sug in enumerate(sug_array):
if input_text_array_c[char_index_it] == sug_array[char_index_sug]:
input_text_array_c[char_index_it] = replacement_char
sug_array[char_index_sug] = replacement_char
continue
# print("asd")
number_of_possible_errors_corrected = 0
# check if char was sustracted in possible error indices
for index in possible_error_indices:
char_to_check = input_text_array_c[index]
char_previous = input_text_array[index]
if char_to_check == char_previous:
number_of_possible_errors_corrected += 1
if number_of_possible_errors_corrected >= 1:
num_of_possible_suggestions += 1
suggestion_number_error_correction_count.append(number_of_possible_errors_corrected)
if len(suggestion_number_error_correction_count) <= 0:
return None
# if num_of_possible_suggestions >=2:
# print("asd")
best_suggestion_index = np.argmax(suggestion_number_error_correction_count)
best_suggestion_ecccount = suggestion_number_error_correction_count[best_suggestion_index]
if best_suggestion_ecccount > 0:
best_suggestion_value = suggestions[best_suggestion_index].term
if first_letter_high:
best_suggestion_value = best_suggestion_value[0].upper() + best_suggestion_value[1:]
return best_suggestion_value
else:
return None
def correct_text(self, input_text, suggestion_verbosity=None):
first_letter_high = False
if self.config.KEYING_RESULT_VC_DOWNCAST_ALL_CASES:
first_letter = input_text[0]
first_letter_high = first_letter.islower() == False
# input_text = input_text.lower()
suggestion_verbosity_used = self.suggenstion_verbosity
if suggestion_verbosity != None:
suggestion_verbosity_used = suggestion_verbosity
suggestions = self.spellchecker.lookup(input_text, suggestion_verbosity_used, self.max_edist)
if len(suggestions) >= 1:
return_term = suggestions[0]._term
if self.config.KEYING_RESULT_VC_DOWNCAST_ALL_CASES and first_letter_high:
return_term = return_term[0].upper() + return_term[1:]
return return_term, suggestions, first_letter_high
else:
return None, suggestions, first_letter_high
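# Hedged usage sketch (assumes a ConfigurationHandler has already been initialized and the
# PySymSpell submodule is available; the dictionary path and the word are placeholders):
#   vc = VocabularyChecker()
#   vc.initialize_lines("dicts/akf_vocabulary.txt", remove_special_border_chars=True)
#   vc.initialize_spellchecker()
#   corrected, suggestions, first_letter_high = vc.correct_text("Vorstanl")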
``` |
{
"source": "JKamlah/PAGETools",
"score": 3
} |
#### File: cli/management/cull.py
```python
from pagetools.src.Page import Page
from pagetools.src.utils import filesystem
from typing import List
import click
from lxml import etree
@click.command("cull", help="Cull datasets based on different conditions. ")
@click.argument("files", nargs=-1, type=str, required=True)
@click.option("-xml", "--needs-xml", is_flag=True, help="Removes all images without any associated XML.")
@click.option("--xml-extension", type=str, default=".xml", multiple=True,
help="Extension of XML files which are considered.")
@click.option("--text", "--needs-text", is_flag=True, help="Removes all XML files without any text content.")
@click.option("-text-index", "--needs-text-index", type=int, default=0, multiple=True,
help="Removes all XML files without any text content without the specified index.")
@click.option("-d", "--dry-run", is_flag=True, help="Only prints cullable files to output.")
def cull_cli(files: List[str], needs_xml: bool, xml_extension: List[str], needs_text: bool, needs_text_index: List[int], dry_run: bool):
# TODO: finish cull
files = filesystem.collect_cullable_files(files, xml_extension)
for file in files:
try:
page = Page(file)
except etree.XMLSyntaxError:
click.echo(f"{file}: Not a valid XML file. Skipping…", err=True)
continue
except etree.ParseError:
click.echo(f"{file}: XML can't be parsed. Skipping…", err=True)
continue
if __name__ == "__main__":
cull_cli()
``` |
{
"source": "JKamlah/RaiseWikibase",
"score": 3
} |
#### File: RaiseWikibase/RaiseWikibase/datamodel.py
```python
import uuid
def label(language='en', value=''):
"""Create and return a label (dict)"""
return {language: {'language': language, 'value': value}}
def alias(language='en', value=''):
"""Create and return an alias (dict)"""
a = {}
if len(value) == 0:
a[language] = [{'language': language, 'value': ''}]
else:
a[language] = [{'language': language, 'value': val} for val in value]
return a
def description(language='en', value=''):
"""Create and return a description (dict)"""
return {language: {'language': language, 'value': value}}
def snak(datatype='', value='', prop='', snaktype='value'):
"""Create and return a snak (dict)"""
if datatype in ['', 'string', 'math', 'external-id', 'url', 'commonsMedia',
'localMedia', 'geo-shape', 'musical-notation', 'tabular-data']:
datavalue = {
'value': value,
'type': 'string'
}
elif datatype == 'wikibase-item':
datavalue = {
'value': {
'entity-type': 'item',
'numeric-id': value[1:],
'id': value
},
'type': 'wikibase-entityid'
}
elif datatype == 'wikibase-property':
datavalue = {
'value': {
'entity-type': 'property',
'numeric-id': value[1:],
'id': value
},
'type': 'wikibase-entityid'
}
elif datatype == 'time':
time, timezone, precision, calendarmodel = value
datavalue = {
'value': {
'time': time,
'timezone': timezone,
'before': 0,
'after': 0,
'precision': precision,
'calendarmodel': calendarmodel # http://www.wikidata.org/entity/Q1985727
},
'type': 'time'
}
elif datatype == 'monolingualtext':
val, language = value
datavalue = {
'value': {
'text': val,
'language': language
},
'type': 'monolingualtext'
}
elif datatype == 'quantity':
val, unit, upper_bound, lower_bound = value
datavalue = {
'value': {
'amount': val,
'unit': unit,
'upperBound': upper_bound,
'lowerBound': lower_bound
},
'type': 'quantity'
}
elif datatype == 'globe-coordinate':
latitude, longitude, precision, globe = value
datavalue = {
'value': {
'latitude': latitude,
'longitude': longitude,
'precision': precision,
'globe': globe
},
'type': 'globecoordinate'
}
elif datatype == 'wikibase-lexeme':
datavalue = {
'value': {
'entity-type': 'lexeme',
'numeric-id': value[1:],
'id': value
},
'type': 'wikibase-entityid'
}
elif datatype == 'wikibase-form':
datavalue = {
'value': {
'entity-type': 'form',
'id': value
},
'type': 'wikibase-entityid'
}
elif datatype == 'wikibase-sense':
datavalue = {
'value': {
'entity-type': 'sense',
'id': value
},
'type': 'wikibase-entityid'
}
else:
raise ValueError('{} is not a valid datatype'.format(datatype))
if snaktype in ['value', 'novalue', 'somevalue']:
snak = {'snaktype': snaktype,
'property': prop,
'datavalue': datavalue,
'datatype': datatype}
else:
raise ValueError("""{} is not a valid snaktype. Use "value, "novalue" or "somevalue".""".format(snaktype))
return snak
def claim(prop='', mainsnak=snak(), qualifiers=[], references=[]):
"""Create and return a claim (dict)"""
return {prop: [{'mainsnak': {**mainsnak, **{'hash': str(uuid.uuid4())}},
'type': 'statement',
'rank': 'normal',
'qualifiers': {prop: qualifiers},
'qualifiers-order': [prop],
'references': [{'snaks': {prop: references}, 'snaks-order': [prop]}],
'id': ''}]}
def entity(labels={}, aliases={}, descriptions={}, claims={}, etype='', datatype=''):
"""Create and return an entity (dict)"""
entity = {'type': etype,
'datatype': datatype,
'id': '',
'labels': labels,
'aliases': aliases,
'descriptions': descriptions,
'claims': claims}
if etype == 'item':
entity.pop('datatype')
return entity
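# Hedged example of assembling a minimal item entity with the factory functions above
# ('P1' and the text values are illustrative placeholders, not entities of a real Wikibase):
#   item = entity(labels=label('en', 'Annual report'),
#                 descriptions=description('en', 'company annual report'),
#                 claims=claim(prop='P1', mainsnak=snak(datatype='string', value='1956', prop='P1')),
#                 etype='item')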
"https://www.mediawiki.org/wiki/Extension_default_namespaces"
namespaces = {'wikibase-item': 120,
'wikibase-property': 122,
'main': 0,
'media': -2,
'special': -1,
'talk': 1,
'user': 2,
'user_talk': 3,
'project': 4,
'project_talk': 5,
'file': 6,
'file_talk': 7,
'mediawiki': 8,
'mediawiki_talk': 9,
'template': 10,
'template_talk': 11,
'help': 12,
'help_talk': 13,
'category': 14,
'category_talk': 15,
'Scribunto': 828}
"https://github.com/SuLab/WikidataIntegrator/blob/main/notebooks/CreateWikidataProperties.ipynb"
datatypes = {'http://wikiba.se/ontology#CommonsMedia': 'commonsMedia',
'http://wikiba.se/ontology#ExternalId': 'external-id',
'http://wikiba.se/ontology#GeoShape': 'geo-shape',
'http://wikiba.se/ontology#GlobeCoordinate': 'globe-coordinate',
'http://wikiba.se/ontology#Math': 'math',
'http://wikiba.se/ontology#Monolingualtext': 'monolingualtext',
'http://wikiba.se/ontology#Quantity': 'quantity',
'http://wikiba.se/ontology#String': 'string',
'http://wikiba.se/ontology#TabularData': 'tabular-data',
'http://wikiba.se/ontology#Time': 'time',
'http://wikiba.se/ontology#Url': 'url',
'http://wikiba.se/ontology#WikibaseItem': 'wikibase-item',
'http://wikiba.se/ontology#WikibaseLexeme': 'lexeme',
'http://wikiba.se/ontology#WikibaseForm': 'form',
'http://wikiba.se/ontology#WikibaseSense': 'sense',
'http://wikiba.se/ontology#MusicalNotation': 'musical-notation',
'http://wikiba.se/ontology#WikibaseProperty': 'wikibase-property'}
``` |
{
"source": "JKamlah/ratocer",
"score": 3
} |
#### File: JKamlah/ratocer/bs4ocr.py
```python
import argparse
from pathlib import Path
import cv2
import numpy as np
# Command line arguments.
def channelscaler(channel, value):
channel = cv2.multiply(channel, value)
    channel = np.where(channel > 255, 255, channel)  # clip values above 255
return channel
# -i 4 -b 150 -d 10 good settings atm for 300 dpi
def subtractor(img, dilsize: int = 15, blursize: int = 59, kernelshape: str = "ellipse",
bluriter: int = 1, fix_blursize: bool = False, blurfilter: str = "Gaussian",
textdilation: bool = True, contrast: bool = False, verbose: bool = False):
"""
The text in the image will be removed, the background smoothed and than extracted from the original image
:param img:
:param dilsize:
:param blursize:
:param kernelshape:
:param normalize:
:param norm_min:
:param norm_max:
:param norm_auto:
:param bluriter:
:param fix_blursize:
:param blurfilter:
:param textdilation:
:param contrast:
:param verbose:
:return:
"""
rgb_planes = cv2.split(img)
result_planes = []
# Only odd blurkernelsize are valid
blursize = blursize + 1 if blursize % 2 == 0 else blursize
for idx, plane in enumerate(rgb_planes[:3]):
dilated_img = plane
kshape = {"rect": cv2.MORPH_RECT, "ellipse": cv2.MORPH_ELLIPSE, "cross": cv2.MORPH_CROSS}.get(kernelshape,
cv2.MORPH_ELLIPSE)
# Reduce influence of the text by dilation (round kernel produce atm the best results)
if textdilation:
dil_kernel = cv2.getStructuringElement(kshape, (int(dilsize / 2), dilsize))
dilated_img = cv2.dilate(plane, dil_kernel, iterations=3)
dil_kernel = cv2.getStructuringElement(kshape, (int(dilsize / 2) + 1, dilsize + 1))
dilated_img = cv2.erode(dilated_img, dil_kernel, iterations=1)
else:
dil_kernel = cv2.getStructuringElement(kshape, (dilsize, dilsize))
dilated_img = cv2.dilate(dilated_img, dil_kernel)
bg_img = dilated_img
for ksize in np.linspace(blursize, 1, num=bluriter):
if not fix_blursize:
if blurfilter == "Gaussian":
bg_img = cv2.GaussianBlur(bg_img,
(int(ksize) + (1 + int(ksize) % 2), int(ksize) + (1 + int(ksize) % 2)), 0)
else:
bg_img = cv2.medianBlur(bg_img, (int(ksize) + (1 + int(ksize) % 2)))
else:
if blurfilter == "Gaussian":
bg_img = cv2.GaussianBlur(bg_img, (blursize, blursize), 0)
else:
bg_img = cv2.medianBlur(bg_img, blursize)
if verbose:
cv2.imwrite(f"Filtered_{idx}.jpg", bg_img)
cv2.imwrite(f"Dilate_{idx}.jpg", dilated_img)
# Subtract bg from fg
diff_img = 255 - cv2.absdiff(plane, bg_img)
norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
# Increases the contrast
if contrast:
diff_img = cv2.add(norm_img, plane * contrast, dtype=cv2.CV_8U)
# Normalize the final image to the range 0-255
norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
result_planes.append(norm_img)
return cv2.merge(result_planes)
def normalizer(img, norm_min: int = 0, norm_max: int = 255, norm_auto: bool = False):
"""
Normalizes the histogram of the image
:param img: path object of the image
:param norm_min: max min value
:param norm_max: min max value
:param auto: auto normalizer
:return:
"""
rgb_planes = cv2.split(img)
result_planes = []
for idx, plane in enumerate(rgb_planes[:3]):
if norm_auto:
            # assumed intent: ignore near-black/near-white outliers when deriving the clipping values
            auto_min = np.min(np.where(plane <= 25, 255, plane))
            auto_max = np.max(np.where(plane >= 220, 0, plane))
plane = np.where(plane <= auto_min, auto_min, plane)
plane = np.where(plane >= auto_max, auto_max, plane)
else:
plane = np.where(plane <= norm_min, norm_min, plane)
plane = np.where(plane >= norm_max, norm_max, plane)
norm_img = cv2.normalize(plane, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
result_planes.append(norm_img)
return cv2.merge(result_planes)
def subtract_bg(args, imgpath=None):
# Set filenames or path
fname = Path(imgpath)
args.outputfolder = fname.parent
print(fname.name + " in process!")
try:
# Try to get dpi information
from PIL import Image
dpi = Image.open(fname).info['dpi']
args.dpi = np.mean(dpi, dtype=int)
print("DPI was set to:", args.dpi)
except:
pass
# Read image
img = cv2.imread(str(fname), -1)
resimg = img
# Channel scaler
if args.scale_channel != 'None' and len(img.shape) > 2:
if args.scale_channel in ['red', 'yellow', 'magenta']:
img[:, :, 0] = channelscaler(img[:, :, 0], args.scale_channel_value)
if args.scale_channel in ['green', 'yellow', 'cyan']:
img[:, :, 1] = channelscaler(img[:, :, 1], args.scale_channel_value)
if args.scale_channel in ['blue', 'magenta', 'cyan']:
img[:, :, 2] = channelscaler(img[:, :, 2], args.scale_channel_value)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Background normalizer
if args.normalize or args.normalize_only:
img = normalizer(img, args.normalize_min, args.normalize_max, args.normalize_auto)
# Background subtractor
if not args.normalize_only:
resimg = subtractor(img, dilsize=args.dilsize, blursize=args.blursize, kernelshape=args.kernelshape,
bluriter=args.bluriter, fix_blursize=args.fixblursize,
textdilation=args.textdilation, contrast=args.contrast, verbose=args.verbose)
# Image binarizer
if args.binarize:
DPI = args.dpi + 1 if args.dpi % 2 == 0 else args.dpi
resimg = resimg if len(resimg.shape) == 2 else cv2.cvtColor(resimg, cv2.COLOR_BGR2GRAY)
resimg = cv2.adaptiveThreshold(resimg, 255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
cv2.THRESH_BINARY, DPI, int(DPI / 12))
args.extensionaddon = args.extensionaddon + ".bin"
# Output
fout = Path(args.outputfolder).absolute().joinpath(
fname.name.rsplit(".", 1)[0] + f"{args.extensionaddon}.{args.extension}")
if not fout.parent.exists():
fout.parent.mkdir()
if args.extension == "jpg":
cv2.imwrite(str(fout.absolute()), resimg, [int(cv2.IMWRITE_JPEG_QUALITY), args.quality])
else:
cv2.imwrite(str(fout.absolute()), resimg)
print(str(fout) + " created!")
return str(fout.absolute())
```
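A short, hedged sketch of calling the background subtractor above directly; the file name and parameter values are illustrative placeholders:
```python
import cv2
from bs4ocr import subtractor  # assumed import from the module above

img = cv2.imread("scan.jpg", -1)  # placeholder input image
clean = subtractor(img, dilsize=15, blursize=59, bluriter=1, textdilation=True)
cv2.imwrite("scan_bgsub.jpg", clean)
```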
#### File: JKamlah/ratocer/crop.py
```python
import argparse
import copy
import os
import warnings
import numpy as np
import scipy.misc as misc
import skimage as ski
import skimage.color as color
import skimage.filters.thresholding as th
import skimage.morphology as morph
import skimage.transform as transform
from scipy.ndimage import measurements
from skimage.io import imread, imsave
####################### CLASSES & METHODS ###########################
class Clippingmask():
def __init__(self, image):
self.height_start, self.width_start = 0, 0
if len(image.shape) > 2:
self.height_stop, self.width_stop, self.rgb = image.shape
else:
self.height_stop, self.width_stop = image.shape
self.user = None
class ImageParam():
def __init__(self, image, input):
if len(image.shape) > 2:
self.height, self.width, self.rgb = image.shape
else:
self.height, self.width = image.shape
self.path = os.path.dirname(input)
self.pathout = os.path.normpath(os.path.dirname(input)+"/TOC-Extraction/")
self.deskewpath = None
self.name = os.path.splitext(os.path.basename(input))[0]
class Linecoords():
def __init__(self, binary, value ,object):
self.height_start = object[0].start
self.height_stop = object[0].stop
self.width_start = object[1].start
self.width_stop = object[1].stop
self.middle = None
self.object = object
self.object_value = value
self.object_matrix = copy.deepcopy(binary[object])
self.segmenttype = None
class SpliceParam():
def __init__(self, input, parts):
self.name = os.path.splitext(input)[0]
self.segment = parts[len(parts)-2]
self.segmenttype = parts[len(parts)-1]
####################### FUNCTIONS ##################################
def create_dir(newdir):
if not os.path.isdir(newdir):
try:
os.makedirs(newdir)
print(newdir)
except IOError:
print(("cannot create %s directoy" % newdir))
def crop_lcol(args, image, image_param, list_linecoords, clippingmask):
# Find left column
pixelheight = set_pixelground(image_param.height)
image = np.rot90(image, args.horlinepos)
for idx, linecoords in enumerate(list_linecoords):
# Header
if idx == 0:
if not args.quiet: print("header")
roi = image[0:linecoords.height_start - 2, 0:image_param.width] # region of interest
roi = np.rot90(roi, 4 - args.horlinepos)
with warnings.catch_warnings():
# Transform rotate convert the img to float and save convert it back
warnings.simplefilter("ignore")
# Crop middle segments
if linecoords.segmenttype == 'B':
if not args.quiet: print("blank")
# Add sum extra space to the cords
roi = image[linecoords.height_start + 2 - pixelheight(
args.addstartheightc):linecoords.height_stop - 2 + pixelheight(args.addstopheightc),
linecoords.width_start:linecoords.width_stop] # region of interest
roi = np.rot90(roi, 4 - args.horlinepos)
with warnings.catch_warnings():
# Transform rotate convert the img to float and save convert it back
warnings.simplefilter("ignore")
if args.horlinetype == 1:
idx = len(list_linecoords) - idx
if 'c' in args.croptypes:
pass
if linecoords.segmenttype == 'L':
# Fixing column size
if idx == 0:
print("line-first")
# linecoords.height_start = clippingmask.height_start + 17
if not args.quiet: print("line")
roi = image[
linecoords.height_start - pixelheight(args.addstartheightab):linecoords.height_stop + pixelheight(
args.addstopheightab),
0:linecoords.width_stop - 2] # region of interest
roi = np.rot90(roi, 4 - args.horlinepos)
with warnings.catch_warnings():
# Transform rotate convert the img to float and save convert it back
warnings.simplefilter("ignore")
if args.horlinetype == 1 and 'b' in args.croptypes:
idx = len(list_linecoords) - idx
elif 'a' in args.croptypes:
return roi
roi = image[
linecoords.height_start - pixelheight(args.addstartheightab):linecoords.height_stop + pixelheight(
args.addstopheightab),
0 + 1:clippingmask.width_stop]
roi = np.rot90(roi, 4 - args.horlinepos)
with warnings.catch_warnings():
# Transform rotate convert the img to float and save convert it back
warnings.simplefilter("ignore")
if args.horlinetype == 1 and 'a' in args.croptypes:
return roi
elif 'a' in args.croptypes:
return roi
return None
def cropping_lcol(imgpath, args):
# Main cropping function that deskew, analyse and crops the image
# read image
print(f"Find toc in {imgpath}")
try:
image = imread("%s" % imgpath)
image_param = ImageParam(image, imgpath)
if args.imgmask != [0.0, 1.0, 0.0, 1.0]:
image = image[int(args.imgmask[0]*image_param.height):int(args.imgmask[1]*image_param.height),
int(args.imgmask[2]*image_param.width):int(args.imgmask[3]*image_param.width)]
image_param = ImageParam(image, imgpath)
except IOError:
print(("cannot open %s" % imgpath))
return 1
create_dir(image_param.pathout)
####################### ANALYSE - LINECOORDS #######################
print("start linecoord-analyse")
clippingmask = Clippingmask(image)
border, labels, list_linecoords, topline_width_stop = linecoords_analyse(args, image, image_param, clippingmask)
####################### CROP #######################################
print("start crop lcol")
lcol = crop_lcol(args, image, image_param, list_linecoords, clippingmask)
return lcol
def cropping_toc(lcol, args):
image_param = ImageParam(lcol, args.input)
if args.imgmask != [0.0, 1.0, 0.0, 1.0]:
lcol = lcol[int(args.imgmask[0] * image_param.height):int(args.imgmask[1] * image_param.height),
int(args.imgmask[2] * image_param.width):int(args.imgmask[3] * image_param.width)]
image_param = ImageParam(lcol, args.input)
clippingmask = Clippingmask(lcol)
border, labels, list_linecoords, topline_width_stop = linecoords_analyse(args, lcol, image_param, clippingmask, get_toc=True)
####################### CROP #######################################
print("start crop toc")
tocpath = crop_toc(args, lcol, image_param, list_linecoords)
return tocpath
def crop_toc(args, image, image_param, list_linecoords):
# Find left column
create_dir(image_param.pathout+os.path.normcase("/"+image_param.name.split(".",1)[0]+"/"))
filepath = image_param.pathout+os.path.normcase("/"+image_param.name.split(".",1)[0]+"/")+image_param.name
image = np.rot90(image, args.horlinepos)
imsave("%s_leftcol.%s" % (filepath, args.extension),image)
for idx, linecoords in enumerate(list_linecoords):
# Header
if idx == 0:
if not args.quiet: print("header")
roi = image[0:linecoords.height_start - 2, 0:image_param.width] # region of interest
roi = np.rot90(roi, 4 - args.horlinepos)
with warnings.catch_warnings():
# Transform rotate convert the img to float and save convert it back
warnings.simplefilter("ignore")
if args.horlinetype == 1 and 'f' in args.croptypes:
pass
elif 'h' in args.croptypes:
imgpath = "%s_TOC.%s" % (filepath, args.extension)
print(imgpath)
imsave(imgpath, roi)
return imgpath
imgpath = "%s_TOC.%s" % (filepath, args.extension)
imsave(imgpath, image)
return imgpath
def deskew(args,image, image_param):
# Deskew the given image based on the horizontal line
# Calculate the angle of the points between 20% and 80% of the line
uintimage = get_uintimg(image)
binary = get_binary(args, uintimage)
for x in range(0,args.binary_dilation):
binary = ski.morphology.binary_dilation(binary,selem=np.ones((3, 3)))
labels, numl = measurements.label(binary)
objects = measurements.find_objects(labels)
deskew_path = None
for i, b in enumerate(objects):
linecoords = Linecoords(image, i, b)
# The line has to be bigger than minwidth, smaller than maxwidth, stay in the top (30%) of the img,
# only one obj allowed and the line isn't allowed to start contact the topborder of the image
if int(args.minwidthhor * image_param.width) < get_width(b) < int(args.maxwidthhor * image_param.width) \
and int(image_param.height * args.minheighthor) < get_height(b) < int(image_param.height * args.maxheighthor) \
and int(image_param.height * args.minheighthormask) < (linecoords.height_start+linecoords.height_stop)/2 < int(image_param.height * args.maxheighthormask) \
and linecoords.height_start != 0:
pixelwidth = set_pixelground(binary[b].shape[1])
mean_y = []
#Calculate the mean value for every y-array
old_start = None
for idx in range(pixelwidth(args.deskewlinesize)):
value_y = measurements.find_objects(labels[b][:, idx + pixelwidth((1.0-args.deskewlinesize)/2)] == i + 1)[0]
if old_start is None:
old_start = value_y[0].start
if abs(value_y[0].start-old_start) < 5:
mean_y.append(value_y[0].start)
old_start = value_y[0].start
polyfit_value = np.polyfit(list(range(0,len(mean_y))), mean_y, 1)
deskewangle = np.arctan(polyfit_value[0]) * (360 / (2 * np.pi))
args.ramp = True
deskew_image = transform.rotate(image, deskewangle, mode="edge")
create_dir(image_param.pathout+os.path.normcase("/deskew/"))
deskew_path = "%s_deskew.%s" % (image_param.pathout+os.path.normcase("/deskew/")+image_param.name, args.extension)
deskewinfo = open(image_param.pathout+os.path.normcase("/deskew/")+image_param.name + "_deskewangle.txt", "w")
deskewinfo.write("Deskewangle:\t%f" % deskewangle)
deskewinfo.close()
image_param.deskewpath = deskew_path
with warnings.catch_warnings():
#Transform rotate convert the img to float and save convert it back
warnings.simplefilter("ignore")
misc.imsave(deskew_path, deskew_image)
break
return deskew_path
def get_binary(args, image):
thresh = th.threshold_sauvola(image, args.threshwindow, args.threshweight)
binary = image > thresh
binary = 1 - binary # inverse binary
binary = np.rot90(binary, args.horlinepos)
return binary
def get_height(s):
return s[0].stop-s[0].start
def get_linecoords(s):
return [[s[0].start,s[0].stop],[s[1].start,s[1].stop]]
def get_mindist(s,length):
# Computes the min. distance to the border and cuts the smallest one in half
d1 = s[1].start
d2 = length - s[1].stop
if d1 < d2:
return d1-int(d1*0.5)
else:
return d2-int(d2*0.5)
def get_uintimg(image):
if len(image.shape) > 2:
uintimage = color.rgb2gray(copy.deepcopy(image))
else:
uintimage = copy.deepcopy(image)
if uintimage.dtype == "float64":
with warnings.catch_warnings():
# Transform rotate convert the img to float and save convert it back
warnings.simplefilter("ignore")
uintimage = ski.img_as_uint(uintimage, force_copy=True)
return uintimage
def get_width(s):
return s[1].stop-s[1].start
def linecoords_analyse(args,origimg, image_param, clippingmask, get_toc=False):
# Computes the clipping coords of the masks
image = get_uintimg(origimg)
origimg = np.rot90(origimg, args.horlinepos)
binary = get_binary(args, image)
labels, numl = measurements.label(binary)
objects = measurements.find_objects(labels)
count_height = 0
count_width = 0
pixelheight = set_pixelground(image_param.height)
pixelwidth = set_pixelground(image_param.width)
    # Init list of line coordinates; the format is: [0]: width.start, width.stop,
    # [1]: height.start, height.stop, [2]: type of line [B = blank, L = vertical line]
    list_linecoords = []
    border = image_param.width
    topline_width_stop = image_param.height
for i, b in enumerate(objects):
# The line has to be bigger than minwidth, smaller than maxwidth, stay in the top (30%) of the img,
# only one obj allowed and the line isn't allowed to start contact the topborder of the image
linecoords = Linecoords(labels, i, b)
if pixelwidth(0.8) < get_width(b) < pixelwidth(args.maxwidthhor):
print(b)
if pixelwidth(args.minwidthhor) < get_width(b) < pixelwidth(args.maxwidthhor) \
and pixelheight(args.minheighthor) < get_height(b) < pixelheight(args.maxheighthor) \
and pixelheight(args.minheighthormask) < linecoords.height_stop < pixelheight(args.maxheighthormask) \
and count_width == 0 \
and linecoords.height_start != 0:
# Distance Calculation - defining the clippingmask
border = get_mindist(b, image_param.width)
topline_width_stop = b[0].stop + 2 # Lowest Point of object + 2 Pixel
if clippingmask.user is None:
clippingmask.width_start = border
clippingmask.width_stop = image_param.width - border
clippingmask.height_start = copy.deepcopy(topline_width_stop)
clippingmask.height_stop = 0
# Get coordinats of the line
labels[b][labels[b] == i + 1] = 0
count_width += 1
if get_toc:
list_linecoords.append(copy.deepcopy(linecoords))
        # We know there must first be a horizontal line
if count_width == 0: continue
if pixelheight(args.minheightver) < get_height(b) < pixelheight(args.maxheightver) \
and pixelwidth(args.minwidthver) < get_width(b) < pixelwidth(args.maxwidthver) \
and pixelwidth(args.minwidthvermask) < (linecoords.width_start+linecoords.width_stop)/2 < pixelwidth(args.maxwidthvermask) \
and float(get_width(b))/float(get_height(b)) < args.maxgradientver:
linecoords.segmenttype = 'L' # Defaultvalue for segmenttype 'P' for horizontal lines
if count_height == 0:
if b[0].start - topline_width_stop > pixelheight(args.minsizeblank+args.minsizeblankobolustop):
blankline = Linecoords(labels,i,b)
blankline.segmenttype = 'B'
blankline.height_start = topline_width_stop
blankline.height_stop = linecoords.height_start
blankline.width_start = border
blankline.width_stop = image_param.width - border
blankline.middle = int(((linecoords.width_start+linecoords.width_stop)-1)/2)
list_linecoords.append(copy.deepcopy(blankline))
count_height += 1
if args.ramp != None:
whiteout_ramp(origimg, linecoords)
list_linecoords.append(copy.deepcopy(linecoords))
count_height += 1
else:
                # Fixes too-short vertical lines by extending their height to the top if they appear before any B part in the image
if topline_width_stop > 0:
linecoords.height_start = topline_width_stop + pixelheight(args.addstartheightab)
list_linecoords.append(copy.deepcopy(linecoords))
count_height += 1
if args.ramp != None:
whiteout_ramp(origimg, linecoords)
elif list_linecoords[count_height - 1].height_stop < b[0].stop:
#Test argument to filter braces
if b[0].start - list_linecoords[count_height - 1].height_stop > pixelheight(args.minsizeblank):
blankline = Linecoords(labels,i,b)
blankline.segmenttype = 'B'
blankline.height_start = list_linecoords[count_height - 1].height_stop
blankline.height_stop = linecoords.height_start
blankline.width_start = border
blankline.width_stop = image_param.width - border
blankline.middle = int(((linecoords.width_start+linecoords.width_stop)-1)/2)
list_linecoords.append(copy.deepcopy(blankline))
count_height += 1
list_linecoords.append(copy.deepcopy(linecoords))
if args.ramp != None:
whiteout_ramp(origimg, linecoords)
count_height += 1
labels[b][labels[b] == i + 1] = 0
else:
if args.ramp != None:
whiteout_ramp(origimg, linecoords)
print(b[0].stop)
list_linecoords[count_height - 1].height_stop = b[0].stop
labels[b][labels[b] == i + 1] = 0
return border, labels, list_linecoords, topline_width_stop
def set_pixelground(image_length):
    # Computes the real pixel number out of the given percentage
def get_pixel(prc):
return int(image_length*prc)
return get_pixel
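# Illustrative example: pixelheight = set_pixelground(3000); pixelheight(0.1) -> 300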
def whiteout_ramp(image, linecoords):
# Dilation enlarge the bright segments and cut them out off the original image
imagesection = image[linecoords.object]
count = 0
for i in morph.dilation(linecoords.object_matrix, morph.square(10)):
whitevalue = measurements.find_objects(i == linecoords.object_value + 1)
if whitevalue:
whitevalue = whitevalue[0][0]
imagesection[count,whitevalue.start:whitevalue.stop] = 255
count +=1
return 0
####################### MAIN-FUNCTIONS ############################################
def get_toc(args, imgpath=None):
if imgpath is not None:
args.input = imgpath
lcol = cropping_lcol(args.input, args)
if lcol is not None:
tocpath = cropping_toc(lcol, args)
return tocpath
else:
print("Left column was not found!")
return ""
``` |
{
"source": "JKamlah/tesseractXplore",
"score": 2
} |
#### File: tesseractXplore/controllers/tesseract_online_controller.py
```python
import threading
import time
from logging import getLogger
import re
from functools import partial
from kivymd.uix.button import MDFlatButton
from kivymd.uix.dialog import MDDialog
from kivymd.uix.filemanager import MDFileManager
from kivymd.uix.menu import MDDropdownMenu
from kivymd.uix.textfield import MDTextField
from tesseractXplore.app import alert, get_app
from tesseractXplore.controllers import Controller
from tesseractXplore.tessprofiles import write_tessprofiles, read_tessprofiles
from tesseractXplore.online_components import ocr_image, ocr_bulk_of_images
from tesseractXplore.process_manager import create_online_threadprocess
logger = getLogger().getChild(__name__)
class TesseractOnlineController(Controller):
""" Controller class to manage image selector screen """
def __init__(self, screen):
super().__init__(screen)
self.screen = screen
self.psms = [' 0 Orientation and script detection (OSD) only.',
' 1 Automatic page segmentation with OSD.',
' 2 Automatic page segmentation, but no OSD, or OCR. (not implemented)',
' 3 Fully automatic page segmentation, but no OSD. (Default)',
' 4 Assume a single column of text of variable sizes.',
' 5 Assume a single uniform block of vertically aligned text.',
' 6 Assume a single uniform block of text.',
' 7 Treat the image as a single text line.',
' 8 Treat the image as a single word.',
' 9 Treat the image as a single word in a circle.',
' 10 Treat the image as a single character.',
' 11 Sparse text. Find as much text as possible in no particular order.',
' 12 Sparse text with OSD.',
' 13 Raw line. Treat the image as a single text line, bypassing hacks that are Tesseract-specific.']
self.init_dropdown()
self.tessprofile_menu = screen.tessprofile_menu
self.output_manager = MDFileManager(
exit_manager=self.exit_output_manager,
select_path=self.select_output,
ext=[""],
)
self.selected_output_folder = None
self.screen.recognize_button.bind(on_release=self.recognize_thread)
self.screen.jobs_button.bind(on_release=self.switch_jobs)
self.screen.model.bind(on_release=get_app().image_selection_online_controller.get_model)
#elf.modelinfos = get_modelinfos()
self.print_on_screen = False
self.ocr_event = None
self.ocr_stop = False
self.last_rec_time = time.time()
# Context menu
self.screen.context_menu.ids.recognize_ctx.bind(on_release=self.recognize_single_thread)
# Load default settings
self.load_default_settings()
def load_default_settings(self):
for profile, profileparam in get_app().tessprofiles_online.items():
if profileparam['default'] == True:
self.load_tessprofile(profileparam)
def stop_rec(self, instance):
""" Unschedule progress event and log total execution time """
if self.ocr_event:
self.ocr_stop = True
logger.info(f'Recognizer: Canceled!')
def init_dropdown(self):
screen = self.screen
# Init dropdownsettingsmenu
self.psm_menu = self.create_dropdown(screen.psm, [{"viewclass":"OneLineListItem", 'text': 'PSM: ' + psm, 'on_release': partial(self.set_psm, 'PSM: ' + psm)} for psm in self.psms])
def disable_rec(self, instance, *args):
self.screen.recognize_button.disabled = True
def enable_rec(self, instance, *args):
self.screen.recognize_button.disabled = False
def recognize_thread(self, instance, *args, file_list=None, profile=None):
self.disable_rec(instance, *args)
self.ocr_event = threading.Thread(target=self.recognize, args=(instance, args),
kwargs={'file_list': file_list, 'profile': profile})
self.ocr_event.setDaemon(True)
self.ocr_event.start()
return self.ocr_event
def recognize_single_thread(self, instance, *args, file_list=None, profile=None):
self.disable_rec(instance, *args)
instance.parent.hide()
self.ocr_single_event = threading.Thread(target=self.recognize, args=(instance, args),
kwargs={'file_list': instance.selected_image.original_source,'profile': profile})
self.ocr_single_event.setDaemon(True)
self.ocr_single_event.start()
return self.ocr_single_event
def recognize(self, instance, *args, file_list=None, profile=None):
""" Recognize image with tesseract """
if profile is None:
profile = {}
if file_list is None:
file_list = get_app().image_selection_online_controller.file_list
if not file_list:
alert(f'Select images to recognize')
self.enable_rec(instance)
return
logger.info(f'Main: Recognize {len(file_list)} images')
model = profile.get("model", "eng" if self.screen.model.current_item == '' else self.screen.model.current_item.split(": ")[1].strip())
psm = profile.get("psm", "3" if self.screen.psm.current_item == '' else self.screen.psm.current_item.split(": ")[1].strip().split(' ',1)[0])
outputformats = ';'.join(profile.get("outputformats", self.active_outputformats()))
if isinstance(file_list, str):
print_on_screen = profile.get("print_on_screen", self.screen.print_on_screen_chk.active)
create_online_threadprocess("Start recognize online", ocr_image, file_list, model=model, psm=psm, outputformats=outputformats, print_on_screen=print_on_screen)
else:
jobname = profile.get("jobname", "Job_01" if self.screen.jobname.text == '' else self.screen.jobname.text)
jobname = re.sub(r"[\s,\.,!,/,\\]", "_", jobname)
create_online_threadprocess(f"{jobname}: Upload images", ocr_bulk_of_images, jobname, file_list, model=model, psm=psm, outputformats=outputformats, overwrite=str(self.screen.overwrite_job_chk.active))
self.enable_rec(instance)
def active_outputformats(self):
return [outputformat for outputformat in ['txt', 'hocr', 'alto', 'pdf', 'tsv'] if
self.screen[outputformat].state == 'down']
def on_tesssettings_click(self, *args):
self.tessprofile_menu.show(*get_app().root_window.mouse_pos)
def search_tessprofile(self):
get_app().tessprofiles_controller.set_profiles()
get_app().switch_screen('tessprofiles')
def load_tessprofile(self, tessprofileparams):
self.screen.model.set_item(f"Model: {tessprofileparams.get('model', 'eng')}")
self.screen.psm.set_item(f"PSM: {self.psms[int(tessprofileparams['psm'])]}")
for outputformat in ['txt', 'hocr', 'alto', 'pdf', 'tsv']:
if outputformat in tessprofileparams['outputformat']:
self.screen[outputformat.strip()].state = 'down'
else:
self.screen[outputformat.strip()].state = 'normal'
self.screen.print_on_screen_chk.active = True if tessprofileparams['print_on_screen'] == "True" else False
return
def save_tessprofile_dialog(self):
def close_dialog(instance, *args):
instance.parent.parent.parent.parent.dismiss()
dialog = MDDialog(title="Name of the profile",
type='custom',
auto_dismiss=False,
content_cls=MDTextField(text="",mode="rectangle"),
buttons=[
MDFlatButton(
text="SAVE", on_release=self.save_tessprofile
),
MDFlatButton(
text="DISCARD", on_release=close_dialog
),
],
)
if get_app()._platform not in ['win32', 'win64']:
# TODO: Focus function seems buggy in win
dialog.content_cls.focused = True
dialog.open()
def save_tessprofile(self, instance):
tessprofilename = instance.parent.parent.parent.parent.content_cls.text
if tessprofilename != '':
get_app().tessprofiles_online[tessprofilename] = {
"model": self.screen.model.current_item.split(" ")[1] if self.screen.model.current_item.split(" ")[
0] == "Model:" else "eng",
"psm": "".join([char for char in self.screen.psm.text if char.isdigit()]),
"outputformat": self.active_outputformats(),
"print_on_screen": str(self.screen.print_on_screen_chk.active),
"default": False
}
write_tessprofiles(get_app().tessprofiles_online, online=True)
instance.parent.parent.parent.parent.dismiss()
def reset_settings(self):
# TODO: Rework resetting
self.reset_text(self.screen.model)
self.reset_text(self.screen.psm)
self.reset_ouputformat()
self.screen.print_on_screen_chk.active = False
def reset_text(self, instance):
instance.text = instance.text + '!'
instance.set_item('')
instance.text = instance.text[:-1]
def reset_ouputformat(self):
self.screen.txt.state = 'normal'
self.screen.alto.state = 'normal'
self.screen.hocr.state = 'normal'
self.screen.pdf.state = 'normal'
self.screen.tsv.state = 'normal'
def create_dropdown(self, caller, item):
menu = MDDropdownMenu(caller=caller,
items=item,
position='bottom',
width_mult=20)
menu.bind()
return menu
def set_psm(self, text):
self.screen.psm.set_item(text)
self.psm_menu.dismiss()
def select_output(self, path=None):
'''It will be called when you click on the file name
or the catalog selection button.
:type path: str;
:param path: path to the selected directory or file;
'''
if path is None: return
self.selected_output_folder = path
self.screen.output.text = f"Selected output directory: {path}"
self.exit_output_manager()
def select_output_folder(self):
self.output_manager.show("/")
def exit_output_manager(self, *args):
'''Called when the user reaches the root of the directory tree.'''
self.output_manager.close()
@staticmethod
def switch_jobs(instance):
get_app().jobs_controller.update_jobdata()
get_app().switch_screen('jobs')
```
#### File: tesseractXplore/tesseractXplore/diff_stdout.py
```python
from functools import partial
import difflib
from kivymd.uix.button import MDFlatButton
from kivymd.uix.dialog import MDDialog
from kivymd.uix.list import MDList, OneLineListItem
from tesseractXplore.app import alert, get_app
from tesseractXplore.stdout_cache import read_stdout_cache
from tesseractXplore.font import get_fontstyle
from tesseractXplore.difflib import subseq_matcher, seq_align
def close_dialog(instance, *args):
instance.parent.parent.parent.parent.dismiss()
def diff_dialog(instance, *args):
image = instance.selected_image.original_source
stdout_cache = read_stdout_cache(image)
layout = MDList()
if len(stdout_cache.keys()) == 0:
alert("No stdout text available.")
return
item = OneLineListItem(text="Select first text", on_release=partial(select_text, stdout_cache.keys()))
layout.add_widget(item)
item = OneLineListItem(
text="Select second text",
on_release=partial(select_text, stdout_cache.keys()),
)
layout.add_widget(item)
dialog = MDDialog(title="Compare stdouts",
type='custom',
auto_dismiss=False,
content_cls=layout,
buttons=[
MDFlatButton(
text="COMPARE", on_release=partial(diff, stdout_cache, image)
),
MDFlatButton(
text="DISCARD", on_release=close_dialog
),
],
)
dialog.open()
def set_text(key, diff_instance, select_instance, *args):
diff_instance.text = key
select_instance.parent.parent.parent.parent.dismiss()
def select_text(stdout_keys, instance, *args):
layout = MDList()
for key in stdout_keys:
item = OneLineListItem(
text=key,
on_release=partial(set_text, key, instance),
)
layout.add_widget(item)
dialog = MDDialog(title="Select text",
type='custom',
auto_dismiss=False,
content_cls=layout,
)
dialog.open()
def set_item(instance, instance_menu, instance_menu_item):
instance.text = instance_menu.text
instance_menu.dismiss()
def diff(stdout_cache, image, instance, *args):
close_dialog(instance)
fst_key = instance.parent.parent.parent.parent.content_cls.children[0].text
seq1 = [line for line in stdout_cache[fst_key]["fulltext"].split("\n") if line.strip() != ""]
snd_key = instance.parent.parent.parent.parent.content_cls.children[1].text
seq2 = [line for line in stdout_cache[snd_key]["fulltext"].split("\n") if line.strip() != ""]
text = ""
#edits, chars, = 0, 0
sum_ratio, chars = 0, 0
for matched_subseq in subseq_matcher(seq1, seq2):
# TODO: Optimize seq_align
# for glyphs in seq_align(*matched_subseq):
# if not glyphs[0]:
# text += "[color=00FFFF]" + glyphs[1] + "[/color]"
# edits += len(glyphs[1])
# elif not glyphs[1]:
# text += "[color=b39ddb]" + glyphs[0] + "[/color]"
# edits += len(glyphs[0])
# elif glyphs[0] != glyphs[1]:
# text += '[color=b39ddb]' + glyphs[0] + "[/color]" + "[color=00FFFF]" + glyphs[1] + "[/color]"
# edits += len(glyphs[1])
# else:
# text += glyphs[0]
# chars += len(glyphs[0])
# text += '\n'
s = difflib.SequenceMatcher(None, *matched_subseq)
sum_ratio += s.ratio()*(len(seq1)+len(seq2))/2
chars += (len(seq1) + len(seq2)) / 2
for groupname, *value in s.get_opcodes():
if groupname == "equal":
text += matched_subseq[0][value[0]:value[1]]
elif groupname == "replace":
text += '[color=b39ddb]' + matched_subseq[0][value[0]:value[1]] + "[/color]" + "[color=00FFFF]" + matched_subseq[1][
value[2]:value[
3]] + "[/color]"
            elif groupname == "insert":  # difflib opcodes: equal/replace/insert/delete
text += "[color=00FFFF]" + matched_subseq[1][value[2]:value[3]] + "[/color]"
else:
text += "[color=b39ddb]" + matched_subseq[0][value[0]:value[1]] + "[/color]"
text += '\n'
#similarity_score = 100
#if chars+edits > 0:
# similarity_score = 100-(edits*100/(chars+edits))
text = f"[b][color=b39ddb]{fst_key}[/color][/b]\n" \
f"[b][color=00FFFF]{snd_key}[/color][/b]\n" \
f"Similarity score: {(sum_ratio/chars)*100:.2f} %\n\n" + text
return diff_result(text, image)
def diff_result(text, image):
# TODO: Rework the implementation
layoutlist = MDList()
get_app().diffstdout_controller.screen['scrollview'].clear_widgets()
# layoutlist =
from kivymd.uix.label import MDLabel
for textline in text.split("\n"):
item = MDLabel(
text=textline,
markup=True,
font_style=get_fontstyle(),
theme_text_color="Primary"
)
item.bg_color = (0, 0, 0, 1)
item.size_hint_y = None
item.height = 40
layoutlist.add_widget(item)
get_app().diffstdout_controller.screen['scrollview'].add_widget(layoutlist)
get_app().diffstdout_controller.screen['image'].source = image
get_app().switch_screen('diffstdout')
get_app().modellist_controller.search = True
```
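Because `diff()` above branches on the opcode tags emitted by `difflib.SequenceMatcher`, here is a minimal standard-library illustration of those tags ('equal', 'replace', 'insert', 'delete') on two short word lists:
```python
# The only tags get_opcodes() ever yields are equal/replace/insert/delete.
import difflib

a = "one two three four".split()
b = "one two thirty four five".split()
for tag, i1, i2, j1, j2 in difflib.SequenceMatcher(None, a, b).get_opcodes():
    print(tag, a[i1:i2], "->", b[j1:j2])
# equal ['one', 'two'] -> ['one', 'two']
# replace ['three'] -> ['thirty']
# equal ['four'] -> ['four']
# insert [] -> ['five']
```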
#### File: tesseractXplore/tesseractXplore/downloader.py
```python
from logging import getLogger
import requests
from kivy.network.urlrequest import UrlRequest
from kivymd.toast import toast
from kivymd.uix.progressbar import MDProgressBar
from tesseractXplore.app import get_app
from tesseractXplore.app.screens import HOME_SCREEN
logger = getLogger().getChild(__name__)
def switch_to_home_for_dl():
""" Just switch to home screen without resetting the progress bar"""
get_app().screen_manager.current = HOME_SCREEN
get_app().close_nav()
def download_error():
""" Error message """
toast('Download: Error')
logger.info(f'Download: Error while downloading')
def download_with_progress(url, file_path, on_success, color="#54B3FF"):
""" Downloader helper customized for the needs """
pb = MDProgressBar(color=color)
status_bar = get_app().image_selection_controller.status_bar
status_bar.clear_widgets()
status_bar.add_widget(pb)
pb.max = 1
pb.min = 0
pb.start()
def update_progress(request, current_size, total_size):
pb.value = current_size / total_size
if pb.value == 1:
pb.stop()
try:
UrlRequest(url=requests.get(url).url, on_progress=update_progress,
chunk_size=1024, on_success=on_success, on_failure=download_error,
file_path=file_path)
except:
download_error()
```
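For comparison, the same chunked-progress idea sketched with plain `requests` rather than Kivy's `UrlRequest`; this is not the app's code path, and the URL and file name in the commented call are placeholders.
```python
# Library-agnostic sketch of a streaming download that reports progress as a
# fraction of content-length, mirroring update_progress() above.
import requests

def download_with_print_progress(url, file_path, chunk_size=1024):
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        total = int(r.headers.get('content-length', 0)) or None
        done = 0
        with open(file_path, 'wb') as fh:
            for chunk in r.iter_content(chunk_size=chunk_size):
                fh.write(chunk)
                done += len(chunk)
                if total:
                    print(f'\rprogress: {done / total:.0%}', end='')
    print()

# download_with_print_progress('https://example.com/eng.traineddata', 'eng.traineddata')
```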
#### File: tesseractXplore/widgets/buttons.py
```python
from kivy.properties import BooleanProperty
from kivy.properties import (
NumericProperty,
)
from kivymd.color_definitions import colors
from kivymd.uix.behaviors.toggle_behavior import MDToggleButton
from kivymd.uix.button import MDFloatingActionButton, \
MDRoundFlatIconButton, \
MDRectangleFlatButton
from kivymd.uix.list import IconRightWidget
from kivymd.uix.tooltip import MDTooltip
from tesseractXplore.app import get_app
class StarButton(IconRightWidget):
"""
Selectable icon button that optionally toggles between 'selected' and 'unselected' star icons
"""
model_id = NumericProperty()
is_selected = BooleanProperty()
def __init__(self, model_id, is_selected=False, **kwargs):
super().__init__(**kwargs)
self.model_id = model_id
self.is_selected = is_selected
self.custom_icon = 'icon' in kwargs
self.set_icon()
def on_press(self):
self.is_selected = not self.is_selected
self.set_icon()
def set_icon(self):
if not self.custom_icon:
self.icon = 'star' if self.is_selected else 'star-outline'
class TooltipFloatingButton(MDFloatingActionButton, MDTooltip):
""" Floating action button class with tooltip behavior """
def set_text(self, interval):
pass
class TooltipIconButton(MDRoundFlatIconButton, MDTooltip):
""" Flat button class with icon and tooltip behavior """
def hex_to_rgba(value, hue=0.5,normalized=False):
value = value.lstrip('#')
lv = len(value)
hex = [int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3)]
if normalized:
hex = [color/255 for color in hex]
hex.append(hue)
return tuple(hex)
class MyToggleButton(MDToggleButton, MDRectangleFlatButton):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.md_bg_color= 0, 206, 209, 0.001
self.background_normal = 0, 206, 209, 0.001
#self.background_down = get_color_from_hex(colors[get_app().theme_cls.primary_palette][get_app().theme_cls.primary_hue])
self.__is_filled = False
def _update_bg(self, ins, val):
"""Updates the color of the background."""
if val == "down":
self.md_bg_color = hex_to_rgba(colors[get_app().theme_cls.primary_palette][get_app().theme_cls.primary_hue])
if (
self.__is_filled is False
            ): # If the background is transparent, and the button is toggled,
                # the font color must be white [1, 1, 1, 1].
self.text_color = self.font_color_down
if self.group:
self._release_group(self)
else:
self.md_bg_color = self.background_normal
if (
self.__is_filled is False
): # If the background is transparent, the font color must be the
# primary color.
self.text_color = hex_to_rgba(colors[get_app().theme_cls.primary_palette][get_app().theme_cls.primary_hue], hue=1.0, normalized=True)
def on_md_bg_color(self, instance, value):
self.background_down = hex_to_rgba(colors[get_app().theme_cls.primary_palette][get_app().theme_cls.primary_hue])
pass
```
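A standalone restatement of the arithmetic `hex_to_rgba` performs, handy for checking the normalized and non-normalized outputs without pulling in Kivy; the colour value is arbitrary.
```python
# '#RRGGBB' -> per-channel ints (or 0-1 floats) plus the trailing alpha/"hue".
def hex_to_rgba_demo(value, hue=0.5, normalized=False):
    value = value.lstrip('#')
    step = len(value) // 3
    channels = [int(value[i:i + step], 16) for i in range(0, len(value), step)]
    if normalized:
        channels = [c / 255 for c in channels]
    channels.append(hue)
    return tuple(channels)

print(hex_to_rgba_demo('#00CED1'))                            # (0, 206, 209, 0.5)
print(hex_to_rgba_demo('#00CED1', hue=1.0, normalized=True))  # (0.0, ~0.808, ~0.820, 1.0)
```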
#### File: tesseractXplore/widgets/__init__.py
```python
def truncate(text: str) -> str:
""" Truncate a label string to not exceed maximum length """
if len(text) > MAX_LABEL_CHARS:
text = text[:MAX_LABEL_CHARS - 2] + '...'
return text
from tesseractXplore.constants import MAX_LABEL_CHARS
from tesseractXplore.widgets.autocomplete import AutocompleteSearch, DropdownContainer, DropdownItem
from tesseractXplore.widgets.buttons import StarButton, TooltipFloatingButton, TooltipIconButton, MyToggleButton
from tesseractXplore.widgets.images import CachedAsyncImage, IconicTaxaIcon, ImageMetaTile
from tesseractXplore.widgets.inputs import DropdownTextField, TextFieldWrapper
from tesseractXplore.widgets.labels import HideableTooltip, TooltipLabel
from tesseractXplore.widgets.lists import SortableList, SwitchListItem, TextInputListItem, ModelListItem, \
ThumbnailListItem, ListItemWithCheckbox, LeftCheckbox
from tesseractXplore.widgets.menus import ObjectContextMenu, AutoHideMenuItem, PhotoContextMenuItem, \
ListContextMenuItem, TessprofileContextMenuItem
from tesseractXplore.widgets.model_autocomplete import ModelAutocompleteSearch
from tesseractXplore.widgets.progress_bar import LoaderProgressBar
from tesseractXplore.widgets.spinner import FntSpinnerOption
from tesseractXplore.widgets.tabs import Tab
from tesseractXplore.widgets.zoom import Zoom
from tesseractXplore.widgets.filechooser import MyFileChooser
``` |
{
"source": "jkan2i/Kanishka-Portfolio",
"score": 3
} |
#### File: Kanishka-Portfolio/app/__init__.py
```python
from os import write
import os
from flask import Flask, render_template, redirect, request
import csv
from werkzeug.utils import redirect
from werkzeug.security import generate_password_hash, check_password_hash
from . import db
from app.db import get_db
app = Flask(__name__)
app.config['DATABASE'] = os.path.join(os.getcwd(), 'flask.sqlite')
db.init_app(app)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/contactform', methods=['GET','POST'])
def submit():
if request.method == 'POST':
try:
data = request.form.to_dict()
form_data(data)
message_form = "Thank you for contacting me. I will get in touch with you shortly."
return render_template('submission.html', message=message_form)
except:
message_form = "Database writing error!"
return render_template('submission.html', message=message_form)
else:
message_form = "Form not submitted. Try again!"
return render_template('submission.html', message=message_form)
@app.route('/health', methods=['GET'])
def health_endpoint():
return '200 OK'
@app.route('/register', methods=('GET', 'POST'))
def register():
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
db = get_db()
error = None
if not username:
error = 'Username is required.'
elif not password:
error = 'Password is required.'
elif db.execute(
'SELECT id FROM user WHERE username = ?', (username,)
).fetchone() is not None:
error = f"User {username} is already registered."
if error is None:
db.execute(
'INSERT INTO user (username, password) VALUES (?, ?)',
(username, generate_password_hash(password))
)
db.commit()
return f"User {username} created successfully"
else:
return error, 418
## TODO: Return a register page
return "Register Page not yet implemented", 501
@app.route('/login', methods=('GET', 'POST'))
def login():
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
db = get_db()
error = None
user = db.execute(
'SELECT * FROM user WHERE username = ?', (username,)
).fetchone()
if user is None:
error = 'Incorrect username.'
elif not check_password_hash(user['password'], password):
error = 'Incorrect password.'
if error is None:
return "Login Successful", 200
else:
return error, 418
## TODO: Return a login page
return "Login Page not yet implemented", 501
def form_data(data):
email = data['email']
subject = data['subject']
message = data['message']
with open('database.csv', 'w', newline='') as csvfile:
form_writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
form_writer.writerow([email, subject, message])
@app.route('/<string:page_name>')
def page_direct(page_name='/'):
try:
return render_template(page_name)
except:
return redirect('/')
``` |
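A hedged sketch of exercising the contact-form route with Flask's test client; it assumes the package above is importable as `app` with its templates in place, and that the form field names match what `form_data` reads.
```python
# Posting the three fields form_data() expects; the values are placeholders.
from app import app

client = app.test_client()
resp = client.post('/contactform', data={
    'email': 'reader@example.com',
    'subject': 'Hello',
    'message': 'Nice portfolio!',
})
print(resp.status_code)  # 200 on success; database.csv now holds the submission
```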
{
"source": "jkan2i/MLH-PE-Project",
"score": 2
} |
#### File: MLH-PE-Project/events/views.py
```python
from django.shortcuts import render, redirect
import calendar
from calendar import HTMLCalendar
from datetime import datetime
from django.http import HttpResponseRedirect
from .models import Event, Venue
from .forms import VenueForm, EventForm
from django.http import HttpResponse
import csv
# Import PDF Stuff
from django.http import FileResponse
import io
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch
from reportlab.lib.pagesizes import letter
# Import Pagination Stuff
from django.core.paginator import Paginator
# Generate a PDF File Venue List
def venue_pdf(request):
# Create Bytestream buffer
buf = io.BytesIO()
# Create a canvas
c = canvas.Canvas(buf, pagesize=letter, bottomup=0)
# Create a text object
textob = c.beginText()
textob.setTextOrigin(inch, inch)
textob.setFont("Helvetica", 14)
# Add some lines of text
#lines = [
# "This is line 1",
# "This is line 2",
# "This is line 3",
#]
# Designate The Model
venues = Venue.objects.all()
# Create blank list
lines = []
for venue in venues:
lines.append(venue.name)
lines.append(venue.address)
lines.append(venue.zip_code)
lines.append(venue.phone)
lines.append(venue.web)
lines.append(venue.email_address)
lines.append(" ")
# Loop
for line in lines:
textob.textLine(line)
# Finish Up
c.drawText(textob)
c.showPage()
c.save()
buf.seek(0)
# Return something
return FileResponse(buf, as_attachment=True, filename='venue.pdf')
# Generate CSV File Venue List
def venue_csv(request):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=venues.csv'
# Create a csv writer
writer = csv.writer(response)
# Designate The Model
venues = Venue.objects.all()
# Add column headings to the csv file
writer.writerow(['Venue Name', 'Address', 'Zip Code', 'Phone', 'Web Address', 'Email'])
    # Loop thru and output
for venue in venues:
writer.writerow([venue.name, venue.address, venue.zip_code, venue.phone, venue.web, venue.email_address])
return response
# Generate Text File Venue List
def venue_text(request):
response = HttpResponse(content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename=venues.txt'
# Designate The Model
venues = Venue.objects.all()
# Create blank list
lines = []
    # Loop thru and output
for venue in venues:
lines.append(f'{venue.name}\n{venue.address}\n{venue.zip_code}\n{venue.phone}\n{venue.web}\n{venue.email_address}\n\n\n')
#lines = ["This is line 1\n",
#"This is line 2\n",
#"This is line 3\n\n",
#"<NAME> Awesome!\n"]
# Write To TextFile
response.writelines(lines)
return response
# Delete a Venue
def delete_venue(request, venue_id):
venue = Venue.objects.get(pk=venue_id)
venue.delete()
return redirect('list-venues')
# Delete an Event
def delete_event(request, event_id):
event = Event.objects.get(pk=event_id)
event.delete()
return redirect('list-events')
def add_event(request):
submitted = False
if request.method == "POST":
form = EventForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect('/add_event?submitted=True')
else:
form = EventForm
if 'submitted' in request.GET:
submitted = True
return render(request, 'events/add_event.html', {'form':form, 'submitted':submitted})
def update_event(request, event_id):
event = Event.objects.get(pk=event_id)
form = EventForm(request.POST or None, instance=event)
if form.is_valid():
form.save()
return redirect('list-events')
return render(request, 'events/update_event.html',
{'event': event,
'form':form})
def update_venue(request, venue_id):
venue = Venue.objects.get(pk=venue_id)
form = VenueForm(request.POST or None, instance=venue)
if form.is_valid():
form.save()
return redirect('list-venues')
return render(request, 'events/update_venue.html',
{'venue': venue,
'form':form})
def search_venues(request):
if request.method == "POST":
searched = request.POST['searched']
venues = Venue.objects.filter(name__contains=searched)
return render(request,
'events/search_venues.html',
{'searched':searched,
'venues':venues})
else:
return render(request,
'events/search_venues.html',
{})
def show_venue(request, venue_id):
venue = Venue.objects.get(pk=venue_id)
return render(request, 'events/show_venue.html',
{'venue': venue})
def list_venues(request):
#venue_list = Venue.objects.all().order_by('?')
venue_list = Venue.objects.all()
# Set up Pagination
p = Paginator(Venue.objects.all(), 3)
page = request.GET.get('page')
venues = p.get_page(page)
nums = "a" * venues.paginator.num_pages
return render(request, 'events/venue.html',
{'venue_list': venue_list,
'venues': venues,
'nums':nums}
)
def add_venue(request):
submitted = False
if request.method == "POST":
form = VenueForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect('/add_venue?submitted=True')
else:
form = VenueForm
if 'submitted' in request.GET:
submitted = True
return render(request, 'events/add_venue.html', {'form':form, 'submitted':submitted})
def all_events(request):
event_list = Event.objects.all().order_by('event_date')
return render(request, 'events/event_list.html',
{'event_list': event_list})
def home(request, year=datetime.now().year, month=datetime.now().strftime('%B')):
name = "John"
month = month.capitalize()
# Convert month from name to number
month_number = list(calendar.month_name).index(month)
month_number = int(month_number)
# create a calendar
cal = HTMLCalendar().formatmonth(
year,
month_number)
# Get current year
now = datetime.now()
current_year = now.year
# Get current time
time = now.strftime('%I:%M %p')
return render(request,
'events/home.html', {
"name": name,
"year": year,
"month": month,
"month_number": month_number,
"cal": cal,
"current_year": current_year,
"time":time,
})
```
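A standalone look at the pagination used in `list_venues()`; Django's `Paginator` accepts a plain list, so no project settings are needed, and the ten fake venues are placeholders.
```python
from django.core.paginator import Paginator

p = Paginator(list(range(1, 11)), 3)   # 10 fake venues, 3 per page
print(p.num_pages)                     # 4
page = p.get_page(2)
print(list(page.object_list))          # [4, 5, 6]
print("a" * p.num_pages)               # 'aaaa' -- the nums string the template loops over
```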
#### File: MLH-PE-Project/members/views.py
```python
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.forms import UserCreationForm
def login_user(request):
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('home')
else:
messages.success(request, ("There Was An Error Logging In, Try Again..."))
return redirect('login')
else:
return render(request, 'authenticate/login.html', {})
def logout_user(request):
logout(request)
messages.success(request, ("You Were Logged Out!"))
return redirect('home')
def register_user(request):
if request.method == "POST":
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data['username']
password = form.cleaned_data['<PASSWORD>']
user = authenticate(username=username, password=password)
login(request, user)
messages.success(request, ("Registration Successful!"))
return redirect('home')
else:
form = UserCreationForm()
return render(request, 'authenticate/register_user.html', {
'form':form,
})
``` |
{
"source": "jkan2i/MLH-Portfolio_New",
"score": 3
} |
#### File: jkan2i/MLH-Portfolio_New/server.py
```python
from os import write
from flask import Flask, render_template, redirect, request
import csv
from werkzeug.utils import redirect
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
"""
@app.route('/index.html')
def home():
return render_template('index.html')
@app.route('/about.html')
def about():
return render_template('about.html')
@app.route('/contact.html')
def contact():
return render_template('contact.html')
@app.route('/works.html')
def works():
return render_template('works.html')
@app.route('/work.html')
def work():
return render_template('work.html')
"""
@app.route('/contactform', methods=['GET','POST'])
def submit():
if request.method == 'POST':
try:
data = request.form.to_dict()
form_data(data)
message_form = "Thank you for contacting me. I will get in touch with you shortly."
return render_template('submission.html', message=message_form)
except:
message_form = "Database writing error!"
return render_template('submission.html', message=message_form)
else:
message_form = "Form not submitted. Try again!"
return render_template('submission.html', message=message_form)
@app.route('/health', methods=['GET'])
def health_endpoint():
return '200 OK'
@app.route('/<string:page_name>')
def page_direct(page_name='/'):
try:
return render_template(page_name)
except:
return redirect('/')
def form_data(data):
email = data['email']
subject = data['subject']
message = data['message']
with open('database.csv', 'w', newline='') as csvfile:
form_writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
form_writer.writerow([email, subject, message])
``` |
{
"source": "jkanche/cirrocumulus",
"score": 3
} |
#### File: cirrocumulus/cirrocumulus/abstract_db.py
```python
from cirrocumulus.envir import *
from cirrocumulus.io_util import unique_id
class AbstractDB:
def __init__(self):
"""Initializes the object
"""
self.job_id_to_job = {}
def capabilities(self): # allow everything
c = {}
c[SERVER_CAPABILITY_RENAME_CATEGORIES] = True
c[SERVER_CAPABILITY_JOBS] = True
c[SERVER_CAPABILITY_SAVE_FEATURE_SETS] = True
c[SERVER_CAPABILITY_SAVE_LINKS] = True
c[SERVER_CAPABILITY_EDIT_DATASET] = True
c[SERVER_CAPABILITY_ADD_DATASET] = True
c[SERVER_CAPABILITY_DELETE_DATASET] = True
return c
def datasets(self, email):
""" Gets list of available datasets
Args:
email: User email or None
Returns:
            A list of dicts. Example:
            [{"id": "dataset_id",
            "name": "dataset_name",
            "title": "dataset_title",
            "owner": false,
            "url": "gs://xx/my_dataset",
            "species": "Mus Musculus"
            }]
"""
raise NotImplementedError()
def category_names(self, email, dataset_id):
"""Gets a list of renamed category names for a dataset
Args:
email: User email or None
dataset_id: Dataset id
Returns:
A list of dicts representing renamed categories. Example:
{"category":"louvain",
"dataset_id":"1",
"original":"1",
"new":"my cell type"}
"""
raise NotImplementedError()
def upsert_category_name(self, email, category, dataset_id, original_name, new_name):
""" Upserts a category name.
Args:
email: User email or None
category: Category in dataset (e.g. louvain)
dataset_id: Dataset id
original_name: Original category name (e.g. "1")
new_name: New name (e.g. "my cell type")
"""
raise NotImplementedError()
def user(self, email):
""" Gets metadata about a user
Args:
email: User email
Returns:
A dict with the keys id and importer. An importer can add datasets to cirrocumulus. Example:
{"id":"user_id",
"importer":false
}
"""
raise NotImplementedError()
# views
def dataset_views(self, email, dataset_id):
""" Gets list of saved dataset views (saved visualization states)
Args:
email: User email or None
dataset_id: Dataset id
Returns:
List of dicts. Example:
[{"id": "view id",
"name": "view name"}]
"""
raise NotImplementedError()
def delete_dataset_view(self, email, dataset_id, view_id):
""" Delete a saved view
Args:
email: User email or None
dataset_id: Dataset id
view_id: View id
"""
raise NotImplementedError()
def get_dataset_view(self, email, dataset_id, view_id):
""" Gets detailed information for a saved dataset view
Args:
email: User email or None
dataset_id: Dataset id
view_id: View id
Returns:
            A dict. Example:
            {"id": "view id",
            "name": "view name",
            "value": "JSON encoded state",
            "notes": "view notes",
            "email": "view creator email"}
"""
raise NotImplementedError()
def upsert_dataset_view(self, email, dataset_id, view_id, name, value):
""" Upserts a dataset view
Args:
email: User email or None
dataset_id: Dataset id
view_id: View id or None to create new view
name: View name
value: JSON encoded state
Returns:
Upserted view id
"""
raise NotImplementedError()
def delete_dataset(self, email, dataset_id):
""" Deletes a dataset
Args:
email: User email or None
dataset_id: Dataset id
"""
raise NotImplementedError()
def upsert_dataset(self, email, dataset_id, dataset_name=None, url=None, readers=None, description=None, title=None,
species=None):
""" Upserts a dataset
Args:
email: User email or None
dataset_id: Dataset id
dataset_name: Name
url: URL
readers: List of allowed readers
description: Description
title: Title
species: Species
Returns:
Upserted dataset id
"""
raise NotImplementedError()
def get_feature_sets(self, email, dataset_id):
""" Gets saved feature sets
Args:
email: User email or None
dataset_id: Dataset id
Returns:
            List of dicts. Example:
            [{"id": "set_id",
            "category": "set category",
            "name": "set name",
            "features": ["gene1", "gene2"]}]
"""
raise NotImplementedError()
def delete_feature_set(self, email, dataset_id, set_id):
""" Deletes a saved feature set
Args:
email: User email or None
dataset_id: Dataset id
set_id: Feature set id
"""
raise NotImplementedError()
def upsert_feature_set(self, email, dataset_id, set_id, category, name, features):
""" Upserts a feature set
Args:
email: User email or None
dataset_id: Dataset id
set_id: Feature set id
category: Set category
name: Set name
features: List of features
Returns:
Upserted id
"""
raise NotImplementedError()
def create_job(self, email, dataset_id, job_name, job_type, params):
""" Creates a job
Args:
email: User email or None
dataset_id: Dataset id
job_name: Job name
job_type: Job type
params: JSON encoded job params
Returns:
job id
"""
import datetime
job_id = unique_id()
self.job_id_to_job[job_id] = dict(id=job_id, dataset_id=dataset_id, name=job_name, type=job_type, params=params,
status=None, result=None, submitted=datetime.datetime.utcnow())
return job_id
def get_job(self, email, job_id, return_result):
""" Gets a job
Args:
email: User email or None
job_id: Job id
return_result: Whether to return the job result or status only
Returns:
The job
"""
job = self.job_id_to_job[job_id]
if return_result:
return job['result']
return dict(id=job['id'], name=job['name'], type=job['type'], status=job['status'], submitted=job['submitted'])
def get_jobs(self, email, dataset_id):
""" Gets a list of all jobs for a dataset.
Args:
email: User email or None
dataset_id: Dataset id
Returns:
List of jobs
"""
results = []
for job in self.job_id_to_job.values():
results.append(dict(id=job['id'], name=job['name'], type=job['type'], status=job['status'],
submitted=job['submitted']))
return results
def delete_job(self, email, job_id):
""" Deletes a job.
Args:
email: User email or None
job_id: Job id
"""
del self.job_id_to_job[job_id]
def update_job(self, email, job_id, status, result):
""" Updates job info.
Args:
email: User email or None
job_id: Job id
status: Job status
result: Job result
"""
job = self.job_id_to_job[job_id]
job['status'] = status
if result is not None:
from cirrocumulus.util import to_json
job['result'] = to_json(result)
```
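A minimal sketch, not the project's real backend, of plugging a custom store into `AbstractDB`: only `datasets()` and `user()` are overridden and the base class keeps its in-memory job handling. The dataset record follows the shape documented in the `datasets()` docstring; its values are invented.
```python
from cirrocumulus.abstract_db import AbstractDB

class StaticDB(AbstractDB):
    """Serves a fixed dataset list; unimplemented methods still raise NotImplementedError."""

    def __init__(self, dataset_records):
        super().__init__()
        self._datasets = dataset_records

    def datasets(self, email):
        return self._datasets

    def user(self, email):
        return {'id': email, 'importer': False}

db = StaticDB([{'id': 'pbmc3k', 'name': 'pbmc3k', 'title': 'PBMC 3k',
                'owner': False, 'url': 'gs://bucket/pbmc3k', 'species': 'Homo sapiens'}])
print(db.datasets(email=None))
```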
#### File: cirrocumulus/cirrocumulus/dataset_api.py
```python
import os
from .file_system_adapter import FileSystemAdapter
def get_path(dataset, dataset_path):
path = dataset['url']
if path[len(path) - 1] == '/': # remove trailing slash
path = path[0:len(path) - 1]
if path.endswith('.json'):
path = os.path.dirname(path)
path = path + '/' + dataset_path
return path
class DatasetAPI:
def __init__(self):
self.suffix_to_provider = {}
self.fs_adapter = FileSystemAdapter()
self.default_provider = None
self.cached_schema = None
self.cached_dataset_id = None
def get_dataset_provider(self, path):
index = path.rfind('.')
if index == -1:
return self.default_provider
suffix = path[index + 1:].lower()
provider = self.suffix_to_provider.get(suffix)
return provider if provider is not None else self.default_provider
def add(self, provider):
if self.default_provider is None:
self.default_provider = provider
suffixes = provider.get_suffixes()
for suffix in suffixes:
self.suffix_to_provider[suffix.lower()] = provider
def schema(self, dataset):
dataset_id = dataset['id']
if self.cached_dataset_id == dataset_id:
return self.cached_schema
path = dataset['url']
provider = self.get_dataset_provider(path)
schema_dict = provider.schema(self.fs_adapter.get_fs(path), path)
if 'summary' in dataset:
schema_dict['summary'] = dataset['summary']
if 'markers' in schema_dict:
schema_dict['markers_read_only'] = schema_dict.pop('markers')
self.cached_schema = schema_dict
self.cached_dataset_id = dataset['id']
return schema_dict
def has_precomputed_stats(self, dataset):
path = dataset['url']
provider = self.get_dataset_provider(path)
return provider.has_precomputed_stats(self.fs_adapter.get_fs(path), path, dataset)
def read_precomputed_stats(self, dataset, obs_keys=[], var_keys=[]):
path = dataset['url']
provider = self.get_dataset_provider(path)
return provider.read_precomputed_stats(self.fs_adapter.get_fs(path), path, obs_keys=obs_keys, var_keys=var_keys)
def read_precomputed_grouped_stats(self, dataset, obs_keys=[], var_keys=[]):
path = dataset['url']
provider = self.get_dataset_provider(path)
return provider.read_precomputed_grouped_stats(self.fs_adapter.get_fs(path), path, obs_keys=obs_keys,
var_keys=var_keys)
def read_precomputed_basis(self, dataset, obs_keys=[], var_keys=[], basis=None):
path = dataset['url']
provider = self.get_dataset_provider(path)
return provider.read_precomputed_basis(self.fs_adapter.get_fs(path), path, obs_keys=obs_keys, var_keys=var_keys,
basis=basis)
def read_dataset(self, dataset, keys=[]):
path = dataset['url']
provider = self.get_dataset_provider(path)
return provider.read_dataset(self.fs_adapter.get_fs(path), path, keys=keys, dataset=dataset,
schema=self.schema(dataset))
```
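A small sketch of the suffix-based provider registry `DatasetAPI` implements; it assumes `cirrocumulus` and its dependencies are installed, and `FakeParquetProvider` is a stand-in, not a real provider class.
```python
# The first provider added becomes the default; lookups go by file suffix.
from cirrocumulus.dataset_api import DatasetAPI

class FakeParquetProvider:
    def get_suffixes(self):
        return ['parquet']

api = DatasetAPI()
api.add(FakeParquetProvider())
print(api.get_dataset_provider('gs://bucket/pbmc3k.parquet'))  # the FakeParquetProvider
print(api.get_dataset_provider('gs://bucket/unknown.xyz'))     # falls back to the default
```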
#### File: cirrocumulus/cirrocumulus/__main__.py
```python
import argparse
import sys
from cirrocumulus import launch, prepare_data, serve
def main():
command_list = [launch, prepare_data, serve]
parser = argparse.ArgumentParser(description='Run a cirro command')
command_list_strings = list(map(lambda x: x.__name__[len('cirrocumulus.'):], command_list))
parser.add_argument('command', help='The command', choices=command_list_strings)
parser.add_argument('command_args', help='The command arguments', nargs=argparse.REMAINDER)
my_args = parser.parse_args()
command_name = my_args.command
command_args = my_args.command_args
cmd = command_list[command_list_strings.index(command_name)]
sys.argv[0] = cmd.__file__
cmd.main(command_args)
if __name__ == '__main__':
main()
```
#### File: cirrocumulus/cirrocumulus/mongo_db.py
```python
import json
import os
from bson import ObjectId
from cirrocumulus.abstract_db import AbstractDB
from cirrocumulus.util import get_email_domain
from pymongo import MongoClient
from .envir import CIRRO_DB_URI, CIRRO_AUTH_CLIENT_ID
from .invalid_usage import InvalidUsage
class MongoDb(AbstractDB):
def __init__(self):
super().__init__()
self.client = MongoClient(os.environ[CIRRO_DB_URI])
self.db = self.client.get_default_database()
def category_names(self, email, dataset_id):
self.get_dataset(email, dataset_id)
collection = self.db.categories
results = []
for doc in collection.find(dict(dataset_id=dataset_id)):
results.append({'category': doc['category'], 'dataset_id': doc['dataset_id'], 'original': doc['original'],
'new': doc['new']})
return results
def upsert_category_name(self, email, category, dataset_id, original_name, new_name):
self.get_dataset(email, dataset_id)
collection = self.db.categories
key = str(dataset_id) + '-' + str(category) + '-' + str(original_name)
if new_name == '':
collection.delete_one(dict(cat_id=key))
else:
collection.update_one(dict(cat_id=key),
{'$set': dict(category=category, dataset_id=dataset_id, original=original_name,
new=new_name)},
upsert=True)
def user(self, email):
collection = self.db.users
doc = collection.find_one(dict(email=email))
if doc is None:
collection.insert_one(dict(email=email))
return {'id': email}
else:
return {'id': email,
'importer': doc.get('importer', False)}
def get_dataset(self, email, dataset_id, ensure_owner=False):
collection = self.db.datasets
auth_client_id = os.environ.get(CIRRO_AUTH_CLIENT_ID)
if auth_client_id is None: # allow unregistered URL
try:
dataset_id.index('://')
return {
'id': dataset_id,
'name': dataset_id,
'url': dataset_id
}
except ValueError:
pass
doc = collection.find_one(dict(_id=ObjectId(dataset_id)))
if doc is None:
raise InvalidUsage('Please provide a valid id', 400)
readers = doc.get('readers')
domain = get_email_domain(email)
if email not in readers and domain not in readers:
raise InvalidUsage('Not authorized', 403)
if ensure_owner and email not in doc['owners']:
raise InvalidUsage('Not authorized', 403)
return {
'id': str(doc['_id']),
'name': doc['name'],
'readers': doc.get('readers'),
'species': doc.get('species'),
'description': doc.get('description'),
'title': doc.get('title'),
'url': doc['url'],
'owner': 'owners' in doc and email in doc['owners']}
def datasets(self, email):
collection = self.db.datasets
results = []
domain = get_email_domain(email)
if domain is None:
query = dict(readers=email)
else:
query = dict(readers={'$in': [email, domain]})
for doc in collection.find(query):
results.append({'id': str(doc['_id']), 'name': doc['name'], 'title': doc.get('title'),
'owner': 'owners' in doc and email in doc['owners'], 'url': doc['url'],
'species': doc.get('species')})
return results
# views
def dataset_views(self, email, dataset_id):
self.get_dataset(email, dataset_id)
collection = self.db.views
results = []
for doc in collection.find(dict(dataset_id=dataset_id)):
results.append(
{'id': str(doc['_id']), 'dataset_id': doc['dataset_id'], 'name': doc['name'], 'value': doc['value'],
'notes': doc.get('notes'), 'email': doc['email']})
return results
def delete_dataset_view(self, email, dataset_id, view_id):
collection = self.db.views
doc = collection.find_one(dict(_id=ObjectId(view_id)))
self.get_dataset(email, doc['dataset_id'])
collection.delete_one(dict(_id=ObjectId(view_id)))
def get_dataset_view(self, email, dataset_id, view_id):
collection = self.db.views
doc = collection.find_one(dict(_id=ObjectId(view_id)))
self.get_dataset(email, doc['dataset_id'])
return {'id': str(doc['_id']), 'dataset_id': doc['dataset_id'], 'name': doc['name'], 'value': doc['value'],
'email': doc['email']}
def upsert_dataset_view(self, email, dataset_id, view_id, name, value):
self.get_dataset(email, dataset_id)
collection = self.db.views
entity_update = {}
if name is not None:
entity_update['name'] = name
if value is not None:
entity_update['value'] = json.dumps(value)
if email is not None:
entity_update['email'] = email
if dataset_id is not None:
entity_update['dataset_id'] = dataset_id
if view_id is None:
return str(collection.insert_one(entity_update).inserted_id)
else:
collection.update_one(dict(_id=ObjectId(view_id)), {'$set': entity_update})
return view_id
def delete_dataset(self, email, dataset_id):
self.get_dataset(email, dataset_id, True)
collection = self.db.datasets
collection.delete_one(dict(_id=ObjectId(dataset_id)))
self.db.filters.delete_many(dict(dataset_id=dataset_id))
self.db.categories.delete_many(dict(dataset_id=dataset_id))
def is_importer(self, email):
# TODO check if user can modify dataset
user = self.db.users.find_one(dict(email=email))
if 'importer' not in user:
            return False
return user['importer']
def upsert_dataset(self, email, dataset_id, dataset_name=None, url=None, readers=None, description=None, title=None,
species=None):
collection = self.db.datasets
update_dict = {}
if dataset_name is not None:
update_dict['name'] = dataset_name
if url is not None:
update_dict['url'] = url
if readers is not None:
readers = set(readers)
if email in readers:
readers.remove(email)
readers.add(email)
update_dict['readers'] = list(readers)
if description is not None:
update_dict['description'] = description
if title is not None:
update_dict['title'] = title
if species is not None:
update_dict['species'] = species
if dataset_id is None: # new dataset
if email != '':
user = self.db.users.find_one(dict(email=email))
if 'importer' not in user or not user['importer']:
raise InvalidUsage('Not authorized', 403)
update_dict['owners'] = [email]
if 'readers' not in update_dict:
update_dict['readers'] = [email]
return str(collection.insert_one(update_dict).inserted_id)
else:
self.get_dataset(email, dataset_id, True)
collection.update_one(dict(_id=ObjectId(dataset_id)), {'$set': update_dict})
return dataset_id
def get_feature_sets(self, email, dataset_id):
self.get_dataset(email, dataset_id)
collection = self.db.feature_sets
results = []
for doc in collection.find(dict(dataset_id=dataset_id)):
results.append(
dict(id=str(doc['_id']), category=doc['category'], name=doc['name'], features=doc['features']))
return results
def delete_feature_set(self, email, dataset_id, set_id):
collection = self.db.feature_sets
doc = collection.find_one(dict(_id=ObjectId(set_id)))
self.get_dataset(email, doc['dataset_id'])
collection.delete_one(dict(_id=ObjectId(set_id)))
def upsert_feature_set(self, email, dataset_id, set_id, category, name, features):
self.get_dataset(email, dataset_id)
collection = self.db.feature_sets
entity_update = {}
if name is not None:
entity_update['name'] = name
if features is not None:
entity_update['features'] = features
if email is not None:
entity_update['email'] = email
if dataset_id is not None:
entity_update['dataset_id'] = dataset_id
if category is not None:
entity_update['category'] = category
if set_id is None:
return str(collection.insert_one(entity_update).inserted_id)
else:
collection.update_one(dict(_id=ObjectId(set_id)), {'$set': entity_update})
return set_id
def create_job(self, email, dataset_id, job_name, job_type, params):
self.get_dataset(email, dataset_id)
import datetime
collection = self.db.jobs
return str(collection.insert_one(
dict(dataset_id=dataset_id, name=job_name, email=email, type=job_type, params=params,
submitted=datetime.datetime.utcnow())).inserted_id)
def get_job(self, email, job_id, return_result):
collection = self.db.jobs
doc = collection.find_one(dict(_id=ObjectId(job_id)),
{"result": 0} if not return_result else {'result': 1, "dataset_id": 1})
self.get_dataset(email, doc['dataset_id'])
if return_result:
return doc['result']
else:
return dict(status=doc['status'])
def get_jobs(self, email, dataset_id):
self.get_dataset(email, dataset_id)
collection = self.db.jobs
results = []
for doc in collection.find(dict(dataset_id=dataset_id), dict(name=1, status=1, email=1, type=1, submitted=1)):
results.append(
dict(id=str(doc['_id']), name=doc['name'], status=doc['status'], type=doc['type'], email=doc['email'],
submitted=doc.get('submitted')))
return results
def update_job(self, email, job_id, status, result):
collection = self.db.jobs
doc = collection.find_one(dict(_id=ObjectId(job_id)))
self.get_dataset(email, doc['dataset_id'])
if result is not None:
from cirrocumulus.util import to_json
result = to_json(result)
collection.update_one(dict(_id=ObjectId(job_id)), {'$set': dict(status=status, result=result)})
def delete_job(self, email, job_id):
collection = self.db.jobs
doc = collection.find_one(dict(_id=ObjectId(job_id)), dict(email=1))
if doc['email'] == email:
collection.delete_one(dict(_id=ObjectId(job_id)))
else:
raise InvalidUsage('Not authorized', 403)
```
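The visibility rule that `datasets()` encodes in its Mongo query (a dataset is listed when its `readers` array contains either the user's email or the email's domain) can be restated without a database; the addresses below are made up.
```python
# Pure-python restatement of the {'readers': {'$in': [email, domain]}} filter.
def dataset_visible(readers, email, domain):
    return email in readers or (domain is not None and domain in readers)

print(dataset_visible(['alice@lab.edu'], 'alice@lab.edu', 'lab.edu'))    # True, explicit reader
print(dataset_visible(['lab.edu'], 'bob@lab.edu', 'lab.edu'))            # True, domain-wide share
print(dataset_visible(['alice@lab.edu'], 'eve@other.org', 'other.org'))  # False
```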
#### File: cirrocumulus/cirrocumulus/simple_data.py
```python
import numpy as np
import pandas as pd
import scipy.sparse
from cirrocumulus.io_util import cirro_id
class SimpleData:
def __init__(self, X, obs, var):
self.obs = obs
self.var = var
self.uns = {}
if X is not None:
if len(X.shape) == 1:
X = np.array([X]).T
n_var = X.shape[1]
n_obs = X.shape[0]
else:
n_var = len(var) if var is not None else 0
n_obs = len(obs) if obs is not None else 0
self.X = X
self.shape = (n_obs, n_var)
@staticmethod
def view(adata, row_slice):
X = adata.X[row_slice] if adata.X is not None else None
obs = adata.obs[row_slice] if adata.obs is not None else None
return SimpleData(X, obs, adata.var)
@staticmethod
def obs_stats(df, columns):
df = df[columns]
# variables on columns, stats on rows, transpose so that stats are on columns
return df.agg(['min', 'max', 'sum', 'mean']).T
@staticmethod
def X_stats(df, var_ids):
df = df[var_ids]
if len(df) == 0:
zeros = np.full(len(var_ids), 0)
empty = np.full(len(var_ids), np.nan)
return pd.DataFrame(
data={'min': empty, 'max': empty, 'sum': zeros,
'numExpressed': zeros,
'mean': empty}, index=var_ids)
return pd.DataFrame(
data={'min': np.min(df.values, axis=0), 'max': np.max(df.values, axis=0), 'sum': df.sum().values,
'numExpressed': (df.values != 0).sum(axis=0),
'mean': df.mean().values}, index=var_ids)
@staticmethod
def get_var_indices(adata, names):
return adata.var.index.get_indexer_for(names)
@staticmethod
def find_markers(adata, key, n_genes):
import scipy.stats as ss
import logging
logger = logging.getLogger("cirro")
marker_results = [] # array of category, name, id, features
category = key
for cat in adata.obs[key].cat.categories:
logger.info('Computing markers for {}, {}'.format(key, cat))
mask = adata.obs[key] == cat
ds1 = adata[mask]
ds_rest = adata[~mask]
gene_names_keep = ds1.X.mean(axis=0) > ds_rest.X.mean(axis=0)
if isinstance(gene_names_keep, np.matrix):
gene_names_keep = gene_names_keep.A1
if len(gene_names_keep) == 0:
continue
ds1 = ds1[:, gene_names_keep]
ds_rest = ds_rest[:, gene_names_keep]
pvals = np.full(ds1.shape[1], 1.0)
ds1_X = ds1.X
ds_rest_X = ds_rest.X
is_sparse = scipy.sparse.isspmatrix(ds1_X)
for i in range(ds1.shape[1]):
v1 = ds1_X[:, i]
v2 = ds_rest_X[:, i]
if is_sparse:
v1 = v1.toarray()[:, 0]
v2 = v2.toarray()[:, 0]
try:
_, pvals[i] = ss.mannwhitneyu(v1, v2, alternative="two-sided")
except ValueError:
# All numbers are identical
pass
fc = ds1_X.mean(axis=0) - ds_rest_X.mean(axis=0)
if isinstance(fc, np.matrix):
fc = fc.A1
df = pd.DataFrame(data=dict(pvals=pvals, fc=fc), index=ds1.var_names)
df = df.sort_values(by=['pvals', 'fc'], ascending=[True, False])
features = df[:n_genes].index.values
marker_results.append(dict(category=category, name=str(cat), features=features))
return marker_results
@staticmethod
def schema(adata):
""" Gets dataset schema.
Returns
schema dict. Example:
{"version":"1.0.0",
"categoryOrder":{
"louvain":["0","1","2","3","4","5","6","7"],
"leiden":["0","1","2","3","4","5","6","7"]},
"var":["TNFRSF4","CPSF3L","ATAD3C"],
"obs":["percent_mito","n_counts"],
"obsCat":["louvain","leiden"],
"shape":[2638,1838],
"embeddings":[{"name":"X_pca","dimensions":3},{"name":"X_pca","dimensions":2},{"name":"X_umap","dimensions":2}]
}
"""
obs_cat = []
obs = []
schema_dict = {'version': '1.0.0'}
marker_results = []
prior_marker_results = adata.uns.get('markers', [])
de_results_format = 'records'
if isinstance(prior_marker_results, str):
import json
prior_marker_results = json.loads(prior_marker_results)
marker_results += prior_marker_results
schema_dict['markers'] = marker_results
n_genes = 10
scanpy_marker_keys = []
for key in adata.uns.keys():
rank_genes_groups = adata.uns[key]
if isinstance(rank_genes_groups, dict) and 'names' in rank_genes_groups and (
'pvals' in rank_genes_groups or 'pvals_adj' in rank_genes_groups or 'scores' in rank_genes_groups):
scanpy_marker_keys.append(key)
de_results = [] # array of dicts containing params logfoldchanges, pvals_adj, scores, names
for scanpy_marker_key in scanpy_marker_keys:
rank_genes_groups = adata.uns[scanpy_marker_key]
params = rank_genes_groups['params']
# pts and pts_rest in later scanpy versions
rank_genes_groups_keys = list(rank_genes_groups.keys())
for k in ['params', 'names']:
if k in rank_genes_groups_keys:
rank_genes_groups_keys.remove(k)
if 'pvals' in rank_genes_groups_keys and 'pvals_adj' in rank_genes_groups_keys:
rank_genes_groups_keys.remove('pvals')
category = '{} ({})'.format(params['groupby'], scanpy_marker_key)
de_result_df = None
group_names = rank_genes_groups['names'].dtype.names
for group_name in group_names:
group_df = pd.DataFrame(index=rank_genes_groups['names'][group_name])
for rank_genes_groups_key in rank_genes_groups_keys:
values = rank_genes_groups[rank_genes_groups_key][group_name]
column_name = '{}:{}'.format(group_name, rank_genes_groups_key)
group_df[column_name] = values
group_df = group_df[group_df.index != 'nan']
if de_result_df is None:
de_result_df = group_df
else:
de_result_df = de_result_df.join(group_df, how='outer')
if n_genes > 0:
marker_results.append(
dict(category=category, name=str(group_name), features=group_df.index[:n_genes]))
if de_results_format == 'records':
de_result_data = de_result_df.reset_index().to_dict(orient='records')
else:
de_result_data = dict(index=de_result_df.index)
for c in de_result_df:
de_result_data[c] = de_result_df[c]
de_result = dict(id=cirro_id(),
color='logfoldchanges' if 'logfoldchanges' in rank_genes_groups_keys else
rank_genes_groups_keys[0],
size='pvals_adj' if 'pvals_adj' in rank_genes_groups_keys else rank_genes_groups_keys[0],
params=params, groups=group_names, fields=rank_genes_groups_keys, type='de',
name=category)
de_result['data'] = de_result_data
de_results.append(de_result)
if 'de_res' in adata.varm: # pegasus
de_res = adata.varm['de_res']
names = de_res.dtype.names
field_names = set() # e.g. 1:auroc
group_names = set()
for name in names:
index = name.index(':')
field_name = name[index + 1:]
group_name = name[:index]
field_names.add(field_name)
group_names.add(group_name)
group_names = list(group_names)
field_names = list(field_names)
de_result_df = pd.DataFrame(data=de_res, index=adata.var.index)
de_result_df.index.name = 'index'
if de_results_format == 'records':
de_result_data = de_result_df.reset_index().to_dict(orient='records')
else:
de_result_data = dict(index=de_result_df.index)
for c in de_res:
de_result_data[c] = de_result_df[c]
de_result = dict(id=cirro_id(), type='de', name='pegasus_de',
color='log2FC' if 'log2FC' in field_names else field_names[0],
size='mwu_qval' if 'mwu_qval' in field_names else field_names[0], groups=group_names,
fields=field_names)
de_result['data'] = de_result_data
de_results.append(de_result)
if n_genes > 0:
field_use = None
for field in ['mwu_qval', 'auroc', 't_qval']:
if field in field_names:
field_use = field
break
if field_use is not None:
field_ascending = field_use != 'auroc'
for group_name in group_names:
fc_column = '{}:log2FC'.format(group_name)
                        name = '{}:{}'.format(group_name, field_use)  # rank by the significance field chosen above
idx_up = de_result_df[fc_column] > 0
df_up = de_result_df.loc[idx_up].sort_values(by=[name, fc_column],
ascending=[field_ascending, False])
features = df_up[:n_genes].index.values
marker_results.append(dict(category='markers', name=str(group_name), features=features))
for key in adata.obs_keys():
if pd.api.types.is_categorical_dtype(adata.obs[key]) or pd.api.types.is_bool_dtype(
adata.obs[key]) or pd.api.types.is_object_dtype(adata.obs[key]):
obs_cat.append(key)
else:
obs.append(key)
schema_dict['results'] = de_results
if 'metagenes' in adata.uns:
metagenes = adata.uns['metagenes']
schema_dict['metafeatures'] = metagenes['var'].index
category_to_order = {}
for key in adata.obs_keys():
if pd.api.types.is_categorical_dtype(adata.obs[key]) and len(adata.obs[key]) < 1000:
category_to_order[key] = adata.obs[key].cat.categories
schema_dict['categoryOrder'] = category_to_order
# spatial_node = adata.uns['spatial'] if 'spatial' in adata.uns else None
#
# if spatial_node is not None:
# spatial_node_keys = list(spatial_node.keys()) # list of datasets
# if len(spatial_node_keys) == 1:
# spatial_node = spatial_node[spatial_node_keys[0]]
images_node = adata.uns.get('images',
[]) # list of {type:image or meta_image, name:image name, image:path to image, spot_diameter:Number}
image_names = list(map(lambda x: x['name'], images_node))
schema_dict['var'] = adata.var_names.values
schema_dict['obs'] = obs
schema_dict['obsCat'] = obs_cat
schema_dict['shape'] = adata.shape
embeddings = []
for key in adata.obsm_keys():
dim = min(3, adata.obsm[key].shape[1])
if dim < 2:
continue
embedding = dict(name=key, dimensions=dim)
try:
image_index = image_names.index(key)
embedding['spatial'] = images_node[image_index]
except ValueError:
pass
if dim == 3:
embeddings.append(embedding)
embedding = embedding.copy()
embedding['dimensions'] = 2
embeddings.append(embedding)
else:
embeddings.append(embedding)
meta_images = adata.uns.get('meta_images', [])
for meta_image in meta_images:
embeddings.append(meta_image)
schema_dict['embeddings'] = embeddings
field_to_value_to_color = dict() # field -> value -> color
schema_dict['colors'] = field_to_value_to_color
for key in adata.uns.keys():
if key.endswith('_colors'):
field = key[0:len(key) - len('_colors')]
if field in adata.obs:
colors = adata.uns[key]
if pd.api.types.is_categorical_dtype(adata.obs[field]):
categories = adata.obs[field].cat.categories
if len(categories) == len(colors):
color_map = dict()
for i in range(len(categories)):
color_map[str(categories[i])] = colors[i]
field_to_value_to_color[field] = color_map
else:
print("Skipping colors for {}".format(key))
return schema_dict
@staticmethod
def to_df(adata, obs_measures, var_measures, dimensions, basis=None):
df = pd.DataFrame()
obs_keys = obs_measures + dimensions
if basis is not None:
obs_keys += basis['coordinate_columns']
for key in obs_keys:
df[key] = adata.obs[key]
indices = SimpleData.get_var_indices(adata, var_measures)
for i in range(len(var_measures)):
X = adata.X[:, indices[i]]
if scipy.sparse.issparse(X):
X = X.toarray()
df[var_measures[i]] = X
return df
```
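A tiny check of the per-gene summary `SimpleData.X_stats` returns, assuming the package is importable; the three-cell, two-gene toy matrix is invented.
```python
import pandas as pd
from cirrocumulus.simple_data import SimpleData

# geneA is expressed in two of three cells, geneB in one.
df = pd.DataFrame({'geneA': [0.0, 1.0, 3.0], 'geneB': [0.0, 0.0, 2.0]})
print(SimpleData.X_stats(df, ['geneA', 'geneB']))
# geneA: min 0.0, max 3.0, sum 4.0, numExpressed 2, mean ~1.33
# geneB: min 0.0, max 2.0, sum 2.0, numExpressed 1, mean ~0.67
```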
#### File: cirrocumulus/tests/test_prepare_data.py
```python
import os
import fsspec
import pandas as pd
import scipy
from cirrocumulus.embedding_aggregator import get_basis
from cirrocumulus.parquet_dataset import ParquetDataset
from cirrocumulus.prepare_data import PrepareData
def read_and_diff(ds_reader, path, test_data, measures, dimensions, continuous_obs, basis):
dataset = dict(id='')
fs = fsspec.filesystem('file')
prepared_df = ds_reader.read_dataset(file_system=fs, path=path, dataset=dataset,
schema=ds_reader.schema(file_system=fs, path=path),
keys=dict(X=measures, obs=dimensions + continuous_obs,
basis=[get_basis(basis, -1, '')]))
if not scipy.sparse.issparse(test_data.X):
test_data.X = scipy.sparse.csr_matrix(test_data.X)
df = pd.DataFrame.sparse.from_spmatrix(test_data.X, columns=measures)
for f in dimensions:
df[f] = test_data.obs[f].values
df[f] = df[f].astype('category')
for f in continuous_obs:
df[f] = test_data.obs[f].values
embedding_data = test_data.obsm[basis]
for i in range(embedding_data.shape[1]):
df["{}_{}".format(basis, i + 1)] = embedding_data[:, i]
prepared_df = prepared_df[df.columns]
pd.testing.assert_frame_equal(df, prepared_df, check_names=False)
def test_prepare_cxg(test_data, measures, dimensions, continuous_obs, basis, tmp_path):
try:
from cirrocumulus.tiledb_dataset import TileDBDataset
output_dir = str(tmp_path)
test_data = test_data[:, measures]
test_data.obs = test_data.obs[dimensions + continuous_obs]
import subprocess
output_cxg = os.path.join(output_dir, 'test.cxg')
output_h5ad = os.path.join(output_dir, 'test.h5ad')
test_data.write(output_h5ad)
subprocess.check_call(['cellxgene', 'convert', '-o', output_cxg, '--disable-corpora-schema', output_h5ad])
read_and_diff(TileDBDataset(), output_cxg, test_data, measures, dimensions, continuous_obs, basis)
except: # tiledb install is optional
print("Skipping TileDB test")
pass
def test_prepare_parquet(test_data, measures, dimensions, continuous_obs, basis, tmp_path):
output_dir = str(tmp_path)
test_data = test_data[:, measures]
test_data.obs = test_data.obs[dimensions + continuous_obs]
prepare_data = PrepareData(adata=test_data, output=output_dir)
prepare_data.execute()
read_and_diff(ParquetDataset(), output_dir, test_data, measures, dimensions, continuous_obs, basis)
``` |
{
"source": "jkanche/epivizFileParser",
"score": 3
} |
#### File: epivizFileParser/benchmarks/formated_test.py
```python
file = "https://obj.umiacs.umd.edu/bigwig-files/39033.bigwig"
from parser import *
import pysam
# import msgpack
import umsgpack
import sys
import pandas as pd
import time
import json
import random
def format_result(input, params, offset=True):
"""
    Format result to an epiviz-compatible format
Args:
input : input dataframe
params : request parameters
offset: defaults to True
Returns:
formatted JSON response
"""
# measurement = params.get("measurement")[0]
# input_json = []
# for item in input_data:
# input_json.append({"chr":item[0], "start": item[1], "end": item[2], measurement: item[3]})
# input = pandas.read_json(ujson.dumps(input_json), orient="records")
# input = input.drop_duplicates()
input.start = input.start.astype("float")
input.end = input.end.astype("float")
# input[measurement] = input[measurement].astype("float")
# input["chr"] = params.get("seqName")
# input = bin_rows(input)
# input = pandas.DataFrame(input_data, columns = ["start", "end", measurement])
globalStartIndex = None
data = {
"rows": {
"globalStartIndex": globalStartIndex,
"useOffset" : offset,
"values": {
"id": None,
"chr": [],
"strand": [],
"metadata": {}
}
},
"values": {
"globalStartIndex": globalStartIndex,
"values": {}
}
}
if len(input) > 0:
globalStartIndex = input["start"].values.min()
if offset:
minStart = input["start"].iloc[0]
minEnd = input["end"].iloc[0]
input["start"] = input["start"].diff()
input["end"] = input["end"].diff()
input["start"].iloc[0] = minStart
input["end"].iloc[0] = minEnd
col_names = input.columns.values.tolist()
row_names = ["chr", "start", "end", "strand", "id"]
data = {
"rows": {
"globalStartIndex": globalStartIndex,
"useOffset" : offset,
"values": {
"id": None,
"chr": [],
"strand": [],
"metadata": {}
}
},
"values": {
"globalStartIndex": globalStartIndex,
"values": {}
}
}
for col in col_names:
if params.get("measurement") is not None and col in params.get("measurement"):
data["values"]["values"][col] = input[col].values.tolist()
elif col in row_names:
data["rows"]["values"][col] = input[col].values.tolist()
else:
data["rows"]["values"]["metadata"][col] = input[col].values.tolist()
else:
data["rows"]["values"]["start"] = []
data["rows"]["values"]["end"] = []
if params.get("metadata") is not None:
for met in params.get("metadata"):
data["rows"]["values"]["metadata"][met] = []
# else:
# data["rows"]["values"]["metadata"] = None
data["rows"]["values"]["id"] = None
if params.get("datasource") != "genes":
data["rows"]["values"]["strand"] = None
return data
params = {
"datasource" : "39033",
"metadata": None,
"measurement": ["39033"]
}
f = BigWig(file)
for u in range(1,1):
for x in range(1,1):
s = random.randint(1, 500)
r = 10**(u+3) + s
print("testing for range ", s, r)
result, _ = f.getRange('chr1', s, r)
formatted_result = format_result(result, params)
# print(formatted_result)
print("size of formatted result")
print(sys.getsizeof(formatted_result))
print("original DF size")
print(sys.getsizeof(result))
t1 = time.time()
ms = umsgpack.packb(formatted_result)
t1 = time.time() - t1
t2 = time.time()
temp = umsgpack.unpackb(ms)
t2 = time.time() - t2
# disk = str(10**(u+3)+x) + ".msg.testfile"
# with open(disk, 'wb') as wr:
# wr.write(bytearray(ms))
# wr.close()
print("time to compress to msgpack: ", t1, "read from msgpack: ", t2)
print("msgpack size: ", sys.getsizeof(ms))
mst1 = t1
mst2 = t2
t1 = time.time()
js = json.dumps(formatted_result)
t1 = time.time() - t1
t2 = time.time()
temp = json.loads(js)
t2 = time.time() - t2
print("time to compress to json: ", t1, "read from json: ", t2)
        print("json size: ", sys.getsizeof(js))
print(" ")
print("time difference to compress: ", mst1 - t1, "time difference to read: ", mst2 - t2)
print("size difference: ", sys.getsizeof(ms) - sys.getsizeof(js))
print("--------------------------")
print("==========================")
# t = time.time()
# result = pd.read_msgpack('msp.msg')
# print(time.time() - t)
# # print(b)
# print(sys.getsizeof(b))
# print(result)
# print(sys.getsizeof(result))
```
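A small illustration of the `useOffset` encoding `format_result` applies to the start/end columns: the first row keeps its absolute coordinate and later rows carry deltas, which a client can undo with a cumulative sum. The coordinates are toy values.
```python
import pandas as pd

starts = pd.Series([100.0, 150.0, 400.0])
offsets = starts.diff()
offsets.iloc[0] = starts.iloc[0]
print(offsets.tolist())           # [100.0, 50.0, 250.0] -- what gets shipped
print(offsets.cumsum().tolist())  # [100.0, 150.0, 400.0] -- what the client reconstructs
```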
#### File: epivizfileserver/measurements/measurementManager.py
```python
from aiocache import cached, SimpleMemoryCache
from aiocache.serializers import JsonSerializer
import pandas as pd
from .measurementClass import DbMeasurement, FileMeasurement, ComputedMeasurement
from ..trackhub import TrackHub
from epivizFileParser import SamFile, BamFile, TbxFile, GtfParsedFile, BigBed, GtfFile
import ujson
import requests
import pandas as pd
from sanic.log import logger as logging
class EMDMeasurementMap(object):
"""
    Manage mapping between measurements in EFS and the metadata service
"""
def __init__(self, url, fileHandler):
self.emd_endpoint = url
self.handler = fileHandler
# collection records from emd
self.collections = dict()
# map { emd id => efs measurement id }
self.measurement_map = dict()
def init(self):
logging.debug("Initializing from emd at {}".format(self.emd_endpoint))
self.init_collections()
records = self.init_measurements()
logging.debug("Done initializing from emd")
return records
def init_collections(self):
req_url = self.emd_endpoint + "/collections/"
logging.debug("Initializing collections from emd")
r = requests.get(req_url)
if r.status_code != 200:
raise Exception("Error initializing collections from emd {}: {}".format(req_url, r.text))
collection_records = r.json()
for rec in collection_records:
# map database id to efs id
self.collections[rec['id']] = rec['collection_id']
logging.debug("Done initializing collections from emd")
def process_emd_record(self, rec):
# this is not elegant but... the epiviz-md api returns an 'id' which is the
# database id, we want the id of the record to be the 'measurement_id' as returned
# by the epiviz-md api endpoint, so let's do that bit of surgery
# we keep a map between ids here
self.measurement_map[rec['id']] = rec['measurement_id']
rec['id'] = rec['measurement_id']
del rec['measurement_id']
collection_id = rec['collection_id']
del rec['collection_id']
collection_name = self.collections[collection_id]
current_annotation = rec['annotation']
if current_annotation is None:
current_annotation = { "collection": collection_name }
else:
current_annotation['collection'] = collection_name
rec['annotation'] = current_annotation
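        # Illustration of the record surgery above with a hypothetical record
        # (values made up): given self.collections == {3: "my_collection"},
        #   {"id": 7, "measurement_id": "ms_1", "collection_id": 3, "annotation": None, ...}
        # is rewritten in place to
        #   {"id": "ms_1", "annotation": {"collection": "my_collection"}, ...}
        # and self.measurement_map gains the entry {7: "ms_1"}.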
def init_measurements(self):
req_url = self.emd_endpoint + "/ms/"
logging.debug("Initializing measurements from emd")
r = requests.get(req_url)
if r.status_code != 200:
raise Exception("Error initializing measurements from emd {}: {}".format(req_url, r.text))
records = r.json()
for rec in records:
self.process_emd_record(rec)
logging.debug("Done initializing measurements")
return records
def sync(self, current_ms):
logging.debug("Syncing with emd at {}".format(self.emd_endpoint))
# this will remove deleted collections from
# the collection id map
new_collections = self.sync_collections()
new_records_from_collections = self.add_new_collections(new_collections)
# this will remove measurements in current_ms
# no longer in the emd db
new_measurements = self.sync_measurements(current_ms)
new_records = self.add_new_measurements(new_measurements)
logging.debug("Done syncing with emd")
return new_records_from_collections + new_records
def sync_collections(self):
req_url = self.emd_endpoint + "/collections/ids"
logging.debug("Syncing collections from emd")
r = requests.get(req_url)
if r.status_code != 200:
raise Exception("Error getting collection ids to sync from emd {}: {}".format(req_url, r.text))
emd_ids = r.json()
new_ids = list(set(emd_ids) - set(self.collections.values()))
del_ids = [ k for k, v in self.collections.items() if v not in emd_ids ]
for id in del_ids:
del self.collections[id]
return new_ids
def add_new_collections(self, new_collection_ids):
logging.debug("Adding new collections from emd")
all_records = []
for collection_id in new_collection_ids:
req_url = self.emd_endpoint + "/collections/" + collection_id
r = requests.get(req_url)
if r.status_code != 200:
raise Exception("Error getting collection with id {} from {}: {}".format(collection_id, req_url, r.text))
rec = r.json()
# map emd db id to efs id
self.collections[rec['id']] = rec['collection_id']
logging.debug("Added new collection {} from emd".format(rec['collection_id']))
logging.debug("Adding measurements from collection {} from emd".format(rec['collection_id']))
req_url = self.emd_endpoint + "/collections/" + collection_id + "/ms"
r = requests.get(req_url)
if r.status_code != 200:
raise Exception("Error getting records for collection with id {} from {}: {}".format(collection_id, req_url, r.text))
records = r.json()
for rec in records:
self.process_emd_record(rec)
logging.debug("Done adding measurements from new collection")
all_records.extend(records)
logging.debug("Done adding new collections from emd")
return all_records
def sync_measurements(self, current_ms):
req_url = self.emd_endpoint + "/ms/ids"
logging.debug("Syncing measurements from emd")
r = requests.get(req_url)
if r.status_code != 200:
raise Exception("Error getting ms ids to sync from emd {}: {}".format(req_url, r.text))
ms_ids = r.json()
new_ids = list(set(ms_ids) - set(self.measurement_map.values()))
del_ids = [ k for k, v in self.measurement_map.items() if v not in ms_ids]
for id in del_ids:
ms_id = self.measurement_map[id]
del current_ms[ms_id]
if id in self.measurement_map:
del self.measurement_map[id]
else:
logging.debug("Tried to del ms map {}: not found".format(id))
return new_ids
def add_new_measurements(self, new_ms_ids):
logging.debug("Adding new ms from emd")
all_records = []
for ms_id in new_ms_ids:
req_url = self.emd_endpoint + "/ms/" + ms_id
r = requests.get(req_url)
if r.status_code != 200:
raise Exception("Error getting ms with id {} from {}: {}".format(ms_id, req_url, r.text))
rec = r.json()
self.process_emd_record(rec)
all_records.append(rec)
logging.debug("Done adding new ms from emd")
return all_records
class MeasurementSet(object):
def __init__(self):
self.measurements = {}
def append(self, ms):
self.measurements[ms.mid] = ms
def __delitem__(self, key):
if key in self.measurements:
del self.measurements[key]
else:
logging.debug("Tried to del ms {}: not found".format(key))
def get(self, key):
return self.measurements[key] if key in self.measurements else None
def get_measurements(self):
return self.measurements.values()
def get_mids(self):
return self.measurements.keys()
class MeasurementManager(object):
"""
Measurement manager class
Attributes:
measurements: list of all measurements managed by the system
"""
def __init__(self):
# self.measurements = pd.DataFrame()
self.genomes = {}
self.measurements = MeasurementSet()
self.emd_endpoint = None
self.emd_map = None
self.tiledb = []
self.stats = {
"getRows": {},
"getValues": {},
"search": {}
}
def import_dbm(self, dbConn):
"""Import measurements from a database.The database
needs to have a `measurements_index` table with
information of files imported into the database.
Args:
dbConn: a database connection
"""
query = "select * from measurements_index"
with dbConn.cursor() as cursor:
cursor.execute(query)
result = cursor.fetchall()
for rec in result:
isGene = False
if "genes" in rec["location"]:
isGene = True
annotation = None
if rec["annotation"] is not None:
annotation = ujson.loads(rec["annotation"])
tempDbM = DbMeasurement("db", rec["column_name"], rec["measurement_name"],
rec["location"], rec["location"], dbConn=dbConn,
annotation=annotation, metadata=ujson.loads(rec["metadata"]),
isGenes=isGene
)
self.measurements.append(tempDbM)
def import_files(self, fileSource, fileHandler=None, genome=None):
"""Import measurements from a file.
Args:
fileSource: location of the configuration file to load
fileHandler: an optional filehandler to use
"""
with open(fileSource, 'r') as f:
json_string = f.read()
records = ujson.loads(json_string)
self.import_records(records, fileHandler=fileHandler, genome=genome)
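        # A minimal configuration file for import_files is a json list of
        # records; the fields below mirror what import_records reads
        # (values are hypothetical, not a required schema):
        #   [{"file_type": "bigwig", "datatype": "bp", "id": "ms_1",
        #     "name": "sample 1", "url": "https://example.org/sample1.bw",
        #     "genome": "hg19", "annotation": {"group": "test"},
        #     "metadata": []}]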
def import_records(self, records, fileHandler=None, genome=None, skip=False):
"""Import measurements from a list of records (usually from a decoded json string)
Args:
            records: list of measurement records to import
            fileHandler: an optional filehandler to use
            genome: genome to use if it is missing from a measurement record
            skip: if True, do not register the imported measurements with the manager
"""
measurements = []
num_records = len(records)
for i, rec in enumerate(records):
format_args = { "i": i,
"num_records": num_records,
"datatype": rec['datatype'],
"file_type": rec['file_type']
}
logging.debug("Importing record {i}/{num_records} with datatype {datatype} and file type {file_type}".format(**format_args))
isGene = False
if "annotation" in rec["datatype"]:
isGene = True
if rec.get("genome") is None and genome is None:
raise Exception("all files must be annotated with its genome build")
tgenome = rec.get("genome")
if tgenome is None:
tgenome = genome
if rec.get("file_type").lower() == "tiledb-dir":
# its expression dataset
samples = pd.read_csv(rec.get("url") + "/cols.tsv", sep="\t", index_col=0)
sample_names = samples["epiviz_ids"].values
# rows = pd.read_csv(rec.get("url") + "/rows", sep="\t", index_col=False, nrows=10)
# metadata = rows.columns.values
# metadata = [ m for m in metadata if m not in ['chr', 'start', 'end'] ]
row_rec = ujson.load(open(rec.get("url") + "/rows.tsv.bgz.json"))
metadata = [m["name"] for m in row_rec["covariates"]]
metadata = [m if m.lower() != "id" else "gene" for m in metadata]
metadata = [m for m in metadata if m not in ['seqnames', 'start', 'end', 'chr']]
print("metadata ", metadata)
for samp, (index, row) in zip(sample_names, samples.iterrows()):
anno = row.to_dict()
anno["_filetype"] = rec.get("file_type")
for key in rec.get("annotation").keys():
anno[key] = rec.get("annotation").get(key)
tempFileM = FileMeasurement("tiledb", samp,
samp + "_" + rec.get("name"),
rec.get("url"), genome=tgenome, annotation=anno,
metadata=metadata, minValue=0, maxValue=20,
isGenes=isGene, fileHandler=fileHandler
)
measurements.append(tempFileM)
if not skip:
self.measurements.append(tempFileM)
elif rec.get("file_type").lower() in ["gwas", "bigbed", "gwas_pip"]:
anno = rec.get("annotation")
if anno is None:
anno = {}
metadata = rec.get("metadata")
if (not metadata or len(metadata) == 0) and rec.get("file_type").lower() in ["gwas", "gwas_pip"]:
bw = BigBed(rec.get("url"))
metadata = bw.get_autosql()
if metadata and len(metadata) > 3:
metadata = metadata[3:]
else:
metadata = []
anno["_filetype"] = rec.get("file_type")
tempFileM = FileMeasurement(rec.get("file_type"), rec.get("id"), rec.get("name"),
rec.get("url"), genome=tgenome, annotation=anno,
metadata=metadata, minValue=0, maxValue=5,
isGenes=isGene, fileHandler=fileHandler
)
measurements.append(tempFileM)
if not skip:
self.measurements.append(tempFileM)
else:
anno = rec.get("annotation")
if anno is None:
anno = {}
anno["_filetype"] = rec.get("file_type")
tempFileM = FileMeasurement(rec.get("file_type"), rec.get("id"), rec.get("name"),
rec.get("url"), genome=tgenome, annotation=anno,
metadata=rec.get("metadata"), minValue=0, maxValue=5,
isGenes=isGene, fileHandler=fileHandler
)
measurements.append(tempFileM)
if not skip:
self.measurements.append(tempFileM)
return(measurements)
def get_ms_from_emd(self, mid):
"""
grabs the measurement from emd by id
"""
req_url = self.emd_endpoint + f"/ms/{mid}"
print("req_url ", req_url)
r = requests.get(req_url, verify=False)
if r.status_code != 200:
raise Exception("Error getting measurements {}: {}".format(mid, r.text))
ms = r.json()
return self.format_ms(ms)
def format_ms(self, rec):
rec['id'] = rec.get('measurement_id')
del rec['measurement_id']
ms = self.import_records([rec], fileHandler=self.emd_fileHandler, skip=True)
return ms[0]
# isGene = False
# if "annotation" in rec["datatype"]:
# isGene = True
# anno = rec.get("annotation")
# if anno is None:
# anno = {}
# return FileMeasurement(rec.get("file_type"), rec.get("id"), rec.get("name"),
# rec.get("url"), genome=rec.get("genome"), annotation=anno,
# metadata=rec.get("metadata"), minValue=0, maxValue=5,
# isGenes=isGene, fileHandler=self.emd_fileHandler
# )
# return rec
def import_ahub(self, ahub, handler=None):
"""Import measurements from annotationHub objects.
Args:
ahub: list of file records from annotationHub
handler: an optional filehandler to use
"""
measurements = []
for i, row in ahub.iterrows():
if "EpigenomeRoadMapPreparer" in row["preparerclass"]:
tempFile = FileMeasurement(row["source_type"], row["ah_id"], row["title"],
row["sourceurl"])
self.measurements.append(tempFile)
measurements.append(tempFile)
return measurements
def get_from_emd(self, url=None):
"""Make a GET request to a metadata api
Args:
            url: the url of the epiviz-md api. If None, the url stored in self.emd_endpoint is used (default: None)
"""
if url is None:
url = self.emd_endpoint
if url is None:
raise Exception("Error reading measurements from emd endpoint: missing url")
req_url = url + "/collections/all"
r = requests.get(req_url)
if r.status_code != 200:
raise Exception("Error getting collections from emd {}".format(req_url))
collection_records = r.json()
collections = {}
for rec in collection_records:
collections[rec['id']] = (rec['collection_id'], rec["name"])
req_url = url + "/ms/all"
r = requests.get(req_url)
if r.status_code != 200:
raise Exception("Error importing measurements from collection {} with url {}: {}".format(collections['collection_id'], req_url, r.text))
records = r.json()
# this is not elegant but... the epiviz-md api returns an 'id' which is the
# database id, we want the id of the record to be the 'measurement_id' as returned
# by the epiviz-md api endpoint, so let's do that bit of surgery
for rec in records:
rec['id'] = rec['measurement_id']
del rec['measurement_id']
collection_id = rec['collection_id']
del rec['collection_id']
collection_name = collections[collection_id][1]
current_annotation = rec['annotation']
if current_annotation is None:
current_annotation = { "collection": collection_name }
else:
current_annotation['collection'] = collection_name
rec['annotation'] = current_annotation
return records
def use_emd(self, url, fileHandler=None):
"""Delegate all getMeasurement calls to an epiviz-md metdata service api
Args:
url: the url of the epiviz-md api
fileHandler: an optional filehandler to use
"""
logging.debug("Will be using emd api at {}".format(url))
self.emd_map = EMDMeasurementMap(url, fileHandler)
records = self.emd_map.init()
self.import_records(records, fileHandler = fileHandler)
def using_emd(self):
return self.emd_endpoint is not None
def import_emd(self, url, fileHandler=None, listen=True):
"""Import measurements from an epiviz-md metadata service api.
Args:
url: the url of the epiviz-md api
handler: an optional filehandler to use
listen: activate 'updateCollections' endpoint to add measurements from the service upon request
"""
self.emd_endpoint = url
self.emd_fileHandler = fileHandler
# records = self.get_from_emd(url)
# self.import_records(records, fileHandler=fileHandler)
def add_computed_measurement(self, mtype, mid, name, measurements, computeFunc, genome=None, annotation=None, metadata=None, computeAxis=1):
"""Add a Computed Measurement
Args:
mtype: measurement type, defaults to 'computed'
mid: measurement id
name: name for this measurement
measurements: list of measurement to use
computeFunc: `NumPy` function to apply
Returns:
a `ComputedMeasurement` object
"""
tempComputeM = ComputedMeasurement(mtype, mid, name, measurements=measurements, computeFunc=computeFunc, genome=genome, annotation=annotation, metadata=metadata, computeAxis=computeAxis)
self.measurements.append(tempComputeM)
return tempComputeM
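        # Usage sketch (hypothetical measurement ids; assumes numpy is imported):
        #   ms1 = mgr.get_measurement("sample1.bw")
        #   ms2 = mgr.get_measurement("sample2.bw")
        #   avg = mgr.add_computed_measurement("computed", "avg", "average of samples",
        #                                      [ms1, ms2], numpy.mean)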
def add_genome(self, genome, url="http://obj.umiacs.umd.edu/genomes/", type=None, fileHandler=None):
"""Add a genome to the list of measurements. The genome has to be tabix indexed for the file server
to make remote queries. Our tabix indexed files are available at https://obj.umiacs.umd.edu/genomes/index.html
Args:
            genome: genome build name (for example "hg19") if type is "tabix", or the full location of the gtf file if type is "gtf"
            type: one of "tabix", "efs-tsv", "efs-dir" or "gtf"
            url: url to the genome file
            fileHandler: an optional filehandler to use
"""
isGene = True
tempGenomeM = None
if type == "tabix":
gurl = url + genome + "/" + genome + ".txt.gz"
tempGenomeM = FileMeasurement("tabix", genome, genome,
gurl, genome, annotation={"group": "genome"},
metadata=["geneid", "exons_start", "exons_end", "gene"], minValue=0, maxValue=5,
isGenes=isGene, fileHandler=fileHandler, columns=["chr", "start", "end", "width", "strand", "geneid", "exon_starts", "exon_ends", "gene"]
)
# self.genomes.append(tempGenomeM)
# gtf_file = TbxFile(gurl)
# self.genomes[genome] = gtf_file
self.measurements.append(tempGenomeM)
elif type == "efs-tsv":
gurl = url
tempGenomeM = FileMeasurement("gtfparsed", genome, genome,
gurl, genome=genome, annotation={"group": "genome"},
metadata=["geneid", "exons_start", "exons_end", "gene"], minValue=0, maxValue=5,
isGenes=isGene, fileHandler=fileHandler, columns=["chr", "start", "end", "width", "strand", "geneid", "exon_starts", "exon_ends", "gene"]
)
gtf_file = GtfParsedFile(gurl)
self.genomes[genome] = gtf_file
self.measurements.append(tempGenomeM)
elif type == "efs-dir":
genome_url = url + "/genes.tsv.gz"
print("Genome " + genome_url)
tempGenomeM = FileMeasurement("gtfparsed", genome, genome,
genome_url, genome=genome, annotation={"group": "genome", "collection": "genome"},
metadata=["geneid", "exons_start", "exons_end", "gene"], minValue=0, maxValue=5,
isGenes=isGene, fileHandler=fileHandler, columns=["chr", "start", "end", "width", "strand", "geneid", "exon_starts", "exon_ends", "gene"]
)
gtf_file = GtfParsedFile(genome_url)
self.genomes[genome] = gtf_file
self.measurements.append(tempGenomeM)
tx_url = url + "/transcripts.tsv.bgz"
print("Transcript" + tx_url)
tempTxM = FileMeasurement("transcript", genome + ".transcripts", genome + ".transcripts",
tx_url, genome=genome, annotation={"group": "transcript", "collection": "transcript"},
metadata=[], minValue=0, maxValue=5,
isGenes=isGene, fileHandler=fileHandler
)
self.measurements.append(tempTxM)
elif type == "gtf":
gurl = url
tempGenomeM = FileMeasurement("gtf", genome, genome,
gurl, genome=genome, annotation={"group": "genome"},
metadata=["geneid", "exons_start", "exons_end", "gene"], minValue=0, maxValue=5,
isGenes=isGene, fileHandler=fileHandler, columns=["chr", "start", "end", "width", "strand", "geneid", "exon_starts", "exon_ends", "gene"]
)
gtf_file = GtfFile(gurl)
self.genomes[genome] = gtf_file
self.measurements.append(tempGenomeM)
return(tempGenomeM)
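        # Usage sketch (assumes the default tabix genome index is reachable
        # and that a file handler has already been created):
        #   mgr = MeasurementManager()
        #   mgr.add_genome("hg19", type="tabix", fileHandler=handler)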
def get_measurements(self):
"""Get all available measurements
"""
if self.emd_map is not None:
            # this will remove measurements in self.measurements
            # that are no longer in the emd dbs
            logging.debug("Getting measurements. Cur ms {}".format(list(self.measurements.get_mids())))
new_records = self.emd_map.sync(self.measurements)
self.import_records(new_records, fileHandler = self.emd_map.handler)
return self.measurements.get_measurements()
def get_measurement(self, ms_id):
"""Get a specific measurement
"""
if self.measurements.get(ms_id) is not None:
return self.measurements.get(ms_id)
elif self.emd_endpoint is not None and "::" in ms_id:
return self.get_ms_from_emd(ms_id)
return None
# return self.measurements.get(ms_id)
def get_genomes(self):
"""Get all available genomes
"""
return self.genomes
def import_trackhub(self, hub, handler=None):
"""Import measurements from annotationHub objects.
Args:
ahub: list of file records from annotationHub
handler: an optional filehandler to use
"""
measurements = []
trackhub = TrackHub(hub)
        # TrackHub exposes its parsed measurements on the 'measurments' attribute
        for m in trackhub.measurments:
            if handler is not None:
                m.fileHandler = handler
            measurements.append(m)
            self.measurements.append(m)
return measurements
``` |
{
"source": "jkan-cn/cortx",
"score": 2
} |
#### File: drp/DRP-python/DRP_python.py
```python
import asyncio
import websockets
import json
class DRP_Endpoint:
def __init__(self, wsConn, drpNode, endpointID, endpointType):
self.wsConn = wsConn
self.drpNode = drpNode
if (self.wsConn):
self.wsConn.drpEndpoint = self
self.EndpointID = endpointID
self.EndpointType = endpointType
self.EndpointCmds = {}
self.ReplyHandlerQueue = {}
self.StreamHandlerQueue = {}
self.TokenNum = 1
self.Subscriptions = {}
self.openCallback = None
self.closeCallback = None
self.RegisterMethod("getCmds", "GetCmds")
asyncio.ensure_future(self.wsReceiveLoop())
async def wsReceiveLoop(self):
while True:
#print(f"> Waiting for next incoming message...")
json_in = await self.wsConn.recv()
print(f"< {json_in}")
self.ReceiveMessage(json_in)
def GetToken(self):
token = self.TokenNum
self.TokenNum += 1
return token
def AddReplyHandler(self, callback):
token = self.GetToken()
self.ReplyHandlerQueue[token] = callback;
return token
def DeleteReplyHandler(self, token):
del self.ReplyHandlerQueue[token]
def AddStreamHandler(self, callback):
streamToken = self.GetToken()
self.StreamHandlerQueue[streamToken] = callback
return streamToken
def DeleteStreamHandler(self, streamToken):
del self.StreamHandlerQueue[streamToken]
def RegisterMethod(self, methodName, method):
thisEndpoint = self
if (callable(method)):
thisEndpoint.EndpointCmds[methodName] = method
elif (callable(getattr(self, method, None))):
thisEndpoint.EndpointCmds[methodName] = getattr(self, method, None)
else:
typeObj = getattr(self, method, None)
typeName = ""
if typeObj is None:
typeName = "None"
else:
typeName = typeObj.__name__
thisEndpoint.log(f"Cannot add EndpointCmds[{cmd}] -> sourceObj[{method}] of type {typeName}")
async def SendCmd(self, serviceName, method, params, awaitResponse, routeOptions):
thisEndpoint = self
returnVal = None
token = None
async def awaitFunc(returnData):
return returnData
if (awaitResponse):
            # TODO - Update this to add a go style channel to the ReplyHandlerQueue
replyQueue = asyncio.Queue()
token = thisEndpoint.AddReplyHandler(replyQueue);
else:
# We don't expect a response; leave reply token null
token = None
# Send command
sendCmd = DRP_Cmd(serviceName, method, params, token, routeOptions)
print(f"> {json.dumps(sendCmd)}")
await thisEndpoint.wsConn.send(json.dumps(sendCmd))
#print(f"Command sent")
if (awaitResponse):
            # Get data from queue
returnVal = await replyQueue.get()
else:
returnVal = None
return returnVal
async def ProcessCmd(self, message):
thisEndpoint = self
cmdResults = {
"status": 0,
"output": None
}
if ("routeOptions" not in message or message.routeOptions.tgtNodeID == thisEndpoint.drpNode.NodeID):
# Execute locally
# Is the message meant for the default DRP service?
if ("serviceName" not in message or message["serviceName"] == "DRP"):
# Yes - execute here
if ("method" not in message):
cmdResults["output"] = "message.method not specified"
elif (message["method"] not in thisEndpoint.EndpointCmds):
cmdResults["output"] = f"{message['method']} not in EndpointCmds"
elif (not callable(thisEndpoint.EndpointCmds[message["method"]])):
cmdResults["output"] = f"EndpointCmds[{message['method']}] is not callable"
else:
# Execute method
try:
cmdResults["output"] = await thisEndpoint.EndpointCmds[message.get("method",None)](message.get("params",None), thisEndpoint, message.get("token",None))
cmdResults["status"] = 1
                    except Exception:
cmdResults["output"] = "Could not execute"
else:
# No - treat as ServiceCommand
try:
cmdResults["output"] = await thisEndpoint.drpNode.ServiceCommand(message, thisEndpoint)
cmdResults["status"] = 1
                except Exception as err:
                    cmdResults["output"] = str(err)
else:
# This message is meant for a remote node
try:
                targetNodeEndpoint = await thisEndpoint.drpNode.VerifyNodeConnection(message["routeOptions"]["tgtNodeID"])
                cmdResults["output"] = await targetNodeEndpoint.SendCmd(message["serviceName"], message["method"], message["params"], True, None)
            except Exception:
                # Either could not get connection to node or command send attempt errored out
                pass
# Reply with results
if ("token" in message and message["token"] is not None):
await thisEndpoint.SendReply(message["token"], cmdResults["status"], cmdResults["output"])
# SendReply
async def SendReply(self, token, status, payload):
if (self.wsConn.state == 1):
replyString = None
try:
replyString = json.dumps(DRP_Reply(token, status, payload, None, None))
except:
replyString = json.dumps(DRP_Reply(token, 0, "Failed to stringify response", None, None))
print(f"> {replyString}")
await self.wsConn.send(replyString);
return 0
else:
return 1
async def ProcessReply(self, message):
thisEndpoint = self
# Yes - do we have the token?
if (message["token"] in thisEndpoint.ReplyHandlerQueue):
# We have the token - execute the reply callback
thisEndpoint.ReplyHandlerQueue[message["token"]].put_nowait(message)
# Delete if we don't expect any more data
if message["status"] < 2:
del thisEndpoint.ReplyHandlerQueue[message["token"]]
return False;
else:
# We do not have the token - tell the sender we do not honor this token
return True;
def ReceiveMessage(self, rawMessage):
thisEndpoint = self
message = {}
try:
message = json.loads(rawMessage);
except:
thisEndpoint.log("Received non-JSON message, disconnecting client... %s", wsConn._socket.remoteAddress);
thisEndpoint.wsConn.close();
return
if ("type" not in message):
thisEndpoint.log("No message.type; here's the JSON data..." + rawMessage);
return
        async def default(message):
            thisEndpoint.log("Invalid message.type; here's the JSON data..." + rawMessage)
switcher = {
"cmd": thisEndpoint.ProcessCmd,
"reply": thisEndpoint.ProcessReply,
#"stream": await thisEndpoint.ProcessStream
}
func = switcher.get(message["type"], default);
asyncio.ensure_future(func(message))
# WatchStream
# RegisterSubscription
async def GetCmds(self, params, endpoint, token):
return list(self.EndpointCmds.keys())
def log(self, logMessage):
thisEndpoint = self
if (thisEndpoint.drpNode is not None):
thisEndpoint.drpNode.log(logMessage)
else:
print(logMessage)
class DRP_Cmd(dict):
def __init__(self, serviceName, method, params, token, routeOptions):
dict.__init__(self, type="cmd", serviceName=serviceName, method=method, params=params, token=token, routeOptions=routeOptions)
class DRP_Reply(dict):
def __init__(self, token, status, payload, srcNodeID, tgtNodeID):
dict.__init__(self, type="reply", token=token, status=status, payload=payload, srcNodeID=srcNodeID, tgtNodeID=tgtNodeID)
class DRP_Stream(dict):
def __init__(self, token, status, payload, srcNodeID, tgtNodeID):
dict.__init__(self, type="stream", token=token, status=status, payload=payload, srcNodeID=srcNodeID, tgtNodeID=tgtNodeID)
class DRP_RouteOptions:
    def __init__(self, srcNodeID, tgtNodeID, routeHistory=None):
        self.srcNodeID = srcNodeID
        self.tgtNodeID = tgtNodeID
        self.routeHistory = routeHistory
async def hello(websocket, path):
while True:
json_in = await websocket.recv()
print(f"< {json_in}")
json_dict = json.loads(json_in)
if "type" in json_dict:
# Get the type
msg_type = json_dict["type"]
print(f"> This is a '{msg_type}' message")
else:
# No type
print(f"> Unknown message -> '{json_in}'")
greeting = '{"reply":"bleh"}'
await websocket.send(greeting)
print(f"> {greeting}")
async def wsRecv(websocket, path):
myEndpoint = DRP_Endpoint(websocket, None, "1234567890", "Server")
while True:
json_in = await websocket.recv()
myEndpoint.ReceiveMessage(json_in)
#start_server = websockets.serve(wsRecv, "localhost", 8765)
#print("Started WS listener")
#asyncio.get_event_loop().run_until_complete(start_server)
#asyncio.get_event_loop().run_forever()
# DRP Test Client
async def drpTestClient():
uri = "ws://localhost:8080"
print(f"Connecting to -> {uri}")
async with websockets.connect(uri) as websocket:
print(f"Connected!")
myEndpoint = DRP_Endpoint(websocket, None, "1234567890", "Client")
print(f"Sending hello...")
returnData = await myEndpoint.SendCmd("DRP", "hello", { "userAgent": "python", "user": "someuser", "pass": "<PASSWORD>" }, True, None)
print(f"Sending getCmds...")
returnData = await myEndpoint.SendCmd("DRP", "getCmds", None, True, None)
# Using this to keep the client alive; need to figure out a cleaner way
dummyQueue = asyncio.Queue()
await dummyQueue.get()
asyncio.run(drpTestClient())
```
#### File: torchvision/cortx_pytorch/__init__.py
```python
import tempfile
from functools import partial
import io
from os import path
import torch as ch
from tqdm import tqdm
import webdataset as wds
import boto3
from botocore.client import Config
def make_client(end_point, access_key, secure_key):
client = boto3.client('s3', endpoint_url=end_point,
aws_access_key_id=access_key,
aws_secret_access_key=secure_key,
config=Config(signature_version='s3v4'),
region_name='US')
return client
class Packer(ch.utils.data.Dataset):
def __init__(self, ds):
super().__init__()
self.ds = ds
def __len__(self):
return len(self.ds)
def __getitem__(self, ix):
im, lab = self.ds[ix]
with io.BytesIO() as output:
im.save(output, format="JPEG")
return ix, output.getvalue(), lab
def upload_shard(fname, bucket, base_folder, client):
print("uploading", fname, "on CORTX")
obj_name = path.join(base_folder, path.basename(fname))
client.upload_file(fname, bucket, obj_name)
def upload_cv_dataset(ds, client, bucket, base_folder, maxsize, maxcount, workers=0, batch_size=256):
loader = ch.utils.data.DataLoader(Packer(ds), batch_size=batch_size, num_workers=workers, shuffle=True,
collate_fn=lambda x: x)
with tempfile.TemporaryDirectory() as tempfolder:
pattern = path.join(tempfolder, f"shard-%06d.tar")
writer = partial(upload_shard, client=client, bucket=bucket, base_folder=base_folder)
with wds.ShardWriter(pattern, maxsize=int(maxsize), maxcount=int(maxcount), post=writer) as sink:
for r in tqdm(loader):
for ix, im, label in r:
sample = {"__key__": f"im-{ix}", "jpg": im, "cls": label}
sink.write(sample)
def readdir(client, bucket, folder):
ob = client.list_objects(Bucket=bucket, Prefix=folder)
return [x['Key'] for x in ob['Contents']]
def shard_downloader(it, client, bucket):
for desc in it:
with io.BytesIO() as output:
client.download_fileobj(bucket, desc['url'], output)
content = output.getvalue()
print(len(content))
yield {
'stream': io.BytesIO(content)
}
def RemoteDataset(client, bucket, folder, shuffle=True):
shards = readdir(client, bucket, folder)
downloader = partial(shard_downloader, client=client, bucket=bucket)
dataset = wds.ShardList(shards, shuffle=shuffle)
dataset = wds.Processor(dataset, downloader)
dataset = wds.Processor(dataset, wds.tar_file_expander)
dataset = wds.Processor(dataset, wds.group_by_keys)
return dataset
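# End-to-end sketch (hypothetical endpoint, credentials, bucket and folder;
# any torchvision-style dataset yielding (PIL image, label) pairs works):
#   client = make_client("http://cortx-host:31949", "ACCESS_KEY", "SECRET_KEY")
#   upload_cv_dataset(train_ds, client, "mybucket", "cifar10/train",
#                     maxsize=1e8, maxcount=1e4)
#   ds = RemoteDataset(client, "mybucket", "cifar10/train")
#   for sample in ds:
#       pass  # each sample dict carries the raw 'jpg' bytes and the 'cls' label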
``` |
{
"source": "jkandasa/operator-courier",
"score": 2
} |
#### File: operator-courier/operatorcourier/format.py
```python
import yaml
from operatorcourier.build import BuildCmd
class _literal(str):
pass
def _literal_presenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
def _get_empty_formatted_bundle():
return dict(
data=dict(
customResourceDefinitions='',
clusterServiceVersions='',
packages='',
)
)
def format_bundle(bundle):
"""
Converts a bundle object into a push-ready bundle by
changing list values of 'customResourceDefinitions',
'clusterServiceVersions', and 'packages' into stringified yaml literals.
This format is required by the Marketplace backend.
:param bundle: A bundle object
"""
formattedBundle = _get_empty_formatted_bundle()
yaml.add_representer(_literal, _literal_presenter)
if 'data' not in bundle:
return formattedBundle
# Format data fields as string literals to match backend expected format
if bundle['data'].get('customResourceDefinitions'):
formattedBundle['data']['customResourceDefinitions'] = _literal(
yaml.dump(bundle['data']['customResourceDefinitions'],
default_flow_style=False))
if 'clusterServiceVersions' in bundle['data']:
# Format description and alm-examples
clusterServiceVersions = []
for csv in bundle['data']['clusterServiceVersions']:
if csv.get('metadata', {}).get('annotations', {}).get('alm-examples'):
csv['metadata']['annotations']['alm-examples'] = _literal(
csv['metadata']['annotations']['alm-examples'])
if csv.get('spec', {}).get('description'):
csv['spec']['description'] = _literal(csv['spec']['description'])
clusterServiceVersions.append(csv)
if clusterServiceVersions:
formattedBundle['data']['clusterServiceVersions'] = _literal(
yaml.dump(clusterServiceVersions, default_flow_style=False))
if bundle['data'].get('packages'):
formattedBundle['data']['packages'] = _literal(
yaml.dump(bundle['data']['packages'], default_flow_style=False))
return formattedBundle
def unformat_bundle(formattedBundle):
"""
Converts a push-ready bundle into a structured object by changing
stringified yaml of 'customResourceDefinitions', 'clusterServiceVersions',
and 'packages' into lists of objects.
Undoing the format helps simplify bundle validation.
:param formattedBundle: A push-ready bundle
"""
bundle = BuildCmd()._get_empty_bundle()
if 'data' not in formattedBundle:
return bundle
if 'customResourceDefinitions' in formattedBundle['data']:
customResourceDefinitions = yaml.safe_load(
formattedBundle['data']['customResourceDefinitions'])
if customResourceDefinitions:
bundle['data']['customResourceDefinitions'] = customResourceDefinitions
if 'clusterServiceVersions' in formattedBundle['data']:
clusterServiceVersions = yaml.safe_load(
formattedBundle['data']['clusterServiceVersions'])
if clusterServiceVersions:
bundle['data']['clusterServiceVersions'] = clusterServiceVersions
if 'packages' in formattedBundle['data']:
packages = yaml.safe_load(formattedBundle['data']['packages'])
if packages:
bundle['data']['packages'] = packages
return bundle
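# Round-trip sketch with a hypothetical minimal bundle (not a complete,
# valid operator bundle; it only illustrates packing list values into
# yaml string literals and back):
#   bundle = {'data': {'packages': [{'packageName': 'example-operator'}],
#                      'customResourceDefinitions': [],
#                      'clusterServiceVersions': []}}
#   packed = format_bundle(bundle)       # 'packages' becomes a yaml literal string
#   restored = unformat_bundle(packed)   # parsed back into a list of dicts
#   assert restored['data']['packages'] == bundle['data']['packages']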
```
#### File: operator-courier/tests/test_api.py
```python
import pytest
import yaml
from operatorcourier import api
from operatorcourier.format import unformat_bundle
from operatorcourier.errors import OpCourierBadBundle
@pytest.mark.parametrize('directory,expected', [
("tests/test_files/bundles/api/valid_flat_bundle",
"tests/test_files/bundles/api/results/bundle.yaml"),
("tests/test_files/bundles/api/valid_flat_bundle_with_random_folder",
"tests/test_files/bundles/api/results/bundle.yaml"),
])
def test_make_bundle(directory, expected):
verified_manifest = api.build_and_verify(source_dir=directory)
with open(expected, "r") as expected_file:
expected_bundle = yaml.safe_load(expected_file)
assert unformat_bundle(verified_manifest.bundle) == unformat_bundle(expected_bundle)
assert not verified_manifest.nested
assert verified_manifest.is_valid
assert hasattr(verified_manifest, 'bundle')
@pytest.mark.parametrize('yaml_files,expected', [
(
[
"tests/test_files/bundles/api/valid_flat_bundle/crd.yml",
"tests/test_files/bundles/api/valid_flat_bundle/csv.yaml",
"tests/test_files/bundles/api/valid_flat_bundle/packages.yaml"
],
"tests/test_files/bundles/api/results/bundle.yaml"
),
])
def test_make_bundle_with_yaml_list(yaml_files, expected):
yamls = []
for file in yaml_files:
with open(file, "r") as yaml_file:
yamls.append(yaml_file.read())
verified_manifest = api.build_and_verify(yamls=yamls)
with open(expected, "r") as expected_file:
expected_bundle = yaml.safe_load(expected_file)
assert unformat_bundle(verified_manifest.bundle) == unformat_bundle(expected_bundle)
assert not verified_manifest.nested
assert verified_manifest.is_valid
assert hasattr(verified_manifest, 'bundle')
@pytest.mark.parametrize('yaml_files,validation_info', [
(
[
"tests/test_files/bundles/api/valid_flat_bundle/crd.yml",
"tests/test_files/bundles/api/valid_flat_bundle/csv.yaml"
], {
'errors': ['Bundle does not contain any packages.'],
'warnings': ['csv metadata.annotations not defined.',
'csv spec.icon not defined']
}
),
])
def test_make_bundle_invalid(yaml_files, validation_info):
yamls = []
for file in yaml_files:
with open(file, "r") as yaml_file:
yamls.append(yaml_file.read())
with pytest.raises(OpCourierBadBundle) as err:
api.build_and_verify(yamls=yamls)
assert str(err.value) == "Resulting bundle is invalid, " \
"input yaml is improperly defined."
assert err.value.validation_info == validation_info
@pytest.mark.parametrize('nested_source_dir', [
'tests/test_files/bundles/api/etcd_valid_nested_bundle',
'tests/test_files/bundles/api/etcd_valid_nested_bundle_with_random_folder',
'tests/test_files/bundles/api/prometheus_valid_nested_bundle',
'tests/test_files/bundles/api/prometheus_valid_nested_bundle_2',
])
def test_valid_nested_bundles(nested_source_dir):
verified_manifest = api.build_and_verify(source_dir=nested_source_dir)
assert verified_manifest.nested
assert verified_manifest.is_valid
# Made changes to etcdoperator.v0.9.0.clusterserviceversion.yaml and
# removed apiVersion and spec.installModes
@pytest.mark.parametrize('nested_source_dir', [
'tests/test_files/bundles/api/etcd_invalid_nested_bundle',
])
def test_invalid_nested_bundles(nested_source_dir):
with pytest.raises(OpCourierBadBundle) as err:
api.build_and_verify(source_dir=nested_source_dir, repository='etcd')
assert str(err.value) == "Resulting bundle is invalid, " \
"input yaml is improperly defined."
@pytest.mark.parametrize('nested_source_dir', [
'tests/test_files/yaml_source_dir/invalid_yamls_multiple_packages',
])
def test_invalid_flat_bundles(nested_source_dir):
with pytest.raises(OpCourierBadBundle) as err:
api.build_and_verify(source_dir=nested_source_dir, repository='oneagent')
assert str(err.value) == "Only 1 package is expected to exist in source root folder."
```
#### File: operator-courier/tests/test_manifest_parser.py
```python
import pytest
import os
from tempfile import TemporaryDirectory
from distutils.dir_util import copy_tree
from operatorcourier.manifest_parser import filterOutFiles
from operatorcourier.push import BLACK_LIST
@pytest.mark.parametrize('folder_to_filter,expected_output_dir', [
("tests/test_files/bundles/flatten/etcd_valid_input_7",
"tests/test_files/bundles/flatten/etcd_valid_input_7_result")
])
def test_filterOutFiles(folder_to_filter, expected_output_dir):
with TemporaryDirectory() as output_dir:
copy_tree(folder_to_filter, output_dir)
filterOutFiles(output_dir, BLACK_LIST)
assert _get_dir_file_paths(output_dir) == _get_dir_file_paths(expected_output_dir)
def _get_dir_file_paths(source_dir):
"""
:param source_dir: the path of the input directory
:return: a set of relative paths of all files inside input directory and
its subdirectories
"""
file_paths = set()
for root_path, dir_names, file_names in os.walk(source_dir):
dir_path_relative = os.path.relpath(root_path, source_dir)
for file_name in file_names:
file_paths.add(os.path.join(dir_path_relative, file_name))
return file_paths
``` |
{
"source": "jkandasa/sekuli",
"score": 3
} |
#### File: sekuli_python/sekuli/sekuli.py
```python
import json
import requests
class SekuliBaseClient(object):
'''
    This SekuliBaseClient acts as a base class for all the clients like screen, keyboard and mouse.
Arguments:
url
full URL to access remote sikuli via selenium grid or selenium node.
'''
# pylint: disable=too-few-public-methods
def __init__(self, url):
self.url = url
def _post(self, data):
_response = requests.post(
self.url,
headers={'Content-Type': 'application/json'},
data=json.dumps(data))
_data = _response.json()
if _response.status_code != 200:
raise BadResponseException(_data)
if 'result' in _data and _data['result'] == 'SUCCESS':
if 'response' in _data:
return _data['response']
return None
raise BadResponseException(_data)
@classmethod
def get_command(cls, module, command, parameters):
"""
        Return a command dict in the format expected by the remote Sikuli server.
"""
return {
"module": module,
"command": command,
"parameters": parameters
}
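        # Example (hypothetical module/command/parameter names):
        #   SekuliBaseClient.get_command("mouse", "click", {"x": 10, "y": 20})
        # returns
        #   {"module": "mouse", "command": "click",
        #    "parameters": {"x": 10, "y": 20}}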
class BadResponseException(Exception):
"""
If there is an error with request/response. This exception will be raised.
"""
pass
``` |
{
"source": "jkandasa/wrapanapi",
"score": 2
} |
#### File: wrapanapi/mgmtsystem/openstack_infra.py
```python
from keystoneclient.v2_0 import client as oskclient
from novaclient import client as osclient
from novaclient.client import HTTPClient
from requests.exceptions import Timeout
from base import MgmtSystemAPIBase
# TODO The following monkeypatch nonsense is criminal, and would be
# greatly simplified if openstack made it easier to specify a custom
# client class. This is a trivial PR that they're likely to accept.
# Note: This same mechanism may be required for keystone and cinder
# clients, but hopefully won't be.
# monkeypatch method to add retry support to openstack
def _request_timeout_handler(self, url, method, retry_count=0, **kwargs):
try:
# Use the original request method to do the actual work
return HTTPClient.request(self, url, method, **kwargs)
except Timeout:
if retry_count >= 3:
self._cfme_logger.error('nova request timed out after {} retries'.format(retry_count))
raise
else:
# feed back into the replaced method that supports retry_count
retry_count += 1
self._cfme_logger.error('nova request timed out; retry {}'.format(retry_count))
return self.request(url, method, retry_count=retry_count, **kwargs)
class OpenstackInfraSystem(MgmtSystemAPIBase):
"""Openstack Infrastructure management system
# TODO
"""
_stats_available = {
'num_template': lambda self: len(self.list_template()),
'num_host': lambda self: len(self.list_host()),
}
states = {
'running': ('ACTIVE',),
'stopped': ('SHUTOFF',),
'suspended': ('SUSPENDED',),
}
can_suspend = True
def __init__(self, **kwargs):
super(OpenstackInfraSystem, self).__init__(kwargs)
self.tenant = kwargs['tenant']
self.username = kwargs['username']
self.password = kwargs['password']
self.auth_url = kwargs['auth_url']
self._api = None
self._kapi = None
self._capi = None
@property
def api(self):
if not self._api:
self._api = osclient.Client('2',
self.username,
self.password,
self.tenant,
self.auth_url,
service_type="compute",
insecure=True,
timeout=30)
# replace the client request method with our version that
# can handle timeouts; uses explicit binding (versus
# replacing the method directly on the HTTPClient class)
# so we can still call out to HTTPClient's original request
# method in the timeout handler method
self._api.client._cfme_logger = self.logger
self._api.client.request = _request_timeout_handler.__get__(self._api.client,
HTTPClient)
return self._api
@property
def kapi(self):
if not self._kapi:
self._kapi = oskclient.Client(username=self.username,
                                          password=self.password,
tenant_name=self.tenant,
auth_url=self.auth_url,
insecure=True)
return self._kapi
@property
def nodes(self):
return self.api.servers.list()
@property
def images(self):
return self.api.images.list()
@property
def networks(self):
return self.api.networks.list()
def start_vm(self, vm_name):
raise NotImplementedError('start_vm not implemented.')
def wait_vm_running(self, vm_name, num_sec):
raise NotImplementedError('wait_vm_running not implemented.')
def stop_vm(self, vm_name):
raise NotImplementedError('stop_vm not implemented.')
def wait_vm_stopped(self, vm_name, num_sec):
raise NotImplementedError('wait_vm_stopped not implemented.')
def create_vm(self, vm_name):
raise NotImplementedError('create_vm not implemented.')
def delete_vm(self, vm_name):
raise NotImplementedError('delete_vm not implemented.')
def restart_vm(self, vm_name):
raise NotImplementedError('restart_vm not implemented.')
def vm_status(self, vm_name):
raise NotImplementedError('vm_status not implemented.')
def is_vm_running(self, vm_name):
raise NotImplementedError('is_vm_running not implemented.')
def is_vm_stopped(self, vm_name):
raise NotImplementedError('is_vm_stopped not implemented.')
def is_vm_suspended(self, vm_name):
raise NotImplementedError('is_vm_suspended not implemented.')
def suspend_vm(self, vm_name):
raise NotImplementedError('restart_vm not implemented.')
def wait_vm_suspended(self, vm_name, num_sec):
raise NotImplementedError('wait_vm_suspended not implemented.')
def list_vm(self, **kwargs):
raise NotImplementedError('list_vm not implemented.')
def list_template(self):
return [image.name for image in self.images]
def list_flavor(self):
raise NotImplementedError('list_flavor not implemented.')
def list_network(self):
return [network.name for network in self.networks]
def list_host(self):
return [node.name for node in self.nodes]
def info(self):
raise NotImplementedError('info not implemented.')
def disconnect(self):
pass
def clone_vm(self, source_name, vm_name):
raise NotImplementedError()
def does_vm_exist(self, name):
raise NotImplementedError()
def deploy_template(self, template, *args, **kwargs):
raise NotImplementedError()
def current_ip_address(self, vm_name):
raise NotImplementedError()
def get_ip_address(self, vm_name):
""
raise NotImplementedError()
def remove_host_from_cluster(self, hostname):
raise NotImplementedError()
# TODO
```
#### File: wrapanapi/mgmtsystem/rest_client.py
```python
import requests
import os
import json
from exceptions import RestClientException
requests.packages.urllib3.disable_warnings()
class BearerTokenAuth(requests.auth.AuthBase):
"""Attaches a bearer token to the given request object"""
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers['Authorization'] = 'Bearer {}'.format(self.token)
return r
class ContainerClient(object):
def __init__(self, hostname, auth, protocol="https", port=6443, entry='api/v1', verify=False):
"""Simple REST API client for container management systems
Args:
hostname: String with the hostname or IP address of the server (e.g. '10.11.12.13')
auth: Either a (user, pass) sequence or a string with token
protocol: Protocol to use for communication with the server
port: Port to use
entry: Entry point of the REST API
verify: 'True' if we want to verify SSL, 'False' otherwise
"""
self.api_entry = "{}://{}:{}/{}".format(protocol, hostname, port, entry)
self.verify = verify
if type(auth) in (list, set, tuple):
self.auth = auth
elif type(auth) is str:
self.auth = BearerTokenAuth(auth)
else:
raise RestClientException('Invalid auth object')
def get(self, entity_type, name=None, namespace=None):
"""Sends a request to fetch an entity of specific type
        Fetches a single entity if its name is provided, or all entities of the given type if name is omitted.
Note:
Some entities are tied to namespaces (projects).
To fetch these by name, namespace has to be provided as well.
Return:
Tuple containing status code and json response with requested entity/entities.
"""
path = '{}s'.format(entity_type)
if name is not None:
if namespace is not None:
path = os.path.join('namespaces/{}'.format(namespace), path)
path = os.path.join(path, '{}'.format(name))
r = self.raw_get(path)
return (r.status_code, r.json() if r.ok else None)
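        # Usage sketch (hypothetical host and token):
        #   client = ContainerClient('10.11.12.13', 'my-bearer-token')
        #   status, pods = client.get('pod')                      # all pods
        #   status, pod = client.get('pod', name='router-1',
        #                            namespace='default')         # a single pod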
def get_json(self, path, headers=None, params=None):
r = self.raw_get(path, headers, params)
return (r.json() if r.ok else None)
def put_status(self, path, data, headers=None):
r = self.raw_put(path, data, headers)
return r.ok
def post_status(self, path, data, headers=None):
r = self.raw_post(path, data, headers)
return r.ok
def delete_status(self, path, headers=None):
r = self.raw_delete(path, headers)
return r.ok
def raw_get(self, path, headers=None, params=None):
return requests.get(
os.path.join(self.api_entry, path),
auth=self.auth,
verify=self.verify,
headers=headers,
params=params)
def raw_put(self, path, data, headers=None):
return requests.put(
os.path.join(self.api_entry, path), auth=self.auth, verify=self.verify,
headers=headers, data=json.dumps(data))
def raw_post(self, path, data, headers=None):
return requests.post(
os.path.join(self.api_entry, path), auth=self.auth, verify=self.verify,
headers=headers, data=json.dumps(data))
def raw_delete(self, path, headers=None):
return requests.delete(
os.path.join(self.api_entry, path), auth=self.auth, verify=self.verify,
headers=headers)
```
#### File: wrapanapi/tests/test_hawkular.py
```python
import json
from urlparse import urlparse
import os
import pytest
from mgmtsystem import hawkular
from mock import patch
from random import sample
from mgmtsystem.hawkular import CanonicalPath
def fake_urlopen(c_client, url, headers, params):
"""
    A stub urlopen() implementation that loads json responses from
the filesystem.
"""
# Map path from url to a file
parsed_url = urlparse("{}/{}".format(c_client.api_entry, url)).path
if parsed_url.startswith('/hawkular/inventory/traversal') \
or parsed_url.startswith('/hawkular/inventory/entity'):
# Change parsed url, when we use default one, 'd;configuration' replaced with 'd'
parsed_url = "{}/{}".format(urlparse("{}".format(c_client.api_entry)).path, url)
parsed_url = parsed_url.replace('traversal/', '')
parsed_url = parsed_url.replace('entity/', '')
parsed_url = parsed_url.replace('f;', 'feeds/')
parsed_url = parsed_url.replace('r;', 'resources/', 1)
parsed_url = parsed_url.replace('r;', '')
parsed_url = parsed_url.replace('rt;', 'resourceTypes/')
parsed_url = parsed_url.replace('rl;defines/', '')
parsed_url = parsed_url.replace('type=rt', 'resourceTypes')
parsed_url = parsed_url.replace('type=r', 'resources')
parsed_url = parsed_url.replace('type=f', 'feeds')
parsed_url = parsed_url.replace('d;configuration', 'data')
resource_file = os.path.normpath("tests/resources/{}.json".format(parsed_url))
# Must return a file-like object
return json.load(open(resource_file))
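    # For example, a request for the (hypothetical) traversal url
    # 'traversal/f;my-feed/type=r' is rewritten to '.../feeds/my-feed/resources'
    # and answered from 'tests/resources/.../feeds/my-feed/resources.json'.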
def fake_urldelete(c_client, url, headers):
"""
A stub delete_status() implementation that returns True
"""
return True
def fake_urlput(c_client, url, data, headers):
"""
A stub put_status() implementation that returns True
"""
return True
def fake_urlpost(c_client, url, data, headers):
"""
A stub post_status() implementation that returns True
"""
return True
@pytest.yield_fixture(scope="function")
def provider():
"""
A stub urlopen() implementation that load json responses from
the filesystem.
"""
if not os.getenv('HAWKULAR_HOSTNAME'):
patcher = patch('mgmtsystem.rest_client.ContainerClient.get_json', fake_urlopen)
patcher.start()
patcher = patch('mgmtsystem.rest_client.ContainerClient.delete_status', fake_urldelete)
patcher.start()
patcher = patch('mgmtsystem.rest_client.ContainerClient.post_status', fake_urlpost)
patcher.start()
patcher = patch('mgmtsystem.rest_client.ContainerClient.put_status', fake_urlput)
patcher.start()
hwk = hawkular.Hawkular(
hostname=os.getenv('HAWKULAR_HOSTNAME', 'localhost'),
protocol=os.getenv('HAWKULAR_PROTOCOL', 'http'),
port=os.getenv('HAWKULAR_PORT', 8080),
username=os.getenv('HAWKULAR_USERNAME', 'jdoe'),
password=os.getenv('HAWKULAR_PASSWORD', 'password'),
ws_connect=False
)
yield hwk
if not os.getenv('HAWKULAR_HOSTNAME'):
patcher.stop()
@pytest.yield_fixture(scope="function")
def datasource(provider):
"""
Fixture for preparing Datasource for tests.
It creates resource and resource data for Datasource.
On the end of testing, Datasource is deleted.
"""
datasources = provider.inventory.list_server_datasource()
assert len(datasources) > 0, "No resource data is listed for any of datasources"
new_datasource = None
for datasource in sample(datasources, 1):
r_data = _read_resource_data(provider, datasource)
assert r_data
name_ext = "MWTest"
new_datasource = hawkular.Resource(name="{}{}".format(datasource.name, name_ext),
id="{}{}".format(datasource.id, name_ext),
path=hawkular.CanonicalPath(
"{}{}".format(datasource.path.to_string, name_ext)))
new_datasource.path.resource_id = new_datasource.path.resource_id[1]
resource_type = hawkular.ResourceType(id=None, name=None,
path=CanonicalPath("/rt;Datasource"))
new_datasource_data = hawkular.ResourceData(name=None, path=None, value=r_data.value)
new_datasource_data.value.update(
{"JNDI Name": "{}{}".format(r_data.value["JNDI Name"], name_ext),
"Enabled": "true"
}
)
_delete_resource(provider, new_datasource)
result = _create_resource(provider, resource=new_datasource,
resource_data=new_datasource_data, resource_type=resource_type)
assert result, "Create should be successful"
r_data = _read_resource_data(provider, new_datasource)
assert r_data, "Resource data should exist"
assert r_data.value == new_datasource_data.value
yield new_datasource
if new_datasource:
_delete_resource(provider, new_datasource)
def test_list_feed(provider):
""" Checks whether any feed is listed """
feeds = provider.inventory.list_feed()
assert len(feeds) > 0, "No feeds are listed"
for feed in feeds:
assert feed.id
assert feed.path
def test_list_resource_type(provider):
""" Checks whether any resource type is listed and has attributes """
feeds = provider.inventory.list_feed()
for feed in feeds:
res_types = provider.inventory.list_resource_type(feed_id=feed.id)
for res_type in res_types:
assert res_type.id
assert res_type.name
assert res_type.path
assert len(res_types) > 0, "No resource type is listed for any of feeds"
def test_list_server(provider):
""" Checks whether any server is listed and has attributes"""
servers = provider.inventory.list_server()
for server in servers:
assert server.id
assert server.name
assert server.path
assert server.data
assert len(servers) > 0, "No server is listed for any of feeds"
def test_list_domain(provider):
""" Checks whether any domain is listed and has attributes"""
domains = provider.inventory.list_domain()
for domain in domains:
assert domain.id
assert domain.name
assert domain.path
assert domain.data['Local Host Name']
assert len(domains) > 0, "No domain is listed for any of feeds"
def test_list_server_group(provider):
""" Checks whether any group is listed and has attributes"""
domains = provider.inventory.list_domain()
for domain in domains:
server_groups = provider.inventory.list_server_group(domain.path.feed_id)
for server_group in server_groups:
assert server_group.id
assert server_group.name
assert server_group.path
assert server_group.data
assert len(server_group) > 0, "No server group is listed for any of feeds"
def test_list_server_deployment(provider):
""" Checks whether any deployment is listed and has attributes """
deployments = provider.inventory.list_server_deployment()
for deployment in deployments:
assert deployment.id
assert deployment.name
assert deployment.path
assert len(deployments) > 0, "No deployment is listed for any of feeds"
def test_list_messaging(provider):
""" Checks whether any messaging is listed and has attributes """
messagings = provider.inventory.list_messaging()
for messaging in messagings:
assert messaging.id
assert messaging.name
assert messaging.path
assert len(messagings) > 0, "No messaging is listed for any of feeds"
def test_get_config_data(provider):
""" Checks whether resource data is provided and has attributes """
found = False
servers = provider.inventory.list_server()
for server in servers:
r_data = provider.inventory.get_config_data(feed_id=server.path.feed_id,
resource_id=server.id)
if r_data:
found = True
assert r_data.name
assert r_data.path
assert r_data.value
assert found, "No resource data is listed for any of servers"
def test_edit_resource_data(provider, datasource):
""" Checks whether resource data is edited """
r_data = _read_resource_data(provider, datasource)
assert r_data, "Resource data should exist"
r_data.value['Enabled'] = "false"
result = _update_resource_data(provider, r_data, datasource)
assert result, "Update should be successful"
r_data = _read_resource_data(provider, datasource)
# skip value verification for mocked provider
if os.getenv('HAWKULAR_HOSTNAME'):
assert r_data.value['Enabled'] == "false"
def test_delete_resource(provider, datasource):
""" Checks whether resource is deleted """
r_data = _read_resource_data(provider, datasource)
assert r_data, "Resource data should exist"
result = _delete_resource(provider, datasource)
assert result, "Delete should be successful"
r_data = _read_resource_data(provider, datasource)
# skip deleted verification for mocked provider
if os.getenv('HAWKULAR_HOSTNAME'):
assert not r_data
def _read_resource_data(provider, resource):
return provider.inventory.get_config_data(feed_id=resource.path.feed_id,
resource_id=resource.path.resource_id)
def _create_resource(provider, resource, resource_data, resource_type):
return provider.inventory.create_resource(resource=resource, resource_data=resource_data,
resource_type=resource_type,
feed_id=resource.path.feed_id)
def _update_resource_data(provider, resource_data, resource):
return provider.inventory.edit_config_data(resource_data=resource_data,
feed_id=resource.path.feed_id,
resource_id=resource.path.resource_id)
def _delete_resource(provider, resource):
return provider.inventory.delete_resource(feed_id=resource.path.feed_id,
resource_id=resource.path.resource_id)
def test_list_server_datasource(provider):
""" Checks whether any datasource is listed and has attributes """
found = False
datasources = provider.inventory.list_server_datasource()
if len(datasources) > 0:
found = True
for datasource in datasources:
assert datasource.id
assert datasource.name
assert datasource.path
    assert found or provider.inventory._stats_available['num_datasource'](provider.inventory) > 0,\
        "No datasource is listed for any of the feeds, but they exist"
def test_path(provider):
""" Checks whether path returned correctly """
feeds = provider.inventory.list_feed()
for feed in feeds:
assert feed.path
assert feed.path.tenant_id
assert feed.path.feed_id
servers = provider.inventory.list_server()
for server in servers:
assert server.path
assert server.path.tenant_id
assert server.path.feed_id
assert server.path.resource_id
def test_num_server(provider):
""" Checks whether number of servers is returned correct """
servers_count = 0
feeds = provider.inventory.list_feed()
for feed in feeds:
servers_count += len(provider.inventory.list_server(feed_id=feed.id))
num_server = provider.inventory._stats_available['num_server'](provider.inventory)
assert num_server == servers_count, "Number of servers is wrong"
def test_num_deployment(provider):
""" Checks whether number of deployments is returned correct """
deployments_count = 0
feeds = provider.inventory.list_feed()
for feed in feeds:
deployments_count += len(provider.inventory.list_server_deployment(feed_id=feed.id))
num_deployment = provider.inventory._stats_available['num_deployment'](provider.inventory)
assert num_deployment == deployments_count, "Number of deployments is wrong"
def test_num_datasource(provider):
""" Checks whether number of datasources is returned correct """
datasources_count = 0
feeds = provider.inventory.list_feed()
for feed in feeds:
datasources_count += len(provider.inventory.list_server_datasource(feed_id=feed.id))
num_datasource = provider.inventory._stats_available['num_datasource'](provider.inventory)
assert num_datasource == datasources_count, "Number of datasources is wrong"
def test_num_messaging(provider):
""" Checks whether number of messagings is returned correct """
messagings_count = 0
feeds = provider.inventory.list_feed()
for feed in feeds:
messagings_count += len(provider.inventory.list_messaging(feed_id=feed.id))
num_messaging = provider.inventory._stats_available['num_messaging'](provider.inventory)
assert num_messaging == messagings_count, "Number of messagings is wrong"
def test_list_event(provider):
""" Checks whether is any event listed """
events = provider.alert.list_event()
if len(events) > 0:
event = events[0]
assert event.id
assert event.eventType
assert event.ctime
assert event.dataSource
assert event.dataId
assert event.category
assert event.text
``` |
{
"source": "jkane002/application_automation",
"score": 3
} |
#### File: jkane002/application_automation/apply.py
```python
import sys
from bs4 import BeautifulSoup
import urllib.request
from urllib.request import urlopen
from datetime import date
import gspread
from oauth2client.service_account import ServiceAccountCredentials
'''
Automation script that enters the job's title, company, location, and url
into Google Sheets given a URL
'''
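# Example invocation (a sketch; the URL below is hypothetical, and the script assumes a
# 'creds.json' service-account file next to it plus a Google Sheet named 'Job Search 2021'
# shared with that service account):
#   python apply.py https://jobs.lever.co/example-co/0000-1111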
class JobApplication:
def __init__(self, url=None, title=None, company=None, location=None):
self.url = url
self.title = title
self.company = company
self.location = location
def addRecord(self):
'''Enters job onto Google Sheets '''
# Authorize Google Sheets
sheetName = 'Job Search 2021'
scope = ["https://spreadsheets.google.com/feeds", 'https://www.googleapis.com/auth/spreadsheets',
"https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name(
'creds.json', scope)
client = gspread.authorize(creds)
sheet = client.open(sheetName).sheet1
data = sheet.get_all_records()
# Enter today's date
today = date.today()
applied_date = today.strftime("%m/%d/%Y")
# Insert a new row into Google Sheets
newEntry = [self.company, self.title,
self.location, self.url, applied_date]
sheet.insert_row(newEntry, 2)
# Updates date difference formula
sheet.update_acell('F2', '=DATEDIF(E2, TODAY(), "D")')
def parse_jobsleverco(job_url):
html = urlopen(job_url)
soup = BeautifulSoup(html, 'html.parser')
posting_headline = soup.find(
'div', attrs={'class': 'posting-headline'}
)
# Job Title
title = posting_headline.contents[0].get_text()
# Job Location
details = posting_headline.contents[1]
location = details.find_all(
'div', {'class': 'sort-by-time posting-category medium-category-label'}
)[0].get_text().strip('/')
# Company name
footer = soup.find_all(
'div', {
'class': 'main-footer-text page-centered'}
)
footer_name = footer[0].find('p').text.strip()
stopwords = ['home', 'page']
querywords = footer_name.split()
resultwords = [word for word in querywords if word.lower()
not in stopwords]
company_name = ' '.join(resultwords)
return JobApplication(job_url, title, company_name, location)
def parse_linkedin(job_url):
html = urlopen(job_url)
soup = BeautifulSoup(html, 'html.parser')
# Job Location
span_loc = soup.find(
'span', {'class': 'topcard__flavor topcard__flavor--bullet'})
location = span_loc.get_text().strip()
# Company name
company_atag = soup.find(
'a', {'class': 'topcard__org-name-link topcard__flavor--black-link'})
company_name = company_atag.get_text().strip()
# Job title
title = soup.find('h1', {'class': 'topcard__title'}).get_text().strip()
return JobApplication(job_url, title, company_name, location)
def parse_greenhouse(job_url):
html = urlopen(job_url)
soup = BeautifulSoup(html, 'html.parser')
# Job Location
location_loc = soup.find(
'div', {'class': 'location'})
location = location_loc.get_text().strip()
# Company name
company_loc = soup.find(
'span', {'class': 'company-name'})
company_name = company_loc.get_text().strip()
stopwords = ['at']
querywords = company_name.split()
resultwords = [word for word in querywords if word.lower()
not in stopwords]
company_name = ' '.join(resultwords)
# Job title
title = soup.find('h1', {'class': 'app-title'}).get_text().strip()
return JobApplication(job_url, title, company_name, location)
def general_parse(job_url):
while True:
title = input("Job title: ")
company_name = input("Company name: ")
location = input("Location: ")
print(f"{title} at {company_name} in {location} ({job_url})")
res = input('Is this good? (y/n)')
if res == '' or not res[0].lower() in ['y', 'n']:
print('Please answer with yes or no!')
elif res[0].lower() == 'n':
continue
else:
break
return JobApplication(job_url, title, company_name, location)
def parse_website(job_url, job):
'''
Factory pattern in parsing websites
'''
# web scraped these websites
known_parsing = [
{
'name': 'linkedin',
'parser': parse_linkedin
},
{
'name': 'greenhouse',
'parser': parse_greenhouse
},
{
'name': 'jobs.lever.co',
'parser': parse_jobsleverco
}
]
# known websites for general parsing
general_list = ['workday', 'icims', 'careers', 'gh_jid']
    for known in known_parsing:
        if known['name'] in job_url:
            job = known['parser'](job_url)
for co in general_list:
if co in job_url:
job = general_parse(job_url)
if job.title:
job.addRecord()
print(
f"Entered:\n{job.company}\n{job.title}\n{job.location}\n{job.url}\n"
)
else:
'''Link not in known_parsing nor general_list lists'''
while True:
valid_url = input(f"Valid url at {job_url}? (y/n)")
if valid_url == '' or not valid_url[0].lower() in ['y', 'n']:
print('Please answer with yes or no!')
elif valid_url[0].lower() == 'n':
break
else:
'''
Can force inputting wrong entries
Honor code when inputting data
TODO: have more checks regarding url patterns
'''
job = general_parse(job_url)
job.addRecord()
break
if __name__ == "__main__":
try:
'''Get 2nd argument'''
job_url = sys.argv[1]
except:
print("\tEnter a job url")
else:
job = JobApplication()
parse_website(job_url, job)
``` |
{
"source": "jkane002/jkane002_v2",
"score": 2
} |
#### File: jkane002_v2/blog/models.py
```python
from django.db import models
from django.utils import timezone
from django.urls import reverse
class Tag(models.Model):
'''Blog Tags'''
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class BlogPost(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
date_posted = models.DateTimeField(default = timezone.now)
tags = models.ManyToManyField(Tag, blank=False)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog-post-detail', kwargs={'pk':self.pk})
```
#### File: jkane002_v2/blog/views.py
```python
from django.shortcuts import render
from .models import BlogPost, Tag
from django.views.generic import ListView, DetailView
import random
def blog(request):
"""blog page"""
return render(request, 'blog/blog.html')
class BlogPostListView(ListView):
"""blog page"""
model = BlogPost
template_name = 'blog/blog.html'
context_object_name = 'blogposts'
ordering = ['-date_posted']
paginate_by = 5
def get_context_data(self, **kwargs):
        '''multiple contexts - blogposts & tags'''
context = super().get_context_data(**kwargs)
context['tags'] = Tag.objects.all()
return context
class BlogPostDetailView(DetailView):
'''Detail View of a single post'''
model = BlogPost
context_object_name = 'blogposts'
``` |
{
"source": "jkang1640/TextAugmentation-GPT2",
"score": 2
} |
#### File: wikiextractor_1/wikiextractor/extractPage.py
```python
import sys, os.path
import re
import argparse
import bz2
# Program version
__version__ = '3.0.5'
# ----------------------------------------------------------------------
# READER
tagRE = re.compile(r'(.*?)<(/?\w+)[^>]*>(?:([^<]*)(<.*?>)?)?')
#tagRE = re.compile(r'(.*?)<(/?\w+)[^>]*>([^<]*)')
# 1 2 3
def process_data(input_file, id, templates=False):
"""
:param input_file: name of the wikipedia dump file.
:param id: article id
"""
if input_file.lower().endswith(".bz2"):
input = bz2.open(input_file, mode='rt', encoding='utf-8')
else:
input = open(input_file)
page = []
for line in input:
line = line
if '<' not in line: # faster than doing re.search()
if page:
page.append(line)
continue
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'page':
page = []
page.append(line)
inArticle = False
elif tag == 'id':
curid = m.group(3)
if id == curid:
page.append(line)
inArticle = True
elif not inArticle and not templates:
page = []
elif tag == 'title':
if templates:
if m.group(3).startswith('Template:'):
page.append(line)
else:
page = []
else:
page.append(line)
elif tag == '/page':
if page:
page.append(line)
print(''.join(page))
if not templates:
break
page = []
elif page:
page.append(line)
input.close()
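# Example usage (a sketch; the dump file name and article id below are placeholders):
#   python extractPage.py --id 12 enwiki-latest-pages-articles.xml.bz2 > page_12.xml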
def main():
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
parser.add_argument("input",
help="XML wiki dump file")
parser.add_argument("--id", default="1",
help="article number")
parser.add_argument("--template", action="store_true",
help="template number")
parser.add_argument("-v", "--version", action="version",
                        version='%(prog)s ' + __version__,
help="print program version")
args = parser.parse_args()
process_data(args.input, args.id, args.template)
if __name__ == '__main__':
main()
``` |
{
"source": "jkania7/galfacts",
"score": 4
} |
#### File: jkania7/galfacts/converter.py
```python
ver = "v1.0.0"
def main():
print("Converter {0}".format(ver))
pad = float(raw_input("Please enter the amount of padding you" +\
" would like (in arc min): "))
pad = pad/(60)
RA_start = parser(raw_input("Please enter start RA: "))
RA_stop = parser(raw_input("Please enter stop RA: "))
RA_start_deci = RA_to_decimal(RA_start) + pad
RA_stop_deci = RA_to_decimal(RA_stop) - pad
DEC_start = parser(raw_input("Please enter start DEC: "))
DEC_stop = parser(raw_input("Please enter stop DEC: "))
DEC_start_deci = DEC_to_decimal(DEC_start) + pad
DEC_stop_deci = DEC_to_decimal(DEC_stop) - pad
print("---------")
print("RA start in decimal = {0:.2f}".format(RA_start_deci))
print("RA stop in decimal = {0:.2f}".format(RA_stop_deci))
print("DEC start in decimal = {0:.2f}".format(DEC_start_deci))
print("DEC stop in decimal = {0:.2f}".format(DEC_stop_deci))
def parser(str):
result = []
for i in range(len(str)):
result.append(float(str[i]))
if len(result) != 6:
print("You did not enter the correct lenght")
return result
def RA_to_decimal(RA):
hour_deci = (10*RA[0]+RA[1]) + (10*RA[2]+RA[3])/60 + (10*RA[4]+RA[5])/(60*60)
RA_deg_deci = hour_deci*15
return RA_deg_deci
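# Worked example (assuming the six-digit input format expected by parser()):
#   RA "063015" -> 6h 30m 15s -> 6 + 30/60 + 15/3600 = 6.5042 hours -> * 15 = 97.5625 degrees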
def DEC_to_decimal(DEC):
DEC_deci = (10*DEC[0]+DEC[1]) + (10*DEC[2]+DEC[3])/60 + (10*DEC[4]+DEC[5])/(60*60)
return DEC_deci
if __name__ == "__main__":
main()
```
#### File: jkania7/galfacts/make_plots.py
```python
import sys
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import MaxNLocator
from itertools import cycle
def stokes_plot(x_data, xlabel, I_data, Q_data, U_data, V_data,
filename):
"""Generate plot of 4 stokes parameters"""
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, sharex=True)
ax1.plot(x_data,I_data)
ax1.set_xlim(np.nanmin(x_data),np.nanmax(x_data))
ax1.set_ylim(np.nanmin(I_data[I_data.nonzero()]),
np.nanmax(I_data))
ax1.set_ylabel("Stokes I (K)")
ax2.plot(x_data,Q_data)
ax2.set_ylim(np.nanmin(Q_data),np.nanmax(Q_data))
ax2.set_ylabel("Stokes Q (K)")
ax3.plot(x_data,U_data)
ax3.set_ylim(np.nanmin(U_data),np.nanmax(U_data))
ax3.set_ylabel("Stokes U (K)")
ax4.plot(x_data,V_data)
ax4.set_ylim(np.nanmin(V_data),np.nanmax(V_data))
ax4.set_ylabel("Stokes V (K)")
ax4.set_xlabel(xlabel)
fig.subplots_adjust(hspace=0.1)
for ax in [ax1, ax2, ax3, ax4]:
# make the fontsize a bit smaller
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(12)
plt.savefig(filename)
plt.close()
def single_stokes(x_data, xlabel, y_data, ylabel, filename):
"""Generate plot of single stokes parameter"""
fig, ax1 = plt.subplots(1)
ax1.plot(x_data,y_data)
ax1.set_xlim(np.nanmin(x_data),np.nanmax(x_data))
ax1.set_ylim(np.nanmin(y_data),np.nanmax(y_data))
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
plt.savefig(filename)
plt.close()
def source_plot(dec, I_data, all_dec, all_I_data, residuals,
fit_x, fit_y, filename, gb, ploy_base_fit):
"""Generate a plot of I vs dec for a single source"""
titl = (filename.split("/")[-1]).split(".")[0] + gb
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
ax1.plot(all_dec, all_I_data, 'ko')
ax1.plot(dec, I_data, 'ro')
ax1.plot(fit_x,fit_y,'k-')
ax1.plot(dec, ploy_base_fit,"g^")
ax1.set_xlim(np.min(dec),np.max(dec))
ax1.set_ylabel('Stokes I (K)')
res_data = 100.*residuals/I_data
ax2.plot(dec,res_data,'ro')
if np.min(residuals) > -1:
lower_lim = -1
else:
lower_lim = np.min(residuals)
if np.max(residuals) < 1:
upper_lim = 1
else:
upper_lim = np.max(residuals)
ax2.set_ylim(lower_lim,upper_lim)
ax2.set_ylabel('Residuals (K)')
ax2.set_xlabel("Dec (degs)")
fig.subplots_adjust(hspace=0.1)
fig.suptitle(titl, fontsize=14)
plt.savefig(filename)
plt.close()
def field_plot(ra, dec, I_data, filename,labels=None,centers=None):
fig, ax1 = plt.subplots(1)
corr_ra = ra*np.cos(np.deg2rad(dec))
    if labels is None:
sc = ax1.scatter(corr_ra,dec,c=I_data)
cb = plt.colorbar(sc)
cb.set_label('Stokes I (K)')
else:
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmy')
for i,col in zip(range(len(centers)),colors):
my_members = labels == i
center = centers[i]
plt.plot(corr_ra[my_members],dec[my_members],col+'o')
plt.plot(center[0],center[1],'o',markerfacecolor=col,
markeredgecolor='k', markersize=14, alpha=0.5)
plt.gca().invert_xaxis() # RA increase to left
ax1.set_xlabel('RA * cos(Dec) (deg)')
ax1.set_ylabel('Dec (deg)')
plt.savefig(filename)
plt.close()
def field_plot_3d(ra, dec, I_data, fit_x, fit_y, fit_z, filename):
fig, ax1 = plt.subplots(1,subplot_kw={"projection": '3d'})
corr_ra = ra*np.cos(np.deg2rad(dec))
ax1.scatter(corr_ra,dec,I_data,s=2)
ax1.plot_wireframe(fit_x, fit_y, fit_z, rstride=10, cstride=10)
plt.gca().invert_xaxis() # RA inreases to the left
ax1.set_xlabel('RA * cos(Dec) (deg)')
ax1.set_ylabel('Dec (deg)')
ax1.set_zlabel('Stokes I (K)')
plt.tight_layout()
plt.savefig(filename)
plt.close()
if __name__ == "__main__":
sys.exit("Error: module not meant to be run at top level.")
``` |
{
"source": "jkankiewicz/pyaaf2",
"score": 2
} |
#### File: pyaaf2/aaf2/essence.py
```python
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
import traceback
from . import core
from . mobid import MobID
from .utils import register_class
from .auid import AUID
@register_class
class EssenceData(core.AAFObject):
class_id = AUID("0d010101-0101-2300-060e-2b3402060101")
__slots__ = ()
@property
def unique_key(self):
return self.mob_id
@property
def mob_id(self):
return self['MobID'].value
@mob_id.setter
def mob_id(self, value):
self['MobID'].value = value
@property
def mob(self):
mob_id = self.mob_id
if mob_id:
return self.root.content.mobs.get(mob_id, None)
return None
@mob.setter
def mob(self, value):
self.mob_id = value.mob_id
def open(self, mode='r'):
return self['Data'].open(mode)
@register_class
class EssenceDescriptor(core.AAFObject):
class_id = AUID("0d010101-0101-2400-060e-2b3402060101")
__slots__ = ()
@property
def locator(self):
return self['Locator'].value
@register_class
class FileDescriptor(EssenceDescriptor):
class_id = AUID("0d010101-0101-2500-060e-2b3402060101")
__slots__ = ()
@property
def length(self):
return self['Length'].value
@length.setter
def length(self, value):
self['Length'].value = value
@register_class
class DigitalImageDescriptor(FileDescriptor):
class_id = AUID("0d010101-0101-2700-060e-2b3402060101")
__slots__ = ()
@register_class
class CDCIDescriptor(DigitalImageDescriptor):
class_id = AUID("0d010101-0101-2800-060e-2b3402060101")
__slots__ = ()
@register_class
class RGBADescriptor(DigitalImageDescriptor):
class_id = AUID("0d010101-0101-2900-060e-2b3402060101")
__slots__ = ()
@property
def pixel_layout(self):
return self['PixelLayout'].value
@register_class
class TapeDescriptor(EssenceDescriptor):
class_id = AUID("0d010101-0101-2e00-060e-2b3402060101")
__slots__ = ()
@register_class
class SoundDescriptor(FileDescriptor):
class_id = AUID("0d010101-0101-4200-060e-2b3402060101")
__slots__ = ()
@register_class
class WAVEDescriptor(FileDescriptor):
# from ... https://github.com/ebu/ebu-libmxf/blob/master/tools/MXFDump/AAFMetaDictionary.h
class_id = AUID("0d010101-0101-2c00-060e-2b3402060101")
__slots__ = ()
@register_class
class DataEssenceDescriptor(FileDescriptor):
class_id = AUID("0d010101-0101-4300-060e-2b3402060101")
__slots__ = ()
@register_class
class MultipleDescriptor(FileDescriptor):
class_id = AUID("0d010101-0101-4400-060e-2b3402060101")
__slots__ = ()
@register_class
class PCMDescriptor(SoundDescriptor):
class_id = AUID("0d010101-0101-4800-060e-2b3402060101")
__slots__ = ()
@register_class
class PhysicalDescriptor(EssenceDescriptor):
class_id = AUID("0d010101-0101-4900-060e-2b3402060101")
__slots__ = ()
@register_class
class ImportDescriptor(PhysicalDescriptor):
class_id = AUID("0d010101-0101-4a00-060e-2b3402060101")
__slots__ = ()
``` |
{
"source": "jkapila/exploreTS",
"score": 3
} |
#### File: exploreTS/sourcecodes/example_module.py
```python
import logging
import pkg_resources
from .exceptions import SomethingException
logger = logging.getLogger(__name__)
EXAMPLE_CONSTANT = 2
EXAMPLE_CONSTANT_OPTIONS = (
"a_value",
"another_value",
)
class ExampleModule:
""" Some documentation about the class.
It'll get rendered in the auto generated docs. If Kanye could see this he'd rap about how great it is.
"""
def __init__(self, **kwargs):
""" Instantiate a glorious ExampleModule
"""
pass
def _example_private_method(self):
""" Does private things not for use outside the library
As indicated by preceding its name with _
"""
pass
def example_method(self, example_arg, example_kwarg=None):
""" A public method to show parameter stuff in docstrings
:param example_arg: Document the function parameter
:param example_kwarg: Document the function parameter
:return: None
"""
def example_error_handling_and_logging(self, thing):
""" Method to show how to handle an error
"""
try:
logger.debug("Doing something to a thing (thing: %s)", thing)
do_something(thing, EXAMPLE_CONSTANT, EXAMPLE_CONSTANT_OPTIONS)
except NameError as e:
raise SomethingException(str(e))
def example_of_how_to_refer_to_a_file_in_the_package(self):
""" Method showing how to reference files within the package
You need to refer to the file in the package bundle, because you don't know where the
module will be called from... this gets you the right path always, relative to sourcecodes
Don't forget to include non-python files in the MANIFEST.in too!
"""
file_name = pkg_resources.resource_string("sourcecodes", "module_data/file_required_by_module.json")
logger.info("file name is %s", file_name)
``` |
{
"source": "jkapila/MVP_AI",
"score": 3
} |
#### File: jkapila/MVP_AI/AnalyticalEngine.py
```python
from __future__ import print_function, with_statement, division
from Utils import getdir, get_model_from_directory
class AnalyticalEngine:
def __init__(self, analytical_method):
"""
:param analytical_method:
"""
print('Analytical Engine Invoked')
self.model = None
self.analytical_method = analytical_method
self.load_or_make_model(self.analytical_method)
def load_or_make_model(self, method):
"""
:param method:
:return:
"""
has_model, model = get_model_from_directory(getdir(), method)
if has_model:
self.model = model
else:
self.model = '{} {}'.format(self.analytical_method, 'Model Created')
def get_model(self):
"""
:return:
"""
return self.model
def get_prediction(self, input_val):
return 'As {}! It will make prediction on {}'.format(self.model, input_val)
```
#### File: jkapila/MVP_AI/OutputAdaptor.py
```python
from __future__ import print_function, division, with_statement
class OutputAdaptor(object):
def __init__(self, output_method):
print('Output Adaptor Invoked!')
self.output_method = output_method
self.prediction = None
self.output = None
def outputs(self):
print('The Output is:', self.output)
return self.output
def adapat_y(self, predictions):
self.prediction = predictions
self.output = self.prediction
``` |
{
"source": "jkapila/paper-codebase",
"score": 2
} |
#### File: DriverFatigueness/preprocessing/YawnPreprocess.py
```python
import numpy as np
import os
from six.moves import cPickle as pickle
import cv2
dirs = ['Dataset/yawnMouth', 'Dataset/normalMouth']
countYawn = 40
countNormal = 34
def generate_dataset():
'''countYawn = 0
countNormal = 0
maxY = 0
maxX = 0
minX = 1000
minY = 1000
pos = 0
for dir in dirs:
for filename in os.listdir(dir):
if filename.endswith('.png'):
im = cv2.imread(dir + '/' + filename)
maxX = max(maxX, im.shape[0])
minX = min(minX, im.shape[0])
maxY = max(maxY, im.shape[1])
minY = min(minY, im.shape[1])
if pos == 0:
countYawn +=1
else:
countNormal += 1
pos += 1
print(minX, maxX, minY, maxY, countYawn, countNormal)'''
maxX = 60
maxY = 60
dataset = np.ndarray([countYawn + countNormal, maxY, maxX, 1], dtype='float32')
i = 0
j = 0
pos = 0
for dir in dirs:
for filename in os.listdir(dir):
if filename.endswith('.png'):
im = cv2.imread(dir + '/' + filename)
im = cv2.resize(im, (maxX, maxY))
im = np.dot(np.array(im, dtype='float32'), [[0.2989], [0.5870], [0.1140]])/255
#print(i)
dataset[i, :, :, :] = im[:, :, :]
i += 1
        if pos == 0:
            labels = np.ones([i, 1], dtype=int)
            j = i
        else:
            labels = np.concatenate((labels, np.zeros([i - j, 1], dtype=int)))
        pos += 1
return dataset, labels
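# At this point dataset has shape (countYawn + countNormal, 60, 60, 1) holding grayscale
# values scaled to [0, 1]; labels are 1 for yawning mouths and 0 for normal mouths.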
dataset, labels = generate_dataset()
print("Total = ", len(dataset))
totalCount = countYawn + countNormal
split = int(countYawn*0.8)
splitEnd = countYawn
split2 = countYawn + int(countNormal*0.8)
train_dataset = dataset[:split]
train_labels = np.ones([split, 1], dtype=int)
test_dataset = dataset[split:splitEnd]
test_labels = np.ones([splitEnd - split, 1], dtype=int)
train_dataset = np.concatenate((train_dataset, dataset[splitEnd:split2]))
train_labels = np.concatenate((train_labels, np.zeros([split2 - splitEnd, 1], dtype=int)))
test_dataset = np.concatenate((test_dataset, dataset[split2:]))
test_labels = np.concatenate((test_labels, np.zeros([totalCount - split2, 1], dtype=int)))
pickle_file = 'yawnMouths.pickle'
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
```
#### File: DriverFatigueness/utils/LoggingUtil.py
```python
import logging
class BraceMessage:
def __init__(self, fmt, *args, **kwargs):
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __str__(self):
return self.fmt.format(*self.args, **self.kwargs)
```
#### File: DriverFatigueness/utils/Utils.py
```python
from __future__ import print_function,division
import cv2
import numpy as np
# from Extraction import gray_thresh_limit,eye_center_range
# Functions
def resizing(image, pixel_length, maintain_aspect = True, trace = False):
'''
This
:param image: Image to be Resized
:param pixel_length: Pixel to be Resized
:return: Resized image
'''
if maintain_aspect:
r = pixel_length / image.shape[1]
dim = (pixel_length, int(image.shape[0] * r))
if trace:
print("Original image size: ", image.shape)
print("Maintaining Aspect Ratio Multiplier: ", r)
print("Expected Resize : ", dim)
else:
dim = (pixel_length, pixel_length)
if trace:
print("Original image size: ", image.shape)
print("Expected Resize : ", dim)
# perform the actual resizing of the image and show it
resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
return resized
def probable_mouth(x, y, w, h):
probableWidth = int(w / 2)
probableHeigth = int(h / 3)
shiftX = int((w - probableWidth) / 2)
shiftY = int(probableHeigth * 2)
return ((shiftX, shiftY, probableWidth, probableHeigth))
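# The rectangle returned above spans the middle half of the face width and the bottom
# third of its height, i.e. the region where the mouth is expected to sit.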
def mouth_area_calculator(img, img_gray, x, y, w, h):
gray_thresh_limit = 90
(mx, my, mw, mh) = probable_mouth(x, y, w, h)
cv2.rectangle(img, (mx, my), (mx + mw, my + mh), (0, 255, 0), 2)
mouth_img = img_gray[my:my + mh, mx:mx + mw]
# mouth_img = img[my:my + mh, mx:mx + mw]
cv2.imshow('Mouth image', mouth_img)
ret, thresh = cv2.threshold(mouth_img, gray_thresh_limit, 255, cv2.THRESH_BINARY)
thresh_img = thresh
image, contours, hierarchy = cv2.findContours(thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(thresh_img, contours, -1, (255, 0, 0), 3)
cv2.imshow('thresh image', thresh_img)
return True
#
# def eye_circle_detection(eye_image, area_ration=0.3, trace=False):
# '''
#
# :param eye_image:
# :param method:
# :return:
# '''
# min_radius = int(eye_image.shape[1] * area_ration)
# circle_image = eye_image
# cv2.imshow('Eye Image', eye_image)
# try:
# circles = cv2.HoughCircles(circle_image, cv2.HOUGH_GRADIENT, 1, 20,
# param1=2, param2=20, minRadius=min_radius, maxRadius=0)
# # print("Circle we got: ", circles)
# circles = np.uint16(np.around(circles))
# area = 0
# if len(circles[0, :]) == 1:
# if trace:
# print('Only one circle region found! : ', circles[0, :][0])
# (cx, cy, cradius) = circles[0, :][0]
# cv2.circle(circle_image, (cx, cy), cradius, (255, 0, 0), 2)
# cv2.circle(circle_image, (cx, cy), 2, (255, 0, 0), 3)
# area = cv2.contourArea(circles)
# else:
# for (cx, cy, cradius) in circles[0,]:
# if cx in range(int(eye_frame_center[0] - eye_center_range),
# int(eye_frame_center[0] + eye_center_range + 1)):
# if trace:
# print('Circle found as', (cx, cy, cradius))
# print(' This circle is in range!')
# cv2.circle(circle_image, (cx, cy), cradius, (255, 0, 0), 2)
# cv2.circle(circle_image, (cx, cy), 2, (255, 0, 0), 3)
# area += cv2.contourArea((cx, cy, cradius))
# return area
# except Exception:
# print("Circle not found")
#
# Least Recently Used Page Replacement Algorithm
def stream_init(page_size, init_value = 0):
page = []
for i in range(page_size):
page.append(init_value)
return page
def stream_counter(array, new_value, page_size, init_value = 0):
'''
:param array: A array of values for blink, perclose and yawn
:param new_value: New Value added to be added array
:param page_size: The interval for which counter should work
    :param init_value: for initialising the array
    :return: (array, page, init_flag)
'''
n = len(array)
if n == 0:
        array = stream_init(page_size, init_value)
init_flag = True
elif n <= 2*page_size-1:
init_flag = True
elif n >= 2*page_size:
init_flag = False
present_array = array
array.append(new_value)
page = array[(n-page_size+1):(n+1)]
# if len(array) == (len(present_array)+1):
return (array, page, init_flag)
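# Illustrative run of stream_counter (values chosen for demonstration only):
#   page = stream_init(3)                              # -> [0, 0, 0]
#   history, window, warming_up = stream_counter(page, 5, 3)
#   # history -> [0, 0, 0, 5], window -> [0, 0, 5], warming_up -> True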
```
#### File: misc - work/lookalike/model_script.py
```python
from abc import ABCMeta, abstractmethod
import six
import os
import sys
import time
from sklearn.svm import OneClassSVM
from sklearn.pipeline import Pipeline, Parallel
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
class ExtendedMinMaxScaler(BaseEstimator, TransformerMixin):
def __init__(self, feature_range=(0, 1), na_treatment='replace', na_value=-1, treat_inf_as_na=True,
data_min=None, data_max=None, copy=False, verbose=False):
self.feature_range = feature_range
self.copy = copy
self.verbose = verbose
self.na_treatment = na_treatment
self.na_value = na_value
self.treat_inf_as_na = treat_inf_as_na
if data_max is not None and isinstance(data_max, pd.DataFrame):
self.data_max = data_max.values
elif data_max is not None and isinstance(data_max, np.ndarray):
self.data_max = data_max
else:
print("Max values not in correct format!")
self.data_max = None
if data_min is not None and isinstance(data_min, pd.DataFrame):
self.data_min = data_min.values
elif data_min is not None and isinstance(data_min, np.ndarray):
self.data_min = data_min
else:
print("Min values not in correct format!")
self.data_min = None
def fit(self, X):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, force_all_finite=False)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if self.data_min is not None:
assert len(self.data_min) == X.shape[1]
data_min = self.data_min
else:
data_min = np.nanmin(X, axis=0)
self.data_min = data_min
if self.data_max is not None:
assert len(self.data_max) == X.shape[1]
data_max = self.data_max
else:
data_max = np.nanmax(X, axis=0)
self.data_max = data_max
if self.treat_inf_as_na:
X[np.isinf(X)] = np.nan
if self.na_treatment == 'max':
self.na_treatment_value = data_max
elif self.na_treatment == 'min':
self.na_treatment_value = data_min
elif self.na_treatment == 'max_perc':
self.na_treatment_value = data_max * (1 + self.na_value)
elif self.na_treatment == 'min_perc':
self.na_treatment_value = data_min * (1 - self.na_value)
elif self.na_treatment == 'replace':
self.na_treatment_value = self.na_value
else: # default behaviour mid value of range
self.na_treatment_value = (data_max - data_min) / 2
data_range = data_max - data_min
if self.verbose:
            print('Minimum Values: \n{}'.format(data_min))
print('Maximum Values: \n{}'.format(data_max))
print('Data_range: \n{}'.format(data_range))
print('NA treatment values: \n{}'.format(self.na_treatment_value))
# Do not scale constant features
if isinstance(data_range, np.ndarray):
data_range[data_range == 0.0] = 1.0
elif data_range == 0.:
data_range = 1.
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, force_all_finite=False)
if self.treat_inf_as_na:
X[np.isinf(X)] = np.nan
mask = np.isnan(X)
if X.shape[0] > 1:
na_values = self.na_treatment_value * np.ones((X.shape[0], 1))
#print(X.shape,na_values.shape)
assert X.shape == na_values.shape
else:
na_values = self.na_treatment_value
print(X.shape,na_values.shape)
X[mask] = na_values[mask]
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, force_all_finite=False)
if self.treat_inf_as_na:
X[np.isinf(X)] = np.nan
X -= self.min_
X /= self.scale_
return X
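# Minimal usage sketch for ExtendedMinMaxScaler (toy data, not from the real pipeline;
# 'max_perc' mirrors how OneClassLookalike uses it below):
#   X = np.array([[1.0, 10.0], [2.0, np.nan], [3.0, 30.0]])
#   scaler = ExtendedMinMaxScaler(na_treatment='max_perc', na_value=0.25).fit(X)
#   X_scaled = scaler.transform(X)  # NaNs filled with 1.25 * column max, then min-max scaled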
class OneClassLookalike(six.with_metaclass(ABCMeta)):
def __init__(self, path, folder_name, limit=0.01, minmaxparams=None, oneclassparams=None, na_treatment="mode"):
self.limit = limit
self.minmaxparams = minmaxparams
self.oneclassparams = oneclassparams
self.continuous_stats = None
self.categorical_stats = None
self.na_treatment = na_treatment
self.path = path
self.folder_name = folder_name
self.selected_fields = None
self.selected_index = None
self.input_length = None
self.pipe = None
self.processed_stat = None
self.__get_stats()
def __get_stats(self):
stats = pd.read_csv(os.path.join(self.path, 'continuous.csv'))
stats.columns = [i.lower().replace(' ', '_') for i in stats.columns]
stats = stats[[i for i in stats.columns if not str(i).startswith('unnamed')]]
# print(stats)
stats = stats[stats.folder.isin([self.folder_name])]
        stats[['mean', 'std_dev', 'min', 'max', 'coeffvar']] = stats[
            ['mean', 'std_dev', 'min', 'max', 'coeffvar']].apply(pd.to_numeric, errors='coerce')
# print(stats.isna().sum())
self.continuous_stats = stats
# print(self.continuous_stats.head(10))
# print(self.continuous_stats.dtypes)
# print(self.continuous_stats.columns)
def __coeffofvar(self, df):
col, cvar, na_perc = [], [], []
for col_ in df.columns:
            if np.issubdtype(df[col_].dtype, np.number):
col.append(col_)
val = df[col_].values
cvar_ = np.nanstd(val) / np.nanmean(val)
cvar.append(cvar_)
na_prec_ = np.sum(np.isnan(val)) / val.shape[0]
na_perc.append(na_prec_)
print('Column: {} Coeff OF Var: {} NA perc: {}'.format(col_, cvar_, na_prec_))
opt = {'field_name_header': col, 'coeffvar_pos': cvar, 'na_percentage_pos': na_perc}
return opt
def fit(self, df):
t = time.time()
# if isinstance(df,pd.DataFrame):
df.columns = [i.lower().replace(' ', '_') for i in df.columns]
self.input_length = df.shape[1]
cont_df = df[self.continuous_stats.field_name_header]
cont_df = cont_df.apply(pd.to_numeric, errors='coerce')
# cont_stat = self.__coeffofvar(cont_df)
cont_stat = cont_df.describe().T
# print(cont_stat)
cont_stat['coeffvar_pos'] = cont_stat['std'] / cont_stat['mean']
cont_stat['field_name_header'] = cont_stat.index
cont_stat['na_perc'] = cont_df.isna().sum() / cont_df.shape[0]
cont_stat = pd.merge(self.continuous_stats, cont_stat, how='inner', on='field_name_header')
cont_stat['coeff_diff'] = cont_stat.coeffvar - cont_stat.coeffvar_pos
cont_stat['sel_fields'] = cont_stat['coeff_diff'].apply(
lambda x: 1 if x >= self.limit or x <= -self.limit else 0)
# print(cont_stat.head().T)
# print(np.sum(cont_stat['sel_fields']))
self.processed_stat = cont_stat
sel_fields = cont_stat.loc[cont_stat.sel_fields == 1, :]
self.selected_fields = sel_fields.field_name_header.values.T.tolist()
print('Total Fields Selected {}'.format(len(self.selected_fields)))
self.selected_index = [i for i, v in enumerate(df.columns) if v in self.selected_fields]
print('Finding relevant fields took {:5.4f} secs'.format(time.time() - t))
t = time.time()
minmaxer = ExtendedMinMaxScaler(na_treatment='max_perc', na_value=0.25,
data_min=cont_stat.loc[cont_stat.sel_fields == 1, 'min_x'].values.T,
data_max=cont_stat.loc[cont_stat.sel_fields == 1, 'max_x'].values.T
).fit(cont_df[self.selected_fields])
cont_df = minmaxer.transform(cont_df[self.selected_fields])
print('Scaling data done in {:5.4f} secs\nMaking Classifier'.format(time.time() - t))
t = time.time()
# clf = OneClassSVM().fit(cont_df)
clf = IsolationForest(n_estimators=250, n_jobs=-1, max_features=50,
contamination=0.2, bootstrap=True).fit(cont_df)
# clf = LocalOutlierFactor(n_jobs=-1,contamination=0.3).fit(cont_df)
print('Building One Class model took {:5.4f} secs'.format(time.time() - t))
self.scaler = minmaxer
self.lookalike_scorer = clf
# self.pipe = Pipeline([('minmaxer',minmaxer),('oneclasser', clf)])
# self.pipe.fit(cont_df[self.selected_fields])
return self
def predict(self, X):
# if isinstance(X, np.ndarray):
# if X.shape[1] == self.input_length:
# X_ = X[:, self.selected_index]
#
# else:
# #assert X.shape[1] == len(self.selected_index)
# X_ = X
#
# elif isinstance(X, pd.DataFrame):
# X.columns = [i.lower() for i in X.columns]
# X_ = X[self.selected_fields].values
#
# elif isinstance(X, list):
# if len(X) == self.input_length:
# X_ = [X[i] for i, v in enumerate(X) if i in self.selected_index]
# X_ = np.array(X_)
# else:
# #assert len(X) == len(self.selected_index)
# X_ = np.array(X)
# else:
# try:
# X = check_array(X, ensure_2d=False, force_all_finite=False)
# if X.shape[1] == self.input_length:
# X_ = X[:, self.selected_index]
# else:
# #assert X.shape[1] == len(self.selected_index)
# X_ = X
# except:
try: # internally handled by scikit learn
X_ = self.scaler.transform(X)
X_ = self.lookalike_scorer.decision_function(X_)
return X_
except Exception as e:
raise ValueError('{} as input is neither an numpy array, pandas data frame or a list! {}'.format(X,e))
def hist_ascii(arr, bins = 50, max_length=100, print_space=15):
hist, bin_edges = np.histogram(arr, bins=bins)
hist = hist / hist.max()
hist = hist * max_length
print('{}_|'.format(' ' * print_space))
print('{} |'.format('{:3.4f}'.format(bin_edges[0]).center(print_space)))
    for i, v in enumerate(bin_edges[1:]):
        print('{}_| {}'.format(' ' * print_space, '*' * int(hist[i])))
print('{} |'.format('{:3.4f}'.format(v).center(print_space)))
if __name__ == '__main__':
oneclass = OneClassLookalike(path='/Users/jitins_lab/Documents/work/DataCloud', folder_name='enh_cpg')
t = time.time()
print('Loading data!')
data = pd.read_csv('/Users/jitins_lab/sources/Zeta_data/enh_cpg.csv')
print("Data loaded in {} secs!\nTraining Model Now!".format(time.time() - t))
t = time.time()
oneclass.fit(data)
print('Traning done in {} secs!\nMaking prediction!'.format(time.time() - t))
t = time.time()
predict = oneclass.predict(data)
    print('Prediction took {} secs!\nSample predictions:'.format(time.time() - t))
print(predict[:20, ])
print('Prediction stats: \n Mean: {} Max: {} Median: {} Min: {} Std: {}'.format(
np.mean(predict), np.max(predict), np.median(predict), np.min(predict), np.std(predict)
))
print('Freq counts in the data:\n')
#y, bin = np.histogram(predict, bins=50)
# ii = np.nonzero(y)[0]
# pred = pd.Series(predict)
# import matplotlib.pyplot as plt
# pred.plot.hist(grid=True, bins=50, rwidth=0.9,color='#607c8e')
# plt.title('Bins of values')
# plt.xlabel('Score from classifier')
# plt.ylabel('Counts')
# plt.grid(axis='y', alpha=0.75)
# plt.show()
hist_ascii(predict)
```
#### File: misc - work/lookalike/score_n_store.py
```python
from __future__ import print_function, division
from model_script import OneClassLookalike, hist_ascii
import time
import os
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from pyspark import SparkContext, StorageLevel
from pyspark.conf import SparkConf
from pyspark.sql import SQLContext, SparkSession
# from pyspark.sql import functions as F
from pyspark.sql.types import *
# import re, time, json, itertools, math
path = "/home/ubuntu/ds_vol/shared/"
def score_folder(path, folder):
print('Folder to look for is: {}'.format(folder))
oneclass = OneClassLookalike(path=os.path.join(path, 'analysis'), folder_name=folder)
t = time.time()
print('Loading data!')
data = pd.read_csv(os.path.join(path, 'positive-data/{}/{}.csv'.format(folder, folder)))
print("Data loaded in {:5.4f} secs!\nTraining Model Now!".format(time.time() - t))
t = time.time()
oneclass.fit(data)
print('Traning done in {:5.4f} secs!\nMaking prediction!'.format(time.time() - t))
model_path = os.path.join(path,'analysis','{}_model.dump'.format(folder))
    print('Saving model at: {}'.format(model_path))
joblib.dump(oneclass,model_path)
oneclass = joblib.load(model_path)
t = time.time()
predict = oneclass.predict(data)
print('Prediction took {:5.4f} secs!\nSample predictions:'.format(time.time() - t))
print(predict[:20, ])
print('Prediction stats: \n Mean: {} Max: {} Median: {} Min: {} Std: {}'.format(
np.mean(predict), np.max(predict), np.median(predict), np.min(predict), np.std(predict)
))
t = time.time()
print('Freq counts in the data:\n')
    hist_ascii(predict, bins=30, print_space=20)
# adding score to profile enh_cpg
total_pred = pd.DataFrame({'email_address_md5': data.email_address_md5,
'predict_enh_cpg': predict})
op_path = os.path.join(path, os.path.join('analysis', '{}_positive.csv'.format(folder)))
total_pred.to_csv(op_path)
print('\nOutput saved in {:5.4f} secs at:\n{}'.format(time.time() - t, op_path))
return oneclass
def score_lookalike(sc, path, folder, scorer):
t = time.time()
    # header must be read before the filter below captures it
    header = pd.read_csv(os.path.join(path, 'shared', folder, (folder + '.header'))).columns
    # rdd_df = sc.textFile(os.path.join(path, 'shared', folder, (folder + '.csv*.gz'))) \
    rdd_df = sc.textFile(os.path.join(path, 'shared', folder, (folder + '.50k.gz'))) \
        .map(lambda l: l.replace('"', '').strip().split(",")) \
        .filter(lambda x: len(x) == len(header))
schema = StructType([StructField(i, StringType(), True) for i in header])
# keep_cols=dedup_cols[dedup_cols['FOLDER']==folder].FIELD_NAME
# keep_cols = cont_cols[cont_cols['FOLDER'] == folder].FIELD_NAME_HEADER
df = spark.createDataFrame(rdd_df, schema)
# df = df.repartition(50).persist(StorageLevel.MEMORY_AND_DISK)
print('Scoring data loaded in {:5.4f} secs'.format(time.time()-t))
t = time.time()
    lookalikobj = sc.broadcast(scorer)
results = df.select(list(scorer.selected_fields)).rdd
print(results.take(20))
results = results.map(lambda x: lookalikobj.value.predict(x))
scores = results.collect()
print(scores)
# print(scores.)
    md5s = [row.email_address_md5 for row in df.select('email_address_md5').collect()]
lookalike_scores = pd.DataFrame({'email_address_md5': md5s,
'lookalike_score': scores})
op_path = os.path.join(path, os.path.join('analysis', '{}_alike.csv'.format(folder)))
lookalike_scores.to_csv(op_path)
print('\nOutput saved in {:5.4f} secs at:\n{}'.format(time.time() - t, op_path))
return oneclass
if __name__ == '__main__':
conf = SparkConf().setAppName("ScorerAnalysis")
conf = (conf.setMaster('local[6]')
.set('spark.executor.memory', '3G')
.set('spark.driver.memory', '3G')
.set('spark.driver.maxResultSize', '3G')
.set('spark.debug.maxToStringFields', 1000))
# sc = SparkContext(conf=conf)
spark = SparkSession.builder.master("local[4]").appName("ScorerAnalysis").config(conf=conf).getOrCreate()
sc = spark.sparkContext
sqlContext = SQLContext(sc)
print('Spark standalone with properties:\n', sc.getConf().getAll())
oneclass = score_folder('/Users/jitins_lab/sources/ins_lookalike', 'enh_cpg')
print('Scoring for: {}'.format('enh_cpg'))
score_lookalike(sc, '/Users/jitins_lab/sources/ins_lookalike', 'enh_cpg', oneclass )
sc.cancelAllJobs()
```
#### File: misc - work/lookalike/spark_perform.py
```python
from sklearn.datasets import load_boston, load_breast_cancer, load_iris, load_digits
from sklearn.utils.random import sample_without_replacement
from sklearn.model_selection import train_test_split
# basic dependencies
import pandas as pd
import numpy as np
# model dependencies
from sklearn.neighbors import NearestNeighbors
from sklearn.svm import OneClassSVM
# calling spark requirements
from pyspark import SparkConf, SparkContext
# fetching data
def get_data(dataname):
if dataname == 'iris':
data = load_iris()
elif dataname == 'boston':
data = load_boston()
elif dataname == 'cancer':
data = load_breast_cancer()
df = pd.concat([pd.DataFrame(data.data), pd.DataFrame(data.target)], axis=1)
names = [i for i in data.feature_names]
names.append('target')
df.columns = names
print('Data:\n')
print(df.head())
print('\nSummary Stats:\n')
print(df.describe())
return df
# getting data
df = get_data('iris')
# generating spark context
# getting spark ready
conf = (SparkConf()
.setMaster("local[4]")
.setAppName("class app")
.set("spark.executor.memory", "1g"))
sc = SparkContext(conf=conf)
# splitting dta for testing
X = df.iloc[:120, 0:3].values
X_test = df.iloc[120:, 0:3].values
# putting part of data to Nearest Neighbours
nbrs = NearestNeighbors(n_neighbors=5, algorithm='ball_tree').fit(X)
# generating oneclass classifier for the data
onecls = OneClassSVM().fit(X)
# outputs
distances, indices = nbrs.kneighbors(X_test)
print('Neighbours:\n{}\n{}'.format(distances, indices))
print('Oneclass:\n{}'.format(onecls.predict(X_test)))
# broadcasting neighbours model to spark partitions
bc_knnobj = sc.broadcast(nbrs)
# broadcasting oneclass model to spark partitions
bc_oneobj = sc.broadcast(onecls)
# getting rest data to spark rdd
# testvec = sc.parallelize([[[0,0]],[[1,1]],[[-1, -1]]])
# have to deal with this list comprehension
testvec = sc.parallelize([[i] for i in X_test.tolist()], 3)
# getting output of neighbours from the spark
results = testvec.map(lambda x: bc_knnobj.value.kneighbors(x))
print('\nNeighbour results: \n')
print(results.glom().collect())
# broadcasting oneclass model to spark partitions
bc_oneobj = sc.broadcast(onecls)
# getting output of oneclass from the spark
results2 = testvec.map(lambda x: bc_oneobj.value.predict(x))
print('\nOne Class results: \n')
print(results2.collect())
# import defined object
from model import TestPartition
# Now the test module
tst = TestPartition().fit(np.arange(10))
#broadcasting it
bc_tstobj = sc.broadcast(tst)
#testing it
print('\nTest object\'s results: \n')
print(testvec.map(lambda x: bc_tstobj.value.test_power(x,p=4)).collect())
```
#### File: misc - work/oneclass/model_script.py
```python
from abc import ABCMeta, ABC
# import six
import os
import sys
import time
from sklearn.svm import OneClassSVM
from sklearn.pipeline import Pipeline, Parallel
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
class ExtendedMinMaxScaler(BaseEstimator, TransformerMixin):
def __init__(self, feature_range=(-1, 1), na_treatment='replace', na_value=-1, treat_inf_as_na=True,
data_min=None, data_max=None, copy=False, verbose=False):
self.feature_range = feature_range
self.copy = copy
self.verbose = verbose
self.na_treatment = na_treatment
self.na_value = na_value
self.treat_inf_as_na = treat_inf_as_na
if data_max is not None and isinstance(data_max, pd.DataFrame):
self.data_max = data_max.values
elif data_max is not None and isinstance(data_max, np.ndarray):
self.data_max = data_max
else:
print("Max values not in correct format!")
self.data_max = None
if data_min is not None and isinstance(data_min, pd.DataFrame):
self.data_min = data_min.values
elif data_min is not None and isinstance(data_min, np.ndarray):
self.data_min = data_min
else:
print("Min values not in correct format!")
self.data_min = None
def fit(self, X):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, force_all_finite=False)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if self.data_min is not None:
assert len(self.data_min) == X.shape[1]
data_min = self.data_min
else:
data_min = np.nanmin(X, axis=0)
self.data_min = data_min
if self.data_max is not None:
assert len(self.data_max) == X.shape[1]
data_max = self.data_max
else:
data_max = np.nanmax(X, axis=0)
self.data_max = data_max
if self.treat_inf_as_na:
X[np.isinf(X)] = np.nan
if self.na_treatment == 'max':
self.na_treatment_value = data_max
elif self.na_treatment == 'min':
self.na_treatment_value = data_min
elif self.na_treatment == 'max_perc':
self.na_treatment_value = data_max * (1 + self.na_value)
elif self.na_treatment == 'min_perc':
self.na_treatment_value = data_min * (1 - self.na_value)
elif self.na_treatment == 'replace':
self.na_treatment_value = self.na_value
else: # default behaviour mid value of range
self.na_treatment_value = (data_max - data_min) / 2
data_range = data_max - data_min
if self.verbose:
            print('Minimum Values: \n{}'.format(data_min))
print('Maximum Values: \n{}'.format(data_max))
print('Data_range: \n{}'.format(data_range))
print('NA treatment values: \n{}'.format(self.na_treatment_value))
# Do not scale constant features
if isinstance(data_range, np.ndarray):
data_range[data_range == 0.0] = 1.0
elif data_range == 0.:
data_range = 1.
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, force_all_finite=False)
if self.treat_inf_as_na:
X[np.isinf(X)] = np.nan
mask = np.isnan(X)
if X.shape[0] > 1:
na_values = self.na_treatment_value * np.ones((X.shape[0], 1))
# print(X.shape,na_values.shape)
assert X.shape == na_values.shape
else:
na_values = self.na_treatment_value
print(X.shape, na_values.shape)
X[mask] = na_values[mask]
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, force_all_finite=False)
if self.treat_inf_as_na:
X[np.isinf(X)] = np.nan
X -= self.min_
X /= self.scale_
return X
class OneClassLookalike(ABC):
def __init__(self, path, folder_name, method='if', limit=0.01, minmaxparams=None, oneclassparams=None,
na_treatment="'max_perc'"):
self.method = method
if self.method == 'if':
self.batch_size = 5 * 64
if self.method == 'svm':
self.batch_size = 128
if self.method == 'lof':
self.batch_size = 1000
self.batch_data = None
self.limit = limit
self.minmaxparams = minmaxparams
self.oneclassparams = oneclassparams
self.continuous_stats = None
self.categorical_stats = None
self.na_treatment = na_treatment
self.path = path
self.folder_name = folder_name
self.selected_fields = None
self.selected_index = None
self.input_length = None
self.pipe = None
self.processed_stat = None
self.scaler = None
self.lookalike_scorer = None
self._get_stats()
def _get_stats(self):
stats = pd.read_csv(os.path.join(self.path, 'continuous.csv'))
stats.columns = [i.lower().replace(' ', '_') for i in stats.columns]
stats = stats[[i for i in stats.columns if not str(i).startswith('unnamed')]]
# print(stats)
# stats = stats[stats.folder.isin([self.folder_name])]
        stats[['mean', 'std_dev', 'min', 'max', 'coeffvar']] = stats[
            ['mean', 'std_dev', 'min', 'max', 'coeffvar']].apply(pd.to_numeric, errors='coerce')
# print(stats.isna().sum())
self.continuous_stats = stats
# print(self.continuous_stats.head(10))
# print(self.continuous_stats.dtypes)
# print(self.continuous_stats.columns)
def _coeffofvar(self, df):
col, cvar, na_perc = [], [], []
for col_ in df.columns:
            if np.issubdtype(df[col_].dtype, np.number):
col.append(col_)
val = df[col_].values
cvar_ = np.nanstd(val) / np.nanmean(val)
cvar.append(cvar_)
na_prec_ = np.sum(np.isnan(val)) / val.shape[0]
na_perc.append(na_prec_)
print('Column: {} Coeff OF Var: {} NA perc: {}'.format(col_, cvar_, na_prec_))
opt = {'field_name_header': col, 'coeffvar_pos': cvar, 'na_percentage_pos': na_perc}
return opt
def fit(self, df):
t = time.time()
# if isinstance(df,pd.DataFrame):
df.columns = [i.lower().replace(' ', '_') for i in df.columns]
self.input_length = df.shape[1]
field_names = self.continuous_stats[self.continuous_stats.field_name_header.isin(df.columns)].field_name_header
cont_df = df[field_names]
cont_df = cont_df.apply(pd.to_numeric, errors='coerce')
# cont_stat = self.__coeffofvar(cont_df)
cont_stat = cont_df.describe().T
# print(cont_stat)
cont_stat['coeffvar_pos'] = cont_stat['std'] / cont_stat['mean']
cont_stat['field_name_header'] = cont_stat.index
cont_stat['na_perc'] = cont_df.isna().sum() / cont_df.shape[0]
cont_stat = pd.merge(self.continuous_stats, cont_stat, how='inner', on='field_name_header')
cont_stat['coeff_diff'] = cont_stat.coeffvar - cont_stat.coeffvar_pos
cont_stat['sel_fields'] = cont_stat['coeff_diff'].apply(
lambda x: 1 if x >= self.limit or x <= -self.limit else 0)
# print(cont_stat.head().T)
# print(np.sum(cont_stat['sel_fields']))
self.processed_stat = cont_stat
sel_fields = cont_stat.loc[cont_stat.sel_fields == 1, :]
self.selected_fields = sel_fields.field_name_header.values.T.tolist()
print('Total Fields Selected {}'.format(len(self.selected_fields)))
self.selected_index = [i for i, v in enumerate(df.columns) if v in self.selected_fields]
print('Finding relevant fields took {:5.4f} secs'.format(time.time() - t))
t = time.time()
minmaxer = ExtendedMinMaxScaler(na_treatment=self.na_treatment, na_value=0.25,
data_min=cont_stat.loc[cont_stat.sel_fields == 1, 'min_x'].values.T,
data_max=cont_stat.loc[cont_stat.sel_fields == 1, 'max_x'].values.T
).fit(cont_df[self.selected_fields])
cont_df = minmaxer.transform(cont_df[self.selected_fields])
print('Scaling data done in {:5.4f} secs\nMaking Classifier'.format(time.time() - t))
t = time.time()
if self.method == 'svm':
clf = OneClassSVM(cache_size=128, coef0=0.01, max_iter=250, random_state=12339, degree=2,
shrinking=False).fit(cont_df)
elif self.method == 'if':
clf = IsolationForest(n_estimators=100, n_jobs=-1, max_features=20,
contamination=0.05, bootstrap=True, random_state=12339).fit(cont_df)
elif self.method == 'lof':
clf = LocalOutlierFactor(n_jobs=-1, contamination=0.1, n_neighbors=15, leaf_size=5, p=1).fit(cont_df)
print('Building One Class model took {:5.4f} secs'.format(time.time() - t))
self.scaler = minmaxer
self.lookalike_scorer = clf
# self.pipe = Pipeline([('minmaxer',minmaxer),('oneclasser', clf)])
# self.pipe.fit(cont_df[self.selected_fields])
# initializing batc nan array for future
self.batch_data = np.empty((self.batch_size, len(self.selected_fields)))
self.batch_data[:] = np.nan
return self
def _predict(self, X):
if isinstance(X, np.ndarray):
if X.shape[1] == self.input_length:
X_ = X[:, self.selected_index]
else:
assert X.shape[1] == len(self.selected_index)
X_ = X
elif isinstance(X, pd.DataFrame):
X.columns = [i.lower() for i in X.columns]
X_ = X[self.selected_fields].values
else:
try:
X = np.array(list(X), dtype=np.float32)
try:
shp = X.shape[1]
except:
X = X.reshape(-1, 1).T
shp = X.shape[1]
if shp == self.input_length:
X_ = X[:, self.selected_index]
else:
# assert X.shape[1] == len(self.selected_index)
X_ = X
# except:
# try: # internally handled by scikit learn
# X_ = self.scaler.transform(X)
# X_ = self.lookalike_scorer.decision_function(X_)
# return X_
except Exception as e:
# print(e)
raise ValueError('{} as input is neither an numpy array, pandas data frame or a list!'.format(X))
X_ = self.scaler.transform(X_)
if self.method == 'svm':
X_ = self.lookalike_scorer.decision_function(X_)
elif self.method == 'if':
X_ = self.lookalike_scorer.decision_function(X_)
elif self.method == 'lof':
X_ = self.lookalike_scorer.predict(X_)
return X_
def _extract_data(self, dict_fields):
# dict_fields = {'X1': 0.1, 'X2': 0.2, 'X3': 0.4, 'X4': 0.09}
alist = [[self.selected_fields.index(k), v] for k, v in dict_fields.items()]
# alist
# [[4, 0.09], [3, 0.4], [1, 0.1], [2, 0.2]]
X = np.zeros((1, len(self.selected_fields)))
X[:] = np.nan
for k, v in alist: X[:,k] = v
# dict_fields = {}
# for col_ in dict_fields.keys():
# if col_ in self.selected_fields:
# val = dict_fields[col_]
return X
def score(self, user_data):
"""
Scorer::score()
def score(user_data):
return [(user_id, score), (user_id, score), (user_id, score), ...]
Parameters:
user_data: an iterable of (user_id, dict) tuples. Each dict is a sparse representation of a user's properties and most recent behavioral events.
Returns: List of (user_id, score) tuples, where score is a float.
"""
user_ids = None
X = np.zeros((1, len(self.selected_fields)))
if isinstance(user_data, dict):
user_ids = user_data.keys()
for user_id in user_ids:
X = np.vstack((X, self._extract_data(user_data[user_id])))
# X = X[1:, ]
elif isinstance(user_data, list):
user_ids = []
for i in user_data:
user_ids.append(i[0])
X = np.vstack((X, self._extract_data(i[1])))
# X = X[1:, ]
elif isinstance(user_data, pd.DataFrame):
if 'email_address_md5' in user_data.columns:
user_ids = user_data['email_address_md5']
X = user_data[[i for i in user_data.columns if i != 'email_address_md5']]
else:
print('Data dosent have email md5s. Please check the input! Returning None')
return None
        elif isinstance(user_data, np.ndarray):
            X = user_data
            assert X.shape[1] == len(self.selected_fields)
            print('No user ids provided. Using row indices as user ids!')
            user_ids = list(range(X.shape[0]))
else:
raise ValueError('Input data {} is not iterable'.format(user_data))
#
# if len(user_ids) > self.batch_size:
# pass
# else:
score = self._predict(X).tolist()
if len(user_ids) == len(score)-1:
score = score[1:]
else:
assert len(user_ids) == len(score)
scores = list(zip(user_ids, score))
return scores
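    # Usage sketch for score() (names and values below are hypothetical; the keys of each
    # user dict must be column names present in self.selected_fields):
    #   scorer = OneClassLookalike(path='<stats dir>', folder_name='<folder>').fit(train_df)
    #   user_data = [('user_1', {'some_selected_field': 0.3}),
    #                ('user_2', {'some_selected_field': 1.7})]
    #   scores = scorer.score(user_data)   # -> [('user_1', 0.12), ('user_2', -0.05)]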
def scorer_hints(self):
# print('Required Fields: \n{}'.format(self.selected_fields))
# print('Batch Size: {} '.format(self.batch_size))
stats = self.continuous_stats
dict_fields = stats[stats.field_name_header.isin(self.selected_fields)][['folder', 'field_name_header']] \
.groupby('folder').agg(lambda x: x.tolist()).to_dict()
dict_fields = {'acxiom': dict_fields}
return self.batch_size, self.selected_fields, dict_fields
def hist_ascii(arr, bins=50, max_length=100, print_space=15):
hist, bin_edges = np.histogram(arr, bins=bins)
hist = hist / hist.max()
hist = hist * max_length
print('{}_|'.format(' ' * print_space))
print('{} |'.format('{:3.4f}'.format(bin_edges[0]).center(print_space)))
    for i, v in enumerate(bin_edges[1:]):
        print('{}_| {}'.format(' ' * print_space, '*' * int(hist[i])))
print('{} |'.format('{:3.4f}'.format(v).center(print_space)))
# if __name__ == '__main__':
# oneclass = OneClassLookalike(path='/Users/jitins_lab/Documents/work/DataCloud', folder_name='enh_cpg')
# t = time.time()
# print('Loading data!')
# data = pd.read_csv('/Users/jitins_lab/sources/Zeta_data/enh_cpg.csv')
# print("Data loaded in {} secs!\nTraining Model Now!".format(time.time() - t))
# t = time.time()
# oneclass.fit(data)
# print('Training done in {} secs!\nMaking prediction!'.format(time.time() - t))
# t = time.time()
# predict = oneclass.predict(data)
# print('Prediction took {} secs!\nSample predictions:'.format(time.time() - t))
# print(predict[:20, ])
# print('Prediction stats: \n Mean: {} Max: {} Median: {} Min: {} Std: {}'.format(
# np.mean(predict), np.max(predict), np.median(predict), np.min(predict), np.std(predict)
# ))
# print('Freq counts in the data:\n')
#
# # y, bin = np.histogram(predict, bins=50)
# # ii = np.nonzero(y)[0]
# # pred = pd.Series(predict)
# # import matplotlib.pyplot as plt
# # pred.plot.hist(grid=True, bins=50, rwidth=0.9,color='#607c8e')
# # plt.title('Bins of values')
# # plt.xlabel('Score from classifier')
# # plt.ylabel('Counts')
# # plt.grid(axis='y', alpha=0.75)
# # plt.show()
# hist_ascii(predict)
```
#### File: relm/codebase/utils.py
```python
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix,accuracy_score,roc_auc_score,f1_score
from sklearn.metrics import roc_curve,cohen_kappa_score,log_loss,adjusted_mutual_info_score
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error, explained_variance_score
from scipy.stats import entropy
import matplotlib.pyplot as plt
import seaborn as sns
# objective dictionary
def mape_1(y_true, y_pred):
abs_true = np.absolute(y_true)
abs_pred = np.absolute(y_true - y_pred)
n = y_true.shape[0]
return 1 - np.sum((abs_pred / abs_true)) / n
# todo:
# 1) percentage concordant discordant
# 2) Kendall's tau
# 3) gamma
# 4) k
objectives = {
'f1_score': f1_score,
'accuracy': accuracy_score,
'loss': log_loss,
'cohen_kappa': cohen_kappa_score,
'f1_score_multi': f1_score,
'accuracy_multi': accuracy_score,
'loss_multi': log_loss,
'cohen_kappa_multi': cohen_kappa_score,
'1_mape': mape_1,
'mse': mean_squared_error,
'mae': mean_absolute_error,
'mi': adjusted_mutual_info_score,
'kld': entropy
}
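# Illustrative usage sketch (not part of the original module; the arrays below
# are made up): a metric can be pulled out of the `objectives` registry by name
# and applied to a pair of label vectors.
#
#     y_true = np.array([0, 1, 1, 0])
#     y_pred = np.array([0, 1, 0, 0])
#     metric = objectives['accuracy']     # -> sklearn accuracy_score
#     metric(y_true, y_pred)              # 0.75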
# function for plotting ROC curves of models on train and test data
def plot_ROC(y_train_true, y_train_prob, y_test_true, y_test_prob, threshold=None, path='', name=None):
    '''
    A function to plot the ROC curve for train labels and test labels.
    Uses the best threshold found on the train set to classify items in the test set.
    '''
sns.set('talk', 'whitegrid', 'dark', font_scale=1,
rc={"lines.linewidth": 2, 'grid.linestyle': '--'})
fpr_train, tpr_train, thresholds_train = roc_curve(y_train_true, y_train_prob, pos_label=True)
sum_sensitivity_specificity_train = tpr_train + (1 - fpr_train)
best_threshold_id_train = np.argmax(sum_sensitivity_specificity_train)
best_threshold = thresholds_train[best_threshold_id_train]
best_fpr_train = fpr_train[best_threshold_id_train]
best_tpr_train = tpr_train[best_threshold_id_train]
if threshold is None:
y_train = y_train_prob >= best_threshold
else:
y_train = y_train_prob >= threshold
cm_train = confusion_matrix(y_train_true, y_train)
acc_train = accuracy_score(y_train_true, y_train)
    # AUC should be computed from the probabilities, not the thresholded labels
    auc_train = roc_auc_score(y_train_true, y_train_prob)
f1_score_train = f1_score(y_train_true, y_train)
print('Train Accuracy: {}'.format(acc_train))
print('Train AUC: {}'.format(auc_train))
print('Train F1 Score: {}'.format(f1_score_train))
print('Train Confusion Matrix:')
print(cm_train)
fig = plt.figure(figsize=(15, 8))
ax = fig.add_subplot(121)
curve1 = ax.plot(fpr_train, tpr_train)
curve2 = ax.plot([0, 1], [0, 1], color='navy', linestyle='--')
dot = ax.plot(best_fpr_train, best_tpr_train, marker='o', color='black')
ax.text(best_fpr_train, best_tpr_train, s='(%.3f,%.3f)' % (best_fpr_train, best_tpr_train))
plt.xlim([-0.01, 1.0])
plt.ylim([0.0, 1.01])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve (Train), AUC = %.4f' % auc_train)
fpr_test, tpr_test, thresholds_test = roc_curve(y_test_true, y_test_prob, pos_label=True)
if threshold is None:
y_test = y_test_prob >= best_threshold
else:
y_test = y_test_prob >= threshold
cm_test = confusion_matrix(y_test_true, y_test)
acc_test = accuracy_score(y_test_true, y_test)
    # AUC should be computed from the probabilities, not the thresholded labels
    auc_test = roc_auc_score(y_test_true, y_test_prob)
f1_score_test = f1_score(y_test_true, y_test)
print('Test Accuracy: {}'.format(acc_test))
print('Test AUC: {}'.format(auc_test))
print('Test F1 Score: {}'.format(f1_score_test))
print('Test Confusion Matrix:')
print(cm_test)
tpr_score = float(cm_test[1][1]) / (cm_test[1][1] + cm_test[1][0])
fpr_score = float(cm_test[0][1]) / (cm_test[0][0] + cm_test[0][1])
ax2 = fig.add_subplot(122)
curve1 = ax2.plot(fpr_test, tpr_test)
curve2 = ax2.plot([0, 1], [0, 1], color='navy', linestyle='--')
dot = ax2.plot(fpr_score, tpr_score, marker='o', color='black')
ax2.text(fpr_score, tpr_score, s='(%.3f,%.3f)' % (fpr_score, tpr_score))
plt.xlim([-0.01, 1.0])
plt.ylim([0.0, 1.01])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve (Test), AUC = %.4f' % auc_test)
if len(path) != 0 and name is not None:
place = '{}/{}.png'.format(path, 'ROC RELM Agent' if name is None else 'ROC {} Agent'.format(name))
plt.savefig(place, dpi=500)
plt.show()
return best_threshold
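# Illustrative call (hypothetical variable names, not from the original code):
# given predicted probabilities from any binary classifier, `plot_ROC` draws
# both curves and returns the train-set threshold maximising TPR + (1 - FPR),
# which can then be reused to binarise the test probabilities.
#
#     best_thr = plot_ROC(y_train_true=y_tr, y_train_prob=p_tr,
#                         y_test_true=y_te, y_test_prob=p_te,
#                         path='plots', name='baseline')
#     y_te_label = p_te >= best_thr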
# function to plot pair grid for latent features
def plot_pair(tr_mat):
df_plot = pd.DataFrame(tr_mat)
sns.set(style="ticks")
g = sns.PairGrid(df_plot)
g = g.map_upper(plt.scatter)
g = g.map_lower(sns.kdeplot, cmap="Blues_d")
g = g.map_diag(sns.kdeplot, lw=3, legend=False)
    return g
def plot_learning(population_training, path='', name=None, style='seaborn-white'):
plt.style.use(style)
plt_data = np.array(population_training)
iteration = plt_data.shape[0] + 1
trainarray = np.arange(1, iteration, 1)
ticks = np.arange(1, iteration, 10)
if np.all(ticks != (iteration - 1)):
ticks = np.append(ticks, iteration - 1)
scores = -plt_data[:, 2, :]
fig = plt.figure(figsize=(20, 10), dpi=80, facecolor='w', edgecolor='k')
line_mean, = plt.plot(trainarray, np.mean(scores, axis=1))
line_min, = plt.plot(trainarray, np.min(scores, axis=1))
line_max, = plt.plot(trainarray, np.max(scores, axis=1))
plt.legend([line_mean, line_min, line_max], ['mean', 'min', 'max'])
plt.xlabel('Generations ', fontsize=20)
    plt.ylabel('Loss Scores', fontsize=16)
plt.xticks(ticks, fontsize=14,rotation=90)
plt.title('Log Loss across Generations', fontsize=24)
if len(path) != 0 and name is not None:
plt.savefig('{}/Log_loss_{}.png'.format(path, name))
plt.show()
ticks = np.arange(1, iteration, int(iteration / 10))
if np.all(ticks != (iteration - 1)):
ticks = np.append(ticks, iteration - 1)
fig = plt.figure(figsize=(20, 10), dpi=80, facecolor='w', edgecolor='k')
plt.subplot(1, 2, 1)
scores = plt_data[:, 0, :]
# line_mean, = plt.plot(trainarray, np.percentile(scores,q=25,axis=1) )
line_min, = plt.plot(trainarray, np.median(scores, axis=1))
line_max, = plt.plot(trainarray, np.max(scores, axis=1))
plt.legend([line_min, line_max], ['median', 'max'])
plt.xlabel('Generations ', fontsize=20)
    plt.ylabel('F1 Scores', fontsize=16)
plt.xticks(ticks, fontsize=14, rotation=90)
plt.title('F1 across Generations', fontsize=24)
plt.subplot(1, 2, 2)
scores = plt_data[:, 1, :]
# line_mean, = plt.plot(trainarray, np.percentile(scores,q=25,axis=1) )
line_min, = plt.plot(trainarray, np.median(scores, axis=1))
line_max, = plt.plot(trainarray, np.max(scores, axis=1))
plt.legend([line_min, line_max], ['median', 'max'])
plt.xlabel('Generations ', fontsize=20)
plt.ylabel('Accuracy', fontsize=16)
plt.xticks(ticks, fontsize=14, rotation=90)
plt.title('Accuracy across Generations', fontsize=24)
plt.suptitle('Classification Behaviour of Population', fontsize=36)
if len(path) != 0 and name is not None:
plt.savefig('{}/Classification Behaviour_{}.png'.format(path, name))
plt.show()
``` |
{
"source": "jkapila/pygemodels",
"score": 3
} |
#### File: pygemodels/examples/example_tests.py
```python
from gemodels import ModelStats
from gemodels.growth import GrowthModel
import numpy as np
import pandas as pd
if __name__ == '__main__':
a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
b = [1, 2, 3, 4, 5, 6, 7, 9, 9, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9]
assert len(a) == len(b)
print('Generic Model Summary!')
mod_stat = ModelStats()
mod_stat.score(a, b, len(a) - 2, 2)
mod_stat.summary()
print('Fitting Logistic Model')
a, b, c = 1.632, 0.5209, 0.0137
# setting up an exponential decay function
def decay(x, intercept, factor, exponent):
return intercept - factor * np.exp(-exponent * x)
# a function to generate exponential decay with gaussian noise
def generate(intercept, factor, exponent):
x = np.linspace(0.5, 500, num=100)
y = decay(x, intercept, factor, exponent) + np.random.normal(loc=0, scale=0.05, size=100)
return (x, y)
# plot and generate some data
np.random.seed(1)
x, y = generate(a, b, c)
X = np.vstack((x,y)).T
print('Data has shape: ', X.shape)
print('Data head: \n', X[:5,:])
mod_growth = GrowthModel()
mod_growth.fit(X)
mod_growth.summary()
steps = 10
print('Making Prediction for steps: ',steps)
print('Predictions: ',mod_growth.predict(steps))
mod_growth.plot()
url = 'https://apmonitor.com/che263/uploads/Main/stats_data.txt'
data = pd.read_csv(url)
X = data[['x', 'y']].values
mod_growth = GrowthModel('exponential')
mod_growth.fit(X)
mod_growth.summary()
mod_growth.plot(sigma=0.2)
```
#### File: gemodels/growth/interface.py
```python
from gemodels import BaseModelInterface, ModelStats, ModelError, StatError
from gemodels import check_data, check_data_1d
from .models import all_func
import numpy as np
from scipy.optimize import curve_fit, minimize, least_squares
from matplotlib import pyplot as plt
from scipy.stats import t as statt, f as statf, chi2 as statx2, nbinom as statnb
class GrowthModel(BaseModelInterface):
"""
    Boilerplate for all growth models.
"""
def __init__(self, model='logistic', method='curve', method_params=dict(),
alter_strategy=None, valid_steps=0, confidence_criteria='one-student',
confidence_alpha=0.05, saddle_tol=1e-4, inverse=False, copy=True):
"""
Growth Models
        :param model: 'logistic', 'richard', 'bass', 'chapmanrichard', 'gompretz', 'weibull'
        :param method: 'curve', 'linear', 'minimize'  # 'stochastic' in progress
:param method_params: extra params while training
:param alter_strategy: Manipulate data before fitting 'ma', 'dtrend' in progress
:param valid_steps: data points to do validity on actual data.
:param confidence_criteria: 'covariance'
:param confidence_alpha: float value to define confidence interval
:param saddle_tol: Tolerance to find the saddle point / stable state of the curve
:param inverse: a flag for true and false.
:param copy: Copy data with the model object
"""
super().__init__()
self.model_name = "Growth"
self.model_type = model.title()
self._model = all_func[model]
self.method = method
self.method_params = method_params
self._func = None
self._pfunc = self._model['parametric']
self._parameters = self._model['parameters']
self.parameters = None
self.parameters_std = None
self.stats = ModelStats(name='{} {} Model'.format(self.model_type, self.model_name),
p_alpha=confidence_alpha)
self.alter_strategy = alter_strategy
self.inverse = inverse
self.valid_steps = valid_steps
self.conf_criteria = confidence_criteria
self.saddle_tol = saddle_tol
self.state_flag = copy
def _alter_data(self, y, t):
# todo: implement moving average and all here
if self.alter_strategy is None:
return y, t
def _get_saddle_point(self):
# todo: make this
return 0
def _get_data(self, X=None, use_alter=True):
if X is None and self.state_flag:
X = self.state
y, t = check_data(X)
if use_alter:
y, t = self._alter_data(y, t)
return y, t
# def __repr__(self):
# # todo: make this
def _fit_curve(self, X, **kwargs):
y, t = self._get_data(X)
opt, covar_mat = curve_fit(self._func, t, y)
# setting optimal parameters
self.parameters = self._parameters._make(opt)
# getting covariance based standard deviations
sigma_ab = np.sqrt(np.diagonal(covar_mat))
self.parameters_std = self._parameters._make(sigma_ab)
print('Curve Fitted on {} {} Model with Parameters'.format(
self.model_type, self.model_name), self.parameters)
return y, self._pfunc(t, self.parameters)
def _fit_linear(self, X, **kwargs):
y, t = self._get_data(X)
opt = []
self.parameters = self._parameters._make(opt)
return y, self.predict(t)
def _fit_stochastic(self, X, **kwargs):
y, t = self._get_data(X)
opt = []
self.parameters = self._parameters._make(opt)
return y, self.predict(t)
def _fit_minimize(self, X, **kwargs):
y, t = self._get_data(X)
opt = []
self.parameters = self._parameters._make(opt)
return y, self.predict(t)
def fit(self, X, **model_args):
"""
:param X:
:param model_args:
:return:
"""
if self.method == "curve":
self._func = self._model['curve']
y_act, y_fit = self._fit_curve(X)
elif self.method == "linear":
self._func = self._model['curve']
y_act, y_fit = self._fit_linear(X)
elif self.method == "minimize":
self._func = self._model['curve']
y_act, y_fit = self._fit_minimize(X, **model_args)
elif self.method == "stochastic":
self._func = self._model['curve']
y_act, y_fit = self._fit_stochastic(X)
else:
raise ModelError('Not a Valid Method for fitting')
self.stats.score(y_act=y_act, y_fit=y_fit,
ndf=len(y_act) - self._model['df_model'] + 1,
mdf=self._model['df_model'])
if self.state_flag:
self.state = X
def summary(self):
self.stats.summary(return_df=False)
def _get_steps(self, steps, use_data=False, smoothed=False, breaks=100):
"""
        Step formulation, checking, and smoothing.
        :param steps: integer, list, or 1D numpy array
        :param use_data: use the existing data and append the requested steps to it
        :param smoothed: whether to smooth the steps
        :param breaks: number of points used when smoothing
        :return: 1D numpy array of steps
"""
if use_data and self.state_flag:
_, steps = self._get_data()
if smoothed:
breaks = breaks if len(steps) < 0.75 * breaks else int(2 * len(steps))
steps = np.linspace(int(0.95 * np.min(steps)), int(1.05 * np.max(steps)), breaks)
return steps
elif use_data:
            raise ModelError("Data is not stored with the model. Flag 'use_data' won't work!")
if self.state_flag: # based on value of data
_ , t = self._get_data()
t_steps = int(np.max(t))
else: # based on degree of freedoms
t_steps = self.stats.ndf + self.stats.mdf - 1
if isinstance(steps, int) and smoothed:
# This is crude as of now need better methods
steps = np.linspace(t_steps + 1, t_steps + steps + 1, breaks)
elif isinstance(steps,int) and not smoothed:
steps = np.arange(t_steps + 1, t_steps + steps + 1)
elif (isinstance(steps, list) or isinstance(steps, tuple)) and len(steps) == 2:
steps = np.linspace(steps[0], steps[1], breaks)
elif smoothed:
breaks = breaks if len(steps) < 0.75 * breaks else int(2 * len(steps))
steps = np.linspace(int(0.95 * np.min(steps)), int(1.05 * np.max(steps)), breaks)
else:
steps = check_data_1d(steps)
return steps
def predict(self, steps, response=False, sigma=1.96, breaks=100):
steps = self._get_steps(steps, breaks=breaks)
y_fit = self._pfunc(steps, self.parameters)
if response:
params = [self.parameters, self.parameters_std]
uparam = self._parameters(*map(lambda x: x[0] + sigma * x[1], zip(*params)))
lparam = self._parameters(*map(lambda x: x[0] - sigma * x[1], zip(*params)))
fit_upper = self._pfunc(steps, uparam)
fit_lower = self._pfunc(steps, lparam)
return y_fit, fit_upper, fit_lower
return y_fit
def plot(self, title=None, new_data=None, plot_range=None, confidence=True, confidence_band=True, sigma=1.96,
breaks=100, fig_size=(10, 7)):
title = title if title is not None else 'Estimated {} {} Model'.format(self.model_type, self.model_name)
try:
y_act, t = self._get_data(new_data)
except Exception as e:
            raise ModelError('No data to plot, or the data is not in the right format! Aborting! Error:\n', e)
# Confidence level
alpha = int(100 - self.stats.confidence_alpha * 100)
# plotting actual data
# plt.figure(figsize=fig_size, dpi=300)
plt.scatter(t, y_act, s=3, label='Data')
# print("Actual Steps: ", t)
# getting smoothed breaks
if plot_range is None:
plot_range = t
t_smoothed = self._get_steps(plot_range, smoothed=True, breaks=breaks)
y_fit = self._pfunc(t_smoothed, self.parameters)
# print("Smooth Steps: ", t_smoothed)
# plot the regression
plt.plot(t_smoothed, y_fit, c='black',
label='{} {} Model'.format(self.model_type, self.model_name))
if confidence:
params = [self.parameters, self.parameters_std]
uparam = self._parameters(*map(lambda x: x[0] + sigma * x[1], zip(*params)))
lparam = self._parameters(*map(lambda x: x[0] - sigma * x[1], zip(*params)))
fit_upper = self._pfunc(t_smoothed, uparam)
fit_lower = self._pfunc(t_smoothed, lparam)
plt.plot(t_smoothed, fit_lower, c='orange', label='{}% Confidence Region'.format(alpha))
plt.plot(t_smoothed, fit_upper, c='orange')
if confidence_band:
lpb, upb = confidence_band_t(func=self._pfunc, params=self.parameters,
y_act=y_act, t=t,
t_breaks=t_smoothed,
alpha=self.stats.confidence_alpha)
plt.plot(t_smoothed, lpb, 'k--', label='{}% Prediction Band'.format(alpha))
plt.plot(t_smoothed, upb, 'k--')
plt.ylabel('Estimated Values')
plt.xlabel('Data Steps')
plt.title(title)
plt.legend(loc='best')
# save and show figure
plt.savefig('{}.png'.format(title))
plt.show()
def plot_forecast(self, steps, plot_range=None, title=None, use_trianing=True,
confidence=True, return_forecast=False, sigma=1.96, fig_size=(10, 7)):
# plt.figure(figsize=fig_size, dpi=300)
title = title if title is not None else 'Estimated {} {} Model'.format(self.model_type, self.model_name)
steps = self._get_steps(steps, use_data=use_trianing)
# Confidence level
alpha = int(100 - self.stats.confidence_alpha * 100)
res = self.predict(steps, response=confidence, sigma=sigma)
if confidence:
plt.plot(steps, res[0], 'black', label='Forecast Values')
plt.plot(steps, res[1], 'k--', label='{}% Prediction Band'.format(alpha))
plt.plot(steps, res[2], 'k--')
else:
plt.plot(steps, res, 'black', label='Forecast Values')
plt.ylabel('Estimated Values')
plt.xlabel('Data Steps')
plt.title(title)
plt.legend(loc='best')
plt.show()
if return_forecast:
return res
```
#### File: pygemodels/gemodels/__init__.py
```python
from pkg_resources import get_distribution, DistributionNotFound
import logging
try:
# Change here if project is renamed and does not equal the package name
dist_name = __name__
__version__ = get_distribution(dist_name).version
except DistributionNotFound:
__version__ = 'unknown'
finally:
del get_distribution, DistributionNotFound
import numpy as np
from scipy.stats import t as statt, f as statf, chi2 as statx2, nbinom as statnb
# defining stat function to avoid dependency on any other package
class StatError(Exception):
"""Base Stat Error"""
pass
class ModelError(Exception):
"""Base Model Error"""
pass
def check_data(data):
if isinstance(data, np.ndarray):
if len(data.shape) == 1:
            # flatten to 1D values and generate matching step indices 1..n
            y = data.reshape(-1)
            t = np.arange(1, len(y) + 1)
else:
# assuming first array is time and second is values
# todo: validate this later
y = data[:, 1]
t = data[:, 0]
elif isinstance(data, list):
y = np.array(data)
        t = np.arange(1, len(y) + 1)
else:
raise ModelError('Cannot Parse Data : \n', data)
return y, t
def check_data_1d(x):
if isinstance(x, list):
x = np.array(x)
elif isinstance(x, np.ndarray):
if len(x.shape) > 1:
raise StatError('Data must be 1D only.')
return x
def confidence_band_t(func, params, y_act, t, t_breaks=None, alpha=0.05, breaks=100):
"""
Making a prediction band based on t statistic
:param y_act: Actual Values
:param t: Steps of the data
:param func: parameterize function
:param params: trained parameters
:param alpha: confidence interval/ significance level
:param t_breaks: Smoothed Steps for finer lines
:param breaks: Breaks of prediction to increase smoothness of curve
:return: lower predicted band and upper predicted band
"""
N = y_act.size # data sample size
var_n = len(params) # number of parameters
y_fit = func(t, params)
if t_breaks is None:
        t_breaks = np.linspace(np.min(t), np.max(t), breaks)  # 'breaks' is a point count, not a step size
t_mean = np.mean(t)
dof = max(0, N - var_n)
# Quantile of Student's t distribution for p=(1-alpha/2)
q = statt.ppf(1.0 - alpha / 2.0, dof)
# Stdev of an individual measurement
se = np.sqrt(1. / (N - var_n) * np.sum((y_act - y_fit) ** 2))
# Auxiliary definitions
sx = (t_breaks - t_mean) ** 2
sxd = np.sum((t - t_mean) ** 2)
# Predicted values (best-fit model)
yp = func(t_breaks, params)
# Prediction band
dy = q * se * np.sqrt(1.0 + (1.0 / N) + (sx / sxd))
# Upper & lower prediction bands.
lpb, upb = yp - dy, yp + dy
return lpb, upb
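# Illustrative sketch (hypothetical names, added for clarity): the band works
# for any parametric curve f(t, params) of the kind used by the growth models.
#
#     def line(t, params):
#         a, b = params               # params only needs len() and unpacking
#         return a * t + b
#
#     t = np.arange(1, 21)
#     y = line(t, (0.5, 1.0)) + np.random.normal(scale=0.1, size=t.shape)
#     lpb, upb = confidence_band_t(line, (0.5, 1.0), y_act=y, t=t,
#                                  t_breaks=np.linspace(1, 20, 100))
#
# lpb/upb then bracket roughly (1 - alpha) of new observations at each step.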
class ModelStats(object):
def __init__(self, name=None, p_alpha=None,
tolerance=1e-4, keep_stat=True, digits=2):
# Class variables
# self.confidence_measure = confidence_measure
self.identifier = name
self.confidence_alpha = p_alpha if p_alpha is not None else 0.05
self.tolerance = tolerance
self.keep_stat = keep_stat
self.digits = digits
# Deviation Measures
self.me = 0 # Mean Error
self.bias = 0 # Multiplicative bias
self.mae = 0 # Mean Absolute Error
self.mad = 0 # Mean Absolute Deviance
self.mape = 0 # Mean Average Percentage Error
self.rmse = 0 # Root Mean Square Error
# may implement median versions and tweedie deviance,
        # correlation coefficient, log errors, skill score, LESP
# refer : https://www.cawcr.gov.au/projects/verification/#Methods_for_dichotomous_forecasts
#
# Model Measures
self.r2_val = 0 # R2
self.adjr2_val = 0 # Adj R2
self.aic = 0 # AIC
self.bic = 0 # BIC
self.fstat = (0, 0) # F-statistics
self.ndf = 0 # Degree of freedom
self.mdf = 0 # Model Degree of Freedom
self.loglik = 0 # Log Likelihood # May generalize this
def _deviation_measures(self, y_act, y_fit):
error = y_act - y_fit
n = len(y_act)
self.me = np.mean(error)
self.bias = np.mean(y_fit) / np.mean(y_act)
        self.mae = np.mean(np.abs(error))  # np.mean already averages over n
self.mad = np.mean(np.abs(error - self.me))
self.mape = np.mean(np.abs(error / y_act)) * 100
self.rmse = np.sqrt(np.mean(error ** 2))
self.r2_val = 1.0 - (np.sum(error ** 2) / ((n - 1.0) * np.var(y_act, ddof=1)))
def _model_measures(self, y_act, y_fit, ndf, mdf):
error = y_act - y_fit
n = len(y_act)
fdist = statf(n - 1, n - 1)
alpha = self.confidence_alpha
# degree of freedom
self.ndf = ndf
self.mdf = mdf
# Adjusted R squared
self.adjr2_val = 1 - (np.var(error, ddof=1) * (ndf - 1)) / (np.var(y_act, ddof=1) * (ndf - mdf - 1))
# F statistics
f_val = np.var(y_fit, ddof=1) / np.var(y_act, ddof=1)
        f_p_value = 2 * min(fdist.cdf(f_val), 1 - fdist.cdf(f_val))  # two-sided p-value at the observed F
self.fstat = (f_val, f_p_value)
        # Likely to add Levene's, Bartlett's, Brown–Forsythe variants and Box's M test
# self.loglik = None
# self.aic = None
# self.bic = None
def score(self, y_act, y_fit, ndf, mdf):
y_act = check_data_1d(y_act)
y_fit = check_data_1d(y_fit)
self._deviation_measures(y_act, y_fit)
self._model_measures(y_act, y_fit, ndf, mdf)
def summary(self, return_df=False):
s = "*" * 80 + '\n'
s += " Model Summary Statistics"
s += ' - ' + self.identifier + '\n' if self.identifier is not None else "\n"
s += "*" * 80 + '\n'
s += 'Mean Error (ME) : {:5.4f} \n'.format(self.me)
s += 'Multiplicative Bias : {:5.4f} \n'.format(self.bias)
s += 'Mean Abs Error (MAE) : {:5.4f} \n'.format(self.mae)
s += 'Mean Abs Deviance Error (MAD) : {:5.4f} \n'.format(self.mad)
s += 'Mean Abs Percentage Error(MAPE) : {:5.4f} \n'.format(self.mape)
s += 'Root Mean Squared Error (RMSE) : {:5.4f} \n'.format(self.rmse)
s += 'R-Squared : {:5.4f} \n'.format(self.r2_val)
s += 'Adj R-Squared : {:5.4f} \n'.format(self.adjr2_val)
s += 'F-Statistic : {:5.4f} \n'.format(self.fstat[0])
s += 'Prob (F-Statistic) : {:5.4f} \n'.format(self.fstat[1])
s += 'Degree of Freedom - Residual : {:d} \n'.format(self.ndf)
s += 'Degree of Freedom - Model : {:d} \n'.format(self.mdf)
# s += 'Log Likelihood : {:5.4f} \n'.format(self.loglik)
# s += 'Akaike Info. Criterion (AIC) : {:5.4f} \n'.format(self.aic)
# s += 'Bayesian Info. Criterion (BIC) : {:5.4f} \n'.format(self.bic)
s += "*" * 80 + '\n'
print(s)
if return_df:
return None
class BaseModelInterface:
"""
Class defining unified interface for all Models
"""
def __init__(self):
self.model_name = None
self.model_type = None
self.error_type = None
self.state = None
self.parameters = None
self.ge_func = None
self.confidence = None
self.stats = None
self.path = {}
def plot_model(self):
raise NotImplementedError
# for series, data in self.path.items():
# if series == 'time':
# continue
# plt.plot(self.path['time'], data, label=series)
# plt.legend(loc=0)
# plt.grid()
# plt.title("{} Model of {} Type".format(self.model_name, self.model_type))
def plot_confidence(self):
raise NotImplementedError
``` |
{
"source": "jkapila/py-git-package",
"score": 3
} |
#### File: py-git-package/tests/test_example_module.py
```python
import unittest
from sourcecode import BaseExceptions, ExampleModule
from .base import BaseTestCase
class TestExampleModule(BaseTestCase):
"""Testing operation of the ExampleModule class"""
def test_init_example_module(self):
"""Ensures that the twine class can be instantiated with a file"""
test_data_file = self.path + "test_data/.json"
try:
mod = ExampleModule()
mod = mod.__repr__() + test_data_file
print(mod)
except BaseExceptions:
raise ValueError("An Error is raised here")
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jkapilian/geoclaw",
"score": 3
} |
#### File: tests/particles/maketopo.py
```python
from __future__ import absolute_import
from clawpack.geoclaw.topotools import Topography
from numpy import *
def maketopo():
"""
Output topography file for the entire domain
"""
nxpoints = 201
nypoints = 241
xlower = 0.e0
xupper = 100.e0
ylower = 0.e0
yupper = 50.e0
outfile= "island.tt3"
topography = Topography(topo_func=topo)
topography.x = linspace(xlower,xupper,nxpoints)
topography.y = linspace(ylower,yupper,nypoints)
topography.write(outfile, topo_type=3, Z_format="%22.15e")
def makeqinit():
"""
Create qinit data file
"""
nxpoints = 101
nypoints = 101
xlower = -50.e0
xupper = 50.e0
yupper = 50.e0
ylower = -50.e0
outfile= "hump.xyz"
topography = Topography(topo_func=qinit)
topography.x = linspace(xlower,xupper,nxpoints)
topography.y = linspace(ylower,yupper,nypoints)
topography.write(outfile, topo_type=1)
def topo(x,y):
"""
Island
"""
ze = -((x-40.)**2 + (y-35.)**2)/20.
#z_island = where(ze>-10., 100.*exp(ze), 0.)
z_island = where(ze>-10., 150.*exp(ze), 0.)
z = -50 + z_island
return z
def qinit(x,y):
"""
Dam break
"""
from numpy import where
eta = where(x<10, 40., 0.)
return eta
if __name__=='__main__':
maketopo()
makeqinit()
```
#### File: tests/particles/regression_tests.py
```python
r"""Particles regression test for GeoClaw
To create new regression data use
`python regression_tests.py True`
"""
from __future__ import absolute_import
import os
import sys
import unittest
import shutil
import numpy
import clawpack.geoclaw.test as test
import clawpack.geoclaw.topotools as topotools
class ParticlesTest(test.GeoClawRegressionTest):
r"""Particles regression test for GeoClaw"""
def setUp(self):
super(ParticlesTest, self).setUp()
start_dir = os.getcwd()
# Make topography
shutil.copy(os.path.join(self.test_path, "maketopo.py"),
self.temp_path)
os.chdir(self.temp_path)
os.system('python maketopo.py')
os.chdir(start_dir)
def runTest(self, save=False, indices=(2, 3)):
r"""Test particles example
Note that this stub really only runs the code and performs no tests.
"""
# Write out data files
self.load_rundata()
self.write_rundata_objects()
# Run code
self.run_code()
# Perform tests
self.check_gauges(save=save, gauge_id=1, indices=(1, 2))
self.check_gauges(save=save, gauge_id=2, indices=(1, 2))
self.success = True
if __name__=="__main__":
if len(sys.argv) > 1:
if bool(sys.argv[1]):
# Fake the setup and save out output
test = ParticlesTest()
try:
test.setUp()
test.runTest(save=True)
finally:
test.tearDown()
sys.exit(0)
unittest.main()
``` |
{
"source": "jkaraguesian/NeuralForceField",
"score": 2
} |
#### File: nff/analysis/cp3d.py
```python
import os
import pickle
import random
import logging
import json
import numpy as np
import torch
from tqdm import tqdm
from sklearn.metrics import roc_auc_score, auc, precision_recall_curve
from sklearn.metrics.pairwise import cosine_similarity as cos_sim
from rdkit import Chem
from nff.utils import fprint
from nff.data.features import get_e3fp
LOGGER = logging.getLogger()
LOGGER.disabled = True
FP_FUNCS = {"e3fp": get_e3fp}
def get_pred_files(model_path):
"""
Get pickle files with model predictions, fingerprints,
learned weights, etc.
Args:
model_path (str): path where the prediction files are
saved
Returns:
pred_files (list[str]): prediction file paths
"""
pred_files = []
for file in os.listdir(model_path):
# should have the form <split>_pred_<metric>.pickle
# or pred_<metric>.pickle
splits = ["train", "val", "test"]
starts_split = any([file.startswith(f"{split}_pred")
for split in splits])
        starts_pred = file.startswith("pred")
if (not starts_split) and (not starts_pred):
continue
if not file.endswith("pickle"):
continue
pred_files.append(os.path.join(model_path, file))
return pred_files
def load_preds(pred_files):
"""
Load the predictions from the predcition files
and put them in a dictionary.
Args:
pred_files (list[str]): prediction file paths
Returns:
pred (dic): dictionary of the form {file_name:
predictions} for each file name.
"""
pred = {}
for file in tqdm(pred_files):
with open(file, "rb") as f_open:
this_pred = pickle.load(f_open)
name = file.split("/")[-1].split(".pickle")[0]
pred[name] = this_pred
return pred
def get_att_type(dic):
"""
Figure out what kind of attention and how many heads were used.
Args:
dic (dict): prediction dictionary
Returns:
num_heads (int): number of attention heads
is_linear (bool): whether linear attention was used (as opposed
to pair-wise).
"""
num_weights_list = []
num_confs_list = []
for sub_dic in dic.values():
num_learned_weights = sub_dic['learned_weights'].shape[0]
num_confs = sub_dic['boltz_weights'].shape[0]
if num_learned_weights in num_weights_list:
continue
if num_confs == 1:
continue
num_weights_list.append(num_learned_weights)
num_confs_list.append(num_confs)
if len(num_confs_list) == 2:
break
is_linear = ((num_weights_list[1] / num_weights_list[0])
== (num_confs_list[1] / num_confs_list[0]))
if is_linear:
num_heads = int(num_weights_list[0] / num_confs_list[0])
else:
num_heads = int((num_weights_list[0] / num_confs_list[0] ** 2))
return num_heads, is_linear
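# Worked example of the check above (numbers are hypothetical): if two species
# with 10 and 20 conformers produce 20 and 40 learned weights, the weight count
# scales linearly with the conformer count (40 / 20 == 20 / 10), so attention
# is linear with 20 / 10 = 2 heads. If they instead produce 200 and 800
# weights, 800 / 200 = 4 != 20 / 10, i.e. pair-wise attention with
# 200 / 10 ** 2 = 2 heads.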
def annotate_confs(dic):
"""
Annotate conformers with "head_weights" (the attention weights assigned
to each conformer, split up by head, and also summed over conformer pairs
if using pairwise attention), "max_weight_conf" (the conformer with the
    highest attention weight of any conformer among all heads), and
    "max_weight_head" (the head that gave this conformer its weight).
Args:
dic (dict): prediction dictionary
Returns:
None
"""
num_heads, is_linear = get_att_type(dic)
for sub_dic in dic.values():
num_confs = sub_dic['boltz_weights'].shape[0]
if is_linear:
split_sizes = [num_confs] * num_heads
else:
split_sizes = [num_confs ** 2] * num_heads
learned = torch.Tensor(sub_dic['learned_weights'])
head_weights = torch.split(learned, split_sizes)
# if it's not linear, sum over conformer pairs to
# get the average importance of each conformer
if not is_linear:
head_weights = [i.reshape(num_confs, num_confs).sum(0)
for i in head_weights]
# the conformers with the highest weight, according to each
# head
max_weight_confs = [head_weight.argmax().item()
for head_weight in head_weights]
# the highest conformer weight assigned by each head
max_weights = [head_weight.max()
for head_weight in head_weights]
# the head that gave out the highest weight
max_weight_head = np.argmax(max_weights)
# the conformer with the highest of all weights
max_weight_conf = max_weight_confs[max_weight_head]
sub_dic["head_weights"] = {i: weights.tolist() for i, weights in
enumerate(head_weights)}
sub_dic["max_weight_conf"] = max_weight_conf
sub_dic["max_weight_head"] = max_weight_head
def choices_from_pickle(paths):
"""
Get conformer choices as RDKit mols from pickle paths.
Args:
paths (list[str]): conformer path for each of the two
molecules being compared.
Returns:
fp_choices (list[list[rdkit.Chem.rdchem.Mol]]):
RDKit mol choices for each of the two molecules.
"""
fps_choices = []
for path in paths:
with open(path, "rb") as f:
dic = pickle.load(f)
choices = [sub_dic["rd_mol"] for sub_dic in dic["conformers"]]
for mol in choices:
mol.SetProp("_Name", "test")
fps_choices.append(choices)
return fps_choices
def funcs_for_external(external_fp_fn,
summary_path,
rd_path):
"""
If requesting an external method to get and compare
fingerprints, then use this function to get a dictionary
of pickle paths for each smiles, and the external
fingerprinting function.
Args:
external_fp_fn (str): name of the fingerprinting function
you want to use
summary_path (str): path of the file with the summary
dictionary of species properties, their pickle
paths, etc.
rd_path (str): path to the folder that has all your
pickles with RDKit mols.
Returns:
pickle_dic (dict): dictionary of the form {smiles:
full_pickle_path} for each smiles
func (callable): fingerprinting function
"""
func = FP_FUNCS[external_fp_fn]
with open(summary_path, "r") as f:
summary = json.load(f)
pickle_dic = {}
for key, sub_dic in summary.items():
pickle_path = sub_dic.get("pickle_path")
if pickle_path is None:
continue
pickle_dic[key] = os.path.join(rd_path, pickle_path)
return pickle_dic, func
def sample_species(dic, classifier, max_samples):
"""
Sample species to compare to each other.
Args:
dic (dict): prediction dictionary
classifier (bool): whether your model is a classifier
max_samples (int): maximum number of pairs to compare
Returns:
sample_dics (dict): dictionary with different sampling
methods as keys, and the corresponding sampled species
as values.
"""
if not classifier:
# if it's not a classifier, you'll just randomly sample
# different species pairs and compare their fingerprints
keys = list(dic.keys())
samples = [np.random.choice(keys, max_samples),
np.random.choice(keys, max_samples)]
sample_dics = {"random_mols": samples}
else:
# if it is a classifier, you'll want to compare species
# that are both hits, both misses, or one hit and one miss
pos_keys = [smiles for smiles, sub_dic in dic.items()
if sub_dic['true'] == 1]
neg_keys = [smiles for smiles, sub_dic in dic.items()
if sub_dic['true'] == 0]
intra_pos = [np.random.choice(pos_keys, max_samples),
np.random.choice(pos_keys, max_samples)]
intra_neg = [np.random.choice(neg_keys, max_samples),
np.random.choice(neg_keys, max_samples)]
inter = [np.random.choice(pos_keys, max_samples),
np.random.choice(neg_keys, max_samples)]
sample_dics = {"intra_pos": intra_pos,
"intra_neg": intra_neg,
"inter": inter}
return sample_dics
def calc_sim(dic,
smiles_0,
smiles_1,
func,
pickle_dic,
conf_type,
fp_kwargs):
"""
    Calculate the similarity between conformers of two different species.
Args:
dic (dict): prediction dictionary
smiles_0 (str): first SMILES string
smiles_1 (str): second SMILES string
external_fp_fn (str): name of external fingerprinting function
func (callable): actual external fingerprinting function
pickle_dic (dict): dictionary of the form {smiles:
full_pickle_path} for each smiles
conf_type (str): whether you're comparing conformers picked
randomly for each species or based on their attention weight.
fp_kwargs (dict): any keyword arguments you may need for your
fingerprinting function.
Returns:
sim (float): cosine similarity between two conformers, one from
each species.
"""
sub_dic_0 = dic[smiles_0]
sub_dic_1 = dic[smiles_1]
if func is not None:
paths = [pickle_dic[smiles_0], pickle_dic[smiles_1]]
fp_0_choices, fp_1_choices = choices_from_pickle(paths)
else:
fp_0_choices = sub_dic_0["conf_fps"]
fp_1_choices = sub_dic_1["conf_fps"]
if conf_type == "att":
conf_0_idx = sub_dic_0["max_weight_conf"]
conf_1_idx = sub_dic_1["max_weight_conf"]
fp_0 = fp_0_choices[conf_0_idx]
fp_1 = fp_1_choices[conf_1_idx]
elif conf_type == "random":
fp_0 = random.choice(fp_0_choices)
fp_1 = random.choice(fp_1_choices)
fps = [fp_0, fp_1]
for j, fp in enumerate(fps):
if fp_kwargs is None:
fp_kwargs = {}
if isinstance(fp, Chem.rdchem.Mol):
fps[j] = func(fp, **fp_kwargs)
sim = cos_sim(fps[0].reshape(1, -1),
fps[1].reshape(1, -1)).item()
return sim
def attention_sim(dic,
max_samples,
classifier,
seed,
external_fp_fn=None,
summary_path=None,
rd_path=None,
fp_kwargs=None):
"""
Calculate similarities of the conformer fingerprints of different
pairs of species.
Args:
dic (dict): prediction dictionary
max_samples (int): maximum number of pairs to compare
classifier (bool): whether your model is a classifier
seed (int): random seed
external_fp_fn (str, optional): name of the fingerprinting
function you want to use. If none is provided then the model's
generated fingerprint will be used.
summary_path (str, optional): path of the file with the summary
dictionary of species properties, their pickle
paths, etc.
rd_path (str, optional): path to the folder that has all your
pickles with RDKit mols.
fp_kwargs (dict, optional): any keyword arguments you need
            when calling an external fingerprinter.
Returns:
        fp_dics (dict): dictionary that gives similarity scores
between random conformers for each species, and also
between the conformers assigned the highest attention
weight. Has the form {sample_type: {"att": float,
"random": float}}, where sample_type describes what kind
of species are being sampled (e.g. both hits, both misses,
one hit and one miss, etc.)
"""
np.random.seed(seed)
random.seed(seed)
    # get an external fingerprinting function if asked
if external_fp_fn is not None:
pickle_dic, func = funcs_for_external(external_fp_fn,
summary_path,
rd_path)
else:
pickle_dic = None
func = None
sample_dics = sample_species(dic, classifier, max_samples)
fp_dics = {}
# go through each method of sampling species and calculate their
# conformer similarities
for key, samples in sample_dics.items():
fp_dics[key] = {}
conf_types = ['att', 'random']
for conf_type in conf_types:
fp_sims = []
for i in tqdm(range(len(samples[0]))):
smiles_0 = samples[0][i]
smiles_1 = samples[1][i]
sim = calc_sim(dic=dic,
smiles_0=smiles_0,
smiles_1=smiles_1,
func=func,
pickle_dic=pickle_dic,
conf_type=conf_type,
fp_kwargs=fp_kwargs)
fp_sims.append(sim)
fp_dics[key][conf_type] = np.array(fp_sims)
return fp_dics
def analyze_data(bare_data, analysis):
"""
Do analysis of different fingerprints (e.g. mean, standard deviation,
std deviation of the mean). Uses a recursive method to go through
each sub-dictionary until an array is found.
Args:
bare_data (dict): dictionary with bare fingerprint similarities
analysis (dict): same form as `bare_data` but replaces arrays
with a dictionary analyzing their properties.
Returns:
None
"""
for key, val in bare_data.items():
if isinstance(val, np.ndarray):
analysis[key] = {"mean": np.mean(val),
"std": np.std(val),
"std_of_mean": (np.std(val)
/ val.shape[0] ** 0.5)}
else:
if key not in analysis:
analysis[key] = {}
analyze_data(val, analysis[key])
def report_delta(bare_dic):
"""
For a binary task, report analysis on the difference between
similarity among hits and similarity between hits and misses.
Args:
bare_dic (dict): bare dictionary of similarities
Returns:
None
"""
for key, dic in bare_dic.items():
fprint(f"Results for {key}")
fprint("+/- indicates standard deviation of the mean")
# attention and random differences in similarity
delta_att = dic['intra_pos']['att'] - dic['inter']['att']
delta_rand = dic['intra_pos']['random'] - dic['inter']['random']
# compute mean for attention
delta_att_mean = np.mean(delta_att)
# std deviation on the mean
delta_att_std = np.std(delta_att) / (len(delta_att)) ** 0.5
# same for random
delta_rand_mean = np.mean(delta_rand)
delta_rand_std = np.std(delta_rand) / (len(delta_rand)) ** 0.5
# delta delta is the difference in deltas between random and attention,
# a measure of how much attention is learning
delta_delta_mean = delta_att_mean - delta_rand_mean
delta_delta_std = ((np.var(delta_att) + np.var(delta_rand)) ** 0.5
/ (len(delta_att)) ** 0.5)
fprint("Delta att: %.4f +/- %.4f" % (delta_att_mean, delta_att_std))
fprint("Delta rand: %.4f +/- %.4f" % (delta_rand_mean, delta_rand_std))
fprint("Delta delta: %.4f +/- %.4f" %
(delta_delta_mean, delta_delta_std))
fprint("\n")
def conf_sims_from_files(model_path,
max_samples,
classifier,
seed,
external_fp_fn=None,
summary_path=None,
rd_path=None,
fp_kwargs=None):
"""
Get similarity among species according to predictions of different
models, given a folder with all of the prediction pickles.
Args:
model_path (str): path to the folder where the prediction pickles
are saved.
max_samples (int): maximum number of pairs to compare
classifier (bool): whether your model is a classifier
seed (int): random seed
external_fp_fn (str, optional): name of the fingerprinting
function you want to use. If none is provided then the model's
generated fingerprint will be used.
summary_path (str, optional): path of the file with the summary
dictionary of species properties, their pickle
paths, etc.
rd_path (str, optional): path to the folder that has all your
pickles with RDKit mols.
fp_kwargs (dict, optional): any keyword arguments you need
            when calling an external fingerprinter.
Returns:
analysis (dict): dictionary of the form {prediction_name:
similarity_dic} for the name of each prediction file.
bare_data (dict): same idea as `analysis` but with the full
set of similarities between each molecule.
"""
fprint("Loading pickle files...")
pred_files = get_pred_files(model_path)
pred = load_preds(pred_files)
bare_data = {}
fprint("Calculating fingerprint similarities...")
for key in tqdm(pred):
dic = pred[key]
annotate_confs(dic)
fp_dics = attention_sim(dic=dic,
max_samples=max_samples,
classifier=classifier,
seed=seed,
external_fp_fn=external_fp_fn,
summary_path=summary_path,
rd_path=rd_path,
fp_kwargs=fp_kwargs)
bare_data[key] = fp_dics
# analyze the bare data
analysis = {}
analyze_data(bare_data, analysis)
if classifier:
report_delta(bare_data)
return analysis, bare_data
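# Illustrative call (the path is a placeholder, not a real directory): compare
# attention-selected vs. randomly-selected conformers for every prediction
# pickle saved under a trained model folder.
#
#     analysis, raw = conf_sims_from_files(model_path='model_dir',
#                                          max_samples=500,
#                                          classifier=True,
#                                          seed=0)
#
# `analysis` holds mean/std summaries and `raw` the underlying similarity
# arrays, keyed by prediction file name.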
def get_scores(path, avg_metrics=['auc', 'prc-auc']):
"""
Load pickle files that contain predictions and actual values, using
models evaluated by different validation metrics, and use the predictions
to calculate and save PRC and AUC scores.
Args:
path (str): path to the saved model folder, which contains the
pickle files.
avg_metrics (list[str]): metrics to use in score averaging
Returns:
scores (list): list of dictionaries containing the split being
used, the validation metric used to get the model, and
the PRC and AUC scores.
"""
files = [i for i in os.listdir(path) if i.endswith(".pickle")
and i.startswith("pred")]
if not files:
return
scores = []
for file in files:
with open(os.path.join(path, file), "rb") as f:
dic = pickle.load(f)
split = file.split(".pickle")[0].split("_")[-1]
from_metric = file.split("pred_")[-1].split(f"_{split}")[0]
pred = [sub_dic['pred'] for sub_dic in dic.values()]
true = [sub_dic['true'] for sub_dic in dic.values()]
# then it's not a binary classification problem
if any([i not in [0, 1] for i in true]):
return
auc_score = roc_auc_score(y_true=true, y_score=pred)
precision, recall, thresholds = precision_recall_curve(
y_true=true, probas_pred=pred)
prc_score = auc(recall, precision)
scores.append({"split": split,
"from_metric": from_metric,
"auc": auc_score,
"prc": prc_score})
if avg_metrics is None:
avg_metrics = [score["from_metric"] for score in scores]
all_auc = [score["auc"] for score in scores if score['from_metric']
in avg_metrics]
all_prc = [score["prc"] for score in scores if score['from_metric']
in avg_metrics]
avg_auc = {"mean": np.mean(all_auc),
"std": np.std(all_auc)}
avg_prc = {"mean": np.mean(all_prc),
"std": np.std(all_prc)}
scores.append({"from_metric": "average",
"auc": avg_auc,
"prc": avg_prc,
"avg_metrics": avg_metrics})
save_path = os.path.join(path, "scores_from_metrics.json")
with open(save_path, "w") as f:
json.dump(scores, f, indent=4, sort_keys=True)
return scores
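# Illustrative call (placeholder path): compute ROC-AUC and PRC-AUC for every
# pred_*.pickle inside a saved model folder and write scores_from_metrics.json
# next to them.
#
#     scores = get_scores('trained_model_dir', avg_metrics=['auc', 'prc-auc'])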
def recursive_scoring(base_path, avg_metrics=['auc', 'prc-auc']):
"""
Recursively search in a base directory to find sub-folders that
have pickle files that can be used for scoring. Apply `get_scores`
to these sub-folders.
Args:
base_path (str): base folder to search in
avg_metrics (list[str]): metrics to use in score averaging
Returns:
None
"""
files = [i for i in os.listdir(base_path) if i.endswith(".pickle")
and i.startswith("pred")]
if files:
print(f"Analyzing {base_path}")
get_scores(base_path, avg_metrics)
for direc in os.listdir(base_path):
direc_path = os.path.join(base_path, direc)
if not os.path.isdir(direc_path):
continue
files = [i for i in os.listdir(direc_path) if i.endswith(".pickle")
and i.startswith("pred")]
if files:
print(f"Analyzing {direc_path}")
get_scores(direc_path, avg_metrics)
continue
folders = [os.path.join(direc_path, i) for i in
os.listdir(direc_path)]
folders = [i for i in folders if os.path.isdir(i)]
if not folders:
continue
for folder in folders:
recursive_scoring(folder)
```
#### File: nff/data/dataset.py
```python
import torch
import numbers
import numpy as np
import copy
import nff.utils.constants as const
from copy import deepcopy
from sklearn.utils import shuffle as skshuffle
from sklearn.model_selection import train_test_split
from ase import Atoms
from ase.neighborlist import neighbor_list
from torch.utils.data import Dataset as TorchDataset
from tqdm import tqdm
from nff.data.parallel import (featurize_parallel, NUM_PROCS,
add_e3fp_parallel, add_kj_ji_parallel,
add_bond_idx_parallel)
from nff.data.features import ATOM_FEAT_TYPES, BOND_FEAT_TYPES
from nff.data.features import add_morgan as external_morgan
from nff.data.features import featurize_rdkit as external_rdkit
from nff.data.graphs import (get_bond_idx, reconstruct_atoms,
get_neighbor_list, generate_subgraphs,
DISTANCETHRESHOLDICT_Z, get_angle_list,
add_ji_kj, make_dset_directed)
class Dataset(TorchDataset):
"""Dataset to deal with NFF calculations.
Attributes:
        props (dict of lists): dictionary containing all properties of the system.
            Keys are the names of the properties and values are lists (one entry per
            geometry), so a single value is given by `props[key][idx]`. The only
            mandatory key is 'nxyz'. If inputting
energies, forces or hessians of different electronic states, the quantities
should be distinguished with a "_n" suffix, where n = 0, 1, 2, ...
Whatever name is given to the energy of state n, the corresponding force name
must be the exact same name, but with "energy" replaced by "force".
Example:
props = {
'nxyz': [np.array([[1, 0, 0, 0], [1, 1.1, 0, 0]]), np.array([[1, 3, 0, 0], [1, 1.1, 5, 0]])],
'energy_0': [1, 1.2],
'energy_0_grad': [np.array([[0, 0, 0], [0.1, 0.2, 0.3]]), np.array([[0, 0, 0], [0.1, 0.2, 0.3]])],
'energy_1': [1.5, 1.5],
'energy_1_grad': [np.array([[0, 0, 1], [0.1, 0.5, 0.8]]), np.array([[0, 0, 1], [0.1, 0.5, 0.8]])],
'dipole_2': [3, None]
}
Periodic boundary conditions must be specified through the 'offset' key in props.
Once the neighborlist is created, distances between
atoms are computed by subtracting their xyz coordinates
and adding to the offset vector. This ensures images
of atoms outside of the unit cell have different
distances when compared to atoms inside of the unit cell.
This also bypasses the need for a reindexing.
units (str): units of the energies, forces etc.
"""
def __init__(self,
props,
units='kcal/mol',
check_props=True,
do_copy=True):
"""Constructor for Dataset class.
Args:
props (dictionary of lists): dictionary containing the
properties of the system. Each key has a list, and
all lists have the same length.
units (str): units of the system.
"""
if check_props:
if do_copy:
self.props = self._check_dictionary(deepcopy(props))
else:
self.props = self._check_dictionary(props)
else:
self.props = props
self.units = units
self.to_units('kcal/mol')
    def __len__(self):
        """Returns the number of geometries stored in the dataset.
        Returns:
            int: number of geometries
        """
return len(self.props['nxyz'])
    def __getitem__(self, idx):
        """Returns the properties of the geometry at index `idx`.
        Args:
            idx (int): index of the geometry
        Returns:
            dict: {property_name: value} for that geometry
        """
return {key: val[idx] for key, val in self.props.items()}
def __add__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if other.units != self.units:
other = other.copy().to_units(self.units)
new_props = self.props
keys = list(new_props.keys())
for key in keys:
if key not in other.props:
new_props.pop(key)
continue
val = other.props[key]
if type(val) is list:
new_props[key] += val
else:
old_val = new_props[key]
new_props[key] = torch.cat([old_val,
val.to(old_val.dtype)])
self.props = new_props
return copy.deepcopy(self)
def _check_dictionary(self, props):
"""Check the dictionary or properties to see if it has the
specified format.
Args:
props (TYPE): Description
Returns:
TYPE: Description
"""
assert 'nxyz' in props.keys()
n_atoms = [len(x) for x in props['nxyz']]
n_geoms = len(props['nxyz'])
if 'num_atoms' not in props.keys():
props['num_atoms'] = torch.LongTensor(n_atoms)
else:
props['num_atoms'] = torch.LongTensor(props['num_atoms'])
for key, val in props.items():
if val is None:
props[key] = to_tensor([np.nan] * n_geoms)
elif any([x is None for x in val]):
bad_indices = [i for i, item in enumerate(val) if item is None]
good_indices = [index for index in range(
len(val)) if index not in bad_indices]
if len(good_indices) == 0:
nan_list = np.array([float("NaN")]).tolist()
else:
good_index = good_indices[0]
nan_list = (np.array(val[good_index])
* float('NaN')).tolist()
for index in bad_indices:
props[key][index] = nan_list
props.update({key: to_tensor(val)})
else:
assert len(val) == n_geoms, (f'length of {key} is not '
f'compatible with {n_geoms} '
'geometries')
props[key] = to_tensor(val)
return props
def generate_atom_initializations(self,atom_inits):
self.props["init"] = []
for idx in tqdm(range(len(self.props["nxyz"]))):
curr_nxyz = self.props["nxyz"][idx]
initial_rep = np.vstack([atom_inits[str(int(n))] for n in curr_nxyz[:,0]])
self.props["init"].append(torch.tensor(initial_rep))
def generate_neighbor_list(self,
cutoff,
undirected=True,
key='nbr_list',
offset_key='offsets'):
"""Generates a neighbor list for each one of the atoms in the dataset.
By default, does not consider periodic boundary conditions.
Args:
cutoff (float): distance up to which atoms are considered bonded.
undirected (bool, optional): Description
Returns:
TYPE: Description
"""
if 'lattice' not in self.props:
self.props[key] = [
get_neighbor_list(nxyz[:, 1:4], cutoff, undirected)
for nxyz in self.props['nxyz']
]
self.props[offset_key] = [
torch.sparse.FloatTensor(nbrlist.shape[0], 3)
for nbrlist in self.props[key]
]
else:
self._get_periodic_neighbor_list(cutoff=cutoff,
undirected=undirected,
offset_key=offset_key,
nbr_key=key)
return self.props[key], self.props[offset_key]
return self.props[key]
# def make_nbr_to_mol(self):
# nbr_to_mol = []
# for nbrs in self.props['nbr_list']:
# nbrs_to_mol.append(torch.zeros(len(nbrs)))
def make_all_directed(self):
make_dset_directed(self)
def generate_angle_list(self):
self.make_all_directed()
angles, nbrs = get_angle_list(self.props['nbr_list'])
self.props['nbr_list'] = nbrs
self.props['angle_list'] = angles
ji_idx, kj_idx = add_ji_kj(angles, nbrs)
self.props['ji_idx'] = ji_idx
self.props['kj_idx'] = kj_idx
return angles
def generate_kj_ji(self, num_procs=1):
"""
Generate only the `ji_idx` and `kj_idx` without storing
the full angle list.
"""
self.make_all_directed()
add_kj_ji_parallel(self,
num_procs=num_procs)
def _get_periodic_neighbor_list(self,
cutoff,
undirected=False,
offset_key='offsets',
nbr_key='nbr_list'):
from nff.io.ase import AtomsBatch
nbrlist = []
offsets = []
for nxyz, lattice in zip(self.props['nxyz'], self.props['lattice']):
atoms = AtomsBatch(
nxyz[:, 0].long(),
positions=nxyz[:, 1:],
cell=lattice,
pbc=True,
cutoff=cutoff,
directed=(not undirected)
)
try:
nbrs, offs = atoms.update_nbr_list()
            except Exception as e:
                # fail loudly instead of dropping into the debugger
                raise RuntimeError('Failed to build the periodic neighbor list') from e
nbrlist.append(nbrs)
offsets.append(offs)
self.props[nbr_key] = nbrlist
self.props[offset_key] = offsets
return
def generate_bond_idx(self, num_procs=1):
"""
For each index in the bond list, get the
index in the neighbour list that corresponds to the
same directed pair of atoms.
Args:
None
Returns:
None
"""
self.make_all_directed()
add_bond_idx_parallel(self, num_procs)
def copy(self):
"""Copies the current dataset
Returns:
TYPE: Description
"""
return Dataset(self.props, self.units)
def to_units(self, target_unit):
"""Converts the dataset to the desired unit. Modifies the dictionary
of properties in place.
Args:
target_unit (str): unit to use as final one
Returns:
TYPE: Description
Raises:
NotImplementedError: Description
"""
if target_unit not in ['kcal/mol', 'atomic']:
raise NotImplementedError(
'unit conversion for {} not implemented'.format(target_unit)
)
if target_unit == 'kcal/mol' and self.units == 'atomic':
self.props = const.convert_units(
self.props,
const.AU_TO_KCAL
)
elif target_unit == 'atomic' and self.units == 'kcal/mol':
self.props = const.convert_units(
self.props,
const.KCAL_TO_AU
)
else:
return
self.units = target_unit
return
def change_idx(self, idx):
"""
Change the dataset so that the properties are ordered by the
indices `idx`. If `idx` does not contain all of the original
indices in the dataset, then this will reduce the size of the
dataset.
"""
for key, val in self.props.items():
if isinstance(val, list):
self.props[key] = [val[i] for i in idx]
else:
self.props[key] = val[idx]
def shuffle(self):
"""Summary
Returns:
TYPE: Description
"""
idx = list(range(len(self)))
reindex = skshuffle(idx)
self.change_idx(reindex)
def featurize(self,
num_procs=NUM_PROCS,
bond_feats=BOND_FEAT_TYPES,
atom_feats=ATOM_FEAT_TYPES):
"""
Featurize dataset with atom and bond features.
Args:
num_procs (int): number of parallel processes
bond_feats (list[str]): names of bond features
atom_feats (list[str]): names of atom features
Returns:
None
"""
featurize_parallel(self,
num_procs=num_procs,
bond_feats=bond_feats,
atom_feats=atom_feats)
def add_morgan(self, vec_length):
"""
Add Morgan fingerprints to each species in the dataset.
Args:
vec_length (int): length of fingerprint
Returns:
None
"""
external_morgan(self, vec_length)
def add_e3fp(self,
fp_length,
num_procs=NUM_PROCS):
"""
Add E3FP fingerprints for each conformer of each species
in the dataset.
Args:
fp_length (int): length of fingerprint
num_procs (int): number of processes to use when
featurizing.
Returns:
None
"""
add_e3fp_parallel(self,
fp_length,
num_procs)
def featurize_rdkit(self, method):
"""
Add 3D-based RDKit fingerprints for each conformer of
each species in the dataset.
Args:
method (str): name of RDKit feature method to use
Returns:
None
"""
external_rdkit(self, method=method)
def unwrap_xyz(self, mol_dic):
"""
Unwrap molecular coordinates by displacing atoms by box vectors
Args:
mol_dic (dict): dictionary of nodes of each disconnected subgraphs
"""
from nff.io.ase import AtomsBatch
for i in range(len(self.props['nxyz'])):
# makes atoms object
atoms = AtomsBatch(positions=self.props['nxyz'][i][:, 1:4],
numbers=self.props['nxyz'][i][:, 0],
cell=self.props["cell"][i],
pbc=True)
# recontruct coordinates based on subgraphs index
if self.props['smiles']:
mol_idx = mol_dic[self.props['smiles'][i]]
atoms.set_positions(reconstruct_atoms(atoms, mol_idx))
nxyz = atoms.get_nxyz()
self.props['nxyz'][i] = torch.Tensor(nxyz)
def save(self, path):
"""Summary
Args:
path (TYPE): Description
"""
# to deal with the fact that sparse tensors can't be pickled
offsets = self.props.get('offsets', torch.LongTensor([0]))
old_offsets = copy.deepcopy(offsets)
# check if it's a sparse tensor. The first two conditions
        # are needed for backwards compatibility in case it's a float
# or empty list
if all([hasattr(offsets, "__len__"), len(offsets) > 0]):
if isinstance(offsets[0], torch.sparse.FloatTensor):
self.props['offsets'] = [val.to_dense() for val in offsets]
torch.save(self, path)
if "offsets" in self.props:
self.props['offsets'] = old_offsets
def gen_bond_stats(self):
bond_len_dict = {}
# generate bond statistics
for i in range(len(self.props['nxyz'])):
z = self.props['nxyz'][i][:, 0]
xyz = self.props['nxyz'][i][:, 1:4]
bond_list = self.props['bonds'][i]
bond_len = (xyz[bond_list[:, 0]] - xyz[bond_list[:, 1]]
).pow(2).sum(-1).sqrt()[:, None]
bond_type_list = torch.stack(
(z[bond_list[:, 0]], z[bond_list[:, 1]])).t()
            for j, bond in enumerate(bond_type_list):
                # avoid shadowing the outer geometry index `i`
                bond = tuple(torch.LongTensor(sorted(bond)).tolist())
                if bond not in bond_len_dict.keys():
                    bond_len_dict[bond] = [bond_len[j]]
                else:
                    bond_len_dict[bond].append(bond_len[j])
# compute bond len averages
self.bond_len_dict = {key: torch.stack(
bond_len_dict[key]).mean(0) for key in bond_len_dict.keys()}
return self.bond_len_dict
def gen_bond_prior(self, cutoff, bond_len_dict=None):
from nff.io.ase import AtomsBatch
if not self.props:
raise TypeError("the dataset has no data yet")
bond_dict = {}
mol_idx_dict = {}
#---------This part can be simplified---------#
for i in range(len(self.props['nxyz'])):
z = self.props['nxyz'][i][:, 0]
xyz = self.props['nxyz'][i][:, 1:4]
# generate arguments for ase Atoms object
cell = self.props['cell'][i] if 'cell' in self.props.keys(
) else None
ase_param = {"numbers": z,
"positions": xyz,
"pbc": True,
"cell": cell}
atoms = Atoms(**ase_param)
sys_name = self.props['smiles'][i]
if sys_name not in bond_dict.keys():
print(sys_name)
i, j = neighbor_list("ij", atoms, DISTANCETHRESHOLDICT_Z)
bond_list = torch.LongTensor(np.stack((i, j), axis=1)).tolist()
bond_dict[sys_name] = bond_list
# generate molecular graph
# TODO: there is redundant code in generate_subgraphs
subgraph_index = generate_subgraphs(atoms)
mol_idx_dict[sys_name] = subgraph_index
# generate topologies
# TODO: include options to only generate bond topology
self.generate_topologies(bond_dic=bond_dict)
if 'cell' in self.props.keys():
self.unwrap_xyz(mol_idx_dict)
#---------This part can be simplified---------#
# generate bond length dictionary if not given
if not bond_len_dict:
bond_len_dict = self.gen_bond_stats()
# update bond len and offsets
all_bond_len = []
all_offsets = []
all_nbr_list = []
for i in range(len(self.props['nxyz'])):
z = self.props['nxyz'][i][:, 0]
xyz = self.props['nxyz'][i][:, 1:4]
bond_list = self.props['bonds'][i]
bond_type_list = torch.stack(
(z[bond_list[:, 0]], z[bond_list[:, 1]])).t()
bond_len_list = []
for bond in bond_type_list:
bond_type = tuple(torch.LongTensor(sorted(bond)).tolist())
bond_len_list.append(bond_len_dict[bond_type])
all_bond_len.append(torch.Tensor(bond_len_list).reshape(-1, 1))
# update offsets
cell = self.props['cell'][i] if 'cell' in self.props.keys(
) else None
ase_param = {"numbers": z,
"positions": xyz,
"pbc": True,
"cutoff": cutoff,
"cell": cell,
"nbr_torch": False}
# the coordinates have been unwrapped, so recompute the offsets
atoms = AtomsBatch(**ase_param)
atoms.update_nbr_list()
all_offsets.append(atoms.offsets)
all_nbr_list.append(atoms.nbr_list)
# update
self.props['bond_len'] = all_bond_len
self.props['offsets'] = all_offsets
self.props['nbr_list'] = all_nbr_list
self._check_dictionary(deepcopy(self.props))
@classmethod
def from_file(cls, path):
"""Summary
Args:
path (TYPE): Description
Returns:
TYPE: Description
Raises:
TypeError: Description
"""
obj = torch.load(path)
if isinstance(obj, cls):
return obj
else:
raise TypeError(
'{} is not an instance of {}'.format(path, cls.__name__)
)
def force_to_energy_grad(dataset):
"""
Converts forces to energy gradients in a dataset. This conforms to
the notation that a key with `_grad` is the gradient of the
property preceding it. Modifies the database in-place.
Args:
dataset (nff.data.Dataset): dataset with a `forces` key to convert
Returns:
success (bool): if True, forces were removed and energy_grad
became the new key.
"""
if 'forces' not in dataset.props.keys():
return False
else:
dataset.props['energy_grad'] = [
-x
for x in dataset.props.pop('forces')
]
return True
def convert_nan(x):
"""
If a list has any elements that contain nan, convert its contents
to the right form so that it can eventually be converted to a tensor.
Args:
x (list): any list with floats, ints, or Tensors.
Returns:
new_x (list): updated version of `x`
"""
new_x = []
# whether any of the contents have nan
has_nan = any([np.isnan(y).any() for y in x])
for y in x:
if has_nan:
# if one is nan then they will have to become float tensors
if type(y) in [int, float]:
new_x.append(torch.Tensor([y]))
elif isinstance(y, torch.Tensor):
new_x.append(y.float())
else:
# otherwise they can be kept as is
new_x.append(y)
return new_x
def to_tensor(x, stack=False):
"""
Converts input `x` to torch.Tensor.
Args:
x: input to be converted. Can be a number, string, list, np.array, or torch.Tensor
stack (bool): if True, concatenates torch.Tensors in the batching dimension
Returns:
torch.Tensor or list, depending on the type of x
Raises:
TypeError: if the data type is not understood
"""
# a single number should be a list
if isinstance(x, numbers.Number):
return torch.Tensor([x])
if isinstance(x, str):
return [x]
if isinstance(x, torch.Tensor):
return x
if type(x) is list and type(x[0]) != str:
if not isinstance(x[0], torch.sparse.FloatTensor):
x = convert_nan(x)
# all objects in x are tensors
if isinstance(x, list) and all([isinstance(y, torch.Tensor) for y in x]):
# list of tensors with zero or one effective dimension
# flatten the tensor
if all([len(y.shape) < 1 for y in x]):
return torch.cat([y.view(-1) for y in x], dim=0)
elif stack:
return torch.cat(x, dim=0)
# list of multidimensional tensors
else:
return x
# some objects are not tensors
elif isinstance(x, list):
# list of strings
if all([isinstance(y, str) for y in x]):
return x
# list of ints
if all([isinstance(y, int) for y in x]):
return torch.LongTensor(x)
# list of floats
if all([isinstance(y, numbers.Number) for y in x]):
return torch.Tensor(x)
# list of arrays or other formats
if any([isinstance(y, (list, np.ndarray)) for y in x]):
return [torch.Tensor(y) for y in x]
raise TypeError('Data type not understood')
def concatenate_dict(*dicts):
"""Concatenates dictionaries as long as they have the same keys.
If one dictionary has one key that the others do not have,
the dictionaries lacking the key will have that key replaced by None.
Args:
*dicts (dict): any number of dictionaries to concatenate
Example:
dict_1 = {
'nxyz': [...],
'energy': [...]
}
dict_2 = {
'nxyz': [...],
'energy': [...]
}
dicts = [dict_1, dict_2]
Returns:
joint_dict (dict): the concatenated dictionary
"""
assert all([type(d) == dict for d in dicts]), \
'all arguments have to be dictionaries'
# Old method
# keys = set(sum([list(d.keys()) for d in dicts], []))
# New method
keys = set()
for dic in dicts:
for key in dic.keys():
if key not in keys:
keys.add(key)
# While less pretty, the new method is MUCH faster. For example,
# for a dataset of size 600,000, the old method literally
# takes hours, while the new method takes 250 ms
def is_list_of_lists(value):
if isinstance(value, list):
return isinstance(value[0], list)
return False
def get_length(value):
if is_list_of_lists(value):
if is_list_of_lists(value[0]):
return len(value)
return 1
elif isinstance(value, list):
return len(value)
return 1
def get_length_of_values(dict_):
if 'nxyz' in dict_:
return get_length(dict_['nxyz'])
return min([get_length(v) for v in dict_.values()])
def flatten_val(value):
"""Given a value, which can be a number, a list or
a torch.Tensor, return its flattened version
to be appended to a list of values
"""
if is_list_of_lists(value):
if is_list_of_lists(value[0]):
return value
else:
return [value]
elif isinstance(value, list):
return value
elif isinstance(value, torch.Tensor):
if len(value.shape) == 0:
return [value]
elif len(value.shape) == 1:
return [item for item in value]
else:
return [value]
elif get_length(value) == 1:
return [value]
return [value]
# we have to see how many values the properties of each dictionary has.
values_per_dict = [get_length_of_values(d) for d in dicts]
# creating the joint dictionary
joint_dict = {}
for key in keys:
# flatten list of values
values = []
for num_values, d in zip(values_per_dict, dicts):
val = d.get(key,
([None] * num_values if num_values > 1 else None)
)
values += flatten_val(val)
joint_dict[key] = values
return joint_dict
def binary_split(dataset, targ_name, test_size, seed):
"""
Split the dataset with proportional amounts of a binary label in each.
Args:
dataset (nff.data.dataset): NFF dataset
targ_name (str): name of the binary label to use
in splitting.
test_size (float): fraction of the dataset to use for testing
seed (int): random seed for the splits
Returns:
idx_train (list[int]): indices of species in the training set
idx_test (list[int]): indices of species in the test set
"""
# get indices of positive and negative values
pos_idx = [i for i, targ in enumerate(dataset.props[targ_name])
if targ]
neg_idx = [i for i in range(len(dataset)) if i not in pos_idx]
# split the positive and negative indices separately
pos_idx_train, pos_idx_test = train_test_split(pos_idx,
test_size=test_size,
random_state=seed)
neg_idx_train, neg_idx_test = train_test_split(neg_idx,
test_size=test_size,
random_state=seed)
# combine the negative and positive test idx to get the test idx
# do the same for train
idx_train = pos_idx_train + neg_idx_train
idx_test = pos_idx_test + neg_idx_test
return idx_train, idx_test
def split_train_test(dataset,
test_size=0.2,
binary=False,
targ_name=None,
seed=None):
"""Splits the current dataset in two, one for training and
another for testing.
Args:
dataset (nff.data.dataset): NFF dataset
test_size (float, optional): fraction of dataset for test
binary (bool, optional): whether to split the dataset with
proportional amounts of a binary label in each.
targ_name (str, optional): name of the binary label to use
in splitting.
seed (int, optional): random seed for the split
Returns:
train (nff.data.Dataset): training dataset
test (nff.data.Dataset): test dataset
"""
if binary:
idx_train, idx_test = binary_split(dataset=dataset,
targ_name=targ_name,
test_size=test_size,
seed=seed)
else:
idx = list(range(len(dataset)))
idx_train, idx_test = train_test_split(idx, test_size=test_size,
random_state=seed)
train = Dataset(
props={key: [val[i] for i in idx_train]
for key, val in dataset.props.items()},
units=dataset.units
)
test = Dataset(
props={key: [val[i] for i in idx_test]
for key, val in dataset.props.items()},
units=dataset.units
)
return train, test
def split_train_validation_test(dataset,
val_size=0.2,
test_size=0.2,
seed=None,
**kwargs):
"""Summary
Args:
dataset (TYPE): Description
val_size (float, optional): Description
test_size (float, optional): Description
Returns:
TYPE: Description
"""
train, validation = split_train_test(dataset,
test_size=val_size,
seed=seed,
**kwargs)
train, test = split_train_test(train,
test_size=test_size / (1 - val_size),
seed=seed,
**kwargs)
return train, validation, test
```
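The helpers above are typically combined to build and split a dataset. Below is a minimal, hypothetical sketch: the property values are made up, the `nff.data.dataset` module path is assumed, and the `Dataset` constructor is assumed to accept these props with its defaults. It only illustrates how `concatenate_dict` pads missing keys with `None` and how the splitting utility is called.
```python
import torch

from nff.data import Dataset
# assumed module path for the helpers defined above
from nff.data.dataset import concatenate_dict, split_train_validation_test

# two per-geometry property dictionaries with made-up values
dict_1 = {'nxyz': [torch.rand(3, 4)], 'energy': [-1.0]}
dict_2 = {'nxyz': [torch.rand(5, 4)], 'energy': [-2.5], 'dipole': [0.3]}

# keys missing from one dictionary are padded with None
props = concatenate_dict(dict_1, dict_2)
assert props['dipole'] == [None, 0.3]

# build a small dataset with consistent keys and split it
clean_props = {'nxyz': [torch.rand(3, 4) for _ in range(10)],
               'energy': [float(i) for i in range(10)]}
dset = Dataset(props=clean_props)
train, val, test = split_train_validation_test(dset,
                                               val_size=0.2,
                                               test_size=0.2)
```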
#### File: nff/data/graphs.py
```python
import numpy as np
import networkx as nx
import torch
from ase import Atoms
from tqdm import tqdm
from nff.utils.misc import tqdm_enum
DISTANCETHRESHOLDICT_SYMBOL = {
("H", "H"): 1.00,
("H", "Li"): 1.30,
("H", "C"): 1.30,
("H", "N"): 1.30,
("H", "O"): 1.30,
("H", "F"): 1.30,
("H", "Na"): 1.65,
("H", "Si"): 1.65,
("H", "Mg"): 1.40,
("H", "S"): 1.50,
("H", "Cl"): 1.60,
("H", "Br"): 1.60,
("Li", "C"): 0.0,
("Li", "N"): 0.0,
("Li", "O"): 0.0,
("Li", "F"): 0.0,
("Li", "Mg"): 0.0,
("B", "C"): 1.70,
("B", "N"): 1.70,
("B", "O"): 1.70,
("B", "F"): 1.70,
("B", "Na"): 1.8,
("B", "Mg"): 1.8,
("B", "Cl"): 2.1,
("B", "Br"): 2.1,
("C", "C"): 1.70,
("C", "O"): 1.70,
("C", "N"): 1.8,
("C", "F"): 1.65,
("C", "Na"): 1.80,
("C", "Mg"): 1.70,
("C", "Si"): 2.10,
("C", "S"): 2.20,
("N", "O"): 1.55,
("N", "Na"): 1.70,
("N", "S"): 2.0,
("O", "Na"): 1.70,
("O", "Mg"): 1.35,
("O", "S"): 2.00,
("O", "Cl"): 1.80,
("O", "O"): 1.70,
("O", "F"): 1.50,
("O", "Si"): 1.85,
("O", "Br"): 1.70,
("F", "Mg"): 1.35, }
DISTANCETHRESHOLDICT_Z = {
(1., 1.): 1.00,
(1., 3.): 1.30,
(1., 5.): 1.50,
(1., 6.): 1.30,
(1., 7.): 1.30,
(1., 8.): 1.30,
(1., 9.): 1.30,
(1., 11.): 1.65,
(1., 14.): 1.65,
(1., 12.): 1.40,
(1., 16.): 1.50,
(1., 17.): 1.60,
(1., 35.): 1.60,
(3., 6.): 0.0,
(3., 7.): 0.0,
(3., 8.): 0.0,
(3., 9.): 0.0,
(3., 12.): 0.0,
(5., 6.): 1.70,
(5., 7.): 1.70,
(5., 8.): 1.70,
(5., 9.): 1.70,
(5., 11.): 1.8,
(5., 12.): 1.8,
(5., 17.): 2.1,
(5., 35.): 2.1,
(6., 6.): 1.70,
(6., 8.): 1.70,
(6., 7.): 1.8,
(6., 9.): 1.65,
(6., 11.): 1.80,
(6., 12.): 1.70,
(6., 14.): 2.10,
(6., 16.): 2.20,
(7., 8.): 1.55,
(7., 11.): 1.70,
(7., 16.): 2.0,
(8., 11.): 1.70,
(8., 12.): 1.35,
(8., 16.): 2.00,
(8., 17.): 1.80,
(8., 8.): 1.70,
(8., 9.): 1.50,
(8., 14.): 1.85,
(8., 35.): 1.70,
(9., 12.): 1.35}
def get_neighbor_list(xyz, cutoff=5, undirected=True):
"""Get neighbor list from xyz positions of atoms.
Args:
xyz (torch.Tensor or np.array): (N, 3) array with positions
of the atoms.
cutoff (float): maximum distance to consider atoms as
connected.
undirected (bool, optional): if True, each connected pair of atoms
appears only once in the list.
Returns:
nbr_list (torch.Tensor): (num_edges, 2) array with the
indices of connected atoms.
"""
xyz = torch.Tensor(xyz)
n = xyz.size(0)
# calculating distances
dist = (xyz.expand(n, n, 3) - xyz.expand(n, n, 3).transpose(0, 1)
).pow(2).sum(dim=2).sqrt()
# neighbor list
mask = (dist <= cutoff)
mask[np.diag_indices(n)] = 0
nbr_list = mask.nonzero(as_tuple=False)
if undirected:
nbr_list = nbr_list[nbr_list[:, 1] > nbr_list[:, 0]]
return nbr_list
def to_tuple(tensor):
"""
Convert tensor to tuple.
Args:
tensor (torch.Tensor): any tensor
Returns:
tup (tuple): tuple form
"""
tup = tuple(tensor.cpu().tolist())
return tup
def get_bond_idx(bonded_nbr_list, nbr_list):
"""
For each index in the bond list, get the
index in the neighbour list that corresponds to the
same directed pair of atoms.
Args:
bonded_nbr_list (torch.LongTensor): pairs
of bonded atoms.
nbr_list (torch.LongTensor): pairs of atoms
within a cutoff radius of each other.
Returns:
bond_idx (torch.LongTensor): set of indices in the
neighbor list that corresponds to the same
directed pair of atoms in the bond list.
"""
# both lists are assumed to be directed
# make the neighbour list into a dictionary of the form
# {(atom_0, atom_1): nbr_list_index} for each pair of atoms
nbr_dic = {to_tuple(pair): i for i, pair in enumerate(nbr_list)}
# call the dictionary for each pair of atoms in the bonded neighbor
# list to get `bond_idx`
bond_idx = torch.LongTensor([nbr_dic[to_tuple(pair)]
for pair in bonded_nbr_list])
return bond_idx
def get_dist_mat(xyz, box_len, unwrap=True):
dis_mat = (xyz[:, None, :] - xyz[None, ...])
# build minimum image convention
mask_pos = dis_mat.ge(0.5*box_len).type(torch.FloatTensor)
mask_neg = dis_mat.lt(-0.5*box_len).type(torch.FloatTensor)
# modify distance
if unwrap:
dis_add = mask_neg * box_len
dis_sub = mask_pos * box_len
dis_mat = dis_mat + dis_add - dis_sub
# create cutoff mask
# compute squared distance of dim (B, N, N)
dis_sq = dis_mat.pow(2).sum(-1)
# mask = (dis_sq <= cutoff ** 2) & (dis_sq != 0)
# byte tensor of dim (B, N, N)
#A = mask.unsqueeze(3).type(torch.FloatTensor).to(self.device) #
# 1) PBC 2) # gradient of zero distance
dis_sq = dis_sq.unsqueeze(-1)
# dis_sq = (dis_sq * A) + 1e-8
# to make sure the distance is not zero,
# otherwise there will be inf gradient
dis_mat = dis_sq.sqrt().squeeze()
return dis_mat
def adjdistmat(atoms, threshold=DISTANCETHRESHOLDICT_Z, unwrap=True):
#dmat = (xyz[:, None, :] - xyz[None, ...]).pow(2).sum(-1).numpy()
xyz = torch.Tensor(atoms.get_positions(wrap=True))
atomicnums = atoms.get_atomic_numbers().tolist()
box_len = torch.Tensor(np.diag(atoms.get_cell()))
dmat = get_dist_mat(xyz, box_len, unwrap=unwrap).numpy()
thresholdmat = np.array([[threshold.get(tuple(
sorted((i, j))), 2.0) for i in atomicnums] for j in atomicnums])
adjmat = (dmat < thresholdmat).astype(int)
np.fill_diagonal(adjmat, 0)
return np.array(atomicnums), np.array(adjmat), np.array(dmat), thresholdmat
def generate_mol_atoms(atomic_nums, xyz, cell):
return Atoms(numbers=atomic_nums, positions=xyz, cell=cell, pbc=True)
def generate_subgraphs(atomsobject, unwrap=True, get_edge=False):
from nff.io.ase import AtomsBatch
atoms = AtomsBatch(atomsobject)
z, adj, dmat, threshold = adjdistmat(atoms, unwrap=unwrap)
box_len = torch.Tensor(np.diag(atoms.get_cell()))
# nx.from_numpy_matrix and nx.connected_component_subgraphs were removed in
# newer networkx releases, so use their current equivalents
G = nx.from_numpy_array(adj)
for i, item in enumerate(z):
G.nodes[i]['z'] = item
sub_graphs = [G.subgraph(c).copy() for c in nx.connected_components(G)]
edge_list = []
partitions = []
for i, sg in enumerate(sub_graphs):
partitions.append(list(sg.nodes))
if get_edge:
edge_list.append(list(sg.edges))
if len(edge_list) != 0:
return partitions, edge_list
else:
return partitions
def get_single_molecule(atomsobject, mol_idx, single_mol_id):
z = atomsobject.get_atomic_numbers()[mol_idx[single_mol_id]]
pos = atomsobject.get_positions()[mol_idx[single_mol_id]]
return Atoms(numbers=z, positions=pos,
cell=atomsobject.cell, pbc=True)
def reconstruct_atoms(atomsobject, mol_idx):
sys_xyz = torch.Tensor(atomsobject.get_positions(wrap=True))
box_len = torch.Tensor(atomsobject.get_cell_lengths_and_angles()[:3])
print(box_len)
for idx in mol_idx:
mol_xyz = sys_xyz[idx]
center = mol_xyz.shape[0]//2
intra_dmat = (mol_xyz[None, ...] - mol_xyz[:, None, ...])[center]
sub = (intra_dmat > 0.5 * box_len).to(torch.float) * box_len
add = (intra_dmat <= -0.5 * box_len).to(torch.float) * box_len
traj_unwrap = mol_xyz + add - sub
sys_xyz[idx] = traj_unwrap
new_pos = sys_xyz.numpy()
return new_pos
def list2adj(bond_list, size=None):
E = bond_list
if size is None:
size = max(set([n for e in E for n in e])) + 1
# make an empty adjacency list
adjacency = [[0]*size for _ in range(size)]
# populate the list for each edge
for sink, source in E:
adjacency[sink][source] = 1
return adjacency
def make_directed(nbr_list):
"""
Check if a neighbor list is directed, and make it
directed if it isn't.
Args:
nbr_list (torch.LongTensor): neighbor list
Returns:
new_nbrs (torch.LongTensor): directed neighbor
list
directed (bool): whether the old one was directed
or not
"""
gtr_ij = (nbr_list[:, 0] > nbr_list[:, 1]).any().item()
gtr_ji = (nbr_list[:, 1] > nbr_list[:, 0]).any().item()
directed = gtr_ij and gtr_ji
if directed:
return nbr_list, directed
new_nbrs = torch.cat([nbr_list, nbr_list.flip(1)], dim=0)
return new_nbrs, directed
def make_nbr_dic(nbr_list):
"""
Make a dictionary that maps each atom to the indices
of its neighbors.
Args:
nbr_list (torch.LongTensor): nbr list for a geometry
Returns:
nbr_dic (dict): dictionary described above
"""
nbr_dic = {}
for nbr in nbr_list:
nbr_0 = nbr[0].item()
if nbr_0 not in nbr_dic:
nbr_dic[nbr_0] = []
nbr_dic[nbr_0].append(nbr[1].item())
return nbr_dic
def get_angle_list(nbr_lists):
"""
Get angle lists from neighbor lists.
Args:
nbr_lists (list): list of neighbor
lists.
Returns:
angles (list): list of angle lists
new_nbrs (list): list of new neighbor
lists (directed if they weren't
already).
"""
new_nbrs = []
angles = []
num = []
for nbr_list in tqdm(nbr_lists):
nbr_list, _ = make_directed(nbr_list)
these_angles = []
nbr_dic = make_nbr_dic(nbr_list)
for nbr in nbr_list:
nbr_1 = nbr[1].item()
nbr_1_nbrs = torch.LongTensor(nbr_dic[nbr_1]).reshape(-1, 1)
nbr_repeat = nbr.repeat(len(nbr_1_nbrs), 1)
these_angles += [torch.cat([nbr_repeat,
nbr_1_nbrs], dim=-1)]
these_angles = torch.cat(these_angles)
new_nbrs.append(nbr_list)
angles.append(these_angles)
num.append(these_angles.shape[0] - len(nbr_list))
# take out angles of the form [i, j, i], which aren't really angles
angle_tens = torch.cat(angles)
mask = angle_tens[:, 0] != angle_tens[:, 2]
angles = list(torch.split(angle_tens[mask],
num))
return angles, new_nbrs
def m_idx_of_angles(angle_list,
nbr_list,
angle_start,
angle_end):
"""
Get the array index of elements of an angle list.
Args:
angle_list (torch.LongTensor): directed indices
of sets of three atoms that are all in each
other's neighborhood.
nbr_list (torch.LongTensor): directed indices
of pairs of atoms that are in each other's
neighborhood.
angle_start (int): the first index in the angle
list you want.
angle_end (int): the last index in the angle list
you want.
Returns:
idx (torch.LongTensor): `m` indices.
Example:
angle_list = torch.LongTensor([[0, 1, 2],
[0, 1, 3]])
nbr_list = torch.LongTensor([[0, 1],
[0, 2],
[0, 3],
[1, 0],
[1, 2],
[1, 3],
[2, 0],
[2, 1],
[2, 3],
[3, 0],
[3, 1],
[3, 2]])
# This means that message vectors m_ij are ordered
# according to m = {m_01, m_02, m_03, m_10, m_12, m_13,
# m_20, m_21, m_23, m_30, m_31, m_32}. Say we are interested
# in indices 2 and 1 for each element in the angle list.
# If we want to know what the corresponding indices
# in m (or the nbr list) are, we would call `m_idx_of_angles`
# with angle_start = 2, angle_end = 1 (if we want the
# {2,1} and {3,1} indices), or angle_start = 1,
# angle_end = 0 (if we want the {1,2} and {1,3} indices).
# Say we choose angle_start = 2 and angle_end = 1. Then
# we get the indices of {m_21, m_31}, which we can see
# from the nbr list are [7, 10].
"""
# expand nbr_list[:, 0] so it's repeated once
# for every element of `angle_list`.
repeated_nbr = nbr_list[:, 0].repeat(angle_list.shape[0], 1)
reshaped_angle = angle_list[:, angle_start].reshape(-1, 1)
# gives you a matrix that shows you where each angle is equal
# to nbr_list[:, 0]
mask = repeated_nbr == reshaped_angle
# same idea, but with nbr_list[:, 1] and angle_list[:, angle_end]
repeated_nbr = nbr_list[:, 1].repeat(angle_list.shape[0], 1)
reshaped_angle = angle_list[:, angle_end].reshape(-1, 1)
# the full mask is the product of both
mask *= (repeated_nbr == reshaped_angle)
# get the indices where everything is true
idx = mask.nonzero(as_tuple=False)[:, 1]
return idx
def add_ji_kj(angle_lists, nbr_lists):
"""
Get ji and kj idx (explained more below):
Args:
angle_list (list[torch.LongTensor]): list of angle
lists
nbr_list (list[torch.LongTensor]): list of directed neighbor
lists
Returns:
ji_idx_list (list[torch.LongTensor]): ji_idx for each geom
kj_idx_list (list[torch.LongTensor]): kj_idx for each geom
"""
# given an angle a_{ijk}, we want
# ji_idx, which is the array index of m_ji.
# We also want kj_idx, which is the array index
# of m_kj. For example, if i,j,k = 0,1,2,
# and our neighbor list is [[0, 1], [0, 2],
# [1, 0], [1, 2], [2, 0], [2, 1]], then m_10 occurs
# at index 2, and m_21 occurs at index 5. So
# ji_idx = 2 and kj_idx = 5.
ji_idx_list = []
kj_idx_list = []
for i, nbr_list in tqdm_enum(nbr_lists):
angle_list = angle_lists[i]
ji_idx = m_idx_of_angles(angle_list=angle_list,
nbr_list=nbr_list,
angle_start=1,
angle_end=0)
kj_idx = m_idx_of_angles(angle_list=angle_list,
nbr_list=nbr_list,
angle_start=2,
angle_end=1)
ji_idx_list.append(ji_idx)
kj_idx_list.append(kj_idx)
return ji_idx_list, kj_idx_list
def make_dset_directed(dset):
"""
Make everything in the dataset correspond to a directed
neighbor list.
Args:
dset (nff.data.Dataset): nff dataset
Returns:
None
"""
# make the neighbor list directed
for i, batch in enumerate(dset):
nbr_list, nbr_was_directed = make_directed(batch['nbr_list'])
dset.props['nbr_list'][i] = nbr_list
# fix bond_idx
bond_idx = batch.get("bond_idx")
has_bond_idx = (bond_idx is not None)
if (not nbr_was_directed) and has_bond_idx:
nbr_dim = nbr_list.shape[0]
bond_idx = torch.cat([bond_idx,
bond_idx + nbr_dim // 2])
dset.props['bond_idx'][i] = bond_idx
# make the bonded nbr list directed
bond_nbrs = batch.get('bonded_nbr_list')
has_bonds = (bond_nbrs is not None)
if has_bonds:
bond_nbrs, bonds_were_directed = make_directed(bond_nbrs)
dset.props['bonded_nbr_list'][i] = bond_nbrs
# fix the corresponding bond features
bond_feats = batch.get('bond_features')
has_bond_feats = (bond_feats is not None)
if (has_bonds and has_bond_feats) and (not bonds_were_directed):
bond_feats = torch.cat([bond_feats] * 2, dim=0)
dset.props['bond_features'][i] = bond_feats
def batch_angle_idx(nbrs):
"""
Given a neighbor list, find the sets of indices in the neighbor list
corresponding to the kj and ji indices. Usually you can only do this
for one conformer without running out of memory -- to do it for multiple
conformers, use `full_angle_idx` below.
Args:
nbrs (torch.LongTensor): neighbor list
Returns:
ji_idx (torch.LongTensor): a set of indices for the neighbor list
kj_idx (torch.LongTensor): a set of indices for the neighbor list
such that nbrs[kj_idx[n]][0] == nbrs[ji_idx[n]][1] for any
value of n.
"""
all_idx = torch.stack([torch.arange(len(nbrs))] * len(nbrs)).long()
mask = ((nbrs[:, 1] == nbrs[:, 0, None])
* (nbrs[:, 0] != nbrs[:, 1, None]))
ji_idx = all_idx[mask]
kj_idx = mask.nonzero(as_tuple=False)[:, 0]
return ji_idx, kj_idx
def full_angle_idx(batch):
"""
Create all the kj and ji indices for a batch that may have several conformers.
Args:
batch (dict): batch of an nff dataset
Returns:
ji_idx (torch.LongTensor): a set of indices for the neighbor list
kj_idx (torch.LongTensor): a set of indices for the neighbor list
such that nbrs[kj_idx[n]][0] == nbrs[ji_idx[n]][1] for any
value of n.
"""
nbr_list = batch['nbr_list']
num_atoms = batch['num_atoms']
mol_size = batch.get('mol_size', num_atoms)
num_confs = num_atoms // mol_size
all_ji_idx = []
all_kj_idx = []
for i in range(num_confs):
max_idx = (i + 1) * mol_size
min_idx = (i) * mol_size
# get only the indices for this conformer
conf_mask = ((nbr_list[:, 0] < max_idx) *
(nbr_list[:, 0] >= min_idx))
nbrs = nbr_list[conf_mask]
# map from indices of these sub-neighbors
# to indices in full neighbor list
ji_idx, kj_idx = batch_angle_idx(nbrs)
# map to these indices
map_indices = (conf_mask.nonzero(as_tuple=False)
.reshape(-1))
all_ji_idx.append(map_indices[ji_idx])
all_kj_idx.append(map_indices[kj_idx])
all_ji_idx = torch.cat(all_ji_idx)
all_kj_idx = torch.cat(all_kj_idx)
return all_ji_idx, all_kj_idx
def kj_ji_to_dset(dataset, track):
"""
Add all the kj and ji indices to the dataset
Args:
dataset (nff.data.Dataset): nff dataset
track (bool): whether to track progress
Returns:
dataset (nff.data.Dataset): updated dataset
"""
all_ji_idx = []
all_kj_idx = []
if track:
iter_func = tqdm
else:
def iter_func(x): return x
for batch in iter_func(dataset):
ji_idx, kj_idx = full_angle_idx(batch)
all_ji_idx.append(ji_idx)
all_kj_idx.append(kj_idx)
dataset.props['ji_idx'] = all_ji_idx
dataset.props['kj_idx'] = all_kj_idx
return dataset
def add_bond_idx(dataset, track):
"""
Add indices that tell you which element of the neighbor
list corresponds to an index of the bonded neighbor list.
Args:
dataset (nff.data.Dataset): nff dataset
track (bool): whether to track progress
Returns:
dataset (nff.data.Dataset): updated dataset
"""
if track:
iter_func = tqdm
else:
def iter_func(x): return x
dataset.props["bond_idx"] = []
for i in iter_func(range(len(dataset))):
bonded_nbr_list = dataset.props["bonded_nbr_list"][i]
nbr_list = dataset.props["nbr_list"][i]
bond_idx = get_bond_idx(bonded_nbr_list, nbr_list)
dataset.props["bond_idx"].append(bond_idx.cpu())
return dataset
```
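As a rough illustration of the neighbor- and angle-list helpers above, here is a hypothetical sketch; the coordinates are random and the `nff.data.graphs` module path is assumed.
```python
import torch

# assumed module path for the helpers defined above
from nff.data.graphs import get_neighbor_list, make_directed, get_angle_list

# six atoms at random positions inside a 2 A cube (made-up geometry)
xyz = torch.rand(6, 3) * 2.0

# undirected neighbor list: each connected pair appears once
nbr_list = get_neighbor_list(xyz, cutoff=5.0, undirected=True)

# make_directed appends the reversed pairs if the list is undirected
directed_nbrs, was_directed = make_directed(nbr_list)

# angle lists are built per geometry, so pass a list of neighbor lists
angles, new_nbrs = get_angle_list([nbr_list])
```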
#### File: nff/data/sampling.py
```python
import torch
from tqdm import tqdm
from nff.train.loss import batch_zhu_p
from nff.utils import constants as const
from nff.utils.misc import cat_props
from nff.data import Dataset
from nff.utils.geom import compute_distances
def get_spec_dic(props):
"""
Find the indices of geoms in the dataset that correspond
to each species.
Args:
props (dict): dataset properties
Returns:
spec_dic (dict): dictionary of the form
{smiles: idx}, where smiles is the smiles
of a species without cis/trans indicators,
and idx are the indices of geoms in that
species in the dataset.
"""
spec_dic = {}
for i, spec in enumerate(props["smiles"]):
no_stereo_spec = (spec.replace("\\", "")
.replace("/", ""))
if no_stereo_spec not in spec_dic:
spec_dic[no_stereo_spec] = []
spec_dic[no_stereo_spec].append(i)
for key, val in spec_dic.items():
spec_dic[key] = torch.LongTensor(val)
return spec_dic
def compute_zhu(props,
zhu_kwargs):
"""
Compute the approximate Zhu-Nakamura hopping probabilities for
each geom in the dataset.
Args:
props (dict): dataset properties
zhu_kwargs (dict): dictionary with information about how
to calculate the hopping rates.
Returns:
zhu_p (torch.Tensor): hopping probabilities
"""
upper_key = zhu_kwargs["upper_key"]
lower_key = zhu_kwargs["lower_key"]
expec_gap_kcal = zhu_kwargs["expec_gap"] * const.AU_TO_KCAL["energy"]
func_type = zhu_kwargs["func_type"]
zhu_p = batch_zhu_p(batch=cat_props(props),
upper_key=upper_key,
lower_key=lower_key,
expec_gap=expec_gap_kcal,
func_type=func_type,
gap_shape=None)
return zhu_p
def balanced_spec_zhu(spec_dic,
zhu_p):
"""
Get the Zhu weights assigned to each geom, such that
the probability of getting a geom in species A
is equal to the probability of getting a geom in
species B [p(A) = p(B)], while the probabilities
within a species are related by p(A, i) / p(A, j)
= p_zhu(i) / p_zhu(j), where i and j are geometries in
species A and p_zhu is the Zhu-Nakamura hopping probability.
Args:
spec_dic (dict): dictionary with indices of geoms in each
species
zhu_p (torch.Tensor): Zhu-Nakamura hopping probabilities
for each geom.
Returns:
all_weights (torch.Tensor): sampling weights for each
geom in the dataset, normalized to 1.
"""
num_geoms = sum([len(i) for i in spec_dic.values()])
all_weights = torch.zeros(num_geoms)
for lst_idx in spec_dic.values():
idx = torch.LongTensor(lst_idx)
this_zhu = zhu_p[idx]
sum_zhu = this_zhu.sum()
# make sure they're normalized first
if sum_zhu != 0:
this_zhu /= sum_zhu
all_weights[idx] = this_zhu
all_weights /= all_weights.sum()
return all_weights
def imbalanced_spec_zhu(zhu_p):
"""
Get the Zhu weights assigned to each geom, such that
the probability of getting **any** geom i in the dataset
is related to the probability of getting **any** geom j
through p(i) / p(j) = p_zhu(i) / p_zhu(j),
p_zhu is the Zhu-Nakamura hopping probability. This
is not balanced with respect to species, so a species
that has more geoms with high p_zhu will get sampled
more often.
Args:
zhu_p (torch.Tensor): Zhu-Nakamura hopping probabilities
for each geom
Returns:
all_weights (torch.Tensor): sampling weights for each
geom in the dataset, normalized to 1
"""
all_weights = zhu_p / zhu_p.sum()
return all_weights
def assign_clusters(ref_idx,
spec_nxyz,
ref_nxyzs,
device,
num_clusters,
extra_category,
extra_rmsd):
"""
Assign each geom to a cluster.
Args:
ref_idx (torch.LongTensor): atom indices
to consider in the RMSD computation between reference
nxyz and geom nxyz. For example, if you want to associate
a geometry to a cis or trans cluster, you only really want
the RMSD of the CNNC atoms with respect to those in the
converged cis or trans geoms.
spec_nxyz (list[torch.Tensor]): list of nxyz's for this
species.
ref_nxyzs (list[list[torch.Tensor]]): the reference xyz's that
you want to include in your sampling (e.g. cis,
trans, and CI). Every xyz will be assigned to the
one of these three states that it is closest to.
These three states will then be evenly sampled.
Note that each state gets its own list of tensors,
because one state can have more than one geom (e.g. there
might be multiple distinct CI geoms).
device (str): device on which to do the RMSD calculations
num_clusters (int): number of clusters a geom could be a
part of.
extra_category (bool, optional): whether to add an extra category for the
cluster assignment, occupied by any geoms not close enough to a geom
in `ref_nxyz_dic`
extra_rmsd (float, optional): if using `extra_category`, this is the RMSD
beyond which a geom will be assigned to an extra category.
Returns:
cluster_dic (dict): dictionary of the form {cluster: idx},
where cluster is the cluster number and idx is the set of
indices of geoms that belong to that cluster.
min_rmsds (torch.Tensor): the RMSDs between each species
and its clusters. Returning this is useful for
when we want to assign diabatic states to geoms later
on.
"""
# assign a cluster to each nxyz by computing its RMSD with respect
# to each reference nxyz and selecting the one with the smallest
# distance
# we'll make datasets so we can use them as input to the torch
# parallelized distance computation
# the first is just the set of geom nxyz's
props_1 = {"nxyz": [i[ref_idx] for i in spec_nxyz]}
# the second is the reference dataset
props_0 = {"nxyz": []}
# use `cluster_idx` to keep track of which reference geoms belong
# to which cluster, because one cluster can have many reference
# geoms
cluster_idx = {}
for i, ref_nxyz_lst in enumerate(ref_nxyzs):
cluster_idx[i] = torch.arange(len(ref_nxyz_lst))
if i != 0:
cluster_idx[i] += cluster_idx[i - 1][-1] + 1
for ref_nxyz in ref_nxyz_lst:
props_0["nxyz"].append(ref_nxyz[ref_idx])
# compute the rmsds
dset_0 = Dataset(props=props_0)
dset_1 = Dataset(props=props_1)
rmsds, _ = compute_distances(dataset=dset_0,
device=device,
dataset_1=dset_1)
# take the minimum rmsd with respect to the set of reference
# nxyz's in each cluster. Put infinity if a species is missing a
# reference for a certain cluster.
min_rmsds = torch.zeros(len(spec_nxyz), num_clusters)
for cluster, idx in cluster_idx.items():
these_rmsds = rmsds[idx]
these_mins, _ = these_rmsds.min(0)
min_rmsds[:, cluster] = these_mins
# assign a cluster to each species and compute the rmsd
# to that cluster
min_rmsds[torch.isnan(min_rmsds)] = float("inf")
clusters = min_rmsds.argmin(-1)
if extra_category:
in_extra = min_rmsds.min(-1)[0] >= extra_rmsd
clusters[in_extra] = num_clusters
# record clusters in `cluster_dic`
cluster_dic = {i: [] for i in
range(num_clusters + int(extra_category))}
for spec_idx, cluster in enumerate(clusters):
cluster_dic[cluster.item()].append(spec_idx)
return cluster_dic, min_rmsds
def per_spec_config_weights(spec_nxyz,
ref_nxyzs,
ref_idx,
num_clusters,
extra_category,
extra_rmsd,
device='cpu'):
"""
Get weights to evenly sample different regions of phase
space for a given species
Args:
spec_nxyz (list[torch.Tensor]): list of nxyz's for this
species.
ref_nxyzs (list[list[torch.Tensor]]): the reference xyz's that
you want to include in your sampling (e.g. cis,
trans, and CI). Every xyz will be assigned to the
one of these three states that it is closest to.
These three states will then be evenly sampled.
Note that each state gets its own list of tensors,
because one state can have more than one geom (e.g. there
might be multiple distinct CI geoms).
ref_idx (torch.LongTensor): atom indices
to consider in the RMSD computation between reference
nxyz and geom nxyz. For example, if you want to associate
a geometry to a cis or trans cluster, you only really want
the RMSD of the CNNC atoms with respect to those in the
converged cis or trans geoms.
num_clusters (int): number of clusters a geom could be a
part of.
extra_category (bool, optional): whether to add an extra category for the
cluster assignment, occupied by any geoms not close enough to a geom
in `ref_nxyz_dic`
extra_rmsd (float, optional): if using `extra_category`, this is the RMSD
beyond which a geom will be assigned to an extra category.
device (str): device on which to do the RMSD calculations
Returns:
geom_weights(torch.Tensor): weights for each geom of this species,
normalized to 1.
cluster_rmsds (torch.Tensor): the RMSD between each species
and its closest cluster. Returning this is useful for
when we want to assign diabatic states to geoms later
on.
cluster_dic (dict): dictionary of the form {cluster: idx},
where cluster is the cluster number and idx is the set of
indices of geoms that belong to that cluster.
"""
# a dictionary that tells you which geoms are in each cluster
cluster_dic, cluster_rmsds = assign_clusters(ref_idx=ref_idx,
spec_nxyz=spec_nxyz,
ref_nxyzs=ref_nxyzs,
device=device,
num_clusters=num_clusters,
extra_category=extra_category,
extra_rmsd=extra_rmsd)
# assign weights to each geom equal to 1 / (num geoms in cluster),
# so that the probability of sampling any one cluster is equal to
# the probability of sampling any other
num_geoms = len(spec_nxyz)
geom_weights = torch.zeros(num_geoms)
for idx in cluster_dic.values():
if len(idx) == 0:
continue
geom_weight = 1 / len(idx)
# print(geom_weight)
torch_idx = torch.LongTensor(idx)
geom_weights[torch_idx] = geom_weight
# return normalized weights
geom_weights /= geom_weights.sum()
return geom_weights, cluster_rmsds, cluster_dic
def all_spec_config_weights(props,
ref_nxyz_dic,
spec_dic,
device,
extra_category,
extra_rmsd):
"""
Get the "configuration weights" for each geom, i.e.
the weights chosen to evenly sample each cluster
for each species.
Args:
props (dict): dataset properties
ref_nxyz_dic (dict): dictionary of the form
{smiles: [{"nxyz": ref_nxyz,
"idx": idx}]}, where smiles is
the smiles without cis/trans info, the
ref_nxyzs are the reference nxyz's
for that species, and idx are the indices
of the atoms in the RMSD computation
with respect to the reference.
spec_dic (dict): dictionary with indices of geoms in each
species
device (str): device on which to do the RMSD calculations
extra_category (bool, optional): whether to add an extra category for the
cluster assignment, occupied by any geoms not close enough to a geom
in `ref_nxyz_dic`
extra_rmsd (float, optional): if using `extra_category`, this is the RMSD
beyond which a geom will be assigned to an extra category.
Returns:
weight_dic(dict): dictionary of the form {smiles: geom_weights},
where geom_weights are the set of normalized weights for each
geometry in that species.
cluster_rmsds (torch.Tensor): RMSD between geom and its species'
clusters for each geom in the dataset.
cluster_assgn (torch.Tensor): assignment of each geom to a cluster
"""
weight_dic = {}
num_geoms = len(props['nxyz'])
num_clusters = max([len(ref_dic['nxyz']) for
ref_dic in ref_nxyz_dic.values()])
cluster_rmsds = torch.zeros(num_geoms, num_clusters)
cluster_assgn = torch.zeros(num_geoms)
for spec in tqdm(list(spec_dic.keys())):
idx = spec_dic[spec]
ref_nxyzs = ref_nxyz_dic[spec]['nxyz']
ref_idx = ref_nxyz_dic[spec]['idx']
spec_nxyz = [props['nxyz'][i] for i in idx]
geom_weights, these_rmsds, cluster_dic = per_spec_config_weights(
spec_nxyz=spec_nxyz,
ref_nxyzs=ref_nxyzs,
ref_idx=ref_idx,
num_clusters=num_clusters,
device=device,
extra_category=extra_category,
extra_rmsd=extra_rmsd)
# assign weights to each species
weight_dic[spec] = geom_weights
# record the rmsds to the clusters for each geom
cluster_rmsds[idx] = these_rmsds
# record the cluster assignments for each geom
for cluster, base_idx in cluster_dic.items():
cluster_assgn[idx[base_idx]] = cluster
return weight_dic, cluster_rmsds, cluster_assgn
def balanced_spec_config(weight_dic,
spec_dic):
"""
Generate weights for geoms such that there is balance with respect
to species [p(A) = p(B)], and with respect to clusters in each
species [p(A, c1) = p(A, c2), where c1 and c2 are two different
clusters in species A].
Args:
spec_dic (dict): dictionary with indices of geoms in each species.
weight_dic (dict): dictionary of the form {smiles: geom_weights},
where geom_weights are the set of normalized weights for each
geometry in that species.
Returns:
all_weights (torch.Tensor): normalized set of weights
"""
num_geoms = sum([i.shape[0] for i in weight_dic.values()])
all_weights = torch.zeros(num_geoms)
for key, idx in spec_dic.items():
all_weights[idx] = weight_dic[key]
all_weights /= all_weights.sum()
return all_weights
def imbalanced_spec_config(weight_dic,
spec_dic):
"""
Generate weights for geoms such that there is no balance with respect
to species [p(A) != p(B)], but there is with respect to clusters in
each species [p(A, c1) = p(A, c2), where c1 and c2 are two different
clusters in species A].
Args:
spec_dic (dict): dictionary with indices of geoms in each species.
weight_dic (dict): dictionary of the form {smiles: geom_weights},
where geom_weights are the set of normalized weights for
each geometry in that species.
Returns:
all_weights (torch.Tensor): normalized set of weights
"""
num_geoms = sum([i.shape[0] for i in weight_dic.values()])
all_weights = torch.zeros(num_geoms)
for key, idx in spec_dic.items():
num_spec_geoms = len(idx)
all_weights[idx] = weight_dic[key] * num_spec_geoms
all_weights /= all_weights.sum()
return all_weights
def get_rand_weights(spec_dic):
"""
Generate weights for random sampling of geoms - i.e., equal weights
for every geometry.
Args:
spec_dic (dict): dictionary with indices of geoms in each species.
Returns:
balanced_spec_weights (torch.Tensor): weights generated so that
P(A) = P(B) for species A and B, and p(A, i) = p(A, j)
for any geoms within A.
imbalanced_spec_weights (torch.Tensor): weights generated so that
P(A) != P(B) in general for species A and B, and p(i) = p(j)
for any geoms.
"""
num_geoms = sum([len(i) for i in spec_dic.values()])
imbalanced_spec_weights = torch.ones(num_geoms)
imbalanced_spec_weights /= imbalanced_spec_weights.sum()
balanced_spec_weights = torch.zeros(num_geoms)
for idx in spec_dic.values():
if len(idx) == 0:
continue
balanced_spec_weights[idx] = 1 / len(idx)
total = balanced_spec_weights.sum()
if total != 0:
balanced_spec_weights /= total
return balanced_spec_weights, imbalanced_spec_weights
def combine_weights(balanced_config,
imbalanced_config,
balanced_zhu,
imbalanced_zhu,
balanced_rand,
imbalanced_rand,
spec_weight,
config_weight,
zhu_weight):
"""
Combine config weights, Zhu-Nakamura weights, and random
weights to get the final weights for each geom.
Args:
balanced_config (torch.Tensor): config weights with
species balancing
imbalanced_config (torch.Tensor): config weights without
species balancing
balanced_zhu (torch.Tensor): Zhu weights with
species balancing
imbalanced_zhu (torch.Tensor): Zhu weights without
species balancing
balanced_rand (torch.Tensor): equal weights with
species balancing
imbalanced_rand (torch.Tensor): equal weights without
species balancing
spec_weight (float): weight given to equal species balancing.
If equal to 1, then p(A) = p(B) for all species. If equal
to 0, species are not considered at all for balancing.
Intermediate values reflect the extent to which you care
about balancing species during sampling.
config_weight (float): the weight given to balance among
configurations. Must be <= 1.
zhu_weight (float): the weight given to sampling high-hopping-rate
geoms. Must be <= 1 and satisfy `config_weight` + `zhu_weight`
<= 1. The difference, 1 - config_weight - zhu_weight, is the
weight given to purely random sampling.
Returns:
final_weights (torch.Tensor): final weights for all geoms, normalized
to 1.
"""
# combination of zhu weights that are balanced and imbalanced with respect
# to species
weighted_zhu = (balanced_zhu * zhu_weight * spec_weight
+ imbalanced_zhu * zhu_weight * (1 - spec_weight))
# combination of config weights that are balanced and imbalanced with
# respect to species
weighted_config = (balanced_config * config_weight * spec_weight
+ imbalanced_config * config_weight * (1 - spec_weight))
# combination of random weights that are balanced and imbalanced with
# respect to species
rand_weight = (1 - zhu_weight - config_weight)
weighted_rand = (balanced_rand * rand_weight * spec_weight
+ imbalanced_rand * rand_weight * (1 - spec_weight))
# final weights
final_weights = weighted_zhu + weighted_config + weighted_rand
return final_weights
def spec_config_zhu_balance(props,
ref_nxyz_dic,
zhu_kwargs,
spec_weight,
config_weight,
zhu_weight,
extra_category=False,
extra_rmsd=None,
device='cpu'):
"""
Generate weights that combine balancing of species,
configurations, and Zhu-Nakamura hopping rates.
Args:
props (dict): dataset properties
zhu_kwargs (dict): dictionary with information about how
to calculate the hopping rates.
spec_weight (float): weight given to equal species balancing.
If equal to 1, then p(A) = p(B) for all species. If equal
to 0, species are not considered at all for balancing.
Intermediate values reflect the extent to which you care
about balancing species during sampling.
config_weight (float): the weight given to balance among configurations.
Must be <= 1.
zhu_weight (float): the weight given to sampling high-hopping rate geoms.
Must be <= 1 and satisfy `config_weight` + `zhu_weight` <= 1. The
difference, 1 - config_weight - zhu_weight, is the weight given to
purely random sampling.
extra_category (bool, optional): whether to add an extra category for the
cluster assignment, occupied by any geoms not close enough to a geom
in `ref_nxyz_dic`
extra_rmsd (float, optional): if using `extra_category`, this is the RMSD
beyond which a geom will be assigned to an extra category.
device (str): device on which to do the RMSD calculations
Returns:
results (dict): dictionary with final weights and also config weights for
future use.
"""
spec_dic = get_spec_dic(props)
# get the species-balanced and species-imbalanced
# configuration weights
config_weight_dic, cluster_rmsds, cluster_assgn = all_spec_config_weights(
props=props,
ref_nxyz_dic=ref_nxyz_dic,
spec_dic=spec_dic,
device=device,
extra_category=extra_category,
extra_rmsd=extra_rmsd)
balanced_config = balanced_spec_config(
weight_dic=config_weight_dic,
spec_dic=spec_dic)
imbalanced_config = imbalanced_spec_config(
weight_dic=config_weight_dic,
spec_dic=spec_dic)
# get the species-balanced and species-imbalanced
# zhu weights
zhu_p = compute_zhu(props=props,
zhu_kwargs=zhu_kwargs)
balanced_zhu = balanced_spec_zhu(spec_dic=spec_dic,
zhu_p=zhu_p)
imbalanced_zhu = imbalanced_spec_zhu(zhu_p=zhu_p)
# get the random weights
balanced_rand, imbalanced_rand = get_rand_weights(
spec_dic=spec_dic)
# combine them all together
final_weights = combine_weights(
balanced_config=balanced_config,
imbalanced_config=imbalanced_config,
balanced_zhu=balanced_zhu,
imbalanced_zhu=imbalanced_zhu,
balanced_rand=balanced_rand,
imbalanced_rand=imbalanced_rand,
spec_weight=spec_weight,
config_weight=config_weight,
zhu_weight=zhu_weight)
# put relevant info in a dictionary
results = {"weights": final_weights,
"cluster_rmsds": cluster_rmsds,
"clusters": cluster_assgn}
return results
```
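The weights produced by the functions above are meant to be fed to a PyTorch sampler. Here is a minimal hypothetical sketch using only the species-balancing helpers; the smiles are made up and the `nff.data.sampling` module path is assumed.
```python
from torch.utils.data import WeightedRandomSampler

# assumed module path for the helpers defined above
from nff.data.sampling import get_spec_dic, get_rand_weights

# made-up props: two cis/trans geoms of one species and two of another
props = {"smiles": ["C/C=C/C", "C/C=C\\C", "CCO", "CCO"]}

# indices of the geoms belonging to each species (stereo markers stripped)
spec_dic = get_spec_dic(props)

# equal-weight sampling, with and without balancing across species
balanced, imbalanced = get_rand_weights(spec_dic)

sampler = WeightedRandomSampler(weights=balanced,
                                num_samples=len(balanced),
                                replacement=True)
```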
#### File: nff/data/stats.py
```python
import torch
import numpy as np
from nff.data import Dataset
def remove_outliers(array, std_away=3, max_value=np.inf):
"""
Remove outliers from given array using both a number of standard
deviations and a hard cutoff.
Args:
array (np.array): array from which the outliers will be removed.
std_away (float): maximum number of standard deviations to consider
a value as outlier.
max_value (float): cutoff for the values of array. Values higher than
this cutoff will be considered outliers and thus removed from the
array.
Returns:
array without outliers (np.array)
non_outlier (np.array): array containing the indices of non-outlier
values.
"""
std = np.std(array)
mean = np.mean(array)
non_outlier = np.bitwise_and(
np.abs(array - mean) < std_away * std,
array < max_value
)
non_outlier = np.arange(len(array))[non_outlier]
return array[non_outlier], non_outlier
def remove_dataset_outliers(dset, reference_key='energy', std_away=3, max_value=np.inf):
"""
Remove outliers from given dataset using both a number of standard
deviations and a hard cutoff.
Args:
dset (nff.data.Dataset): dataset from which the outliers will be removed.
reference_key (str): key of the dataset which should serve as reference
when removing the outliers.
std_away (float): maximum number of standard deviations to consider
a value as outlier.
max_value (float): cutoff for the values of array. Values higher than
this cutoff will be considered outliers and thus removed from the
array.
Returns:
new_dset (nff.data.Dataset): new dataset with the bad data removed.
"""
array = dset.props[reference_key]
if isinstance(array, torch.Tensor):
array = array.cpu().numpy()
_, idx = remove_outliers(array, std_away, max_value)
new_props = {
key: [val[i] for i in idx]
for key, val in dset.props.items()
}
return Dataset(new_props, units=dset.units)
```
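A quick hypothetical sketch of the outlier removal above; the values are made up and the `nff.data.stats` module path is assumed.
```python
import torch

from nff.data import Dataset
# assumed module path for the helper defined above
from nff.data.stats import remove_dataset_outliers

# 100 made-up geometries, one with an absurdly high energy
props = {"nxyz": [torch.rand(3, 4) for _ in range(100)],
         "energy": torch.cat([torch.zeros(99), torch.tensor([1000.0])])}
dset = Dataset(props=props)

# drop anything more than 3 standard deviations from the mean energy
trimmed = remove_dataset_outliers(dset, reference_key="energy", std_away=3)
```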
#### File: nff/io/cprop.py
```python
import json
import os
import numpy as np
from nff.utils import bash_command, fprint
def get_cp_cmd(script,
config_path,
data_path,
dataset_type):
"""
Get the string for a ChemProp command.
Args:
script (str): the path to the chemprop script you're running
config_path (str): path to the config file for the job
data_path (str): path to the dataset being used
dataset_type (str): type of problem you're doing (e.g. regression,
classification, multiclass)
Returns:
cmd (str): the chemprop command
"""
cmd = (f"python {script} --config_path {config_path} "
f" --data_path {data_path} "
f" --dataset_type {dataset_type}")
return cmd
def cp_hyperopt(cp_folder,
hyp_folder,
rerun):
"""
Run hyperparameter optimization with ChemProp.
Args:
cp_folder (str): path to the chemprop folder on your computer
hyp_folder (str): where you want to store your hyperparameter
optimization models
rerun (bool): whether to rerun hyperparameter optimization if
`hyp_folder` already exists and has the completion file
`best_params.json`.
Returns:
best_params (dict): best parameters from hyperparameter
optimization
"""
# path to `best_params.json` file
param_file = os.path.join(hyp_folder, "best_params.json")
params_exist = os.path.isfile(param_file)
# If it exists and you don't want to re-run, then load it
if params_exist and (not rerun):
fprint(f"Loading hyperparameter results from {param_file}\n")
with open(param_file, "r") as f:
best_params = json.load(f)
return best_params
# otherwise run the script and read in the results
hyp_script = os.path.join(cp_folder, "hyperparameter_optimization.py")
config_path = os.path.join(hyp_folder, "config.json")
with open(config_path, "r") as f:
config = json.load(f)
data_path = config["data_path"]
dataset_type = config["dataset_type"]
cmd = get_cp_cmd(hyp_script,
config_path,
data_path,
dataset_type)
cmd += f" --config_save_path {param_file}"
fprint(f"Running hyperparameter optimization in folder {hyp_folder}\n")
fprint(cmd)
p = bash_command(f"source activate chemprop && {cmd}")
p.wait()
with open(param_file, "r") as f:
best_params = json.load(f)
return best_params
def cp_train(cp_folder,
train_folder):
"""
Train a chemprop model.
Args:
cp_folder (str): path to the chemprop folder on your computer
train_folder (str): where you want to store your trained models
Returns:
None
"""
train_script = os.path.join(cp_folder, "train.py")
config_path = os.path.join(train_folder, "config.json")
with open(config_path, "r") as f:
config = json.load(f)
data_path = config["data_path"]
dataset_type = config["dataset_type"]
cmd = get_cp_cmd(train_script,
config_path,
data_path,
dataset_type)
p = bash_command(f"source activate chemprop && {cmd}")
p.wait()
def make_feat_paths(feat_path):
"""
Make a feature path into a list.
Args:
feat_path (str): feature path
Returns:
paths (list): list of paths
"""
if feat_path is not None:
paths = [feat_path]
else:
paths = None
return paths
def modify_config(base_config_path,
metric,
train_feat_path,
val_feat_path,
test_feat_path,
train_folder,
features_only,
hyp_params,
no_features):
"""
Modify a chemprop config file with new parameters.
Args:
base_config_path (str): where your basic job config file
is, with parameters that may or may not be changed depending
on the given run
metric (str): what metric you want to optimize in this run
train_feat_path (str): where the features of your training set are
val_feat_path (str): where the features of your validation set are
test_feat_path (str): where the features of your test set are
train_folder (str): where you want to store your trained models
features_only (bool): whether to just train with the features and no
MPNN
hyp_params (dict): any hyperparameters that may have been optimized
no_features (bool): if True, do not use external features when training the model.
Returns:
None
"""
with open(base_config_path, "r") as f:
config = json.load(f)
dic = {"metric": metric,
"features_path": make_feat_paths(train_feat_path),
"separate_val_features_path": make_feat_paths(val_feat_path),
"separate_test_features_path": make_feat_paths(test_feat_path),
"save_dir": train_folder,
"features_only": features_only,
**hyp_params}
config.update({key: val for key, val in
dic.items() if val is not None})
if no_features:
for key in list(config.keys()):
if "features_path" in key:
config.pop(key)
new_config_path = os.path.join(train_folder, "config.json")
if not os.path.isdir(train_folder):
os.makedirs(train_folder)
with open(new_config_path, "w") as f:
json.dump(config, f, indent=4, sort_keys=True)
def modify_hyp_config(hyp_config_path,
metric,
hyp_feat_path,
hyp_folder,
features_only,
no_features):
"""
Modify a hyperparameter optimization config file with new parameters.
Args:
hyp_config_path (str): where your basic hyperopt job config file
is, with parameters that may or may not be changed depending
on the given run
metric (str): what metric you want to optimize in this run
hyp_feat_path (str): path to all the features of the species that are
part of the hyperparameter optimization (train and val from the
real dataset).
hyp_folder (str): where you want to store your trained models
features_only (bool): whether to just train with the features and no
MPNN
no_features (bool): if True, do not use external features when training the model.
Returns:
None
"""
with open(hyp_config_path, "r") as f:
config = json.load(f)
dic = {"metric": metric,
"features_path": make_feat_paths(hyp_feat_path),
"save_dir": hyp_folder,
"features_only": features_only}
config.update({key: val for key, val in
dic.items() if val is not None})
if no_features:
for key in list(config.keys()):
if "features_path" in key:
config.pop(key)
new_config_path = os.path.join(hyp_folder, "config.json")
if not os.path.isdir(hyp_folder):
os.makedirs(hyp_folder)
with open(new_config_path, "w") as f:
json.dump(config, f, indent=4, sort_keys=True)
def get_smiles(smiles_folder, name):
"""
Get SMILES strings from csv.
Args:
smiles_folder (str): folder with the csvs
name (str): csv file name
Returns:
smiles_list (list[str]): SMILES strings
"""
path = os.path.join(smiles_folder, name)
with open(path, "r") as f:
lines = f.readlines()
smiles_list = [i.strip() for i in lines[1:]]
return smiles_list
def save_smiles(smiles_folder, smiles_list, name):
"""
Re-save the SMILES strings, ignoring those that aren't in
`smiles_list`.
Args:
smiles_folder (str): folder with the csvs
smiles_list (list[str]): SMILES strings that we will use
in training -- excludes those that, for example, do not
have 3D structures.
name (str): csv file name
Returns:
None
"""
# both the file with only the SMILES string, and the file
# that has the SMILES string with its properties (e.g. bind /
# no bind):
file_names = [f"{name}_smiles.csv", f"{name}_full.csv"]
paths = [os.path.join(smiles_folder, name) for name in
file_names]
for path in paths:
with open(path, "r") as f:
lines = f.readlines()
keep_lines = [lines[0]]
for line in lines[1:]:
smiles = line.split(",")[0].strip()
# keep the line only if the SMILES string is in
# `smiles_list`
if smiles in smiles_list:
keep_lines.append(line)
text = "".join(keep_lines)
with open(path, "w") as f:
f.write(text)
def make_hyperopt_csvs(smiles_folder, all_smiles):
"""
Make csv files with SMILES strings for hyperparameter optimization.
Args:
smiles_folder (str): folder with the csvs
all_smiles (list[str]): combined train and val SMILES for hyperparameter
optimization that are actually used
Returns:
None
"""
# full csv with properties, and just smiles csv
suffixes = ["smiles", "full"]
# dictionary with the combined lines read from train and val csvs
# for each of the suffixes
combined_lines = {suffix: [] for suffix in suffixes}
for i, name in enumerate(["train", "val"]):
for suffix in suffixes:
file_path = os.path.join(smiles_folder, f"{name}_{suffix}.csv")
with open(file_path, "r") as f:
lines = f.readlines()
# only include the header in the first file
if i != 0:
lines = lines[1:]
combined_lines[suffix] += lines
# write to new hyperopt csvs
for suffix, lines in combined_lines.items():
text = "".join(lines)
new_path = os.path.join(smiles_folder, f"hyperopt_{suffix}.csv")
with open(new_path, "w") as f:
f.write(text)
# re-save to account for the fact that not all smiles are used
save_smiles(smiles_folder, all_smiles, name="hyperopt")
def save_hyperopt(feat_folder,
metric,
smiles_folder,
cp_save_folder,
dset_size):
"""
Aggregate and save the train and validation SMILES for hyperparameter optimization.
Args:
feat_folder (str): path to the folder that contains all the feature files.
metric (str): metric with which you're evaluating the model performance
smiles_folder (str): folder with the csvs
cp_save_folder (str): folder in which you're saving features for chemprop use
dset_size (int, optional): maximum size of the entire dataset to use in hyperparameter
optimization.
Returns:
hyp_np_path (str): path of npz features file for hyperparameter optimization
"""
names = ["train", "val"]
all_feats = []
all_smiles = []
for name in names:
smiles_list = get_smiles(smiles_folder, f"{name}_smiles.csv")
np_save_path = os.path.join(cp_save_folder,
f"{name}_{metric}.npz")
feats = np.load(np_save_path)['features']
all_feats.append(feats)
all_smiles += smiles_list
all_feats = np.concatenate(all_feats)
if dset_size is not None:
all_smiles = all_smiles[:dset_size]
all_feats = all_feats[:dset_size]
# save the entire train + val dataset features
hyp_np_path = os.path.join(cp_save_folder,
f"hyperopt_{metric}.npz")
np.savez_compressed(hyp_np_path, features=all_feats)
# save csvs for the train + val dataset
make_hyperopt_csvs(smiles_folder=smiles_folder,
all_smiles=all_smiles)
return hyp_np_path
```
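The csv helpers above expect one SMILES string per row below a header line. A small hypothetical sketch (paths and contents are made up, and the `nff.io.cprop` module path is assumed):
```python
import os
import tempfile

# assumed module path for the helper defined above
from nff.io.cprop import get_smiles

# write a csv in the layout get_smiles expects: a header row, then SMILES
smiles_folder = tempfile.mkdtemp()
with open(os.path.join(smiles_folder, "train_smiles.csv"), "w") as f:
    f.write("smiles\nCCO\nc1ccccc1\n")

print(get_smiles(smiles_folder, "train_smiles.csv"))  # ['CCO', 'c1ccccc1']
```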
#### File: io/tests/test_ase.py
```python
from nff.io.ase import *
import numpy as np
import unittest as ut
import networkx as nx
def get_ethanol():
"""Returns an ethanol molecule.
Returns:
ethanol (Atoms)
"""
nxyz = np.array([
[ 6.0000e+00, 5.5206e-03, 5.9149e-01, -8.1382e-04],
[ 6.0000e+00, -1.2536e+00, -2.5536e-01, -2.9801e-02],
[ 8.0000e+00, 1.0878e+00, -3.0755e-01, 4.8230e-02],
[ 1.0000e+00, 6.2821e-02, 1.2838e+00, -8.4279e-01],
[ 1.0000e+00, 6.0567e-03, 1.2303e+00, 8.8535e-01],
[ 1.0000e+00, -2.2182e+00, 1.8981e-01, -5.8160e-02],
[ 1.0000e+00, -9.1097e-01, -1.0539e+00, -7.8160e-01],
[ 1.0000e+00, -1.1920e+00, -7.4248e-01, 9.2197e-01],
[ 1.0000e+00, 1.8488e+00, -2.8632e-02, -5.2569e-01]
])
ethanol = Atoms(
nxyz[:, 0].astype(int),
positions=nxyz[:, 1:]
)
return ethanol
@ut.skip('skip this for now')
class TestAtomsBatch(ut.TestCase):
def setUp(self):
self.ethanol = get_ethanol()
def test_AtomsBatch(self):
# Test for an ethanol molecule (no PBC)
expected_nbrlist_cutoff_2dot5 = np.array([
[0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [0, 7], [0, 8],
[1, 0], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7], [2, 0],
[2, 1], [2, 3], [2, 4], [2, 6], [2, 7], [2, 8], [3, 0], [3, 1],
[3, 2], [3, 4], [3, 8], [4, 0], [4, 1], [4, 2], [4, 3], [4, 7],
[5, 0], [5, 1], [5, 6], [5, 7], [6, 0], [6, 1], [6, 2], [6, 5],
[6, 7], [7, 0], [7, 1], [7, 2], [7, 4], [7, 5], [7, 6], [8, 0],
[8, 2], [8, 3]
])
atoms_batch = AtomsBatch(self.ethanol, cutoff=2.5)
atoms_batch.update_nbr_list()
G1 = nx.from_edgelist(expected_nbrlist_cutoff_2dot5)
G2 = nx.from_edgelist(atoms_batch.nbr_list.numpy())
assert nx.is_isomorphic(G1, G2)
    def test_get_batch(self):
atoms_batch = AtomsBatch(self.ethanol, cutoff=5)
batch = atoms_batch.get_batch()
assert 'nxyz' in batch
class TestPeriodic(ut.TestCase):
def setUp(self):
nxyz = np.array([
[14.0, -1.19984241582007, 2.07818802527655, 4.59909615202747],
[14.0, 1.31404847917993, 2.27599872954824, 2.7594569553608],
[14.0, 2.39968483164015, 0.0, 0.919817758694137],
[8.0, -1.06646793438585, 3.24694318819338, 0.20609293956337],
[8.0, 0.235189576572621, 1.80712683722845, 3.8853713328967],
[8.0, 0.831278357813231, 3.65430348422777, 2.04573213623004],
[8.0, 3.34516925281323, 0.699883270597028, 5.31282465043663],
[8.0, 1.44742296061415, 1.10724356663142, 1.6335462571033],
[8.0, 2.74908047157262, 2.54705991759635, 3.47318545376996]
])
lattice = np.array([
[5.02778179, 0.0, 3.07862843796742e-16],
[-2.513890895, 4.3541867548248, 3.07862843796742e-16],
[0.0, 0.0, 5.51891759]
])
self.quartz = AtomsBatch(
nxyz[:, 0].astype(int),
positions=nxyz[:, 1:],
cell=lattice,
pbc=True
)
def test_ase(self):
print(self.quartz)
def test_nbrlist(self):
nbrlist, offsets = self.quartz.update_nbr_list()
print(nbrlist)
print(offsets)
if __name__ == '__main__':
ut.main()
```
#### File: md/ci/opt.py
```python
import sys
sys.path.append('/home/saxelrod/htvs-ax/htvs')
import os
import django
os.environ["DJANGO_SETTINGS_MODULE"]="djangochem.settings.orgel"
django.setup()
# Shell Plus Model Imports
from django.contrib.contenttypes.models import ContentType
from jobs.models import Job, JobConfig
from django.contrib.auth.models import Group
from pgmols.models import (AtomBasis,
Geom, Hessian, Jacobian, MDFrame, Mechanism, Method, Mol, MolGroupObjectPermission,
MolSet, MolUserObjectPermission, PathImage, ProductLink, ReactantLink, Reaction,
ReactionPath, ReactionType, SinglePoint, Species, Stoichiometry, Trajectory)
import numpy as np
import random
import pdb
import json
from rdkit import Chem
from torch.utils.data import DataLoader
import copy
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone
from ase.calculators.calculator import Calculator
from ase import optimize, Atoms, units
from ase.md.verlet import VelocityVerlet
from ase.io.trajectory import Trajectory as AseTrajectory
from nff.nn.models import PostProcessModel  # needed by get_new_model below
from nff.io.ase_ax import NeuralFF, AtomsBatch
from nff.train import load_model
from nff.utils import constants as const
from nff.nn.tensorgrad import get_schnet_hessians
from nff.data import collate_dicts, Dataset
from nff.utils.cuda import batch_to
from neuralnet.utils import vib
KT = 0.000944853
FS_TO_AU = 41.341374575751
AU_TO_ANGS = 0.529177
CM_TO_AU = 4.5564e-6
AU_TO_KELVIN = 317638
DEFAULT_OPT_METHOD = 'nn_ci_opt_sf_tddft_bhhlyp'
DEFAULT_MD_METHOD = 'nn_ci_dynamics_sf_tddft_bhhlyp'
DEFAULT_OPT_CONFIG = 'nn_ci_opt'
DEFAULT_MD_CONFIG = 'nn_ci_dynamics'
DEFAULT_GROUP = 'switches'
DEFAULT_TEMP = 300
DEFAULT_PENALTY = 0.5
DEFAULT_CI_OPT_TYPE = 'BFGS'
DEFAULT_MAX_STEPS = 500
WEIGHT_FOLDER = '/home/saxelrod/engaging/models'
GROUP_NAME = 'switches'
PERIODICTABLE = Chem.GetPeriodicTable()
# BASE_NXYZ = np.array([[9.0, 1.626, -1.2352, -2.1575],
# [6.0, 1.9869, -0.4611, -1.1023],
# [6.0, 2.4846, 0.8267, -1.2586],
# [6.0, 2.8629, 1.5495, -0.1401],
# [6.0, 2.8794, 0.9444, 1.1089],
# [6.0, 2.314, -0.313, 1.2809],
# [9.0, 2.5912, -1.0202, 2.3902],
# [6.0, 1.6616, -0.8408, 0.1753],
# [7.0, 0.828, -1.9748, 0.448],
# [7.0, -0.2528, -1.678, 1.0679],
# [6.0, -1.087, -0.7575, 0.3522],
# [6.0, -1.8388, -1.3056, -0.6806],
# [6.0, -3.0777, -0.8041, -1.0307],
# [6.0, -3.4558, 0.4394, -0.5895],
# [6.0, -2.6664, 1.0449, 0.3605],
# [6.0, -1.6479, 0.3386, 0.9823],
# [1.0, 2.4427, 1.2813, -2.2382],
# [1.0, 3.1273, 2.6058, -0.2052],
# [1.0, 3.2671, 1.4542, 1.978],
# [1.0, -1.5848, -2.2797, -1.0655],
# [1.0, -3.8298, -1.3837, -1.5617],
# [1.0, -4.364, 0.9446, -0.9216],
# [1.0, -2.9646, 1.969, 0.8442],
# [1.0, -1.3019, 0.6553, 1.9634]])
BASE_NXYZ = np.array([[6.0, 4.452, -0.5003, 0.3975],
[6.0, 3.6787, -1.4613, 1.0474],
[6.0, 2.2963, -1.29, 1.1518],
[6.0, 1.673, -0.1649, 0.6015],
[7.0, 0.2828, -0.0077, 0.7513],
[7.0, -0.2784, 0.0488, -0.3654],
[6.0, -1.6699, 0.1935, -0.2155],
[6.0, -2.2349, 1.31, 0.4161],
[6.0, -3.6213, 1.4419, 0.5226],
[6.0, -4.4562, 0.4614, -0.0113],
[6.0, -3.9067, -0.6474, -0.6535],
[6.0, -2.5197, -0.7764, -0.758],
[6.0, 2.4631, 0.8004, -0.0379],
[6.0, 3.8456, 0.6321, -0.1442],
[1.0, 5.5279, -0.6325, 0.3158],
[1.0, 4.1501, -2.3426, 1.4743],
[1.0, 1.6958, -2.0383, 1.6624],
[1.0, -1.5867, 2.0791, 0.8278],
[1.0, -4.0473, 2.3094, 1.0197],
[1.0, -5.5355, 0.5628, 0.0706],
[1.0, -4.5559, -1.4108, -1.0743],
[1.0, -2.0934, -1.6394, -1.2627],
[1.0, 1.9939, 1.6872, -0.4558],
[1.0, 4.4467, 1.3848, -0.6474]])
# BASE_NXYZ = np.array([[6.0, -2.9158, -0.8555, 0.8318],
# [6.0, -2.8923, -0.9434, -0.5533],
# [6.0, -2.3179, 0.1365, -1.2153],
# [6.0, -1.5066, 1.0489, -0.5507],
# [7.0, -0.557, 1.7803, -1.3222],
# [7.0, 0.3693, 1.0616, -1.8394],
# [6.0, 1.1488, 0.374, -0.8505],
# [6.0, 1.6462, -0.8982, -1.0729],
# [6.0, 2.5616, -1.4677, -0.1954],
# [6.0, 3.1468, -0.6729, 0.7641],
# [6.0, 3.0255, 0.7069, 0.6335],
# [6.0, 1.9038, 1.1984, -0.0226],
# [6.0, -1.6495, 1.1864, 0.8133],
# [6.0, -2.4421, 0.2665, 1.4903],
# [1.0, -3.4773, -1.6338, 1.332],
# [1.0, -3.0551, -1.9037, -1.016],
# [1.0, -2.2432, 0.0943, -2.294],
# [1.0, 1.0876, -1.5393, -1.7434],
# [1.0, 2.6524, -2.5394, -0.0807],
# [1.0, 3.6078, -1.0611, 1.6671],
# [1.0, 3.6938, 1.3734, 1.1781],
# [1.0, 1.5802, 2.2304, 0.1155],
# [1.0, -0.9201, 1.7747, 1.3505],
# [1.0, -2.4469, 0.2829, 2.5802]])
OPT_DIC = {
"BFGS": optimize.BFGS
}
def get_new_model(model, lower_key, upper_key, ref_energy, penalty):
# process_list = [{"func_name": "gap",
# # hack to get ase to think this is the energy
# # for minimization
# "output_name": "energy",
# "params": {"lower_key": lower_key,
# "upper_key": upper_key}},
# {"func_name": "gap_grad",
# "output_name": "energy_grad",
# "params": {"lower_key": lower_key,
# "upper_key": upper_key}}]
if ref_energy is None:
ref_energy = 0
process_list = [{"func_name": "gap_penalty",
# hack to get ase to think this is the energy
# for minimization
"output_name": "energy",
"params": {"lower_key": lower_key,
"upper_key": upper_key,
"ref_energy": ref_energy,
"penalty": penalty
}},
{"func_name": "gap_penalty_grad",
"output_name": "energy_grad",
"params": {"lower_key": lower_key,
"upper_key": upper_key,
"penalty": penalty
}}
]
base_keys = [lower_key, upper_key, lower_key + "_grad",
upper_key + "_grad"]
new_model = PostProcessModel(model=model, process_list=process_list,
base_keys=base_keys)
return new_model
def set_ci_calc(atoms, model, lower_key, upper_key,
**kwargs):
new_model = get_new_model(model=model,
lower_key=lower_key,
upper_key=upper_key,
**kwargs)
ci_calculator = NeuralFF(
model=new_model,
**kwargs)
atoms.set_calculator(ci_calculator)
def pdb_wrap(func):
def main(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
print(e)
pdb.post_mortem()
return main
def opt_ci(
model,
nxyz,
penalty=0.5,
lower_idx=0,
upper_idx=1,
method='BFGS',
steps=500,
**kwargs):
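    # NOTE: the starting geometry is taken from the module-level BASE_NXYZ;
    # the `nxyz` argument is accepted but not used here.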
atoms = AtomsBatch(BASE_NXYZ[:, 0], BASE_NXYZ[:, 1:])
init_calc = NeuralFF(model=model, output_keys=['energy_0'])
atoms.set_calculator(init_calc)
ref_energy = atoms.get_potential_energy().item(
) * const.EV_TO_KCAL_MOL
lower_key = "energy_{}".format(lower_idx)
upper_key = "energy_{}".format(upper_idx)
set_ci_calc(atoms=atoms, model=model,
lower_key=lower_key, upper_key=upper_key,
ref_energy=ref_energy, penalty=penalty)
dyn = OPT_DIC[method](atoms)
dyn.run(steps=steps)
return atoms
def get_modes(model, nxyz, cutoff, energy_keys, device=0):
props = {"nxyz": [nxyz], **{key: [0] for key in energy_keys}}
dataset = Dataset(props.copy())
dataset.generate_neighbor_list(cutoff)
loader = DataLoader(dataset, batch_size=1, collate_fn=collate_dicts)
batch = next(iter(loader))
batch = batch_to(batch, device)
model = model.to(device)
w_list = []
orth_list = []
for key in energy_keys:
hessian = get_schnet_hessians(batch=batch, model=model, device=device,
energy_key=key)[0].cpu().detach().numpy()
# convert to Ha / bohr^2
hessian *= (const.BOHR_RADIUS) ** 2
hessian *= const.KCAL_TO_AU['energy']
force_consts, vib_freqs, eigvec = vib.vib_analy(
r=nxyz[:, 0], xyz=nxyz[:, 1:],
hessian=hessian)
w_list.append(vib_freqs * CM_TO_AU)
orth_list.append(np.array(eigvec))
return w_list, orth_list
def normal_to_real(orth, mass_vec, x=None, ref_geom=None, p=None):
new_orth = np.transpose(orth)
pos = None
mom = None
if x is not None:
        mass_pos = np.matmul(new_orth, x).reshape(-1, 3)
pos = (mass_pos / (mass_vec.reshape(-1, 1)) ** 0.5)
# convert to angstrom
pos *= const.BOHR_RADIUS
# add converged geometry
pos += ref_geom
if p is not None:
mass_mom = np.matmul(new_orth, p).reshape(-1, 3)
mom = (mass_mom * (mass_vec.reshape(-1, 1)) ** 0.5)
return pos, mom
def sample_p(w, orth, mass_vec, kt=KT):
dim = len(w)
cov = kt * np.identity(dim)
p_normal = np.random.multivariate_normal(mean=np.zeros(dim), cov=cov)
_, p_real = normal_to_real(orth=orth, mass_vec=mass_vec, p=p_normal)
p_real *= 1/ (1 / FS_TO_AU * units.fs ) * AU_TO_ANGS / const.AMU_TO_AU
return p_real
def sample_ci(ci_atoms, model, cutoff, energy_keys, device=0, kt=KT):
nxyz = ci_atoms.get_nxyz()
mass_vec = np.array([PERIODICTABLE.GetAtomicWeight(int(element[0])) * const.AMU_TO_AU
for element in nxyz])
w_list, orth_list = get_modes(model=model, nxyz=nxyz, cutoff=cutoff,
energy_keys=energy_keys, device=device)
lower_atoms = copy.deepcopy(ci_atoms)
upper_atoms = copy.deepcopy(ci_atoms)
# pdb.set_trace()
for i, atoms in enumerate([lower_atoms, upper_atoms]):
# ignore negative w modes
w = w_list[i]
orth = orth_list[i]
good_idx = [i for i, w_1d in enumerate(w) if w_1d > 0]
new_w = w[good_idx]
new_orth = orth[good_idx, :]
p = sample_p(w=new_w, orth=new_orth, mass_vec=mass_vec, kt=kt)
atoms.set_momenta(p)
return lower_atoms, upper_atoms
def opt_and_sample_ci(model,
nxyz,
penalty=0.5,
lower_idx=0,
upper_idx=1,
method='BFGS',
steps=500,
cutoff=5.0,
device=0,
kt=KT,
**kwargs):
ci_atoms = opt_ci(
model=model,
nxyz=nxyz,
penalty=penalty,
lower_idx=lower_idx,
upper_idx=upper_idx,
method=method,
steps=steps)
energy_keys = ["energy_{}".format(lower_idx), "energy_{}".format(upper_idx)]
lower_atoms, upper_atoms = sample_ci(
ci_atoms=ci_atoms,
model=model,
cutoff=cutoff,
energy_keys=energy_keys,
device=device,
        kt=kt)
return lower_atoms, upper_atoms
def test():
# weightpath = "/home/saxelrod/engaging/models/971"
weightpath = "/home/saxelrod/engaging/models/953"
nxyz = BASE_NXYZ
penalty = 0.5
# atoms = opt_ci(weightpath=weightpath, nxyz=nxyz,
# penalty=penalty)
model = load_model(weightpath)
lower_idx = 0
upper_idx = 1
lower_atoms, upper_atoms = opt_and_sample_ci(model=model,
nxyz=nxyz,
penalty=DEFAULT_PENALTY,
lower_idx=lower_idx,
upper_idx=upper_idx,
method='BFGS',
steps=100,
cutoff=5.0,
device=0,
kt=KT)
lower_calc = NeuralFF(model=model, output_keys=["energy_{}".format(lower_idx)])
upper_calc = NeuralFF(model=model, output_keys=["energy_{}".format(upper_idx)])
lower_atoms.set_calculator(lower_calc)
upper_atoms.set_calculator(upper_calc)
lower_integrator = VelocityVerlet(lower_atoms, dt=units.fs, logfile='test_lower.log',
trajectory='test_lower.traj')
lower_integrator.run(1000)
upper_integrator = VelocityVerlet(upper_atoms, dt=units.fs, logfile='test_upper.log',
trajectory='test_upper.traj')
upper_integrator.run(1000)
def run_ci_md(model, lower_atoms, upper_atoms, lower_idx, upper_idx,
base_name='test', dt=0.5, tmax=500):
lower_calc = NeuralFF(model=model, output_keys=["energy_{}".format(lower_idx)])
upper_calc = NeuralFF(model=model, output_keys=["energy_{}".format(upper_idx)])
lower_atoms.set_calculator(lower_calc)
upper_atoms.set_calculator(upper_calc)
lower_log = "{}_lower.log".format(base_name)
lower_trj_name = "{}_lower.traj".format(base_name)
num_steps = int(tmax/dt)
lower_integrator = VelocityVerlet(lower_atoms, dt=dt * units.fs, logfile=lower_log,
trajectory=lower_trj_name)
lower_integrator.run(num_steps)
upper_log = "{}_upper.log".format(base_name)
upper_trj_name = "{}_upper.traj".format(base_name)
upper_integrator = VelocityVerlet(upper_atoms, dt=dt * units.fs, logfile=upper_log,
trajectory=upper_trj_name)
upper_integrator.run(num_steps)
lower_trj = AseTrajectory(lower_trj_name)
upper_trj = AseTrajectory(upper_trj_name)
return lower_trj, upper_trj
def make_geom(method, job, coords, parentgeom):
geom = Geom(method=method,
parentjob=job)
geom.set_coords(coords)
geom.converged = False
geom.species = parentgeom.species
geom.stoichiometry = parentgeom.stoichiometry
# geom.details = {'temp_hessian': sampletemp}
geom.save()
geom.parents.add(parentgeom)
return geom
def make_coords(nxyz):
coords = []
for i in range(len(nxyz)):
number, x, y, z = nxyz[i]
element = PERIODICTABLE.GetElementSymbol(int(number))
coords.append(dict(element=element, x=x, y=y, z=z))
return coords
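# For example (coordinates illustrative only):
#   make_coords(np.array([[8.0, 0.0, 0.0, 0.0],
#                         [1.0, 0.96, 0.0, 0.0]]))
# returns [{'element': 'O', 'x': 0.0, 'y': 0.0, 'z': 0.0},
#          {'element': 'H', 'x': 0.96, 'y': 0.0, 'z': 0.0}]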
def to_db(smiles, nnid, num_samples, group_name=GROUP_NAME,
weight_folder=WEIGHT_FOLDER, penalty=DEFAULT_PENALTY,
lower_idx=0, upper_idx=1,
max_opt_steps=DEFAULT_MAX_STEPS, cutoff=5.0,
device=0, kt=KT, lower_trj='test_lower.traj',
upper_trj='test_upper.traj', dt=0.5, tmax=500,
ci_opt_type=DEFAULT_CI_OPT_TYPE,
md_method_name=DEFAULT_MD_METHOD, md_config_name=DEFAULT_MD_CONFIG,
opt_method_name=DEFAULT_OPT_METHOD, opt_config_name=DEFAULT_OPT_CONFIG):
group = Group.objects.get(name=group_name)
parentgeom = Geom.objects.filter(species__smiles=smiles, species__group=group,
converged=True).first()
nxyz = parentgeom.xyz
weightpath = os.path.join(weight_folder, str(nnid))
model = load_model(weightpath)
lower_atoms, upper_atoms = opt_and_sample_ci(model=model,
nxyz=nxyz,
penalty=penalty,
lower_idx=lower_idx,
upper_idx=upper_idx,
method=ci_opt_type,
steps=max_opt_steps,
cutoff=cutoff,
device=device,
kt=kt)
opt_details = {"penalty": penalty,
"lower_state": lower_idx,
"upper_state": upper_idx,
"method": ci_opt_type,
"max_steps": max_opt_steps,
"cutoff": cutoff,
"temp": round(kt * AU_TO_KELVIN, 2),
"nnid": nnid}
opt_method, new_method = Method.objects.get_or_create(name=opt_method_name,
description='generated with opt_ci code for optimizing conical intersections'
)
opt_config, new_jc = JobConfig.objects.get_or_create(name=opt_config_name,
parent_class_name='Geom',
configpath='None')
opt_job = Job(config=opt_config,
status='done',
group=group,
parentct=ContentType.objects.get_for_model(parentgeom),
parentid=parentgeom.id,
completetime=timezone.now()
)
opt_job.details = opt_details
opt_job.save()
ci_nxyz = lower_atoms.get_nxyz()
coords = make_coords(ci_nxyz)
ci_geom = make_geom(method=opt_method,
job=opt_job,
coords=coords,
parentgeom=parentgeom)
######
######
######
lower_trj, upper_trj = run_ci_md(model=model,
lower_atoms=lower_atoms,
upper_atoms=upper_atoms,
lower_idx=lower_idx,
upper_idx=upper_idx,
base_name=parentgeom.id,
dt=dt,
tmax=tmax)
md_details = {"thermostat": "velocity_verlet",
"dt": dt,
"tmax": tmax,
"lower_state": lower_idx,
"upper_state": upper_idx,
"nnid": nnid}
md_method, new_method = Method.objects.get_or_create(name=md_method_name,
description='generated with neural dynamics around CI')
md_config, new_jc = JobConfig.objects.get_or_create(name=md_config_name,
parent_class_name='Geom',
configpath='None')
md_job = Job(config=md_config,
status='done',
group=group,
parentct=ContentType.objects.get_for_model(parentgeom),
parentid=parentgeom.id,
completetime=timezone.now()
)
md_job.details = md_details
md_job.save()
lower_key = "energy_{}".format(lower_idx)
upper_key = "energy_{}".format(upper_idx)
    # pdb.set_trace()
    gap_pairs = []
    best_atoms = []
i = 0
for trj in [lower_trj, upper_trj]:
for atoms in trj:
set_ci_calc(atoms=atoms, model=model,
lower_key=lower_key, upper_key=upper_key,
ref_energy=0, penalty=0)
gap = atoms.get_potential_energy().item()
gap_pairs.append([gap, i])
best_atoms.append(atoms)
i += 1
# # exclude > 1 eV
# if gap < 0.8:
# # if gap < 10000:
# best_atoms.append(atoms)
# # break
# random.shuffle(best_atoms)
sorted_idx = [item[-1] for item in sorted(gap_pairs)]
best_atoms = [best_atoms[i] for i in sorted_idx]
for atoms in best_atoms[:num_samples]:
nxyz = AtomsBatch(atoms).get_nxyz()
coords = make_coords(nxyz)
new_geom = make_geom(method=md_method,
job=md_job,
coords=coords,
parentgeom=ci_geom)
@pdb_wrap
def main():
# smiles = 'c1ccc(/N=N\\c2ccccc2)cc1'
smiles = 'c1ccc(/N=N/c2ccccc2)cc1'
nnid = 953
num_samples = 1000
to_db(smiles=smiles, nnid=nnid, num_samples=num_samples)
def make_plots():
smiles = 'c1ccc(/N=N\\c2ccccc2)cc1'
group = Group.objects.get(name='switches')
parentgeom = Geom.objects.filter(species__smiles=smiles, species__group=group,
converged=True).first()
trj_name = "{}_upper.traj".format(parentgeom.id)
print(trj_name)
return
trj = AseTrajectory(trj_name)
lower_key = "energy_0"
upper_key = "energy_1"
nxyz_list = []
gap_list = []
model = load_model("/home/saxelrod/engaging/models/953")
for atoms in trj:
nxyz = AtomsBatch(atoms).get_nxyz()
nxyz_list.append(nxyz.tolist())
set_ci_calc(atoms=atoms, model=model,
lower_key=lower_key, upper_key=upper_key,
ref_energy=0, penalty=0)
gap = atoms.get_potential_energy().item()
gap_list.append(gap)
nxyz_save = 'demo.json'
gap_save = 'demo_gap.json'
with open(nxyz_save, "w") as f:
json.dump(nxyz_list, f)
with open(gap_save, "w") as f:
json.dump(gap_list, f)
if __name__ == "__main__":
# main()
make_plots()
```
#### File: nff/md/nms.py
```python
import numpy as np
import os
from torch.utils.data import DataLoader
from torch.nn.modules.container import ModuleDict
import copy
import pickle
from rdkit import Chem
from ase import optimize, units
from ase.md.verlet import VelocityVerlet
from ase.io.trajectory import Trajectory as AseTrajectory
from nff.io.ase_ax import NeuralFF, AtomsBatch
from nff.train import load_model
from nff.data import collate_dicts, Dataset
from nff.md import nve
from neuralnet.utils.vib import get_modes
from tqdm import tqdm
from nff.utils.constants import FS_TO_AU, ASE_TO_FS, EV_TO_AU, BOHR_RADIUS
PERIODICTABLE = Chem.GetPeriodicTable()
RESTART_FILE = "restart.pickle"
OPT_KEYS = ["steps", "fmax"]
MAX_ROUNDS = 20
NUM_CONFS = 20
OPT_FILENAME = "opt.traj"
DEFAULT_INFO_FILE = "job_info.json"
INTEGRATOR_DIC = {"velocityverlet": VelocityVerlet}
CM_2_AU = 4.5564e-6
ANGS_2_AU = 1.8897259886
AMU_2_AU = 1822.88985136
k_B = 1.38064852e-23
PLANCKS_CONS = 6.62607015e-34
HA2J = 4.359744E-18
BOHRS2ANG = 0.529177
SPEEDOFLIGHT = 2.99792458E8
AMU2KG = 1.660538782E-27
def get_key(iroot, num_states):
"""
Get energy key for the state of interest.
Args:
iroot (int): state of interest
num_states (int): total number of states
Returns:
key (str): energy key
"""
# energy if only one state
if iroot == 0 and num_states == 1:
key = "energy"
# otherwise energy with state suffix
else:
key = "energy_{}".format(iroot)
return key
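# A minimal illustration of the key convention above:
#   get_key(iroot=0, num_states=1) -> "energy"
#   get_key(iroot=0, num_states=2) -> "energy_0"
#   get_key(iroot=1, num_states=3) -> "energy_1"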
def init_calculator(atoms, params):
"""
Set the calculator for the atoms and
get the model.
Args:
atoms (AtomsBatch): atoms for geom of interest
params (dict): dictionary of parameters
Returns:
model (nn.Module): nnpotential model
en_key (str): energy key
"""
opt_state = params.get("iroot", 0)
num_states = params.get("num_states", 1)
en_key = get_key(iroot=opt_state, num_states=num_states)
nn_id = params['nnid']
# get the right weightpath (either regular or cluster-mounted)
# depending on which exists
weightpath = os.path.join(params['weightpath'], str(nn_id))
if not os.path.isdir(weightpath):
weightpath = os.path.join(params['mounted_weightpath'], str(nn_id))
# get the model
nn_params = params.get("networkhyperparams", {})
model_type = params.get("model_type")
model = load_model(weightpath,
model_type=model_type,
params=nn_params)
# get and set the calculator
nff_ase = NeuralFF.from_file(
weightpath,
device=params.get('device', 'cuda'),
output_keys=[en_key],
params=nn_params,
model_type=model_type,
needs_angles=params.get("needs_angles", False),
)
atoms.set_calculator(nff_ase)
return model, en_key
def correct_hessian(restart_file, hessian):
"""
During an optimization, replace the approximate BFGS
Hessian with the analytical nnpotential Hessian.
Args:
restart_file (str): name of the pickle file
for restarting the optimization.
hessian (list): analytical Hessian
Returns:
None
"""
# get the parameters from the restart file
with open(restart_file, "rb") as f:
restart = pickle.load(f)
# set the Hessian with ase units
hess = np.array(hessian) * units.Hartree / (units.Bohr) ** 2
restart = (hess, *restart[1:])
# save the restart file
with open(restart_file, "wb") as f:
pickle.dump(restart, f)
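# The swap above relies on the restart pickle being a tuple whose first element
# is the approximate Hessian (as ASE's BFGS-style optimizers write it); only
# that element is replaced, so the remaining entries (previous positions,
# forces, etc.) are left untouched.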
def get_output_keys(model):
atomwisereadout = model.atomwisereadout
# get the names of all the attributes of the readout dict
readout_attr_names = dir(atomwisereadout)
# restrict to the attributes that are ModuleDicts
readout_dict_names = [name for name in readout_attr_names if
type(getattr(atomwisereadout, name)) is ModuleDict]
# get the ModuleDicts
readout_dicts = [getattr(atomwisereadout, name)
for name in readout_dict_names]
# get their keys
output_keys = [key for dic in readout_dicts for key in dic.keys()]
return output_keys
def get_loader(model,
nxyz_list,
num_states,
cutoff,
needs_angles=False,
base_keys=['energy']):
# base_keys = get_output_keys(model)
grad_keys = [key + "_grad" for key in base_keys]
ref_quant = [0] * len(nxyz_list)
ref_quant_grad = [
np.zeros(((len(nxyz_list[0])), 3)).tolist()] * len(nxyz_list)
props = {"nxyz": nxyz_list, **{key: ref_quant for key in base_keys},
**{key: ref_quant_grad for key in grad_keys}}
dataset = Dataset(props.copy())
dataset.generate_neighbor_list(cutoff)
if needs_angles:
dataset.generate_angle_list()
loader = DataLoader(dataset, batch_size=1, collate_fn=collate_dicts)
return model, loader
def check_convg(model, loader, energy_key, device, restart_file):
mode_dic = get_modes(model=model,
loader=loader,
energy_key=energy_key,
device=device)
freqs = mode_dic["freqs"]
neg_freqs = list(filter(lambda x: x < 0, freqs))
num_neg = len(neg_freqs)
if num_neg != 0:
print(("Found {} negative frequencies; "
"restarting optimization.").format(num_neg))
correct_hessian(restart_file=restart_file, hessian=mode_dic["hess"])
return False, mode_dic
else:
print(("Found no negative frequencies; "
"optimization complete."))
return True, mode_dic
def get_opt_kwargs(params):
# params with the right name for max_step
new_params = copy.deepcopy(params)
new_params["steps"] = new_params["opt_max_step"]
new_params.pop("opt_max_step")
opt_kwargs = {key: val for key,
val in new_params.items() if key in OPT_KEYS}
return opt_kwargs
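# For example (values illustrative only):
#   get_opt_kwargs({"opt_max_step": 2000, "fmax": 0.05, "device": "cuda"})
# returns {"fmax": 0.05, "steps": 2000}, i.e. only the keyword arguments that
# ASE optimizers accept in `run()`.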
def opt_conformer(atoms, params):
converged = False
device = params.get("device", "cuda")
restart_file = params.get("restart_file", RESTART_FILE)
num_states = params.get("num_states", 1)
cutoff = params.get("cutoff", 5)
max_rounds = params.get("max_rounds", MAX_ROUNDS)
nn_params = params.get("networkhyperparams", {})
output_keys = nn_params.get("output_keys", ["energy"])
for iteration in tqdm(range(max_rounds)):
model, energy_key = init_calculator(atoms=atoms, params=params)
opt_module = getattr(optimize, params.get("opt_type", "BFGS"))
opt_kwargs = get_opt_kwargs(params)
dyn = opt_module(atoms, restart=restart_file)
dyn_converged = dyn.run(**opt_kwargs)
nxyz_list = [atoms.get_nxyz()]
model, loader = get_loader(model=model,
nxyz_list=nxyz_list,
num_states=num_states,
cutoff=cutoff,
needs_angles=params.get(
"needs_angles", False),
base_keys=output_keys)
hess_converged, mode_dic = check_convg(model=model,
loader=loader,
energy_key=energy_key,
device=device,
restart_file=restart_file)
if dyn_converged and hess_converged:
converged = True
break
return atoms, converged, mode_dic
def get_confs(traj_filename, thermo_filename, num_starting_poses):
with open(thermo_filename, "r") as f:
lines = f.readlines()
energies = []
for line in lines:
try:
energies.append(float(line.split()[2]))
except ValueError:
pass
sort_idx = np.argsort(energies)
sorted_steps = np.array(range(len(lines)))[sort_idx[:num_starting_poses]]
trj = AseTrajectory(traj_filename)
best_confs = [AtomsBatch(trj[i]) for i in sorted_steps]
return best_confs
def get_nve_params(params):
nve_params = copy.deepcopy(nve.DEFAULTNVEPARAMS)
common_keys = [key for key in nve_params.keys() if key in params]
for key in common_keys:
nve_params[key] = params[key]
integrator = nve_params["thermostat"]
if type(integrator) is str:
integ_name = integrator.lower().replace("_", "")
nve_params["integrator"] = INTEGRATOR_DIC[integ_name]
return nve_params
def md_to_conf(params):
thermo_filename = params.get(
"thermo_filename", nve.DEFAULTNVEPARAMS["thermo_filename"])
if os.path.isfile(thermo_filename):
os.remove(thermo_filename)
nve_params = get_nve_params(params)
nxyz = np.array(params['nxyz'])
atoms = AtomsBatch(nxyz[:, 0], nxyz[:, 1:])
_, _ = init_calculator(atoms=atoms, params=params)
nve_instance = nve.Dynamics(atomsbatch=atoms,
mdparam=nve_params)
nve_instance.run()
thermo_filename = params.get(
"thermo_filename", nve.DEFAULTNVEPARAMS["thermo_filename"])
traj_filename = params.get(
"traj_filename", nve.DEFAULTNVEPARAMS["traj_filename"])
num_starting_poses = params.get("num_starting_poses", NUM_CONFS)
best_confs = get_confs(traj_filename=traj_filename,
thermo_filename=thermo_filename,
num_starting_poses=num_starting_poses)
return best_confs
def confs_to_opt(params, best_confs):
convg_atoms = []
energy_list = []
mode_list = []
for i in range(len(best_confs)):
atoms = best_confs[i]
atoms, converged, mode_dic = opt_conformer(atoms=atoms, params=params)
if converged:
convg_atoms.append(atoms)
energy_list.append(atoms.get_potential_energy())
mode_list.append(mode_dic)
if not convg_atoms:
raise Exception("No successful optimizations")
# sort results by energy
best_idx = np.argsort(np.array(energy_list)).reshape(-1)
best_atoms = [convg_atoms[i] for i in best_idx]
best_modes = [mode_list[i] for i in best_idx]
return best_atoms, best_modes
def get_opt_and_modes(params):
best_confs = md_to_conf(params)
all_geoms, all_modes = confs_to_opt(params=params,
best_confs=best_confs)
opt_geom = all_geoms[0]
mode_dic = all_modes[0]
return opt_geom, mode_dic
def get_orca_form(cc_mat, cc_freqs, n_atoms):
""" Converts cclib version of Orca's (almost orthogonalizing) matrix
and mode frequencies back into the original
Orca forms. Also converts frequencies from cm^{-1}
into atomic units (Hartree)."""
pure_matrix = np.asarray(cc_mat)
pure_freqs = np.asarray(cc_freqs)
n_modes = len(pure_matrix[:, 0])
n_inactive = n_atoms*3 - len(pure_matrix[:, 0])
n_tot = n_modes + n_inactive
for i in range(len(pure_matrix)):
new_col = pure_matrix[i].reshape(3*len(pure_matrix[i]))
if i == 1:
new_mat = np.column_stack((old_col, new_col))
elif i > 1:
new_mat = np.column_stack((new_mat, new_col))
old_col = new_col[:]
matrix = np.asarray(new_mat[:]).reshape(n_tot, n_modes)
zero_col = np.asarray([[0]]*len(matrix))
for i in range(0, n_inactive):
matrix = np.insert(matrix, [0], zero_col, axis=1)
freqs = np.asarray(pure_freqs[:])
for i in range(0, n_inactive):
freqs = np.insert(freqs, 0, 0)
return matrix, freqs * CM_2_AU
def get_orth(mass_vec, matrix):
"""Makes orthogonalizing matrix given the outputted
(non-orthogonal) matrix from Orca. The mass_vec variable
    is a list of the masses of the atoms in the molecule (which must be
    in the order given to Orca when it calculated the normal modes).
Note that this acts directly on the matrix outputted from Orca,
not on the cclib version that divides columns into sets of
three entries for each atom."""
m = np.array([[mass] for mass in mass_vec])
# repeat sqrt(m) three times, one for each direction
sqrt_m_vec = np.kron(m ** 0.5, np.ones((3, 1)))
# a matrix with sqrt_m repeated K times, where
# K = 3N - 5 or 3N-6 is the number of modes
sqrt_m_mat = np.kron(sqrt_m_vec, np.ones(
(1, len(sqrt_m_vec))))
# orthogonalize the matrix by element-wise multiplication with 1/sqrt(m)
orth = sqrt_m_mat * matrix
for i in range(len(orth)):
if np.linalg.norm(orth[:, i]) != 0:
# normalize the columns
orth[:, i] = orth[:, i] / np.linalg.norm(orth[:, i])
return orth, np.reshape(sqrt_m_vec, len(sqrt_m_vec))
def get_n_in(matrix):
""" Get number of inactive modes """
n_in = 0
for entry in matrix[0]:
if entry == 0:
n_in += 1
return n_in
def get_disp(mass_vec, matrix, freqs, q, p, hb=1):
"""Makes position and momentum displacements from
unitless harmonic oscillator displacements and unitless momenta.
Uses atomic units (hbar = 1). For different units change the value of hbar."""
orth, sqrt_m_vec = get_orth(mass_vec, matrix)
n_in = get_n_in(matrix)
# get actual positions dq from unitless positions q
q_tilde = q[n_in:] * (hb / (freqs[n_in:])) ** 0.5
q_tilde = np.append(np.zeros(n_in), q_tilde)
# multiply by orth, divide element-wise by sqrt(m)
dq = np.matmul(orth, q_tilde) / sqrt_m_vec
# get actual momenta p_tilde from unitless momenta p
p_tilde = p * (hb * (freqs)) ** 0.5
dp = np.matmul(orth, p_tilde) * sqrt_m_vec
return dq, dp
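# In matrix form, the displacements constructed above are
#   dq = M^(-1/2) L q_tilde,  with q_tilde_k = sqrt(hb / w_k) * q_k
#   dp = M^(+1/2) L p_tilde,  with p_tilde_k = sqrt(hb * w_k) * p_k
# where L is the orthonormalized mode matrix `orth`, M the diagonal matrix of
# atomic masses, and the n_in inactive (translation/rotation) modes are zeroed
# so they contribute nothing.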
def wigner_sample(w, kt=25.7 / 1000 / 27.2, hb=1):
""" Sample unitless x and unitless p from a Wigner distribution.
Takes frequency and temperature in au as inputs.
Default temperature is 300 K."""
sigma = (1/np.tanh((hb*w)/(2*kt)))**0.5/2**0.5
cov = [[sigma**2, 0], [0, sigma**2]]
mean = (0, 0)
x, p = np.random.multivariate_normal(mean, cov)
return x, p
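# Note on the width used above: sigma**2 = coth(hb*w / (2*kt)) / 2 is the
# variance of the unitless Wigner distribution of a harmonic mode. In the
# high-temperature limit coth(hb*w/(2*kt)) -> 2*kt/(hb*w), which recovers the
# classical variance kt/(hb*w) used in classical_sample below.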
def classical_sample(w, kt=25.7 / 1000 / 27.2, hb=1):
sigma = (kt / (hb * w)) ** 0.5
cov = [[sigma**2, 0], [0, sigma**2]]
mean = (0, 0)
x, p = np.random.multivariate_normal(mean, cov)
return x, p
def make_dx_dp(mass_vec,
cc_matrix,
cc_freqs,
kt=25.7 / 1000 / 27.2,
hb=1,
classical=False):
"""Make Wigner-sampled p and dx, where dx is the displacement
about the equilibrium geometry.
Takes mass vector, CClib matrix, and CClib vib freqs as inputs.
Inputs in au unless hb is specified in different coordinates. Default 300 K."""
matrix, freqs = get_orca_form(cc_matrix, cc_freqs, n_atoms=len(mass_vec))
unitless_x = np.array([])
unitless_p = np.array([])
n_in = get_n_in(matrix)
for w in freqs[n_in:]:
if classical:
x, p = classical_sample(w, kt, hb=hb)
else:
x, p = wigner_sample(w, kt, hb=hb)
unitless_x = np.append(unitless_x, x)
unitless_p = np.append(unitless_p, p)
unitless_x = np.append(np.zeros(n_in), unitless_x)
unitless_p = np.append(np.zeros(n_in), unitless_p)
dx, dp = get_disp(mass_vec=mass_vec,
matrix=matrix,
freqs=freqs,
q=unitless_x,
p=unitless_p,
hb=hb)
# re-shape to have form of [[dx1, dy1, dz1], [dx2, dy2, dz2], ...]
n_atoms = int(len(dx) / 3)
shaped_dx, shaped_dp = dx.reshape(n_atoms, 3), dp.reshape(n_atoms, 3)
return shaped_dx, shaped_dp
def split_convert_xyz(xyz):
""" Splits xyz into Z, coordinates in au, and masses in au """
coords = [(np.array(element[1:])*ANGS_2_AU).tolist() for element in xyz]
mass_vec = [PERIODICTABLE.GetAtomicWeight(
int(element[0]))*AMU_2_AU for element in xyz]
Z = [element[0] for element in xyz]
return Z, coords, mass_vec
def join_xyz(Z, coords):
""" Joins Z's and coordinates back into xyz """
return [[Z[i], *coords[i]] for i in range(len(coords))]
def make_wigner_init(init_atoms,
vibdisps,
vibfreqs,
num_samples,
kt=25.7 / 1000 / 27.2,
hb=1,
classical=False):
"""Generates Wigner-sampled coordinates and velocities.
xyz is the xyz array at the optimized
geometry. xyz is in Angstrom, so xyz is first converted to
au, added to Wigner dx, and then
converted back to Angstrom. Velocity is in au.
vibdisps and vibfreqs are the CClib quantities
found in the database."""
xyz = np.concatenate([init_atoms.get_atomic_numbers().reshape(-1, 1),
init_atoms.get_positions()], axis=1)
atoms_list = []
for _ in range(num_samples):
assert min(
vibfreqs) >= 0, ("Negative frequencies found. "
"Geometry must not be converged.")
Z, opt_coords, mass_vec = split_convert_xyz(xyz)
dx, dp = make_dx_dp(mass_vec, vibdisps, vibfreqs,
kt, hb, classical=classical)
wigner_coords = ((np.asarray(opt_coords) + dx)/ANGS_2_AU).tolist()
nxyz = np.array(join_xyz(Z, wigner_coords))
velocity = (dp / np.array([[m] for m in mass_vec])).tolist()
atoms = AtomsBatch(nxyz[:, 0], nxyz[:, 1:])
# conv = EV_TO_AU / (ASE_TO_FS * FS_TO_AU)
conv = 1 / BOHR_RADIUS / (ASE_TO_FS * FS_TO_AU)
atoms.set_velocities(np.array(velocity) / conv)
atoms_list.append(atoms)
return atoms_list
def nms_sample(params,
classical,
num_samples,
kt=25.7 / 1000 / 27.2,
hb=1):
atoms, mode_dic = get_opt_and_modes(params)
vibdisps = np.array(mode_dic["modes"])
vibdisps = vibdisps.reshape(vibdisps.shape[0], -1, 3).tolist()
vibfreqs = mode_dic["freqs"]
atoms_list = make_wigner_init(init_atoms=atoms,
vibdisps=vibdisps,
vibfreqs=vibfreqs,
num_samples=num_samples,
kt=kt,
hb=hb,
classical=classical)
return atoms_list
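# A hedged usage sketch: nms_sample is driven by a params dictionary like the
# one in the __main__ block below (the model path and hyperparameters there are
# the caller's responsibility). A minimal call would be
#   atoms_list = nms_sample(params=params, classical=True, num_samples=10)
# which returns `num_samples` AtomsBatch objects with sampled positions and
# velocities.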
if __name__ == "__main__":
true = True
params = {"htvs": "$HOME/htvs",
"T_init": 300.0,
"temp": 300,
"time_step": 0.5,
"integrator": "velocity_verlet",
"steps": 2,
"save_frequency": 1,
"nbr_list_update_freq": 1,
"thermo_filename": "./thermo.log",
"traj_filename": "./atom.traj",
"num_states": 2,
"iroot": 0,
"opt_max_step": 2000,
"fmax": 0.05,
# "fmax": 10,
"hess_interval": 100000,
"num_starting_poses": 1,
"method": {"name": "sf_tddft_bhhlyp",
"description": ("GAMESS bhhlyp/6-31G*"
" spin flip tddft")},
"num_save": 1,
"nms": true,
"classical": true,
"weightpath": "/home/saxelrod/models",
"nnid": "azo_dimenet_diabat",
"networkhyperparams": {
"n_rbf": 6,
"cutoff": 5.0,
"envelope_p": 5,
"n_spher": 7,
"l_spher": 7,
"embed_dim": 128,
"int_dim": 64,
"out_dim": 256,
"basis_emb_dim": 8,
"activation": "swish",
"n_convolutions": 4,
"use_pp": true,
"output_keys": ["energy_0", "energy_1"],
"diabat_keys": [["d0", "lam"], ["lam", "d1"]],
"grad_keys": ["energy_0_grad", "energy_1_grad"]
},
"model_type": "DimeNetDiabat",
"needs_angles": true,
"device": "cpu",
"nxyz": [[6.0, -3.106523, -0.303932, 1.317003], [6.0, -2.361488, -1.070965, 0.433279], [6.0, -1.500175, -0.466757, -0.466259], [6.0, -1.394372, 0.919597, -0.489914], [7.0, -0.638906, 1.622236, -1.475649], [7.0, 0.53754, 1.376216, -1.741989], [6.0, 1.347532, 0.467035, -0.998026], [6.0, 2.132836, -0.410469, -1.735143], [6.0, 3.015726, -1.257597, -1.087982], [6.0, 3.157147, -1.193886, 0.290324], [6.0, 2.407287, -0.281797, 1.018438], [6.0, 1.497631, 0.545173, 0.382205], [6.0, -2.176213, 1.69203, 0.35984], [6.0, -3.00977, 1.079171, 1.279395], [1.0, -3.769581, -0.781063, 2.019916], [1.0, -2.44819, -2.145275, 0.44532], [1.0, -0.921598, -1.062715, -1.15082], [1.0, 2.038044, -0.418366, -2.808393], [1.0, 3.607298, -1.952544, -1.661382], [1.0, 3.858162, -1.840041, 0.792768], [1.0, 2.528111, -0.214815, 2.087415], [1.0, 0.915297, 1.251752, 0.948046], [1.0, -2.117555, 2.765591, 0.289571], [1.0, -3.598241, 1.681622, 1.952038]]
}
try:
atoms_list = nms_sample(params=params,
classical=True,
num_samples=10,
kt=25.7 / 1000 / 27.2,
hb=1)
except Exception as e:
print(e)
import pdb
pdb.post_mortem()
```
#### File: nff/md/npt.py
```python
import os
import numpy as np
import copy
import math
from ase.md.md import MolecularDynamics
from ase.optimize.optimize import Dynamics
from ase.md.npt import NPT
from ase import units
from ase.md.velocitydistribution import (MaxwellBoltzmannDistribution,
Stationary, ZeroRotation)
class NoseHoovernpt(NPT):
def __init__(self, atoms,
timestep, temperature=None, externalstress=None,
ttime=None, pfactor=None,
temperature_K=None,
mask=None, trajectory=None, logfile=None, loginterval=1,
nbr_update_period=20,append_trajectory=False):
if os.path.isfile(str(trajectory)):
os.remove(trajectory)
NPT.__init__(self,atoms=atoms,
timestep=timestep * units.fs,
ttime=ttime,
externalstress=externalstress,
pfactor=pfactor,
temperature_K=temperature,
mask=mask,
trajectory=trajectory,
logfile=logfile,
loginterval=loginterval,
append_trajectory=append_trajectory)
# Initialize simulation parameters
# convert units
self.nbr_update_period = nbr_update_period
self.max_steps=0
MaxwellBoltzmannDistribution(self.atoms, temperature*units.kB)
Stationary(self.atoms)
ZeroRotation(self.atoms)
self.initialize()
def run(self, steps=None):
if steps is None:
steps = self.num_steps
epochs = math.ceil(steps / self.nbr_update_period)
# number of steps in between nbr updates
steps_per_epoch = int(steps / epochs)
# maximum number of steps starts at `steps_per_epoch`
# and increments after every nbr list update
#self.max_steps = 0
self.atoms.update_nbr_list()
for _ in range(epochs):
self.max_steps += steps_per_epoch
Dynamics.run(self)
self.atoms.update_nbr_list()
class NoseHooverNPT(MolecularDynamics):
def __init__(self,
atoms,
timestep,
temperature,
pressure,
ttime,
Pdamp,
maxwell_temp=None,
trajectory=None,
logfile=None,
loginterval=1,
max_steps=None,
nbr_update_period=20,
append_trajectory=True,
**kwargs):
if os.path.isfile(str(trajectory)):
os.remove(trajectory)
MolecularDynamics.__init__(self,
atoms=atoms,
timestep=timestep * units.fs,
trajectory=trajectory,
logfile=logfile,
loginterval=loginterval,
append_trajectory=append_trajectory)
# Initialize simulation parameters
# convert units
self.dt = timestep * units.fs
self.T = temperature * units.kB
self.P= pressure*units.GPa
self.ttime = ttime # defined as a fraction of self.dt
self.pdamp= Pdamp
        # the thermostat "mass" Q is set from the number of degrees
        # of freedom Nf = 3N - 6 (see below)
        self.Natom = len(atoms)
        self.Nf = 3 * self.Natom - 6
# no rotation or translation, so target kinetic energy
# is 1/2 (3N - 6) kT
self.targeEkin = 0.5 * (self.Nf) * self.T
self.Q = (self.Nf ) * self.T * (self.ttime * self.dt)**2
self.W = (self.Natom-1)* self.T *(self.pdamp*self.dt)**2
self.zeta = 0.0
self.eta=0.0
self.veta=0.0
self.num_steps = max_steps
self.n_steps = 0
self.max_steps = 0
self.nbr_update_period = nbr_update_period
        # initial Maxwell-Boltzmann temperature for atoms
if maxwell_temp is not None:
# convert units
maxwell_temp = maxwell_temp * units.kB
else:
maxwell_temp = 2 * self.T
MaxwellBoltzmannDistribution(self.atoms, maxwell_temp)
Stationary(self.atoms)
ZeroRotation(self.atoms)
def step(self):
accel = (self.atoms.get_forces() /
self.atoms.get_masses().reshape(-1, 1))
vel = self.atoms.get_velocities()
Pint=-np.sum(self.atoms.get_stress(include_ideal_gas=True)[0:3])/3
F=3*self.atoms.get_volume()*(Pint-self.P) + (6/self.Nf)*self.atoms.get_kinetic_energy()
G= (1/self.Q)*(2*self.atoms.get_kinetic_energy()+self.W*(self.veta**2)-(self.Nf+1)*self.T)
eta0=self.eta
self.eta =self.eta + self.veta * self.dt + 0.5 *((F/self.W)-self.veta*self.zeta)*self.dt*self.dt
x = np.exp(self.eta-eta0)*(self.atoms.get_positions() + vel * self.dt + \
(accel - self.zeta * vel - (2+(3/self.Nf))*vel*self.veta) * (0.5 * self.dt ** 2))
self.atoms.set_positions(x)
# make half a step in velocity
vel_half = np.exp(self.eta-eta0)*(vel + 0.5 * self.dt * (accel - self.zeta * vel- (2+(3/self.Nf))*vel*self.veta))
self.atoms.set_velocities(vel_half)
# make a full step in accelerations
f = self.atoms.get_forces()
accel = f / self.atoms.get_masses().reshape(-1, 1)
self.zeta = self.zeta + 0.5 * self.dt * G
self.veta=self.veta+ 0.5 * self.dt *((F/self.W)-self.veta*self.zeta)
Vol=self.atoms.get_volume()*np.exp(3*self.eta-3*eta0)
h=Vol**(1/3)
self.atoms.set_cell([h,h,h])
Pint=-np.sum(self.atoms.get_stress(include_ideal_gas=True)[0:3])/3
F=3*self.atoms.get_volume()*(Pint-self.P) + (6/self.Nf)*self.atoms.get_kinetic_energy()
G= (1/self.Q)*(2*self.atoms.get_kinetic_energy()+self.W*(self.veta**2)-(self.Nf+1)*self.T)
self.zeta = self.zeta + 0.5 * self.dt * G
self.veta= (self.veta + 0.5*self.dt*(F/self.W))/(1 + 0.5 * self.dt * self.zeta)
vel = (self.atoms.get_velocities() + 0.5 * self.dt * accel) / \
(1 + 0.5 * self.dt * self.zeta + 0.5* self.dt *(2+(3/self.Nf))*self.veta)
self.atoms.set_velocities(vel)
#Vol=self.atoms.get_volume()*np.exp(3*self.eta-3*eta0)
#h=Vol**(1/3)
#self.atoms.set_cell([h,h,h])
return f
def run(self, steps=None):
if steps is None:
steps = self.num_steps
epochs = math.ceil(steps / self.nbr_update_period)
# number of steps in between nbr updates
steps_per_epoch = int(steps / epochs)
# maximum number of steps starts at `steps_per_epoch`
# and increments after every nbr list update
#self.max_steps = 0
self.atoms.update_nbr_list()
for _ in range(epochs):
self.max_steps += steps_per_epoch
Dynamics.run(self)
self.atoms.update_nbr_list()
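# A hedged usage sketch (argument values illustrative only; `atoms` is assumed
# to be an AtomsBatch with an NFF calculator attached, since `step` and `run`
# call get_stress() and update_nbr_list()):
#
# dyn = NoseHooverNPT(atoms, timestep=0.5, temperature=300,
#                     pressure=1.0e-4,  # GPa, roughly 1 bar
#                     ttime=20, Pdamp=1000, max_steps=10000,
#                     trajectory="npt.traj", logfile="npt.log")
# dyn.run()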
```
#### File: md/tully/ab_dynamics.py
```python
import argparse
import shutil
import os
import math
import numpy as np
from ase import Atoms
import copy
from nff.md.tully.dynamics import (NeuralTully,
TULLY_LOG_FILE,
TULLY_SAVE_FILE)
from nff.md.tully.io import load_json, coords_to_xyz
from nff.md.tully.ab_io import get_results as ab_results
from nff.utils import constants as const
def load_params(file):
all_params = load_json(file)
all_params['nacv_details'] = {**all_params,
**all_params['nacv_details']}
all_params['grad_details'] = {**all_params,
**all_params['grad_details']}
return all_params
def make_atoms(all_params):
vel = np.array(all_params['velocities'])
nxyz = coords_to_xyz(all_params["coords"])
atoms = Atoms(nxyz[:, 0],
positions=nxyz[:, 1:])
atoms.set_velocities(vel)
atoms_list = [atoms]
return atoms_list
class AbTully(NeuralTully):
def __init__(self,
charge,
grad_config,
nacv_config,
grad_details,
nacv_details,
atoms_list,
num_states,
initial_surf,
dt,
max_time,
elec_substeps,
decoherence,
hop_eqn,
**kwargs):
self.atoms_list = atoms_list
self.vel = self.get_vel()
self.T = None
self.t = 0
self.props = {}
self.num_atoms = len(self.atoms_list[0])
self.num_samples = len(atoms_list)
self.num_states = num_states
self.surfs = np.ones(self.num_samples,
                             dtype=int) * initial_surf
self.dt = dt * const.FS_TO_AU
self.elec_substeps = elec_substeps
self.max_time = max_time * const.FS_TO_AU
self.max_gap_hop = float('inf')
self.log_file = TULLY_LOG_FILE
self.save_file = TULLY_SAVE_FILE
self.log_template = self.setup_logging(remove_old=False)
self.p_hop = 0
self.just_hopped = None
self.explicit_diabat = False
self.c = self.init_c()
self.decoherence = self.init_decoherence(params=decoherence)
self.decoherence_type = decoherence['name']
self.hop_eqn = hop_eqn
self.diabat_propagate = False
self.simple_vel_scale = False
self.charge = charge
self.num_samples = 1
self.grad_config = grad_config
self.nacv_config = nacv_config
self.grad_details = grad_details
self.nacv_details = nacv_details
self.step_num = 0
# only works if you don't use `self.setup_save()`,
# which deletes the pickle file
if os.path.isfile(TULLY_SAVE_FILE):
self.restart()
@property
def forces(self):
inf = np.ones((self.num_atoms,
3)) * float('inf')
_forces = np.stack([-self.props.get(f'energy_{i}_grad',
inf).reshape(-1, 3)
for i in range(self.num_states)])
_forces = _forces.reshape(1, *_forces.shape)
return _forces
@forces.setter
def forces(self, _forces):
for i in range(self.num_states):
self.props[f'energy_{i}_grad'] = -_forces[:, i]
def correct_phase(self,
old_force_nacv):
if old_force_nacv is None:
return
new_force_nacv = self.force_nacv
new_nacv = self.nacv
delta = np.max(np.linalg.norm(old_force_nacv - new_force_nacv,
axis=((-1, -2))), axis=-1)
sigma = np.max(np.linalg.norm(old_force_nacv + new_force_nacv,
axis=((-1, -2))), axis=-1)
delta = delta.reshape(*delta.shape, 1, 1, 1)
sigma = sigma.reshape(*sigma.shape, 1, 1, 1)
phase = (-1) ** (delta > sigma)
print(np.linalg.norm(old_force_nacv - new_force_nacv))
print(np.linalg.norm(old_force_nacv + new_force_nacv))
print(phase.squeeze((-1, -2, -3)))
new_force_nacv = new_force_nacv * phase
new_nacv = new_nacv * phase
num_states = new_nacv.shape[1]
for i in range(num_states):
for j in range(num_states):
self.props[f'force_nacv_{i}{j}'] = new_force_nacv[:, i, j]
self.props[f'nacv_{i}{j}'] = new_nacv[:, i, j]
def update_props(self,
*args,
**kwargs):
old_force_nacv = copy.deepcopy(self.force_nacv)
job_dir = os.path.join(os.getcwd(), str(self.step_num))
        # clear any stale output from a previous attempt, then make a fresh job dir
        if os.path.isdir(job_dir):
            shutil.rmtree(job_dir)
        os.makedirs(job_dir)
self.props = ab_results(nxyz=self.nxyz,
charge=self.charge,
num_states=self.num_states,
surf=self.surfs[0],
job_dir=job_dir,
grad_config=self.grad_config,
nacv_config=self.nacv_config,
grad_details=self.grad_details,
nacv_details=self.nacv_details)
self.correct_phase(old_force_nacv=old_force_nacv)
self.step_num += 1
def get_vel(self):
"""
Velocities are in a.u. here, not ASE units
"""
vel = np.stack([atoms.get_velocities()
for atoms in self.atoms_list])
return vel
def restart(self):
super().restart()
self.step_num = int(self.t / self.dt) + 2
def new_force_calc(self):
"""
Extra force calc on new state after hop
"""
surf = self.surfs[0]
needs_calc = np.bitwise_not(
np.isfinite(
self.forces[0, surf]
)
).any()
if not needs_calc:
return
new_job_dir = os.path.join(os.getcwd(),
f"{self.step_num - 1}_extra")
        # clear any stale output, then make a fresh directory for the extra calc
        if os.path.isdir(new_job_dir):
            shutil.rmtree(new_job_dir)
        os.makedirs(new_job_dir)
props = ab_results(nxyz=self.nxyz,
charge=self.charge,
num_states=self.num_states,
surf=surf,
job_dir=new_job_dir,
grad_config=self.grad_config,
nacv_config=self.nacv_config,
grad_details=self.grad_details,
nacv_details=self.nacv_details,
calc_nacv=False)
key = f'energy_{surf}_grad'
self.props[key] = props[key]
def run(self):
steps = math.ceil((self.max_time - self.t) / self.dt)
if self.step_num == 0:
self.update_props()
for _ in range(steps):
# if just hopped to new state, then we need to do a force
# calculation on the new state too
self.new_force_calc()
self.save()
self.step(needs_nbrs=False)
with open(self.log_file, 'a') as f:
f.write('\nTully surface hopping terminated normally.')
@classmethod
def from_file(cls,
file):
all_params = load_params(file)
atoms_list = make_atoms(all_params)
instance = cls(atoms_list=atoms_list,
**all_params)
return instance
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--params_file',
type=str,
help='Info file with parameters',
default='job_info.json')
args = parser.parse_args()
path = args.params_file
ab_tully = AbTully.from_file(path)
try:
ab_tully.run()
except Exception as e:
print(e)
import pdb
pdb.post_mortem()
if __name__ == '__main__':
main()
```
#### File: md/tully/io.py
```python
import json
import os
import torch
from torch.utils.data import DataLoader
import numpy as np
from rdkit import Chem
from ase import Atoms
from nff.train import batch_to, batch_detach
from nff.nn.utils import single_spec_nbrs
from nff.data import Dataset, collate_dicts
from nff.utils import constants as const
from nff.utils.scatter import compute_grad
from nff.io.ase_ax import NeuralFF, AtomsBatch
PERIODICTABLE = Chem.GetPeriodicTable()
ANGLE_MODELS = ["DimeNet", "DimeNetDiabat", "DimeNetDiabatDelta"]
def check_hop(model,
results,
max_gap_hop,
surf,
num_states):
# **** this won't work - assumes surf is an integer
"""
`max_gap_hop` in a.u.
"""
gap_keys = []
for i in range(num_states):
if i == surf:
continue
upper = max([i, surf])
lower = min([i, surf])
key = f'energy_{upper}_energy_{lower}_delta'
gap_keys.append(key)
# convert max_gap_hop to kcal
max_conv = max_gap_hop * const.AU_TO_KCAL['energy']
gaps = torch.cat([results[key].reshape(-1, 1)
for key in gap_keys], dim=-1)
can_hop = (gaps <= max_conv).sum(-1).to(torch.bool)
return can_hop
def split_by_hop(dic,
can_hop,
num_atoms):
hop_dic = {}
no_hop_dic = {}
for key, val in dic.items():
if any(['nacv' in key, 'grad' in key, 'nxyz' in key]):
val = torch.split(val, num_atoms)
hop_tensor = torch.cat([item for i, item in enumerate(val)
if can_hop[i]])
no_hop_tensor = torch.cat([item for i, item in enumerate(val)
if not can_hop[i]])
hop_dic[key] = hop_tensor
no_hop_dic[key] = no_hop_tensor
return hop_dic, no_hop_dic
def split_all(model,
xyz,
max_gap_hop,
surf,
num_states,
batch,
results):
can_hop = check_hop(model=model,
results=results,
max_gap_hop=max_gap_hop,
surf=surf,
num_states=num_states)
num_atoms = batch['num_atoms'].tolist()
batch['xyz'] = xyz
hop_batch, no_hop_batch = split_by_hop(dic=batch,
can_hop=can_hop,
num_atoms=num_atoms)
hop_results, no_hop_results = split_by_hop(dic=results,
can_hop=can_hop,
num_atoms=num_atoms)
splits = (hop_batch, no_hop_batch, hop_results, no_hop_results)
return splits, can_hop
def init_results(num_atoms,
num_states):
en_keys = [f'energy_{i}' for i in range(num_states)]
grad_keys = [key + "_grad" for key in en_keys]
nacv_keys = [f"<KEY>}" for i in range(num_states)
for j in range(num_states) if i != j]
force_nacv_keys = ["force_" + key for key in nacv_keys]
num_samples = len(num_atoms)
shapes = {"energy": [num_samples],
"grad": [num_samples, num_atoms[0], 3]}
key_maps = {"energy": en_keys,
"grad": [*grad_keys, *nacv_keys, *force_nacv_keys]}
results = {}
for key_type, keys in key_maps.items():
shape = shapes[key_type]
for key in keys:
init = torch.ones(*shape) * float('nan')
            results[key] = init
    return results
def fill_results(batch,
these_results,
results,
idx):
num_atoms = batch['num_atoms'].tolist()
grad_flags = ['_grad', 'nacv']
    for key, val in these_results.items():
if any([flag in key for flag in grad_flags]):
val = torch.stack(torch.split(val, num_atoms))
results[key][idx] = val
return results
def combine_all(no_hop_results,
hop_results,
no_hop_batch,
hop_batch,
can_hop,
num_states,
batch):
num_atoms = batch['num_atoms'].tolist()
results = init_results(num_atoms=num_atoms,
num_states=num_states)
hop_idx = can_hop.nonzero()
no_hop_idx = torch.bitwise_not(can_hop).nonzero()
tuples = [(no_hop_batch, no_hop_results, no_hop_idx),
(hop_batch, hop_results, hop_idx)]
for tup in tuples:
batch, these_results, idx = tup
results = fill_results(batch=batch,
these_results=these_results,
results=results,
idx=idx)
return results
def grad_by_split(model,
hop_batch,
hop_results,
no_hop_batch,
no_hop_results,
surf):
# add all the gradients for the hop batch and results
model.diabatic_readout.add_all_grads(xyz=hop_batch['xyz'],
results=hop_results,
num_atoms=hop_batch['num_atoms'],
u=hop_results['U'],
add_u=False)
# just add the state gradient for the non-hop batch / results
key = f'energy_{surf}'
surf_grad = compute_grad(inputs=no_hop_batch['xyz'],
output=no_hop_results[key])
no_hop_results[key + '_grad'] = surf_grad
return hop_results, no_hop_results
def add_grad(model,
batch,
xyz,
results,
max_gap_hop,
surf,
num_states):
# split batches and results into those that require NACVs
# and gradients on all states, and those that only require
# the gradient on the current state
splits, can_hop = split_all(model=model,
xyz=xyz,
max_gap_hop=max_gap_hop,
surf=surf,
num_states=num_states,
batch=batch,
results=results)
(hop_batch, no_hop_batch, hop_results, no_hop_results) = splits
# add the relevant gradients
hop_results, no_hop_results = grad_by_split(model=model,
hop_batch=hop_batch,
hop_results=hop_results,
no_hop_batch=no_hop_batch,
no_hop_results=no_hop_results,
surf=surf)
# combine everything together
results = combine_all(no_hop_results=no_hop_results,
hop_results=hop_results,
no_hop_batch=no_hop_batch,
hop_batch=hop_batch,
can_hop=can_hop,
num_states=num_states,
batch=batch)
return results
# def add_active_grads(batch,
# results,
# xyz,
# surfs,
# num_states):
# num_samples = len(batch['num_atoms'])
# num_atoms = batch['num_atoms'][0].item()
# new_results = {f'energy_{i}_grad':
# np.ones(num_samples, num_atoms, 3)
# * float('nan')
# for i in range(num_states)}
def run_model(model,
batch,
device,
surf,
max_gap_hop,
num_states,
all_engrads,
nacv):
"""
`max_gap_hop` in a.u.
"""
batch = batch_to(batch, device)
# # case 1: we only want one state gradient
# separate_grads = (not nacv) and (not all_engrads)
# if separate_grads:
# xyz = batch['nxyz'][:, 1:]
# xyz.requires_grad = True
# # case 2: we want both state gradients but
# # no nacv
# # Or case 3: we want both state gradients and nacv
# else:
# xyz = None
xyz = None
model.add_nacv = nacv
results = model(batch,
xyz=xyz,
add_nacv=nacv,
# add_grad=all_engrads,
add_grad=True,
add_gap=True,
add_u=True,
inference=True)
# If we use NACV then we can come back to what's commented
# out below, where you only ask for gradients NACVs among states
# close to each other
# For now just take the gradient on the active surfaces
# if separate_grads:
# results = add_active_grads()
# if not all_grads:
# results = add_grad(model=model,
# batch=batch,
# xyz=xyz,
# results=results,
# max_gap_hop=max_gap_hop,
# surf=surf,
# num_states=num_states)
results = batch_detach(results)
return results
def get_phases(U, old_U):
# Compute overlap
S = np.einsum('...ki, ...kj -> ...ij',
old_U, U)
# Take the element in each column with the
# largest absolute value, not just the diagonal.
# When the two diabatic states switch energy
# orderings through a CI, the adiabatic states
# that are most similar to each other will have
# different orderings.
num_states = U.shape[-1]
max_idx = abs(S).argmax(axis=1)
num_samples = S.shape[0]
S_max = np.take_along_axis(
S.transpose(0, 2, 1),
max_idx.reshape(num_samples, num_states, 1),
axis=2
).transpose(0, 2, 1)
new_phases = np.sign(S_max)
return new_phases
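# A small illustration for one geometry with two states (numbers only
# approximate an orthogonal matrix): if old_U is the identity and
#   U = [[ 0.1, -0.99],
#        [ 0.99,  0.1]],
# then S = old_U^T U = U; the largest-|S| entry of column 0 is +0.99 and of
# column 1 is -0.99, so new_phases = [+1, -1] and the second column of U gets
# flipped, keeping the adiabatic states' signs continuous between steps.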
def update_phase(new_phases,
i,
j,
results,
key,
num_atoms):
phase = ((new_phases[:, :, i] * new_phases[:, :, j])
.reshape(-1, 1, 1))
updated = np.concatenate(
np.split(results[key], num_atoms)
).reshape(-1, num_atoms[0], 3) * phase
results[key] = updated
return results
def correct_nacv(results,
old_U,
num_atoms,
num_states):
"""
Stack the non-adiabatic couplings and correct their
phases. Also correct the phases of U.
"""
# get phase correction
new_phases = get_phases(U=results["U"],
old_U=old_U)
new_U = results["U"] * new_phases
results["U"] = new_U
# Stack NACVs and multiply by new phases
# They can be stacked because only one type of molecule
# is used in a batched simulation
for i in range(num_states):
for j in range(num_states):
if j == i:
continue
keys = [f"force_nacv_{i}{j}", f"<KEY>"]
for key in keys:
# e.g. if no states are close enough for
# hopping
if key not in results:
continue
results = update_phase(
new_phases=new_phases,
i=i,
j=j,
results=results,
key=key,
num_atoms=num_atoms)
return results
def batched_calc(model,
batch,
device,
num_states,
surf,
max_gap_hop,
all_engrads,
nacv):
"""
    Get model results from a batch. The NACV phase correction
    is applied afterwards in `get_results`, via `correct_nacv`.
"""
results = run_model(model=model,
batch=batch,
device=device,
surf=surf,
max_gap_hop=max_gap_hop,
num_states=num_states,
all_engrads=all_engrads,
nacv=nacv)
return results
def concat_and_conv(results_list,
num_atoms,
diabat_keys):
"""
Concatenate results from separate batches and convert
to atomic units
"""
keys = results_list[0].keys()
all_results = {}
conv = const.KCAL_TO_AU
grad_shape = [-1, num_atoms, 3]
for key in keys:
val = torch.cat([i[key] for i in results_list])
if 'energy_grad' in key or 'force_nacv' in key:
val *= conv['energy'] * conv['_grad']
val = val.reshape(*grad_shape)
elif 'energy' in key or key in diabat_keys:
val *= conv['energy']
elif 'nacv' in key:
val *= conv['_grad']
val = val.reshape(*grad_shape)
# else:
# msg = f"{key} has no known conversion"
# raise NotImplementedError(msg)
all_results[key] = val.numpy()
return all_results
def make_loader(nxyz,
nbr_list,
num_atoms,
needs_nbrs,
cutoff,
cutoff_skin,
device,
batch_size):
props = {"nxyz": [torch.Tensor(i)
for i in nxyz]}
dataset = Dataset(props=props,
units='kcal/mol',
check_props=True)
if needs_nbrs or nbr_list is None:
nbrs = single_spec_nbrs(dset=dataset,
cutoff=(cutoff +
cutoff_skin),
device=device,
directed=True)
dataset.props['nbr_list'] = nbrs
else:
dataset.props['nbr_list'] = nbr_list
loader = DataLoader(dataset,
batch_size=batch_size,
collate_fn=collate_dicts)
return loader
def timing(func):
import time
def my_func(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
delta = end - start
print("%.2f seconds" % delta)
return result
return my_func
# @timing
def get_results(model,
nxyz,
nbr_list,
num_atoms,
needs_nbrs,
cutoff,
cutoff_skin,
device,
batch_size,
old_U,
num_states,
surf,
max_gap_hop,
all_engrads,
nacv,
diabat_keys):
"""
`nxyz_list` assumed to be in Angstroms
"""
loader = make_loader(nxyz=nxyz,
nbr_list=nbr_list,
num_atoms=num_atoms,
needs_nbrs=needs_nbrs,
cutoff=cutoff,
cutoff_skin=cutoff_skin,
device=device,
batch_size=batch_size)
results_list = []
for batch in loader:
results = batched_calc(model=model,
batch=batch,
device=device,
num_states=num_states,
surf=surf,
max_gap_hop=max_gap_hop,
all_engrads=all_engrads,
nacv=nacv)
results_list.append(results)
all_results = concat_and_conv(results_list=results_list,
num_atoms=num_atoms,
diabat_keys=diabat_keys)
if old_U is not None:
all_results = correct_nacv(results=all_results,
old_U=old_U,
num_atoms=[num_atoms] * old_U.shape[0],
num_states=num_states)
return all_results
def coords_to_xyz(coords):
nxyz = []
for dic in coords:
directions = ['x', 'y', 'z']
n = float(PERIODICTABLE.GetAtomicNumber(dic["element"]))
xyz = [dic[i] for i in directions]
nxyz.append([n, *xyz])
return np.array(nxyz)
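# For example (coordinates illustrative only):
#   coords_to_xyz([{"element": "O", "x": 0.0, "y": 0.0, "z": 0.0},
#                  {"element": "H", "x": 0.96, "y": 0.0, "z": 0.0}])
# returns array([[8.0, 0.0, 0.0, 0.0],
#                [1.0, 0.96, 0.0, 0.0]])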
def load_json(file):
with open(file, 'r') as f:
info = json.load(f)
if 'details' in info:
details = info['details']
else:
details = {}
all_params = {key: val for key, val in info.items()
if key != "details"}
all_params.update(details)
return all_params
def make_dataset(nxyz,
ground_params):
props = {
'nxyz': [torch.Tensor(nxyz)]
}
cutoff = ground_params["cutoff"]
cutoff_skin = ground_params["cutoff_skin"]
dataset = Dataset(props.copy(), units='kcal/mol')
dataset.generate_neighbor_list(cutoff=(cutoff + cutoff_skin),
undirected=False)
model_type = ground_params["model_type"]
needs_angles = (model_type in ANGLE_MODELS)
if needs_angles:
dataset.generate_angle_list()
return dataset, needs_angles
def get_batched_props(dataset):
batched_props = {}
for key, val in dataset.props.items():
if type(val[0]) is torch.Tensor and len(val[0].shape) == 0:
batched_props.update({key: val[0].reshape(-1)})
else:
batched_props.update({key: val[0]})
return batched_props
def add_calculator(atomsbatch,
model_path,
model_type,
device,
batched_props):
needs_angles = (model_type in ANGLE_MODELS)
nff_ase = NeuralFF.from_file(
model_path=model_path,
device=device,
output_keys=["energy_0"],
conversion="ev",
params=None,
model_type=model_type,
needs_angles=needs_angles,
dataset_props=batched_props
)
atomsbatch.set_calculator(nff_ase)
def get_atoms(ground_params,
all_params):
coords = all_params["coords"]
nxyz = coords_to_xyz(coords)
atoms = Atoms(nxyz[:, 0],
positions=nxyz[:, 1:])
dataset, needs_angles = make_dataset(nxyz=nxyz,
ground_params=ground_params)
batched_props = get_batched_props(dataset)
device = ground_params.get('device', 'cuda')
atomsbatch = AtomsBatch.from_atoms(atoms=atoms,
props=batched_props,
needs_angles=needs_angles,
device=device,
undirected=False,
cutoff_skin=ground_params['cutoff_skin'])
if 'model_path' in all_params:
model_path = all_params['model_path']
else:
model_path = os.path.join(all_params['weightpath'],
str(all_params["nnid"]))
add_calculator(atomsbatch=atomsbatch,
model_path=model_path,
model_type=ground_params["model_type"],
device=device,
batched_props=batched_props)
return atomsbatch
```
#### File: nff/nn/graphop.py
```python
import torch
from nff.utils.scatter import compute_grad
from nff.nn.modules import ConfAttention
EPS = 1e-15
def update_boltz(conf_fp, weight, boltz_nn):
"""
Given a conformer fingerprint and Boltzmann weight,
return a new updated fingerprint.
Args:
        conf_fp (torch.Tensor): molecular fingerprint of
a conformer
weight (float): Boltzmann weight
boltz_nn (torch.nn.Module): network that converts
the fingerprint and weight into a new
fingerprint. If None, just multiply the Boltzmann
factor with the fingerprint.
Returns:
boltzmann_fp (torch.Tensor): updated fingerprint
"""
if boltz_nn is None:
boltzmann_fp = conf_fp * weight
# otherwise concatenate the weight with the fingerprint
# and put it through the boltzmann nn
else:
weight_tens = torch.Tensor([weight]).to(conf_fp.device)
new_fp = torch.cat((conf_fp, weight_tens))
boltzmann_fp = boltz_nn(new_fp)
return boltzmann_fp
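def _demo_update_boltz():
    """
    Illustrative sketch only: with `boltz_nn=None` the conformer fingerprint
    is simply scaled by its Boltzmann weight; with a network, the weight is
    concatenated to the fingerprint and mapped to a new fingerprint. The
    one-layer network below is a made-up stand-in.
    """
    conf_fp = torch.rand(8)
    scaled = update_boltz(conf_fp=conf_fp, weight=0.25, boltz_nn=None)
    boltz_nn = torch.nn.Linear(9, 8)  # (8 features + 1 weight) -> 8 features
    learned = update_boltz(conf_fp=conf_fp, weight=0.25, boltz_nn=boltz_nn)
    return scaled, learned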
def conf_pool(mol_size,
boltzmann_weights,
mol_fp_nn,
boltz_nns,
conf_fps,
head_pool="concatenate"):
"""
Pool atomic representations of conformers into molecular fingerprint,
and then add those fingerprints together with Boltzmann weights.
Args:
        mol_size (int): number of atoms per molecule
        boltzmann_weights (torch.Tensor): tensor of length num_conf
            with the Boltzmann weight of each conformer.
mol_fp_nn (torch.nn.Module): network that converts the sum
of atomic fingerprints into a molecular fingerprint.
boltz_nns (list[torch.nn.Module]): nns that take a molecular
fingerprint and boltzmann weight as input and returns
a new fingerprint.
conf_fps (torch.Tensor): fingerprints for each conformer
head_pool (str): how to combine species feature vectors from
the different `boltz_nns`.
Returns:
final_fp (torch.Tensor): H-dimensional tensor, where
H is the number of features in the molecular fingerprint.
"""
final_fps = []
final_weights = []
for boltz_nn in boltz_nns:
# if boltz_nn is an instance of ConfAttention,
# put all the conformers and their weights into
# the attention pooler and return
if isinstance(boltz_nn, ConfAttention):
final_fp, learned_weights = boltz_nn(
conf_fps=conf_fps,
boltzmann_weights=boltzmann_weights)
else:
# otherwise get a new fingerprint for each conformer
# based on its Boltzmann weight
boltzmann_fps = []
for i, conf_fp in enumerate(conf_fps):
weight = boltzmann_weights[i]
boltzmann_fp = update_boltz(
conf_fp=conf_fp,
weight=weight,
boltz_nn=boltz_nn)
boltzmann_fps.append(boltzmann_fp)
boltzmann_fps = torch.stack(boltzmann_fps)
learned_weights = boltzmann_weights
# sum all the conformer fingerprints
final_fp = boltzmann_fps.sum(dim=0)
final_fps.append(final_fp)
final_weights.append(learned_weights)
# combine the fingerprints produced by the different
# `boltz_nns`
if head_pool == "concatenate":
final_fp = torch.cat(final_fps, dim=-1)
elif head_pool == "sum":
final_fp = torch.stack(final_fps).sum(dim=0)
else:
raise NotImplementedError
final_weights = torch.stack(final_weights)
return final_fp, final_weights
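def _demo_conf_pool():
    """
    Illustrative sketch only: pools three conformer fingerprints of a
    hypothetical 5-atom molecule with plain Boltzmann weighting
    (`boltz_nns=[None]`), so each fingerprint is scaled by its weight and
    the results are summed. `mol_fp_nn` is unused in this branch.
    """
    conf_fps = torch.rand(3, 16)
    weights = torch.tensor([0.7, 0.2, 0.1])
    final_fp, final_weights = conf_pool(mol_size=5,
                                        boltzmann_weights=weights,
                                        mol_fp_nn=None,
                                        boltz_nns=[None],
                                        conf_fps=conf_fps)
    return final_fp, final_weights  # shapes (16,) and (1, 3)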
def split_and_sum(tensor, N):
"""spliting a torch Tensor into a list of uneven sized tensors,
and sum each tensor and stack
Example:
A = torch.rand(10, 10)
N = [4,6]
split_and_sum(A, N).shape # (2, 10)
Args:
tensor (torch.Tensor): tensors to be split and summed
N (list): list of number of atoms
Returns:
torch.Tensor: stacked tensor of summed smaller tensor
"""
batched_prop = list(torch.split(tensor, N))
for batch_idx in range(len(N)):
batched_prop[batch_idx] = torch.sum(batched_prop[batch_idx], dim=0)
return torch.stack(batched_prop)
def batch_and_sum(dict_input, N, predict_keys, xyz):
"""Pooling function to get graph property.
Separate the outputs back into batches, pool the results,
compute gradient of scalar properties if "_grad" is in the key name.
Args:
        dict_input (dict): dictionary of atomwise outputs to be pooled
        N (list): number of atoms in each molecule of the batch
        predict_keys (list): keys of the properties to predict
        xyz (tensor): xyz of the molecule
Returns:
dict: batched and pooled results
"""
results = dict()
for key, val in dict_input.items():
# split
if key in predict_keys and key + "_grad" not in predict_keys:
results[key] = split_and_sum(val, N)
elif key in predict_keys and key + "_grad" in predict_keys:
results[key] = split_and_sum(val, N)
grad = compute_grad(inputs=xyz, output=results[key])
results[key + "_grad"] = grad
# For the case only predicting gradient
elif key not in predict_keys and key + "_grad" in predict_keys:
results[key] = split_and_sum(val, N)
grad = compute_grad(inputs=xyz, output=results[key])
results[key + "_grad"] = grad
return results
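def _demo_batch_and_sum():
    """
    Illustrative sketch only: two molecules with 2 and 3 atoms. The atomwise
    "energy" below is an arbitrary function of the coordinates, so asking
    for "energy_grad" also returns its gradient with respect to xyz.
    """
    xyz = torch.rand(5, 3, requires_grad=True)
    dict_input = {"energy": xyz.pow(2).sum(-1, keepdim=True)}
    results = batch_and_sum(dict_input,
                            N=[2, 3],
                            predict_keys=["energy", "energy_grad"],
                            xyz=xyz)
    return results  # {"energy": shape (2, 1), "energy_grad": shape (5, 3)}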
def get_atoms_inside_cell(r, N, pbc):
"""Removes atoms outside of the unit cell which are carried in `r`
to ensure correct periodic boundary conditions. Does that by discarding
all atoms beyond N which are not in the reindexing mapping `pbc`.
Args:
r (torch.float): atomic embeddings
N (torch.long): number of atoms inside each graph
        pbc (torch.long): reindexing mapping of the atoms inside the unit cell
    Returns:
        torch.float: atomic embedding tensors inside the cell
"""
N = N.to(torch.long).tolist()
    # make N a list if it is an int
if type(N) == int:
N = [N]
# selecting only the atoms inside the unit cell
atoms_in_cell = [
set(x.cpu().data.numpy())
for x in torch.split(pbc, N)
]
N = [len(n) for n in atoms_in_cell]
atoms_in_cell = torch.cat([
torch.LongTensor(list(x))
for x in atoms_in_cell
])
r = r[atoms_in_cell]
return r, N
```
#### File: nn/models/conformers.py
```python
import torch
import torch.nn as nn
from nff.nn.layers import DEFAULT_DROPOUT_RATE
from nff.nn.modules import (
SchNetConv,
NodeMultiTaskReadOut,
ConfAttention,
LinearConfAttention
)
from nff.nn.graphop import conf_pool
from nff.nn.utils import construct_sequential
from nff.utils.scatter import compute_grad
from nff.utils.confs import split_batch
class WeightedConformers(nn.Module):
"""
Model that uses a representation of a molecule in terms of different 3D
conformers to predict properties. The fingerprints of each conformer are
generated using the SchNet model.
"""
def __init__(self, modelparams):
"""Constructs a SchNet-Like model using a conformer representation.
Args:
modelparams (dict): dictionary of parameters for model. All
are the same as in SchNet, except for `mol_fp_layers`,
which describes how to convert atomic fingerprints into
a single molecular fingerprint.
Example:
n_atom_basis = 256
mol_basis = 512
# all the atomic fingerprints get added together, then go through the network created
# by `mol_fp_layers` to turn into a molecular fingerprint
mol_fp_layers = [{'name': 'linear', 'param' : { 'in_features': n_atom_basis,
'out_features': int((n_atom_basis + mol_basis)/2)}},
{'name': 'shifted_softplus', 'param': {}},
{'name': 'linear', 'param' : { 'in_features': int((n_atom_basis + mol_basis)/2),
'out_features': mol_basis}}]
readoutdict = {
"covid": [{'name': 'linear', 'param' : { 'in_features': mol_basis,
'out_features': int(mol_basis / 2)}},
{'name': 'shifted_softplus', 'param': {}},
{'name': 'linear', 'param' : { 'in_features': int(mol_basis / 2),
'out_features': 1}},
{'name': 'sigmoid', 'param': {}}],
}
# dictionary to tell you what to do with the Boltzmann factors
# ex. 1:
boltzmann_dict = {"type": "multiply"}
# ex. 2
boltzmann_layers = [{'name': 'linear', 'param': {'in_features': mol_basis + 1,
'out_features': mol_basis}},
{'name': 'shifted_softplus', 'param': {}},
{'name': 'linear', 'param': {'in_features': mol_basis,
'out_features': mol_basis}}]
boltzmann_dict = {"type": "layers", "layers": boltzmann_layers}
modelparams = {
'n_atom_basis': n_atom_basis,
'n_filters': 256,
'n_gaussians': 32,
'n_convolutions': 4,
'cutoff': 5.0,
'trainable_gauss': True,
'readoutdict': readoutdict,
'mol_fp_layers': mol_fp_layers,
                'boltzmann_dict': boltzmann_dict,
'dropout_rate': 0.2
}
model = WeightedConformers(modelparams)
"""
nn.Module.__init__(self)
n_atom_basis = modelparams["n_atom_basis"]
n_filters = modelparams["n_filters"]
n_gaussians = modelparams["n_gaussians"]
n_convolutions = modelparams["n_convolutions"]
cutoff = modelparams["cutoff"]
trainable_gauss = modelparams.get("trainable_gauss", False)
dropout_rate = modelparams.get("dropout_rate", DEFAULT_DROPOUT_RATE)
self.atom_embed = nn.Embedding(100, n_atom_basis, padding_idx=0)
# convolutions
self.convolutions = nn.ModuleList(
[
SchNetConv(
n_atom_basis=n_atom_basis,
n_filters=n_filters,
n_gaussians=n_gaussians,
cutoff=cutoff,
trainable_gauss=trainable_gauss,
dropout_rate=dropout_rate,
)
for _ in range(n_convolutions)
]
)
# extra features to consider
self.extra_feats = modelparams.get("extra_features")
self.ext_feat_types = modelparams.get("ext_feat_types")
mol_fp_layers = modelparams["mol_fp_layers"]
readoutdict = modelparams["readoutdict"]
boltzmann_dict = modelparams["boltzmann_dict"]
        # the nn that converts atomic fingerprints to a molecular fp
self.mol_fp_nn = construct_sequential(mol_fp_layers)
# create a module that lets a molecular fp interact with the
# conformer's boltzmann weight to give a final molecular fp
self.boltz_nns = self.make_boltz_nn(boltzmann_dict)
self.head_pool = boltzmann_dict.get("head_pool", "concatenate")
        # the readout acts on this final molecular fp
self.readout = NodeMultiTaskReadOut(multitaskdict=readoutdict)
# whether this is a classifier
self.classifier = modelparams["classifier"]
# whether to embed fingerprints or just use external features
self.use_mpnn = modelparams.get("use_mpnn", True)
def make_boltz_nn(self, boltzmann_dict):
"""
Make the section of the network that creates weights for each
conformer, which may or may not be equal to the statistical
boltzmann weights.
Args:
boltzmann_dict (dict): dictionary with information about
this section of the network.
Returns:
networks (nn.ModuleList): list of networks that get applied
to the conformer fingerprints to aggregate them. If
it contains more than one network, the different fingerprints
produced will either be averaged or concatenated at the end.
"""
networks = nn.ModuleList([])
        # if you just want to multiply the Boltzmann weight by each conformer
        # fingerprint, return a list containing None
if boltzmann_dict["type"] == "multiply":
return [None]
# if you supply a dictionary of type `layers`, then the dictionary
# under the key `layers` will be used to create the corresponding
# network
elif boltzmann_dict["type"] == "layers":
layers = boltzmann_dict["layers"]
networks.append(construct_sequential(layers))
# if you ask for some sort of attention network, then make one such
# network for each of the number of heads
elif "attention" in boltzmann_dict["type"]:
if boltzmann_dict["type"] == "attention":
module = ConfAttention
elif boltzmann_dict["type"] == "linear_attention":
module = LinearConfAttention
else:
raise NotImplementedError
# how many attention heads
num_heads = boltzmann_dict.get("num_heads", 1)
# whether to just use equal weights and not learnable weights
# (useful for ablation studies)
equal_weights = boltzmann_dict.get("equal_weights", False)
# what function to use to convert the alpha_ij to probabilities
prob_func = boltzmann_dict.get("prob_func", 'softmax')
# add a network for each head
for _ in range(num_heads):
mol_basis = boltzmann_dict["mol_basis"]
boltz_basis = boltzmann_dict["boltz_basis"]
final_act = boltzmann_dict["final_act"]
networks.append(module(mol_basis=mol_basis,
boltz_basis=boltz_basis,
final_act=final_act,
equal_weights=equal_weights,
prob_func=prob_func))
return networks
def add_features(self, batch, **kwargs):
"""
Get any extra per-species features that were requested for
the dataset.
Args:
batch (dict): batched sample of species
Returns:
feats (list): list of feature tensors for each species.
"""
N = batch["num_atoms"].reshape(-1).tolist()
num_mols = len(N)
# if you didn't ask for any extra features, or none of the requested
# features are per-species features, return empty tensors
if self.extra_feats is None or "species" not in self.ext_feat_types:
return [torch.tensor([]) for _ in range(num_mols)]
assert all([feat in batch.keys() for feat in self.extra_feats])
feats = []
# go through each extra per-species feature
for feat_name, feat_type in zip(self.extra_feats, self.ext_feat_types):
if feat_type == "conformer":
continue
# how long each feature is
feat_len = len(batch[feat_name]) // num_mols
# split the batched features up by species and add them
# to the list
splits = [feat_len] * num_mols
feat = torch.stack(list(
torch.split(batch[feat_name], splits)))
feats.append(feat)
# concatenate the features
feats = torch.cat(feats, dim=-1)
return feats
def convolve_sub_batch(self,
batch,
xyz=None,
xyz_grad=False,
**kwargs):
"""
Apply the convolutional layers to a sub-batch.
Args:
batch (dict): dictionary of props
Returns:
r: new feature vector after the convolutions
N: list of the number of atoms for each molecule in the batch
xyz_grad (bool): whether we'll need the gradient wrt xyz
xyz: xyz (with a "requires_grad") for the batch
"""
# Note: we've given the option to input xyz from another source.
# E.g. if you already created an xyz and set requires_grad=True,
# you don't want to make a whole new one.
if xyz is None:
xyz = batch["nxyz"][:, 1:4]
xyz.requires_grad = xyz_grad
r = batch["nxyz"][:, 0]
a = batch["nbr_list"]
# offsets take care of periodic boundary conditions
offsets = batch.get("offsets", 0)
# to deal with any shape mismatches
if hasattr(offsets, 'max') and offsets.max() == 0:
offsets = 0
if "distances" in batch:
e = batch["distances"][:, None]
else:
e = (xyz[a[:, 0]] - xyz[a[:, 1]] -
offsets).pow(2).sum(1).sqrt()[:, None]
# ensuring image atoms have the same vectors of their corresponding
# atom inside the unit cell
r = self.atom_embed(r.long()).squeeze()
# update function includes periodic boundary conditions
for i, conv in enumerate(self.convolutions):
dr = conv(r=r, e=e, a=a)
r = r + dr
return r, xyz
def convolve(self,
batch,
sub_batch_size=None,
xyz=None,
xyz_grad=False):
"""
Apply the convolution layers to the batch.
Args:
batch (dict): batched sample of species
sub_batch_size (int): maximum number of conformers
in a sub-batch.
xyz (torch.Tensor): xyz of the batch
xyz_grad (bool): whether to set xyz.requires_grad = True
Returns:
new_node_feats (torch.Tensor): new node features after
the convolutions.
xyz (torch.Tensor): xyz of the batch
"""
        # for backwards compatibility
if not hasattr(self, "classifier"):
self.classifier = True
# split batches as necessary
if sub_batch_size is None:
sub_batches = [batch]
else:
sub_batches = split_batch(batch, sub_batch_size)
# go through each sub-batch, get the xyz and node features,
# and concatenate them when done
new_node_feat_list = []
xyz_list = []
for sub_batch in sub_batches:
new_node_feats, xyz = self.convolve_sub_batch(
sub_batch, xyz, xyz_grad)
new_node_feat_list.append(new_node_feats)
xyz_list.append(xyz)
new_node_feats = torch.cat(new_node_feat_list)
xyz = torch.cat(xyz_list)
return new_node_feats, xyz
def get_external_3d(self,
batch,
n_conf_list):
"""
Get any extra 3D per-conformer features that were requested for
the dataset.
Args:
batch (dict): batched sample of species
n_conf_list (list[int]): list of number of conformers in each
species.
Returns:
            split_extra (list): list of stacked per-conformer feature tensors
for each species.
"""
# if you didn't ask for any extra features, or none of the requested
# features are per-conformer features, return empty tensors
if (self.extra_feats is None or
"conformer" not in self.ext_feat_types):
return
# get all the features and split them up by species
extra_conf_fps = []
for feat_name, feat_type in zip(self.extra_feats,
self.ext_feat_types):
if feat_type == "conformer":
extra_conf_fps.append(batch[feat_name])
extra_conf_fps = torch.cat(extra_conf_fps, dim=-1)
split_extra = torch.split(extra_conf_fps, n_conf_list)
return split_extra
def get_conf_fps(self,
smiles_fp,
mol_size,
batch,
split_extra,
idx):
"""
Get per-conformer fingerprints.
Args:
smiles_fp (torch.Tensor): per-atom fingerprints
for every atom in the species. Note that this
has length mol_size x n_confs, where `mol_size`
is the number of atoms in the molecule, and
`n_confs` is the number of conformers.
mol_size (int): Number of atoms in the molecule
batch (dict): batched sample of species
split_extra (list): extra 3D fingerprints split by
species
idx (int): index of the current species in the batch.
"""
# total number of atoms
num_atoms = smiles_fp.shape[0]
        # number of conformers
num_confs = num_atoms // mol_size
N = [mol_size] * num_confs
conf_fps = []
# split the atomic fingerprints up by conformer
for atomic_fps in torch.split(smiles_fp, N):
# sum them and then convert to molecular fp
summed_atomic_fps = atomic_fps.sum(dim=0)
# put them through the network to convert summed
# atomic fps to a molecular fp
mol_fp = self.mol_fp_nn(summed_atomic_fps)
# add to the list of conformer fps
conf_fps.append(mol_fp)
# stack the conformer fps
conf_fps = torch.stack(conf_fps)
# if there are any extra 3D fingerprints, add them here
if split_extra is not None:
this_extra = split_extra[idx]
conf_fps = torch.cat([conf_fps, this_extra], dim=-1)
return conf_fps
def post_process(self, batch,
r,
xyz,
**kwargs):
"""
Split various items up by species, convert atomic fingerprints
to molecular fingerprints, and incorporate non-learnable features.
Args:
batch (dict): batched sample of species
r (torch.Tensor): atomwise learned features from the convolutions
xyz (torch.Tensor): xyz of the batch
Returns:
output (dict): various new items
"""
mol_sizes = batch["mol_size"].reshape(-1).tolist()
N = batch["num_atoms"].reshape(-1).tolist()
        num_confs = (torch.tensor(N) // torch.tensor(mol_sizes)).tolist()  # int sizes for torch.split
# split the fingerprints by species
fps_by_smiles = torch.split(r, N)
# get extra 3D fingerprints
split_extra = self.get_external_3d(batch,
num_confs)
# get all the conformer fingerprints for each species
conf_fps_by_smiles = []
for i, smiles_fp in enumerate(fps_by_smiles):
conf_fps = self.get_conf_fps(smiles_fp=smiles_fp,
mol_size=mol_sizes[i],
batch=batch,
split_extra=split_extra,
idx=i)
conf_fps_by_smiles.append(conf_fps)
# split the boltzmann weights by species
boltzmann_weights = torch.split(batch["weights"], num_confs)
# add any extra per-species features
extra_feats = self.add_features(batch=batch, **kwargs)
# return everything in a dictionary
outputs = dict(r=r,
N=N,
xyz=xyz,
conf_fps_by_smiles=conf_fps_by_smiles,
boltzmann_weights=boltzmann_weights,
mol_sizes=mol_sizes,
extra_feats=extra_feats)
return outputs
def fps_no_mpnn(self, batch, **kwargs):
"""
Get fingerprints without using an MPNN to get any learned fingerprints.
Args:
batch (dict): batched sample of species
Returns:
output (dict): various new items
"""
# number of atoms in each species, which is greater than `mol_size`
# if the number of conformers exceeds 1
N = batch["num_atoms"].reshape(-1).tolist()
# number of atoms in each molecule
mol_sizes = batch["mol_size"].reshape(-1).tolist()
# number of conformers per species
        n_conf_list = (torch.tensor(N) // torch.tensor(mol_sizes)).tolist()  # int sizes for torch.split
# get the conformer fps for each smiles
conf_fps_by_smiles = self.get_external_3d(batch,
n_conf_list)
# add any per-species fingerprints
boltzmann_weights = torch.split(batch["weights"], n_conf_list)
extra_feats = self.add_features(batch=batch, **kwargs)
outputs = {"conf_fps_by_smiles": conf_fps_by_smiles,
"boltzmann_weights": boltzmann_weights,
"mol_sizes": mol_sizes,
"extra_feats": extra_feats}
return outputs
def make_embeddings(self, batch, xyz=None, **kwargs):
"""
Make all conformer fingerprints.
Args:
batch (dict): batched sample of species
xyz (torch.Tensor): xyz of the batch
Returns:
output (dict): various new items
xyz (torch.Tensor): xyz of the batch
"""
        # for backward compatibility
if not hasattr(self, "use_mpnn"):
self.use_mpnn = True
# if using an MPNN, apply the convolution layers
# and then post-process
if self.use_mpnn:
r, xyz = self.convolve(batch=batch,
xyz=xyz,
**kwargs)
outputs = self.post_process(batch=batch,
r=r,
xyz=xyz, **kwargs)
# otherwise just use the non-learnable features
else:
outputs = self.fps_no_mpnn(batch, **kwargs)
xyz = None
return outputs, xyz
def pool(self, outputs):
"""
Pool the per-conformer outputs of the convolutions.
Here, the atomic fingerprints for each geometry get converted
into a molecular fingerprint. Then, the molecular
fingerprints for the different conformers of a given species
get multiplied by the Boltzmann weights or learned weights of
those conformers and added together to make a final fingerprint
for the species.
Args:
            outputs (dict): dictionary of conformer fingerprints, weights,
                and related items produced by `make_embeddings`
Returns:
final_fps (torch.Tensor): final per-species fingerprints
final_weights (list): weights assigned to each conformer
in the ensemble.
"""
conf_fps_by_smiles = outputs["conf_fps_by_smiles"]
batched_weights = outputs["boltzmann_weights"]
mol_sizes = outputs["mol_sizes"]
extra_feat_fps = outputs["extra_feats"]
final_fps = []
final_weights = []
# go through each species
for i in range(len(conf_fps_by_smiles)):
boltzmann_weights = batched_weights[i]
conf_fps = conf_fps_by_smiles[i]
mol_size = mol_sizes[i]
extra_feats = extra_feat_fps[i]
# for backward compatibility
if not hasattr(self, "boltz_nns"):
self.boltz_nns = nn.ModuleList([self.boltz_nn])
if not hasattr(self, "head_pool"):
self.head_pool = "concatenate"
# pool the atomic fingerprints
final_fp, learned_weights = conf_pool(
mol_size=mol_size,
boltzmann_weights=boltzmann_weights,
mol_fp_nn=self.mol_fp_nn,
boltz_nns=self.boltz_nns,
conf_fps=conf_fps,
head_pool=self.head_pool)
# add extra features if there are any
if extra_feats is not None:
extra_feats = extra_feats.to(final_fp.device)
final_fp = torch.cat((final_fp, extra_feats))
final_fps.append(final_fp)
final_weights.append(learned_weights)
final_fps = torch.stack(final_fps)
return final_fps, final_weights
def add_grad(self, batch, results, xyz):
"""
Add any required gradients of the predictions.
Args:
batch (dict): dictionary of props
results (dict): dictionary of predicted values
xyz (torch.tensor): (optional) coordinates
Returns:
results (dict): results updated with any gradients
requested.
"""
batch_keys = batch.keys()
# names of the gradients of each property
result_grad_keys = [key + "_grad" for key in results.keys()]
for key in batch_keys:
# if the batch with the ground truth contains one of
# these keys, then compute its predicted value
if key in result_grad_keys:
base_result = results[key.replace("_grad", "")]
results[key] = compute_grad(inputs=xyz,
output=base_result)
return results
def forward(self,
batch,
xyz=None,
**kwargs):
"""
Call the model.
Args:
batch (dict): dictionary of props
xyz (torch.tensor): (optional) coordinates
Returns:
results (dict): dictionary of predicted values
"""
# for backwards compatibility
if not hasattr(self, "classifier"):
self.classifier = True
# make conformer fingerprints
outputs, xyz = self.make_embeddings(batch, xyz, **kwargs)
# pool the fingerprints
pooled_fp, final_weights = self.pool(outputs)
        # apply the readout network to the fingerprints to get predicted values
results = self.readout(pooled_fp)
# add sigmoid if it's a classifier and not in training mode
if self.classifier and not self.training:
keys = list(self.readout.readout.keys())
for key in keys:
results[key] = torch.sigmoid(results[key])
# add any required gradients
results = self.add_grad(batch=batch, results=results, xyz=xyz)
# add in the weights of each conformer for later analysis
results.update({"learned_weights": final_weights})
return results
```
#### File: nn/models/graphconvintegration.py
```python
import torch
import torch.nn as nn
import copy
import torch.nn.functional as F
from nff.nn.layers import Dense, GaussianSmearing
from nff.nn.modules import GraphDis, SchNetConv, BondEnergyModule, SchNetEdgeUpdate, NodeMultiTaskReadOut
from nff.nn.activations import shifted_softplus
from nff.nn.graphop import batch_and_sum, get_atoms_inside_cell
from nff.nn.utils import get_default_readout
class GraphConvIntegration(nn.Module):
"""SchNet with optional aggr_weight for thermodynamic intergration
Attributes:
atom_embed (torch.nn.Embedding): Convert atomic number into an
embedding vector of size n_atom_basis
atomwise1 (Dense): dense layer 1 to compute energy
atomwise2 (Dense): dense layer 2 to compute energy
convolutions (torch.nn.ModuleList): include all the convolutions
prop_dics (dict): A dictionary of the form {name: prop_dic}, where name is the
property name and prop_dic is a dictionary for that property.
module_dict (ModuleDict): a dictionary of modules. Each entry has the form
{name: mod_list}, where name is the name of a property object and mod_list
is a ModuleList of layers to predict that property.
"""
def __init__(self, modelparams):
"""Constructs a SchNet model.
Args:
            modelparams (dict): dictionary of model parameters
"""
super().__init__()
n_atom_basis = modelparams['n_atom_basis']
n_filters = modelparams['n_filters']
n_gaussians = modelparams['n_gaussians']
n_convolutions = modelparams['n_convolutions']
cutoff = modelparams['cutoff']
trainable_gauss = modelparams.get('trainable_gauss', False)
# default predict var
readoutdict = modelparams.get('readoutdict', get_default_readout(n_atom_basis))
post_readout = modelparams.get('post_readout', None)
self.atom_embed = nn.Embedding(100, n_atom_basis, padding_idx=0)
self.convolutions = nn.ModuleList([
SchNetConv(n_atom_basis=n_atom_basis,
n_filters=n_filters,
n_gaussians=n_gaussians,
cutoff=cutoff,
trainable_gauss=trainable_gauss)
for _ in range(n_convolutions)
])
# ReadOut
self.atomwisereadout = NodeMultiTaskReadOut(multitaskdict=readoutdict, post_readout=post_readout)
self.device = None
def forward(self, batch, **kwargs):
"""Summary
Args:
batch (dict): dictionary of props
Returns:
            dict: dictionary of results
"""
r = batch['nxyz'][:, 0]
xyz = batch['nxyz'][:, 1:4]
N = batch['num_atoms'].reshape(-1).tolist()
a = batch['nbr_list']
aggr_wgt = batch['aggr_wgt']
# offsets take care of periodic boundary conditions
offsets = batch.get('offsets', 0)
xyz.requires_grad = True
# calculating the distances
e = (xyz[a[:, 0]] - xyz[a[:, 1]] + offsets).pow(2).sum(1).sqrt()[:, None]
# ensuring image atoms have the same vectors of their corresponding
# atom inside the unit cell
r = self.atom_embed(r.long()).squeeze()
# update function includes periodic boundary conditions
for i, conv in enumerate(self.convolutions):
dr = conv(r=r, e=e, a=a, aggr_wgt=aggr_wgt)
r = r + dr
r = self.atomwisereadout(r)
results = batch_and_sum(r, N, list(batch.keys()), xyz)
return results
```
#### File: nn/models/schnet.py
```python
from torch import nn
from nff.nn.layers import DEFAULT_DROPOUT_RATE
from nff.nn.modules import (
SchNetConv,
NodeMultiTaskReadOut,
get_rij,
add_stress
)
from nff.nn.modules.diabat import DiabaticReadout
from nff.nn.graphop import batch_and_sum
from nff.nn.utils import get_default_readout
from nff.utils.scatter import scatter_add
class SchNet(nn.Module):
"""SchNet implementation with continous filter.
Attributes:
atom_embed (torch.nn.Embedding): Convert atomic number into an
embedding vector of size n_atom_basis
convolutions (torch.nn.Module): convolution layers applied to the graph
atomwisereadout (torch.nn.Module): fully connected layers applied to the graph
to get the results of interest
device (int): GPU being used.
"""
def __init__(self, modelparams):
"""Constructs a SchNet model.
Args:
            modelparams (dict): dictionary of model parameters
Example:
n_atom_basis = 256
readoutdict = {
"energy_0": [{'name': 'linear', 'param' : { 'in_features': n_atom_basis,
'out_features': int(n_atom_basis / 2)}},
{'name': 'shifted_softplus', 'param': {}},
{'name': 'linear', 'param' : { 'in_features': int(n_atom_basis / 2),
'out_features': 1}}],
"energy_1": [{'name': 'linear', 'param' : { 'in_features': n_atom_basis,
'out_features': int(n_atom_basis / 2)}},
{'name': 'shifted_softplus', 'param': {}},
{'name': 'linear', 'param' : { 'in_features': int(n_atom_basis / 2),
'out_features': 1}}]
}
modelparams = {
'n_atom_basis': n_atom_basis,
'n_filters': 256,
'n_gaussians': 32,
'n_convolutions': 4,
'cutoff': 5.0,
'trainable_gauss': True,
'readoutdict': readoutdict,
'dropout_rate': 0.2
}
model = SchNet(modelparams)
"""
nn.Module.__init__(self)
n_atom_basis = modelparams["n_atom_basis"]
n_filters = modelparams["n_filters"]
n_gaussians = modelparams["n_gaussians"]
n_convolutions = modelparams["n_convolutions"]
cutoff = modelparams["cutoff"]
trainable_gauss = modelparams.get("trainable_gauss", False)
dropout_rate = modelparams.get("dropout_rate", DEFAULT_DROPOUT_RATE)
self.excl_vol = modelparams.get("excl_vol", False)
if self.excl_vol:
self.power = modelparams["V_ex_power"]
self.sigma = modelparams["V_ex_sigma"]
self.atom_embed = nn.Embedding(100, n_atom_basis, padding_idx=0)
readoutdict = modelparams.get(
"readoutdict", get_default_readout(n_atom_basis))
post_readout = modelparams.get("post_readout", None)
# convolutions
self.convolutions = nn.ModuleList(
[
SchNetConv(
n_atom_basis=n_atom_basis,
n_filters=n_filters,
n_gaussians=n_gaussians,
cutoff=cutoff,
trainable_gauss=trainable_gauss,
dropout_rate=dropout_rate,
)
for _ in range(n_convolutions)
]
)
# ReadOut
self.atomwisereadout = NodeMultiTaskReadOut(
multitaskdict=readoutdict, post_readout=post_readout
)
self.device = None
self.cutoff = cutoff
def set_cutoff(self):
if hasattr(self, "cutoff"):
return
gauss_centers = (self.convolutions[0].moduledict
['message_edge_filter'][0].offsets)
self.cutoff = gauss_centers[-1] - gauss_centers[0]
def convolve(self,
batch,
xyz=None):
"""
Apply the convolutional layers to the batch.
Args:
batch (dict): dictionary of props
        Returns:
            r: new feature vector after the convolutions
            N: list of the number of atoms for each molecule in the batch
            xyz: xyz (with a "requires_grad") for the batch
            r_ij: displacement vectors between neighboring atoms
            a: neighbor list after applying the cutoff
"""
# Note: we've given the option to input xyz from another source.
# E.g. if you already created an xyz and set requires_grad=True,
# you don't want to make a whole new one.
if xyz is None:
xyz = batch["nxyz"][:, 1:4]
# this logic is required for adversarial attacks
if not xyz.requires_grad and xyz.grad_fn is None:
xyz.requires_grad = True
r = batch["nxyz"][:, 0]
N = batch["num_atoms"].reshape(-1).tolist()
a = batch["nbr_list"]
# get r_ij including offsets and excluding
# anything in the neighbor skin
self.set_cutoff()
r_ij, a = get_rij(xyz=xyz,
batch=batch,
nbrs=a,
cutoff=self.cutoff)
dist = r_ij.pow(2).sum(1).sqrt()
e = dist[:, None]
r = self.atom_embed(r.long()).squeeze()
# update function includes periodic boundary conditions
for i, conv in enumerate(self.convolutions):
dr = conv(r=r, e=e, a=a)
r = r + dr
return r, N, xyz, r_ij, a
def V_ex(self, r_ij, nbr_list, xyz):
dist = (r_ij).pow(2).sum(1).sqrt()
potential = ((dist.reciprocal() * self.sigma).pow(self.power))
return scatter_add(potential,nbr_list[:, 0], dim_size=xyz.shape[0])[:, None]
def forward(self,
batch,
xyz=None,
requires_stress=False,
**kwargs):
"""Summary
Args:
batch (dict): dictionary of props
xyz (torch.tensor): (optional) coordinates
Returns:
dict: dictionary of results
"""
r, N, xyz, r_ij, a = self.convolve(batch, xyz)
r = self.atomwisereadout(r)
if getattr(self, "excl_vol", None):
# Excluded Volume interactions
r_ex = self.V_ex(r_ij, a, xyz)
r['energy'] += r_ex
results = batch_and_sum(r, N, list(batch.keys()), xyz)
if requires_stress:
results = add_stress(batch=batch,
all_results=results,
nbrs=a,
r_ij=r_ij)
return results
class SchNetDiabat(SchNet):
def __init__(self, modelparams):
super().__init__(modelparams)
self.diabatic_readout = DiabaticReadout(
diabat_keys=modelparams["diabat_keys"],
grad_keys=modelparams["grad_keys"],
energy_keys=modelparams["output_keys"])
def forward(self,
batch,
xyz=None,
add_nacv=False,
add_grad=True,
add_gap=True,
extra_grads=None,
try_speedup=False,
**kwargs):
        r, N, xyz, r_ij, a = self.convolve(batch, xyz)
output = self.atomwisereadout(r)
results = self.diabatic_readout(batch=batch,
output=output,
xyz=xyz,
add_nacv=add_nacv,
add_grad=add_grad,
add_gap=add_gap,
extra_grads=extra_grads,
try_speedup=try_speedup)
return results
```
#### File: nn/models/spooky_painn.py
```python
import torch
from torch import nn
import copy
import numpy as np
from nff.nn.models.painn import Painn
from nff.nn.models.spooky import parse_add_ons
from nff.nn.modules.spooky_painn import MessageBlock as SpookyMessage
from nff.nn.modules.spooky_painn import Electrostatics as PainnElectrostatics
from nff.nn.modules.spooky_painn import CombinedEmbedding
from nff.nn.modules.painn import MessageBlock as PainnMessage
from nff.nn.modules.spooky import NuclearRepulsion
from nff.nn.modules.diabat import DiabaticReadout
from nff.nn.layers import Diagonalize
from nff.utils.tools import make_directed
from nff.utils.scatter import compute_grad
from nff.nn.modules.schnet import (AttentionPool, SumPool, MolFpPool,
MeanPool, get_offsets, get_rij)
POOL_DIC = {"sum": SumPool,
"mean": MeanPool,
"attention": AttentionPool,
"mol_fp": MolFpPool}
def default(dic, key, val):
dic_val = dic.get(key)
if dic_val is None:
dic_val = val
return dic_val
def get_elec_terms(modelparams):
dic = dict(
charge_charge=default(modelparams, "charge_charge", True),
charge_dipole=default(modelparams, "charge_dipole", False),
dipole_dipole=default(modelparams, "dipole_dipole", False),
point_dipoles=default(modelparams, "point_dipoles", False)
)
return dic
class SpookyPainn(Painn):
def __init__(self,
modelparams):
"""
Args:
modelparams (dict): dictionary of model parameters
"""
Painn.__init__(self,
modelparams)
feat_dim = modelparams["feat_dim"]
activation = modelparams["activation"]
n_rbf = modelparams["n_rbf"]
cutoff = modelparams["cutoff"]
num_conv = modelparams["num_conv"]
learnable_k = modelparams.get("learnable_k", False)
conv_dropout = modelparams.get("conv_dropout", 0)
non_local = modelparams['non_local']
add_ons = parse_add_ons(modelparams)
add_nuc_keys, add_elec_keys, add_disp_keys = add_ons
msg_class = SpookyMessage if non_local else PainnMessage
self.embed_block = CombinedEmbedding(feat_dim=feat_dim,
activation=activation)
self.message_blocks = nn.ModuleList(
[msg_class(feat_dim=feat_dim,
activation=activation,
n_rbf=n_rbf,
cutoff=cutoff,
learnable_k=learnable_k,
dropout=conv_dropout,
fast_feats=modelparams.get("fast_feats"))
for _ in range(num_conv)]
)
elec_terms = get_elec_terms(modelparams)
self.electrostatics = nn.ModuleDict({
key: PainnElectrostatics(feat_dim=feat_dim,
activation=activation,
r_cut=cutoff,
**elec_terms)
for key in add_elec_keys
})
self.nuc_repulsion = nn.ModuleDict({
key: NuclearRepulsion(r_cut=cutoff)
for key in add_nuc_keys
})
if add_disp_keys:
raise NotImplementedError("Dispersion not implemented")
self.cutoff = cutoff
def atomwise(self,
batch,
nbrs,
num_atoms,
xyz=None):
        # for backwards compatibility
if isinstance(self.skip, bool):
self.skip = {key: self.skip
for key in self.output_keys}
nxyz = batch['nxyz']
charge = batch['charge']
spin = batch['spin']
if xyz is None:
xyz = nxyz[:, 1:]
xyz.requires_grad = True
z_numbers = nxyz[:, 0].long()
        # include offsets
# get r_ij including offsets and excluding
# anything in the neighbor skin
self.set_cutoff()
r_ij, nbrs = get_rij(xyz=xyz,
batch=batch,
nbrs=nbrs,
cutoff=self.cutoff)
s_i, v_i = self.embed_block(charge=charge,
spin=spin,
z=z_numbers,
num_atoms=num_atoms)
results = {}
for i, message_block in enumerate(self.message_blocks):
update_block = self.update_blocks[i]
ds_message, dv_message = message_block(
s_j=s_i,
v_j=v_i,
r_ij=r_ij,
nbrs=nbrs,
num_atoms=num_atoms.tolist())
s_i = s_i + ds_message
v_i = v_i + dv_message
ds_update, dv_update = update_block(s_i=s_i,
v_i=v_i)
s_i = s_i + ds_update
v_i = v_i + dv_update
if not any(self.skip.values()):
continue
readout_block = self.readout_blocks[i]
new_results = readout_block(s_i=s_i)
for key, skip in self.skip.items():
if not skip:
continue
if key not in new_results:
continue
if key in results:
results[key] += new_results[key]
else:
results[key] = new_results[key]
if not all(self.skip.values()):
first_readout = self.readout_blocks[0]
new_results = first_readout(s_i=s_i)
for key, skip in self.skip.items():
if key not in new_results:
continue
if not skip:
results[key] = new_results[key]
results['features'] = s_i
return results, xyz, s_i, v_i
def add_phys(self,
results,
s_i,
v_i,
xyz,
z,
charge,
nbrs,
num_atoms,
offsets,
mol_offsets,
mol_nbrs):
electrostatics = getattr(self, "electrostatics", {})
nuc_repulsion = getattr(self, "nuc_repulsion", {})
for key in self.output_keys:
if key in electrostatics:
elec_module = self.electrostatics[key]
elec_e, q, dip_atom, full_dip = elec_module(
s_i=s_i,
v_i=v_i,
z=z,
xyz=xyz,
total_charge=charge,
num_atoms=num_atoms,
mol_nbrs=mol_nbrs,
mol_offsets=mol_offsets)
results[key] = results[key] + elec_e.reshape(-1)
if key in nuc_repulsion:
nuc_module = self.nuc_repulsion[key]
nuc_e = nuc_module(xyz=xyz,
z=z,
nbrs=nbrs,
num_atoms=num_atoms,
offsets=offsets)
results[key] = results[key] + nuc_e.reshape(-1)
if key in electrostatics:
suffix = "_" + key.split("_")[-1]
if not any([i.isdigit() for i in suffix]):
suffix = ""
results.update({f"dipole{suffix}": full_dip,
f"q{suffix}": q,
f"dip_atom{suffix}": dip_atom})
def pool(self,
batch,
atomwise_out,
xyz,
nbrs,
num_atoms,
z,
s_i,
v_i):
offsets = get_offsets(batch, 'offsets')
mol_offsets = get_offsets(batch, 'mol_offsets')
mol_nbrs = batch.get('mol_nbrs')
if not hasattr(self, "output_keys"):
self.output_keys = list(self.readout_blocks[0]
.readoutdict.keys())
if not hasattr(self, "pool_dic"):
self.pool_dic = {key: SumPool() for key
in self.output_keys}
all_results = {}
for key, pool_obj in self.pool_dic.items():
results = pool_obj(batch=batch,
xyz=xyz,
atomwise_output=atomwise_out,
grad_keys=[],
out_keys=[key])
all_results.update(results)
self.add_phys(results=all_results,
s_i=s_i,
v_i=v_i,
xyz=xyz,
z=z,
charge=batch['charge'],
nbrs=nbrs,
num_atoms=num_atoms,
offsets=offsets,
mol_offsets=mol_offsets,
mol_nbrs=mol_nbrs)
for key in self.grad_keys:
output = all_results[key.replace("_grad", "")]
grad = compute_grad(output=output,
inputs=xyz)
all_results[key] = grad
return all_results, xyz
def run(self,
batch,
xyz=None,
**kwargs):
nbrs, _ = make_directed(batch['nbr_list'])
num_atoms = batch['num_atoms']
z = batch['nxyz'][:, 0].long()
atomwise_out, xyz, s_i, v_i = self.atomwise(
batch=batch,
xyz=xyz,
nbrs=nbrs,
num_atoms=num_atoms)
all_results, xyz = self.pool(batch=batch,
atomwise_out=atomwise_out,
xyz=xyz,
nbrs=nbrs,
num_atoms=num_atoms,
z=z,
s_i=s_i,
v_i=v_i)
if getattr(self, "compute_delta", False):
all_results = self.add_delta(all_results)
return all_results, xyz
def get_others_to_eig(diabat_keys):
others_to_eig = copy.deepcopy(diabat_keys)
num_states = len(diabat_keys)
for i in range(num_states):
for j in range(num_states):
val = others_to_eig[i][j]
others_to_eig[i][j] = "dipole_" + val.split("_")[-1]
return others_to_eig
class SpookyPainnDiabat(SpookyPainn):
def __init__(self, modelparams):
"""
`diabat_keys` has the shape of a 2x2 matrix
"""
energy_keys = modelparams["output_keys"]
diabat_keys = modelparams["diabat_keys"]
new_out_keys = list(set(np.array(diabat_keys).reshape(-1)
.tolist()))
new_modelparams = copy.deepcopy(modelparams)
new_modelparams.update({"output_keys": new_out_keys,
"grad_keys": []})
super().__init__(new_modelparams)
self.diag = Diagonalize()
others_to_eig = ([get_others_to_eig(diabat_keys)]
if self.electrostatics else None)
self.diabatic_readout = DiabaticReadout(
diabat_keys=diabat_keys,
grad_keys=modelparams["grad_keys"],
energy_keys=energy_keys,
delta=False,
stochastic_dic=modelparams.get("stochastic_dic"),
cross_talk_dic=modelparams.get("cross_talk_dic"),
hellmann_feynman=modelparams.get("hellmann_feynman", True),
others_to_eig=others_to_eig)
self.add_nacv = modelparams.get("add_nacv", False)
self.diabat_keys = diabat_keys
self.off_diag_keys = self.get_off_diag_keys()
@property
def _grad_keys(self):
return self.grad_keys
@_grad_keys.setter
def _grad_keys(self, value):
self.grad_keys = value
self.diabatic_readout.grad_keys = value
def get_off_diag_keys(self):
num_states = len(self.diabat_keys)
off_diag = []
for i in range(num_states):
for j in range(num_states):
if j <= i:
continue
off_diag.append(self.diabat_keys[i][j])
return off_diag
def get_diabat_charge(self,
key,
charge):
if key in self.off_diag_keys:
total_charge = torch.zeros_like(charge)
else:
total_charge = charge
return total_charge
def add_phys(self,
results,
s_i,
v_i,
xyz,
z,
charge,
nbrs,
num_atoms,
offsets,
mol_offsets,
mol_nbrs):
"""
Over-write because transition charges must sum to 0, not
to the total charge
"""
electrostatics = getattr(self, "electrostatics", {})
nuc_repulsion = getattr(self, "nuc_repulsion", {})
for key in self.output_keys:
if key in electrostatics:
elec_module = self.electrostatics[key]
                # transition charges sum to 0
                total_charge = self.get_diabat_charge(key=key,
                                                      charge=charge)
                elec_e, q, dip_atom, full_dip = elec_module(
                    s_i=s_i,
                    v_i=v_i,
                    z=z,
                    xyz=xyz,
                    total_charge=total_charge,
num_atoms=num_atoms,
mol_nbrs=mol_nbrs,
mol_offsets=mol_offsets)
results[key] = results[key] + elec_e.reshape(-1)
if key in nuc_repulsion:
nuc_module = self.nuc_repulsion[key]
nuc_e = nuc_module(xyz=xyz,
z=z,
nbrs=nbrs,
num_atoms=num_atoms,
offsets=offsets)
results[key] = results[key] + nuc_e.reshape(-1)
if key in electrostatics:
suffix = "_" + key.split("_")[-1]
if not any([i.isdigit() for i in suffix]):
suffix = ""
results.update({f"dipole{suffix}": full_dip,
f"q{suffix}": q,
f"dip_atom{suffix}": dip_atom})
def forward(self,
batch,
xyz=None,
add_nacv=False,
add_grad=True,
add_gap=True):
# for backwards compatability
self.grad_keys = []
if not hasattr(self, "output_keys"):
diabat_keys = self.diabatic_readout.diabat_keys
self.output_keys = list(set(np.array(diabat_keys)
.reshape(-1)
.tolist()))
if hasattr(self, "add_nacv"):
add_nacv = self.add_nacv
diabat_results, xyz = self.run(batch=batch,
xyz=xyz)
results = self.diabatic_readout(batch=batch,
xyz=xyz,
results=diabat_results,
add_nacv=add_nacv,
add_grad=add_grad,
add_gap=add_gap)
return results
```
#### File: nn/modules/painn.py
```python
import torch
from torch import nn
from nff.utils.tools import layer_types
from nff.nn.layers import (PainnRadialBasis, CosineEnvelope,
ExpNormalBasis, Dense)
from nff.utils.scatter import scatter_add
from nff.nn.modules.schnet import ScaleShift
from nff.nn.modules.torchmd_net import MessageBlock as MDMessage
from nff.nn.modules.torchmd_net import EmbeddingBlock as MDEmbedding
EPS = 1e-15
def norm(vec):
result = ((vec ** 2 + EPS).sum(-1)) ** 0.5
return result
def preprocess_r(r_ij):
"""
r_ij (n_nbrs x 3): tensor of interatomic vectors (r_j - r_i)
"""
dist = norm(r_ij)
unit = r_ij / dist.reshape(-1, 1)
return dist, unit
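def _demo_preprocess_r():
    """
    Illustrative sketch only: for a small batch of interatomic displacement
    vectors, `preprocess_r` returns their lengths and the unit vectors that
    the message blocks below operate on.
    """
    r_ij = torch.tensor([[1.0, 0.0, 0.0],
                         [0.0, 2.0, 0.0]])
    dist, unit = preprocess_r(r_ij)
    return dist, unit  # dist ~ [1.0, 2.0]; each row of unit has norm ~ 1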
def to_module(activation):
return layer_types[activation]()
class InvariantDense(nn.Module):
def __init__(self,
dim,
dropout,
activation='swish'):
super().__init__()
self.layers = nn.Sequential(Dense(in_features=dim,
out_features=dim,
bias=True,
dropout_rate=dropout,
activation=to_module(activation)),
Dense(in_features=dim,
out_features=3 * dim,
bias=True,
dropout_rate=dropout))
def forward(self, s_j):
output = self.layers(s_j)
return output
class DistanceEmbed(nn.Module):
def __init__(self,
n_rbf,
cutoff,
feat_dim,
learnable_k,
dropout):
super().__init__()
rbf = PainnRadialBasis(n_rbf=n_rbf,
cutoff=cutoff,
learnable_k=learnable_k)
dense = Dense(in_features=n_rbf,
out_features=3 * feat_dim,
bias=True,
dropout_rate=dropout)
self.block = nn.Sequential(rbf, dense)
self.f_cut = CosineEnvelope(cutoff=cutoff)
def forward(self, dist):
rbf_feats = self.block(dist)
envelope = self.f_cut(dist).reshape(-1, 1)
output = rbf_feats * envelope
return output
class InvariantMessage(nn.Module):
def __init__(self,
feat_dim,
activation,
n_rbf,
cutoff,
learnable_k,
dropout):
super().__init__()
self.inv_dense = InvariantDense(dim=feat_dim,
activation=activation,
dropout=dropout)
self.dist_embed = DistanceEmbed(n_rbf=n_rbf,
cutoff=cutoff,
feat_dim=feat_dim,
learnable_k=learnable_k,
dropout=dropout)
def forward(self,
s_j,
dist,
nbrs):
phi = self.inv_dense(s_j)[nbrs[:, 1]]
w_s = self.dist_embed(dist)
output = phi * w_s
# split into three components, so the tensor now has
# shape n_atoms x 3 x feat_dim
feat_dim = s_j.shape[-1]
out_reshape = output.reshape(output.shape[0], 3, feat_dim)
return out_reshape
class MessageBase(nn.Module):
def forward(self,
s_j,
v_j,
r_ij,
nbrs):
dist, unit = preprocess_r(r_ij)
inv_out = self.inv_message(s_j=s_j,
dist=dist,
nbrs=nbrs)
split_0 = inv_out[:, 0, :].unsqueeze(-1)
split_1 = inv_out[:, 1, :]
split_2 = inv_out[:, 2, :].unsqueeze(-1)
unit_add = split_2 * unit.unsqueeze(1)
delta_v_ij = unit_add + split_0 * v_j[nbrs[:, 1]]
delta_s_ij = split_1
# add results from neighbors of each node
graph_size = s_j.shape[0]
delta_v_i = scatter_add(src=delta_v_ij,
index=nbrs[:, 0],
dim=0,
dim_size=graph_size)
delta_s_i = scatter_add(src=delta_s_ij,
index=nbrs[:, 0],
dim=0,
dim_size=graph_size)
return delta_s_i, delta_v_i
class MessageBlock(MessageBase):
def __init__(self,
feat_dim,
activation,
n_rbf,
cutoff,
learnable_k,
dropout,
**kwargs):
super().__init__()
self.inv_message = InvariantMessage(feat_dim=feat_dim,
activation=activation,
n_rbf=n_rbf,
cutoff=cutoff,
learnable_k=learnable_k,
dropout=dropout)
def forward(self,
s_j,
v_j,
r_ij,
nbrs,
**kwargs):
dist, unit = preprocess_r(r_ij)
inv_out = self.inv_message(s_j=s_j,
dist=dist,
nbrs=nbrs)
split_0 = inv_out[:, 0, :].unsqueeze(-1)
split_1 = inv_out[:, 1, :]
split_2 = inv_out[:, 2, :].unsqueeze(-1)
unit_add = split_2 * unit.unsqueeze(1)
delta_v_ij = unit_add + split_0 * v_j[nbrs[:, 1]]
delta_s_ij = split_1
# add results from neighbors of each node
graph_size = s_j.shape[0]
delta_v_i = scatter_add(src=delta_v_ij,
index=nbrs[:, 0],
dim=0,
dim_size=graph_size)
delta_s_i = scatter_add(src=delta_s_ij,
index=nbrs[:, 0],
dim=0,
dim_size=graph_size)
return delta_s_i, delta_v_i
class InvariantTransformerMessage(nn.Module):
def __init__(self,
rbf,
num_heads,
feat_dim,
activation,
layer_norm):
super().__init__()
self.msg_layer = MDMessage(feat_dim=feat_dim,
num_heads=num_heads,
activation=activation,
rbf=rbf)
self.dense = Dense(in_features=(num_heads * feat_dim),
out_features=(3 * feat_dim),
bias=True,
activation=None)
self.layer_norm = nn.LayerNorm(feat_dim) if (layer_norm) else None
def forward(self,
s_j,
dist,
nbrs):
inp = self.layer_norm(s_j) if self.layer_norm else s_j
output = self.dense(self.msg_layer(dist=dist,
nbrs=nbrs,
x_i=inp))
out_reshape = output.reshape(output.shape[0], 3, -1)
return out_reshape
class TransformerMessageBlock(MessageBase):
def __init__(self,
rbf,
num_heads,
feat_dim,
activation,
layer_norm):
super().__init__()
self.inv_message = InvariantTransformerMessage(
rbf=rbf,
num_heads=num_heads,
feat_dim=feat_dim,
activation=activation,
layer_norm=layer_norm)
class UpdateBlock(nn.Module):
def __init__(self,
feat_dim,
activation,
dropout):
super().__init__()
self.u_mat = Dense(in_features=feat_dim,
out_features=feat_dim,
bias=False)
self.v_mat = Dense(in_features=feat_dim,
out_features=feat_dim,
bias=False)
self.s_dense = nn.Sequential(Dense(in_features=2*feat_dim,
out_features=feat_dim,
bias=True,
dropout_rate=dropout,
activation=to_module(activation)),
Dense(in_features=feat_dim,
out_features=3*feat_dim,
bias=True,
dropout_rate=dropout))
def forward(self,
s_i,
v_i):
# v_i = (num_atoms, num_feats, 3)
# v_i.transpose(1, 2).reshape(-1, v_i.shape[1])
# = (num_atoms, 3, num_feats).reshape(-1, num_feats)
# = (num_atoms * 3, num_feats)
# -> So the same u gets applied to each atom
# and for each of the three dimensions, but differently
# for the different feature dimensions
        v_transpose = v_i.transpose(1, 2).reshape(-1, v_i.shape[1])
        # now reshape it to (num_atoms, 3, num_feats) and transpose
        # to get (num_atoms, num_feats, 3)
        num_feats = v_i.shape[1]
        u_v = (self.u_mat(v_transpose).reshape(-1, 3, num_feats)
               .transpose(1, 2))
        v_v = (self.v_mat(v_transpose).reshape(-1, 3, num_feats)
               .transpose(1, 2))
v_v_norm = norm(v_v)
s_stack = torch.cat([s_i, v_v_norm], dim=-1)
split = (self.s_dense(s_stack)
.reshape(s_i.shape[0], 3, -1))
# delta v update
a_vv = split[:, 0, :].unsqueeze(-1)
delta_v_i = u_v * a_vv
# delta s update
a_sv = split[:, 1, :]
a_ss = split[:, 2, :]
inner = (u_v * v_v).sum(-1)
delta_s_i = inner * a_sv + a_ss
return delta_s_i, delta_v_i
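def _demo_update_block():
    """
    Illustrative sketch only: shows the tensor shapes flowing through
    `UpdateBlock` for a made-up batch of 4 atoms with 8 features.
    """
    block = UpdateBlock(feat_dim=8, activation='swish', dropout=0.0)
    s_i = torch.rand(4, 8)     # scalar features: (num_atoms, feat_dim)
    v_i = torch.rand(4, 8, 3)  # vector features: (num_atoms, feat_dim, 3)
    delta_s_i, delta_v_i = block(s_i=s_i, v_i=v_i)
    return delta_s_i, delta_v_i  # shapes (4, 8) and (4, 8, 3)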
class EmbeddingBlock(nn.Module):
def __init__(self,
feat_dim):
super().__init__()
self.atom_embed = nn.Embedding(100, feat_dim, padding_idx=0)
self.feat_dim = feat_dim
def forward(self,
z_number,
**kwargs):
num_atoms = z_number.shape[0]
s_i = self.atom_embed(z_number)
v_i = (torch.zeros(num_atoms, self.feat_dim, 3)
.to(s_i.device))
return s_i, v_i
class NewEmbeddingBlock(nn.Module):
def __init__(self,init_size,
feat_dim):
super().__init__()
self.atom_embed = nn.Linear(init_size, feat_dim)
self.feat_dim = feat_dim
def forward(self,
init_vec,num_atoms,
**kwargs):
sz = init_vec.shape[0]
s_i = self.atom_embed(init_vec)
v_i = (torch.zeros(num_atoms, self.feat_dim, 3)
.to(s_i.device))
return s_i, v_i
class NbrEmbeddingBlock(nn.Module):
def __init__(self,
feat_dim,
dropout,
rbf):
super().__init__()
self.embedding = MDEmbedding(feat_dim=feat_dim,
dropout=dropout,
rbf=rbf)
self.feat_dim = feat_dim
def forward(self,
z_number,
nbrs,
r_ij):
num_atoms = z_number.shape[0]
dist, _ = preprocess_r(r_ij)
s_i = self.embedding(z_number=z_number,
nbrs=nbrs,
dist=dist)
v_i = (torch.zeros(num_atoms, self.feat_dim, 3)
.to(s_i.device))
return s_i, v_i
class ReadoutBlock(nn.Module):
def __init__(self,
feat_dim,
output_keys,
activation,
dropout,
means=None,
stddevs=None):
super().__init__()
self.readoutdict = nn.ModuleDict(
{key: nn.Sequential(
Dense(in_features=feat_dim,
out_features=feat_dim//2,
bias=True,
dropout_rate=dropout,
activation=to_module(activation)),
Dense(in_features=feat_dim//2,
out_features=1,
bias=True,
dropout_rate=dropout))
for key in output_keys}
)
self.scale_shift = ScaleShift(means=means,
stddevs=stddevs)
def forward(self, s_i):
"""
Note: no atomwise summation. That's done in the model itself
"""
results = {}
for key, readoutdict in self.readoutdict.items():
output = readoutdict(s_i)
output = self.scale_shift(output, key)
results[key] = output
return results
```
#### File: nff/reactive_tools/neb.py
```python
import numpy as np
import copy
from nff.io.ase import NeuralFF, AtomsBatch
from nff.reactive_tools.utils import *
from ase.io import read
from ase.neb import NEB, SingleCalculatorNEB, NEBTools
from ase.optimize import MDMin, BFGS, QuasiNewton, FIRE
from ase import Atoms
def neural_neb_ase(reactantxyzfile, productxyzfile, nff_dir,
rxn_name,
steps=500, n_images=24,
fmax=0.004, isclimb=False):
#reactant and products as ase Atoms
initial = AtomsBatch(xyz_to_ase_atoms(reactantxyzfile),
cutoff=5.5, directed=True)
final = AtomsBatch(xyz_to_ase_atoms(productxyzfile),
cutoff=5.5, directed=True)
# Make a band consisting of n_images:
images = [initial]
images += [copy.deepcopy(initial) for i in range(n_images)]
images += [final]
neb = SingleCalculatorNEB(images, k=0.02, climb=isclimb)
neb.method = 'improvedtangent'
    # Linearly interpolate the positions of the n_images:
neb.interpolate()
neb.idpp_interpolate(optimizer=BFGS, steps=steps)
images = read('idpp.traj@-{}:'.format(str(n_images+2)))
# # Set calculators:
nff_ase = NeuralFF.from_file(nff_dir, device='cuda:0')
neb.set_calculators(nff_ase)
# # Optimize:
optimizer = BFGS(neb,
trajectory = '{}/{}.traj'.format(nff_dir, rxn_name))
optimizer.run(fmax=fmax, steps=steps)
# Read NEB images from File
images = read('{}/{}.traj@-{}:'.format(nff_dir, rxn_name, str(n_images+2)))
return images
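def _demo_neural_neb(nff_dir="trained_model"):
    """
    Illustrative sketch only: every path below is a hypothetical placeholder.
    Runs a short NEB between two geometries using a trained NFF model
    directory and returns the relaxed band images.
    """
    return neural_neb_ase(reactantxyzfile="reactant.xyz",
                          productxyzfile="product.xyz",
                          nff_dir=nff_dir,
                          rxn_name="demo_rxn",
                          steps=100,
                          n_images=12,
                          isclimb=False)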
```
#### File: nff/reactive_tools/reactive_langevin.py
```python
import numpy as np
from ase.io import Trajectory
from ase.md.langevin import *
from ase import Atoms
from ase.units import Bohr,Rydberg,kJ,kB,fs,Hartree,mol,kcal,second,Ang
from nff.md.utils import NeuralMDLogger, write_traj
class Reactive_Dynamics:
def __init__(self,
atomsbatch,
nms_vel,
mdparam,
):
# initialize the atoms batch system
self.atomsbatch = atomsbatch
self.mdparam = mdparam
#initialize velocity from nms
self.vel = nms_vel
self.temperature = self.mdparam['T_init']
self.friction = self.mdparam['friction']
# todo: structure optimization before starting
        # initialize system momentum by normal mode sampling
self.atomsbatch.set_velocities(self.vel.reshape(-1,3) * Ang / second)
# set thermostats
integrator = self.mdparam['thermostat']
self.integrator = integrator(self.atomsbatch,
self.mdparam['time_step'] * fs,
self.temperature * kB,
self.friction)
# attach trajectory dump
self.traj = Trajectory(self.mdparam['traj_filename'], 'w', self.atomsbatch)
self.integrator.attach(self.traj.write, interval=mdparam['save_frequency'])
# attach log file
self.integrator.attach(NeuralMDLogger(self.integrator,
self.atomsbatch,
self.mdparam['thermo_filename'],
mode='a'), interval=mdparam['save_frequency'])
def run(self):
self.integrator.run(self.mdparam['steps'])
#self.traj.close()
def save_as_xyz(self, filename):
'''
TODO: save time information
TODO: subclass TrajectoryReader/TrajectoryReader to digest AtomsBatch instead of Atoms?
TODO: other system variables in .xyz formats
'''
traj = Trajectory(self.mdparam['traj_filename'], mode='r')
xyz = []
for snapshot in traj:
frames = np.concatenate([
snapshot.get_atomic_numbers().reshape(-1, 1),
snapshot.get_positions().reshape(-1, 3)
], axis=1)
xyz.append(frames)
write_traj(filename, np.array(xyz))
```
#### File: nff/reactive_tools/utils.py
```python
from ase.vibrations import Vibrations
from ase.units import Bohr, mol, kcal
from ase import Atoms
import numpy as np
from rdkit import Chem
PT = Chem.GetPeriodicTable()
HA2J = 4.359744E-18
BOHRS2ANG = 0.529177
SPEEDOFLIGHT = 2.99792458E8
AMU2KG = 1.660538782E-27
def neural_hessian_ase(ase_atoms):
print("Calculating Numerical Hessian using ASE")
vib = Vibrations(ase_atoms, delta=0.05)
vib.run()
vib.summary()
hessian = np.array(vib.H) * (kcal/mol) * Bohr**2
vib.clean()
return hessian
def neural_energy_ase(ase_atoms):
return ase_atoms.get_potential_energy()[0]
def neural_force_ase(ase_atoms):
return ase_atoms.get_forces()
def xyz_to_ase_atoms(xyz_file):
sym = []
pos = []
f = open(xyz_file, "r")
lines = f.readlines()
for i, line in enumerate(lines):
if i > 1:
element, x, y, z = line.split()
sym.append(element)
pos.append([float(x), float(y), float(z)])
return Atoms(
symbols=sym,
positions=pos,
pbc=False,
)
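def _demo_xyz_to_ase_atoms(path="water.xyz"):
    """
    Illustrative sketch only: writes a minimal three-atom .xyz file (atom
    count, comment line, then `element x y z` rows) and parses it back into
    an ase.Atoms object with the helper above. The file name is arbitrary.
    """
    lines = ["3", "water",
             "O 0.000 0.000 0.000",
             "H 0.960 0.000 0.000",
             "H -0.240 0.930 0.000"]
    with open(path, "w") as f:
        f.write("\n".join(lines) + "\n")
    return xyz_to_ase_atoms(path)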
def moi_tensor(massvec, expmassvec, xyz):
# Center of Mass
com = np.sum(expmassvec.reshape(-1, 3)
* xyz.reshape(-1, 3), axis=0) / np.sum(massvec)
# xyz shifted to COM
xyz_com = xyz.reshape(-1, 3) - com
# Compute elements need to calculate MOI tensor
mass_xyz_com_sq_sum = np.sum(
expmassvec.reshape(-1, 3) * xyz_com ** 2, axis=0)
mass_xy = np.sum(massvec * xyz_com[:, 0] * xyz_com[:, 1], axis=0)
mass_yz = np.sum(massvec * xyz_com[:, 1] * xyz_com[:, 2], axis=0)
mass_xz = np.sum(massvec * xyz_com[:, 0] * xyz_com[:, 2], axis=0)
# MOI tensor
moi = np.array([[mass_xyz_com_sq_sum[1] + mass_xyz_com_sq_sum[2], -1 * mass_xy, -1 * mass_xz],
[-1 * mass_xy, mass_xyz_com_sq_sum[0] +
mass_xyz_com_sq_sum[2], -1 * mass_yz],
[-1 * mass_xz, -1 * mass_yz, mass_xyz_com_sq_sum[0] + mass_xyz_com_sq_sum[1]]])
# MOI eigenvectors and eigenvalues
moi_eigval, moi_eigvec = np.linalg.eig(moi)
return xyz_com, moi_eigvec
def trans_rot_vec(massvec, xyz_com, moi_eigvec):
# Mass-weighted translational vectors
zero_vec = np.zeros([len(massvec)])
sqrtmassvec = np.sqrt(massvec)
expsqrtmassvec = np.repeat(sqrtmassvec, 3)
d1 = np.transpose(np.stack((sqrtmassvec, zero_vec, zero_vec))).reshape(-1)
d2 = np.transpose(np.stack((zero_vec, sqrtmassvec, zero_vec))).reshape(-1)
d3 = np.transpose(np.stack((zero_vec, zero_vec, sqrtmassvec))).reshape(-1)
# Mass-weighted rotational vectors
big_p = np.matmul(xyz_com, moi_eigvec)
d4 = (np.repeat(big_p[:, 1], 3).reshape(-1)
* np.tile(moi_eigvec[:, 2], len(massvec)).reshape(-1)
- np.repeat(big_p[:, 2], 3).reshape(-1)
* np.tile(moi_eigvec[:, 1], len(massvec)).reshape(-1)) * expsqrtmassvec
d5 = (np.repeat(big_p[:, 2], 3).reshape(-1)
* np.tile(moi_eigvec[:, 0], len(massvec)).reshape(-1)
- np.repeat(big_p[:, 0], 3).reshape(-1)
* np.tile(moi_eigvec[:, 2], len(massvec)).reshape(-1)) * expsqrtmassvec
d6 = (np.repeat(big_p[:, 0], 3).reshape(-1)
* np.tile(moi_eigvec[:, 1], len(massvec)).reshape(-1)
- np.repeat(big_p[:, 1], 3).reshape(-1)
* np.tile(moi_eigvec[:, 0], len(massvec)).reshape(-1)) * expsqrtmassvec
d1_norm = d1 / np.linalg.norm(d1)
d2_norm = d2 / np.linalg.norm(d2)
d3_norm = d3 / np.linalg.norm(d3)
d4_norm = d4 / np.linalg.norm(d4)
d5_norm = d5 / np.linalg.norm(d5)
d6_norm = d6 / np.linalg.norm(d6)
dx_norms = np.stack((d1_norm,
d2_norm,
d3_norm,
d4_norm,
d5_norm,
d6_norm))
return dx_norms
def vib_analy(r, xyz, hessian):
# r is the proton number of atoms
# xyz is the cartesian coordinates in Angstrom
# Hessian elements in atomic units (Ha/bohr^2)
massvec = np.array([PT.GetAtomicWeight(i.item()) * AMU2KG
for i in list(np.array(r.reshape(-1)).astype(int))])
expmassvec = np.repeat(massvec, 3)
sqrtinvmassvec = np.divide(1.0, np.sqrt(expmassvec))
hessian_mwc = np.einsum('i,ij,j->ij', sqrtinvmassvec,
hessian, sqrtinvmassvec)
hessian_eigval, hessian_eigvec = np.linalg.eig(hessian_mwc)
xyz_com, moi_eigvec = moi_tensor(massvec, expmassvec, xyz)
dx_norms = trans_rot_vec(massvec, xyz_com, moi_eigvec)
P = np.identity(3 * len(massvec))
for dx_norm in dx_norms:
P -= np.outer(dx_norm, dx_norm)
# Projecting the T and R modes out of the hessian
mwhess_proj = np.dot(P.T, hessian_mwc).dot(P)
hessian_eigval, hessian_eigvec = np.linalg.eigh(mwhess_proj)
neg_ele = []
for i, eigval in enumerate(hessian_eigval):
if eigval < 0:
neg_ele.append(i)
hessian_eigval_abs = np.abs(hessian_eigval)
pre_vib_freq_cm_1 = np.sqrt(
hessian_eigval_abs * HA2J * 10e19) / (SPEEDOFLIGHT * 2 * np.pi * BOHRS2ANG * 100)
vib_freq_cm_1 = pre_vib_freq_cm_1.copy()
for i in neg_ele:
vib_freq_cm_1[i] = -1 * pre_vib_freq_cm_1[i]
trans_rot_elms = []
for i, freq in enumerate(vib_freq_cm_1):
        # Modes with |frequency| below 1.0 cm^-1 are translations/rotations, not normal modes
if np.abs(freq) < 1.0:
trans_rot_elms.append(i)
force_constants_J_m_2 = np.delete(
hessian_eigval * HA2J * 1e20 / (BOHRS2ANG ** 2) * AMU2KG, trans_rot_elms)
proj_vib_freq_cm_1 = np.delete(vib_freq_cm_1, trans_rot_elms)
proj_hessian_eigvec = np.delete(hessian_eigvec.T, trans_rot_elms, 0)
return force_constants_J_m_2, proj_vib_freq_cm_1, proj_hessian_eigvec
```
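
A hedged end-to-end sketch of how these helpers fit together: read a geometry, attach a calculator (the `calc` object is a placeholder, not defined in this file), build a finite-difference Hessian, and project out translations and rotations. Whether the unit conversion in `neural_hessian_ase` matches the atomic units expected by `vib_analy` should be checked against the original repository.

```python
# Illustrative sketch only; `calc` is a placeholder calculator.
from nff.reactive_tools.utils import (xyz_to_ase_atoms, neural_hessian_ase,
                                      vib_analy)

atoms = xyz_to_ase_atoms('molecule.xyz')
atoms.calc = calc

hessian = neural_hessian_ase(atoms)
force_consts, freqs_cm_1, modes = vib_analy(
    r=atoms.get_atomic_numbers(),
    xyz=atoms.get_positions(),
    hessian=hessian,
)
print(freqs_cm_1)   # projected vibrational frequencies in cm^-1
```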
#### File: train/builders/trainer.py
```python
import os
import json
import nff
import torch
from torch.optim import Adam
def get_trainer(args, model, train_loader, val_loader, metrics, loss_fn=None):
# setup hook and logging
hooks = [nff.train.MaxEpochHook(args.max_epochs)]
# filter for trainable parameters (https://github.com/pytorch/pytorch/issues/679)
trainable_params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = Adam(trainable_params, lr=args.lr)
schedule = nff.train.ReduceLROnPlateauHook(
optimizer=optimizer,
patience=args.lr_patience,
factor=args.lr_decay,
min_lr=args.lr_min,
window_length=1,
stop_after_min=True,
)
hooks.append(schedule)
printer = nff.train.PrintingHook(
os.path.join(args.model_path, 'log'),
metrics,
log_memory=(args.device != 'cpu'),
separator=' | '
)
hooks.append(printer)
if args.logger == 'csv':
logger = nff.train.CSVHook(
os.path.join(args.model_path, 'log'),
metrics,
every_n_epochs=args.log_every_n_epochs,
)
hooks.append(logger)
elif args.logger == 'tensorboard':
logger = nff.train.TensorboardHook(
os.path.join(args.model_path, 'log'),
metrics,
every_n_epochs=args.log_every_n_epochs,
)
hooks.append(logger)
if loss_fn is None:
loss_fn = nff.train.build_mse_loss(json.loads(args.loss_coef))
trainer = nff.train.Trainer(
args.model_path,
model,
loss_fn,
optimizer,
train_loader,
val_loader,
checkpoint_interval=1,
hooks=hooks,
)
return trainer
```
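
For orientation, a sketch of the arguments `get_trainer` actually reads (everything else, i.e. `model`, the loaders and `metrics`, is assumed to exist already). The attribute values and the loss keys are placeholders, and the final `train()` call is an assumption about the Trainer API rather than something defined in this file.

```python
# Hypothetical usage sketch; values are placeholders.
from argparse import Namespace

args = Namespace(
    max_epochs=100,
    lr=1e-4,
    lr_patience=10,
    lr_decay=0.5,
    lr_min=1e-6,
    model_path='./model',
    device='cuda',
    logger='csv',
    log_every_n_epochs=1,
    loss_coef='{"energy": 0.1, "energy_grad": 1.0}',   # parsed with json.loads
)

trainer = get_trainer(args, model, train_loader, val_loader, metrics)
trainer.train(args.device)   # assumed Trainer API
```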
#### File: train/hooks/logging.py
```python
import os
import time
import numpy as np
import torch
import json
import sys
from nff.train.hooks import Hook
from nff.train.metrics import (RootMeanSquaredError, PrAuc, RocAuc)
class LoggingHook(Hook):
"""Base class for logging hooks.
Args:
log_path (str): path to directory in which log files will be stored.
metrics (list): metrics to log; each metric has to be a subclass of spk.Metric.
log_train_loss (bool, optional): enable logging of training loss.
log_validation_loss (bool, optional): enable logging of validation loss.
log_learning_rate (bool, optional): enable logging of current learning rate.
global_rank (int): index of the gpu among all gpus in parallel training
world_size (int): total number of gpus in parallel training
"""
def __init__(
self,
log_path,
metrics,
log_train_loss=True,
log_validation_loss=True,
log_learning_rate=True,
mini_batches=1,
global_rank=0,
world_size=1
):
self.log_train_loss = log_train_loss
self.log_validation_loss = log_validation_loss
self.log_learning_rate = log_learning_rate
self.log_path = log_path
self._train_loss = 0
self._counter = 0
self.metrics = metrics
self.mini_batches = mini_batches
self.global_rank = global_rank
self.world_size = world_size
self.par_folders = self.get_par_folders()
self.parallel = world_size > 1
self.metric_dic = None
def on_epoch_begin(self, trainer):
"""Log at the beginning of train epoch.
Args:
trainer (Trainer): instance of schnetpack.train.trainer.Trainer class.
"""
# reset train_loss and counter
if self.log_train_loss:
self._train_loss = 0.0
self._counter = 0
else:
self._train_loss = None
def on_batch_end(self, trainer, train_batch, result, loss):
if self.log_train_loss:
n_samples = self._batch_size(result)
self._train_loss += float(loss.data) * n_samples
self._counter += n_samples
def _batch_size(self, result):
if type(result) is dict:
n_samples = list(result.values())[0].size(0)
elif type(result) in [list, tuple]:
n_samples = result[0].size(0)
else:
n_samples = result.size(0)
return n_samples
def on_validation_begin(self, trainer):
for metric in self.metrics:
metric.reset()
def on_validation_batch_end(self, trainer, val_batch, val_result):
for metric in self.metrics:
metric.add_batch(val_batch, val_result)
def get_base_folder(self):
"""
Get the model folder that has all the sub-folders with parallel
logging.
Args:
None
Returns:
base_folder (str): model folder
"""
sep = os.path.sep
# the log path will always be /path/to/folder/name_of_log_file
# Remove the last part of the path. Also, if this is being logged
# to main_folder/global_rank, then remove the second last
# part of the path
base_folder = os.path.join(*self.log_path.split(sep)[:-1])
if base_folder.endswith(sep + str(self.global_rank)):
base_folder = os.path.join(*base_folder.split(sep)[:-1])
# if the original path began with "/", then add it back
if self.log_path.startswith(sep):
base_folder = sep + base_folder
return base_folder
def get_par_folders(self):
"""
Get names of the parallel folders in the main model directory.
Args:
None
Returns:
par_folders (list): names of the parallel folders
"""
base_folder = self.get_base_folder()
par_folders = [os.path.join(base_folder, str(i))
for i in range(self.world_size)]
return par_folders
def save_metrics(self, epoch, test):
"""
Save data from the metrics calculated on this parallel process.
Args:
epoch (int): current epoch
Returns:
None
"""
# save metrics to json file
par_folder = self.par_folders[self.global_rank]
if test:
json_file = os.path.join(
par_folder, "epoch_{}_test.json".format(epoch))
else:
json_file = os.path.join(par_folder, "epoch_{}.json".format(epoch))
# if the json file you're saving to already exists,
# then load its contents
if os.path.isfile(json_file):
with open(json_file, "r") as f:
dic = json.load(f)
else:
dic = {}
# update with metrics
for metric in self.metrics:
if type(metric) in [RocAuc, PrAuc]:
m = {"y_true": metric.actual,
"y_pred": metric.pred}
else:
m = metric.aggregate()
dic[metric.name] = m
# save
with open(json_file, "w") as f:
json.dump(dic, f, indent=4, sort_keys=True)
def avg_parallel_metrics(self, epoch, test):
"""
Average metrics over parallel processes.
Args:
epoch (int): current epoch
Returns:
metric_dic (dict): dictionary of each metric name with its
corresponding averaged value.
"""
# save metrics from this process
self.save_metrics(epoch, test)
metric_dic = {}
for metric in self.metrics:
# initialize par_dic as a dictionary with None for each parallel
# folder
par_dic = {folder: None for folder in self.par_folders}
            # continue looping through other folders until you've successfully
# loaded their metric values
while None in par_dic.values():
for folder in self.par_folders:
if test:
path = os.path.join(
folder, "epoch_{}_test.json".format(epoch))
else:
path = os.path.join(
folder, "epoch_{}.json".format(epoch))
try:
with open(path, "r") as f:
path_dic = json.load(f)
par_dic[folder] = path_dic[metric.name]
except (json.JSONDecodeError, FileNotFoundError, KeyError):
continue
# average appropriately
if isinstance(metric, RootMeanSquaredError):
metric_val = np.mean(
                    np.array(list(par_dic.values())) ** 2) ** 0.5
elif type(metric) in [RocAuc, PrAuc]:
y_true = []
y_pred = []
for sub_dic in par_dic.values():
y_true += sub_dic["y_true"]
y_pred += sub_dic["y_pred"]
metric.actual = y_true
metric.pred = y_pred
metric_val = metric.aggregate()
else:
metric_val = np.mean(list(par_dic.values()))
metric_dic[metric.name] = metric_val
return metric_dic
def aggregate(self, trainer, test=False):
"""
Aggregate metrics.
Args:
trainer (Trainer): model trainer
Returns:
metric_dic (dict): dictionary of each metric name with its
corresponding averaged value.
"""
# if parallel, average over parallel metrics
if self.parallel:
metric_dic = self.avg_parallel_metrics(epoch=trainer.epoch,
test=test)
# otherwise aggregate as usual
else:
metric_dic = {}
for metric in self.metrics:
m = metric.aggregate()
metric_dic[metric.name] = m
self.metric_dic = metric_dic
return metric_dic
class CSVHook(LoggingHook):
"""Hook for logging training process to CSV files.
Args:
log_path (str): path to directory in which log files will be stored.
metrics (list): metrics to log; each metric has to be a subclass of spk.Metric.
log_train_loss (bool, optional): enable logging of training loss.
log_validation_loss (bool, optional): enable logging of validation loss.
log_learning_rate (bool, optional): enable logging of current learning rate.
every_n_epochs (int, optional): epochs after which logging takes place.
"""
def __init__(
self,
log_path,
metrics,
log_train_loss=True,
log_validation_loss=True,
log_learning_rate=True,
every_n_epochs=1,
mini_batches=1,
global_rank=0,
world_size=1
):
log_path = os.path.join(log_path, "log.csv")
super().__init__(
log_path, metrics, log_train_loss, log_validation_loss,
log_learning_rate, mini_batches, global_rank, world_size
)
self._offset = 0
self._restart = False
self.every_n_epochs = every_n_epochs
def on_train_begin(self, trainer):
if os.path.exists(self.log_path):
remove_file = False
with open(self.log_path, "r") as f:
# Ensure there is one entry apart from header
lines = f.readlines()
if len(lines) > 1:
self._offset = float(lines[-1].split(",")[0]) - time.time()
self._restart = True
else:
remove_file = True
# Empty up to header, remove to avoid adding header twice
if remove_file:
os.remove(self.log_path)
else:
self._offset = -time.time()
            # Create the log dir if it does not exist, since write cannot
# create a full path
log_dir = os.path.dirname(self.log_path)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not self._restart:
log = ""
log += "Time"
if self.log_learning_rate:
log += ",Learning rate"
if self.log_train_loss:
log += ",Train loss"
if self.log_validation_loss:
log += ",Validation loss"
if len(self.metrics) > 0:
log += ","
for i, metric in enumerate(self.metrics):
log += str(metric.name)
if i < len(self.metrics) - 1:
log += ","
with open(self.log_path, "a+") as f:
f.write(log + os.linesep)
def on_validation_end(self, trainer, val_loss):
if trainer.epoch % self.every_n_epochs == 0:
ctime = time.time() + self._offset
log = str(ctime)
if self.log_learning_rate:
log += "," + str(trainer.optimizer.param_groups[0]["lr"])
if self.log_train_loss:
log += "," + str(self._train_loss / self._counter)
if self.log_validation_loss:
log += "," + str(val_loss)
if len(self.metrics) > 0:
log += ","
metric_dic = self.aggregate(trainer)
for i, metric in enumerate(self.metrics):
m = metric_dic[metric.name]
if hasattr(m, "__iter__"):
log += ",".join([str(j) for j in m])
else:
log += str(m)
if i < len(self.metrics) - 1:
log += ","
with open(self.log_path, "a") as f:
f.write(log + os.linesep)
class TensorboardHook(LoggingHook):
"""Hook for logging training process to tensorboard.
Args:
log_path (str): path to directory in which log files will be stored.
metrics (list): metrics to log; each metric has to be a subclass of spk.Metric.
log_train_loss (bool, optional): enable logging of training loss.
log_validation_loss (bool, optional): enable logging of validation loss.
log_learning_rate (bool, optional): enable logging of current learning rate.
every_n_epochs (int, optional): epochs after which logging takes place.
img_every_n_epochs (int, optional):
log_histogram (bool, optional):
"""
def __init__(
self,
log_path,
metrics,
log_train_loss=True,
log_validation_loss=True,
log_learning_rate=True,
every_n_epochs=1,
img_every_n_epochs=10,
log_histogram=False,
mini_batches=1,
global_rank=0,
world_size=1
):
from tensorboardX import SummaryWriter
super().__init__(
log_path, metrics, log_train_loss, log_validation_loss,
log_learning_rate, mini_batches, global_rank, world_size
)
self.writer = SummaryWriter(self.log_path)
self.every_n_epochs = every_n_epochs
self.log_histogram = log_histogram
self.img_every_n_epochs = img_every_n_epochs
def on_epoch_end(self, trainer):
if trainer.epoch % self.every_n_epochs == 0:
if self.log_train_loss:
self.writer.add_scalar(
"train/loss",
self._train_loss / self._counter, trainer.epoch
)
if self.log_learning_rate:
self.writer.add_scalar(
"train/learning_rate",
trainer.optimizer.param_groups[0]["lr"],
trainer.epoch,
)
def on_validation_end(self, trainer, val_loss):
if trainer.epoch % self.every_n_epochs == 0:
metric_dic = self.aggregate(trainer)
for metric in self.metrics:
m = metric_dic[metric.name]
if np.isscalar(m):
self.writer.add_scalar(
"metrics/%s" % metric.name, float(m), trainer.epoch
)
elif m.ndim == 2:
if trainer.epoch % self.img_every_n_epochs == 0:
import matplotlib.pyplot as plt
# tensorboardX only accepts images as numpy arrays.
# we therefore convert plots in numpy array
# see https://github.com/lanpa/tensorboard-
# pytorch/blob/master/examples/matplotlib_demo.py
fig = plt.figure()
plt.colorbar(plt.pcolor(m))
fig.canvas.draw()
np_image = np.fromstring(
fig.canvas.tostring_rgb(), dtype="uint8"
)
np_image = np_image.reshape(
fig.canvas.get_width_height()[::-1] + (3,)
)
plt.close(fig)
self.writer.add_image(
"metrics/%s" % metric.name, np_image, trainer.epoch
)
if self.log_validation_loss:
self.writer.add_scalar(
"train/val_loss", float(val_loss), trainer.step)
if self.log_histogram:
for name, param in trainer._model.named_parameters():
self.writer.add_histogram(
name, param.detach().cpu().numpy(), trainer.epoch
)
def on_train_ends(self, trainer):
self.writer.close()
def on_train_failed(self, trainer):
self.writer.close()
class PrintingHook(LoggingHook):
"""Hook for logging training process to the screen.
Args:
log_path (str): path to directory in which log files will be stored.
metrics (list): metrics to log; each metric has to be a subclass of spk.Metric.
log_train_loss (bool, optional): enable logging of training loss.
log_validation_loss (bool, optional): enable logging of validation loss.
log_learning_rate (bool, optional): enable logging of current learning rate.
every_n_epochs (int, optional): epochs after which logging takes place.
separator (str, optional): separator for columns to be printed
"""
def __init__(
self,
log_path,
metrics,
log_epoch=True,
log_train_loss=True,
log_validation_loss=True,
log_learning_rate=True,
log_memory=True,
every_n_epochs=1,
separator=' ',
time_strf=r'%Y-%m-%d %H:%M:%S',
str_format=r'{1:>{0}}',
mini_batches=1,
global_rank=0,
world_size=1
):
log_path = os.path.join(log_path, "log_human_read.csv")
super().__init__(
log_path, metrics, log_train_loss, log_validation_loss,
log_learning_rate, mini_batches, global_rank, world_size
)
self.every_n_epochs = every_n_epochs
self.log_epoch = log_epoch
self._separator = separator
self.time_strf = time_strf
self._headers = {
'time': 'Time',
'epoch': 'Epoch',
'lr': 'Learning rate',
'train_loss': 'Train loss',
'val_loss': 'Validation loss',
'memory': 'GPU Memory (MB)'
}
self.str_format = str_format
self.log_memory = log_memory
def print(self, log):
print(log)
with open(self.log_path, "a+") as f:
f.write(log + os.linesep)
sys.stdout.flush()
def on_train_begin(self, trainer):
log_dir = os.path.dirname(self.log_path)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log = self.str_format.format(
len(time.strftime(self.time_strf)),
self._headers['time']
)
if self.log_epoch:
log += self._separator
log += self.str_format.format(
len(self._headers['epoch']), self._headers['epoch']
)
if self.log_learning_rate:
log += self._separator
log += self.str_format.format(
len(self._headers['lr']), self._headers['lr']
)
if self.log_train_loss:
log += self._separator
log += self.str_format.format(
len(self._headers['train_loss']), self._headers['train_loss']
)
if self.log_validation_loss:
log += self._separator
log += self.str_format.format(
len(self._headers['val_loss']), self._headers['val_loss']
)
if len(self.metrics) > 0:
log += self._separator
for i, metric in enumerate(self.metrics):
header = str(metric.name)
log += self.str_format.format(len(header), header)
log += self._separator
if self.log_memory:
log += self.str_format.format(
len(self._headers['memory']), self._headers['memory']
)
self.print(log)
def on_validation_end(self, trainer, val_loss):
if trainer.epoch % self.every_n_epochs == 0:
log = time.strftime(self.time_strf)
if self.log_epoch:
log += self._separator
log += self.str_format.format(
len(self._headers['epoch']),
'%d' % trainer.epoch
)
if self.log_learning_rate:
log += self._separator
log += self.str_format.format(
len(self._headers['lr']),
'%.3e' % trainer.optimizer.param_groups[0]['lr']
)
if self.log_train_loss:
log += self._separator
log += self.str_format.format(
len(self._headers['train_loss']),
'%.4f' % (self._train_loss / self._counter)
)
if self.log_validation_loss:
log += self._separator
log += self.str_format.format(
len(self._headers['val_loss']),
'%.4f' % val_loss
)
if len(self.metrics) > 0:
log += self._separator
metric_dic = self.aggregate(trainer)
for i, metric in enumerate(self.metrics):
m = metric_dic[metric.name]
if hasattr(m, '__iter__'):
log += self._separator.join([str(j) for j in m])
else:
log += self.str_format.format(
len(metric.name),
'%.4f' % m
)
log += self._separator
if self.log_memory:
memory = torch.cuda.max_memory_allocated(device=None) * 1e-6
log += self.str_format.format(
len(self._headers['memory']),
'%d' % memory
)
self.print(log)
def on_train_failed(self, trainer):
self.print('the training has failed')
```
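
A short sketch of how these hooks are typically wired together. The hook constructors match the signatures defined above; the `RootMeanSquaredError('energy')` target argument and the log directory are assumptions, and `nff.train` re-exporting the hooks follows the usage in `get_trainer` above.

```python
# Illustrative wiring of the hooks above into a training run.
import nff.train as nfftrain
from nff.train.metrics import RootMeanSquaredError

metrics = [RootMeanSquaredError('energy')]   # target name assumed
log_dir = './model/log'

hooks = [
    nfftrain.PrintingHook(log_dir, metrics, log_memory=False, separator=' | '),
    nfftrain.CSVHook(log_dir, metrics, every_n_epochs=1),
]
# `hooks` is then passed to the Trainer, as done in get_trainer() above.
```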
#### File: nff/utils/functions.py
```python
import numpy as np
from scipy.optimize import brentq
from scipy import special as sp
import sympy as sym
import copy
import torch
import math
EPS = 1e-15
# DimeNet
def Jn(r, n):
"""
numerical spherical bessel functions of order n
"""
return np.sqrt(np.pi/(2*r)) * sp.jv(n+0.5, r)
def Jn_zeros(n, k):
"""
Compute the first k zeros of the spherical bessel functions up to order n (excluded)
"""
zerosj = np.zeros((n, k), dtype="float32")
zerosj[0] = np.arange(1, k + 1) * np.pi
points = np.arange(1, k + n) * np.pi
racines = np.zeros(k + n - 1, dtype="float32")
for i in range(1, n):
for j in range(k + n - 1 - i):
foo = brentq(Jn, points[j], points[j + 1], (i,))
racines[j] = foo
points = racines
zerosj[i][:k] = racines[:k]
return zerosj
def spherical_bessel_formulas(n):
"""
Computes the sympy formulas for the spherical bessel functions up to order n (excluded)
"""
x = sym.symbols('x')
f = [sym.sin(x)/x]
a = sym.sin(x)/x
for i in range(1, n):
b = sym.diff(a, x)/x
f += [sym.simplify(b*(-x)**i)]
a = sym.simplify(b)
return f
def bessel_basis(n, k):
"""
Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to
order n (excluded) and maximum frequency k (excluded).
"""
zeros = Jn_zeros(n, k)
normalizer = []
for order in range(n):
normalizer_tmp = []
for i in range(k):
normalizer_tmp += [0.5*Jn(zeros[order, i], order+1)**2]
normalizer_tmp = 1/np.array(normalizer_tmp)**0.5
normalizer += [normalizer_tmp]
f = spherical_bessel_formulas(n)
x = sym.symbols('x')
bess_basis = []
for order in range(n):
bess_basis_tmp = []
for i in range(k):
bess_basis_tmp += [sym.simplify(normalizer[order]
[i]*f[order].subs(
x, zeros[order, i]*x))]
bess_basis += [bess_basis_tmp]
return bess_basis
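# --- illustrative note (not part of the original module) ------------------
# The nested sympy expressions returned by bessel_basis() are typically
# turned into numerical callables with sym.lambdify, e.g.:
#
#   basis = bessel_basis(n=2, k=3)
#   x = sym.symbols('x')
#   f_00 = sym.lambdify(x, basis[0][0], modules='numpy')   # order 0, root 0
#   f_00(np.linspace(0.1, 1.0, 5))
# ---------------------------------------------------------------------------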
def sph_harm_prefactor(l, m):
"""
Computes the constant pre-factor for the spherical harmonic of degree l and order m
input:
l: int, l>=0
m: int, -l<=m<=l
"""
return ((2*l+1) * np.math.factorial(l-abs(m))
/ (4*np.pi*np.math.factorial(l+abs(m))))**0.5
def associated_legendre_polynomials(l, zero_m_only=True):
"""
Computes sympy formulas of the associated legendre polynomials up to order l (excluded).
"""
z = sym.symbols('z')
P_l_m = [[0]*(j+1) for j in range(l)]
P_l_m[0][0] = 1
if l > 0:
P_l_m[1][0] = z
for j in range(2, l):
P_l_m[j][0] = sym.simplify(
((2*j-1)*z*P_l_m[j-1][0] - (j-1)*P_l_m[j-2][0])/j)
if not zero_m_only:
for i in range(1, l):
P_l_m[i][i] = sym.simplify((1-2*i)*P_l_m[i-1][i-1])
if i + 1 < l:
P_l_m[i+1][i] = sym.simplify((2*i+1)*z*P_l_m[i][i])
for j in range(i + 2, l):
P_l_m[j][i] = sym.simplify(
((2*j-1) * z * P_l_m[j-1][i]
- (i+j-1) * P_l_m[j-2][i]) / (j - i))
return P_l_m
def real_sph_harm(l,
zero_m_only=True,
spherical_coordinates=True):
"""
    Computes formula strings of the real part of the spherical harmonics up to order l (excluded).
Variables are either cartesian coordinates x,y,z on the unit sphere or spherical coordinates phi and theta.
"""
if not zero_m_only:
S_m = [0]
C_m = [1]
for i in range(1, l):
x = sym.symbols('x')
y = sym.symbols('y')
S_m += [x*S_m[i-1] + y*C_m[i-1]]
C_m += [x*C_m[i-1] - y*S_m[i-1]]
P_l_m = associated_legendre_polynomials(l, zero_m_only)
if spherical_coordinates:
theta = sym.symbols('theta')
z = sym.symbols('z')
for i in range(len(P_l_m)):
for j in range(len(P_l_m[i])):
if type(P_l_m[i][j]) != int:
P_l_m[i][j] = P_l_m[i][j].subs(z, sym.cos(theta))
if not zero_m_only:
phi = sym.symbols('phi')
for i in range(len(S_m)):
S_m[i] = S_m[i].subs(x, sym.sin(
theta)*sym.cos(phi)).subs(y, sym.sin(theta)*sym.sin(phi))
for i in range(len(C_m)):
C_m[i] = C_m[i].subs(x, sym.sin(
theta)*sym.cos(phi)).subs(y, sym.sin(theta)*sym.sin(phi))
Y_func_l_m = [['0']*(2*j + 1) for j in range(l)]
for i in range(l):
Y_func_l_m[i][0] = sym.simplify(sph_harm_prefactor(i, 0) * P_l_m[i][0])
if not zero_m_only:
for i in range(1, l):
for j in range(1, i + 1):
Y_func_l_m[i][j] = sym.simplify(
2**0.5 * sph_harm_prefactor(i, j) * C_m[j] * P_l_m[i][j])
for i in range(1, l):
for j in range(1, i + 1):
Y_func_l_m[i][-j] = sym.simplify(
2**0.5 * sph_harm_prefactor(i, -j) * S_m[j] * P_l_m[i][j])
return Y_func_l_m
# SpookyNet
def A_m(x, y, m):
device = x.device
p_vals = torch.arange(0, m + 1,
device=device)
q_vals = m - p_vals
x_p = x.reshape(-1, 1) ** p_vals
y_q = y.reshape(-1, 1) ** q_vals
sin = torch.sin(np.pi / 2 * (m - p_vals))
binoms = (torch.Tensor([sp.binom(m, int(p))
for p in p_vals])
.to(device))
out = (binoms * x_p * y_q * sin).sum(-1)
return out
def B_m(x, y, m):
device = x.device
p_vals = torch.arange(0, m + 1,
device=device)
q_vals = m - p_vals
x_p = x.reshape(-1, 1) ** p_vals
y_q = y.reshape(-1, 1) ** q_vals
cos = torch.cos(np.pi / 2 * (m - p_vals))
binoms = (torch.Tensor([sp.binom(m, int(p)) for p in p_vals])
.to(device))
out = (binoms * x_p * y_q * cos).sum(-1)
return out
def c_plm(p, l, m):
terms = [(-1) ** p,
1 / (2 ** l),
sp.binom(l, p),
sp.binom(2 * l - 2 * p, l),
sp.factorial(l - 2 * p),
1 / sp.factorial(l - 2 * p - m)]
out = torch.Tensor(terms).prod()
return out
def make_c_table(l_max):
c_table = {}
for l in range(l_max + 1):
for m in range(-l, l+1):
for p in range(0, math.floor((l - m) / 2) + 1):
c_table[(p, l, m)] = c_plm(p, l, m)
return c_table
def pi_l_m(r,
z,
l,
m,
c_table):
device = r.device
pref = (sp.factorial(l - m) / sp.factorial(l + m)) ** 0.5
p_vals = (torch.arange(0, math.floor((l - m) / 2) + 1,
device=device,
dtype=torch.float))
c_coefs = (torch.Tensor([c_table[(int(p), l, m)]
for p in p_vals])
.to(device))
r_p = r.reshape(-1, 1) ** (2 * p_vals - l)
z_q = z.reshape(-1, 1) ** (l - 2 * p_vals - m)
out = pref * (c_coefs * r_p * z_q).sum(-1)
return out
def norm(vec):
result = ((vec ** 2 + EPS).sum(-1)) ** 0.5
return result
def y_lm(r_ij,
r,
l,
m,
c_table):
x = r_ij[:, 0].reshape(-1, 1)
y = r_ij[:, 1].reshape(-1, 1)
z = r_ij[:, 2].reshape(-1, 1)
pi = pi_l_m(r=r,
z=z,
l=l,
m=abs(m),
c_table=c_table)
if m < 0:
a = A_m(x, y, abs(m))
out = (2 ** 0.5) * pi * a
elif m == 0:
out = pi
elif m > 0:
b = B_m(x, y, abs(m))
out = (2 ** 0.5) * pi * b
return out
def make_y_lm(l_max):
c_table = make_c_table(l_max)
def func(r_ij, r, l, m):
out = y_lm(r_ij=r_ij,
r=r,
l=l,
m=m,
c_table=c_table)
return out
return func
def spooky_f_cut(r, r_cut):
arg = r ** 2 / ((r_cut - r) * (r_cut + r))
# arg < 20 is for numerical stability
# Anything > 20 will give under 1e-9
output = torch.where(
(r < r_cut) * (arg < 20),
torch.exp(-arg),
torch.Tensor([0]).to(r.device)
)
return output
def b_k(x,
bern_k):
device = x.device
k_vals = (torch.arange(0, bern_k, device=device)
.to(torch.float))
binoms = (torch.Tensor([sp.binom(bern_k - 1, int(k))
for k in k_vals])
.to(device))
out = binoms * (x ** k_vals) * (1-x) ** (bern_k - 1 - k_vals)
return out
def rho_k(r,
r_cut,
bern_k,
gamma):
arg = torch.exp(-gamma * r)
out = b_k(arg, bern_k) * spooky_f_cut(r, r_cut)
return out
def get_g_func(l,
r_cut,
bern_k,
gamma,
y_lm_fn):
def fn(r_ij):
r = norm(r_ij).reshape(-1, 1)
n_pairs = r_ij.shape[0]
device = r_ij.device
m_vals = list(range(-l, l + 1))
y = torch.stack([y_lm_fn(r_ij, r, l, m) for m in
m_vals]).transpose(0, 1)
rho = rho_k(r, r_cut, bern_k, gamma)
g = torch.ones(n_pairs,
bern_k,
len(m_vals),
device=device)
g = g * rho.reshape(n_pairs, -1, 1)
g = g * y.reshape(n_pairs, 1, -1)
return g
return fn
def make_g_funcs(bern_k,
gamma,
r_cut,
l_max=2):
y_lm_fn = make_y_lm(l_max)
g_funcs = {}
letters = {0: "s", 1: "p", 2: "d"}
for l in range(0, l_max + 1):
letter = letters[l]
name = f"g_{letter}"
g_func = get_g_func(l=l,
r_cut=r_cut,
bern_k=bern_k,
gamma=gamma,
y_lm_fn=y_lm_fn)
g_funcs[name] = copy.deepcopy(g_func)
return g_funcs
```
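
A small shape-check sketch for the SpookyNet-style basis functions above; the `bern_k`, `gamma` and `r_cut` values are arbitrary placeholders. The trailing dimension of each output is 2l+1.

```python
# Illustrative shape check; parameter values are placeholders.
import torch
from nff.utils.functions import make_g_funcs

r_ij = torch.randn(8, 3) * 2.0                      # 8 displacement vectors
g_funcs = make_g_funcs(bern_k=16, gamma=0.5, r_cut=5.0, l_max=2)

print(g_funcs["g_s"](r_ij).shape)    # torch.Size([8, 16, 1])  l = 0
print(g_funcs["g_p"](r_ij).shape)    # torch.Size([8, 16, 3])  l = 1
print(g_funcs["g_d"](r_ij).shape)    # torch.Size([8, 16, 5])  l = 2
```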
#### File: nff/utils/misc.py
```python
import argparse
import sys
from tqdm import tqdm
import json
import subprocess
import os
import random
import numpy as np
import torch
from sklearn.metrics import (roc_auc_score, auc, precision_recall_curve,
r2_score, accuracy_score, log_loss)
# optimization goal for various metrics
METRIC_DIC = {"pr_auc": "maximize",
"roc_auc": "maximize",
"r2": "maximize",
"class_loss": "minimize",
"regress_loss": "minimize",
"mae": "minimize",
"mse": "minimize"}
METRICS = list(METRIC_DIC.keys())
# transform from chemprop syntax to our syntax for the metrics
CHEMPROP_TRANSFORM = {"auc": "roc_auc",
"prc-auc": "pr_auc",
"binary_cross_entropy": "class_loss",
"mse": "regress_loss"}
# metrics available in chemprop
CHEMPROP_METRICS = ["auc",
"prc-auc",
"rmse",
"mae",
"mse",
"r2",
"accuracy",
"cross_entropy",
"binary_cross_entropy"]
def tqdm_enum(iter):
"""
Wrap tqdm around `enumerate`.
Args:
iter (iterable): an iterable (e.g. list)
Returns
i (int): current index
y: current value
"""
i = 0
for y in tqdm(iter):
yield i, y
i += 1
def parse_args(parser, config_flag="config_file"):
"""
Parse arguments.
Args:
parser (argparse.ArgumentParser): argument parser
config_flag (str): name of the arg key
that gives the name of the config file.
Returns:
args (argparse.Namespace): arguments
"""
# parse the arguments
args = parser.parse_args()
# if the config path is specified, then load
# arguments from that file and apply the results
# to `args`
config_path = getattr(args, config_flag, None)
if config_path is not None:
with open(config_path, "r") as f:
config_args = json.load(f)
for key, val in config_args.items():
if hasattr(args, key):
setattr(args, key, val)
return args
def fprint(msg):
"""
Print a string immediately.
Args:
msg (str): string to print
Returns:
None
"""
print(msg)
sys.stdout.flush()
def bash_command(cmd):
""" Run a command from the command line using subprocess.
Args:
cmd (str): command
Returns:
None
"""
return subprocess.Popen(cmd, shell=True, executable='/bin/bash')
def convert_metric(metric):
"""
Convert a metric name to a fixed name that can be used in
various scripts.
Args:
metric (str): input metric
Returns:
metric (str): output metric
"""
if metric in ["prc_auc", "prc-auc"]:
metric = "pr_auc"
elif metric in ["auc", "roc-auc"]:
metric = "roc_auc"
return metric
def prepare_metric(lines, metric):
"""
    Get various metric quantities before parsing a log file.
Args:
lines (list[str]): lines in the log file
metric (str): name of metric
Returns:
idx (int): index at which the metric score occurs
when the given line has been split by `|`
best_score (float): initial best score
best_epoch (int): initial best_epoch
optim (str): goal of the metric optimization (i.e.
minimize or maximize.)
"""
header_items = [i.strip() for i in lines[0].split("|")]
metric = convert_metric(metric)
if "loss" in metric:
idx = header_items.index("Validation loss")
else:
for i, item in enumerate(header_items):
sub_keys = metric.split("_")
if all([key.lower() in item.lower()
for key in sub_keys]):
idx = i
optim = METRIC_DIC[metric]
if optim == "minimize":
best_score = float("inf")
else:
best_score = -float("inf")
best_epoch = -1
return idx, best_score, best_epoch, optim
def parse_score(model_path, metric):
"""
Find the best score and best epoch according to a given metric.
Args:
model_path (str): path to the training folder
metric (str): name of metric
Returns:
best_score (float): best validation score
best_epoch (int): epoch with the best validation score
"""
log_path = os.path.join(model_path, "log_human_read.csv")
with open(log_path, "r") as f:
lines = f.readlines()
idx, best_score, best_epoch, optim = prepare_metric(
lines=lines,
metric=metric)
for line in lines:
splits = [i.strip() for i in line.split("|")]
try:
score = float(splits[idx])
except (ValueError, IndexError):
continue
if any([(optim == "minimize" and score < best_score),
(optim == "maximize" and score > best_score)]):
best_score = score
best_epoch = splits[1]
return best_score, best_epoch
def read_csv(path):
"""
Read a csv into a dictionary.
Args:
path (str): path to the csv file
Returns:
dic (dict): dictionary version of the file
"""
with open(path, "r") as f:
lines = f.readlines()
keys = lines[0].strip().split(",")
dic = {key: [] for key in keys}
for line in lines[1:]:
vals = line.strip().split(",")
for key, val in zip(keys, vals):
if val.isdigit():
dic[key].append(int(val))
else:
try:
dic[key].append(float(val))
except ValueError:
dic[key].append(val)
return dic
def write_csv(path, dic):
"""
Write a dictionary to a csv.
Args:
path (str): path to the csv file
dic (dict): dictionary
Returns:
None
"""
keys = sorted(list(dic.keys()))
if "smiles" in keys:
keys.remove("smiles")
keys.insert(0, "smiles")
lines = [",".join(keys)]
for idx in range(len(dic[keys[0]])):
vals = [dic[key][idx] for key in keys]
line = ",".join(str(val) for val in vals)
lines.append(line)
text = "\n".join(lines)
with open(path, "w") as f:
f.write(text)
def prop_split(max_specs,
dataset_type,
props,
sample_dic,
seed):
"""
Sample a set of smiles strings by up to a maximum number. If the
property of interest is a binary value, try to get as many of the
underrepresented class as possible.
Args:
max_specs (int): maximum number of species
dataset_type (str): type of problem (classification or regression)
props (list[str]): names of properties you'll be fitting
sample_dic (dict): dictionary of the form {smiles: sub_dic} for the
set of smiles strings, where sub_dic contains other information,
e.g. about `props`.
seed (int): random seed for sampling
Returns:
keep_smiles (list[str]): sampled smiles strings.
"""
random.seed(seed)
if max_specs is not None and dataset_type == "classification":
msg = "Not implemented for multiclass"
assert len(props) == 1, msg
prop = props[0]
pos_smiles = [key for key, sub_dic in sample_dic.items()
if sub_dic.get(prop) == 1]
neg_smiles = [key for key, sub_dic in sample_dic.items()
if sub_dic.get(prop) == 0]
        # find the underrepresented and overrepresented class
if len(pos_smiles) < len(neg_smiles):
underrep = pos_smiles
overrep = neg_smiles
else:
underrep = neg_smiles
overrep = pos_smiles
# if possible, keep all of the underrepresented class
if max_specs >= 2 * len(underrep):
random.shuffle(overrep)
num_left = max_specs - len(underrep)
keep_smiles = underrep + overrep[:num_left]
# otherwise create a dataset with half of each
else:
random.shuffle(underrep)
random.shuffle(overrep)
keep_smiles = (underrep[:max_specs // 2]
+ overrep[max_specs // 2:])
else:
keep_smiles = list(sample_dic.keys())
# if setting a maximum, need to shuffle in order
# to take random smiles
if max_specs is not None:
random.shuffle(keep_smiles)
if max_specs is not None:
keep_smiles = keep_smiles[:max_specs]
return keep_smiles
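# --- illustrative note (not part of the original module) ------------------
# Example of the balanced sampling above with a hypothetical binary prop:
#
#   sample_dic = {"C": {"binds_target": 1},
#                 "CC": {"binds_target": 0},
#                 "CCC": {"binds_target": 0}}
#   keep = prop_split(max_specs=2, dataset_type="classification",
#                     props=["binds_target"], sample_dic=sample_dic, seed=0)
#   # -> one positive and one negative SMILES
# ---------------------------------------------------------------------------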
def get_split_names(train_only,
val_only,
test_only):
"""
Get names of dataset splits.
Args:
train_only (bool): only load the training set
val_only (bool): only load the validation set
test_only (bool): only load the test set
Returns:
names (list[str]): names of splits
(train, val, and/or test) that we're
monitoring.
"""
only_dic = {"train": train_only,
"val": val_only,
"test": test_only}
requested = [name for name, only in only_dic.items()
if only]
if len(requested) > 1:
string = ", ".join(requested)
msg = (f"Requested {string}, which are mutually exclusive")
raise Exception(msg)
if len(requested) != 0:
names = requested
else:
names = ["train", "val", "test"]
return names
def preprocess_class(pred):
"""
Preprocess classifier predictions. This applies,
for example, if you train an sklearn regressor
rather than classifier, which doesn't necessarily
predict a value between 0 and 1.
Args:
pred (np.array or torch.Tensor or list): predictions
Returns:
pred (np.array or torch.Tensor or list): predictions
with max 1 and min 0.
"""
to_list = False
if type(pred) is list:
pred = np.array(pred)
to_list = True
# make sure the min and max are 0 and 1
pred[pred < 0] = 0
pred[pred > 1] = 1
if to_list:
pred = pred.tolist()
return pred
def apply_metric(metric, pred, actual):
"""
Apply a metric to a set of predictions.
Args:
metric (str): name of metric
pred (iterable): predicted values
actual (iterable): actual values
Returns:
score (float): metric score
"""
if metric == "auc":
pred = preprocess_class(pred)
if max(pred) == 0:
score = 0
else:
score = roc_auc_score(y_true=actual, y_score=pred)
elif metric == "prc-auc":
pred = preprocess_class(pred)
if max(pred) == 0:
score = 0
else:
precision, recall, _ = precision_recall_curve(
y_true=actual, probas_pred=pred)
score = auc(recall, precision)
elif metric == "mse":
score = ((np.array(pred) - np.array(actual)) ** 2).mean()
elif metric == "rmse":
score = ((np.array(pred) - np.array(actual)) ** 2).mean() ** 0.5
elif metric == "mae":
score = (abs(np.array(pred) - np.array(actual))).mean()
elif metric == "r2":
score = r2_score(y_true=actual, y_pred=pred)
elif metric == "accuracy":
np_pred = np.array(pred)
mask = np_pred >= 0.5
np_pred[mask] = 1
np_pred[np.bitwise_not(mask)] = 0
score = accuracy_score(y_true=actual, y_pred=np_pred)
elif metric in ["cross_entropy", "binary_cross_entropy"]:
        score = log_loss(y_true=actual, y_pred=pred)
return score
def avg_distances(dset):
"""
Args:
dset (nff.nn.data.Dataset): NFF dataset where all the geometries are
different conformers for one species.
"""
# Get the neighbor list that includes the neighbor list of each conformer
all_nbrs = []
for nbrs in dset.props['nbr_list']:
for pair in nbrs:
all_nbrs.append(tuple(pair.tolist()))
all_nbrs_tuple = list(set(tuple(all_nbrs)))
all_nbrs = torch.LongTensor([list(i) for i in all_nbrs_tuple])
num_confs = len(dset)
all_distances = torch.zeros(num_confs, all_nbrs.shape[0])
for i, batch in enumerate(dset):
xyz = batch["nxyz"][:, 1:]
all_distances[i] = ((xyz[all_nbrs[:, 0]] - xyz[all_nbrs[:, 1]])
.pow(2).sum(1).sqrt())
weights = dset.props["weights"].reshape(-1, 1)
avg_d = (all_distances * weights).sum(0)
return all_nbrs, avg_d
def cat_props(props):
new_props = {}
for key, val in props.items():
if isinstance(val, list):
if isinstance(val[0], torch.Tensor):
if len(val[0].shape) == 0:
new_props[key] = torch.stack(val)
else:
new_props[key] = torch.cat(val)
else:
new_props[key] = val
elif isinstance(val, torch.Tensor):
new_props[key] = val
return new_props
def kron(a, b):
ein = torch.einsum("ab,cd-> acbd", a, b)
out = ein.view(a.size(0) * b.size(0),
a.size(1) * b.size(1))
return out
def load_defaults(direc,
arg_path):
"""
Load default arguments from a JSON file
"""
args_path = os.path.join(direc, arg_path)
with open(args_path, 'r') as f:
default_args = json.load(f)
return default_args
def parse_args_from_json(arg_path,
direc):
default_args = load_defaults(arg_path=arg_path,
direc=direc)
description = default_args['description']
parser = argparse.ArgumentParser(description=description)
default_args.pop('description')
required = parser.add_argument_group(('required arguments (either in '
'the command line or the config '
'file)'))
optional = parser.add_argument_group('optional arguments')
for name, info in default_args.items():
keys = ['default', 'choices', 'nargs']
kwargs = {key: info[key] for key in keys
if key in info}
# Required arguments get put in one group and optional ones in another
# so that they're separated in `--help` . We don't actually set
# required=True for required ones, though, because they can be given in
# the config file instead of the command line
group = required if info.get('required', False) else optional
group.add_argument(f'--{name}',
type=eval(info['type']),
help=info['help'],
**kwargs)
args = parser.parse_args()
return args
```
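
A few quick, self-contained calls to the helpers above with made-up numbers, just to show the expected shapes of the inputs and outputs.

```python
# Illustrative calls with toy data.
from nff.utils.misc import apply_metric, convert_metric, read_csv, write_csv

y_true = [0, 1, 1, 0, 1]
y_pred = [0.1, 0.8, 0.6, 0.3, 0.9]

print(apply_metric("auc", y_pred, y_true))    # ROC-AUC
print(apply_metric("mae", y_pred, y_true))    # mean absolute error
print(convert_metric("prc-auc"))              # -> "pr_auc"

# Round-trip a small property table through the csv helpers.
write_csv("tmp.csv", {"smiles": ["C", "CC"], "energy": [0.0, -1.2]})
print(read_csv("tmp.csv"))                    # {'smiles': ['C', 'CC'], 'energy': [0.0, -1.2]}
```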
#### File: utils/script_utils/setup.py
```python
import os
import logging
from shutil import rmtree
from nff.utils.tools import to_json, set_random_seed, read_from_json
__all__ = ["setup_run"]
def setup_run(args):
argparse_dict = vars(args)
jsonpath = os.path.join(args.model_path, "args.json")
# absolute paths
argparse_dict['data_path'] = os.path.abspath(argparse_dict['data_path'])
argparse_dict['model_path'] = os.path.abspath(argparse_dict['model_path'])
if args.mode == "train":
if args.overwrite and os.path.exists(args.model_path):
logging.info("existing model will be overwritten...")
rmtree(args.model_path)
if not os.path.exists(args.model_path):
os.makedirs(args.model_path)
to_json(jsonpath, argparse_dict)
set_random_seed(args.seed)
train_args = args
else:
train_args = read_from_json(jsonpath)
return train_args
```
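
A minimal sketch of the argparse plumbing `setup_run` expects; the module path in the import is inferred from the file header above, and the flag choices and defaults are placeholders.

```python
# Hypothetical sketch; flag values are placeholders.
import argparse
from nff.utils.script_utils.setup import setup_run   # path assumed from file header

parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=['train', 'eval'], default='train')
parser.add_argument('--data_path', default='dataset.pth.tar')
parser.add_argument('--model_path', default='./model')
parser.add_argument('--overwrite', action='store_true')
parser.add_argument('--seed', type=int, default=0)

args = parser.parse_args()
train_args = setup_run(args)   # writes (or reads) args.json in model_path
```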
#### File: make_dset/get_dset/dset_from_pickles.py
```python
import pickle
import json
import os
import torch
import numpy as np
import argparse
from tqdm import tqdm
from rdkit import Chem
import logging
from datetime import datetime
import shutil
from nff.data import Dataset, concatenate_dict
from nff.utils import tqdm_enum, parse_args, fprint, read_csv, avg_distances
import copy
KEY_MAP = {"rd_mol": "nxyz",
"boltzmannweight": "weights",
"relativeenergy": "energy"}
# These are keys that confuse the dataset.
EXCLUDE_KEYS = ["totalconfs", "datasets", "conformerweights",
"uncleaned_smiles", "poplowestpct"]
# these keys are for per-conformer quantities
CONF_KEYS = ["rd_mols", "bonded_nbr_list", "bond_features",
"atom_features"]
# disable logger to avoid annoying pickle messages
logger = logging.getLogger()
logger.disabled = True
def mol_to_smiles(rd_mol):
"""
Get the canonical SMILES from an RDKit mol.
Args:
rd_mol (rdkit.Chem.rdchem.Mol): rdkit Mol
Returns:
smiles (str): canonical smiles
"""
smiles = Chem.MolToSmiles(rd_mol)
new_mol = Chem.MolFromSmiles(smiles)
smiles = Chem.MolToSmiles(new_mol)
return smiles
def trim_dset(dset, good_idx):
"""
Trim a dataest based on a set of indices you want to keep.
Args:
dset (nff.data.dataset): NFF dataset
good_idx (list[int]): indices that you want to keep
Returns:
dset (nff.data.dataset): trimmmed NFF dataset
"""
for key, val in dset.props.items():
# if it's a list, take element by element and put in list
if type(val) is list:
dset.props[key] = [val[i] for i in good_idx]
# otherwise can take the slice all at once
else:
dset.props[key] = val[good_idx]
return dset
def get_bad_smiles(dset, good_idx):
"""
Get the SMILES whose indices are not in `good_idx`.
Args:
dset (nff.data.dataset): NFF dataset
good_idx (list[int]): indices that you want to keep
Returns:
bad_smiles (list[str]): smiles whose indices are not in
`good_idx`.
"""
bad_smiles = [smiles for i, smiles in enumerate(dset.props["smiles"])
if i not in good_idx]
return bad_smiles
def filter_same_smiles(dset):
"""
Filter out species whose conformers don't all have the same SMILES. Can happen
because, for example, CREST simulations can be reactive. This won't happen if
conformers are generated using RDKit.
Args:
dset (nff.data.dataset): NFF dataset
Returns:
dset (nff.data.dataset): NFF dataset trimmed for conformers that have different
SMILES
bad_smiles (list[str]): The SMILES strings that we're getting rid of
"""
good_idx = []
for i, batch in tqdm_enum(dset):
rd_mols = batch["rd_mols"]
smiles_list = [mol_to_smiles(mol) for mol in rd_mols]
unique_smiles = list(set(smiles_list))
# only keep if there's one unique SMILES string
if len(unique_smiles) == 1:
good_idx.append(i)
# must be before trimming
bad_smiles = get_bad_smiles(dset, good_idx)
# trim
dset = trim_dset(dset, good_idx)
return dset, bad_smiles
def filter_bonds_in_nbr(cutoff, dset):
"""
Filter out conformers whose bonds are not within the cutoff distance
that defines the neighbor list. CP3D can't use these conformers because
there will be bonds that don't have distance features, as the two atoms are
not within each other's cutoff. Any conformer with bonds > 5 A is probably
not too accurate anyway.
Args:
cutoff (float): neighbor list cutoff
dset (nff.data.dataset): NFF dataset
Returns:
dset (nff.data.dataset): NFF dataset trimmed for above criterion
SMILES
bad_smiles (list[str]): The SMILES strings that we're getting rid of
"""
good_idx = []
for i, batch in tqdm_enum(dset):
bond_list = batch["bonded_nbr_list"]
nxyz = batch["nxyz"]
# calculate the bond lengths
bond_lens = (nxyz[:, 1:][bond_list[:, 0]] -
nxyz[:, 1:][bond_list[:, 1]]).norm(dim=1)
# only valid if they're less than the cutoff
valid = (bond_lens < cutoff).all()
if valid:
good_idx.append(i)
bad_smiles = get_bad_smiles(dset, good_idx)
dset = trim_dset(dset, good_idx)
return dset, bad_smiles
def get_thread_dic(sample_dic, thread, num_threads):
"""
Given a thread (i.e., an index that tells us which
section of the total dataset we're creating and saving),
return the section of `sample_dic` that includes SMILES
strings in this thread.
Args:
sample_dic (dict): Sample of `summary_dic` that is used
in this combined dataset. `summary_dic` contains
information about all smiles strings we have, except
for their conformers.
thread (int): Index that tells us which section of the
total dataset that we're creating and saving
num_threads (int): Total number of sections into which
we're splitting and saving the dataset.
Returns:
sample_dic (dict): `sample_dic`, but only with species
from the thread we're looking at.
"""
# sort the keys so the order is reproducible
keys = np.array(sorted(list(
sample_dic.keys())))
# split the keys into `num_threads` sections and take
# the keys in the element `thread`
split_keys = np.array_split(keys, num_threads)
thread_keys = split_keys[thread]
# use these keys in `sample_dic`
sample_dic = {key: sample_dic[key]
for key in thread_keys}
return sample_dic
def get_splits(sample_dic,
csv_folder):
"""
Figure out which split (train, val or test) each SMILES in
`sample_dic` belongs to.
Args:
sample_dic (dict): Sample of `summary_dic` that is used
in this combined dataset. `summary_dic` contains
information about all smiles strings we have, except
for their conformers.
csv_folder (str): path to folder that contains the csv files
with the test/val/train smiles.
Returns:
sample_dic (dict): `sample_dic`, but with each sub-dictionary
updated to contain the split assignment of the SMILES.
"""
for name in ["train", "val", "test"]:
path = os.path.join(csv_folder, f"{name}_full.csv")
csv_dic = read_csv(path)
for i, smiles in enumerate(csv_dic["smiles"]):
# add any properties present in the csv
props = {key: csv_dic[key][i] for key in csv_dic.keys()
if key != "smiles"}
sample_dic[smiles].update({"split": name,
**props})
# get rid of anything that doesn't have a split labels
keys = list(sample_dic.keys())
for key in keys:
if "split" not in sample_dic[key]:
sample_dic.pop(key)
return sample_dic
def resave_splits(csv_folder,
remove_smiles):
"""
Re-save the SMILES splits accounting for the fact that not all
species made it into this dataset.
Args:
csv_folder (str): path to folder that contains the csv files
with the test/val/train smiles.
remove_smiles (list[str]): any SMILES strings that had to be
removed from the NFF dataset.
Returns:
None
"""
split_names = ["train", "val", "test"]
# files have the form train_smiles.csv, train_full.csv, etc.,
# where the "smiles" files just contain the SMILES strings,
# but the "full" files also contain properties
suffixes = ["smiles", "full"]
for name in split_names:
for suffix in suffixes:
while True:
path = os.path.join(csv_folder, f"{name}_{suffix}.csv")
with open(path, "r") as f:
lines = f.readlines()
keep_lines = [lines[0]]
for line in lines[1:]:
smiles = line.split(",")[0].strip()
# don't keep the line if it contains a SMILES string
# in `remove_smiles`
if smiles not in remove_smiles:
keep_lines.append(line)
new_text = "".join(keep_lines)
# re-save the new text to a temporary file
dt = datetime.now()
ms_time = int(float(dt.strftime("%Y%m%d%H%M%S.%f")) * 1e3)
tmp_path = f"{ms_time}.csv"
with open(tmp_path, "w") as f:
f.write(new_text)
# keep looping until you're sure that the file you
# loaded and modified hasn't been changed by another
# process while you were working
with open(path, "r") as f:
new_lines = f.readlines()
if new_lines == lines:
shutil.move(tmp_path, path)
break
os.remove(tmp_path)
def get_sample(summary_dic,
csv_folder,
thread=None,
num_threads=None):
"""
Get the sample of `summary_dic` that is annotated with the
test/train splits of this dataset, and only the SMILES relevant
to this thread (i.e., this chunk of the dataset that we're
currently working on).
Args:
summary_dic (dict): dictionary of the form {smiles: sub_dic},
where `sub_dic` is a dictionary with all the species properties
apart from its conformers.
csv_folder (str): path to folder that contains the csv files
with the test/val/train smiles.
thread (int, optional): Index that tells us which section of the
total dataset that we're creating and saving
num_threads (int, optional): Total number of sections into which
we're splitting and saving the dataset.
Returns:
sample_dic (dict): The sample of `summary_dic`.
"""
sample_dic = copy.deepcopy(summary_dic)
# generate train/val/test labels
sample_dic = get_splits(sample_dic=sample_dic,
csv_folder=csv_folder)
# restrict to just this thread, if we're using threads
if thread is not None:
sample_dic = get_thread_dic(sample_dic=sample_dic,
thread=thread,
num_threads=num_threads)
return sample_dic
def load_data_from_pickle(sample_dic, pickle_folder):
"""
Load conformer data from pickle files for this chunk
of the dataset.
Args:
sample_dic (dict): Sample of `summary_dic` that is used
in this combined dataset. `summary_dic` contains
information about all smiles strings we have, except
for their conformers.
pickle_folder (str): path to folder that contains all
the pickle files. Each sub-dictionary in `sample_dic`
will have the key `pickle_path`. Joining `pickle_folder`
with `pickle_path` gives the full path to the file.
Returns:
overall_dic (dict): Dictionary that contains the contents of
the pickle file for each SMILES.
"""
overall_dic = {}
keys = list(sample_dic.keys())
for smiles in tqdm(keys):
sub_dic = sample_dic[smiles]
pickle_path = sub_dic["pickle_path"]
full_path = os.path.join(pickle_folder, pickle_path)
# initialize from `sample_dic`, as it may have
# loaded some extra props from the csvs. Ignore
# the split label as it's unnecessary.
dic = {key: val for key, val in sub_dic.items() if
key != "split"}
with open(full_path, "rb") as f:
dic.update(pickle.load(f))
overall_dic.update({smiles: dic})
return overall_dic
def map_key(key):
"""
Args:
key (str): key being used
Returns:
If a key is in `KEY_MAP`, returns the value specified in that dictionary.
Otherwise just returns the key.
"""
if key in KEY_MAP:
return KEY_MAP[key]
else:
return key
def fix_iters(spec_dic, actual_confs):
"""
Anything that is a per-species quantity will have to
get repeated once for each of the conformers in that species
when we make the dataset. Anything in `EXCLUDE_KEYS` shouldn't
be included because it messes up the dataset (e.g. variable length
strings, quantities that don't have either length 1 or length of
the number of conformers, etc.)
Args:
spec_dic (dict): a dictionary of quantities associated with a
species.
actual_confs (int): the number of conformers being used for this
species. This is not the same as the total number of conformers,
because we may have set a limit on the maximum conformers per
species.
Returns:
new_spec_dic (dict): `spec_dic` updated with the above changes.
"""
new_spec_dic = {}
for key, val in spec_dic.items():
if key in EXCLUDE_KEYS:
continue
elif type(val) in [int, float, str]:
new_spec_dic[key] = [val] * actual_confs
else:
new_spec_dic[key] = val
return new_spec_dic
def get_sorted_idx(sub_dic):
"""
Get the indices of each conformer ordered by ascending statistical weight.
Args:
sub_dic (dict): dictionary for a species
Returns:
sorted_idx (list): Sorted indices
"""
confs = sub_dic["conformers"]
weight_list = []
for i, conf in enumerate(confs):
weight_list.append([i, conf["boltzmannweight"]])
sorted_tuples = sorted(weight_list, key=lambda x: -x[-1])
sorted_idx = [i[0] for i in sorted_tuples]
return sorted_idx
def get_xyz(rd_mol):
"""
Convert an RDKit mol to an xyz (atomic number + coordinates).
Args:
rd_mol (rdkit.Chem.rdchem.Mol): RDKit mol
Returns:
xyz (list): atomic number + coordinates
"""
atoms = rd_mol.GetAtoms()
atom_nums = []
for atom in atoms:
atom_nums.append(atom.GetAtomicNum())
# each conformer is a separate rdkit mol object, so each
# mol has only one conformer
rd_conf = rd_mol.GetConformers()[0]
positions = rd_conf.GetPositions()
xyz = []
for atom_num, position in zip(atom_nums, positions):
xyz.append([atom_num, *position])
return xyz
def renorm_weights(spec_dic):
"""
Renormalize weights to sum to 1, accounting for the fact that
not using all conformers may make their sum < 1.
Args:
spec_dic (dict): a dictionary of quantities associated with a
species.
Returns:
spec_dic (dict): Updated `spec_dic` with renormalized weights
"""
new_weights = np.array(spec_dic["weights"]) / sum(spec_dic["weights"])
spec_dic["weights"] = new_weights.tolist()
return spec_dic
def convert_data(overall_dic, max_confs):
"""
Args:
overall_dic (dict): Dictionary that contains the contents of
the pickle file for each SMILES.
max_confs (int): Maximum number of conformers per species
Returns:
spec_dics (list[dict]): a dictionary with data for each species
"""
spec_dics = []
if max_confs is None:
max_confs = float("inf")
for smiles in tqdm(overall_dic.keys()):
# get everything in the dictionary except the conformer info
sub_dic = overall_dic[smiles]
spec_dic = {map_key(key): val for key, val in sub_dic.items()
if key != "conformers"}
# must apply `str()` because the `smiles` actually has type
# `numpy._str`
spec_dic["smiles"] = str(smiles)
# how many conformers we're actually using for this species
actual_confs = min(max_confs, len(sub_dic["conformers"]))
# fix various issues with the data
spec_dic = fix_iters(spec_dic, actual_confs)
# make a key and empty list for every key in the conformer
# list
spec_dic.update({map_key(key): [] for key
in sub_dic["conformers"][0].keys()
if key not in EXCLUDE_KEYS})
# conformers not always ordered by weight - get the ordered
# indices
sorted_idx = get_sorted_idx(sub_dic)
confs = sub_dic["conformers"]
spec_dic["rd_mols"] = []
# Go through the conformers from highest to lowest weight
for idx in sorted_idx[:actual_confs]:
conf = confs[idx]
for conf_key in conf.keys():
# add the RDKit mol and nxyz to the dataset
if conf_key == "rd_mol":
nxyz = get_xyz(conf[conf_key])
spec_dic["nxyz"].append(nxyz)
spec_dic["rd_mols"].append(conf[conf_key])
# add other quantities associated with the conformer
# (e.g. Boltzmann weights)
else:
new_key = map_key(conf_key)
if new_key not in spec_dic:
continue
spec_dic[new_key].append(conf[conf_key])
# renormalize the weights accounting for missing conformers
spec_dic = renorm_weights(spec_dic)
spec_dics.append(spec_dic)
return spec_dics
def add_missing(props_list):
"""
There are certain quantities that are given for one species but not
for another (e.g. whether it binds a certain protein). All quantities
that are present for at least one species should be present in all others,
and if not known it should be assigned as None or nan.
Args:
props_list (list[dict]): list of dictionaries of properties for each species
Returns:
props_list (list[dict]): `props_list` updated as described above
"""
key_list = [list(props.keys()) for props in props_list]
# dictionary of the props that have each set of keys
key_dic = {}
for i, keys in enumerate(key_list):
for key in keys:
if key not in key_dic:
key_dic[key] = []
key_dic[key].append(i)
# all the possible keys
all_keys = []
for keys in key_list:
all_keys += keys
all_keys = list(set(all_keys))
# dictionary of which props dicts are missing certain keys
missing_dic = {}
prop_idx = list(range(len(props_list)))
for key in all_keys:
missing_dic[key] = [i for i in prop_idx if
i not in key_dic[key]]
for key, missing_idx in missing_dic.items():
for i in missing_idx:
props = props_list[i]
given_idx = key_dic[key][0]
given_props = props_list[given_idx]
given_val = given_props[key]
# If it's a list give it None
if type(given_val) is list:
props[key] = [None]
# If it's a tensor give it nan
elif type(given_val) is torch.Tensor:
props[key] = torch.Tensor([np.nan])
# in this case we need to change the
# other props to have type float
for good_idx in key_dic[key]:
other_props = props_list[good_idx]
other_props[key] = other_props[key].to(torch.float)
props_list[good_idx] = other_props
props_list[i] = props
return props_list
def clean_up_dset(dset,
nbrlist_cutoff,
strict_conformers,
csv_folder,
add_directed_idx,
num_procs):
"""
Do various things to clean up the dataset after you've made it.
Args:
dset (nff.data.dataset): NFF dataset
nbrlist_cutoff (float): Cutoff for two atoms to be considered
neighbors.
strict_conformers (bool): Whether to exclude any species whose
conformers don't all have the same SMILES.
csv_folder (str): path to folder that contains the csv files
with the test/val/train smiles.
add_directed_idx (bool): whether to calculate and add the kj
and ji indices. These indices tell you which edges connect
to other edges.
num_procs (int): how many parallel threads to use when making the
kj and ji indices.
Returns:
dset (nff.data.dataset): cleaned up dataset
"""
old_num = len(dset)
# smiles we're getting rid of
remove_smiles = []
total = 3 + int(add_directed_idx)
with tqdm(total=total) as pbar:
# if requested, get rid of any species whose conformers have different
# SMILES strings
if strict_conformers:
dset, removed = filter_same_smiles(dset)
remove_smiles += removed
# iterate the tqdm progress bar
pbar.update(1)
# Get rid of any conformers whose bond lists aren't subsets of the
# neighbor list
dset, removed = filter_bonds_in_nbr(nbrlist_cutoff, dset)
remove_smiles += removed
pbar.update(1)
# Add the indices of the neighbor list that correspond to
# bonded atoms. Only use one process to avoid running
# out of memory
dset.generate_bond_idx(num_procs=1)
pbar.update(1)
# Make sure the dataset is directed
dset.make_all_directed()
# add the kj and ji idx if requested
if add_directed_idx:
# only use one process to avoid running out of memory
dset.generate_kj_ji(num_procs=1)
pbar.update(1)
# Re-save the train/val/test splits accounting for the fact that some
# species are no longer there
resave_splits(csv_folder=csv_folder,
remove_smiles=remove_smiles)
new_num = old_num - len(remove_smiles)
changed_num = old_num != new_num
# Print a warning if the total number of species has changed
if changed_num:
msg = ("WARNING: the original SMILES splits have been re-saved with "
f"{new_num} species, reduced from the original {old_num}, "
f"because only {new_num} species made it into the final "
"dataset. This could be because of conformers with bond "
"lengths greater than the cutoff distance of %.2f"
) % nbrlist_cutoff
if strict_conformers:
msg += (", or because the conformers of certain species didn't "
"all have the same SMILES string")
msg += "."
fprint(msg)
return dset
def add_features(dset,
extra_features,
parallel_feat_threads):
"""
Add any requested features to the dataset
Args:
dset (nff.data.dataset): NFF dataset
extra_features (list[dict]): list of extra features,
where each item is a dictionary of the form
{"name": name, "params": {params needed}}.
parallel_feat_threads (int): how many parallel threads
to use when making the features.
Returns:
dset (nff.data.dataset): updated NFF dataset
"""
for dic in tqdm(extra_features):
name = dic["name"]
params = dic["params"]
if name.lower() == "e3fp":
length = params["length"]
fprint(f"Adding E3FP fingerprints of size {length}...")
dset.add_e3fp(length, num_procs=parallel_feat_threads)
if name.lower() == "whim":
fprint("Adding whim fingerprints...")
dset.featurize_rdkit('whim')
if name.lower() == "morgan":
length = params["length"]
fprint(f"Adding Morgan fingerprints of size {length}...")
dset.add_morgan(length)
return dset
def make_big_dataset(spec_dics,
nbrlist_cutoff,
parallel_feat_threads):
props_list = []
nbr_list = []
rd_mols_list = []
for j, spec_dic in tqdm_enum(spec_dics):
# Exclude keys related to individual conformers. These
# include conformer features, in case you've already put
# those in your pickle files. If not we'll generate them
# below
small_spec_dic = {key: val for key, val in spec_dic.items()
if key not in CONF_KEYS}
# Treat each species' data like a regular dataset
# and use it to generate neighbor lists
dataset = Dataset(small_spec_dic, units='kcal/mol')
# number of atoms in the molecule
mol_size = len(dataset.props["nxyz"][0])
dataset.generate_neighbor_list(cutoff=nbrlist_cutoff,
undirected=False)
# now combine the neighbor lists so that this set
# of nxyz's can be treated like one big molecule
nbrs = dataset.props['nbr_list']
new_nbrs = []
# shift by i * mol_size for each conformer
for i in range(len(nbrs)):
new_nbrs.append(nbrs[i] + i * mol_size)
# add to list of conglomerated neighbor lists
nbr_list.append(torch.cat(new_nbrs))
dataset.props.pop('nbr_list')
# concatenate the nxyz's
nxyz = np.concatenate([np.array(item) for item in spec_dic["nxyz"]]
).reshape(-1, 4).tolist()
# add properties as necessary
new_dic = {"mol_size": mol_size,
"nxyz": nxyz,
"weights": torch.Tensor(spec_dic["weights"]
).reshape(-1, 1) / sum(
spec_dic["weights"]),
"degeneracy": torch.Tensor(spec_dic["degeneracy"]
).reshape(-1, 1),
"energy": torch.Tensor(spec_dic["energy"]
).reshape(-1, 1),
"num_atoms": [len(nxyz)]}
new_dic.update(
{
key: val[:1] for key, val in dataset.props.items()
if key not in new_dic.keys()
}
)
props_list.append(new_dic)
rd_mols_list.append(spec_dic["rd_mols"])
# Add props that are in some datasets but not others
props_list = add_missing(props_list)
# convert the list of dictionaries into a dictionary of lists / tensors
props_dic = concatenate_dict(*props_list)
# make a combined dataset where the species look like they're
# one big molecule
big_dataset = Dataset(props_dic, units='kcal/mol')
# give it the proper neighbor list and rdkit mols
big_dataset.props['nbr_list'] = nbr_list
big_dataset.props["rd_mols"] = rd_mols_list
# generate atom and bond features
big_dataset.featurize(num_procs=parallel_feat_threads)
return big_dataset
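# Illustrative sketch (not part of the original pipeline): the index-shifting
# trick used above. With `mol_size` atoms per conformer, conformer i's atom
# indices are offset by i * mol_size so that the stacked conformers can be
# treated as one big "molecule" with a single neighbor list.
def _demo_shift_nbr_list():
    import torch
    mol_size = 3
    # the same two neighbor pairs for each of two conformers
    nbrs = [torch.tensor([[0, 1], [1, 2]]), torch.tensor([[0, 1], [1, 2]])]
    merged = torch.cat([nbrs[i] + i * mol_size for i in range(len(nbrs))])
    print(merged.tolist())  # [[0, 1], [1, 2], [3, 4], [4, 5]]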
def make_avg_dataset(spec_dics,
nbrlist_cutoff,
parallel_feat_threads,
strict_conformers):
if not strict_conformers:
raise NotImplementedError
props_list = []
for j, spec_dic in tqdm_enum(spec_dics):
# Exclude keys related to individual conformers. These
# include conformer features, in case you've already put
# those in your pickle files. If not we'll generate them
# below
small_spec_dic = {key: val for key, val in spec_dic.items()
if key not in CONF_KEYS}
# Treat each species' data like a regular dataset
# and use it to generate neighbor lists
dataset = Dataset(small_spec_dic, units='kcal/mol')
dataset.generate_neighbor_list(cutoff=nbrlist_cutoff,
undirected=False)
all_nbrs, avg_d = avg_distances(dataset)
these_props = {"nbr_list": all_nbrs,
"distances": [avg_d],
"rd_mols": spec_dic["rd_mols"][0],
# we won't use the nxyz but it needs
# to be in an NFF dataset
# so we'll just use the first one
"nxyz": spec_dic["nxyz"][0]}
exclude = ["weights", "degeneracy", "energy", "num_atoms",
"nbr_list", "distances", "rd_mols",
"nxyz", *EXCLUDE_KEYS, *CONF_KEYS]
for key, val in dataset.props.items():
if key in exclude:
continue
per_conf = ((isinstance(val, list) or
isinstance(val, torch.Tensor))
and len(val) != 1)
if per_conf:
val = val[1]
these_props[key] = val
these_props.update({"num_atoms": len(spec_dic["nxyz"][0]),
"mol_size": len(spec_dic["nxyz"][0]),
"weights": torch.Tensor([1])})
props_list.append(these_props)
# Add props that are in some datasets but not others
props_list = add_missing(props_list)
# convert the list of dictionaries into a dictionary of lists / tensors
props_dic = concatenate_dict(*props_list)
rd_mols = copy.deepcopy(props_dic["rd_mols"])
props_dic.pop("rd_mols")
# make a combined dataset where the species look like they're
# one big molecule
final_dataset = Dataset(props_dic, units='kcal/mol')
# generate atom and bond features
final_dataset.props["rd_mols"] = [[i] for i in rd_mols]
final_dataset.featurize(num_procs=parallel_feat_threads)
return final_dataset
def make_nff_dataset(spec_dics,
nbrlist_cutoff,
parallel_feat_threads,
strict_conformers,
csv_folder,
extra_features,
add_directed_idx,
average_nbrs=False):
"""
Make an NFF dataset
Args:
spec_dics (list[dict]): a dictionary with data for each species
nbrlist_cutoff (float): Cutoff for two atoms to be considered
neighbors.
parallel_feat_threads (int): how many parallel threads
to use when making the features.
strict_conformers (bool): Whether to exclude any species whose
conformers don't all have the same SMILES.
csv_folder (str): path to folder that contains the csv files
with the test/val/train smiles.
extra_features (list[dict]): list of extra features dictionaries
add_directed_idx (bool): whether to calculate and add the kj
and ji indices. These indices tell you which edges connect
to other edges.
Returns:
big_dataset (nff.data.dataset): NFF dataset
"""
fprint("Making dataset with %d species" % (len(spec_dics)))
if average_nbrs:
big_dataset = make_avg_dataset(spec_dics=spec_dics,
nbrlist_cutoff=nbrlist_cutoff,
parallel_feat_threads=parallel_feat_threads,
strict_conformers=strict_conformers)
else:
big_dataset = make_big_dataset(spec_dics=spec_dics,
nbrlist_cutoff=nbrlist_cutoff,
parallel_feat_threads=parallel_feat_threads)
# clean up
fprint("Cleaning up dataset...")
big_dataset = clean_up_dset(dset=big_dataset,
nbrlist_cutoff=nbrlist_cutoff,
strict_conformers=strict_conformers,
csv_folder=csv_folder,
add_directed_idx=add_directed_idx,
num_procs=parallel_feat_threads)
# add any other requested features
big_dataset = add_features(dset=big_dataset,
extra_features=extra_features,
parallel_feat_threads=parallel_feat_threads)
return big_dataset
def get_data_folder(dataset_folder, thread):
"""
Get the folder in which you'll save the dataset.
Args:
dataset_folder (str): base folder for the datasets
thread (int): thread for chunk of dataset
Returns:
new_path (str): folder in which you'll save the dataset
"""
# if we're not doing any chunks then just save in the base folder
if thread is None:
return dataset_folder
# otherwise save in base_folder/<thread>
new_path = os.path.join(dataset_folder, str(thread))
if not os.path.isdir(new_path):
os.makedirs(new_path)
return new_path
def split_dataset(dataset, idx):
"""
Similar to `trim_dset`, but making a new dataset without modifying
the original.
Args:
dataset (nff.data.dataset): NFF dataset
idx (list[int]): indices to keep
Returns:
new_dataset (nff.data.dataset): new dataset with only
`idx` indices, without modifying the old dataset.
"""
# create a reference dataset with the right units and dummy
# properties
ref_props = {"nxyz": dataset.props["nxyz"][:1]}
new_dataset = Dataset(ref_props, units=dataset.units)
# update the properties using `dataset` and `idx`
for key, val in dataset.props.items():
if type(val) is list:
new_dataset.props[key] = [val[i] for i in idx]
else:
new_dataset.props[key] = val[idx]
return new_dataset
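# Illustrative sketch (not part of the original pipeline): the indexing rule
# used in `split_dataset` -- list-valued properties are subset with a list
# comprehension, while tensor-valued properties support fancy indexing.
def _demo_prop_indexing():
    import torch
    props = {"smiles": ["C", "CC", "CCC"],
             "energy": torch.tensor([0.5, 0.25, 0.75])}
    idx = [0, 2]
    subset = {key: ([val[i] for i in idx] if type(val) is list else val[idx])
              for key, val in props.items()}
    print(subset["smiles"])           # ['C', 'CCC']
    print(subset["energy"].tolist())  # [0.5, 0.75]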
def save_splits(dataset,
dataset_folder,
thread,
sample_dic):
"""
Save the train/val/test splits of the dataset
Args:
dataset (nff.data.dataset): NFF dataset
dataset_folder (str): base folder for the datasets
thread (int): thread for chunk of dataset
sample_dic (dict): Sample of `summary_dic` that is used
in this combined dataset. `summary_dic` contains
information about all smiles strings we have, except
for their conformers.
Returns:
None
"""
split_names = ["train", "val", "test"]
split_idx = {name: [] for name in split_names}
for i, smiles in enumerate(dataset.props['smiles']):
split_name = sample_dic[smiles]["split"]
split_idx[split_name].append(i)
fprint("Saving...")
data_folder = get_data_folder(dataset_folder, thread)
for name in split_names:
dset = split_dataset(dataset, split_idx[name])
dset_path = os.path.join(data_folder, name + ".pth.tar")
dset.save(dset_path)
def main(max_confs,
summary_path,
dataset_folder,
pickle_folder,
num_threads,
thread,
nbrlist_cutoff,
csv_folder,
parallel_feat_threads,
strict_conformers,
extra_features,
add_directed_idx,
average_nbrs,
**kwargs):
"""
Sample species, load their pickles, create an NFF dataset, and
save train/val/test splits.
Args:
max_confs (int): Maximum number of conformers per species
summary_path (str): Path to file with summary dictionary
dataset_folder (str): base folder for the datasets
pickle_folder (str): path to folder that contains all
the pickle files. Each sub-dictionary in `sample_dic`
will have the key `pickle_path`. Joining `pickle_folder`
with `pickle_path` gives the full path to the file.
num_threads (int): Total number of sections into which
we're splitting and saving the dataset.
thread (int): Index that tells us which section of the
total dataset that we're creating and saving
nbrlist_cutoff (float): Cutoff for two atoms to be considered
neighbors.
csv_folder (str): path to folder that contains the csv files
with the test/val/train smiles.
parallel_feat_threads (int): how many parallel threads
to use when making the features.
strict_conformers (bool): Whether to exclude any species whose
conformers don't all have the same SMILES.
extra_features (list[dict]): list of extra features,
where each item is a dictionary of the form
{"name": name, "params": {params needed}}.
add_directed_idx (bool): whether to calculate and add the kj
and ji indices. These indices tell you which edges connect
to other edges.
Returns:
None
"""
with open(summary_path, "r") as f:
summary_dic = json.load(f)
fprint("Loading splits...")
sample_dic = get_sample(summary_dic=summary_dic,
thread=thread,
num_threads=num_threads,
csv_folder=csv_folder)
fprint("Loading data from pickle files...")
overall_dic = load_data_from_pickle(sample_dic, pickle_folder)
fprint("Converting data...")
spec_dics = convert_data(overall_dic, max_confs)
fprint("Combining to make NFF dataset...")
dataset = make_nff_dataset(spec_dics=spec_dics,
nbrlist_cutoff=nbrlist_cutoff,
parallel_feat_threads=parallel_feat_threads,
strict_conformers=strict_conformers,
csv_folder=csv_folder,
extra_features=extra_features,
add_directed_idx=add_directed_idx,
average_nbrs=average_nbrs)
fprint("Creating test/train/val splits...")
save_splits(dataset=dataset,
dataset_folder=dataset_folder,
thread=thread,
sample_dic=sample_dic)
fprint((f"Complete! Saved section {thread} of the dataset in "
f"{os.path.join(dataset_folder, str(thread))}.\n\n"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--max_confs', type=int, default=None,
help=("Maximum number of conformers to allow in any "
"species in your dataset. No limit if "
"max_confs isn't specified."))
parser.add_argument('--nbrlist_cutoff', type=float, default=5,
help=("Cutoff for 3D neighbor list"))
parser.add_argument('--summary_path', type=str)
parser.add_argument('--dataset_folder', type=str)
parser.add_argument('--pickle_folder', type=str)
parser.add_argument('--num_threads', type=int, default=None)
parser.add_argument('--thread', type=int, default=None)
parser.add_argument('--prop', type=str, default=None,
help=("Name of property for which to generate "
"a proportional classification sample"))
parser.add_argument('--csv_folder', type=str,
help=("Name of the folder in which "
"you want to save the SMILES "
"splits"))
parser.add_argument('--parallel_feat_threads', type=int,
default=5,
help=("Number of parallel threads to use "
"when generating features"))
parser.add_argument('--strict_conformers', action='store_true',
help=("Exclude any species whose conformers don't "
"all have the same SMILES."))
parser.add_argument('--extra_features', type=str, default=None,
help=("List of dictionaries of extra features, "
"where each dictionary has the name of the"
"feature and any associated parameters. "
"If using the command line, "
"please provide as a JSON string."))
parser.add_argument('--add_directed_idx', action='store_true',
help=("Add the kj and ji indices mapping out edges "
"that are neighbors of other edges. This takes "
"a fair bit of extra time, but if you're "
"training a ChemProp3D model, which uses edge "
"updates, this will save you a lot of time "
"during training."))
parser.add_argument('--average_nbrs', action='store_true',
help=("Use one effective structure with interatomic distances "
"averaged over conformers"))
parser.add_argument('--config_file', type=str,
help=("Path to JSON file with arguments. If given, "
"any arguments in the file override the command "
"line arguments."))
args = parse_args(parser)
if type(args.extra_features) == str:
args.extra_features = json.loads(args.extra_features)
main(**args.__dict__)
```
#### File: make_dset/splits/split.py
```python
import csv
import os
import numpy as np
import json
import shutil
import argparse
from rdkit import Chem
from tqdm import tqdm
from nff.utils import bash_command, parse_args, fprint, prop_split
def apply_transfs(props, summary_dic):
"""
Apply transformation to quantities in the dataset. For example,
if a requested property is log_<actual property>, then create
this requested property by taking logs in the dataset.
Args:
props (list[str]): list of property names that you want to predict
summary_dic (dict): dictionary of the form {smiles: sub_dic},
where `sub_dic` is a dictionary with all the species properties
apart from its conformers.
Returns:
None
"""
for prop in props:
prop_present = any([prop in sub_dic for sub_dic
in summary_dic.values()])
if prop_present:
continue
if prop.startswith("log_"):
base_prop = prop.split("log_")[-1]
def transf(x): return np.log(x)
else:
raise Exception((f"{prop} is not in the summary "
"dictionary and doesn't have a prefix "
"corresponding to a known transformation, "
"such as log."))
base_present = any([base_prop in sub_dic for sub_dic
in summary_dic.values()])
if not base_present:
raise Exception((f"{base_prop} is not in the summary "
"dictionary."))
# update summary dictionary with transformed keys
for smiles, sub_dic in summary_dic.items():
if base_prop in sub_dic:
sub_dic.update({prop: transf(sub_dic[base_prop])})
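# Illustrative sketch (not part of the original script): requesting the
# hypothetical property "log_sol" adds it to every species that reports "sol".
def _demo_apply_transfs():
    summary_dic = {"CCO": {"sol": 2.0},
                   "CCC": {"pickle_path": "spec.pickle"}}
    apply_transfs(["log_sol"], summary_dic)
    print(round(summary_dic["CCO"]["log_sol"], 3))  # 0.693 (natural log of 2)
    print("log_sol" in summary_dic["CCC"])          # False -- no "sol" given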
def to_csv(summary_dic,
props,
csv_file):
"""
Write the SMILES and properties in the summary dictionary
to a csv file.
Args:
summary_dic (dict): dictionary of the form {smiles: sub_dic},
where `sub_dic` is a dictionary with all the species properties
apart from its conformers.
props (list[str]): list of property names that you want to predict
csv_file (str): path to csv file that you want to write to
Returns:
None
"""
columns = ['smiles'] + props
dict_data = []
for smiles, sub_dic in summary_dic.items():
dic = {}
for prop in props:
if prop.startswith("log_"):
base_prop = prop.split("log_")[-1]
if base_prop in sub_dic:
dic[prop] = np.log(sub_dic[base_prop])
dic = {prop: sub_dic[prop] for prop in props}
dic["smiles"] = smiles
dict_data.append(dic)
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=columns)
writer.writeheader()
for data in dict_data:
writer.writerow(data)
def filter_prop_and_pickle(sample_dic, props):
"""
Filter the SMILES strings to exclude those that don't have a known value
of all `props`, or do not have a known path to a pickle file with conformer
information.
Args:
sample_dic (dict): Sample of `summary_dic` that will be used in this dataset
props (list[str]): list of property names that you want to predict
Returns:
sample_dic (dict): Updated `sample_dic` with the above filters applied.
"""
smiles_list = [key for key, sub_dic in sample_dic.items()
if all([prop in sub_dic for prop in props])
and sub_dic.get("pickle_path") is not None]
sample_dic = {key: sample_dic[key] for key in smiles_list}
return sample_dic
def filter_atoms(sample_dic, max_atoms):
"""
Filter the SMILES strings to exclude those whose atom count is above
`max_atoms`.
Args:
sample_dic (dict): Sample of `summary_dic` that will be used in this dataset
max_atoms (int): Maximum number of atoms allowed in a species
Returns:
sample_dic (dict): Updated `sample_dic` with the above filter applied.
"""
# if `max_atoms` is unspecified then the default is no limit
if max_atoms is None:
max_atoms = float("inf")
smiles_list = list(sample_dic.keys())
good_smiles = []
for smiles in tqdm(smiles_list):
mol = Chem.MolFromSmiles(smiles)
mol = Chem.AddHs(mol)
num_atoms = mol.GetNumAtoms()
if num_atoms <= max_atoms:
good_smiles.append(smiles)
sample_dic = {smiles: sample_dic[smiles] for
smiles in good_smiles}
return sample_dic
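# Illustrative sketch (not part of the original script): the atom count used
# in the filter above, shown directly with RDKit. Hydrogens are added first,
# so ethanol ("CCO") counts 9 atoms rather than 3.
def _demo_atom_count():
    mol = Chem.AddHs(Chem.MolFromSmiles("CCO"))
    print(mol.GetNumAtoms())  # 9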
def subsample(summary_dic,
props,
max_specs,
max_atoms,
dataset_type,
seed):
"""
Reduce the number of species according to `props`, `max_specs`,
and `max_atoms`.
Args:
summary_dic (dict): dictionary of the form {smiles: sub_dic},
where `sub_dic` is a dictionary with all the species properties
apart from its conformers.
props (list[str]): list of property names that you want to predict
max_specs (int): maximum number of species allowed in dataset
max_atoms (int): Maximum number of atoms allowed in a species
dataset_type (str): type of problem, e.g. "classification" or
"regression".
seed (int): random seed for split
Returns:
sample_dic (dict): Updated `sample_dic` with the above filter applied.
"""
# filter to only include species with the requested props
sample_dic = filter_prop_and_pickle(summary_dic, props)
# filter to only include species with less than `max_atoms` atoms
sample_dic = filter_atoms(sample_dic, max_atoms)
# If you set a limit for `max_specs` and are doing classification,
# try to keep as many of the underrepresented class as possible.
# If you set a limit but aren't doing classification, select them
# randomly.
keep_smiles = prop_split(max_specs=max_specs,
dataset_type=dataset_type,
props=props,
sample_dic=sample_dic,
seed=seed)
sample_dic = {smiles: sample_dic[smiles] for smiles in keep_smiles}
return sample_dic
def make_split(summary_path,
csv_folder,
cp_folder,
props,
split_sizes,
split_type,
max_specs,
max_atoms,
dataset_type,
seed):
"""
Split the species into train, test, and validation sets.
Args:
summary_path (str): path to the JSON file that summarizes
all of the information about the species, apart from their
conformers.
csv_folder (str): path to the folder in which we will save our
csv files with the SMILES, properties and training splits.
cp_folder (str): path to the ChemProp folder on your computer
props (list[str]): list of property names that you want to predict
split_sizes (list[float]): list of the form [train_split_size, val_split_size,
test_split_size].
split_type (str): how to split the data. Options can be found in the Chemprop
script `split_data.py`. A good choice is usually `scaffold_balanced`, which splits
in such a way that similar scaffolds are in the same split.
max_specs (int): maximum number of species allowed in dataset
max_atoms (int): Maximum number of atoms allowed in a species
dataset_type (str): type of problem, e.g. "classification" or
"regression".
seed (int): random seed for split
Returns:
None
"""
with open(summary_path, "r") as f:
summary_dic = json.load(f)
# apply any transformations to the data, e.g. wanting a
# dataset that has the log of a value instead of the
# value itself
apply_transfs(props, summary_dic)
# filter based on props, max species and max number of atoms
summary_dic = subsample(summary_dic=summary_dic,
props=props,
max_specs=max_specs,
max_atoms=max_atoms,
dataset_type=dataset_type,
seed=seed)
# path to the csv file with SMILES and properties
all_csv = os.path.join(csv_folder, "all.csv")
if not os.path.isdir(csv_folder):
os.makedirs(csv_folder)
# write the contents of `summary_dic` to the csv
to_csv(summary_dic, props, all_csv)
# run the chemprop script `split_data.py` to make the splits
# from `all.csv`
script = os.path.join(cp_folder, "scripts", "split_data.py")
split_str = " ".join(np.array(split_sizes).astype("str"))
cmd = (f"source activate chemprop && "
f"python {script} --split_type {split_type} "
f"--split_sizes {split_str} "
f"--data_path {all_csv} "
f"--save_dir {csv_folder} "
f"--seed {seed}")
p = bash_command(cmd)
p.wait()
def add_just_smiles(csv_folder):
"""
Take csv files with SMILES + properties and use them to create files
with just the SMILES strings.
Args:
csv_folder (str): path to the folder in which we will save our
csv files with the SMILES, properties and training splits.
Returns:
None
"""
for name in ['train', 'val', 'test']:
path = os.path.join(csv_folder, name + '.csv')
smiles_list = []
with open(path) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for i, row in enumerate(readCSV):
if i == 0:
continue
smiles_list.append(row[0])
# save to "train_smiles.csv", "val_smiles.csv", etc.
smiles_path = os.path.join(csv_folder, f"{name}_smiles.csv")
columns = ["smiles"]
dict_data = [{"smiles": smiles} for smiles in smiles_list]
with open(smiles_path, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=columns)
writer.writeheader()
for data in dict_data:
writer.writerow(data)
def rename_csvs(csv_folder):
"""
Rename the csvs saved by the chemprop split function to distinguish
between what is just SMILES and what is SMILES + properties.
Args:
csv_folder (str): path to the folder in which we will save our
csv files with the SMILES, properties and training splits.
Returns:
None
"""
for name in ['train', 'val', 'test']:
path = os.path.join(csv_folder, name + '.csv')
# "train_full.csv", "val_full.csv", etc.
new_path = os.path.join(csv_folder, name + "_full.csv")
shutil.move(path, new_path)
def summarize(csv_folder, dataset_type):
"""
Summarize where the splits have been saved and what their contents are.
Args:
csv_folder (str): path to the folder in which we will save our
csv files with the SMILES, properties and training splits.
dataset_type (str): type of problem, e.g. "classification" or
"regression".
Returns:
None
"""
msgs = []
for name in ['train', 'val', 'test', 'all']:
if name == 'all':
path = os.path.join(csv_folder, f"{name}.csv")
else:
path = os.path.join(csv_folder, f"{name}_full.csv")
with open(path, "r") as f:
lines = f.readlines()[1:]
num_specs = len(lines)
this_msg = f"{num_specs} species"
if dataset_type == "classification":
num_pos = len([line for line in lines
if int(line.split(",")[-1]) == 1])
this_msg += f", {num_pos} positives"
msgs.append(this_msg)
msg = (f"Splits saved in {csv_folder}\n"
f"Train files: train_smiles.csv and train_full.csv ({msgs[0]})\n"
f"Validation files: val_smiles.csv and val_full.csv ({msgs[1]}) \n"
f"Test files: test_smiles.csv and test_full.csv ({msgs[2]})\n"
f"Combined file: all.csv ({msgs[3]})")
fprint(msg)
def main(summary_path,
csv_folder,
cp_folder,
props,
split_sizes,
split_type,
max_specs,
max_atoms,
dataset_type,
seed,
**kwargs):
"""
Split the data, write it to csvs, create new csvs with just
SMILES and no properties, and rename the existing csvs.
Args:
summary_path (str): path to the JSON file that summarizes
all of the information about the species, apart from their
conformers.
csv_folder (str): path to the folder in which we will save our
csv files with the SMILES, properties and training splits.
cp_folder (str): path to the ChemProp folder on your computer
props (list[str]): list of property names that you want to predict
split_sizes (list[float]): list of the form [train_split_size, val_split_size,
test_split_size].
split_type (str): how to split the data. Options can be found in the Chemprop
script `split_data.py`. A good choice is usually `scaffold_balanced`, which splits
in such a way that similar scaffolds are in the same split.
max_specs (int): maximum number of species allowed in dataset
max_atoms (int): Maximum number of atoms allowed in a species
dataset_type (str): type of problem, e.g. "classification" or
"regression".
seed (int): random seed for split
Returns:
None
"""
make_split(summary_path=summary_path,
csv_folder=csv_folder,
cp_folder=cp_folder,
props=props,
split_sizes=split_sizes,
split_type=split_type,
max_specs=max_specs,
max_atoms=max_atoms,
dataset_type=dataset_type,
seed=seed)
add_just_smiles(csv_folder)
rename_csvs(csv_folder)
summarize(csv_folder, dataset_type)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--summary_path', type=str,
help="Path to summary dictionary")
parser.add_argument('--csv_folder', type=str,
help=("Name of the folder in which "
"you want to save the SMILES "
"splits"))
parser.add_argument('--cp_folder', type=str,
help="Tour chemprop folder ")
parser.add_argument('--props', type=str,
nargs='+',
help=("Name of the properties you're "
"predicting"))
parser.add_argument('--split_sizes', type=float,
nargs="+",
help="Train/val/test split proportions ",
default=[0.8, 0.1, 0.1])
parser.add_argument('--split_type', type=str,
choices=['random', 'scaffold_balanced'],
help=("Type of split"))
parser.add_argument('--max_specs', type=int, default=None,
help=("Maximum number of species to use in your "
"dataset. No limit if max_specs isn't "
"specified."))
parser.add_argument('--max_atoms', type=int, default=None,
help=("Maximum number of atoms to allow in any "
"species in your dataset. No limit if "
"max_atoms isn't specified."))
parser.add_argument('--dataset_type', type=str,
choices=['regression', 'classification'],
help=("Type of training task."))
parser.add_argument('--seed', type=int,
help=("Random seed for split"))
parser.add_argument('--config_file', type=str,
help=("Path to JSON file with arguments. If given, "
"any arguments in the file override the command "
"line arguments."))
args = parse_args(parser)
main(**args.__dict__)
```
#### File: transfer/get_fps/fps_single.py
```python
import argparse
import os
import pickle
import numpy as np
import json
import sys
import warnings
import copy
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from nff.data import Dataset
from nff.train import load_model
from nff.data import collate_dicts
from nff.utils.cuda import batch_to, batch_detach
from nff.data.dataset import concatenate_dict
from nff.utils import (parse_args, parse_score,
CHEMPROP_TRANSFORM, fprint, get_split_names)
from nff.utils.confs import trim_confs
# ignore warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
def get_iter_func(track, num_track=None):
"""
Get the function to iterate through a process.
Args:
track (bool): track this process with tqdm
num_track (int, optional): number of items
that will come out of this process.
Returns:
iter_func (callable): iteration function
"""
if track and num_track != 1:
iter_func = tqdm
else:
def iter_func(x):
return x
return iter_func
def save(results,
targets,
feat_save_folder,
prop):
"""
Save fingerprints, predicted values, true values, and various conformer weights.
Args:
results (dict): dictionary of the results of applying the CP3D model
targets (dict): target values
feat_save_folder (str): folder in which we save the features files
prop (str): property that you're predicting
Returns:
None
"""
# get the true and predicted values of `prop`
if prop is None:
y_true = None
probas_pred = None
else:
probas_pred = (torch.cat(results[prop])
.reshape(-1).numpy())
if prop in targets and targets.get(prop, []):
y_true = torch.stack(targets[prop]).numpy()
else:
y_true = np.ones_like(probas_pred) * float("nan")
fps = torch.stack(results["fp"]).numpy()
all_conf_fps = results["conf_fps"]
learned_weights = results["learned_weights"]
energy = results.get("energy")
boltz_weights = results["boltz_weights"]
smiles_list = targets["smiles"]
dic = {}
# whether we're using alpha_ij attention (i.e., every conformer talks
# to every other), or alpha_i attention (i.e., we just use the conformer's
# fingerprint to get its weight)
alpha_ij_att = all([w.reshape(-1).shape[0] == conf_fp.shape[0] ** 2
for w, conf_fp in zip(learned_weights, all_conf_fps)])
for i, smiles in enumerate(smiles_list):
conf_fps = all_conf_fps[i].numpy()
these_weights = learned_weights[i].numpy().reshape(-1)
num_fps = conf_fps.shape[0]
if alpha_ij_att:
these_weights = these_weights.reshape(num_fps,
num_fps)
dic[smiles] = {"fp": fps[i].reshape(-1),
"conf_fps": conf_fps,
"learned_weights": these_weights,
"boltz_weights": boltz_weights[i].reshape(-1).numpy()}
if energy is not None:
dic[smiles].update({"energy": energy[i].reshape(-1).numpy()})
if y_true is not None and probas_pred is not None:
dic[smiles].update({"true": y_true[i],
"pred": probas_pred[i]})
with open(feat_save_folder, "wb") as f:
pickle.dump(dic, f)
def model_from_metric(model, model_folder, metric):
"""
Get the model with the best validation score according
to a specified metric.
Args:
model (nff.nn.models): original NFF model loaded
model_folder (str): path to the folder that the model is being trained in
metric (str): name of metric to use
Returns:
model (nff.nn.models): NFF model updated with the state dict of
the model with the best metric
"""
# the metric asked for should be in chemprop notation (e.g. auc, prc-auc),
# but when training a CP3D model we use different names
# (e.g. roc_auc, prc_auc), so we need to transform into that name
if metric in CHEMPROP_TRANSFORM:
use_metric = CHEMPROP_TRANSFORM[metric]
else:
use_metric = metric
# find the best epoch by reading the csv with the metrics
best_score, best_epoch = parse_score(model_folder, use_metric)
check_path = os.path.join(model_folder, "checkpoints",
f"checkpoint-{best_epoch}.pth.tar")
state_dict = torch.load(check_path, map_location="cpu"
)["model"]
fprint(f"Loading model state dict from {check_path}")
model.load_state_dict(state_dict)
model.eval()
return model
def fps_and_pred(model, batch, **kwargs):
"""
Get fingerprints and predictions from the model.
Args:
model (nff.nn.models): original NFF model loaded
batch (dict): batch of data
Returns:
results (dict): model predictions and its predicted
fingerprints, conformer weights, etc.
"""
model.eval()
# make the fingerprints
outputs, xyz = model.make_embeddings(batch, xyz=None, **kwargs)
# pool to get the learned weights and pooled fingerprints
pooled_fp, learned_weights = model.pool(outputs)
# get the final results
results = model.readout(pooled_fp)
# add sigmoid if it's a classifier and not in training mode
if model.classifier:
keys = list(model.readout.readout.keys())
for key in keys:
results[key] = torch.sigmoid(results[key])
# add any required gradients
results = model.add_grad(batch=batch, results=results, xyz=xyz)
# put into a dictionary
conf_fps = [i.cpu().detach() for i in outputs["conf_fps_by_smiles"]]
energy = batch.get("energy")
boltz_weights = batch.get("weights")
# with operations to de-batch
n_confs = [(n // m).item()
for n, m in zip(batch['num_atoms'], batch['mol_size'])]
for key, val in results.items():
results[key] = [i for i in val]
results.update({"fp": [i for i in pooled_fp],
"conf_fps": conf_fps,
"learned_weights": learned_weights,
"boltz_weights": (list(torch.split
(boltz_weights, n_confs)))})
if energy is not None:
results.update({"energy": list(torch.split(energy, n_confs))})
return results
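# Illustrative sketch (not part of the original script): the de-batching step
# above uses torch.split with the per-species conformer counts to turn a flat
# per-conformer tensor back into one chunk per species.
def _demo_debatch():
    import torch
    boltz_weights = torch.tensor([0.7, 0.2, 0.1, 1.0])  # 3 confs + 1 conf
    n_confs = [3, 1]
    chunks = list(torch.split(boltz_weights, n_confs))
    print([chunk.shape[0] for chunk in chunks])  # [3, 1]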
def evaluate(model,
loader,
device,
track,
**kwargs):
"""
Evaluate a model on a dataset.
Args:
model (nff.nn.models): original NFF model loaded
loader (torch.utils.data.DataLoader): data loader
device (Union[str, int]): device on which you run the model
Returns:
all_results (dict): dictionary of results
all_batches (dict): dictionary of ground truth
"""
model.eval()
model.to(device)
all_results = []
all_batches = []
iter_func = get_iter_func(track)
for batch in iter_func(loader):
batch = batch_to(batch, device)
results = fps_and_pred(model, batch, **kwargs)
all_results.append(batch_detach(results))
# don't overload memory with unnecessary keys
reduced_batch = {key: val for key, val in batch.items()
if key not in ['bond_idx', 'ji_idx', 'kj_idx',
'nbr_list', 'bonded_nbr_list']}
all_batches.append(batch_detach(reduced_batch))
all_results = concatenate_dict(*all_results)
all_batches = concatenate_dict(*all_batches)
return all_results, all_batches
def get_dset_paths(full_path,
train_only,
val_only,
test_only):
"""
See where the datasets are located and get their paths.
Args:
full_path (str): folder with the data in it
train_only (bool): only load the training set
val_only (bool): only load the validation set
test_only (bool): only load the test set
Returns:
paths (list): list of paths for each split. Each split
gets its own sub-list, which either has a single string
with the corresponding path, or a set of strings if the data
is broken up into sub-folders.
dset_names (list[str]): name of the splits
(e.g. train, val, test)
"""
dset_names = get_split_names(train_only=train_only,
val_only=val_only,
test_only=test_only)
# see if the datasets are in the main folder
main_folder = all([os.path.isfile(os.path.join(full_path, name
+ ".pth.tar")) for name
in dset_names])
if main_folder:
paths = [[os.path.join(full_path, name + ".pth.tar")]
for name in dset_names]
else:
sub_folders = [i for i in os.listdir(full_path) if i.isdigit()]
sub_folders = sorted(sub_folders, key=lambda x: int(x))
paths = [[os.path.join(full_path, i, name + ".pth.tar")
for i in sub_folders] for name in dset_names]
return paths, dset_names
def add_dics(base, new, is_first):
"""
Add a new dictionary to an old dictionary, where the values in each dictionary
are lists that should be concatenated, and the keys in the new dictionary might
not be the same as those in the old one.
Args:
base (dict): base dictionary to be added to
new (dict): new dictionary adding on
is_first (bool): whether this is the first batch we've loaded
Returns:
base (dict): updated base dictionary
"""
for key, val in new.items():
if is_first:
base[key] = val
else:
if key in base:
base[key] += val
if is_first:
return base
# any keys that are new to the dictionary despite this not being
# the first batch added (i.e. they're in this batch but weren't
# in previous batches)
extra_keys = [key for key in new.keys() if key not in
base.keys()]
for key in extra_keys:
dim = len(base["smiles"])
old_val = [torch.tensor(float("nan"))
for _ in range(dim)]
new_val = old_val + new[key]
base[key] = new_val
# same idea for keys that were here before and aren't now
missing_keys = [key for key in base.keys() if key not in
new.keys()]
for key in missing_keys:
dim = len(new["smiles"])
base[key] += [torch.tensor(float('nan')) for _ in range(dim)]
return base
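# Illustrative sketch (not part of the original script): how `add_dics` pads a
# key that was present in earlier batches but missing from a later one.
def _demo_add_dics():
    import torch
    base = add_dics(base={},
                    new={"smiles": ["C", "CC"],
                         "energy": [torch.tensor(1.0), torch.tensor(2.0)]},
                    is_first=True)
    base = add_dics(base=base, new={"smiles": ["CCC"]}, is_first=False)
    print(len(base["smiles"]))  # 3
    print(base["energy"][-1])   # tensor(nan) -- padding for the second batch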
def main(dset_folder,
device,
model_folder,
batch_size,
prop,
sub_batch_size,
feat_save_folder,
metric=None,
val_only=False,
train_only=False,
test_only=False,
track=True,
max_confs=None,
**kwargs):
"""
Get fingerprints and predictions from the model.
Args:
dset_folder (str): folder with the data in it
device (Union[str, int]): device on which you run the model
model_folder (str): path to the folder that the model is being trained in
batch_size (int): how many data points per batch
prop (str): property to predict
sub_batch_size (int): how many conformers to put in memory at a time
feat_save_folder (str): folder in which we're saving the features
metric (str): name of metric to use. If not given, this defaults to
taking the model with the best validation loss.
train_only (bool): only load the training set
val_only (bool): only load the validation set
test_only (bool): only load the test set
track (bool): Whether to track progress with tqdm
max_confs (int): Maximum number of conformers to use when evaluating the
model
"""
# get the model initially by taking the one saved as "best_model"
model = load_model(model_folder)
# update its state_dict with the checkpoint from the epoch with
# the best metric score
if metric is None:
fprint(("WARNING: You have not specified a metric with which "
"to choose the best model. Defaulting to whichever was "
"chosen as the best model during training "))
else:
fprint(f"Loading model with best validation {metric}")
model = model_from_metric(model=model,
model_folder=model_folder,
metric=metric)
model.eval()
paths, dset_names = get_dset_paths(dset_folder, train_only=train_only,
val_only=val_only, test_only=test_only)
# go through each dataset, create a loader, evaluate the model,
# and save the predictions
iter_func = get_iter_func(track, num_track=len(dset_names))
for i in iter_func(range(len(dset_names))):
results = {}
targets = {}
j = 0
for path in tqdm(paths[i]):
dataset = Dataset.from_file(path)
if max_confs is not None:
dataset = trim_confs(dataset=dataset,
num_confs=max_confs,
idx_dic=None,
enum_func=iter_func)
loader = DataLoader(dataset,
batch_size=batch_size,
collate_fn=collate_dicts)
new_results, new_targets = evaluate(model,
loader,
device=device,
sub_batch_size=sub_batch_size,
track=track)
is_first = (j == 0)
results = add_dics(base=results,
new=new_results,
is_first=is_first)
targets = add_dics(base=targets,
new=new_targets,
is_first=is_first)
j += 1
name = dset_names[i]
save_name = f"pred_{metric}_{name}.pickle"
if feat_save_folder is None:
feat_save_folder = dset_folder
if not os.path.isdir(feat_save_folder):
os.makedirs(feat_save_folder)
pickle_path = os.path.join(feat_save_folder, save_name)
save(results=results,
targets=targets,
feat_save_folder=pickle_path,
prop=prop)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dset_folder', type=str,
help=("Name of the folder with the "
"datasets you want to add "
"fingerprints to"))
parser.add_argument('--feat_save_folder', type=str,
help="Path to save pickles")
parser.add_argument('--no_track', action='store_true',
help=("Don't track progress with tqmd "))
parser.add_argument('--config_file', type=str,
help=("Path to JSON file with arguments."))
args = parse_args(parser)
# need to add this explicitly because `parse_args` will only add
# the keys that are given as options above
with open(args.config_file, "r") as f:
config = json.load(f)
for key, val in config.items():
setattr(args, key, val)
main(**args.__dict__)
```
#### File: transfer/get_fps/make_fps.py
```python
import json
import os
import subprocess
import pickle
import time
import argparse
from tqdm import tqdm
from nff.utils import (METRICS, CHEMPROP_TRANSFORM, parse_args, get_split_names)
# dictionary that transforms our metric syntax to chemprop's
REVERSE_TRANSFORM = {val: key for key, val in CHEMPROP_TRANSFORM.items()}
# available metrics
METRIC_LIST = [REVERSE_TRANSFORM.get(metric, metric) for metric in METRICS]
def make_base_config(config_file, kwargs, par):
"""
Make a config file for `fps_single.py`.
Args:
config_file (str): path to general config file
kwargs (dict): extra dictionary items to put in it
par (bool): whether we're going to be using
this to make fingerprints in parallel.
Returns:
base_path (str): path to the new, base config
file, which is missing certain keys if we're
running this in parallel.
"""
# get the contents of the config file
with open(config_file, "r") as f_open:
config = json.load(f_open)
# update it with any new values
for key, val in kwargs.items():
# if you put the config file in the dictionary,
# but the config file is the original name and
# not the new name, it messes things up
if key not in config and key != "config_file":
config[key] = val
# if running in parallel, get rid of these keys because
# we'll want to specify them individually for each parallel
# process
if par:
par_keys = ["dset_folder", "feat_save_folder", "config_file",
"slurm_parallel"]
for key in par_keys:
if key in config:
config.pop(key)
# make a new path to this "base" config and save
base_path = config_file.replace(".json", "_base.json")
with open(base_path, "w") as f_open:
json.dump(config, f_open, indent=4, sort_keys=True)
return base_path
def get_single_path():
"""
Get the path to `fps_single.py`.
Args:
None
Returns:
single_path (str): path to `fps_single.py`
"""
this_dir = os.path.abspath(".")
single_path = os.path.join(this_dir, "fps_single.py")
return single_path
def run_par(base_config_file,
dset_folder,
idx):
"""
Make fingerprints in parallel.
Args:
base_config_file (str): path to the new, base config
file, which is missing certain keys if we're
running this in parallel.
dset_folder (str): path to dataset
idx (str): index of the dataset thread we're working
on.
Returns:
p: subprocess from executing the parallel command
"""
# create the command
single_path = get_single_path()
idx_folder = os.path.join(dset_folder, str(idx))
cmd = (f"srun -N 1 -n 1 --exclusive python {single_path} "
f" --config_file {base_config_file} "
f" --dset_folder {idx_folder} --feat_save_folder {idx_folder} ")
# figure out whether we should be tracking progress of this thread
# or not
num_nodes = os.environ["SLURM_NNODES"]
if (int(idx) % int(num_nodes) != 0):
cmd += "--no_track"
p = subprocess.Popen([cmd],
shell=True,
stdin=None,
stdout=None,
stderr=None,
close_fds=True)
return p
def monitor_results(dset_folder,
folders,
split_names,
metric):
"""
Monitor results of parallel processes using tqdm.
Args:
dset_folder (str): path to dataset
folders (list[str]): names of sub-folders
split_names (list[str]): names of splits
(train, val, and/or test) that we're
monitoring.
metric (str): name of the metric that we're using
to evaluate the model.
Returns:
None
"""
# Make a dictionary that has the path of each pickle
# file we're going to make for each sub-folder. Initialize
# each value to False
monitor_dic = {}
for split in split_names:
for folder in folders:
pickle_name = f"pred_{metric}_{split}.pickle"
pickle_path = os.path.join(dset_folder, folder, pickle_name)
monitor_dic[pickle_path] = False
# Update the dictionary as those files are made and use it to
# update the tqdm progress bar. Keep looping until all the files
# exist
total = len(monitor_dic)
with tqdm(total=total) as pbar:
while False in monitor_dic.values():
for path, val in monitor_dic.items():
if os.path.isfile(path) and not val:
monitor_dic[path] = True
pbar.update(1)
time.sleep(5)
def pickle_sub_path(metric,
split,
folder,
dset_folder):
"""
Get path to the pickle file in a dataset sub-folder.
Args:
metric (str): name of the metric that we're using
to evaluate the model.
split (str): name of split (train, val or test)
folder (str): name of sub-folder
dset_folder (str): path to dataset
Returns:
path (str): path to pickle file
"""
pickle_name = f"pred_{metric}_{split}.pickle"
path = os.path.join(dset_folder, folder, pickle_name)
return path
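# Illustrative sketch (not part of the original script), with hypothetical
# folder names; on a POSIX system this prints
# "chunked_dset/0/pred_prc-auc_test.pickle".
def _demo_pickle_sub_path():
    print(pickle_sub_path(metric="prc-auc", split="test",
                          folder="0", dset_folder="chunked_dset"))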
def combine_results(dset_folder,
metric,
split_names):
"""
Combine results from different parallel processes into one big
dictionary.
Args:
dset_folder (str): path to dataset
metric (str): name of the metric that we're using
to evaluate the model.
split_names (list[str]): names of splits
(train, val, and/or test) that we're
monitoring.
Returns:
combined_dics (dict): dictionary of the form {split: sub_dic}
for split in each dataset split (train, val, and/or test),
and sub_dic is the results dictionary (contains predicted
and actual quantity values, fingerprints, etc.)
"""
# find the folders and track results
folders = sorted([i for i in os.listdir(dset_folder) if i.isdigit()],
key=lambda x: int(x))
monitor_results(dset_folder, folders, split_names, metric)
# once all the results are in, put them into a big dictionary
combined_dics = {}
# go through each split
for split in split_names:
overall = {}
# go through each folder and loop until you've successfully loaded
# all pickles
for folder in folders:
while True:
pickle_path = pickle_sub_path(
metric, split, folder, dset_folder)
try:
with open(pickle_path, "rb") as f:
results = pickle.load(f)
except (EOFError, FileNotFoundError, pickle.UnpicklingError):
time.sleep(1)
continue
for key, val in results.items():
overall[key] = val
break
combined_dics[split] = overall
return combined_dics
def run_all_par(kwargs):
"""
Run all the parallel processes.
Args:
kwargs (dict): dictionary of keywords
Returns:
None
"""
dset_folder = kwargs["dset_folder"]
config_file = kwargs["config_file"]
metric = kwargs["metric"]
feat_save_folder = kwargs["feat_save_folder"]
# make the config file that has the basic parameters, but
# is missing others that will depend on the process being used
base_config_file = make_base_config(config_file=config_file,
kwargs=kwargs,
par=True)
# get the dataset folders
folders = sorted([i for i in os.listdir(dset_folder) if i.isdigit()],
key=lambda x: int(x))
procs = []
split_names = get_split_names(train_only=kwargs.get("train_only"),
val_only=kwargs.get("val_only"),
test_only=kwargs.get("test_only"))
# submit the parallel command
for idx in folders:
paths = [pickle_sub_path(metric, split, idx, dset_folder)
for split in split_names]
if all([os.path.isfile(path) for path in paths]):
continue
p = run_par(base_config_file, dset_folder, idx)
procs.append(p)
# get the final results
results = combine_results(dset_folder, metric, split_names)
# save them in the feature folder as a pickle file
for split, sub_dic in results.items():
pickle_name = f"pred_{metric}_{split}.pickle"
pickle_path = os.path.join(feat_save_folder, pickle_name)
with open(pickle_path, "wb") as f:
pickle.dump(sub_dic, f)
def run_single(kwargs):
"""
Make fingerprints in series.
Args:
kwargs (dict): dictionary of keywords
Returns:
None
"""
config_file = kwargs["config_file"]
metric = kwargs["metric"]
# save the arguments in a config file so we can just specify
# its path in the command, instead of adding them as command
# line arguments
base_config_file = make_base_config(config_file=config_file,
kwargs=kwargs,
par=False)
# get the path to `fps_single.py
single_path = get_single_path()
# execute the command
cmd = (f"python {single_path} --config_file {base_config_file}")
print(cmd)
p = subprocess.Popen([cmd],
shell=True,
stdin=None,
stdout=None,
stderr=None,
close_fds=True)
p.wait()
def main(kwargs):
"""
Get the fingerprints and results from the model.
Args:
kwargs (dict): dictionary of keywords
Returns:
None
"""
slurm_parallel = kwargs["slurm_parallel"]
if slurm_parallel:
run_all_par(kwargs)
else:
run_single(kwargs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model_folder', type=str,
help="Name of model path")
parser.add_argument('--dset_folder', type=str,
help=("Name of the folder with the "
"datasets you want to add "
"fingerprints to"))
parser.add_argument('--feat_save_folder', type=str,
help="Path to save pickles")
parser.add_argument('--device', type=str,
help="Name of device to use")
parser.add_argument('--batch_size', type=int,
help="Batch size")
parser.add_argument('--prop', type=str,
help="Property to predict",
default=None)
parser.add_argument('--sub_batch_size', type=int,
help="Sub batch size",
default=None)
parser.add_argument('--metric', type=str,
help=("Select the model with the best validation "
"score on this metric. If no metric "
"is given, the metric used in the training "
"process will be used."),
default=None,
choices=METRIC_LIST)
parser.add_argument('--test_only', action='store_true',
help=("Only evaluate model "
"and generate fingerprints for "
"the test set"))
parser.add_argument('--train_only', action='store_true',
help=("Only evaluate model "
"and generate fingerprints for "
"the training set"))
parser.add_argument('--val_only', action='store_true',
help=("Only evaluate model "
"and generate fingerprints for "
"the validation set"))
parser.add_argument('--slurm_parallel', action='store_true',
help=("Use slurm to evaluate model predictions "
"in parallel over different nodes."))
parser.add_argument('--config_file', type=str,
help=("Path to JSON file with arguments."))
args = parse_args(parser)
kwargs = args.__dict__
main(kwargs)
```
#### File: transfer/run_cp/predict.py
```python
import os
import json
import argparse
import numpy as np
from nff.utils import (bash_command, parse_args, read_csv,
fprint, CHEMPROP_METRICS, apply_metric)
def is_model_path(cp_model_path):
"""
Check to see if a directory is actually a model path.
Args:
cp_model_path (str): path to folder
Returns:
check_paths (list[str]): paths to the different model checkpoints
is_model (bool): whether it's really a model path
"""
# get the paths of all the models saved with different initial random seeds
check_names = [i for i in os.listdir(cp_model_path)
if i.startswith("fold_") and i.split("_")[-1].isdigit()]
# sort by order
check_names = sorted(check_names, key=lambda x: int(x.split("_")[-1]))
check_paths = [os.path.join(cp_model_path, name, "model_0/model.pt")
for name in check_names]
is_model = len(check_paths) != 0
return check_paths, is_model
def predict(cp_folder,
test_path,
cp_model_path,
device,
check_paths):
"""
Get and save the prediction results from a ChemProp model.
Args:
cp_folder (str): path to the chemprop folder on your computer
test_path (str): path to the file with the test SMILES and their properties
cp_model_path (str): path to the folder with the model of interest
device (Union[str, int]): device to evaluate the model on
check_paths (list[str]): paths to the different model checkpoints
Returns:
real (dict): dictionary of the form {prop: real}, where `real`
are the real values of the property `prop`.
preds (list[dict]): same as `real`, but with predicted values; one for each
model.
"""
script = os.path.join(cp_folder, "predict.py")
preds_path = os.path.join(cp_model_path, f"test_pred.csv")
# load the arguments from that model to get the features path
args_path = f"{cp_model_path}/fold_0/args.json"
if not os.path.isfile(args_path):
args_path = args_path.replace("fold_0/", "")
with open(args_path, "r") as f:
args = json.load(f)
features_path = args["separate_test_features_path"]
# predictions from different models
preds = []
for i, check_path in enumerate(check_paths):
# make the chemprop command
this_path = preds_path.replace(".csv", f"_{i}.csv")
cmd = (f"source activate chemprop && python {script} "
f" --test_path {test_path} --preds_path {this_path} "
f" --checkpoint_paths {check_path} ")
if device == "cpu":
cmd += f" --no_cuda"
else:
cmd += f" --gpu {device} "
if features_path is not None:
feat_str = " ".join(features_path)
cmd += f" --features_path {feat_str}"
p = bash_command(cmd)
p.wait()
pred = read_csv(this_path)
preds.append(pred)
real = read_csv(test_path)
return real, preds
def get_metrics(actual_dic, pred_dics, metrics, cp_model_path):
"""
Get all requested metric scores for a set of predictions and save
to a JSON file.
Args:
actual_dic (dict): dictionary of the form {prop: real}, where `real` are the
real values of the property `prop`.
pred_dics (list[dict]): list of dictionaries, each the same as `real` but
with values predicted by each different model.
metrics (list[str]): metrics to apply
cp_model_path (str): path to the folder with the model of interest
Returns:
None
"""
overall_dic = {}
for i, pred_dic in enumerate(pred_dics):
metric_dic = {}
for prop in pred_dic.keys():
if prop == "smiles":
continue
actual = actual_dic[prop]
pred = pred_dic[prop]
metric_dic[prop] = {}
for metric in metrics:
score = apply_metric(metric, pred, actual)
metric_dic[prop][metric] = score
overall_dic[str(i)] = metric_dic
props = [prop for prop in pred_dic.keys() if prop != 'smiles']
overall_dic['average'] = {prop: {} for prop in props}
sub_dics = [val for key, val in overall_dic.items() if key != 'average']
for prop in props:
for key in sub_dics[0][prop].keys():
vals = [sub_dic[prop][key] for sub_dic in sub_dics]
mean = np.mean(vals).item()
std = np.std(vals).item()
overall_dic['average'][prop][key] = {"mean": mean, "std": std}
save_path = os.path.join(cp_model_path, f"test_metrics.json")
with open(save_path, "w") as f:
json.dump(overall_dic, f, indent=4, sort_keys=True)
fprint(f"Saved metric scores to {save_path}")
def main(model_folder_cp,
cp_folder,
test_path,
device,
metrics,
**kwargs):
"""
Get predictions for all models and evaluate with a set of metrics.
Args:
model_folder_cp (str): directory in which all the model folders
can be found
cp_folder (str): path to the chemprop folder on your computer
test_path (str): path to the file with the test SMILES and their properties
device (Union[str, int]): device to evaluate the model on
metrics (list[str]): metrics to apply
Returns:
None
"""
folders = os.listdir(model_folder_cp)
# go through each folder
for folder in folders:
cp_model_path = os.path.join(model_folder_cp,
folder)
# continue if it's a file not a folder
if not os.path.isdir(cp_model_path):
continue
check_paths, is_model = is_model_path(cp_model_path)
if not is_model:
continue
# make predictions
real, preds = predict(cp_folder=cp_folder,
test_path=test_path,
cp_model_path=cp_model_path,
device=device,
check_paths=check_paths)
# get and save metric scores
get_metrics(real, preds, metrics, cp_model_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_folder_cp", type=str,
help=("Folder in which you will train your "
"ChemProp model. Models with different "
"parameters will get their own folders, "
"each located in `model_folder_cp`."))
parser.add_argument("--cp_folder", type=str,
help=("Path to ChemProp folder."))
parser.add_argument("--test_path", type=str,
help=("Path to the CSV with test set SMILES "
"and their actual property values"))
parser.add_argument("--device", type=str,
help=("Device to use for model evaluation: "
"either the index of the GPU, "
"or 'cpu'. "))
parser.add_argument("--metrics", type=str, nargs="+",
default=None, choices=CHEMPROP_METRICS,
help=("Optional metrics with which you want to "
"evaluate predictions."))
parser.add_argument('--config_file', type=str,
help=("Path to JSON file with arguments. If given, "
"any arguments in the file override the command "
"line arguments."))
args = parse_args(parser)
main(**args.__dict__)
```
#### File: tutorials/plotting/utils.py
```python
import numpy as np
import torch
import matplotlib.pyplot as plt
def make_plot(key_pairs, results, targets, target_dic=None):
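# Parity plots of predicted vs. target values (e.g. energies and force
# gradients), one pair of panels per entry in `key_pairs`. Note that
# `correspondence_keys`, used below for result keys whose target key has a
# different name, is assumed to be defined by the calling notebook.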
all_keys = np.array(key_pairs).reshape(-1)
units = dict()
for key in all_keys:
if "grad" in key:
units[key] = r'kcal/mol/$\AA$'
else:
units[key] = 'kcal/mol'
if target_dic is None:
target_dic = {key: key for key in all_keys}
for i in range(len(key_pairs)):
fig, ax_fig = plt.subplots(1, 2, figsize=(12, 6))
for ax, key in zip(ax_fig, key_pairs[i]):
if key not in targets.keys():
targ_key = correspondence_keys[key]
else:
targ_key = key
pred = (torch.cat(results[key])).reshape(-1).cpu().detach().numpy()
target_key = target_dic[key]
try:
targ = (torch.cat(targets[targ_key])).reshape(-1).cpu().detach().numpy()
except:
targ = (torch.stack(targets[targ_key])).reshape(-1).cpu().detach().numpy()
ax.scatter(pred, targ, color='#ff7f0e', alpha=0.3)
mae = np.mean(abs(pred-targ))
if "grad" in key:
these_units = r"kcal/mol/$\AA$"
else:
these_units = r"kcal/mol"
plt.text(0.1, 0.75, "MAE = {} {}".format(str(round(mae, 1)), these_units),
transform=ax.transAxes, fontsize=14)
lim_min = min(np.min(pred), np.min(targ)) * 1.1
lim_max = max(np.max(pred), np.max(targ)) * 1.1
ax.set_xlim(lim_min, lim_max)
ax.set_ylim(lim_min, lim_max)
ax.set_aspect('equal')
ax.plot((lim_min, lim_max),
(lim_min, lim_max),
color='#000000',
zorder=-1,
linewidth=0.5)
ax.set_title(key.upper(), fontsize=14)
ax.set_xlabel('predicted %s (%s)' % (key, units[key]), fontsize=12)
ax.set_ylabel('target %s (%s)' % (key, units[key]), fontsize=12)
``` |
{
"source": "jkarakas/Are-Data-Scientists-happier-than-other-developers-",
"score": 3
} |
#### File: jkarakas/Are-Data-Scientists-happier-than-other-developers-/helper_functions.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import defaultdict
from IPython.core.display import HTML
def hist_box_plot(x, x_label, y_label, bin_incr):
    '''Take an array as input and draw a histogram with a boxplot above it'''
f, (ax_box, ax_hist) = plt.subplots(2,
sharex=True,
gridspec_kw={
"height_ratios": (.15, .85)},
figsize=(14, 6))
sns.boxplot(x, ax=ax_box)
bins = np.arange(0, x.max() + bin_incr, bin_incr)
x.hist(grid=False, bins=bins)
ax_box.set(yticks=[])
ax_hist.set_ylabel(y_label)
ax_hist.set_xlabel(x_label)
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
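# Illustrative call (hypothetical dataframe column):
#   hist_box_plot(df['ConvertedSalary'].dropna(), x_label='Annual salary (USD)',
#                 y_label='Count', bin_incr=10000)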
def get_description(column_name, schema):
'''Returns decription on column based on data schema
Parameters
----------
column_name : string
        the desired column to return the description for
    schema : pandas.DataFrame
        the dataframe containing the data schema to be parsed
Returns
-------
desc : string
the description of the column
'''
return schema[schema.Column == column_name].Question.values[0]
def print_perc_nans(df, col):
'''Prints percentage of NaNs in a pandas series'''
print(f'Percentage of NaNs in {col}: ',
round(df[col].isna().mean() * 100, 2),
'%')
def group(df, group_col, val_col):
    '''Group by group_col and return the percentage breakdown of val_col'''
props = (df.groupby([group_col])[val_col]
.value_counts(normalize=True)
.rename('percentage')
.mul(100)
.reset_index()
.sort_values(val_col))
return props
def group_plot(df, group_col, val_col, prop=True, orient='h', figsize=(14, 8)):
    '''group by group_col and val_col and plot a barplot'''
    plt.figure(figsize=figsize)
props = (df.groupby([group_col])[val_col]
.value_counts(normalize=True)
.rename('percentage')
.mul(100)
.reset_index()
.sort_values(val_col))
order=['Data Science Developer', 'Other Developer']
if orient == 'h':
p = sns.barplot(y=val_col, x='percentage', hue=group_col, hue_order=order,
estimator=np.mean, data=props)
else:
p = sns.barplot(x=val_col, y='percentage', hue=group_col,
hue_order=order, estimator=np.mean, data=props)
plt.legend(title='')
sns.despine(top=True, right=True, left=False, bottom=False);
def Groupby_OneCol_comp_plot(df, col, plt_style = 'seaborn-ticks', color_palette = "pastel", title=''):
'''
    Group by col, sort by size, then return and plot the grouped dataframe with a bar and a pie plot
'''
opacity=0.85
gr=pd.DataFrame()
gr['{} No'.format(col)] = df.groupby(col).size()
gr['{} Ratio'.format(col)] = np.round(gr['{} No'.format(col)].divide(gr['{} No'.format(col)].sum())*100,0)
print ('Total No. of {}:{}'.format(col,gr['{} No'.format(col)].sum()))
plt.style.use(plt_style)
sns.set_palette(sns.color_palette(color_palette))
fig=plt.figure()
plt.axis('off')
fig.add_subplot(121)
ax=gr['{} No'.format(col)].plot(kind='bar', title='{} Counts'.format(title), figsize=(16,8),
color=sns.color_palette(),
alpha=opacity)
_ = plt.setp(ax.get_xticklabels(), rotation=0)
for p in ax.patches: ax.annotate(np.round(p.get_height(),decimals=2),
(p.get_x()+p.get_width()/2., p.get_height()),
ha='center', va='center', xytext=(0, 10), textcoords='offset points')
ax.get_yaxis().set_ticks([])
plt.xlabel('')
fig.add_subplot(122)
plt.axis('off')
p = gr.loc[:,'{} Ratio'.format(col)].plot(kind= 'pie',
autopct='%1.1f%%',shadow=False,
title='{} Ratio'.format(title), legend=False, labels=None);
sns.despine(top=True, right=True, left=True, bottom=False);
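# Illustrative call (hypothetical column name):
#   Groupby_OneCol_comp_plot(df, 'EmploymentStatus', title='Employment Status')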
``` |
{
"source": "jkarimi91/cliggle",
"score": 2
} |
#### File: cliggle/cliggle/cli.py
```python
from __future__ import absolute_import
import click
from .helpers import download
from .helpers import get_competition_list
from .helpers import login_user
from .helpers import shorten
from .helpers import submit
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
def cliggle():
"""Cliggle: a CLI for Kaggle competitions."""
pass
@click.command('list')
def list_competitions():
"""List the current competition titles.
Note: we use a shortened title for ease of use. Specifically,
we use the first word, of the full title, lower cased and stripped
of all non-alphanumeric characters.
"""
comps = get_competition_list()
titles = [c['competitionTitle'] for c in comps]
titles = '\n'.join(shorten(t) for t in titles)
click.echo(titles)
@click.command('download')
@click.argument('title')
@click.option('-u', '--username', prompt=True, help='Kaggle username.')
@click.option('-p', '--password', prompt=True, hide_input=True, help='Kaggle password.')
def download_files(title, username, password):
"""Download the data files for a competition."""
titles = [shorten(c['competitionTitle']) for c in get_competition_list()]
if title not in titles:
raise click.ClickException('Invalid title.')
competition_url = [c['competitionUrl'] for c in get_competition_list()][titles.index(title)]
session = login_user(username, password)
download(competition_url, session)
@click.command('submit')
@click.argument('title')
@click.argument('filename')
@click.option('-m', '--message', help='A description of the submission.')
@click.option('-u', '--username', prompt=True, help='Kaggle username.')
@click.option('-p', '--password', prompt=True, hide_input=True, help='Kaggle password.')
def submit_predictions(title, filename, message, username, password):
"""Submit predictions for a competition."""
titles = [shorten(c['competitionTitle']) for c in get_competition_list()]
if title not in titles:
raise click.ClickException('Invalid title.')
competition_url = [c['competitionUrl'] for c in get_competition_list()][titles.index(title)]
session = login_user(username, password)
submit(filename, message, competition_url, session)
cliggle.add_command(submit_predictions)
cliggle.add_command(download_files)
cliggle.add_command(list_competitions)
if __name__ == '__main__':
cliggle()
```
#### File: cliggle/cliggle/helpers.py
```python
import json
import os
import re
import click
import requests
import tqdm
BASE_URL = 'https://www.kaggle.com'
def get_competition_list():
response = requests.get(BASE_URL + '/competitions')
pattern = r'\"competitions\":(\[.+?\])'
return get_json(response.text, pattern)
def get_file_list(competition_url, session):
response = session.get(BASE_URL + competition_url + '/data')
pattern = r'\"files\":(\[.+?\])'
return get_json(response.text, pattern)
def get_team(competition_url, session):
response = session.get(BASE_URL + competition_url)
pattern = r'\"team\":({.+?}),'
return get_json(response.text, pattern)
def get_json(text, pattern):
pattern = re.compile(pattern)
match = re.findall(pattern, text)[0]
return json.loads(match)
def shorten(title):
word = title.split()[0]
return ''.join(ch for ch in word.lower() if ch.isalnum())
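# For example, shorten('Digit Recognizer') returns 'digit'.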
def login_user(username, password):
session = requests.session()
url = BASE_URL + '/account/login'
data = {
'UserName': username,
'Password': password
}
response = session.post(url, data=data)
if response.url == url:
raise click.ClickException('Incorrect username/password.')
return session
def has_accepted_rules(competition_url, session):
url = BASE_URL + competition_url
response = session.get(url)
pattern = r'\"hasAcceptedRules\":(true|false)'
return get_json(response.text, pattern)
def remaining_daily_submissions(competition_url, session):
url = BASE_URL + competition_url
response = session.get(url)
pattern = r'"remainingDailySubmissions":(\d+)'
return get_json(response.text, pattern)
def download(competition_url, session):
if not has_accepted_rules(competition_url, session):
raise click.ClickException('Accept competition rules to continue.')
for cf in get_file_list(competition_url, session):
url = BASE_URL + cf['url']
response = session.get(url, stream=True)
with open(cf['name'], 'wb') as f:
kwargs = {
'total': int(response.headers['content-length']),
'unit': 'B',
'unit_scale': True,
'desc': cf['name']
}
with tqdm.tqdm(**kwargs) as progress_bar:
chunk_size = 10 ** 6 # 1 MB
content = response.iter_content(chunk_size=chunk_size)
for chunk in content:
progress_bar.update(len(chunk))
f.write(chunk)
def submit(filename, message, competition_url, session):
if not has_accepted_rules(competition_url, session):
raise click.ClickException('Accept competition rules to continue.')
if remaining_daily_submissions(competition_url, session) == 0:
raise click.ClickException('Max number of daily submissions reached. Try again later.')
data = {
'fileName': filename,
'contentLength': os.path.getsize(filename),
'lastModifiedDateUtc': os.path.getmtime(filename)
}
response = session.post(BASE_URL + '/blobs/inbox/submissions', data=data)
file_upload_url = response.json()['createUrl']
files = {'file': (filename, open(filename, 'rb'))}
response = session.post(BASE_URL + file_upload_url, files=files)
blob_file_token = response.json()['token']
# Initialize status.json aka submission status check.
# Note: must initialize status.json before making submission.
team_id = get_team(competition_url, session)['id']
api_version = 1
submission_id = 'null'
competition_id = [c for c in get_competition_list() if c['competitionUrl'] == competition_url][0]['competitionId']
all_submissions_url = '{}/c/{}//submissions.json?sortBy=date&group=all&page=1'.format(BASE_URL, competition_id)
last_submission_id = session.get(all_submissions_url).json()[0]['id']
status_url_str = '{}{}/submissions/status.json?apiVersion={}&teamId={}&submissionId={}&greaterThanSubmissionId={}'
status_url = status_url_str.format(BASE_URL, competition_url, api_version,
team_id, submission_id, last_submission_id)
session.get(status_url)
data = {
'blobFileTokens': [blob_file_token],
'submissionDescription': message
}
session.post(BASE_URL + competition_url + '/submission.json', data=data)
response = session.get(status_url)
submission_id = response.json()['id']
status_url = status_url_str.format(BASE_URL, competition_url, api_version,
team_id, submission_id, last_submission_id)
response = session.get(status_url)
while response.json()['submissionStatus'] == 'pending':
response = session.get(status_url)
click.echo('Submission {}.'.format(response.json()['submissionStatus']))
```
#### File: cliggle/tests/test_cliggle.py
```python
from __future__ import absolute_import
import os
from builtins import object
from builtins import range
from click.testing import CliRunner
from cliggle.cli import cliggle
from .credentials import PASSWORD
from .credentials import USERNAME
os.chdir(os.path.dirname(__file__))
class TestList(object):
def test_output(self):
runner = CliRunner()
result = runner.invoke(cliggle, args=['list'])
assert result.exception is None
assert 'digit' in result.output
class TestDownload(object):
def test_incorrect_login(self):
runner = CliRunner()
result = runner.invoke(cliggle, args=['download', 'titanic', '-u foo', '-p bar'])
assert result.exception is not None
assert 'Incorrect username/password.' in result.output
def test_invalid_title(self):
runner = CliRunner()
result = runner.invoke(cliggle, args=['download', 'foobar', '-u foo', '-p bar'])
assert result.exception is not None
assert 'Invalid title.' in result.output
def test_not_accepted_rules(self):
runner = CliRunner()
result = runner.invoke(cliggle, args=['download', 'titanic'], input='\n'.join([USERNAME, PASSWORD]))
assert result.exception is not None
assert 'Accept competition rules to continue.' in result.output
def test_successful_download(self):
runner = CliRunner()
result = runner.invoke(cliggle, args=['download', 'digit'], input='\n'.join([USERNAME, PASSWORD]))
assert result.exception is None
assert os.path.isfile('train.csv')
assert os.path.getsize('train.csv') == 76775041
files = ['sample_submission.csv', 'train.csv', 'test.csv']
for f in files:
os.remove(f)
class TestSubmit(object):
def test_incorrect_login(self):
runner = CliRunner()
result = runner.invoke(cliggle, args=['submit', 'digit', 'foobar.txt', '-u foo', '-p bar'])
assert result.exception is not None
assert 'Incorrect username/password.' in result.output
def test_invalid_title(self):
runner = CliRunner()
result = runner.invoke(cliggle, args=['submit', 'foobar', 'foobar.txt', '-u foo', '-p bar'])
assert result.exception is not None
assert 'Invalid title.' in result.output
def test_not_accepted_rules(self):
runner = CliRunner()
args = ['submit', 'titanic', 'foobar.txt']
result = runner.invoke(cliggle, args=args, input='\n'.join([USERNAME, PASSWORD]))
assert result.exception is not None
assert 'Accept competition rules to continue.' in result.output
def test_no_remaining_submissions(self):
filename = create_submission('ImageId', 'Label', (1, 28000))
runner = CliRunner()
args = ['submit', 'digit', filename, '-m testing cliggle']
result = runner.invoke(cliggle, args=args, input='\n'.join([USERNAME, PASSWORD]))
while result.exception is None:
result = runner.invoke(cliggle, args=args, input='\n'.join([USERNAME, PASSWORD]))
os.remove(filename)
assert 'Max number of daily submissions reached. Try again later.' in result.output
def test_unsuccessful_submission(self):
filename = create_submission('ImageId', 'Label', (1, 28000))
runner = CliRunner()
args = ['submit', 'house', filename, '-m testing cliggle']
result = runner.invoke(cliggle, args=args, input='\n'.join([USERNAME, PASSWORD]))
os.remove(filename)
assert result.exception is None
assert 'Submission error.' in result.output
def test_successful_submission(self):
filename = create_submission('Id', 'SalePrice', (1461, 2919))
runner = CliRunner()
args = ['submit', 'house', filename, '-m testing cliggle']
result = runner.invoke(cliggle, args=args, input='\n'.join([USERNAME, PASSWORD]))
os.remove(filename)
assert result.exception is None
assert 'Submission complete.' in result.output
def create_submission(id_label, prediction_label, id_range):
with open('submission.csv', 'w') as f:
f.write('{},{}\n'.format(id_label, prediction_label))
prediction = 0
min_id, max_id = id_range
for data_id in range(min_id, max_id + 1):
f.write('{},{}\n'.format(data_id, prediction))
return 'submission.csv'
``` |
{
"source": "jkarimi91/search-xkcd",
"score": 2
} |
#### File: management/commands/fetch_comics.py
```python
from django.conf import settings
from django.core.management.base import BaseCommand
import os
import re
import requests
from sklearn.externals import joblib
class Command(BaseCommand):
help = 'fetches xkcd comics'
def handle(self, *args, **options):
if not os.path.exists(settings.DATA_DIR):
os.makedirs(settings.DATA_DIR)
comics_file = os.path.join(settings.DATA_DIR, 'comics.p')
if not os.path.exists(comics_file):
comics = fetch_comics(1)
else:
comics = joblib.load(comics_file)
comic_num = get_next_comic_num(comics[-1]['num'])
comics.extend(fetch_comics(comic_num))
joblib.dump(comics, comics_file)
self.stdout.write(self.style.SUCCESS('Successfully fetched comics'))
def get_comic_json(comic_num):
homepage = 'http://xkcd.com'
filename = 'info.0.json'
url = os.path.join(homepage, comic_num, filename)
r = requests.get(url)
return r.json()
def get_next_comic_num(comic_num):
comic_html = get_comic_html(comic_num)
pattern = r'<a rel="next" href="/(\d+)/" accesskey="n">Next ></a>'
pattern = re.compile(pattern)
matches = pattern.search(comic_html)
return None if matches is None else matches.group(1)
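# The pattern above matches markup of the form
#   <a rel="next" href="/614/" accesskey="n">Next ></a>
# in which case group(1) yields the next comic number, e.g. "614".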
def get_comic_html(comic_num):
homepage = 'http://xkcd.com'
url = os.path.join(homepage, str(comic_num))
r = requests.get(url)
return r.content
def fetch_comics(comic_num):
if comic_num is not None:
comic_num = str(comic_num)
comics = []
while comic_num is not None:
comic_json = get_comic_json(comic_num)
comics.append(comic_json)
comic_num = get_next_comic_num(comic_num)
return comics
```
#### File: management/commands/fit_model.py
```python
from django.conf import settings
from django.core.management.base import BaseCommand
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import os
from sklearn.externals import joblib
from sklearn.feature_extraction.text import TfidfVectorizer
from string import punctuation
class Command(BaseCommand):
help = 'fit the search engine model'
def handle(self, *args, **options):
comics = joblib.load(os.path.join(settings.DATA_DIR, 'comics.p'))
text = [extract_text(c) for c in comics]
model = TfidfVectorizer(tokenizer=tokenize, ngram_range=(1, 2))
tfidf = model.fit_transform(text)
joblib.dump(model, os.path.join(settings.DATA_DIR, 'model.p'))
joblib.dump(tfidf, os.path.join(settings.DATA_DIR, 'tfidf.p'))
self.stdout.write(self.style.SUCCESS('Successfully fitted model'))
def extract_text(comic):
text = [comic['transcript']]
if comic['alt'] not in comic['transcript']:
text.append(comic['alt'])
if comic['title'] not in comic['transcript']:
text.append(comic['title'])
return ' '.join(text)
def tokenize(text):
tokens = word_tokenize(text.lower())
stemmer = PorterStemmer()
stop_words = stopwords.words('english')
exclude = set(punctuation).union(set(stop_words))
return [stemmer.stem(t) for t in tokens if t not in exclude]
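# Illustrative behaviour: tokenize('The cats are running!') should yield
# ['cat', 'run'] -- stopwords and punctuation are dropped, the rest stemmed.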
``` |
{
"source": "jkariukidev/geemap",
"score": 3
} |
#### File: geemap/geemap/chart.py
```python
import pandas as pd
from bqplot import Tooltip
from bqplot import pyplot as plt
from .common import ee_to_df
def feature_byFeature(features, xProperty, yProperties, **kwargs):
"""Generates a Chart from a set of features. Plots the value of one or more properties for each feature.
Reference: https://developers.google.com/earth-engine/guides/charts_feature#uichartfeaturebyfeature
Args:
features (ee.FeatureCollection): The feature collection to generate a chart from.
xProperty (str): Features labeled by xProperty.
yProperties (list): Values of yProperties.
Raises:
Exception: Errors when creating the chart.
"""
try:
df = ee_to_df(features)
if "ylim" in kwargs:
min_value = kwargs["ylim"][0]
max_value = kwargs["ylim"][1]
else:
min_value = df[yProperties].to_numpy().min()
max_value = df[yProperties].to_numpy().max()
max_value = max_value + 0.2 * (max_value - min_value)
if "title" not in kwargs:
title = ""
else:
title = kwargs["title"]
if "legend_location" not in kwargs:
legend_location = "top-left"
else:
legend_location = kwargs["legend_location"]
x_data = list(df[xProperty])
y_data = df[yProperties].values.T.tolist()
plt.bar(x_data, y_data)
fig = plt.figure(
title=title,
legend_location=legend_location,
)
if "width" in kwargs:
fig.layout.width = kwargs["width"]
if "height" in kwargs:
fig.layout.height = kwargs["height"]
if "labels" in kwargs:
labels = kwargs["labels"]
else:
labels = yProperties
if "display_legend" not in kwargs:
display_legend = True
else:
display_legend = kwargs["display_legend"]
bar_chart = plt.bar(
x_data, y_data, labels=labels, display_legend=display_legend
)
bar_chart.type = "grouped"
if "colors" in kwargs:
bar_chart.colors = kwargs["colors"]
if "xlabel" in kwargs:
plt.xlabel(kwargs["xlabel"])
if "ylabel" in kwargs:
plt.ylabel(kwargs["ylabel"])
plt.ylim(min_value, max_value)
if "xlabel" in kwargs and ("ylabel" in kwargs):
bar_chart.tooltip = Tooltip(
fields=["x", "y"], labels=[kwargs["xlabel"], kwargs["ylabel"]]
)
else:
bar_chart.tooltip = Tooltip(fields=["x", "y"])
plt.show()
except Exception as e:
raise Exception(e)
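# Illustrative call (hypothetical asset and property names):
#   fc = ee.FeatureCollection('projects/example/assets/ecoregions')
#   feature_byFeature(fc, xProperty='label', yProperties=['01_tmean', '02_tmean'],
#                     title='Average monthly temperature', xlabel='Ecoregion',
#                     ylabel='Temperature (C)', colors=['#1d6b99', '#cf513e'])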
def feature_byProperty(features, xProperties, seriesProperty, **kwargs):
"""Generates a Chart from a set of features. Plots property values of one or more features.
Reference: https://developers.google.com/earth-engine/guides/charts_feature#uichartfeaturebyproperty
Args:
features (ee.FeatureCollection): The features to include in the chart.
xProperties (list | dict): One of (1) a list of properties to be plotted on the x-axis; or (2) a (property, label) dictionary specifying labels for properties to be used as values on the x-axis.
seriesProperty (str): The name of the property used to label each feature in the legend.
Raises:
Exception: If the provided xProperties is not a list or dict.
Exception: If the chart fails to create.
"""
try:
df = ee_to_df(features)
if isinstance(xProperties, list):
x_data = xProperties
y_data = df[xProperties].values
elif isinstance(xProperties, dict):
x_data = list(xProperties.values())
y_data = df[list(xProperties.keys())].values
else:
raise Exception("xProperties must be a list or dictionary.")
labels = list(df[seriesProperty])
if "ylim" in kwargs:
min_value = kwargs["ylim"][0]
max_value = kwargs["ylim"][1]
else:
min_value = y_data.min()
max_value = y_data.max()
max_value = max_value + 0.2 * (max_value - min_value)
if "title" not in kwargs:
title = ""
else:
title = kwargs["title"]
if "legend_location" not in kwargs:
legend_location = "top-left"
else:
legend_location = kwargs["legend_location"]
if "display_legend" not in kwargs:
display_legend = True
else:
display_legend = kwargs["display_legend"]
fig = plt.figure(
title=title,
legend_location=legend_location,
)
if "width" in kwargs:
fig.layout.width = kwargs["width"]
if "height" in kwargs:
fig.layout.height = kwargs["height"]
bar_chart = plt.bar(
x=x_data, y=y_data, labels=labels, display_legend=display_legend
)
bar_chart.type = "grouped"
if "colors" in kwargs:
bar_chart.colors = kwargs["colors"]
if "xlabel" in kwargs:
plt.xlabel(kwargs["xlabel"])
if "ylabel" in kwargs:
plt.ylabel(kwargs["ylabel"])
plt.ylim(min_value, max_value)
if "xlabel" in kwargs and ("ylabel" in kwargs):
bar_chart.tooltip = Tooltip(
fields=["x", "y"], labels=[kwargs["xlabel"], kwargs["ylabel"]]
)
else:
bar_chart.tooltip = Tooltip(fields=["x", "y"])
plt.show()
except Exception as e:
raise Exception(e)
def feature_groups(features, xProperty, yProperty, seriesProperty, **kwargs):
"""Generates a Chart from a set of features.
Plots the value of one property for each feature.
Reference:
https://developers.google.com/earth-engine/guides/charts_feature#uichartfeaturegroups
Args:
features (ee.FeatureCollection): The feature collection to make a chart from.
xProperty (str): Features labeled by xProperty.
yProperty (str): Features labeled by yProperty.
seriesProperty (str): The property used to label each feature in the legend.
Raises:
Exception: Errors when creating the chart.
"""
try:
df = ee_to_df(features)
df[yProperty] = pd.to_numeric(df[yProperty])
unique_series_values = df[seriesProperty].unique().tolist()
new_column_names = []
for value in unique_series_values:
sample_filter = (df[seriesProperty] == value).map({True: 1, False: 0})
column_name = str(yProperty) + "_" + str(value)
df[column_name] = df[yProperty] * sample_filter
new_column_names.append(column_name)
if "labels" in kwargs:
labels = kwargs["labels"]
else:
labels = [str(x) for x in unique_series_values]
if "ylim" in kwargs:
min_value = kwargs["ylim"][0]
max_value = kwargs["ylim"][1]
else:
min_value = df[yProperty].to_numpy().min()
max_value = df[yProperty].to_numpy().max()
max_value = max_value + 0.2 * (max_value - min_value)
if "title" not in kwargs:
title = ""
else:
title = kwargs["title"]
if "legend_location" not in kwargs:
legend_location = "top-left"
else:
legend_location = kwargs["legend_location"]
x_data = list(df[xProperty])
y_data = [df[x] for x in new_column_names]
plt.bar(x_data, y_data)
fig = plt.figure(
title=title,
legend_location=legend_location,
)
if "width" in kwargs:
fig.layout.width = kwargs["width"]
if "height" in kwargs:
fig.layout.height = kwargs["height"]
if "display_legend" not in kwargs:
display_legend = True
else:
display_legend = kwargs["display_legend"]
bar_chart = plt.bar(
x_data, y_data, labels=labels, display_legend=display_legend
)
if "colors" in kwargs:
bar_chart.colors = kwargs["colors"]
if "xlabel" in kwargs:
plt.xlabel(kwargs["xlabel"])
if "ylabel" in kwargs:
plt.ylabel(kwargs["ylabel"])
plt.ylim(min_value, max_value)
if "xlabel" in kwargs and ("ylabel" in kwargs):
bar_chart.tooltip = Tooltip(
fields=["x", "y"], labels=[kwargs["xlabel"], kwargs["ylabel"]]
)
else:
bar_chart.tooltip = Tooltip(fields=["x", "y"])
plt.show()
except Exception as e:
raise Exception(e)
def feature_histogram(
features, property, maxBuckets=None, minBucketWidth=None, **kwargs
):
"""
Generates a Chart from a set of features.
Computes and plots a histogram of the given property.
- X-axis = Histogram buckets (of property value).
- Y-axis = Frequency
Reference:
https://developers.google.com/earth-engine/guides/charts_feature#uichartfeaturehistogram
Args:
features (ee.FeatureCollection): The features to include in the chart.
property (str): The name of the property to generate the histogram for.
maxBuckets (int, optional): The maximum number of buckets (bins) to use when building a histogram;
will be rounded up to a power of 2.
minBucketWidth (float, optional): The minimum histogram bucket width, or null to allow any power of 2.
Raises:
Exception: If the provided xProperties is not a list or dict.
Exception: If the chart fails to create.
"""
import math
def nextPowerOf2(n):
return pow(2, math.ceil(math.log2(n)))
def grow_bin(bin_size, ref):
while bin_size < ref:
bin_size *= 2
return bin_size
try:
raw_data = pd.to_numeric(
pd.Series(features.aggregate_array(property).getInfo())
)
y_data = raw_data.tolist()
if "ylim" in kwargs:
min_value = kwargs["ylim"][0]
max_value = kwargs["ylim"][1]
else:
min_value = raw_data.min()
max_value = raw_data.max()
data_range = max_value - min_value
if not maxBuckets:
initial_bin_size = nextPowerOf2(data_range / pow(2, 8))
if minBucketWidth:
if minBucketWidth < initial_bin_size:
bin_size = grow_bin(minBucketWidth, initial_bin_size)
else:
bin_size = minBucketWidth
else:
bin_size = initial_bin_size
else:
initial_bin_size = math.ceil(data_range / nextPowerOf2(maxBuckets))
if minBucketWidth:
if minBucketWidth < initial_bin_size:
bin_size = grow_bin(minBucketWidth, initial_bin_size)
else:
bin_size = minBucketWidth
else:
bin_size = initial_bin_size
start_bins = (math.floor(min_value / bin_size) * bin_size) - (bin_size / 2)
end_bins = (math.ceil(max_value / bin_size) * bin_size) + (bin_size / 2)
if start_bins < min_value:
y_data.append(start_bins)
else:
y_data[y_data.index(min_value)] = start_bins
if end_bins > max_value:
y_data.append(end_bins)
else:
y_data[y_data.index(max_value)] = end_bins
num_bins = math.floor((end_bins - start_bins) / bin_size)
if "title" not in kwargs:
title = ""
else:
title = kwargs["title"]
fig = plt.figure(title=title)
if "width" in kwargs:
fig.layout.width = kwargs["width"]
if "height" in kwargs:
fig.layout.height = kwargs["height"]
if "xlabel" not in kwargs:
xlabel = ""
else:
xlabel = kwargs["xlabel"]
if "ylabel" not in kwargs:
ylabel = ""
else:
ylabel = kwargs["ylabel"]
histogram = plt.hist(
sample=y_data,
bins=num_bins,
axes_options={"count": {"label": ylabel}, "sample": {"label": xlabel}},
)
if "colors" in kwargs:
histogram.colors = kwargs["colors"]
if "stroke" in kwargs:
histogram.stroke = kwargs["stroke"]
else:
histogram.stroke = "#ffffff00"
if "stroke_width" in kwargs:
histogram.stroke_width = kwargs["stroke_width"]
else:
histogram.stroke_width = 0
if ("xlabel" in kwargs) and ("ylabel" in kwargs):
histogram.tooltip = Tooltip(
fields=["midpoint", "count"],
labels=[kwargs["xlabel"], kwargs["ylabel"]],
)
else:
histogram.tooltip = Tooltip(fields=["midpoint", "count"])
plt.show()
except Exception as e:
raise Exception(e)
def image_byClass(
image, classBand, region, reducer, scale, classLabels, xLabels, **kwargs
):
# TODO
pass
def image_byRegion(image, regions, reducer, scale, xProperty, **kwargs):
# TODO
pass
def image_doySeries(
imageCollection,
region,
regionReducer,
scale,
yearReducer,
startDay,
endDay,
**kwargs
):
# TODO
pass
def image_doySeriesByRegion(
imageCollection,
bandName,
regions,
regionReducer,
scale,
yearReducer,
seriesProperty,
startDay,
endDay,
**kwargs
):
# TODO
pass
def image_doySeriesByYear(
imageCollection,
bandName,
region,
regionReducer,
scale,
sameDayReducer,
startDay,
endDay,
**kwargs
):
# TODO
pass
def image_histogram(
image, region, scale, maxBuckets, minBucketWidth, maxRaw, maxPixels, **kwargs
):
# TODO
pass
def image_regions(image, regions, reducer, scale, seriesProperty, xLabels, **kwargs):
# TODO
pass
def image_series(imageCollection, region, reducer, scale, xProperty, **kwargs):
# TODO
pass
def image_seriesByRegion(
imageCollection, regions, reducer, band, scale, xProperty, seriesProperty, **kwargs
):
# TODO
pass
``` |
{
"source": "jkariukidev/personal_website",
"score": 2
} |
#### File: personal_website/accounts/views.py
```python
from django.shortcuts import get_object_or_404
from django.views import generic
from django.contrib.auth import views as auth_views
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from . import forms
from .models import Profile
class UserRegistrationView(generic.CreateView):
"""
User creation view.
"""
form_class = forms.AccountRegistrationForm
template_name = 'registration/register.html'
success_url = reverse_lazy('login')
class UserLoginView(auth_views.LoginView):
form_class = forms.UserLoginForm
success_url = reverse_lazy('dashboard')
class CreateProfilePageView(LoginRequiredMixin, generic.CreateView):
"""
Profile creation view.
"""
model = Profile
form_class = forms.ProfilePageForm
template_name = 'registration/create_user_profile.html'
success_url = reverse_lazy('accounts:dashboard')
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
class DashboardView(LoginRequiredMixin, generic.TemplateView):
template_name = 'registration/dashboard.html'
class ShowProfileView(LoginRequiredMixin, generic.DetailView):
"""
Profile details of registered user.
"""
model = Profile
template_name = 'registration/user_profile.html'
def get_context_data(self, *args, **kwargs):
context = super(ShowProfileView, self).get_context_data(*args, **kwargs)
page_user = get_object_or_404(Profile, id=self.kwargs['pk'])
context["page_user"] = page_user
return context
class EditProfilePageView(LoginRequiredMixin, generic.UpdateView):
model = Profile
form_class = forms.ProfilePageForm
template_name = 'registration/edit_profile_page.html'
success_url = reverse_lazy('accounts:edit_profile_success')
class UserEditView(LoginRequiredMixin, generic.UpdateView):
"""
Edit user profile.
"""
form_class = forms.ProfileEditForm
template_name = 'edit_user_profile.html'
success_url = reverse_lazy('accounts:dashboard')
def get_object(self, queryset=None):
return self.request.user
class UserEditSuccessView(generic.TemplateView):
template_name = 'registration/edit_profile_success.html'
class UserRegisterSuccessView(generic.TemplateView):
template_name = 'registration/register_done.html'
```
#### File: personal_website/website/models.py
```python
from django.db import models
from django.utils import timezone
from django.urls import reverse
from django.contrib.auth import get_user_model
from ckeditor.fields import RichTextField
from autoslug.fields import AutoSlugField
User = get_user_model()
class Post(models.Model):
"""Post model"""
STATUS_CHOICES = (
('draft', 'Draft'),
('published', 'Published'),
)
author = models.ForeignKey(User, on_delete=models.CASCADE)
title = models.CharField(max_length=50)
slug = AutoSlugField(populate_from='title', unique=True, editable=True)
header_image = models.ImageField(null=True, blank=False, upload_to="images")
body = RichTextField(blank=True, null=True)
published = models.DateTimeField(default=timezone.now)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
category = models.CharField(max_length=50, default='Uncategorized')
snippet = models.CharField(max_length=255, default='')
status = models.CharField(
max_length=10,
choices=STATUS_CHOICES,
default='draft'
)
def get_absolute_url(self):
"""
Return the canonical URL of the post.
"""
return reverse('website:article-detail', args=[self.slug])
class Meta:
ordering = ('-published',)
def __str__(self):
return self.title + ' | ' + str(self.author)
class PostComment(models.Model):
"""
Comment for blog posts.
"""
post = models.ForeignKey(
Post, on_delete=models.CASCADE, related_name='comments'
)
name = models.CharField(max_length=80)
email = models.EmailField()
body = models.TextField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
active = models.BooleanField(default=True)
class Meta:
ordering = ('created',)
def __str__(self):
return f'Comment by {self.name} on {self.post}'
class EmailMessage(models.Model):
email = models.EmailField(max_length=254)
subject = models.CharField(max_length=50)
message = models.TextField(max_length=300)
```
#### File: personal_website/website/views.py
```python
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.core.mail import send_mail
from django.shortcuts import get_object_or_404, render
from django.views.generic import (
TemplateView,
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView
)
from django.views.generic import FormView
from .models import Post, PostComment, EmailMessage
from .forms import PostForm, EmailPostForm, CommentForm, ContactForm
class HomePageView(TemplateView):
"""
Website landing page.
"""
template_name = 'website/home.html'
class PostsListView(ListView):
"""
Blog posts page.
"""
model = Post
template_name = 'website/blog.html'
ordering = ['-published']
# paginate_by = 5
class PostArticleView(DetailView):
"""
Post article page.
"""
model = Post
template_name = '../templates/website/article.html'
class AddCommentView(CreateView):
"""
Add comment to post article.
"""
model = PostComment
form_class = CommentForm
template_name = '../templates/website/article-comment.html'
def form_valid(self, form):
form.instance.post_id = self.kwargs['pk']
return super().form_valid(form)
def get_success_url(self):
return reverse_lazy(
'website:article-detail', kwargs={'slug': self.kwargs['slug']}
)
def post_share(request, post_slug):
"""
Share blog post.
"""
post = get_object_or_404(Post, slug=post_slug, status='published')
sent = False
if request.method == 'POST':
form = EmailPostForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
post_url = request.build_absolute_uri(
post.get_absolute_url()
)
subject = f"{cd['name']} would recommend reading f{post.title}"
message = f"Read {post.title} at {post_url}\n\n {cd['name']}\'s comments: {cd['comments']}"
send_mail(subject, message, '<EMAIL>', [cd['to']])
sent = True
else:
form = EmailPostForm()
return render(request, 'website/share.html',
{'post': post, 'form': form, 'sent': sent})
class AddPostView(LoginRequiredMixin, CreateView):
"""
Create new post article.
"""
model = Post
form_class = PostForm
template_name = 'website/post-new.html'
class UpdatePostView(LoginRequiredMixin, UpdateView):
"""
Update a given post article.
"""
model = Post
template_name = 'website/edit-post.html'
fields = ['title', 'header_image', 'body', 'status']
class DeletePostView(DeleteView):
"""
Delete a given post article.
"""
model = Post
template_name = 'website/delete-post.html'
success_url = reverse_lazy('website:blog')
class AboutPageView(TemplateView):
template_name = 'website/about.html'
class PortfolioPageView(TemplateView):
template_name = 'website/portfolio.html'
class ContactFormView(FormView):
template_name = 'website/contact.html'
form_class = ContactForm
def form_valid(self, form):
email = form.cleaned_data['from_email']
subject = form.cleaned_data['subject']
message = form.cleaned_data['message']
message = EmailMessage(email=email, subject=subject, message=message)
message.save()
return super().form_valid(form)
success_url = reverse_lazy('website:success')
class EmailSuccess(TemplateView):
template_name = 'website/success.html'
def category_view(request, cats):
"""
Category view
"""
category_posts = Post.objects.filter(category=cats.replace('-', ' '))
return render(
request,
'website/categories.html',
{'cats': cats.title().replace('-', ' '),
'category_posts': category_posts}
)
``` |
{
"source": "jkariukidev/simple_to-do_list",
"score": 3
} |
#### File: jkariukidev/simple_to-do_list/to_do_list.py
```python
import sys
import sqlite3
from sqlite3 import Error
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
QApplication,
QMainWindow,
QVBoxLayout,
QWidget,
QLineEdit,
QPushButton,
QGridLayout,
QListWidget,
QMessageBox
)
class ToDoDB:
"""
Container that handles the database operations.
"""
def __init__(self):
try:
# Create database connection
self._con = sqlite3.connect('to_do.db')
# Create SQLite3 cursor and create table
self._cursor = self._con.cursor()
self._cursor.execute(
"CREATE TABLE if not exists todo_tbl(todo_list_item text)"
)
# Commit changes and close connection.
self._con.commit()
        except Error as e:
            # Show message box with the error details; ToDoDB is not a widget,
            # so no parent widget is passed to the message box.
            QMessageBox.critical(
                None,
                'To Do List App',
                str(e)
            )
def fetch_items(self):
"""
Fetch items from the database.
"""
# Execute SQL query.
self._cursor.execute(
"SELECT * FROM todo_tbl"
)
# Fetch all items in the database
all_items = self._cursor.fetchall()
# Commit the changes to the database
self._con.commit()
self._con.close()
return all_items
def save_all_items(self, item):
"""
Save newly added to do items in the database.
:param item: To do item.
:type item: str
"""
self._cursor.execute(
"INSERT INTO todo_tbl VALUES (:item)", {'item': item.text()}
)
# Commit changes to the database.
self._con.commit()
self._con.close()
class ToDOApp(QMainWindow):
"""
To Do application user interface.
"""
def __init__(self, parent=None):
super().__init__(parent)
self.setWindowTitle('To Do Application')
self.setFixedSize(400, 400)
# Set the widgets
self.vbox_layout = QVBoxLayout()
self._central_widget = QWidget(self)
self.setCentralWidget(self._central_widget)
self._central_widget.setLayout(self.vbox_layout)
# Creating the user interface controls
self._init_ui()
# Load items for the database
self.get_db_items()
def _init_ui(self):
"""
Create user interface controls and properties.
"""
# Create line edit and set GUI properties
self.line_edit = QLineEdit()
self.line_edit.setPlaceholderText('Type here...')
self.line_edit.setFixedHeight(30)
self.line_edit.setAlignment(Qt.AlignLeft)
# Create buttons
self.add_btn = QPushButton('Add')
self.delete_btn = QPushButton('Delete')
self.save_btn = QPushButton('Save')
self.clear_btn = QPushButton('Clear')
self.close_btn = QPushButton('Close')
# Create button grid layout and add buttons
button_layout = QGridLayout()
button_layout.addWidget(self.add_btn, 0, 0)
button_layout.addWidget(self.delete_btn, 0, 2)
button_layout.addWidget(self.save_btn, 0, 1)
button_layout.addWidget(self.clear_btn, 0, 3)
button_layout.addWidget(self.close_btn, 0, 4)
# Add a list widget to show added/saved items
self.list_widget = QListWidget()
# Add widgets and layouts to the main layout.
self.vbox_layout.addWidget(self.line_edit)
self.vbox_layout.addLayout(button_layout)
self.vbox_layout.addWidget(self.list_widget)
# Connect the signals to their slots.
self._connect_signals()
def _connect_signals(self):
"""
Connect signals to their slots.
"""
self.add_btn.clicked.connect(self.on_add_item)
self.save_btn.clicked.connect(self.on_save_items)
self.delete_btn.clicked.connect(self.on_delete_item)
self.clear_btn.clicked.connect(self.on_clear_item)
self.close_btn.clicked.connect(self.on_close)
def on_add_item(self):
"""
        Slot raised when the add item button is clicked to add the item to the list
of to do items.
"""
item = self.line_edit.text()
# Check if the line edit is without text
        if item == " " or item == "":
            QMessageBox.warning(
                self,
                'To Do List App',
                'Cannot add an empty item'
            )
            return
        self.list_widget.addItem(item)
# Clear item entry.
self.line_edit.setText("")
def on_save_items(self):
"""
        Slot raised when the save button is clicked to save the to do items in
the database.
"""
items = []
for item in range(self.list_widget.count()):
items.append(self.list_widget.item(item))
if len(items) == 0:
QMessageBox.warning(
self,
'To Do List App',
'Cannot save empty item'
)
else:
for item in items:
ToDoDB().save_all_items(item)
self.list_widget.clear()
def get_db_items(self):
"""
        Get saved items fetched from the database.
"""
saved_items = ToDoDB().fetch_items()
for item in saved_items:
self.list_widget.addItem(item[0])
def on_delete_item(self):
"""
        Slot raised when the delete button is clicked to remove the selected
        item from the list of to do items.
"""
clicked = self.list_widget.currentRow()
self.list_widget.takeItem(clicked)
def on_clear_item(self):
"""
        Slot raised when the clear button is clicked to clear all items in
the list widget.
"""
self.list_widget.clear()
def on_close(self):
"""
Slot raised when close button is clicked.
"""
self.close()
def closeEvent(self, event):
"""
Initiates a closing event for the Window.
        :param event: the window close event
        :type event: QCloseEvent
"""
reply = QMessageBox.question(
self,
'To Do List App',
'Are you sure you want to quit the app?',
QMessageBox.Yes | QMessageBox.No, QMessageBox.No
)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
if __name__ == '__main__':
to_do_app = QApplication(sys.argv)
to_do_ui = ToDOApp()
to_do_ui.show()
sys.exit(to_do_app.exec())
``` |
{
"source": "jkarpen/DataSciDemo",
"score": 3
} |
#### File: jkarpen/DataSciDemo/nba_plot.py
```python
import sys
import csv
import argparse
import plotly.plotly as py
from plotly.graph_objs import Data, Layout, YAxis, XAxis
from plotly.graph_objs import Figure, Box, Marker, Line
POSITIONS = {
'PF': 'Power Forward',
'SG': 'Shooting Guard',
'C': 'Center',
'SF': 'Small Forward',
'PG': 'Point Guard',
}
def get_nba_csv():
"""
Get path to nba csv file
:return: path to nba csv
"""
parser = argparse.ArgumentParser()
parser.add_argument('nba_csv')
args = parser.parse_args()
return args.nba_csv
def get_ages_positions(nba_csv, yaxis_col, positions_col):
"""
Get yaxis and nba positions in file
:nba_csv: csv of nba data for 1 season
:yaxis_col: column number for yaxis values
:positions_col: column number for positions values
:return: tuple of a list of yaxis and list of nba positions
"""
yaxis = []
positions = []
with open(nba_csv, 'rb') as csv_handler:
# remove headers
csv_handler.readline()
reader = csv.reader(csv_handler)
for row in reader:
position_value = row[positions_col]
yaxis_value = row[yaxis_col]
if position_value in POSITIONS:
yaxis.append(yaxis_value)
positions.append(POSITIONS[position_value])
return yaxis, positions
def plot(yaxis_values, positions, yaxis_title,
xaxis_title, plot_title, box_name):
"""
Plot nba data
:ages: list of the ages of players
:positions: list of the positions
:yaxis_title: title of the yaxis
:xaxis_title: title of the xaxis
:plot_title: title of the plot
:box_name: name of the box
:return: None, data sent to plotly via API
"""
data = Data([
Box(
y=yaxis_values,
x=positions,
name=box_name,
boxmean=True,
boxpoints='all',
jitter=0.5,
whiskerwidth=0.5,
fillcolor='rgb(106, 168, 79)',
marker=Marker(
color='rgba(7, 55, 99, 0.5)',
size=4,
symbol='circle',
opacity=0.7
),
line=Line(
color='rgba(7, 55, 99, 0.5)',
width=2
),
opacity=1,
showlegend=False
)
])
layout = Layout(
title=plot_title,
showlegend=False,
autosize=True,
width=792,
height=469,
xaxis=XAxis(
title=xaxis_title,
range=[-0.6799999999999999, 6.5],
type='category',
autorange=True,
showexponent='all',
side='bottom'
),
yaxis=YAxis(
title=yaxis_title,
range=[17.944444444444443, 39.05555555555556],
type='linear',
autorange=True,
showexponent='all'
),
paper_bgcolor='rgb(255, 255, 255)',
plot_bgcolor='rgb(217, 217, 217)',
hovermode='closest',
boxmode='overlay',
boxgap=0.4,
boxgroupgap=0.4
)
fig = Figure(data=data, layout=layout)
py.plot(fig)
def main():
nba_csv = get_nba_csv()
yaxis, positions = get_ages_positions(nba_csv, 3, 2)
yaxis_title = 'Age'
xaxis_title = 'Positions'
plot_title = 'NBA Player Age by Position 2014-15 Season'
box_title = 'Age'
plot(yaxis, positions, yaxis_title, xaxis_title, plot_title, box_title)
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "JKarthaus/hc3-actor",
"score": 3
} |
#### File: JKarthaus/hc3-actor/piRelay.py
```python
try:
import RPi.GPIO as GPIO
except RuntimeError:
print(
"Error importing RPi.GPIO! This is probably because you need superuser privileges. You can achieve this by using 'sudo' to run your script")
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(True)
class Relay:
''' Class to handle Relay
Arguments:
relay = string Relay label (i.e. "RELAY1","RELAY2","RELAY3","RELAY4")
'''
relayGpio = {"RELAY1": 19, "RELAY2": 13, "RELAY3": 6, "RELAY4": 5}
def __init__(self, relay):
self.pin = self.relayGpio[relay]
self.relay = relay
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, GPIO.LOW)
def on(self):
print(self.relay + " - ON")
GPIO.output(self.pin, GPIO.HIGH)
def off(self):
print(self.relay + " - OFF")
GPIO.output(self.pin, GPIO.LOW)
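# Minimal usage sketch (illustration only, not part of the original script):
if __name__ == "__main__":
    import time
    relay = Relay("RELAY1")
    relay.on()
    time.sleep(1)
    relay.off()
    GPIO.cleanup()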
``` |
{
"source": "JKarthaus/RadioRaspi",
"score": 2
} |
#### File: RadioRaspi/hue/hueConnector.py
```python
import logging
import subprocess
import json
import glob
import os
import time
import signal
from phue import Bridge
from time import sleep
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self,signum, frame):
self.kill_now = True
# create logger
logger = logging.getLogger('hueConnector')
bridge = Bridge("192.168.2.119")
# If the app is not registered and the button is not pressed, press the button and call connect()
# (this only needs to be run a single time)
bridge.connect()
# Get the bridge state (This returns the full dictionary that you can explore)
bridge.get_api()
def parseActualSong ():
logger.info("check Actual Playing Song")
p = subprocess.Popen(["volumio", "status"], stdout=subprocess.PIPE)
parsed_json = json.loads(p.communicate()[0])
if parsed_json['status'] == "play" :
return parsed_json['title']
else :
return "NONE"
def parseHuePlaylistsForScene(songURI):
logger.info("Parsing playlists beginning with hue* and Song " + songURI)
for filename in glob.glob("/home/volumio/hue/playlist/hue_*"):
with open(filename) as playlist_file:
logger.info("Checking Filename:" + filename)
parsed_json = json.load(playlist_file)
for items in parsed_json:
if items["title"] == songURI :
scene = os.path.basename(filename)
scene = os.path.splitext(scene)[0]
scene = "rr_" + scene[4:]
return scene
return "NONE"
def checkGroupExists(groupName):
for groups in bridge.groups:
logger.debug("Found Group:" + groups.name)
if groups.name == groupName:
return True
logger.error("Group : " + groupName + " not found at hue Bridge")
return False
def checkSceneExists(sceneName):
for scenes in bridge.scenes:
logger.debug("Found Scene:" + scenes.name)
if scenes.name == sceneName:
return True
return False
def switchGroupOff(group):
if checkGroupExists(group):
bridge.set_group(group, 'on', False)
def switchGroupOn(group):
if checkGroupExists(group):
bridge.set_group(group, 'on', True)
def selectHueScene(newScene,group):
logger.info("Try to switch on Scene : "+ newScene + " on Group: " + group)
#if checkGroupExists(group) and checkSceneExists(newScene):
bridge.run_scene(group,newScene)
logger.info("Starting Scene :" + newScene + " to Group: " + group)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
killer = GracefulKiller()
logger.info("hueConnector up and running")
switchGroupOn("radioRaspi")
time.sleep(10)
while True :
if killer.kill_now:
logger.info("Service Shutdown requestet -> switch off hue group")
switchGroupOff("radioRaspi")
break
actualSong = parseActualSong()
if actualSong == "NONE":
selectHueScene("rr_pause","radioRaspi")
else:
parsedScene = parseHuePlaylistsForScene(actualSong)
if parsedScene == "NONE":
selectHueScene("rr_play","radioRaspi")
else:
selectHueScene(parsedScene,"radioRaspi")
time.sleep(3)
pass
``` |
{
"source": "jkarthic-akamai/ABR-Broadcaster",
"score": 2
} |
#### File: ABR-Broadcaster/wsgi-scripts/wc_codecs.py
```python
import subprocess
import re
import os
import sys
working_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(working_dir)
import wc_configdb as configdb
def refresh_codecs():
supported_codecs_list = ['libx264', 'h264_videotoolbox', 'libvpx-vp9', 'libx265']
device_name_query = "ffmpeg -encoders"
proc = subprocess.Popen(device_name_query, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
configdb.delete_config('Codecs')
for codec in supported_codecs_list:
if out.find(codec) != -1:
print codec
codec_cfg = {'Name' : codec}
configdb.insert_config(codec_cfg, 'Codecs')
def get_codecs():
codec_list = []
codec_list_db = configdb.get_config('Codecs')
if len(codec_list_db) == 0:
refresh_codecs()
codec_list_db = configdb.get_config('Codecs')
for codec in codec_list_db:
codec_list.append(codec['Name'])
return codec_list
if __name__ == "__main__":
refresh_codecs()
```
#### File: ABR-Broadcaster/wsgi-scripts/wc_input_source.py
```python
import os
import sys
import urlparse
working_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(working_dir)
import wc_capture as wc_capture
def add_input(input_config):
#TODO (rpatagar) add validation for input_config, test case also
parsed_url = urlparse.urlparse(input_config['input']['input_url'])
if False == (bool(parsed_url.scheme)):
msg = ' Invalid input_url ' + str(input_config['input']['input_url'])
return 400, msg
input_config['input']['input_interface'] = wc_capture.INPUT_INTERFACE_URL
statuscode, msg = wc_capture.add_input_source(input_config)
return statuscode, msg
def remove_input(input_src_id=None):
ret_status, ret_reason = wc_capture.remove_input_source(input_src_id)
return ret_status, ret_reason
```
#### File: ABR-Broadcaster/wsgi-scripts/wc_restore_old_config.py
```python
import httplib
import subprocess
import time
import os
import sys
import traceback
import re
import datetime
import shlex
import psutil
import json
working_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(working_dir)
import wc_store_load_input_cfg as store_load_input_cfg
import wc_capture as wc_capture
def log_write(log_str):
try:
fp = open('/dev/shm/web_boot', "a")
ct = datetime.datetime.now()
msg = '[' + str(ct) + ']' + str(log_str) + '\n'
fp.write(msg)
#print msg
fp.close
except:
pass
def start_encoder(query):
headers = {'content-type': 'application/json'}
conn = httplib.HTTPConnection('127.0.0.1')
json_str = json.dumps(query)
conn.request("POST", "/broadcaster/", json_str, headers)
r = conn.getresponse()
r.read()
log_write(r.status)
conn.close()
return r
def wait_for_apache2():
t_end = time.time() + 100
service_status = True
while time.time() < t_end:
service_status = is_service_running('apache2', ' * apache2 is running')
if service_status == True:
log_write("Apache is running now")
break
time.sleep(0.5)
def is_service_running(service_name, ref_status):
cmd = 'service ' + str(service_name) + ' status'
try:
opt_string = subprocess.check_output(shlex.split(cmd))
opt_string = opt_string.rstrip()
if opt_string == str(ref_status):
return True
else:
return False
except Exception as e:
return False
def restore_enc_config():
cfg_json_list = store_load_input_cfg.get_all_enc_json_cfgs()
    # for all the configs, start the encoder
for i in range(0, len(cfg_json_list)):
try:
add_query = cfg_json_list[i]
log_write(add_query)
#Start encoder
start_encoder(add_query)
except Exception as e:
log_write("Error occured: " + str(e))
pass
def restore_input_config():
input_cfg_json_list = store_load_input_cfg.get_all_input_json_cfgs()
    # for all the configs, start the encoder
for i in range(0, len(input_cfg_json_list)):
try:
c = input_cfg_json_list[i]
log_write(c)
print c
#Start encoder
status = wc_capture.add_input_source(c, c['input_id'])
log_write("status: " + str(status))
except Exception as e:
log_write("Error occured: " + str(e))
pass
def restore_config():
restore_input_config()
restore_enc_config()
if __name__ == '__main__':
try:
wait_for_apache2()
restore_config()
except Exception as e:
log_write(str(e))
#traceback.print_exc()
pass
```
#### File: ABR-Broadcaster/wsgi-scripts/wc_stopencoder.py
```python
import sqlite3
import os
import sys
import urlparse
import psutil
import time
import signal
import json
import urllib
working_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(working_dir)
import wc_configdb as configdb
import wc_capture as capture
import wc_process as process
import wc_store_load_input_cfg as store_load_input_cfg
def stop_encoder(input_id=None):
if input_id == None:
inp_src = configdb.get_config('CapInputNames')
else:
inp_src = configdb.get_config('CapInputNames', {'InputId': input_id})
if len(inp_src) == 0:
return 400, 'Bad Request: Invalid Id'
for i in range(0, len(inp_src)):
input_id = inp_src[i]['InputId']
cfg = configdb.get_config('StreamConfig', {'InputID': str(input_id)})
if cfg:
pid = int(cfg[0]['ProcessID'])
try:
if True == process.is_process_active(pid):
psproc = psutil.Process(pid)
#Terminate signal doesn't reach WSGI process. Using SIGUSR2 as a workaround.
#psproc.send_signal(signal.SIGTERM);
psproc.send_signal(signal.SIGUSR2);
process.wait_for_process(pid, timeout=7)
except psutil.AccessDenied as err:
print ("Access Denied err ", err.msg)
except psutil.TimeoutExpired as err:
print ("Time out expired ", err.msg)
psproc.kill()
except:
print ("No such process ")
configdb.delete_config('StreamConfig', {'InputID': input_id})
store_load_input_cfg.delete_json_cfg(input_id)
return 200, 'OK'
``` |
{
"source": "jkascend/code-jam-2017",
"score": 3
} |
#### File: code-jam-2017/qualification/pancakes.py
```python
import sys
from math import pow
FILE_NAME = 'in.in'
OUTPUT_NAME = 'pancakes-out.out'
FILE = open(FILE_NAME)
NUM_CASES = int(FILE.readline())
ANSWERS = []
def is_set(val, bit):
return val & 1 << bit > 0
def flip_range(start, length, val):
for k in range(0, length):
val ^= 1 << k + start
return val
def set_bits(s):
ret = 0
for i in range(0, len(s)):
if s[i] == "+":
ret |= 1 << i
return ret
def solve(s, k):
bits = set_bits(s)
num_flips = 0
i = 0
while i < len(s):
if is_set(bits, i) is not True:
if k + i > len(s):
return "IMPOSSIBLE"
else:
bits = flip_range(i, k, bits)
num_flips += 1
i = 0
else:
i += 1
return num_flips
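# Worked example of the greedy scan above: for s = "-+-" and k = 1 it flips
# index 0 ("++-"), rescans, flips index 2 ("+++") and returns 2 flips; for
# s = "+-" and k = 2 the trailing '-' would require flipping past the end,
# so "IMPOSSIBLE" is returned.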
for z in range(0, NUM_CASES):
case = FILE.readline().split()
ANSWERS.append(solve(case[0], int(case[1])))
FILE.close()
OUT_FILE = open(OUTPUT_NAME, 'w')
for i in range(0, NUM_CASES):
OUT_FILE.write('Case #{0}: {1}\n'.format(i + 1, ANSWERS[i]))
OUT_FILE.close()
``` |
{
"source": "jkassel/cerebro",
"score": 2
} |
#### File: server/main/views.py
```python
import os
#################
#### imports ####
#################
from flask import render_template, Blueprint
from project.server import app
################
#### config ####
################
main_blueprint = Blueprint('main', __name__,)
################
#### routes ####
################
@main_blueprint.route('/')
def home():
#env = os.environ['APP_SETTINGS']
env = app.config.get('APP_SETTINGS')
return render_template('main/home.html', environment=env)
@main_blueprint.route("/about/")
def about():
return render_template("main/about.html")
```
#### File: server/user/forms.py
```python
from flask_wtf import FlaskForm
from flask_login import current_user
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, SelectField, IntegerField, validators
from wtforms.validators import DataRequired, Email, Length, EqualTo, ValidationError
from wtforms.widgets import TextArea
from project.server.models import User
class LoginForm(FlaskForm):
email = StringField('Email Address', [DataRequired(), Email()])
password = PasswordField('Password', [DataRequired()])
class RegisterForm(FlaskForm):
def validate_username(self, user_name):
user = User.query.filter_by(username=user_name.data).first()
if user:
            raise ValidationError('That username is taken. Please choose another.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
            raise ValidationError('That email is in use by another account. Please choose another.')
user_name = StringField(
'Username',
validators=[DataRequired(), Length(min=3, max=24)])
email = StringField(
'Email Address',
validators=[DataRequired(), Email(message=None), Length(min=6, max=40)])
first_name = StringField('First Name')
last_name = StringField('Last Name')
age = IntegerField('Age', [validators.optional()])
website = StringField('Website')
facebook_url = StringField('Facebook URL')
twitter_url = StringField('Twitter URL')
about_me = StringField('About Me', widget=TextArea())
location = StringField('Location')
password = PasswordField(
'Password',
validators=[DataRequired(), Length(min=6, max=25)]
)
confirm = PasswordField(
'Confirm password',
validators=[
DataRequired(),
EqualTo('password', message='Passwords must match.')
]
)
class IdeaForm(FlaskForm):
title = StringField('Title')
description = StringField('Description', widget=TextArea())
access = SelectField('Access', choices=[("public", "Public"), ("private", "Private"), ("team", "Team")], default="private")
class ResetPasswordForm(FlaskForm):
password = PasswordField(
'New Password',
validators=[DataRequired(), Length(min=6, max=25)]
)
confirm = PasswordField(
'Confirm password',
validators=[
DataRequired(),
EqualTo('password', message='Passwords must match.')
]
)
class UserProfileForm(FlaskForm):
first_name = StringField('')
last_name = StringField('')
user_name = StringField('', validators=[DataRequired(), Length(min=3, max=24)])
profile_pic = FileField('Update Profile Picture', validators=[FileAllowed(['jpg', 'png', 'jpeg'])])
def validate_username(self, user_name):
if user_name.data != current_user.user_name:
user = User.query.filter_by(username=user_name.data).first()
if user:
                raise ValidationError('That username is taken. Please choose another.')
def validate_email(self, email):
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
                raise ValidationError('That email is in use by another account. Please choose another.')
email = StringField('', validators=[DataRequired(), Email()])
location = StringField('')
age = IntegerField('', [validators.optional()])
website = StringField('')
facebook_url = StringField('')
twitter_url = StringField('')
about_me = StringField('', widget=TextArea())
``` |
{
"source": "jkassies/xbbg",
"score": 2
} |
#### File: xbbg/core/conn.py
```python
import pandas as pd
import inspect
import pytest
from functools import wraps
from xbbg.core import utils, assist
from xbbg.io import files, logs, storage, cached
try:
import pdblp
except ImportError:
pdblp = utils.load_module(f'{files.abspath(__file__)}/pdblp.py')
_CON_SYM_ = '_xcon_'
_PORT_, _TIMEOUT_ = 8194, 30000
if hasattr(pytest, 'config'):
if 'with_bbg' not in pytest.config.option:
pytest.skip('no Bloomberg')
if not pytest.config.option.with_bbg:
pytest.skip('no Bloomberg')
def with_bloomberg(func):
"""
Wrapper function for Bloomberg connection
Args:
func: function to wrap
"""
@wraps(func)
def wrapper(*args, **kwargs):
scope = utils.func_scope(func=func)
param = inspect.signature(func).parameters
port = kwargs.pop('port', _PORT_)
timeout = kwargs.pop('timeout', _TIMEOUT_)
restart = kwargs.pop('restart', False)
all_kw = {
k: args[n] if n < len(args) else v.default
for n, (k, v) in enumerate(param.items()) if k != 'kwargs'
}
all_kw.update(kwargs)
log_level = kwargs.get('log', logs.LOG_LEVEL)
for to_list in ['tickers', 'flds']:
conv = all_kw.get(to_list, None)
if hasattr(conv, 'tolist'):
all_kw[to_list] = getattr(conv, 'tolist')()
if isinstance(conv, str):
all_kw[to_list] = [conv]
cached_data = []
if scope in ['xbbg.blp.bdp', 'xbbg.blp.bds']:
to_qry = cached.bdp_bds_cache(func=func.__name__, **all_kw)
cached_data += to_qry.cached_data
if not (to_qry.tickers and to_qry.flds):
if not cached_data: return pd.DataFrame()
res = pd.concat(cached_data, sort=False).reset_index(drop=True)
if not all_kw.get('raw', False):
res = assist.format_output(
data=res, source=func.__name__,
col_maps=all_kw.get('col_maps', dict())
)
return res
all_kw['tickers'] = to_qry.tickers
all_kw['flds'] = to_qry.flds
if scope in ['xbbg.blp.bdib']:
data_file = storage.hist_file(
ticker=all_kw['ticker'], dt=all_kw['dt'], typ=all_kw['typ'],
)
if files.exists(data_file):
logger = logs.get_logger(func, level=log_level)
if all_kw.get('batch', False): return
logger.debug(f'reading from {data_file} ...')
return assist.format_intraday(data=pd.read_parquet(data_file), **all_kw)
_, new = create_connection(port=port, timeout=timeout, restart=restart)
res = func(**{
k: v for k, v in all_kw.items() if k not in ['raw', 'col_maps']
})
if new: delete_connection()
if scope.startswith('xbbg.blp.') and isinstance(res, list):
final = cached_data + res
if not final: return pd.DataFrame()
res = pd.DataFrame(pd.concat(final, sort=False))
if (scope in ['xbbg.blp.bdp', 'xbbg.blp.bds']) \
and (not all_kw.get('raw', False)):
res = assist.format_output(
data=res.reset_index(drop=True), source=func.__name__,
col_maps=all_kw.get('col_maps', dict()),
)
return res
return wrapper
def create_connection(port=_PORT_, timeout=_TIMEOUT_, restart=False):
"""
Create Bloomberg connection
Returns:
(Bloomberg connection, if connection is new)
"""
if _CON_SYM_ in globals():
if not isinstance(globals()[_CON_SYM_], pdblp.BCon):
del globals()[_CON_SYM_]
if (_CON_SYM_ in globals()) and (not restart):
con = globals()[_CON_SYM_]
if getattr(con, '_session').start(): con.start()
return con, False
else:
con = pdblp.BCon(port=port, timeout=timeout)
globals()[_CON_SYM_] = con
con.start()
return con, True
def delete_connection():
"""
Stop and destroy Bloomberg connection
"""
if _CON_SYM_ in globals():
con = globals().pop(_CON_SYM_)
if not getattr(con, '_session').start(): con.stop()
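# Minimal usage sketch (illustrative, not part of the original module): callers can
# manage the shared connection directly instead of relying on the decorator, e.g.
#
#     con, is_new = create_connection(port=_PORT_, timeout=_TIMEOUT_)
#     try:
#         ...  # issue requests through `con` (a pdblp.BCon instance)
#     finally:
#         if is_new: delete_connection()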
```
#### File: xbbg/core/pdblp.py
```python
from abc import abstractmethod
class Session(object):
@abstractmethod
def start(self): return False
class BCon(object):
def __init__(self, port=8194, timeout=500, **kwargs):
self.host = kwargs.pop('host', 'localhost')
self.port = port
self.timeout = timeout
self.debug = kwargs.pop('debug', False)
self.session = kwargs.pop('session', None)
self.identity = kwargs.pop('identity', None)
self._session = Session()
@abstractmethod
def start(self): pass
@abstractmethod
def stop(self): pass
```
#### File: xbbg/io/files.py
```python
import pandas as pd
import os
import re
import glob
import time
DATE_FMT = r'\d{4}-(0?[1-9]|1[012])-(0?[1-9]|[12][0-9]|3[01])'
def exists(path) -> bool:
"""
Check path or file exists (use os.path.exists)
Args:
path: path or file
"""
return os.path.exists(path=path)
def abspath(cur_file, parent=0) -> str:
"""
Absolute path
Args:
cur_file: __file__ or file or path str
parent: level of parent to look for
Returns:
str
"""
file_path = os.path.abspath(cur_file).replace('\\', '/')
if os.path.isdir(file_path) and parent == 0: return file_path
adj = 1 - os.path.isdir(file_path)
return '/'.join(file_path.split('/')[:-(parent + adj)])
def create_folder(path_name: str, is_file=False):
"""
Make folder as well as all parent folders if not exists
Args:
path_name: full path name
is_file: whether input is name of file
"""
path_sep = path_name.replace('\\', '/').split('/')
for i in range(1, len(path_sep) + (0 if is_file else 1)):
cur_path = '/'.join(path_sep[:i])
if not os.path.exists(cur_path): os.mkdir(cur_path)
def all_files(
path_name, keyword='', ext='', full_path=True,
has_date=False, date_fmt=DATE_FMT
) -> list:
"""
Search all files with criteria
Returned list will be sorted by last modified
Args:
path_name: full path name
keyword: keyword to search
ext: file extensions, split by ','
full_path: whether return full path (default True)
has_date: whether has date in file name (default False)
date_fmt: date format to check for has_date parameter
Returns:
list: all file names with criteria fulfilled
"""
if not os.path.exists(path=path_name): return []
path_name = path_name.replace('\\', '/')
if keyword or ext:
keyword = f'*{keyword}*' if keyword else '*'
if not ext: ext = '*'
files = sort_by_modified([
f.replace('\\', '/') for f in glob.iglob(f'{path_name}/{keyword}.{ext}')
if os.path.isfile(f) and (f.replace('\\', '/').split('/')[-1][0] != '~')
])
else:
files = sort_by_modified([
f'{path_name}/{f}' for f in os.listdir(path=path_name)
if os.path.isfile(f'{path_name}/{f}') and (f[0] != '~')
])
if has_date:
files = filter_by_dates(files, date_fmt=date_fmt)
return files if full_path else [f.split('/')[-1] for f in files]
def all_folders(
path_name, keyword='', has_date=False, date_fmt=DATE_FMT
) -> list:
"""
Search all folders with criteria
Returned list will be sorted by last modified
Args:
path_name: full path name
keyword: keyword to search
has_date: whether has date in file name (default False)
date_fmt: date format to check for has_date parameter
Returns:
list: all folder names fulfilled criteria
"""
if not os.path.exists(path=path_name): return []
path_name = path_name.replace('\\', '/')
if keyword:
folders = sort_by_modified([
f.replace('\\', '/') for f in glob.iglob(f'{path_name}/*{keyword}*')
if os.path.isdir(f) and (f.replace('\\', '/').split('/')[-1][0] != '~')
])
else:
folders = sort_by_modified([
f'{path_name}/{f}' for f in os.listdir(path=path_name)
if os.path.isdir(f'{path_name}/{f}') and (f[0] != '~')
])
if has_date:
folders = filter_by_dates(folders, date_fmt=date_fmt)
return folders
def sort_by_modified(files_or_folders: list) -> list:
"""
Sort files or folders by modified time
Args:
files_or_folders: list of files or folders
Returns:
list
"""
return sorted(files_or_folders, key=os.path.getmtime, reverse=True)
def filter_by_dates(files_or_folders: list, date_fmt=DATE_FMT) -> list:
"""
Filter files or dates by date patterns
Args:
files_or_folders: list of files or folders
date_fmt: date format
Returns:
list
"""
r = re.compile(f'.*{date_fmt}.*')
return list(filter(
lambda vv: r.match(vv.replace('\\', '/').split('/')[-1]) is not None,
files_or_folders,
))
def latest_file(path_name, keyword='', ext='', **kwargs) -> str:
"""
Latest modified file in folder
Args:
path_name: full path name
keyword: keyword to search
ext: file extension
Returns:
str: latest file name
"""
files = all_files(
path_name=path_name, keyword=keyword, ext=ext, full_path=True
)
if not files:
from xbbg.io import logs
logger = logs.get_logger(latest_file, level=kwargs.pop('log', 'warning'))
logger.debug(f'file is not found in folder: {path_name}')
return ''
modified_time = [os.path.getmtime(f) for f in files]
files = [f for (dt, f) in sorted(zip(modified_time, files))]
return files[-1]
def file_modified_time(file_name) -> pd.Timestamp:
"""
File modified time in python
Args:
file_name: file name
Returns:
pd.Timestamp
"""
return pd.to_datetime(time.ctime(os.path.getmtime(filename=file_name)))
``` |
{
"source": "jkassismz/vmbench",
"score": 3
} |
#### File: vmbench/servers/asyncioecho.py
```python
import argparse
import asyncio
import gc
import uvloop
import os.path
from socket import *
PRINT = 0
async def echo_server(loop, address, unix):
if unix:
sock = socket(AF_UNIX, SOCK_STREAM)
else:
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind(address)
sock.listen(5)
sock.setblocking(False)
if PRINT:
print('Server listening at', address)
with sock:
while True:
client, addr = await loop.sock_accept(sock)
if PRINT:
print('Connection from', addr)
loop.create_task(echo_client(loop, client))
async def echo_client(loop, client):
try:
client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
except (OSError, NameError):
pass
with client:
while True:
data = await loop.sock_recv(client, 102400)
if not data:
break
await loop.sock_sendall(client, data)
if PRINT:
print('Connection closed')
async def echo_client_streams(reader, writer):
sock = writer.get_extra_info('socket')
try:
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
except (OSError, NameError):
pass
if PRINT:
print('Connection from', sock.getpeername())
while True:
data = await reader.readline()
if not data:
break
writer.write(data)
if PRINT:
print('Connection closed')
writer.close()
class EchoProtocol(asyncio.Protocol):
def connection_made(self, transport):
self.transport = transport
sock = transport.get_extra_info('socket')
try:
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
except (OSError, NameError):
pass
def connection_lost(self, exc):
self.transport = None
def data_received(self, data):
self.transport.write(data)
async def print_debug(loop):
while True:
print(chr(27) + "[2J") # clear screen
loop.print_debug_info()
await asyncio.sleep(0.5, loop=loop)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--uvloop', default=False, action='store_true')
parser.add_argument('--streams', default=False, action='store_true')
parser.add_argument('--proto', default=False, action='store_true')
parser.add_argument('--addr', default='127.0.0.1:25000', type=str)
parser.add_argument('--print', default=False, action='store_true')
args = parser.parse_args()
if args.uvloop:
loop = uvloop.new_event_loop()
print('using UVLoop')
else:
loop = asyncio.new_event_loop()
print('using asyncio loop')
asyncio.set_event_loop(loop)
loop.set_debug(False)
if args.print:
PRINT = 1
if hasattr(loop, 'print_debug_info'):
loop.create_task(print_debug(loop))
PRINT = 0
unix = False
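    # e.g. --addr 127.0.0.1:25000 serves over TCP, while --addr file:/tmp/echo.sock
    # (illustrative path) serves over a Unix domain socket.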
if args.addr.startswith('file:'):
unix = True
addr = args.addr[5:]
if os.path.exists(addr):
os.remove(addr)
else:
addr = args.addr.split(':')
addr[1] = int(addr[1])
addr = tuple(addr)
print('serving on: {}'.format(addr))
if args.streams:
if args.proto:
print('cannot use --stream and --proto simultaneously')
exit(1)
print('using asyncio/streams')
if unix:
coro = asyncio.start_unix_server(echo_client_streams,
addr, loop=loop,
limit=1024 * 1024)
else:
coro = asyncio.start_server(echo_client_streams,
*addr, loop=loop,
limit=1024 * 1024)
srv = loop.run_until_complete(coro)
elif args.proto:
if args.streams:
print('cannot use --stream and --proto simultaneously')
exit(1)
print('using simple protocol')
if unix:
coro = loop.create_unix_server(EchoProtocol, addr)
else:
coro = loop.create_server(EchoProtocol, *addr)
srv = loop.run_until_complete(coro)
else:
print('using sock_recv/sock_sendall')
loop.create_task(echo_server(loop, addr, unix))
try:
loop.run_forever()
finally:
if hasattr(loop, 'print_debug_info'):
gc.collect()
print(chr(27) + "[2J")
loop.print_debug_info()
loop.close()
```
#### File: vmbench/servers/curioecho_streams.py
```python
from curio import run, spawn, tcp_server
from socket import *
async def echo_handler(client, addr):
print('Connection from', addr)
try:
client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
except (OSError, NameError):
pass
s = client.as_stream()
while True:
data = await s.readline()
if not data:
break
await s.write(data)
await s.close()
print('Connection closed')
if __name__ == '__main__':
run(tcp_server('', 25000, echo_handler))
```
#### File: vmbench/servers/torecho_readline.py
```python
from tornado.ioloop import IOLoop
from tornado.tcpserver import TCPServer
class StreamHandler:
def __init__(self, stream):
self._stream = stream
stream.set_nodelay(True)
self._stream.read_until(b'\n', self._handle_read)
def _handle_read(self, data):
self._stream.write(data)
self._stream.read_until(b'\n', self._handle_read)
class EchoServer(TCPServer):
def handle_stream(self, stream, address):
StreamHandler(stream)
if __name__ == '__main__':
server = EchoServer()
server.bind(25000)
server.start(1)
IOLoop.instance().start()
IOLoop.instance().close()
``` |
{
"source": "jkatofsky/melodia",
"score": 2
} |
#### File: melodia/server/views.py
```python
from .app import app
from .models import Room, get_search_results
from flask import jsonify
@app.route('/')
def homepage():
return app.send_static_file('index.html')
@app.route('/api/create-room')
def create_room():
room: Room = Room()
room.save()
return {'room_id': str(room.id)}
@app.route('/api/search/<query>')
def search_songs(query):
return jsonify({'results': get_search_results(query)})
``` |
{
"source": "jkatsioloudes/Eblem",
"score": 3
} |
#### File: Eblem/search/domains.py
```python
import search.utilities as util
import search.query as qu
import webbrowser
# Class including functions for OSINT collection for target domains.
class Domains(qu.Query):
def __init__(self, query):
qu.Query.__init__(self, query)
# Performs search in who.is records giving information about a target domain.
def whoIsSearch(self):
# search string used in the address bar to perform search.
parsedQuery = util.Utilities.parseQuery(self.getQuery)
pageGlimpse = 'http://www.pageglimpse.com/' + parsedQuery
whoIsLink_0 = 'https://who.is/whois/' + parsedQuery
whoIsLink_1 = 'http://www.dnsstuff.com/tools#whois|type=domain&&value=' + parsedQuery
whoIsLink_2 = 'http://domainbigdata.com/' + parsedQuery
whoIsLink_3 = 'http://www.domaincrawler.com/' + parsedQuery
whoIsLink_4 = 'http://www.domainhistory.net/' + parsedQuery
whoIsLink_5 = 'http://whois.domaintools.com/' + parsedQuery
whoIsLink_6 = 'https://app2.follow.net/#domains/' + parsedQuery
whoIsLink_7 = 'https://majestic.com/reports/site-explorer?q=' + parsedQuery + '&oq=' + parsedQuery + '&IndexDataSource=F'
whoIsLink_8 = 'https://www.robtex.com/dns-lookup/' + parsedQuery
whoIsLink_9 = 'http://www.whoishostingthis.com/?q=' + parsedQuery
# returns a web page as a result of this search.
print(whoIsLink_5)
print(whoIsLink_0)
print(whoIsLink_1)
print(whoIsLink_9)
print(whoIsLink_2)
print(whoIsLink_3)
print(whoIsLink_4)
print(whoIsLink_7)
print(whoIsLink_6)
print(whoIsLink_8)
print(pageGlimpse)
print()
webbrowser.open(whoIsLink_5)
webbrowser.open(whoIsLink_0)
webbrowser.open(whoIsLink_1)
webbrowser.open(whoIsLink_9)
webbrowser.open(whoIsLink_2)
webbrowser.open(whoIsLink_3)
webbrowser.open(whoIsLink_4)
webbrowser.open(whoIsLink_7)
webbrowser.open(whoIsLink_6)
webbrowser.open(whoIsLink_8)
webbrowser.open(pageGlimpse)
    # Performs DNS lookups and related record searches for a target domain.
def dnsLookup(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
dnsLookupLink_0 = 'https://mxtoolbox.com/SuperTool.aspx?action=a%3' + parsedQuery
dedicatedOrNot = parsedQuery + '.dedicatedornot.com'
netcraft = 'http://toolbar.netcraft.com/site_report?url=' + parsedQuery + '#last_reboot'
dnsLookupLink_1 = 'http://dnshistory.org/dns-records/' + parsedQuery
dnsLookupLink_2 = 'http://www.dnsstuff.com/tools#dnsReport|type=domain&&value=' + parsedQuery
dnsLookupLink_3 = 'http://research.dnstrails.com/tools/lookup.htm?domain=' + parsedQuery
dnsLookupLink_4 = 'http://dnsviz.net/d/' + parsedQuery + '/analyze/'
dnsLookupLink_5 = 'https://intodns.com/' + parsedQuery
dnsLookupLink_6 = 'https://mxtoolbox.com/SuperTool.aspx?action=mx%3a' + parsedQuery + '&run=toolpage'
dnsLookupLink_7 = 'http://sameid.net/id/' + parsedQuery + '/'
dnsLookupLink_8 = 'https://www.tcpiputils.com/search?q=' + parsedQuery
dnsLookupLink_9 = 'http://dnssec-debugger.verisignlabs.com/' + parsedQuery
print(dnsLookupLink_0)
print(dedicatedOrNot)
print(netcraft)
print(dnsLookupLink_1)
print(dnsLookupLink_2)
print(dnsLookupLink_9)
print(dnsLookupLink_8)
print(dnsLookupLink_3)
print(dnsLookupLink_6)
print(dnsLookupLink_4)
print(dnsLookupLink_5)
print(dnsLookupLink_7)
print()
webbrowser.open(dnsLookupLink_0)
webbrowser.open(dedicatedOrNot)
webbrowser.open(netcraft)
webbrowser.open(dnsLookupLink_1)
webbrowser.open(dnsLookupLink_2)
webbrowser.open(dnsLookupLink_9)
webbrowser.open(dnsLookupLink_8)
webbrowser.open(dnsLookupLink_3)
webbrowser.open(dnsLookupLink_6)
webbrowser.open(dnsLookupLink_4)
webbrowser.open(dnsLookupLink_5)
webbrowser.open(dnsLookupLink_7)
# Retrieves a link for scanning a target domain.
def scanSearch(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
scanLink = 'https://asafaweb.com/Scan?Url=' + parsedQuery
print(scanLink)
print()
webbrowser.open(scanLink)
    # Retrieves links to archived versions of the target domain.
def archiveSearch(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
archiveLink = 'https://web.archive.org/web/' + parsedQuery # TODO an issue with star.
googleCache = 'http://webcache.googleusercontent.com/search?q=cache:http://' + parsedQuery + '/'
webCache = 'http://webcache.googleusercontent.com/search?q=cache:' + parsedQuery
screenShot = 'http://www.screenshots.com/' + parsedQuery + '/'
print(archiveLink)
print(googleCache)
print(webCache)
print(screenShot)
print()
webbrowser.open(archiveLink)
webbrowser.open(googleCache)
webbrowser.open(webCache)
webbrowser.open(screenShot)
    # Retrieves a link describing the technologies the target domain was built with.
def builtWith(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
buildWithLink = 'https://builtwith.com/' + parsedQuery
print(buildWithLink)
print()
webbrowser.open(buildWithLink)
# Provides the link to view the robots.txt file of a target domain.
    # This function has as a precondition that the query is a valid link.
def robotsView(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
newQuery = 'https://' + str(parsedQuery) + '/robots.txt'
print(newQuery)
print()
webbrowser.open(newQuery)
# Executes all of the above functions.
def domainAllSearches(self):
print("\n---- DOMAIN INFO ----")
print("View official whois records:")
self.whoIsSearch()
print("View results from DNS lookup:")
self.dnsLookup()
print("View archived versions of the website:")
self.archiveSearch()
print("Technologies and tools used to built the website:")
self.builtWith()
print("View the robots.txt file with disallowed resource access:")
self.robotsView()
print("Scan the domain for common vulnerabilities here:")
self.scanSearch()
```
#### File: Eblem/search/engines.py
```python
import search.utilities as util
import search.query as qu
import webbrowser
# Class including functions for OSINT collection using search engines.
class SearchEngines(qu.Query):
def __init__(self, query):
qu.Query.__init__(self, query)
# Top level domain search
__TLD__ = "com"
# Number of results we want back
__NUM_RESULTS__ = 8
# Last result to retrieve. Use none if you want to search forever.
__STOP__ = 1
# Lapse to wait between HTTP requests.
__PAUSE__ = 2
    # Whether the search query requires extra parsing before a potential redirection.
TO_PARSE = True
# returns google search results.
def googleSearch(self):
if self.TO_PARSE:
parsedQuery = util.Utilities.parseQuery(self.getQuery)
googleLink = 'http://www.google.com/search?q=' + parsedQuery
else:
googleLink = 'http://www.google.com/search?q=' + self.getQuery
print(googleLink)
print()
webbrowser.open(googleLink)
    # returns a DuckDuckGo searchable link.
def ddGoSearch(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
ddGoLink = 'https://duckduckgo.com/?q=' + parsedQuery
print(ddGoLink)
print()
webbrowser.open(ddGoLink)
# returns a baidu searchable link.
def baiduSearch(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
baiduLink = 'http://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=0&rsv_idx=1&tn=baidu&wd=' + parsedQuery
print(baiduLink)
print()
webbrowser.open(baiduLink)
# returns a link in bing search engine to search.
def bingSearch(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
bingLink = 'https://www.bing.com/search?q=' + parsedQuery
print(bingLink)
print()
webbrowser.open(bingLink)
# returns a link in excite search engine to search - specifically news, latest posts about queries -.
def exciteNewsSearch(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
exciteNewsLink = 'http://msxml.excite.com/search/news?q=' + parsedQuery + '&fcoid=411&fcop=left&om_nextpage=True&fpid=2'
print(exciteNewsLink)
print()
webbrowser.open(exciteNewsLink)
    # returns a link in the Factbites search engine for finding older articles about the search term.
def oldArticlesSearch(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
oldArticlesLink = 'http://www.factbites.com/topics/' + parsedQuery
print(oldArticlesLink)
print()
webbrowser.open(oldArticlesLink)
# returns a link in qwant search engine collecting search, media, and social.
def qwantSearch(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
qwantLink = 'https://www.qwant.com/?q=' + parsedQuery + '&t=all'
print(qwantLink)
print()
webbrowser.open(qwantLink)
    # returns links to clustering search engines that aggregate results from multiple sources.
def clusteringSearchEngines(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
carrotLink = 'http://search.carrot2.org/stable/search?source=web&view=folders&skin=fancy-compact&query=' + parsedQuery
cluzLink = 'http://www.cluuz.com/Default.aspx?list=y&yahoo=y&q=' + parsedQuery
print(carrotLink)
print(cluzLink)
print()
webbrowser.open(carrotLink)
webbrowser.open(cluzLink)
# search engine used to match keywords.
def keywordMatching(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
kmLink = 'http://www.keywordspy.com/research/search.aspx?q=' + parsedQuery + '&type=domains'
qrLink = 'https://keywordtool.io/search/google/10371113?keyword=' + parsedQuery + '&country=&language=en#suggestions'
        wordTracker = 'https://www.wordtracker.com/search?query=' + parsedQuery
        exLink = 'http://www.onelook.com/reverse-dictionary.shtml?s=' + parsedQuery
        print(kmLink)
        print(qrLink)
        print(wordTracker)
        print(exLink)
        print()
        webbrowser.open(kmLink)
        webbrowser.open(qrLink)
        webbrowser.open(wordTracker)
webbrowser.open(exLink)
# searches RSS feeds
def searchRSS(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
rssLink_0 = 'https://www.rsssearchhub.com/feeds?q=' + parsedQuery
rssLink_1 = 'http://fetchrss.com/generator/invalid?url=' + parsedQuery
print(rssLink_0)
print(rssLink_1)
print()
webbrowser.open(rssLink_0)
webbrowser.open(rssLink_1)
# Executes all of the above functions.
def searchEngineAllSearches(self):
print("\n---- SEARCH ENGINES ----")
print("Google search:")
self.googleSearch()
print("DuckDuckGo search:")
self.ddGoSearch()
print("Baidu search:")
self.baiduSearch()
print("Bing search:")
self.bingSearch()
print("Qwant search:")
self.qwantSearch()
print("Clustering multi search:")
self.clusteringSearchEngines()
print("Keyword matching search:")
self.keywordMatching()
print("Excite news search:")
self.exciteNewsSearch()
print("Fact bites news search:")
self.oldArticlesSearch()
print("Search in RSS Feeds:")
self.searchRSS()
# Class responsible for people search engines' search.
class PeopleSearchEngines(SearchEngines):
def __init__(self, query):
qu.Query.__init__(self, query)
    # Performs searches across multiple people search engines (pipl, 411, whitepages, and others).
def peopleSearch(self):
parsedQuery = util.Utilities.parseQuery(self.getQuery)
parse411 = self.__411parse__()
peekParse = self._peekParse__()
piplLink = 'https://pipl.com/search/?q=' + parsedQuery
canadaLink = 'http://www.canada411.ca/search/?stype=si&what=' + parsedQuery
forebears = 'http://forebears.io/place-search?q=' + parsedQuery
infospace = 'http://search.infospace.com/search/web?q=' + parsedQuery + '&searchbtn=Search'
intermentSearch = 'http://www.interment.net/data/search-general.htm?cx=partner-pub-1928517298809652%3A6045987309&cof=FORID%3A10&ie=ISO-8859-1&q=' + parsedQuery + '&sa=Search'
marketVisual = 'http://www.marketvisual.com/Search/Results?searchString=' + parsedQuery
nationalArchives = 'http://discovery.nationalarchives.gov.uk/results/r?_q=' + parsedQuery
link411Website = 'http://www.411.com/name/' + parse411
whitePages = 'http://www.whitepages.com/name/' + parse411
spokeo = 'https://www.spokeo.com/' + parse411
thatsThem = 'https://thatsthem.com/name/' + parse411
peekLink = 'http://www.peekyou.com/' + peekParse
yasni = 'http://www.yasni.com/' + parsedQuery + '/check+people?sh'
zabasaSearch = 'http://www.zabasearch.com/people/' + parsedQuery + '/'
journal = 'https://network.expertisefinder.com/searchexperts?query=' + parsedQuery
wink = 'https://www.wink.com/people/?pf=&nm=' + parsedQuery
print(piplLink)
print(peekLink)
print(canadaLink)
print(link411Website)
print(whitePages)
print(forebears)
print(infospace)
print(intermentSearch)
print(marketVisual)
print(nationalArchives)
print(spokeo)
print(thatsThem)
print(yasni)
print(zabasaSearch)
print(journal)
print(wink)
print()
webbrowser.open(piplLink)
webbrowser.open(peekLink)
webbrowser.open(canadaLink)
webbrowser.open(link411Website)
webbrowser.open(whitePages)
webbrowser.open(forebears)
webbrowser.open(infospace)
webbrowser.open(intermentSearch)
webbrowser.open(marketVisual)
webbrowser.open(nationalArchives)
webbrowser.open(spokeo)
webbrowser.open(thatsThem)
webbrowser.open(yasni)
webbrowser.open(zabasaSearch)
webbrowser.open(journal)
webbrowser.open(wink)
    # Performs a pipl.com search restricted to the optional location argument.
def piplSearchLocation(self, flag):
parsedLocation = PeopleSearchEngines.__parseLocation__(self, flag)
parsedQuery = util.Utilities.parseQuery(self.getQuery)
piplLink = 'https://pipl.com/search/?q=' + parsedQuery + '&l=' + parsedLocation
print(piplLink)
print()
webbrowser.open(piplLink)
# Executes all of the above functions.
def peopleEngineAllSearches(self):
print("\n---- PEOPLE SEARCH ENGINES ----")
print("Note: We suggest to add a location to make your search more specific.")
print("To do so, please use: <name> -i <location> -l")
print("\nSearch links in multiple people search engines:")
self.peopleSearch()
print("Search in pipl search engine with location:")
self.piplSearchLocation('-i')
# Parses the query to return the location by splitting it.
def __parseLocation__(self, flag):
first = str(flag)
last = ' -l'
try:
stringQuery = self.getQuery
start = stringQuery.index(first) + len(first)
end = stringQuery.index(last, start)
parsedQuery = self.getQuery[start:end]
subQuery = util.Utilities.substituteSpaces(parsedQuery)
return subQuery
except ValueError:
return ""
    # The 411 people-search website matches keywords joined by '-'.
def __411parse__(self):
q = util.Utilities.parseQuery(self.getQuery)
numSpaces = self.getQuery.count(' ')
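        # e.g. an input like "john smith -<flag>" (illustrative) ends up hyphen-joined
        # as "john-smith", the format 411.com-style people search sites expect.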
return q.replace('+', '-', numSpaces)
def _peekParse__(self):
q = util.Utilities.parseQuery(self.getQuery)
numSpaces = self.getQuery.count(' ')
return q.replace('+', '_', numSpaces)
# Class responsible for email validity.
class EmailValidityEngine(SearchEngines):
def __init__(self, query):
qu.Query.__init__(self, query)
    # Checks the validity of the given email address and performs a reverse lookup.
def emailValidity(self):
evLink_0 = 'https://mailjagger.ga/api/validate/' + self.getQuery
evLink_1 = 'http://www.reversegenie.com/email_search/' + self.getQuery
revLookup = 'https://thatsthem.com/email/' + self.getQuery
print(evLink_0)
print(evLink_1)
print("Reverse email lookup:")
print(revLookup)
print()
webbrowser.open(evLink_0)
webbrowser.open(evLink_1)
webbrowser.open(revLookup)
# Performs email and social media search on given email.
def emailSearch(self):
print("\n---- EMAIL VALIDITY ----")
print("Checks email validity:")
self.emailValidity()
```
#### File: Eblem/search/utilities.py
```python
class Utilities(object):
# Flags intro used by the user to indicate an action.
__SH_FLAG__ = '-'
__LO_FLAG__ = '--'
# Uses social searcher website collecting posts on given keyword on all social media.
__startString__ = 0
# The length of the space character, used to remove trailing space at the end of parsing keyword.
__spaceCharLength__ = 1
# Maximum number of spaces allowed without proceeding into further parsing.
__spaceMAX__ = 1
    # Parses the query to strip the flag portion before building a search URL.
def parseQuery(query):
flagIndex = str(query).index(Utilities.__SH_FLAG__) or str(query).index(Utilities.__LO_FLAG__)
parsedQuery = query[Utilities.__startString__:flagIndex - Utilities.__spaceCharLength__]
# Handles the cases where search terms are > 1 therefore we need to add a '+' in the position of each space.
parsedQueryNumSpaces = str(query).count(' ')
if parsedQueryNumSpaces > Utilities.__spaceMAX__:
parsedQuery = Utilities.substituteSpaces(parsedQuery)
return parsedQuery
# Removes space and adds the plus sign to complete the query.
def substituteSpaces(query):
numSpaces = str(query).count(' ')
return str(query).replace(' ', '+', numSpaces)
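    # Illustrative examples: parseQuery("john smith -w") -> "john+smith";
    # substituteSpaces("john smith") -> "john+smith".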
```
#### File: Eblem/tests/domainsTests.py
```python
from unittest import TestCase
import search.domains as domains
import sys
from contextlib import contextmanager
from io import StringIO
class TestDomains(TestCase):
# tests if scan search is returning back the correct link for single flag.
def test_scanSearch_shortFlag(self):
q = "www.abc.com -sc"
query = domains.Domains(q)
with TestDomains.captured_output(self) as (out, err):
domains.Domains.scanSearch(query)
link = out.getvalue().strip()
self.assertEqual(link, 'https://asafaweb.com/Scan?Url=www.abc.com')
# tests if scan search is returning back the correct link for big flag.
def test_scanSearch_longFlag(self):
q = "www.abc.com --scan"
query = domains.Domains(q)
with TestDomains.captured_output(self) as (out, err):
domains.Domains.scanSearch(query)
link = out.getvalue().strip()
self.assertEqual(link, 'https://asafaweb.com/Scan?Url=www.abc.com')
# tests if robots.txt search is returning back the correct link for single flag.
def test_robotsView_shortFlag(self):
q = "www.abc.com -rb"
query = domains.Domains(q)
with TestDomains.captured_output(self) as (out, err):
domains.Domains.robotsView(query)
link = out.getvalue().strip()
self.assertEqual(link, 'https://www.abc.com/robots.txt')
# tests if robots.txt search is returning back the correct link for big flag.
def test_robotsView_longFlag(self):
q = "www.abc.com --robots"
query = domains.Domains(q)
with TestDomains.captured_output(self) as (out, err):
domains.Domains.robotsView(query)
link = out.getvalue().strip()
self.assertEqual(link, 'https://www.abc.com/robots.txt')
# tests output in screen.
@contextmanager
def captured_output(self):
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
``` |
{
"source": "jkatzer/usolitaire",
"score": 3
} |
#### File: usolitaire/tests/test_game.py
```python
import unittest
from usolitaire import game
class GameTest(unittest.TestCase):
def setUp(self):
self.game = game.Game()
def test_game_init(self):
self.assertEqual(len(self.game.waste), 0)
self.assertEqual([len(pile) for pile in self.game.foundations], [0, 0, 0, 0])
self.assertEqual([len(pile) for pile in self.game.tableau], [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(len(self.game.stock), 24)
self.assertTrue(all(not c.face_up for c in self.game.stock))
for pile in self.game.tableau:
self.assertTrue(all(not c.face_up for c in pile[:-1]))
self.assertTrue(pile[-1].face_up)
def test_game_from_stock(self):
prev_waste_len = len(self.game.waste)
prev_stock_len = len(self.game.stock)
self.game.deal_from_stock()
self.assertEqual(len(self.game.waste), prev_waste_len + 1)
self.assertEqual(len(self.game.stock), prev_stock_len - 1)
```
#### File: usolitaire/usolitaire/app.py
```python
from __future__ import print_function, absolute_import, division
from collections import namedtuple
import urwid
from .game import Game, InvalidMove
from .ui import CardWidget, CardPileWidget, SpacerWidget, EmptyCardWidget, PALETTE
Selection = namedtuple('Selection', 'card tableau_index')
class GameApp(object):
def __init__(self):
self.game = Game()
self._statusbar = urwid.Text(u'Ready')
self.current_selection = Selection(None, None)
self._tableau_columns = urwid.Columns([EmptyCardWidget() for _ in range(7)])
self._top_columns = urwid.Columns([
EmptyCardWidget(),
EmptyCardWidget(),
SpacerWidget(),
EmptyCardWidget(),
EmptyCardWidget(),
EmptyCardWidget(),
EmptyCardWidget(),
])
self._update_stock_and_waste()
self._update_foundations()
self._update_tableaus()
self.main_layout = urwid.Pile([
self._top_columns,
urwid.Divider(),
self._tableau_columns,
urwid.Divider(),
self._statusbar,
])
def _update_tableaus(self):
for i, pile in enumerate(self.game.tableau):
self._tableau_columns.contents[i] = (
CardPileWidget(pile, onclick=self.pile_card_clicked, on_double_click=self.pile_card_double_clicked, index=i),
self._tableau_columns.options())
def _update_stock_and_waste(self):
if self.game.stock:
stock_widget = CardWidget(self.game.stock[-1],
onclick=self.stock_clicked,
playable=True)
else:
stock_widget = EmptyCardWidget(onclick=self.redeal_stock)
self._top_columns.contents[0] = (stock_widget, self._top_columns.options())
if self.game.waste:
waste_widget = CardWidget(self.game.waste[-1],
onclick=self._card_from_waste_clicked,
on_double_click=self.waste_card_double_clicked,
playable=True)
else:
waste_widget = EmptyCardWidget()
self._top_columns.contents[1] = (waste_widget, self._top_columns.options())
def _update_foundations(self):
for index, pile in enumerate(self.game.foundations, 3):
widget = CardWidget(pile[-1]) if pile else EmptyCardWidget()
self._top_columns.contents[index] = (widget, self._top_columns.options())
def stock_clicked(self, stock_widget):
self.game.deal_from_stock()
self._update_stock_and_waste()
self.update_status('Dealt from stock')
self.clear_selection()
def redeal_stock(self, stock_widget):
self.game.restore_stock()
self._update_stock_and_waste()
self.update_status('Restored stock')
self.clear_selection()
def iter_allcards(self):
"""Iterate through all card widgets in the game"""
for pile, _ in self._tableau_columns.contents:
for w in pile.iter_widgets():
yield w
for w, _ in self._top_columns.contents:
if isinstance(w, CardWidget):
yield w
else:
iter_widgets = getattr(w, 'iter_widgets', lambda: [])
for w in iter_widgets():
yield w
def clear_selection(self):
self.current_selection = Selection(None, None)
for card in self.iter_allcards():
card.highlighted = False
card.redraw()
def select_card(self, card_widget, pile=None):
"""Select card, or deselect if it was previously selected"""
should_highlight = not card_widget.highlighted
for card in self.iter_allcards():
card.highlighted = False
card_widget.highlighted = should_highlight
if should_highlight:
self.current_selection = Selection(card_widget, getattr(pile, 'index', None))
else:
self.current_selection = Selection(None, None)
for card in self.iter_allcards():
card.redraw()
def _card_from_waste_clicked(self, card_widget):
self.select_card(card_widget, None)
def _card_from_tableau_clicked(self, card_widget, pile):
if not self.current_selection.card or self.current_selection.card == card_widget:
self.select_card(card_widget, pile)
return
src_index = self.current_selection.tableau_index
try:
if src_index is None:
self.game.move_from_waste_to_tableau(pile.index)
else:
self.game.move_tableau_pile(src_index, pile.index)
except InvalidMove:
self.update_status('Invalid move: %r %r' % (src_index, pile.index))
else:
self._update_stock_and_waste()
self._update_tableaus()
self.clear_selection()
def pile_card_clicked(self, card_widget, pile=None):
if pile and hasattr(pile.top, 'face_up') and not pile.top.face_up:
pile.top.face_up = True
self.clear_selection()
self.update_status('Neat!')
return
self._card_from_tableau_clicked(card_widget, pile)
def waste_card_double_clicked(self, card_widget, pile=None):
try:
self.game.move_to_foundation_from_waste()
except InvalidMove:
self.update_status("Can't move card to foundation")
else:
self._update_stock_and_waste()
self._update_foundations()
def pile_card_double_clicked(self, card_widget, pile=None):
if pile and hasattr(pile.top, 'face_up') and not pile.top.face_up:
pile.top.face_up = True
self.clear_selection()
self.update_status('Neat!')
return
try:
self.game.move_to_foundation_from_tableau(pile.index)
except InvalidMove:
self.update_status("Can't move card to foundation")
else:
pile.redraw()
self._update_foundations()
self.clear_selection()
self.update_status('Great job!!')
def update_status(self, text='', append=False):
if append:
text = self._statusbar.get_text()[0] + '\n' + text
self._statusbar.set_text(text)
def exit_on_q(key):
if key in ('q', 'Q', 'esc'):
raise urwid.ExitMainLoop()
def main():
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.parse_args()
app = GameApp()
loop = urwid.MainLoop(
urwid.Filler(app.main_layout, valign='top'),
PALETTE,
unhandled_input=exit_on_q,
)
loop.run()
if '__main__' == __name__:
main()
``` |
{
"source": "jkatzsam/woods_ood",
"score": 3
} |
#### File: woods_ood/CIFAR/train.py
```python
from sklearn.metrics import det_curve, accuracy_score, roc_auc_score
from make_datasets import *
from models.wrn_ssnd import *
import wandb
if __package__ is None:
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
'''
This code implements training and testing functions.
'''
def boolean_string(s):
if s not in {'False', 'True'}:
raise ValueError('Not a valid boolean string')
return s == 'True'
parser = argparse.ArgumentParser(description='Tunes a CIFAR Classifier with OE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('dataset', type=str, choices=['cifar10', 'cifar100'],
default='cifar10',
help='Choose between CIFAR-10, CIFAR-100.')
parser.add_argument('--model', '-m', type=str, default='allconv',
choices=['allconv', 'wrn', 'densenet'], help='Choose architecture.')
# Optimization options
parser.add_argument('--epochs', '-e', type=int, default=10,
help='Number of epochs to train.')
parser.add_argument('--learning_rate', '-lr', type=float,
default=0.001, help='The initial learning rate.')
parser.add_argument('--batch_size', '-b', type=int,
default=128, help='Batch size.')
parser.add_argument('--oe_batch_size', type=int,
default=256, help='Batch size.')
parser.add_argument('--test_bs', type=int, default=200)
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float,
default=0.0005, help='Weight decay (L2 penalty).')
# WRN Architecture
parser.add_argument('--layers', default=40, type=int,
help='total number of layers')
parser.add_argument('--widen-factor', default=2, type=int, help='widen factor')
parser.add_argument('--droprate', default=0.3, type=float,
help='dropout probability')
# Checkpoints
parser.add_argument('--results_dir', type=str,
default='results', help='Folder to save .pkl results.')
parser.add_argument('--checkpoints_dir', type=str,
default='checkpoints', help='Folder to save .pt checkpoints.')
parser.add_argument('--load_pretrained', type=str,
default='snapshots/pretrained', help='Load pretrained model to test or resume training.')
parser.add_argument('--test', '-t', action='store_true',
help='Test only flag.')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--gpu_id', type=int, default=1, help='Which GPU to run on.')
parser.add_argument('--prefetch', type=int, default=4,
help='Pre-fetching threads.')
# EG specific
parser.add_argument('--score', type=str, default='SSND', help='woods|woods_nn|OE|energy|energy_vos')
parser.add_argument('--seed', type=int, default=1,
help='seed for np(tinyimages80M sampling); 1|2|8|100|107')
parser.add_argument('--classification', type=boolean_string, default=True)
# dataset related
parser.add_argument('--aux_out_dataset', type=str, default='svhn', choices=['svhn', 'lsun_c', 'lsun_r',
'isun', 'dtd', 'places', 'tinyimages_300k'],
help='Auxiliary out of distribution dataset')
parser.add_argument('--test_out_dataset', type=str,choices=['svhn', 'lsun_c', 'lsun_r',
'isun', 'dtd', 'places', 'tinyimages_300k'],
default='svhn', help='Test out of distribution dataset')
parser.add_argument('--pi', type=float, default=1,
help='pi in ssnd framework, proportion of ood data in auxiliary dataset')
###woods/woods_nn specific
parser.add_argument('--in_constraint_weight', type=float, default=1,
help='weight for in-distribution penalty in loss function')
parser.add_argument('--out_constraint_weight', type=float, default=1,
help='weight for out-of-distribution penalty in loss function')
parser.add_argument('--ce_constraint_weight', type=float, default=1,
help='weight for classification penalty in loss function')
parser.add_argument('--false_alarm_cutoff', type=float,
default=0.05, help='false alarm cutoff')
parser.add_argument('--lr_lam', type=float, default=1, help='learning rate for the updating lam (SSND_alm)')
parser.add_argument('--ce_tol', type=float,
default=2, help='tolerance for the loss constraint')
parser.add_argument('--penalty_mult', type=float,
default=1.5, help='multiplicative factor for penalty method')
parser.add_argument('--constraint_tol', type=float,
default=0, help='tolerance for considering constraint violated')
# Energy Method Specific
parser.add_argument('--m_in', type=float, default=-25.,
help='margin for in-distribution; above this value will be penalized')
parser.add_argument('--m_out', type=float, default=-5.,
help='margin for out-distribution; below this value will be penalized')
parser.add_argument('--T', default=1., type=float, help='temperature: energy|Odin') # T = 1 suggested by energy paper
#energy vos method
parser.add_argument('--energy_vos_lambda', type=float, default=2, help='energy vos weight')
# OE specific
parser.add_argument('--oe_lambda', type=float, default=.5, help='OE weight')
# parse argument
args = parser.parse_args()
# method_data_name gives path to the model
if args.score in ['woods_nn']:
method_data_name = "{}_{}_{}_{}_{}_{}_{}_{}".format(args.score,
str(args.in_constraint_weight),
str(args.out_constraint_weight),
str(args.ce_constraint_weight),
str(args.false_alarm_cutoff),
str(args.lr_lam),
str(args.penalty_mult),
str(args.pi))
elif args.score == "energy":
method_data_name = "{}_{}_{}_{}".format(args.score,
str(args.m_in),
str(args.m_out),
args.pi)
elif args.score == "OE":
method_data_name = "{}_{}_{}".format(args.score,
str(args.oe_lambda),
str(args.pi))
elif args.score == "energy_vos":
method_data_name = "{}_{}_{}".format(args.score,
str(args.energy_vos_lambda),
str(args.pi))
elif args.score in ['woods']:
method_data_name = "{}_{}_{}_{}_{}_{}_{}_{}".format(args.score,
str(args.in_constraint_weight),
str(args.out_constraint_weight),
str(args.false_alarm_cutoff),
str(args.ce_constraint_weight),
str(args.lr_lam),
str(args.penalty_mult),
str(args.pi))
state = {k: v for k, v in args._get_kwargs()}
print(state)
#save wandb hyperparameters
# wandb.config = state
wandb.init(project="OOD", entity="ood_learning", config=state)
state['wandb_name'] = wandb.run.name
# store train, test, and valid FNR
state['fnr_train'] = []
state['fnr_valid'] = []
state['fnr_valid_clean'] = []
state['fnr_test'] = []
# in-distribution classification accuracy
state['train_accuracy'] = []
state['valid_accuracy'] = []
state['valid_accuracy_clean'] = []
state['test_accuracy'] = []
# store train, valid, and test OOD scores
state['OOD_scores_P0_train'] = []
state['OOD_scores_PX_train'] = []
state['OOD_scores_P0_valid'] = []
state['OOD_scores_PX_valid'] = []
state['OOD_scores_P0_valid_clean'] = []
state['OOD_scores_PX_valid_clean'] = []
state['OOD_scores_P0_test'] = []
state['OOD_scores_Ptest'] = []
# optimization constraints
state['in_dist_constraint'] = []
state['train_loss_constraint'] = []
def to_np(x): return x.data.cpu().numpy()
torch.manual_seed(args.seed)
rng = np.random.default_rng(args.seed)
#make the data_loaders
train_loader_in, train_loader_aux_in, train_loader_aux_out, test_loader, test_loader_ood, valid_loader_in, valid_loader_aux_in, valid_loader_aux_out = make_datasets(
args.dataset, args.aux_out_dataset, args.test_out_dataset, args.pi, state)
print("\n len(train_loader_in.dataset) {} " \
"len(train_loader_aux_in.dataset) {}, " \
"len(train_loader_aux_out.dataset) {}, " \
"len(test_loader.dataset) {}, " \
"len(test_loader_ood.dataset) {}, " \
"len(valid_loader_in.dataset) {}, " \
"len(valid_loader_aux_in.dataset) {}" \
"len(valid_loader_aux_out.dataset) {}".format(
len(train_loader_in.dataset),
len(train_loader_aux_in.dataset),
len(train_loader_aux_out.dataset),
len(test_loader.dataset),
len(test_loader_ood.dataset),
len(valid_loader_in.dataset),
len(valid_loader_aux_in.dataset),
len(valid_loader_aux_out.dataset)))
state['train_in_size'] = len(train_loader_in.dataset)
state['train_aux_in_size'] = len(train_loader_aux_in.dataset)
state['train_aux_out_size'] = len(train_loader_aux_out.dataset)
state['valid_in_size'] = len(valid_loader_in.dataset)
state['valid_aux_in_size'] = len(valid_loader_aux_in.dataset)
state['valid_aux_out_size'] = len(valid_loader_aux_out.dataset)
state['test_in_size'] = len(test_loader.dataset)
state['test_out_size'] = len(test_loader_ood.dataset)
if args.dataset in ['cifar10']:
num_classes = 10
elif args.dataset in ['cifar100']:
num_classes = 100
# WRN architecture with 10 output classes (extra NN is added later for SSND methods)
net = WideResNet(args.layers, num_classes, args.widen_factor, dropRate=args.droprate)
#create logistic regression layer for energy_vos and woods
if args.score in ['energy_vos', 'woods']:
logistic_regression = nn.Linear(1, 1)
logistic_regression.cuda()
# Restore model
model_found = False
print(args.load_pretrained)
if args.load_pretrained == 'snapshots/pretrained':
print('Restoring trained model...')
for i in range(100, -1, -1):
model_name = os.path.join(args.load_pretrained, args.dataset + '_' + args.model +
'_pretrained_epoch_' + str(i) + '.pt')
if os.path.isfile(model_name):
print('found pretrained model: {}'.format(model_name))
net.load_state_dict(torch.load(model_name))
print('Model restored! Epoch:', i)
model_found = True
break
if not model_found:
assert False, "could not find model to restore"
# add extra NN for OOD detection (for SSND methods)
if args.score in ['woods_nn']:
net = WideResNet_SSND(wrn=net)
if args.ngpu > 1:
print('Available CUDA devices:', torch.cuda.device_count())
print('CUDA available:', torch.cuda.is_available())
print('Running in parallel across', args.ngpu, 'GPUs')
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
net.cuda()
torch.cuda.manual_seed(1)
elif args.ngpu > 0:
print('CUDA available:', torch.cuda.is_available())
print('Available CUDA devices:', torch.cuda.device_count())
print('Sending model to device', torch.cuda.current_device(), ':', torch.cuda.get_device_name())
net.cuda()
torch.cuda.manual_seed(1)
# cudnn.benchmark = True # fire on all cylinders
cudnn.benchmark = False # control reproducibility/stochastic behavior
#energy_vos, woods also use logistic regression in optimization
if args.score in ['energy_vos', 'woods']:
optimizer = torch.optim.SGD(
list(net.parameters()) + list(logistic_regression.parameters()),
state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
else:
optimizer = torch.optim.SGD(
net.parameters(), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
#define scheduler for learning rate
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[int(args.epochs*.5), int(args.epochs*.75), int(args.epochs*.9)], gamma=0.5)
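# e.g. with --epochs 100 the learning rate is halved at epochs 50, 75 and 90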
# /////////////// Training ///////////////
# Create extra variables needed for training
# make in_constraint_weight a global variable
in_constraint_weight = args.in_constraint_weight
# make ce_constraint_weight a global variable
ce_constraint_weight = args.ce_constraint_weight
# create the lagrangian variable for lagrangian methods
if args.score in ['woods_nn', 'woods']:
lam = torch.tensor(0).float()
lam = lam.cuda()
lam2 = torch.tensor(0).float()
    lam2 = lam2.cuda()
def mix_batches(aux_in_set, aux_out_set):
'''
Args:
aux_in_set: minibatch from in_distribution
aux_out_set: minibatch from out distribution
Returns:
mixture of minibatches with mixture proportion pi of aux_out_set
'''
# create a mask to decide which sample is in the batch
mask = rng.choice(a=[False, True], size=(args.batch_size,), p=[1 - args.pi, args.pi])
aux_out_set_subsampled = aux_out_set[0][mask]
aux_in_set_subsampled = aux_in_set[0][np.invert(mask)]
# note: ordering of aux_out_set_subsampled, aux_in_set_subsampled does not matter because you always take the sum
aux_set = torch.cat((aux_out_set_subsampled, aux_in_set_subsampled), 0)
return aux_set
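# e.g. with --pi 0.25 and batch_size 128 (and auxiliary loaders using the same batch
# size, which this masking assumes), roughly 32 samples of each mixed batch come from
# the true OOD auxiliary loader and ~96 from the in-distribution auxiliary loader.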
def train(epoch):
'''
Train the model using the specified score
'''
# make the variables global for optimization purposes
global in_constraint_weight
global ce_constraint_weight
# declare lam global
if args.score in ['woods_nn', 'woods']:
global lam
global lam2
# print the learning rate
for param_group in optimizer.param_groups:
print("lr {}".format(param_group['lr']))
net.train() # enter train mode
# track train classification accuracy
train_accuracies = []
    # start at a random point of the dataset; this induces more randomness without obliterating locality
train_loader_aux_in.dataset.offset = rng.integers(
len(train_loader_aux_in.dataset))
train_loader_aux_out.dataset.offset = rng.integers(
len(train_loader_aux_out.dataset))
batch_num = 1
loaders = zip(train_loader_in, train_loader_aux_in, train_loader_aux_out)
# for logging in weights & biases
losses_ce = []
in_losses = []
out_losses = []
out_losses_weighted = []
losses = []
for in_set, aux_in_set, aux_out_set in loaders:
#create the mixed batch
aux_set = mix_batches(aux_in_set, aux_out_set)
batch_num += 1
data = torch.cat((in_set[0], aux_set), 0)
target = in_set[1]
if args.ngpu > 0:
data, target = data.cuda(), target.cuda()
# forward
x = net(data)
# in-distribution classification accuracy
if args.score in ['woods_nn']:
x_classification = x[:len(in_set[0]), :num_classes]
elif args.score in ['energy', 'OE', 'energy_vos', 'woods']:
x_classification = x[:len(in_set[0])]
pred = x_classification.data.max(1)[1]
train_accuracies.append(accuracy_score(list(to_np(pred)), list(to_np(target))))
optimizer.zero_grad()
# cross-entropy loss
if args.classification:
loss_ce = F.cross_entropy(x_classification, target)
else:
loss_ce = torch.Tensor([0]).cuda()
losses_ce.append(loss_ce.item())
if args.score == 'woods_nn':
            '''
            woods_nn: uses separate weights for the in-distribution constraint
            and the classification constraint, and updates each weight separately.
            '''
# penalty for the mixture/auxiliary dataset
out_x_ood_task = x[len(in_set[0]):, num_classes]
out_loss = torch.mean(F.relu(1 - out_x_ood_task))
out_loss_weighted = args.out_constraint_weight * out_loss
in_x_ood_task = x[:len(in_set[0]), num_classes]
f_term = torch.mean(F.relu(1 + in_x_ood_task)) - args.false_alarm_cutoff
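            # Augmented Lagrangian term for the inequality constraint f_term <= 0,
            # with multiplier lam and penalty weight rho = in_constraint_weight:
            #   phi = lam * f_term + (rho / 2) * f_term**2   if rho * f_term + lam >= 0
            #   phi = -lam**2 / (2 * rho)                    otherwise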
if in_constraint_weight * f_term + lam >= 0:
in_loss = f_term * lam + in_constraint_weight / 2 * torch.pow(f_term, 2)
else:
in_loss = - torch.pow(lam, 2) * 0.5 / in_constraint_weight
loss_ce_constraint = loss_ce - args.ce_tol * full_train_loss
if ce_constraint_weight * loss_ce_constraint + lam2 >= 0:
loss_ce = loss_ce_constraint * lam2 + ce_constraint_weight / 2 * torch.pow(loss_ce_constraint, 2)
else:
loss_ce = - torch.pow(lam2, 2) * 0.5 / ce_constraint_weight
# add the losses together
loss = loss_ce + out_loss_weighted + in_loss
in_losses.append(in_loss.item())
out_losses.append(out_loss.item())
            out_losses_weighted.append(out_loss_weighted.item())
losses.append(loss.item())
elif args.score == 'energy':
Ec_out = -torch.logsumexp(x[len(in_set[0]):], dim=1)
Ec_in = -torch.logsumexp(x[:len(in_set[0])], dim=1)
loss_energy = 0.1 * (torch.pow(F.relu(Ec_in - args.m_in), 2).mean() + torch.pow(F.relu(args.m_out - Ec_out),
2).mean())
loss = loss_ce + loss_energy
losses.append(loss.item())
elif args.score == 'energy_vos':
Ec_out = torch.logsumexp(x[len(in_set[0]):], dim=1)
Ec_in = torch.logsumexp(x[:len(in_set[0])], dim=1)
binary_labels = torch.ones(len(x)).cuda()
binary_labels[len(in_set[0]):] = 0
loss_energy = F.binary_cross_entropy_with_logits(logistic_regression(
torch.cat([Ec_in, Ec_out], -1).unsqueeze(1)).squeeze(),
binary_labels)
loss = loss_ce + args.energy_vos_lambda * loss_energy
losses.append(loss.item())
elif args.score == 'woods':
#compute energies
Ec_out = torch.logsumexp(x[len(in_set[0]):], dim=1)
Ec_in = torch.logsumexp(x[:len(in_set[0])], dim=1)
#apply the sigmoid loss
loss_energy_in = torch.mean(torch.sigmoid(logistic_regression(
Ec_in.unsqueeze(1)).squeeze()))
loss_energy_out = torch.mean(torch.sigmoid(-logistic_regression(
Ec_out.unsqueeze(1)).squeeze()))
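            # sigmoid(logistic_regression(E)) acts as a soft OOD score: it is pushed
            # small on in-distribution data (loss_energy_in, constrained below
            # false_alarm_cutoff) and large on the mixture batch (loss_energy_out).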
#alm function for the in distribution constraint
in_constraint_term = loss_energy_in - args.false_alarm_cutoff
if in_constraint_weight * in_constraint_term + lam >= 0:
in_loss = in_constraint_term * lam + in_constraint_weight / 2 * torch.pow(in_constraint_term, 2)
else:
in_loss = - torch.pow(lam, 2) * 0.5 / in_constraint_weight
#alm function for the cross entropy constraint
loss_ce_constraint = loss_ce - args.ce_tol * full_train_loss
if ce_constraint_weight * loss_ce_constraint + lam2 >= 0:
loss_ce = loss_ce_constraint * lam2 + ce_constraint_weight / 2 * torch.pow(loss_ce_constraint, 2)
else:
loss_ce = - torch.pow(lam2, 2) * 0.5 / ce_constraint_weight
loss = loss_ce + args.out_constraint_weight*loss_energy_out + in_loss
#wandb
in_losses.append(in_loss.item())
out_losses.append(loss_energy_out.item())
out_losses_weighted.append(args.out_constraint_weight * loss_energy_out.item())
losses.append(loss.item())
elif args.score == 'OE':
loss_oe = args.oe_lambda * -(x[len(in_set[0]):].mean(1) - torch.logsumexp(x[len(in_set[0]):], dim=1)).mean()
loss = loss_ce + loss_oe
losses.append(loss.item())
loss.backward()
optimizer.step()
loss_ce_avg = np.mean(losses_ce)
in_loss_avg = np.mean(in_losses)
out_loss_avg = np.mean(out_losses)
out_loss_weighted_avg = np.mean(out_losses_weighted)
loss_avg = np.mean(losses)
train_acc_avg = np.mean(train_accuracies)
wandb.log({
'epoch':epoch,
"learning rate": optimizer.param_groups[0]['lr'],
'CE loss':loss_ce_avg,
'in loss':in_loss_avg,
'out loss':out_loss_avg,
'out loss (weighted)':out_loss_weighted_avg,
'loss':loss_avg,
'train accuracy':train_acc_avg
})
# store train accuracy
state['train_accuracy'].append(train_acc_avg)
# updates for alm methods
if args.score in ["woods_nn"]:
print("making updates for SSND alm methods...")
# compute terms for constraints
in_term, ce_loss = compute_constraint_terms()
# update lam for in-distribution term
if args.score in ["woods_nn"]:
print("updating lam...")
in_term_constraint = in_term - args.false_alarm_cutoff
print("in_distribution constraint value {}".format(in_term_constraint))
state['in_dist_constraint'].append(in_term_constraint.item())
# wandb
wandb.log({"in_term_constraint": in_term_constraint.item(),
'in_constraint_weight':in_constraint_weight,
'epoch':epoch})
# update lambda
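# the two branches below are the derivative of the clipped augmented-Lagrangian
# penalty (used in the loss above) with respect to lam, so this is a
# gradient-ascent step on the dual variable with step size args.lr_lam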
if in_term_constraint * in_constraint_weight + lam >= 0:
lam += args.lr_lam * in_term_constraint
else:
lam += -args.lr_lam * lam / in_constraint_weight
# update lam2
if args.score in ["woods_nn"]:
print("updating lam2...")
ce_constraint = ce_loss - args.ce_tol * full_train_loss
print("cross entropy constraint {}".format(ce_constraint))
state['train_loss_constraint'].append(ce_constraint.item())
# wandb
wandb.log({"ce_term_constraint": ce_constraint.item(),
'ce_constraint_weight':ce_constraint_weight,
'epoch':epoch})
# update lambda2
if ce_constraint * ce_constraint_weight + lam2 >= 0:
lam2 += args.lr_lam * ce_constraint
else:
lam2 += -args.lr_lam * lam2 / ce_constraint_weight
# update weight for alm_full_2
if args.score == 'woods_nn' and in_term_constraint > args.constraint_tol:
print('increasing in_constraint_weight weight....\n')
in_constraint_weight *= args.penalty_mult
if args.score == 'woods_nn' and ce_constraint > args.constraint_tol:
print('increasing ce_constraint_weight weight....\n')
ce_constraint_weight *= args.penalty_mult
#alm update for energy_vos alm methods
if args.score in ['woods']:
print("making updates for energy alm methods...")
avg_sigmoid_energy_losses, _, avg_ce_loss = evaluate_energy_logistic_loss()
in_term_constraint = avg_sigmoid_energy_losses - args.false_alarm_cutoff
print("in_distribution constraint value {}".format(in_term_constraint))
state['in_dist_constraint'].append(in_term_constraint.item())
# update lambda
print("updating lam...")
if in_term_constraint * in_constraint_weight + lam >= 0:
lam += args.lr_lam * in_term_constraint
else:
lam += -args.lr_lam * lam / in_constraint_weight
# wandb
wandb.log({"in_term_constraint": in_term_constraint.item(),
'in_constraint_weight':in_constraint_weight,
"avg_sigmoid_energy_losses": avg_sigmoid_energy_losses.item(),
'lam': lam,
'epoch':epoch})
# update lam2
if args.score in ['woods']:
print("updating lam2...")
ce_constraint = avg_ce_loss - args.ce_tol * full_train_loss
print("cross entropy constraint {}".format(ce_constraint))
state['train_loss_constraint'].append(ce_constraint.item())
# wandb
wandb.log({"ce_term_constraint": ce_constraint.item(),
'ce_constraint_weight':ce_constraint_weight,
'epoch':epoch})
# update lambda2
if ce_constraint * ce_constraint_weight + lam2 >= 0:
lam2 += args.lr_lam * ce_constraint
else:
lam2 += -args.lr_lam * lam2 / ce_constraint_weight
# update in-distribution weight for alm
if args.score in ['woods'] and in_term_constraint > args.constraint_tol:
print("energy in distribution constraint violated, so updating in_constraint_weight...")
in_constraint_weight *= args.penalty_mult
# update ce_loss weight for alm
if args.score in ['woods'] and ce_constraint > args.constraint_tol:
print('increasing ce_constraint_weight weight....\n')
ce_constraint_weight *= args.penalty_mult
def compute_constraint_terms():
'''
Compute the in-distribution term and the cross-entropy loss over the whole training set
'''
net.eval()
# create list for the in-distribution term and the ce_loss
in_terms = []
ce_losses = []
num_batches = 0
for in_set in train_loader_in:
num_batches += 1
data = in_set[0]
target = in_set[1]
if args.ngpu > 0:
data, target = data.cuda(), target.cuda()
# forward
z = net(data)
# compute in-distribution term
in_x_ood_task = z[:, num_classes]
in_terms.extend(list(to_np(F.relu(1 + in_x_ood_task))))
# compute cross entropy term
z_classification = z[:, :num_classes]
loss_ce = F.cross_entropy(z_classification, target, reduction='none')
ce_losses.extend(list(to_np(loss_ce)))
return np.mean(np.array(in_terms)), np.mean(np.array(ce_losses))
def compute_fnr(out_scores, in_scores, fpr_cutoff=.05):
'''
Compute the false-negative rate (FNR) at the given false-positive-rate cutoff
(default: a 5% FPR).
'''
in_labels = np.zeros(len(in_scores))
out_labels = np.ones(len(out_scores))
y_true = np.concatenate([in_labels, out_labels])
y_score = np.concatenate([in_scores, out_scores])
fpr, fnr, thresholds = det_curve(y_true=y_true, y_score=y_score)
idx = np.argmin(np.abs(fpr - fpr_cutoff))
fpr_at_fpr_cutoff = fpr[idx]
fnr_at_fpr_cutoff = fnr[idx]
if fpr_at_fpr_cutoff > 0.1:
fnr_at_fpr_cutoff = 1.0
return fnr_at_fpr_cutoff
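# e.g. with fpr_cutoff=.05 this returns the fraction of OOD samples that are
# missed when roughly 5% of in-distribution samples are falsely flagged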
# test function
def test(test_dataset=True, clean_dataset=True):
"""
tests current model
test_dataset: if true, then uses test dataloaders, if false uses validation dataloaders
"""
if test_dataset:
print('testing...')
else:
print('validation...')
# decide which dataloader to use for in-distribution
if test_dataset:
in_loader = test_loader
else:
in_loader = valid_loader_in
net.eval()
# in-distribution performance
print("computing over in-distribution data...\n")
with torch.no_grad():
accuracies = []
OOD_scores_P0 = []
for data, target in in_loader:
if args.ngpu > 0:
data, target = data.cuda(), target.cuda()
# forward
output = net(data)
if args.score in ["woods_nn"]:
# classification accuracy
output_classification = output[:len(data), :num_classes]
pred = output_classification.data.max(1)[1]
accuracies.append(accuracy_score(list(to_np(pred)), list(to_np(target))))
# OOD scores
np_in = to_np(output[:, num_classes])
np_in_list = list(np_in)
OOD_scores_P0.extend(np_in_list)
elif args.score in ['energy', 'OE', 'energy_vos', 'woods']:
# classification accuracy
pred = output.data.max(1)[1]
accuracies.append(accuracy_score(list(to_np(pred)), list(to_np(target))))
if args.score in ['energy', 'energy_vos', 'woods']:
# OOD scores
OOD_scores_P0.extend(list(-to_np((args.T * torch.logsumexp(output / args.T, dim=1)))))
elif args.score == 'OE':
# OOD scores
smax = to_np(F.softmax(output, dim=1))
OOD_scores_P0.extend(list(-np.max(smax, axis=1)))
# OOD scores for either mixture or PX
OOD_scores_P_out = []
if test_dataset:
print("computing over ood data for testing...\n")
# OOD performance
with torch.no_grad():
for data, target in test_loader_ood:
if args.ngpu > 0:
data, target = data.cuda(), target.cuda()
output = net(data)
if args.score in [ "woods_nn"]:
np_out = to_np(output[:, num_classes])
np_out_list = list(np_out)
OOD_scores_P_out.extend(np_out_list)
elif args.score in ['energy', 'energy_vos', 'woods']:
OOD_scores_P_out.extend(list(-to_np((args.T * torch.logsumexp(output / args.T, dim=1)))))
elif args.score == 'OE':
smax = to_np(F.softmax(output, dim=1))
OOD_scores_P_out.extend(list(-np.max(smax, axis=1)))
else:
if clean_dataset:
print("computing over clean OOD validation set...\n")
for aux_out_set in valid_loader_aux_out:
data = aux_out_set[0]
if args.ngpu > 0:
data = data.cuda()
# forward
output = net(data)
if args.score in ["woods_nn"]:
np_out = to_np(output[:, num_classes])
np_out_list = list(np_out)
OOD_scores_P_out.extend(np_out_list)
elif args.score in ['energy', 'energy_vos', 'woods']:
OOD_scores_P_out.extend(list(-to_np((args.T * torch.logsumexp(output / args.T, dim=1)))))
elif args.score == 'OE':
smax = to_np(F.softmax(output, dim=1))
OOD_scores_P_out.extend(list(-np.max(smax, axis=1)))
else:
print("computing over mixture for validation...\n")
for aux_in_set, aux_out_set in zip(valid_loader_aux_in, valid_loader_aux_out):
data = mix_batches(aux_in_set, aux_out_set)
if args.ngpu > 0:
data = data.cuda()
# forward
output = net(data)
if args.score in ["woods_nn"]:
np_out = to_np(output[:, num_classes])
np_out_list = list(np_out)
OOD_scores_P_out.extend(np_out_list)
elif args.score in ['energy', 'energy_vos', 'woods']:
OOD_scores_P_out.extend(list(-to_np((args.T * torch.logsumexp(output / args.T, dim=1)))))
elif args.score == 'OE':
smax = to_np(F.softmax(output, dim=1))
OOD_scores_P_out.extend(list(-np.max(smax, axis=1)))
# compute FNR and accuracy
fnr = compute_fnr(np.array(OOD_scores_P_out), np.array(OOD_scores_P0))
acc = sum(accuracies) / len(accuracies)
# store and print results
if test_dataset:
state['fnr_test'].append(fnr)
state['test_accuracy'].append(acc)
state['OOD_scores_P0_test'].append(OOD_scores_P0)
state['OOD_scores_Ptest'].append(OOD_scores_P_out)
plt.hist(OOD_scores_P0, alpha=0.5, label='in')
plt.hist(OOD_scores_P_out, alpha=0.5, label='out')
plt.legend()
plt.title('epoch {}, test FNR = {}'.format(epoch, fnr))
if not os.path.exists('figs_pycharm'):
os.mkdir('figs_pycharm')
plt.savefig('figs_pycharm/test_epoch{}.png'.format(epoch))
plt.clf()
plt.close()
wandb.log({"fnr_test": fnr,
"test_accuracy": acc,
'epoch':epoch})
print("\n fnr_test {}".format(state['fnr_test']))
print("test_accuracy {} \n".format(state['test_accuracy']))
else:
if clean_dataset:
state['fnr_valid_clean'].append(fnr)
state['valid_accuracy_clean'].append(acc)
state['OOD_scores_P0_valid_clean'].append(OOD_scores_P0)
state['OOD_scores_PX_valid_clean'].append(OOD_scores_P_out)
wandb.log({"validation_fnr_clean": fnr,
"validation_accuracy_clean": acc,
'epoch': epoch})
print("\n fnr_valid_clean {}".format(state['fnr_valid_clean']))
print("valid_accuracy_clean {} \n".format(state['valid_accuracy_clean']))
else:
state['fnr_valid'].append(fnr)
state['valid_accuracy'].append(acc)
state['OOD_scores_P0_valid'].append(OOD_scores_P0)
state['OOD_scores_PX_valid'].append(OOD_scores_P_out)
wandb.log({"validation_fnr": fnr,
"validation_accuracy": acc,
'epoch':epoch})
print("\n fnr_valid {}".format(state['fnr_valid']))
print("valid_accuracy {} \n".format(state['valid_accuracy']))
def evaluate_classification_loss_training():
'''
evaluate classification loss on training dataset
'''
net.eval()
losses = []
for in_set in train_loader_in:
# print('batch', batch_num, '/', min(len(train_loader_in), len(train_loader_out)))
data = in_set[0]
target = in_set[1]
if args.ngpu > 0:
data, target = data.cuda(), target.cuda()
# forward
x = net(data)
# in-distribution classification accuracy
x_classification = x[:, :num_classes]
loss_ce = F.cross_entropy(x_classification, target, reduction='none')
losses.extend(list(to_np(loss_ce)))
avg_loss = np.mean(np.array(losses))
print("average loss fr classification {}".format(avg_loss))
return avg_loss
def evaluate_energy_logistic_loss():
'''
evaluate energy logistic loss on training dataset
'''
net.eval()
sigmoid_energy_losses = []
logistic_energy_losses = []
ce_losses = []
for in_set in train_loader_in:
# print('batch', batch_num, '/', min(len(train_loader_in), len(train_loader_out)))
data = in_set[0]
target = in_set[1]
if args.ngpu > 0:
data, target = data.cuda(), target.cuda()
# forward
x = net(data)
# compute energies
Ec_in = torch.logsumexp(x, dim=1)
# compute labels
binary_labels_1 = torch.ones(len(data)).cuda()
# compute in distribution logistic losses
logistic_loss_energy_in = F.binary_cross_entropy_with_logits(logistic_regression(
Ec_in.unsqueeze(1)).squeeze(), binary_labels_1, reduction='none')
logistic_energy_losses.extend(list(to_np(logistic_loss_energy_in)))
# compute in distribution sigmoid losses
sigmoid_loss_energy_in = torch.sigmoid(logistic_regression(
Ec_in.unsqueeze(1)).squeeze())
sigmoid_energy_losses.extend(list(to_np(sigmoid_loss_energy_in)))
# in-distribution classification losses
x_classification = x[:, :num_classes]
loss_ce = F.cross_entropy(x_classification, target, reduction='none')
ce_losses.extend(list(to_np(loss_ce)))
avg_sigmoid_energy_losses = np.mean(np.array(sigmoid_energy_losses))
print("average sigmoid in distribution energy loss {}".format(avg_sigmoid_energy_losses))
avg_logistic_energy_losses = np.mean(np.array(logistic_energy_losses))
print("average in distribution energy loss {}".format(avg_logistic_energy_losses))
avg_ce_loss = np.mean(np.array(ce_losses))
print("average loss fr classification {}".format(avg_ce_loss))
return avg_sigmoid_energy_losses, avg_logistic_energy_losses, avg_ce_loss
print('Beginning Training\n')
#compute training loss for woods methods
if args.score in [ 'woods_nn', 'woods']:
full_train_loss = evaluate_classification_loss_training()
###################################################################
# Main loop
###################################################################
min_val_fnr = 1.0
min_val_fnr_clean = 1.0
min_test_fnr = 1.0
state['best_epoch_valid'] = 0
state['best_epoch_valid_clean'] = 0
state['best_epoch_test'] = 0
for epoch in range(0, args.epochs):
print('epoch', epoch + 1, '/', args.epochs)
state['epoch'] = epoch
begin_epoch = time.time()
train(epoch)
test(test_dataset=False, clean_dataset=False) # test on mixed validation set
test(test_dataset=False, clean_dataset=True) # test on clean validation set
test(test_dataset=True) # test on test dataset
scheduler.step()
# check for best epoch based on val fnr - mixed
if state['fnr_valid'][-1] < min_val_fnr:
best_epoch_val_old = state['best_epoch_valid']
state['best_epoch_valid'] = epoch
min_val_fnr = state['fnr_valid'][-1]
# check for best epoch based on val fnr - clean
if state['fnr_valid_clean'][-1] < min_val_fnr_clean:
best_epoch_valid_clean_old = state['best_epoch_valid_clean']
state['best_epoch_valid_clean'] = epoch
min_val_fnr_clean = state['fnr_valid_clean'][-1]
# check for best epoch based on val fnr
if state['fnr_test'][-1] < min_test_fnr:
best_epoch_test_old = state['best_epoch_test']
state['best_epoch_test'] = epoch
min_test_fnr = state['fnr_test'][-1]
print('best valid epoch = {}'.format(state['best_epoch_valid']))
print('best valid epoch (clean) = {}'.format(state['best_epoch_valid_clean']))
print('best test epoch = {}'.format(state['best_epoch_test']))
wandb.log({"best_epoch_valid": state['best_epoch_valid'],
"best_epoch_valid_clean": state['best_epoch_valid_clean'],
"best_epoch_test": state['best_epoch_test'],
'epoch': epoch})
# save model checkpoint
if args.checkpoints_dir != '' and epoch in [state['best_epoch_valid'], state['best_epoch_valid_clean'], state['best_epoch_test'], args.epochs-1]:
model_checkpoint_dir = os.path.join(args.checkpoints_dir,
args.dataset,
args.aux_out_dataset,
args.score)
if not os.path.exists(model_checkpoint_dir):
os.makedirs(model_checkpoint_dir, exist_ok=True)
model_filename = '{}_epoch_{}.pt'.format(method_data_name, epoch)
model_path = os.path.join(model_checkpoint_dir, model_filename)
print('saving model to {}'.format(model_path))
torch.save(net.state_dict(), model_path)
#save path name
if epoch == state['best_epoch_valid']:
state['model_loc_valid'] = model_path
if epoch == state['best_epoch_valid_clean']:
state['model_loc_valid_clean'] = model_path
if epoch == state['best_epoch_test']:
state['model_loc_test'] = model_path
if epoch == args.epochs-1:
state['model_loc_last'] = model_path
if state['best_epoch_valid'] == epoch:
# delete previous checkpoint
if best_epoch_val_old not in [epoch, state['best_epoch_test'], state['best_epoch_valid_clean']]:
print('deleting old best valid epoch')
model_filename_prev = '{}_epoch_{}.pt'.format(method_data_name, best_epoch_val_old)
model_path_prev = os.path.join(model_checkpoint_dir, model_filename_prev)
if os.path.exists(model_path_prev):
print('removing {}'.format(model_path_prev))
os.remove(model_path_prev)
if state['best_epoch_valid_clean'] == epoch:
# delete previous checkpoint
if best_epoch_valid_clean_old not in [epoch, state['best_epoch_test'], state['best_epoch_valid']]:
print('deleting old best valid epoch (clean)')
model_filename_prev = '{}_epoch_{}.pt'.format(method_data_name, best_epoch_valid_clean_old)
model_path_prev = os.path.join(model_checkpoint_dir, model_filename_prev)
if os.path.exists(model_path_prev):
print('removing {}'.format(model_path_prev))
os.remove(model_path_prev)
if state['best_epoch_test'] == epoch:
# delete previous checkpoint
if best_epoch_test_old not in [epoch, state['best_epoch_valid'], state['best_epoch_valid_clean']]:
print('deleting old best test epoch')
model_filename_prev = '{}_epoch_{}.pt'.format(method_data_name, best_epoch_test_old)
model_path_prev = os.path.join(model_checkpoint_dir, model_filename_prev)
if os.path.exists(model_path_prev):
print('removing {}'.format(model_path_prev))
os.remove(model_path_prev)
for t in range(epoch):
if t not in [state['best_epoch_valid'], state['best_epoch_valid_clean'], state['best_epoch_test'], args.epochs-1]:
state['OOD_scores_P0_valid'][t] = 0
state['OOD_scores_PX_valid'][t] = 0
state['OOD_scores_P0_valid_clean'][t] = 0
state['OOD_scores_PX_valid_clean'][t] = 0
state['OOD_scores_P0_test'][t] = 0
state['OOD_scores_Ptest'][t] = 0
# save results to .pkl file
results_dir = os.path.join(args.results_dir,
args.dataset,
args.aux_out_dataset,
args.score)
if not os.path.exists(results_dir):
os.makedirs(results_dir, exist_ok=True)
results_filename = '{}.pkl'.format(method_data_name)
results_path = os.path.join(results_dir, results_filename)
with open(results_path, 'wb') as f:
print('saving results to', results_path)
pickle.dump(state, f)
``` |
{
"source": "jkausti/flask-textsapi",
"score": 2
} |
#### File: textsapi/models/text.py
```python
from pynamodb.models import Model
from pynamodb.attributes import UnicodeAttribute, BooleanAttribute, NumberAttribute
import os
def table_name():
return 'textapi-{}'.format(os.getenv('BOILERPLATE_ENV', 'dev'))
class Text(Model):
class Meta:
table_name = table_name()
region = 'eu-central-1'
username = UnicodeAttribute(hash_key=True)
sort = UnicodeAttribute(range_key=True)
bucket_id = NumberAttribute()
raw_text_path = UnicodeAttribute()
preprocessed_text_path = UnicodeAttribute(null=True)
processed_text_path = UnicodeAttribute(null=True)
processing_complete = BooleanAttribute(default=False)
submission_id = NumberAttribute()
public_id = NumberAttribute()
def __repr__(self):
return "Processed: {}".format(self.processing_complete)
```
#### File: textsapi/service/auth_helper.py
```python
from ..models.user import User
from .blacklist_service import save_token
from pynamodb.exceptions import DoesNotExist
from ..config import key
import traceback
import jwt
class Auth:
@staticmethod
def obtain_user_token(data):
try:
user = User.get(hash_key=data['username'], range_key='customer')
if user and user.verify_password(data['password']):
auth_token = user.encode_auth_token(user.username, user.sort)
if auth_token:
response_object = {
'status': 'success',
'message': 'Successfully obtained token.',
'authorization': auth_token.decode()
}
return response_object, 200
else:
response_object = {
'status': 'failed',
'message': 'username or password does not match'
}
return response_object, 401
except DoesNotExist:
traceback.print_exc()
response_object = {
'status': 'failed',
'message': 'username or password does not match'
}
return response_object, 401
except Exception:
traceback.print_exc()
response_object = {
'status': 'failed',
'message': 'Application error. Contact system owner.'
}
return response_object, 500
@staticmethod
def destroy_user_token(data):
if data:
auth_token = data.split(" ")[1]
else:
auth_token = ''
if auth_token:
resp = User.decode_auth_token(auth_token)
if isinstance(resp, str):
return save_token(token=auth_token, username=resp)
else:
response_object = {
'status': 'failed',
'message': resp
}
return response_object, 401
else:
response_object = {
'status': 'failed',
'message': 'Provide a valid auth token'
}
return response_object, 403
@staticmethod
def get_logged_in_user(new_request):
auth_token = new_request.headers.get('Authorization')
if auth_token:
try:
try:
payload = User.decode_auth_token(auth_token.split(' ')[1])
username = payload['sub']
user_type = payload['user_type']
except TypeError:
response_object = {
'status': 'failed',
'message': 'Could not authenticate user.'
}
return response_object, 404
if isinstance(username, str):
user = User.get(hash_key=username, range_key=user_type)
response_object = {
'status': 'success',
'data': {
'username': user.username,
'type': user.sort,
'email': user.email,
'registered_on': str(user.registered_on),
'public_id': user.public_id
}
}
return response_object, 200
else:
response_object = {
'status': 'failed',
'message': username[1]
}
return response_object, 401
except Exception as e:
traceback.print_exc()
return {
'status': 'failed',
'message': 'Could not authenticate user.'
}, 500
else:
response_object = {
'status': 'failed',
'message': 'Provide a valid auth-token'
}
return response_object, 401
@staticmethod
def obtain_root_token(data):
try:
user = User.get(hash_key=data['username'], range_key='root_admin')
if user and user.verify_password(data['password']):
auth_token = user.encode_auth_token(user.username, user.sort)
if auth_token:
response_object = {
'status': 'success',
'message': 'Successfully obtained token.',
'Authorization': auth_token.decode()
}
return response_object, 200
else:
response_object = {
'status': 'failed',
'message': 'username or password does not match'
}
return response_object, 401
except DoesNotExist:
traceback.print_exc()
response_object = {
'status': 'failed',
'message': 'username or password does not match'
}
return response_object, 401
except Exception:
traceback.print_exc()
response_object = {
'status': 'failed',
'message': 'Application error. Contact system owner.'
}
return response_object, 500
@staticmethod
def obtain_admin_token(data):
try:
user = User.get(hash_key=data['username'], range_key='admin')
if user and user.verify_password(data['password']):
auth_token = user.encode_auth_token(user.username, user.sort)
if auth_token:
response_object = {
'status': 'success',
'message': 'Successfully obtained token.',
'Authorization': auth_token.decode()
}
return response_object, 200
else:
response_object = {
'status': 'failed',
'message': 'username or password does not match'
}
return response_object, 401
except DoesNotExist:
traceback.print_exc()
response_object = {
'status': 'failed',
'message': 'username or password does not match'
}
return response_object, 401
except Exception:
traceback.print_exc()
response_object = {
'status': 'failed',
'message': 'Application error. Contact system owner.'
}
return response_object, 500
```
#### File: textsapi/service/text_service.py
```python
import traceback
import json
from pynamodb.models import DoesNotExist
from ..models.text import Text
from ..models.bucket import Bucket
from ..models.submission import Submission
from ..service.s3buckets import get_object
def get_text(username, text_id):
"""
Method that fetches the requested text from DB and S3.
"""
try:
text = Text.get(hash_key=username, range_key="TEXT_{}".format(text_id))
except DoesNotExist:
response_object = {
'status': 'failed',
'message': 'Text ID not in database.'
}
return response_object, 400
try:
bucket = Bucket.get(hash_key=username, range_key="BUCKET_{}".format(text.bucket_id))
submission = Submission.get(
hash_key=username, range_key="SUBMISSION_{}".format(text.submission_id)
)
raw_text = get_object(bucket.bucket_name, text.raw_text_path)
processed_text = json.loads(get_object(bucket.bucket_name, text.processed_text_path))
response_object = {
"id": text.public_id,
"submitted_date": submission.submitted_date,
"processing_complete": text.processing_complete,
"raw_text": raw_text,
"processed_text": processed_text,
}
return response_object, 200
except Exception:
traceback.print_exc()
response_object = {
'status': 'failed',
'message': 'server error'
}
return response_object, 500
```
#### File: textsapi/util/decorator.py
```python
from functools import wraps
from flask import request
from ..service.auth_helper import Auth
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
data, status = Auth.get_logged_in_user(request)
token = data.get('data')
if not token:
return data, status
return f(*args, **kwargs)
return decorated
def admin_token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
data, status = Auth.get_logged_in_user(request)
token = data.get('data')
if not token:
return data, status
user_type = token.get('type')
if user_type != 'admin':
response_object = {
'status': 'failed',
'message': 'User does not have admin rights.'
}
return response_object, 401
return f(*args, **kwargs)
return decorated
def root_token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
data, status = Auth.get_logged_in_user(request)
token = data.get('data')
if not token:
return data, status
user_type = token.get('type')
if user_type != 'root_admin':
response_object = {
'status': 'failed',
'message': 'User is not root.'
}
return response_object, 401
return f(*args, **kwargs)
return decorated
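# Hypothetical usage (not shown in this file): apply to a Flask view / resource
# method, e.g.
#   @token_required
#   def get(self):
#       ...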
```
#### File: textsapi/util/preprocessor.py
```python
from nltk.tokenize import sent_tokenize, word_tokenize
def preprocess(text):
sentences = sent_tokenize(text)
preprocessed = []
for sent in sentences:
preprocessed.append("\n".join(word_tokenize(sent)))
preprocessed = "\n\n".join(preprocessed)
return preprocessed
```
#### File: jkausti/flask-textsapi/manage.py
```python
import os
import unittest
import argparse
from flask_script import Manager
from app.textsapi import create_app
from app import blueprint
from app.textsapi.service.user_service import create_root_user
app = create_app(os.getenv("BOILERPLATE_ENV") or "dev")
app.register_blueprint(blueprint)
app.app_context().push()
manager = Manager(app)
@manager.command
def run():
app.run()
@manager.command
def root():
""" Creates a root user """
username = input("Please enter the root username: ")
while len(username) < 4 or any(not c.isalnum() for c in username):
username = input(
"Root username needs to be at least 4 charachters long and only contain alphanumeric characters. Try again: "
)
email = input("Please enter the root email: ")
password = input("Please enter the root password: ")
spec = (" ", "_", "\n")
while len(password) < 6 or any(c in spec for c in password):
password = input(
"Password cannot contain whitespaces or underscores and cannot be less than 6 characters long. Try again: "
)
return create_root_user(username, email, password)
# @manager.command
# def test():
# """
# Runs the unittests.
# """
# tests = unittest.TestLoader().discover("app/tests", pattern="test*.py")
# result = unittest.TextTestRunner(verbosity=2).run(tests)
# if result.wasSuccessful():
# return 0
# return 1
@manager.command
def test(layer="*", component="*"):
"""
Runs the unittests.
"""
tests = unittest.TestLoader().discover(
"app/tests", pattern="test_{layer}_{component}.py".format(layer=layer, component=component)
)
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
if __name__ == "__main__":
manager.run()
``` |
{
"source": "jkavan/highlite",
"score": 3
} |
#### File: jkavan/highlite/highlite.py
```python
import sys
import getopt
import re
from termcolor import colored
#
# You can freely customize the colors and/or styles if you like (though
# the changes may be overwritten by the upgrade process).
#
# Available colors:
# fore back
# ---- ----
# grey on_grey
# red on_red
# green on_green
# yellow on_yellow
# blue on_blue
# magenta on_magenta
# cyan on_cyan
# white on_white
#
# Available styles:
# bold
# dark
# underline
# blink
# reverse
# concealed
colors = [
["green", ""],
["red", ""],
["yellow", ""],
["blue", ""],
["magenta", ""],
["white", ""],
["cyan", ""],
["grey", "on_white"],
["blue", "on_white"],
["yellow", "on_white"],
["white", "on_red"],
["white", "on_yellow"],
["white", "on_blue"],
["white", "on_magenta"],
]
styles = ["bold", "underline"]
# Search is case-sensitive by default. Can be overriden with `-i`
ignore_case = False
# ---
USAGE = ("An utility for highlighting command line output using one or more regular expressions.\n"
"\n"
"Usage: [COMMAND] | hl [OPTION] REGEX...\n"
"\n"
"OPTIONS:\n"
" -h, --help Print this help message\n"
" -v, --version Print version information\n"
" -i, --ignore-case Ignore case when searching\n"
)
VERSION = "highlite version 0.1.0"
def get_fore_color(index):
""" Returns a foreground color from the list """
index = index % (len(colors))
color = colors[index][0]
if color == '':
return None
return color
def get_back_color(index):
""" Returns a background color from the list """
index = index % (len(colors))
color = colors[index][1]
if color == '':
return None
return color
def colorize(text, regexes, ignore_case):
"""
Surrounds regex matches with ANSI colors and returns the colored text
:param text: Text that will be colorized.
:param regexes: A list of search terms (in regexp format). Which text matches to colorize.
:return: Colorized text.
"""
flags = re.IGNORECASE if ignore_case else 0
# Loop through each argument (regex)
for index, regex in enumerate(regexes):
# Add color around the matches
text = re.sub(regex,
lambda m: colored(
'{}'.format(m.group()),
get_fore_color(index),
get_back_color(index),
styles),
text,
flags=flags)
return text
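# For example (illustrative only):
#   colorize("error: low disk space", ["error", "disk"], ignore_case=True)
# returns the line with ANSI color/style codes wrapped around every match.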
def validate_regex(regexes):
""" Checks if the given regex(es) are valid. """
try:
for regex in regexes:
re.compile(regex)
except re.error:
print("Invalid regex pattern: " + regex)
sys.exit(1)
def parse_args(args):
"""
Parses command line arguments and sets global options
:returns: operands (list of regexes)
"""
global ignore_case
try:
options, arguments = getopt.getopt(
args,
'vhi',
["version", "help", "ignore-case"])
for o, a in options:
if o in ("-v", "--version"):
print(VERSION)
sys.exit()
if o in ("-h", "--help"):
print(VERSION)
print(USAGE)
sys.exit()
if o in ("-i", "--ignore-case"):
ignore_case = True
if not arguments:
print(USAGE)
sys.exit(1)
# save regexes (operands) to a list
operands = [str(arg) for arg in arguments]
except (getopt.GetoptError, ValueError) as e:
print("Error: " + e.msg)
print(USAGE)
sys.exit(1)
return operands
def main():
""" Main function """
regexes = parse_args(sys.argv[1:])
global ignore_case
try:
# Use command line arguments as regexes
validate_regex(regexes)
# Tell Python to ignore the line if it contains invalid Unicode data
sys.stdin.reconfigure(errors='ignore')
# Read lines from stdin
for line in sys.stdin:
line = colorize(line.rstrip(), regexes, ignore_case)
sys.stdout.write(line + '\n')
# Catch Ctrl+C and exit cleanly
except KeyboardInterrupt:
sys.stdout.flush()
if __name__ == "__main__":
main()
``` |
{
"source": "jkawamoto/dargparse",
"score": 3
} |
#### File: dargparse/tests/dsargparse_test.py
```python
import argparse
import textwrap
import unittest
import dsargparse
class TestParser(unittest.TestCase):
"""Unit tests for _parse_doc function.
"""
def test_full_document(self):
"""Test for a full information docstring.
"""
ans = dsargparse._parse_doc(dsargparse._parse_doc.__doc__)
self.assertEqual(ans["headline"], "Parse a docstring.")
self.assertEqual(ans["description"], textwrap.dedent("""\
Parse a docstring.
Parse a docstring and extract three components; headline, description,
and map of arguments to help texts.
"""))
self.assertIn("doc", ans["args"])
self.assertEqual(ans["args"]["doc"], "docstring.")
def test_minimum_document(self):
"""Test for a minimum docstring.
"""
ans = dsargparse._parse_doc(dsargparse._checker.__doc__)
self.assertEqual(
ans["headline"],
"Generate a checker which tests a given value not starts with keywords.")
self.assertEqual(
ans["description"],
"Generate a checker which tests a given value not starts with keywords.")
self.assertEqual(len(ans["args"]), 0)
def test_docstring_without_description(self):
""" Test for a docstring which doesn't have descriptions.
"""
ans = dsargparse._parse_doc("""Test docstring.
Args:
one: definition of one.
two: definition of two.
Returns:
some value.
""")
self.assertEqual(ans["headline"], "Test docstring.")
self.assertEqual(
ans["description"],
textwrap.dedent("""\
Test docstring."""))
self.assertIn("one", ans["args"])
self.assertEqual(ans["args"]["one"], "definition of one.")
self.assertIn("two", ans["args"])
self.assertEqual(ans["args"]["two"], "definition of two.")
def test_docstring_without_args(self):
""" Test for a docstring which doesn't have args.
"""
ans = dsargparse._parse_doc("""Test docstring.
This function do something.
Returns:
some value.
""")
self.assertEqual(ans["headline"], "Test docstring.")
self.assertEqual(
ans["description"],
textwrap.dedent("""\
Test docstring.
This function do something.
"""))
self.assertEqual(len(ans["args"]), 0)
class TestModule(unittest.TestCase):
def test_modules(self):
""" Test dsargparse module has same objects as argparse.
"""
for name in argparse.__all__:
self.assertTrue(hasattr(dsargparse, name))
def test_filetype(self):
""" Test create dsargparse's filetype.
"""
self.assertIsNotNone(dsargparse.FileType("r"))
if __name__ == "__main__":
unittest.main()
```
#### File: dargparse/tests/test_suite.py
```python
import sys
import unittest
from . import dsargparse_test
def suite():
""" Returns a test suite.
"""
loader = unittest.TestLoader()
res = unittest.TestSuite()
res.addTest(loader.loadTestsFromModule(dsargparse_test))
return res
def main():
""" The main function.
Returns:
Status code.
"""
try:
res = unittest.TextTestRunner(verbosity=2).run(suite())
except KeyboardInterrupt:
print("Test canceled.")
return -1
else:
return 0 if res.wasSuccessful() else 1
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "jkawamoto/docker-jenkins",
"score": 2
} |
#### File: jkawamoto/docker-jenkins/fabfile.py
```python
from fabric.api import *
from fabric.contrib import files
env.use_ssh_config = True
DIR = "jenkins"
TAG = "jkawamoto/jenkins"
@task
def deploy():
""" Upload contents. """
if not files.exists(DIR):
run("mkdir {0}".format(DIR))
with cd(DIR):
put("Dockerfile", ".")
put("bin", ".", mirror_local_mode=True)
@task
def build():
""" Build a docker image. """
with cd(DIR):
run("docker build -t {0} .".format(TAG))
``` |
{
"source": "jkawamoto/docker-notifier",
"score": 2
} |
#### File: docker-notifier/bin/docker.py
```python
import contextlib
import json
import requests
from adapter import SocketAdapter
BASE="http://127.0.0.1"
MONITORING="/events?since={0}"
LIST_CONTAINERS="/containers/json?"
INSPECT="/containers/{id}/json"
class Docker(object):
def __init__(self, path):
self._path = path
def events(self):
with contextlib.closing(self._new_session()) as session:
res = session.get(BASE+"/events", stream=True)
raw = res.raw
buf = []
while True:
c = raw.read(1)
if c == "":
break
buf.append(c)
if c == "}":
yield json.loads("".join(buf))
buf = []
def list(self, all=None, since=None, before=None):
# GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
query = []
if all:
query.append("all=true")
if since:
query.append("since=" + since)
if before:
query.append("before=" + since)
with contextlib.closing(self._new_session()) as session:
res = session.get(BASE+LIST_CONTAINERS + "&".join(query))
return res.json()
def inspect(self, id):
with contextlib.closing(self._new_session()) as session:
res = session.get(BASE+INSPECT.format(id=id))
return res.json()
def _new_session(self):
s = requests.Session()
s.mount('http://', SocketAdapter(self._path))
return s
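# Illustrative usage: stream events from the local daemon over its Unix socket
#   docker = Docker("/var/run/docker.sock")
#   for event in docker.events():
#       print(event["status"])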
```
#### File: docker-notifier/bin/notifier.py
```python
import argparse
import fnmatch
import json
import re
import sys
from docker import Docker
from pushover import Pushover
__APPLICATION__="docker-notifier"
class PushoverNotifier(object):
def __init__(self, user, token):
self._pushover = Pushover(user, token)
def create(self, id, name=None):
pass
def die(self, id, name=None):
if name:
self._pushover.send("Container {0} exited.".format(name))
class StreamNotifier(object):
def __init__(self, output):
self._output = output
def create(self, id, name=None):
self._write(id, name, "create")
def die(self, id, name=None):
self._write(id, name, "die")
def _write(self, id, name, status):
data = {
"posted-by": __APPLICATION__,
"name": name,
"id": id,
"status": status
}
if name:
data["name"] = name
json.dump(data, self._output)
self._output.write("\n")
def main(socket, filter, notifier, **kwargs):
regex = None
if filter:
regex = re.compile(fnmatch.translate(filter))
docker = Docker(socket)
push = notifier(**kwargs)
names = {}
for e in docker.events():
if e["status"] == "create":
id = e["id"]
res = docker.inspect(id)
name = res["Name"][1:]
names[id] = name
if not regex or regex.match(name):
push.create(id, name)
if e["status"] == "die":
id = e["id"]
name = names[id] if id in names else None
if not regex or regex.match(name):
push.die(id, name)
if id in names:
del names[id]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--socket", default="/var/run/docker.sock", help="Unix socket file of docker.")
parser.add_argument("--filter", help="Unix style pattern to filter containers.")
subparsers = parser.add_subparsers()
pushover_cmd = subparsers.add_parser("pushover", help="Notify events via Pushover.")
pushover_cmd.add_argument("user", help="User key.")
pushover_cmd.add_argument("token", help="Application key.")
pushover_cmd.set_defaults(notifier=PushoverNotifier)
stream_cmd = subparsers.add_parser("stream", help="Notify events via stdout/file.")
stream_cmd.add_argument("--output", default=sys.stdout, type=argparse.FileType("w"))
stream_cmd.set_defaults(notifier=StreamNotifier)
try:
main(**vars(parser.parse_args()))
except KeyboardInterrupt:
pass
``` |
{
"source": "jkawamoto/docker-sphinx-make",
"score": 3
} |
#### File: docker-sphinx-make/bin/entrypoint.py
```python
import argparse
import logging
import subprocess
import sys
def run(params, cwd):
""" Run make command.
Args:
params: parameters for make command.
cwd: direcory where make command run.
"""
cmd = " ".join(["make", ] + params)
logging.info("Start a build process. (cmd=%s)", cmd)
# Create a subprocess.
proc = subprocess.Popen(
cmd, cwd=cwd, shell=True, stdout=sys.stdout, stderr=sys.stderr)
# Wait the process will end.
proc.wait()
logging.info("The build process has ended.")
def main():
""" The main function.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--cwd", default="/data/",
help="specify a directory containing Makefile.")
parser.add_argument(
"params", default=["html"], nargs="*",
help="specify options of make (default: html).")
run(**vars(parser.parse_args()))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
try:
main()
except KeyboardInterrupt:
sys.exit(1)
finally:
logging.shutdown()
``` |
{
"source": "jkawamoto/i2cdisplay",
"score": 3
} |
#### File: jkawamoto/i2cdisplay/i2cdisplay.py
```python
import argparse
import smbus
class MPTHDisplay(object):
def __init__(self, bus, chip_address, width=16, height=2):
""" Construct a display with a port number of i2c bus and chip address.
Args:
bus: an integer port number of i2c bus.
chip_address: an integer chip address.
width: width of the display.
height: height of the display.
"""
self._bus = smbus.SMBus(bus)
self._chip_address = chip_address
self._width = width
self._height = height
self._x = 0
self._y = 0
@property
def width(self):
return self._width
@property
def height(self):
return self._height
def write(self, s):
""" Write a string to the display.
Args:
s: a string.
"""
if "\n" in s:
self.writelines(s.split("\n"))
else:
self._raw_write(list(map(ord, s)))
def writelines(self, lines):
""" Write lines.
Args:
lines: a list of strings.
"""
for line in lines:
self.write(line)
self.newline()
def newline(self):
""" Write a new line.
"""
self._raw_write([0x20]*(self.width-self._x))
def clear(self):
""" Clear display.
"""
self._raw_write([0x80, 0x05, 0x01])
def cursor(self, on):
""" Turn on/off a cursor.
Args:
on: If True, a cursor will be shown.
"""
if on:
self._raw_write([0x80, 0x05, 0x0e])
else:
self._raw_write([0x80, 0x05, 0x0c])
def backlight(self, value):
""" Change blightness of the back light.
Args:
value: an integer in 0 to 255.
"""
if value < 0 or value > 255:
raise ValueError("value must be in 0 to 255.")
self._raw_write([0x80, 0x01, value])
def _increment(self, v):
self._x += v
self._y += self._x // self.width
self._x %= self.width
self._y %= self.height
def _raw_write(self, s):
self._bus.write_i2c_block_data(self._chip_address, s[0], s[1:])
self._increment(len(s))
def _cbool(v):
if v.lower() == "false":
return False
else:
return True
def _write_action(display, input, **kwargs):
if len(input) == 1:
display.write(input[0])
else:
display.writelines(input)
def _clear_action(display, **kwargs):
display.clear()
def _cursor_action(display, on, **kwargs):
display.cursor(on)
def _bl_action(display, value, **kwargs):
display.backlight(value)
def _exec(func, bus, address, width, height, **kwargs):
display = MPTHDisplay(bus, address, width, height)
func(display, **kwargs)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("bus", type=int, help="an integer port number of i2c bus.")
parser.add_argument("address", type=int, help="an integer chip address.")
parser.add_argument("-W", "--width", default=16, type=int,
help="width of the display. (default: 16)")
parser.add_argument("-H", "--height", default=2, type=int,
help="height of the display. (default: 2)")
subparsers = parser.add_subparsers()
write_cmd = subparsers.add_parser("write", help="show texts.")
write_cmd.add_argument("input", nargs="+")
write_cmd.set_defaults(func=_write_action)
clear_cmd = subparsers.add_parser("clear", help="clear display.")
clear_cmd.set_defaults(func=_clear_action)
cursor_cmd = subparsers.add_parser("cursor", help="on/off a cursor.")
cursor_cmd.add_argument("on", type=_cbool, help="If True, a cursor will be shown.")
cursor_cmd.set_defaults(func=_cursor_action)
bl_cmd = subparsers.add_parser("backlight", help="change blightness.")
bl_cmd.add_argument("value", type=int, help="an integer in 0-255.")
bl_cmd.set_defaults(func=_bl_action)
_exec(**vars(parser.parse_args()))
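# Example invocations (bus/address values are hypothetical):
#   python i2cdisplay.py 1 62 write "Hello" "World"
#   python i2cdisplay.py 1 62 backlight 128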
if __name__ == "__main__":
main()
``` |
{
"source": "jkawamoto/roadie-gcp",
"score": 2
} |
#### File: roadie-gcp/tests/downloader_test.py
```python
import logging
import shutil
import sys
import unittest
import os
from os import path
import downloader # pylint: disable=import-error
TARGET_FILE = "bin/entrypoint.sh"
SAMPLE_FILE = "https://raw.githubusercontent.com/jkawamoto/roadie-gcp/master/bin/entrypoint.sh"
ORIGINAL_FILE = path.normpath(
path.join(path.dirname(__file__), "..", TARGET_FILE))
ARCHIVE_ROOT = "./roadie-gcp-20160618"
ZIP_FILE = "https://github.com/jkawamoto/roadie-gcp/archive/v20160618.zip"
TAR_FILE = "https://github.com/jkawamoto/roadie-gcp/archive/v20160618.tar.gz"
class TestDownload(unittest.TestCase):
""" Test case for download module.
"""
def test_download(self):
""" Test downloading a file.
"""
downloader.download(SAMPLE_FILE)
basename = path.basename(SAMPLE_FILE)
self.evaluate_file(basename, ORIGINAL_FILE)
os.remove(basename)
def test_set_destination(self):
""" Test downloading a file to another directory.
"""
downloader.download(SAMPLE_FILE + ":/tmp/")
target = "/tmp/" + path.basename(SAMPLE_FILE)
self.evaluate_file(target, ORIGINAL_FILE)
os.remove(target)
def test_rename(self):
""" Test downloading a file and renaming it.
"""
target = "test.md"
downloader.download(SAMPLE_FILE + ":" + target)
self.evaluate_file(target, ORIGINAL_FILE)
os.remove(target)
def test_set_destination_and_rename(self):
""" Test downloading a file to a directory and renaming it.
"""
target = "/tmp/test.md"
downloader.download(SAMPLE_FILE + ":" + target)
self.evaluate_file(target, ORIGINAL_FILE)
os.remove(target)
def test_download_zip(self):
""" Test downloading a zip file.
"""
downloader.download(ZIP_FILE)
target = path.join(ARCHIVE_ROOT, TARGET_FILE)
self.evaluate_file(target, ORIGINAL_FILE)
shutil.rmtree(ARCHIVE_ROOT)
def test_set_destination_zip(self):
""" Test downloading a zip file to a specified path.
"""
downloader.download(ZIP_FILE + ":/tmp/")
target = path.join("/tmp/", ARCHIVE_ROOT, TARGET_FILE)
self.evaluate_file(target, ORIGINAL_FILE)
shutil.rmtree(path.join("/tmp/", ARCHIVE_ROOT))
def test_download_tarball(self):
""" Test downloading a tarball file.
"""
downloader.download(TAR_FILE)
target = path.join(ARCHIVE_ROOT, TARGET_FILE)
self.evaluate_file(target, ORIGINAL_FILE)
shutil.rmtree(ARCHIVE_ROOT)
def test_set_destination_taball(self):
""" Test downloading a tarball file to a specified path.
"""
downloader.download(TAR_FILE + ":/tmp/")
target = path.join("/tmp/", ARCHIVE_ROOT, TARGET_FILE)
self.evaluate_file(target, ORIGINAL_FILE)
shutil.rmtree(path.join("/tmp/", ARCHIVE_ROOT))
def evaluate_file(self, target, original):
""" Evaluate existence and contents of the target file.
Args:
target: target file to be checked.
original: original file whose contents will be compared with those of the
target.
"""
self.assertTrue(path.exists(target))
self.assertEqual(
self.read_file(target),
self.read_file(original))
@staticmethod
def read_file(fpath):
""" Open a file and read it.
Args:
fpath: Path for a file.
Returns:
Contents of the file.
"""
with open(fpath) as f:
return f.read()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
unittest.main()
``` |
{
"source": "jkawamoto/sitemap-gen",
"score": 3
} |
#### File: sitemap-gen/tests/sitemap_gen_test.py
```python
from __future__ import absolute_import, print_function
import os
from os import path
import subprocess
import tempfile
import time
import unittest
from xml.etree import ElementTree
from sitemap_gen import sitemap_gen
HTMLDIR = path.join(path.dirname(__file__), "html")
class TestFind(unittest.TestCase):
"""Test case for finding html files.
"""
def setUp(self):
"""Set up the test case.
"""
with tempfile.NamedTemporaryFile(
suffix=".html", dir=HTMLDIR, delete=False) as fp:
self.path = fp.name
def tearDown(self):
"""Tear down the test case.
"""
os.remove(self.path)
def test(self):
"""Test find function returns `index.html`, `sub.html`, and a temp file.
"""
res = set(sitemap_gen.find(HTMLDIR))
self.assertIn("index.html", res)
self.assertIn("sub/sub.html", res)
self.assertIn(path.basename(self.path), res)
def test_tracked_files(self):
"""Test find function to get only tracked files.
"""
res = set(sitemap_gen.find(HTMLDIR, True))
self.assertIn("index.html", res)
self.assertIn("sub/sub.html", res)
self.assertNotIn(path.basename(self.path), res)
class TestModTime(unittest.TestCase):
"""Test case for obtaining modified times.
"""
def setUp(self):
"""Set up the test case.
"""
with tempfile.NamedTemporaryFile(
suffix=".html", dir=HTMLDIR, delete=False) as fp:
self.path = fp.name
def tearDown(self):
"""Tear down the test case.
"""
os.remove(self.path)
def test_index(self):
"""Obtaining the modified time of `index.html`.
"""
res = sitemap_gen.mod_time(path.join(HTMLDIR, "./index.html"))
ans = int(subprocess.check_output([
"git", "--no-pager", "log", "--pretty=%at", "-n1",
path.join(HTMLDIR, "index.html")
]))
self.assertEqual(res, ans)
def test_new_file(self):
"""Obtaining the modified time of a temporal file.
"""
res = sitemap_gen.mod_time(self.path)
self.assertEqual(res, int(path.getmtime(self.path)))
class TestSitemapGen(unittest.TestCase):
"""Test case for generating a site map.
"""
def setUp(self):
"""Set up the test case.
"""
with tempfile.NamedTemporaryFile(
suffix=".html", dir=HTMLDIR, delete=False) as fp:
self.path = fp.name
def tearDown(self):
"""Tear down the test case.
"""
os.remove(self.path)
def test(self):
"""Generating a site map.
"""
base_url = "https://jkawamoto.github.io/sitemap-gen/"
res = sitemap_gen.generate(base_url, HTMLDIR)
print(res)
root = ElementTree.fromstring(res)
self.assertEqual(root.tag, self.tagname("urlset"))
for elem in root:
self.assertEqual(elem.tag, self.tagname("url"))
loc = elem.findtext(self.tagname("loc"))[len(base_url):]
mod = time.strftime(
sitemap_gen.TIME_FORMAT,
time.gmtime(sitemap_gen.mod_time(path.join(HTMLDIR, loc))))
self.assertEqual(elem.findtext(self.tagname("lastmod")), mod)
def test_tracked_files(self):
"""Generating a site map with only tracked files.
"""
base_url = "https://jkawamoto.github.io/sitemap-gen/"
res = sitemap_gen.generate(base_url, HTMLDIR, True)
print(res)
root = ElementTree.fromstring(res)
self.assertEqual(root.tag, self.tagname("urlset"))
for elem in root:
self.assertEqual(elem.tag, self.tagname("url"))
loc = elem.findtext(self.tagname("loc"))[len(base_url):]
self.assertFalse(self.path.endswith(loc))
mod = time.strftime(
sitemap_gen.TIME_FORMAT,
time.gmtime(sitemap_gen.mod_time(path.join(HTMLDIR, loc))))
self.assertEqual(elem.findtext(self.tagname("lastmod")), mod)
@staticmethod
def tagname(tag):
"""Get a normalized tag name.
"""
return "{http://www.sitemaps.org/schemas/sitemap/0.9}" + tag
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jkawamoto/sphinx",
"score": 2
} |
#### File: sphinx/sphinx/setup_command.py
```python
import os
import sys
import warnings
from distutils.cmd import Command
from distutils.errors import DistutilsExecError
from io import StringIO
from typing import Any, Dict
from sphinx.application import Sphinx
from sphinx.cmd.build import handle_exception
from sphinx.deprecation import RemovedInSphinx70Warning
from sphinx.util.console import color_terminal, nocolor
from sphinx.util.docutils import docutils_namespace, patch_docutils
from sphinx.util.osutil import abspath
class BuildDoc(Command):
"""
Distutils command to build Sphinx documentation.
The Sphinx build can then be triggered from distutils, and some Sphinx
options can be set in ``setup.py`` or ``setup.cfg`` instead of Sphinx's
own configuration file.
For instance, from `setup.py`::
# this is only necessary when not using setuptools/distribute
from sphinx.setup_command import BuildDoc
cmdclass = {'build_sphinx': BuildDoc}
name = 'My project'
version = '1.2'
release = '1.2.0'
setup(
name=name,
author='<NAME>',
version=release,
cmdclass=cmdclass,
# these are optional and override conf.py settings
command_options={
'build_sphinx': {
'project': ('setup.py', name),
'version': ('setup.py', version),
'release': ('setup.py', release)}},
)
Or add this section in ``setup.cfg``::
[build_sphinx]
project = 'My project'
version = 1.2
release = 1.2.0
"""
description = 'Build Sphinx documentation'
user_options = [
('fresh-env', 'E', 'discard saved environment'),
('all-files', 'a', 'build all files'),
('source-dir=', 's', 'Source directory'),
('build-dir=', None, 'Build directory'),
('config-dir=', 'c', 'Location of the configuration directory'),
('builder=', 'b', 'The builder (or builders) to use. Can be a comma- '
'or space-separated list. Defaults to "html"'),
('warning-is-error', 'W', 'Turn warning into errors'),
('project=', None, 'The documented project\'s name'),
('version=', None, 'The short X.Y version'),
('release=', None, 'The full version, including alpha/beta/rc tags'),
('today=', None, 'How to format the current date, used as the '
'replacement for |today|'),
('link-index', 'i', 'Link index.html to the master doc'),
('copyright', None, 'The copyright string'),
('pdb', None, 'Start pdb on exception'),
('verbosity', 'v', 'increase verbosity (can be repeated)'),
('nitpicky', 'n', 'nit-picky mode, warn about all missing references'),
('keep-going', None, 'With -W, keep going when getting warnings'),
]
boolean_options = ['fresh-env', 'all-files', 'warning-is-error',
'link-index', 'nitpicky']
def initialize_options(self) -> None:
self.fresh_env = self.all_files = False
self.pdb = False
self.source_dir: str = None
self.build_dir: str = None
self.builder = 'html'
self.warning_is_error = False
self.project = ''
self.version = ''
self.release = ''
self.today = ''
self.config_dir: str = None
self.link_index = False
self.copyright = ''
# Link verbosity to distutils' (which uses 1 by default).
self.verbosity = self.distribution.verbose - 1 # type: ignore
self.traceback = False
self.nitpicky = False
self.keep_going = False
def _guess_source_dir(self) -> str:
for guess in ('doc', 'docs'):
if not os.path.isdir(guess):
continue
for root, _dirnames, filenames in os.walk(guess):
if 'conf.py' in filenames:
return root
return os.curdir
def finalize_options(self) -> None:
self.ensure_string_list('builder')
if self.source_dir is None:
self.source_dir = self._guess_source_dir()
self.announce('Using source directory %s' % self.source_dir)
self.ensure_dirname('source_dir')
if self.config_dir is None:
self.config_dir = self.source_dir
if self.build_dir is None:
build = self.get_finalized_command('build')
self.build_dir = os.path.join(abspath(build.build_base), 'sphinx') # type: ignore
self.doctree_dir = os.path.join(self.build_dir, 'doctrees')
self.builder_target_dirs = [
(builder, os.path.join(self.build_dir, builder))
for builder in self.builder]
def run(self) -> None:
warnings.warn('setup.py build_sphinx is deprecated.',
RemovedInSphinx70Warning, stacklevel=2)
if not color_terminal():
nocolor()
if not self.verbose: # type: ignore
status_stream = StringIO()
else:
status_stream = sys.stdout # type: ignore
confoverrides: Dict[str, Any] = {}
if self.project:
confoverrides['project'] = self.project
if self.version:
confoverrides['version'] = self.version
if self.release:
confoverrides['release'] = self.release
if self.today:
confoverrides['today'] = self.today
if self.copyright:
confoverrides['copyright'] = self.copyright
if self.nitpicky:
confoverrides['nitpicky'] = self.nitpicky
for builder, builder_target_dir in self.builder_target_dirs:
app = None
try:
confdir = self.config_dir or self.source_dir
with patch_docutils(confdir), docutils_namespace():
app = Sphinx(self.source_dir, self.config_dir,
builder_target_dir, self.doctree_dir,
builder, confoverrides, status_stream,
freshenv=self.fresh_env,
warningiserror=self.warning_is_error,
verbosity=self.verbosity, keep_going=self.keep_going)
app.build(force_all=self.all_files)
if app.statuscode:
raise DistutilsExecError(
'caused by %s builder.' % app.builder.name)
except Exception as exc:
handle_exception(app, self, exc, sys.stderr)
if not self.pdb:
raise SystemExit(1) from exc
if not self.link_index:
continue
src = app.config.root_doc + app.builder.out_suffix # type: ignore
dst = app.builder.get_outfilename('index') # type: ignore
os.symlink(src, dst)
``` |
{
"source": "j-kawa/pytest-freeze-reqs",
"score": 2
} |
#### File: pytest-freeze-reqs/freeze_reqs/pytest_freeze_reqs.py
```python
import pytest
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--freeze_reqs",
action="store_true",
help="run check if requirements (req*.txt|pip) are frozen",
)
parser.addini(
"freeze-reqs-ignore-paths",
type="linelist",
help="each line specifies a part of path to ignore "
"by pytest-freeze-reqs, example: "
"requirement_dev.txt matches /a/b/c/requirement_dev.txt",
)
parser.addini(
"freeze-reqs-include-paths",
type="linelist",
help="each line specifies a part of path to include "
"by pytest-freeze-reqs, example: "
"/base_requirements.txt matches /a/b/c/base_requirements.txt",
)
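# Illustrative pytest.ini configuration (hypothetical paths):
#   [pytest]
#   freeze-reqs-ignore-paths =
#       requirements_dev.txt
#   freeze-reqs-include-paths =
#       /base_requirements.txt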
def pytest_sessionstart(session):
config = session.config
if config.option.freeze_reqs:
config._freeze_reqs_ignore = config.getini("freeze-reqs-ignore-paths")
config._freeze_reqs_include = config.getini("freeze-reqs-include-paths")
def pytest_collect_file(parent, path):
config = parent.config
if not config.option.freeze_reqs:
return None
if path.ext in (".txt", ".pip") and path.basename.startswith("req"):
for ignore_path in config._freeze_reqs_ignore:
if ignore_path in str(path):
return None
return RequirementFile(path, parent)
else:
for include_path in config._freeze_reqs_include:
if include_path in str(path):
return RequirementFile(path, parent)
class RequirementFile(pytest.File):
def collect(self):
import requirements
with open(str(self.fspath), "r") as fd:
for req in requirements.parse(fd):
yield RequirementItem(req.name, self, req)
class RequirementItem(pytest.Item):
def __init__(self, name, parent, req):
super(RequirementItem, self).__init__(name, parent)
self.add_marker("freeze_reqs")
self.req = req
def runtest(self):
# local files
if self.req.local_file:
return
# revision
if self.req.vcs:
if not self.req.revision:
raise RequirementNotFrozenException(self, self.name, "[no revision]")
else:
return
# pip packages
if not self.req.specs:
raise RequirementNotFrozenException(self, self.name, self.req.specs)
for spec in self.req.specs:
operator, _ = spec
if operator in ("<", "<=", "=="):
return
raise RequirementNotFrozenException(self, self.name, self.req.specs)
def repr_failure(self, excinfo):
""" called when self.runtest() raises an exception. """
if isinstance(excinfo.value, RequirementNotFrozenException):
args = excinfo.value.args
return "\n".join(
[
"requirement freeze test failed",
" improperly frozen requirement: {1!r}: {2!r}".format(*args),
" try adding pkg==version, or git@revision",
]
)
def reportinfo(self):
return (
self.fspath,
0,
"requirement: {name} is not frozen properly.".format(name=self.name),
)
class RequirementNotFrozenException(Exception):
""" custom exception for error reporting. """
``` |
{
"source": "j-kayes/genetic-algorithm",
"score": 4
} |
#### File: j-kayes/genetic-algorithm/gene_functions.py
```python
import random
# Copyright <NAME> © 2018
# Genes represent an action for each agent to take for each of the possible states that the agent can be in.
def rank_based_selection(population):
rank_proportions = [0.15, 0.1, 0.09, 0.085, 0.08, 0.07, 0.06, 0.05, 0.03, 0.025, 0.02, 0.0175, 0.015, 0.0125, 0.01]
remaining_probability = 1.0 - sum(rank_proportions)
remaining_items = float(len(population) - len(rank_proportions))
    # Applying this same probability to all values ranked lower than the number of items in the rank_proportions list:
    lower_rank_prob = remaining_probability/remaining_items
    for value in population[len(rank_proportions):]:
        rank_proportions.append(lower_rank_prob)
    # Get a random rank from the genes according to the rank_proportions distribution above:
random_value = random.random()
total = 0.0
for rank_index in range(len(rank_proportions)):
total += rank_proportions[rank_index] # Probability of choosing this rank
if(random_value <= total): # Select this rank for breeding.
return rank_index
return (len(rank_proportions)-1) # Last element
def crossover(a_genes, b_genes):
result = []
    # Uniform crossover with randomization such that each gene has a 50% chance of coming from either parent:
for gene_index in range(len(a_genes)):
if(random.random() > 0.5):
result.append(a_genes[gene_index])
else:
result.append(b_genes[gene_index])
return result
def mutate(genes, rate=0.025, max_gene=9):
for gene_index in range(len(genes)):
if(random.random() <= rate):
genes[gene_index] = random.randint(0, max_gene)
return genes
``` |
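A sketch of how the helpers above might be combined into one breeding step; it assumes `population` is a list of gene lists already sorted best-first by fitness (fitness evaluation and sorting are outside the scope of this file, and the population here is a toy example):
```python
import random

from gene_functions import rank_based_selection, crossover, mutate

# toy population: 30 ranked individuals with 20 genes each, best-ranked first
population = [[random.randint(0, 9) for _ in range(20)] for _ in range(30)]

new_population = []
for _ in range(len(population)):
    # rank-based selection returns an index into the ranked population
    parent_a = population[rank_based_selection(population)]
    parent_b = population[rank_based_selection(population)]
    # uniform crossover, then a small per-gene mutation chance
    child = mutate(crossover(parent_a, parent_b), rate=0.025, max_gene=9)
    new_population.append(child)
```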
{
"source": "JKayman/C-translator",
"score": 4
} |
#### File: C-translator/win/main.pyw
```python
from tkinter import *
import tkinter.messagebox as m_box
import translator as core
window = Tk()
window.title("C Translator")
window.geometry("700x620")
window.resizable(False, False)
frame_input = Frame(window)
label_instruct = Label(window, text="This program will provide lines of code in the C language.\nWhen inserted in a "
                                    "program and executed, they will display the output provided in the text field below")
label_instruct.pack(side=TOP, pady=10)
text_input = Text(frame_input, width=80, height=25)
scroll_input = Scrollbar(frame_input)
scroll_input.config(command=text_input.yview)
text_input.config(yscrollcommand=scroll_input.set)
scroll_input.pack(side=RIGHT, fill=Y)
text_input.pack(expand=NO)
frame_input.pack(pady=20)
frame_result = Frame(window)
label_result = Label(frame_result, text="Results will be stored in ")
label_result.pack(side=LEFT)
entry_name = Entry(frame_result)
def command_translate():
name_result = entry_name.get()
if name_result == "":
m_box.showwarning("Unable to proceed", "Enter the name for the .txt file!")
else:
conf = m_box.askyesno("Confirmation", "The program will now create a file " + name_result
+ ".txt\nIf the file exists it will be overwritten.\nDo you want to proceed?")
if conf == 1:
core.translate(text_input.get(index1="1.0", index2=END), name_result)
entry_name.pack(side=LEFT)
label_txt = Label(frame_result, text=".txt")
label_txt.pack(side=LEFT)
frame_result.pack(pady=20)
frame_btn = Frame(window)
def command_clear():
text_input.delete(index1="1.0", index2=END)
entry_name.delete(first=0, last=END)
button_clear = Button(frame_btn, text="Clear", command=command_clear, width=10, height=1)
button_clear.grid(row=0, column=0)
button_translate = Button(frame_btn, text="Translate", command=command_translate, width=10, height=1)
button_translate.grid(row=0, column=1, padx=40)
frame_btn.pack()
window.mainloop()
``` |
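The GUI above only depends on a `translator.translate(text, name)` entry point, which is not included in this snippet. A purely hypothetical stand-in for that module, writing one C `printf` statement per input line into `<name>.txt` (the escaping is deliberately minimal and illustrative of what the real module might do):
```python
# translator.py -- hypothetical stand-in for the module imported as `core`
def translate(text, name):
    """Write C printf statements that reproduce `text` into <name>.txt."""
    with open(name + ".txt", "w") as out:
        for line in text.splitlines():
            escaped = line.replace("\\", "\\\\").replace('"', '\\"')
            out.write('printf("%s\\n");\n' % escaped)
```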
{
"source": "jkbak/elasticsearch-py",
"score": 2
} |
#### File: _async/client/transform.py
```python
from typing import Any, Dict, List, Optional, Union
from elastic_transport import ObjectApiResponse
from ._base import NamespacedClient
from .utils import SKIP_IN_PATH, _quote, _quote_query, _rewrite_parameters
class TransformClient(NamespacedClient):
@_rewrite_parameters()
async def delete_transform(
self,
*,
transform_id: Any,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
force: Optional[bool] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
timeout: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Deletes an existing transform.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html>`_
:param transform_id: Identifier for the transform.
:param force: If this value is false, the transform must be stopped before it
can be deleted. If true, the transform is deleted regardless of its current
state.
:param timeout: Period to wait for a response. If no response is received before
the timeout expires, the request fails and returns an error.
"""
if transform_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'transform_id'")
__path = f"/_transform/{_quote(transform_id)}"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if force is not None:
__query["force"] = force
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return await self._perform_request("DELETE", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
parameter_aliases={"from": "from_"},
)
async def get_transform(
self,
*,
transform_id: Optional[Any] = None,
allow_no_match: Optional[bool] = None,
error_trace: Optional[bool] = None,
exclude_generated: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
from_: Optional[int] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
size: Optional[int] = None,
) -> ObjectApiResponse[Any]:
"""
Retrieves configuration information for transforms.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html>`_
:param transform_id: The id or comma delimited list of id expressions of the
transforms to get, '_all' or '*' implies get all transforms
:param allow_no_match: Whether to ignore if a wildcard expression matches no
transforms. (This includes `_all` string or when no transforms have been
specified)
:param exclude_generated: Omits fields that are illegal to set on transform PUT
:param from_: skips a number of transform configs, defaults to 0
:param size: specifies a max number of transforms to get, defaults to 100
"""
if transform_id not in SKIP_IN_PATH:
__path = f"/_transform/{_quote(transform_id)}"
else:
__path = "/_transform"
__query: Dict[str, Any] = {}
if allow_no_match is not None:
__query["allow_no_match"] = allow_no_match
if error_trace is not None:
__query["error_trace"] = error_trace
if exclude_generated is not None:
__query["exclude_generated"] = exclude_generated
if filter_path is not None:
__query["filter_path"] = filter_path
if from_ is not None:
__query["from"] = from_
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if size is not None:
__query["size"] = size
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return await self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
parameter_aliases={"from": "from_"},
)
async def get_transform_stats(
self,
*,
transform_id: Any,
allow_no_match: Optional[bool] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
from_: Optional[int] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
size: Optional[int] = None,
) -> ObjectApiResponse[Any]:
"""
Retrieves usage information for transforms.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html>`_
:param transform_id: The id of the transform for which to get stats. '_all' or
'*' implies all transforms
:param allow_no_match: Whether to ignore if a wildcard expression matches no
transforms. (This includes `_all` string or when no transforms have been
specified)
:param from_: skips a number of transform stats, defaults to 0
:param size: specifies a max number of transform stats to get, defaults to 100
"""
if transform_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'transform_id'")
__path = f"/_transform/{_quote(transform_id)}/_stats"
__query: Dict[str, Any] = {}
if allow_no_match is not None:
__query["allow_no_match"] = allow_no_match
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if from_ is not None:
__query["from"] = from_
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if size is not None:
__query["size"] = size
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return await self._perform_request("GET", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
)
async def preview_transform(
self,
*,
transform_id: Optional[Any] = None,
description: Optional[str] = None,
dest: Optional[Any] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
frequency: Optional[Any] = None,
human: Optional[bool] = None,
latest: Optional[Any] = None,
pivot: Optional[Any] = None,
pretty: Optional[bool] = None,
retention_policy: Optional[Any] = None,
settings: Optional[Any] = None,
source: Optional[Any] = None,
sync: Optional[Any] = None,
timeout: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Previews a transform.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-transform.html>`_
:param transform_id: The id of the transform to preview.
:param description: Free text description of the transform.
:param dest: The destination for the transform.
:param frequency: The interval between checks for changes in the source indices
when the transform is running continuously. Also determines the retry interval
in the event of transient failures while the transform is searching or indexing.
The minimum value is 1s and the maximum is 1h.
:param latest: The latest method transforms the data by finding the latest document
for each unique key.
:param pivot: The pivot method transforms the data by aggregating and grouping
it. These objects define the group by fields and the aggregation to reduce
the data.
:param retention_policy: Defines a retention policy for the transform. Data that
meets the defined criteria is deleted from the destination index.
:param settings: Defines optional transform settings.
:param source: The source of the data for the transform.
:param sync: Defines the properties transforms require to run continuously.
:param timeout: Period to wait for a response. If no response is received before
the timeout expires, the request fails and returns an error.
"""
if transform_id not in SKIP_IN_PATH:
__path = f"/_transform/{_quote(transform_id)}/_preview"
else:
__path = "/_transform/_preview"
__body: Dict[str, Any] = {}
__query: Dict[str, Any] = {}
if description is not None:
__body["description"] = description
if dest is not None:
__body["dest"] = dest
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if frequency is not None:
__body["frequency"] = frequency
if human is not None:
__query["human"] = human
if latest is not None:
__body["latest"] = latest
if pivot is not None:
__body["pivot"] = pivot
if pretty is not None:
__query["pretty"] = pretty
if retention_policy is not None:
__body["retention_policy"] = retention_policy
if settings is not None:
__body["settings"] = settings
if source is not None:
__body["source"] = source
if sync is not None:
__body["sync"] = sync
if timeout is not None:
__query["timeout"] = timeout
if not __body:
__body = None # type: ignore[assignment]
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return await self._perform_request("POST", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
parameter_aliases={"_meta": "meta"},
)
async def put_transform(
self,
*,
transform_id: Any,
dest: Any,
source: Any,
defer_validation: Optional[bool] = None,
description: Optional[str] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
frequency: Optional[Any] = None,
human: Optional[bool] = None,
latest: Optional[Any] = None,
meta: Optional[Dict[str, str]] = None,
pivot: Optional[Any] = None,
pretty: Optional[bool] = None,
retention_policy: Optional[Any] = None,
settings: Optional[Any] = None,
sync: Optional[Any] = None,
timeout: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Instantiates a transform.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html>`_
:param transform_id: Identifier for the transform. This identifier can contain
lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
It has a 64 character limit and must start and end with alphanumeric characters.
:param dest: The destination for the transform.
:param source: The source of the data for the transform.
:param defer_validation: When the transform is created, a series of validations
occur to ensure its success. For example, there is a check for the existence
of the source indices and a check that the destination index is not part
of the source index pattern. You can use this parameter to skip the checks,
for example when the source index does not exist until after the transform
is created. The validations are always run when you start the transform,
however, with the exception of privilege checks.
:param description: Free text description of the transform.
:param frequency: The interval between checks for changes in the source indices
when the transform is running continuously. Also determines the retry interval
in the event of transient failures while the transform is searching or indexing.
The minimum value is `1s` and the maximum is `1h`.
:param latest: The latest method transforms the data by finding the latest document
for each unique key.
:param meta: Defines optional transform metadata.
:param pivot: The pivot method transforms the data by aggregating and grouping
it. These objects define the group by fields and the aggregation to reduce
the data.
:param retention_policy: Defines a retention policy for the transform. Data that
meets the defined criteria is deleted from the destination index.
:param settings: Defines optional transform settings.
:param sync: Defines the properties transforms require to run continuously.
:param timeout: Period to wait for a response. If no response is received before
the timeout expires, the request fails and returns an error.
"""
if transform_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'transform_id'")
if dest is None:
raise ValueError("Empty value passed for parameter 'dest'")
if source is None:
raise ValueError("Empty value passed for parameter 'source'")
__path = f"/_transform/{_quote(transform_id)}"
__body: Dict[str, Any] = {}
__query: Dict[str, Any] = {}
if dest is not None:
__body["dest"] = dest
if source is not None:
__body["source"] = source
if defer_validation is not None:
__query["defer_validation"] = defer_validation
if description is not None:
__body["description"] = description
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if frequency is not None:
__body["frequency"] = frequency
if human is not None:
__query["human"] = human
if latest is not None:
__body["latest"] = latest
if meta is not None:
__body["_meta"] = meta
if pivot is not None:
__body["pivot"] = pivot
if pretty is not None:
__query["pretty"] = pretty
if retention_policy is not None:
__body["retention_policy"] = retention_policy
if settings is not None:
__body["settings"] = settings
if sync is not None:
__body["sync"] = sync
if timeout is not None:
__query["timeout"] = timeout
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json", "content-type": "application/json"}
return await self._perform_request("PUT", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
async def start_transform(
self,
*,
transform_id: Any,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
timeout: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Starts one or more transforms.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html>`_
:param transform_id: Identifier for the transform.
:param timeout: Period to wait for a response. If no response is received before
the timeout expires, the request fails and returns an error.
"""
if transform_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'transform_id'")
__path = f"/_transform/{_quote(transform_id)}/_start"
__query: Dict[str, Any] = {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return await self._perform_request("POST", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters()
async def stop_transform(
self,
*,
transform_id: Any,
allow_no_match: Optional[bool] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
force: Optional[bool] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
timeout: Optional[Any] = None,
wait_for_checkpoint: Optional[bool] = None,
wait_for_completion: Optional[bool] = None,
) -> ObjectApiResponse[Any]:
"""
Stops one or more transforms.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html>`_
:param transform_id: Identifier for the transform. To stop multiple transforms,
use a comma-separated list or a wildcard expression. To stop all transforms,
use `_all` or `*` as the identifier.
:param allow_no_match: Specifies what to do when the request: contains wildcard
expressions and there are no transforms that match; contains the `_all` string
or no identifiers and there are no matches; contains wildcard expressions
and there are only partial matches. If it is true, the API returns a successful
acknowledgement message when there are no matches. When there are only partial
matches, the API stops the appropriate transforms. If it is false, the request
returns a 404 status code when there are no matches or only partial matches.
:param force: If it is true, the API forcefully stops the transforms.
:param timeout: Period to wait for a response when `wait_for_completion` is `true`.
If no response is received before the timeout expires, the request returns
a timeout exception. However, the request continues processing and eventually
moves the transform to a STOPPED state.
:param wait_for_checkpoint: If it is true, the transform does not completely
stop until the current checkpoint is completed. If it is false, the transform
stops as soon as possible.
:param wait_for_completion: If it is true, the API blocks until the indexer state
completely stops. If it is false, the API returns immediately and the indexer
is stopped asynchronously in the background.
"""
if transform_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'transform_id'")
__path = f"/_transform/{_quote(transform_id)}/_stop"
__query: Dict[str, Any] = {}
if allow_no_match is not None:
__query["allow_no_match"] = allow_no_match
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if force is not None:
__query["force"] = force
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
if wait_for_checkpoint is not None:
__query["wait_for_checkpoint"] = wait_for_checkpoint
if wait_for_completion is not None:
__query["wait_for_completion"] = wait_for_completion
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json"}
return await self._perform_request("POST", __target, headers=__headers) # type: ignore[no-any-return,return-value]
@_rewrite_parameters(
body_fields=True,
)
async def update_transform(
self,
*,
transform_id: Any,
defer_validation: Optional[bool] = None,
description: Optional[str] = None,
dest: Optional[Any] = None,
error_trace: Optional[bool] = None,
filter_path: Optional[Union[List[str], str]] = None,
frequency: Optional[Any] = None,
human: Optional[bool] = None,
pretty: Optional[bool] = None,
retention_policy: Optional[Any] = None,
settings: Optional[Any] = None,
source: Optional[Any] = None,
sync: Optional[Any] = None,
timeout: Optional[Any] = None,
) -> ObjectApiResponse[Any]:
"""
Updates certain properties of a transform.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform.html>`_
:param transform_id: Identifier for the transform. This identifier can contain
lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
It must start and end with alphanumeric characters.
:param defer_validation: When true, deferrable validations are not run. This
behavior may be desired if the source index does not exist until after the
transform is created.
:param description: Free text description of the transform.
:param dest: The destination for the transform.
:param frequency: The interval between checks for changes in the source indices
when the transform is running continuously. Also determines the retry interval
in the event of transient failures while the transform is searching or indexing.
The minimum value is 1s and the maximum is 1h.
:param retention_policy: Defines a retention policy for the transform. Data that
meets the defined criteria is deleted from the destination index.
:param settings: Defines optional transform settings.
:param source: The source of the data for the transform.
:param sync: Defines the properties transforms require to run continuously.
:param timeout: Period to wait for a response. If no response is received before
the timeout expires, the request fails and returns an error.
"""
if transform_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'transform_id'")
__path = f"/_transform/{_quote(transform_id)}/_update"
__query: Dict[str, Any] = {}
__body: Dict[str, Any] = {}
if defer_validation is not None:
__query["defer_validation"] = defer_validation
if description is not None:
__body["description"] = description
if dest is not None:
__body["dest"] = dest
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if frequency is not None:
__body["frequency"] = frequency
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if retention_policy is not None:
__body["retention_policy"] = retention_policy
if settings is not None:
__body["settings"] = settings
if source is not None:
__body["source"] = source
if sync is not None:
__body["sync"] = sync
if timeout is not None:
__query["timeout"] = timeout
if __query:
__target = f"{__path}?{_quote_query(__query)}"
else:
__target = __path
__headers = {"accept": "application/json", "content-type": "application/json"}
return await self._perform_request("POST", __target, headers=__headers, body=__body) # type: ignore[no-any-return,return-value]
```
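A hedged usage sketch for the client above: it assumes an `AsyncElasticsearch` instance whose `transform` attribute is this namespaced client, and it only uses keyword parameters that `put_transform`/`start_transform` declare; the index names and the contents of the `source`, `dest`, and `pivot` dicts are placeholders following the Elasticsearch transform API:
```python
import asyncio

from elasticsearch import AsyncElasticsearch


async def main():
    client = AsyncElasticsearch("http://localhost:9200")
    # create a pivot transform from a source index into a destination index
    await client.transform.put_transform(
        transform_id="example-transform",
        source={"index": "source-index"},
        dest={"index": "dest-index"},
        pivot={
            "group_by": {"user": {"terms": {"field": "user.id"}}},
            "aggregations": {"total": {"sum": {"field": "amount"}}},
        },
    )
    await client.transform.start_transform(transform_id="example-transform")
    await client.close()


asyncio.run(main())
```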
#### File: elasticsearch-py/elasticsearch/exceptions.py
```python
from typing import Any, Dict, Type
from elastic_transport import ApiError as _ApiError
from elastic_transport import ConnectionError as ConnectionError
from elastic_transport import ConnectionTimeout as ConnectionTimeout
from elastic_transport import SerializationError as SerializationError
from elastic_transport import TlsError as SSLError
from elastic_transport import TransportError as _TransportError
from elastic_transport import TransportWarning
__all__ = [
"SerializationError",
"TransportError",
"ConnectionError",
"SSLError",
"ConnectionTimeout",
"AuthorizationException",
"AuthenticationException",
"NotFoundError",
"ConflictError",
"BadRequestError",
]
class ApiError(_ApiError):
@property
def status_code(self) -> int:
"""Backwards-compatible shorthand for 'self.meta.status'"""
return self.meta.status
@property
def error(self) -> str:
"""A string error message."""
return self.message # type: ignore
@property
def info(self) -> Any:
"""Backwards-compatible way to access '.body'"""
return self.body
def __str__(self) -> str:
cause = ""
try:
if self.body and isinstance(self.body, dict) and "error" in self.body:
if isinstance(self.body["error"], dict):
root_cause = self.body["error"]["root_cause"][0]
cause = ", ".join(
filter(
None,
[
repr(root_cause["reason"]),
root_cause.get("resource.id"),
root_cause.get("resource.type"),
],
)
)
else:
cause = repr(self.body["error"])
except LookupError:
pass
msg = ", ".join(filter(None, [str(self.status_code), repr(self.error), cause]))
return f"{self.__class__.__name__}({msg})"
class UnsupportedProductError(ApiError):
"""Error which is raised when the client detects
it's not connected to a supported product.
"""
def __str__(self) -> str:
return self.message # type: ignore
class NotFoundError(ApiError):
"""Exception representing a 404 status code."""
class ConflictError(ApiError):
"""Exception representing a 409 status code."""
class BadRequestError(ApiError):
"""Exception representing a 400 status code."""
class AuthenticationException(ApiError):
"""Exception representing a 401 status code."""
class AuthorizationException(ApiError):
"""Exception representing a 403 status code."""
class ElasticsearchWarning(TransportWarning):
"""Warning that is raised when a deprecated option
or incorrect usage is flagged via the 'Warning' HTTP header.
"""
# Aliases for backwards compatibility
ElasticsearchException = _TransportError
ElasticsearchDeprecationWarning = ElasticsearchWarning
TransportError = ApiError
RequestError = BadRequestError
HTTP_EXCEPTIONS: Dict[int, Type[ApiError]] = {
400: RequestError,
401: AuthenticationException,
403: AuthorizationException,
404: NotFoundError,
409: ConflictError,
}
```
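A short sketch of how the backwards-compatible properties defined above are typically consumed; it assumes a reachable cluster and a document id that does not exist:
```python
from elasticsearch import Elasticsearch, NotFoundError

client = Elasticsearch("http://localhost:9200")

try:
    client.get(index="my-index", id="missing-id")
except NotFoundError as exc:
    # .status_code, .error and .info are the compatibility shims defined above
    print(exc.status_code)  # 404
    print(exc.error)        # short error message string
    print(exc.info)         # decoded response body
```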
#### File: elasticsearch-py/utils/run-unasync.py
```python
import os
from pathlib import Path
import unasync
def main():
# Unasync all the generated async code
additional_replacements = {
# We want to rewrite to 'Transport' instead of 'SyncTransport', etc
"AsyncTransport": "Transport",
"AsyncElasticsearch": "Elasticsearch",
# We don't want to rewrite this class
"AsyncSearchClient": "AsyncSearchClient",
# Handling typing.Awaitable[...] isn't done yet by unasync.
"_TYPE_ASYNC_SNIFF_CALLBACK": "_TYPE_SYNC_SNIFF_CALLBACK",
}
rules = [
unasync.Rule(
fromdir="/elasticsearch/_async/client/",
todir="/elasticsearch/_sync/client/",
additional_replacements=additional_replacements,
),
]
filepaths = []
for root, _, filenames in os.walk(
Path(__file__).absolute().parent.parent / "elasticsearch/_async"
):
for filename in filenames:
if (
filename.rpartition(".")[-1]
in (
"py",
"pyi",
)
and not filename.startswith("utils.py")
):
filepaths.append(os.path.join(root, filename))
unasync.unasync_files(filepaths, rules)
if __name__ == "__main__":
main()
``` |
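For orientation, a hand-written illustration of the kind of rewrite `unasync` performs with the rules in the script above: `async`/`await` keywords are stripped and the names in the replacement table are substituted, turning code under `elasticsearch/_async/` into its twin under `elasticsearch/_sync/`. This is an illustrative sketch, not actual output of the script; the function and parameter names are placeholders, and the sync version is renamed here only so both can coexist in one snippet:
```python
# before: as it would appear under elasticsearch/_async/ (AsyncTransport, await)
async def fetch(transport: "AsyncTransport"):
    return await transport.perform_request("GET", "/")


# after: the synchronous twin unasync emits under elasticsearch/_sync/
def fetch_sync(transport: "Transport"):
    return transport.perform_request("GET", "/")
```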
{
"source": "jkbao21/bitcoin",
"score": 2
} |
#### File: test/functional/rpc_named_arguments.py
```python
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class NamedArgumentTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = self.nodes[0]
h = node.help(command='getblockchaininfo')
assert h.startswith('getblockchaininfo\n')
assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo')
h = node.getblockhash(height=0)
node.getblock(blockhash=h)
assert_equal(node.echo(), [])
assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
assert_equal(node.echo(arg1=1), [None, 1])
assert_equal(node.echo(arg9=None), [None]*10)
assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
assert_equal(node.echo(0, 1, arg3=3, arg5=5), [0, 1, None, 3, None, 5])
assert_raises_rpc_error(-8, "Parameter arg1 specified twice both as positional and named argument", node.echo, 0, 1, arg1=1)
assert_raises_rpc_error(-8, "Parameter arg1 specified twice both as positional and named argument", node.echo, 0, None, 2, arg1=1)
if __name__ == '__main__':
NamedArgumentTest().main()
``` |
{
"source": "JKBehrens/STAAMS-Solver",
"score": 2
} |
#### File: cp_planning_examples/scripts/example_scene_manager.py
```python
from roadmap_tools.scene_graph_mockup_manager import SceneGraphMockupManager
from cp_planning_examples.example_scene_object_factory import ExampleSceneObjectFactory
from geometry_msgs.msg import TransformStamped
from roadmap_tools.robot_info import RobotInfo
def get_robot_frame(world_frame, robot_base_frame):
transform = TransformStamped()
transform.header.frame_id = world_frame
transform.child_frame_id = robot_base_frame
transform.transform.translation.x = 0.0
transform.transform.translation.y = 0.0
transform.transform.translation.z = 0.95
transform.transform.rotation.x = 0.0
transform.transform.rotation.y = 0.0
transform.transform.rotation.z = 0.0
transform.transform.rotation.w = 1.0
return transform
def get_task_frame(world_frame):
transform = TransformStamped()
transform.header.frame_id = world_frame
transform.child_frame_id = "workpiece"
transform.transform.translation.x = 0.4
transform.transform.translation.y = 0.0
transform.transform.translation.z = 0.9
transform.transform.rotation.x = 0.0
transform.transform.rotation.y = 0.0
transform.transform.rotation.z = 0.0
transform.transform.rotation.w = 1.0
return transform
if __name__ == "__main__":
world_frame = 'map'
robot_info = RobotInfo.getRobotInfo() # type: RobotInfo
example_scene_object_factory = ExampleSceneObjectFactory()
example_scene_object_factory.addRobotFrame(get_robot_frame(world_frame, robot_info.getBaseFrame()))
example_scene_object_factory.addTaskFrame(get_task_frame(world_frame))
sggm = SceneGraphMockupManager(ref_frame=world_frame, scene_object_factory=example_scene_object_factory)
```
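The two frame builders above differ only in the child frame name and the translation, so a small factory along the following lines could cover both; this is a sketch assuming only static frames with identity rotation are needed, as in the script above (the helper name is illustrative):
```python
from geometry_msgs.msg import TransformStamped


def make_static_frame(parent_frame, child_frame, x, y, z):
    """Build a TransformStamped with the given translation and identity rotation."""
    transform = TransformStamped()
    transform.header.frame_id = parent_frame
    transform.child_frame_id = child_frame
    transform.transform.translation.x = x
    transform.transform.translation.y = y
    transform.transform.translation.z = z
    transform.transform.rotation.w = 1.0  # x, y, z components default to 0.0
    return transform


# e.g. the task frame from above:
workpiece_frame = make_static_frame('map', 'workpiece', 0.4, 0.0, 0.9)
```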
#### File: src/roadmap_planner/BindIntervalsGroupCt.py
```python
from ortools.constraint_solver import pywrapcp
class BindIntervalsGroupCt(pywrapcp.PyConstraint):
'''
A constraint which adds a set of constraints to the problem when the variable group_var gets bound.
'''
def __init__(self, solver, group_var, constraints):
pywrapcp.PyConstraint.__init__(self, solver)
self.group_var = group_var
self.constraints = constraints
def Post(self):
demon = self.Demon(BindIntervalsGroupCt.Propagate)
self.group_var.WhenBound(demon)
def InitialPropagate(self):
'''
If the variable self.group_var is already bound, Propagate is immediately called
:return:
'''
if self.group_var.Bound():
self.Propagate()
def Propagate(self):
'''
Adds the constraints self.constraints to the solver
:return: 0
'''
solver = self.solver() # type: pywrapcp.Solver
group = self.group_var.Value()
for ct in self.constraints[group]:
solver.AddConstraint(ct)
```
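How this constraint is wired can be seen further down in `OvcPlanner.addFreeOVC`: the `constraints` dict is keyed by the possible values of `group_var`, and only the list stored under the value that `group_var` is eventually bound to gets posted. A condensed sketch of that pattern (the variable names and domains are illustrative):
```python
from ortools.constraint_solver import pywrapcp

from roadmap_planner.BindIntervalsGroupCt import BindIntervalsGroupCt

solver = pywrapcp.Solver("bind_example")
group_var = solver.IntVar(0, 1, "execution_group")  # which arm executes the task
left_conf = solver.IntVar(0, 10, "c_left")
right_conf = solver.IntVar(0, 10, "c_right")

# per-group constraint lists, keyed by the values group_var may take;
# only the list under the eventually bound value is added to the solver
staged = {
    0: [left_conf == 3],
    1: [right_conf == 7],
}
solver.Add(BindIntervalsGroupCt(solver, group_var, staged))
```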
#### File: src/roadmap_planner/ovc_planner.py
```python
import itertools
import random
import numpy as np
from ortools.constraint_solver import pywrapcp
from roadmap_planner.BindIntervalsGroupCt import BindIntervalsGroupCt
from roadmap_planner.solution_saving import *
from roadmap_tools.SceneObjectSearchMapping import SceneObjectSearchMapping
from CustomConstraints import IntervalClashCt, Edge2IntervalDurationCt, PathFeasibleCt, \
MoveOnGraphCt, InGoal2StatesCt, EndBound2UpperBoundCt, ConnectedComponentCt, BindIntervalsCt, IndexedEqualityCt, \
HorizonConfigurationCt, BrushArmAssignment2PnPTasks, IndexedEqualityVarCt, ConnectSameGroupCt
from ordered_visiting_ct_var import OrderedVisitingConstraintVar
import rospy
from roadmap_planning_common_msgs.srv import BuildMotionAddTrajectory, BuildMotionAddTrajectoryRequest, \
BuildMotionAddTrajectoryResponse
from std_srvs.srv import EmptyRequest, Empty, EmptyResponse
from roadmap_tools.prm_factory import RoadMapFactory
from roadmap_tools.roadmap_clash import RoadMapClash
from roadmap_tools.prm import RoadMap
from roadmap_planner.DependencyDecisionBuilder import DependencyDecisionBuilder
from datetime import datetime
# import plotly.plotly as py
import plotly.offline as py
import plotly.figure_factory as ff
class OvcPlanner:
def __init__(self):
# n, prm_left, prm_right, clash_l, clash_r
self.roadmaps = {} # type: dict[str, RoadMap]
self.clash = RoadMapClash() # type: RoadMapClash
self.sosm = SceneObjectSearchMapping() # Type: SceneObjectSearchMapping
self.solver = pywrapcp.Solver("CP_Motion_Planner") # type: pywrapcp.Solver
self.intvar_lists = {}
self.intervalvar_lists = {}
self.refinement_intervals = {}
self.resource_intervals = {}
self.ordered_visiting_cst_vars = []
self.steps_max = -1
self.time_max = -1
self.db = None
self.search_monitors = None
self.dependencies = DependencyDecisionBuilder()
# self.var_dict = {} # type: dict[str, list[pywrapcp.IntVar]]
def get_refine_interval(self, name):
try:
i_s = self.refinement_intervals[name]
except KeyError:
i_s = self.solver.IntervalVar(0, 10000, 0, 10000, 0, 10000, False, name) # type: pywrapcp.IntervalVar
self.refinement_intervals[name] = i_s
return i_s
def build_manipulation_model(self, steps_max, time_max, collision_on=True):
self.steps_max = steps_max
self.time_max = time_max
self.create_basic_movement_variables(steps_max, time_max)
solver = self.solver
if collision_on:
# Adding the clash constraint on each pair of components
it = itertools.product(self.roadmaps.keys(), self.roadmaps.keys())
for group_combo in it:
if group_combo[0] == group_combo[-1]:
continue
c1 = self.intvar_lists["c_" + group_combo[0]]
c2 = self.intvar_lists["c_" + group_combo[-1]]
i1 = self.intervalvar_lists["i_" + group_combo[0]]
i2 = self.intervalvar_lists["i_" + group_combo[-1]]
clash = self.clash.clashes[group_combo]
rm1 = self.roadmaps[group_combo[0]]
rm2 = self.roadmaps[group_combo[-1]]
# Uncomment for path interval clash ct
# from interval_clash_ct_path import IntervalClashPathCt
# solver.Add(IntervalClashPathCt(solver, c1, c2, i1, i2, rm1, rm2, clash)) # Todo: uncomment!!!
from roadmap_planner.interval_clash_path_refine_ct import IntervalClashPathRefineCt
solver.Add(IntervalClashPathRefineCt(solver, c1, c2, i1, i2, rm1, rm2, clash, self))
# solver.Add(IntervalClashCt(solver, c1, c2, i1, i2, rm1, rm2, clash)) # Todo: uncomment!!!
# solver.Add(IntervalClashCt(solver, c_left, c_right, i_left, i_right, clash_l))
# solver.Add(IntervalClashCt(solver, c_right, c_left, i_right, i_left, clash_r))
# i mod 2:
# 0: nodes
# 1: edges
for group in self.roadmaps.keys():
conf_vars, interval_vars = self.get_vars_for_group(group)
# add duration constraints for the intervals
for i in range(0, len(interval_vars)):
if np.mod(i, 2) == 0:
# Duration constraint for node intervals
interval_vars[i].SetDurationRange(0, time_max)
else:
# Duration constraint for edge intervals
ct = Edge2IntervalDurationCt(solver, interval_vars[i], conf_vars[(i - 1) / 2],
conf_vars[(i - 1) / 2 + 1], self.roadmaps[group])
solver.Add(ct) # Todo: uncomment!!!
# chain all the intervals of one group
for i1, i2 in zip(range(0, len(interval_vars) - 1, 1), range(1, len(interval_vars), 1)):
solver.Add(interval_vars[i1].EndsAtStart(interval_vars[i2]))
# let the intervals start at 0
interval_vars[0].SetStartRange(0, 0) # StartExpr().SetValue(0)
# let the last interval and be a lower bound for the make span
solver.Add(self.intervalvar_lists["make_span_interval"][0].EndsAfterEnd(interval_vars[-1]))
# TODO: test to add all interval variables to opt_time_goal
solver.Add(interval_vars[-1].EndExpr() <= self.intvar_lists["opt_time_goal"][0])
# use sum of durations as lower bound of opt_time_goal
# dur_list = [var.DurationExpr() for i, var in enumerate(interval_vars)]
# solver.Add(solver.Sum(dur_list) <= self.intvar_lists["opt_time_goal"])
self.intervalvar_lists["make_span_interval"][0].SetStartRange(0, 0)
# connect optimization variable and Intervals
solver.Add(self.intervalvar_lists["make_span_interval"][0].EndExpr() <= self.intvar_lists["opt_time_goal"][0])
# Adding Constraint to make configurations adhere to roadmap
for group in self.roadmaps.keys():
conf_vars, interval_vars = self.get_vars_for_group(group)
# solver.Add(PathFeasibleCt(solver, conf_vars, self.roadmaps[group])) # Todo: uncomment!!!
# collect the last interval of each group and constrain them to end at the same time
last_intervals = []
for group in self.roadmaps.keys():
conf_vars, interval_vars = self.get_vars_for_group(group)
last_intervals.append(interval_vars[-1])
it = itertools.combinations(last_intervals, 2)
for end_intervals in it:
solver.Add(end_intervals[0].EndsAtEnd(end_intervals[-1]))
solver.Add(
EndBound2UpperBoundCt(solver, last_intervals, self.intvar_lists["opt_time_goal"][0])) # Todo: uncomment!!!
# # not allow two consecutive same values for configurations
# for group in self.roadmaps.keys():
# interval_vars = self.intervalvar_lists["i_" + group] # type: list[pywrapcp.IntervalVar]
# conf_vars = self.intvar_lists["c_" + group]
#
# for i1 in range(0, len(conf_vars) - 2):
# i2 = i1 + 1
# solver.Add(conf_vars[i1] != conf_vars[i2])
def create_basic_movement_variables(self, steps_max, time_max):
"""
Creates the basic variables to represent the locations of the manipulators and the
associated interval variables representing the traveling and waiting times.
conf index 0 1 2 3
o---o---o---o
interval index 0 1 2 3 4 5 6
conf -> waiting_interval: i_int = i_conf * 2
conf -> traveling_interval: i_int = i_conf * 2 +/-1
        :param time_max: Maximal ending time for the last interval. This value is NOT in seconds, but gets scaled later.
        :param steps_max: Number of variables to create; some may be disabled while solving.
"""
# create the configuration variables for every roadmap and max steps and save them
# the variable dictionary as c_<group_name>, e.g. c_left_arm
number_format = len(str(2 * steps_max - 1))
for key in self.roadmaps.keys():
n_confs = self.roadmaps[key].get_number_of_vertices()
conf_domain_max = 100000
name = 'c_' + key
conf_var = [self.solver.IntVar(0, conf_domain_max, 'c_' + key + "_{:04d}".format(i)) for i in range(steps_max)]
self.intvar_lists[name] = conf_var
horizon = [self.solver.IntVar(0, steps_max, "horizon_{}".format(key))]
name = "horizon_" + key
self.intvar_lists[name] = horizon
ct = HorizonConfigurationCt(self.solver, conf_var, horizon[0])
self.solver.Add(ct)
# create a series of travel and waiting intervals for each component (with possible states defined as roadmap)
for key in self.roadmaps.keys():
int_var = [self.solver.IntervalVar(0, time_max, # type: list(IntervalVar) # start time
0, time_max, # duration
0, time_max, # end time
False, # optional
"Interval_{}_{:04d}".format(key, i))
for i in range(0, 2 * steps_max - 1)]
name = 'i_' + key
self.intervalvar_lists[name] = int_var
# create interval representing the makespan
make_span_interval = [self.solver.IntervalVar(0, time_max, # type: list(IntervalVar) # start time
0, time_max, # duration
0, time_max, # end time
False, # optional
"Interval_makespan")]
name = "make_span_interval"
self.intervalvar_lists[name] = make_span_interval
# integer variable derived from the makespan as optimization criteria
opt_time_goal = [self.solver.IntVar(0, time_max, "Goal_time")]
name = "opt_time_goal"
self.intvar_lists[name] = opt_time_goal
def register_resource_demand(self, interval_var, res="left_arm_gripper"):
try:
self.resource_intervals[res].append(interval_var)
except KeyError:
self.resource_intervals[res] = []
self.resource_intervals[res].append(interval_var)
def get_range_of_list(self, rlist=[]):
return min(rlist), max(rlist)
def register_order(self, interval_list=[]):
if len(interval_list) < 2:
return
for i1, i2 in zip(interval_list[:-1], interval_list[1:]):
assert isinstance(i1, pywrapcp.IntervalVar)
assert isinstance(i2, pywrapcp.IntervalVar)
self.solver.Add(i2.StartsAfterEnd(i1))
def build_constraints(self):
# TODO: check if we still need this function
from roadmap_planner.CustomConstraints import SameCompOVCConnectAllDifferent
if self.ordered_visiting_cst_vars.__len__() > 1:
ct = SameCompOVCConnectAllDifferent(self.solver, self.ordered_visiting_cst_vars)
self.solver.Add(ct)
for key, interval_list in self.resource_intervals.items():
# do nothing if there is only one interval for a resource
if len(interval_list) <= 1:
continue
it = itertools.combinations(interval_list, 2)
for interval_tuple in it:
print interval_tuple
ct = self.solver.TemporalDisjunction(interval_tuple[0], interval_tuple[1])
self.solver.Add(ct)
def makeOVC_connect_vars_allDifferent(self):
ovc_connect_group_dict = {}
for ovc in self.ordered_visiting_cst_vars:
if ovc._execution_group.Bound():
group = ovc._execution_group.Value()
try:
ovc_connect_group_dict[group] += ovc._conf_connect_vars
except KeyError:
ovc_connect_group_dict[group] = []
ovc_connect_group_dict[group] += ovc._conf_connect_vars
# print(ovc_connect_group_dict[group])
for connect_var_list in ovc_connect_group_dict.values():
ct = self.solver.AllDifferent(connect_var_list)
self.solver.Add(ct)
def make_ovc_loc_alldifferent_except_count(self, ovc_list=[], loc_pos=None, visit_vals=None):
# Todo: check if this works
if len(ovc_list) < 2:
return
if loc_pos is None:
return
if visit_vals is None:
return
loc_vars = []
from collections import Counter
val_hist = Counter(visit_vals)
for ovc in ovc_list:
loc_vars.append(ovc._conf_values[loc_pos])
lb = len(val_hist.values()) * [0]
ct = self.solver.Distribute(loc_vars, val_hist.keys(), lb, val_hist.values())
self.solver.Add(ct)
def make_ovc_loc_alldifferent(self, ovc_list=[], loc_pos=None):
if len(ovc_list) < 2:
return
if loc_pos is None:
return
loc_vars = []
# for ovc in ovc_list:
# loc_vars.append(ovc._conf_values[loc_pos])
for ovc in ovc_list:
loc_vars.append(ovc._loc_values[loc_pos])
self.solver.Add(self.solver.AllDifferent(loc_vars))
def make_ovc_monotonous(self, ovc_list=[]):
if len(ovc_list) < 2:
return
for ovc1, ovc2 in zip(ovc_list[0:-2], ovc_list[1:-1]):
self.solver.Add(ovc1._conf_connect_vars[-1] < ovc2._conf_connect_vars[0])
# self.solver.Add(ovc2._spanning_visit_interval.StartsAfterEnd(ovc1._spanning_visit_interval))
def addOVC(self, group, locations=[]):
'''
This function adds an Ordered visiting constraint for a fixed group and order.
        :param group: name of the robot component that executes all visits of this OVC
        :param locations: ordered list of lists of allowed roadmap configurations, one list per visit
        :return: the created OrderedVisitingConstraintVar
'''
name = "{:04d}".format(len(self.ordered_visiting_cst_vars))
ovc = OrderedVisitingConstraintVar(self.solver, len(locations), self.time_max, self.steps_max,
self.sosm.get_groups().keys(), self.roadmaps[group].get_number_of_vertices(),
name=name) # Type: OrderedVisitingConstraintVar
ovc._execution_group.SetValue(self.sosm.get_groups()[group])
for l in enumerate(locations):
ovc._conf_values[l[0]].SetValues(l[-1])
conf_vars, interval_vars = self.get_vars_for_group(group)
assert len(ovc._conf_connect_vars) == len(ovc._visit_interval_vars)
ct = BindIntervalsCt(self.solver, interval_vars, ovc._conf_connect_vars, ovc._visit_interval_vars)
self.solver.Add(ct)
for k, connect_var in enumerate(ovc._conf_connect_vars):
if not ovc._conf_values[k].Bound():
ct = IndexedEqualityVarCt(self.solver, conf_vars, connect_var, ovc._conf_values[k])
else:
ct = IndexedEqualityCt(self.solver, conf_vars, connect_var, ovc._conf_values[k].Value())
self.solver.Add(ct)
self.ordered_visiting_cst_vars.append(ovc)
return ovc
def addFreeOVC(self, groups=[], locations=[], ranges=None):
'''
This function adds an Ordered visiting constraint for a free group and order.
        :param groups: the robot components that should be considered for the task
:param locations: a list of lists with locations (integer)
:return: the ovc object
'''
assert type(locations) == list
for l in locations:
assert type(l) == list
loc_mapping = self.sosm.get_alias_to_poses() # type: dict[str, dict[int, int]]
# nr_locs = max([max(loc_mapping[group].keys()) for group in groups])
nr_confs = max([self.roadmaps[group].get_number_of_vertices() for group in groups])
name = "{:04d}".format(len(self.ordered_visiting_cst_vars))
self.print_model()
ovc = OrderedVisitingConstraintVar(self.solver, len(locations), self.time_max, self.steps_max,
nr_confs,
loc_mapping=loc_mapping,
name=name,
group_mapping=self.sosm.get_groups(),
ranges=ranges) # Type: OrderedVisitingConstraintVar
ovc._execution_group.SetValues([self.sosm.get_groups()[group] for group in groups])
for l in enumerate(locations):
# ovc._conf_values[l[0]].SetValues(l[-1])
ovc._loc_values[l[0]].SetValues(l[-1])
cts = {}
for gn, gv in self.sosm.get_groups().items():
conf_vars, interval_vars = self.get_vars_for_group(gn)
cts[gv] = [BindIntervalsCt(self.solver, interval_vars, ovc._conf_connect_vars, ovc._visit_interval_vars)]
ct = BindIntervalsGroupCt(self.solver, ovc._execution_group, cts)
self.solver.Add(ct)
assert len(ovc._conf_connect_vars) == len(ovc._visit_interval_vars)
cts = {}
for gn, gv in self.sosm.get_groups().items():
conf_vars, interval_vars = self.get_vars_for_group(gn)
cts[gv] = []
for k, connect_var in enumerate(ovc._conf_connect_vars):
if not ovc._conf_values[k].Bound():
cts[gv].append(IndexedEqualityVarCt(self.solver, conf_vars, connect_var, ovc._conf_values[k]))
else:
cts[gv].append(IndexedEqualityCt(self.solver, conf_vars, connect_var, ovc._conf_values[k].Value()))
# self.solver.Add(ct)
self.solver.Add(BindIntervalsGroupCt(self.solver, ovc._execution_group, cts))
if self.dependencies:
for connect_var in ovc._conf_connect_vars:
self.dependencies.addTask(connect_var)
if len(ovc._conf_connect_vars) >= 2:
for con_1, con_2 in zip(ovc._conf_connect_vars[:-2], ovc._conf_connect_vars[1:-1]):
self.dependencies.addDependency(con_2, con_1)
self.ordered_visiting_cst_vars.append(ovc)
return ovc
def add_visiting_Ct(self, group, locations=[], start=False, goal=False, state_index=None, earliest=-1, latest=-1):
conf_vars, interval_vars = self.get_vars_for_group(group)
if start:
conf_vars[0].SetValues(locations)
return
if goal:
conf_vars[-1].SetValues(locations)
return
if state_index is not None:
try:
conf_vars[state_index].SetValues(locations)
return
except KeyError:
print("invalid state_index: {}. Len(conf_vars) is {}.".format(state_index, len(conf_vars)))
assert False
return
if earliest == -1 and latest == -1:
for loc in locations:
count_var = self.solver.IntVar(1, self.steps_max)
ct = self.solver.Count(conf_vars, loc, count_var)
# ct = solver.Distribute(c_left, pos_visit_left, len(pos_visit_left) * [1], len(pos_visit_left) * [steps])
self.solver.Add(ct)
return
if earliest in range(self.steps_max) and latest in range(self.steps_max) and earliest < latest and len(
locations) == 1:
conf_vars_select = []
for step in range(earliest, latest):
conf_vars_select.append(conf_vars[step])
count_var = self.solver.IntVar(1, self.steps_max)
ct = self.solver.Count(conf_vars_select, locations[0], count_var)
self.solver.Add(ct)
def define_decision_builders(self):
solver = self.solver
intervalvars = []
# db_0a = solver.Phase([self.intvar_lists["opt_time_goal"][0]],
# solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MAX_VALUE)
conf_vars = []
interval_vars = []
for group in self.roadmaps.keys():
c_vars, i_vars = self.get_vars_for_group(group)
conf_vars += c_vars
interval_vars += i_vars
intervalvars += interval_vars
# will be filled with decision builders
decision_builders = []
visit_intervals = []
visit_configurations = []
for group in self.roadmaps.keys():
try:
visit_intervals += self.intervalvar_lists['visit_i_' + group]
visit_configurations += self.intvar_lists['visit_c_' + group]
intervalvars += visit_intervals
except KeyError:
print "There are no ordered visit constraints for group {}".format(group)
have_visit_ct = False
if len(visit_configurations) > 0:
have_visit_ct = True
if have_visit_ct:
var_list = []
for l in visit_configurations:
for var in l:
var_list.append(var)
db_visit_confs = solver.Phase(var_list, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
have_visiting_ct_var = False
try:
if len(self.ordered_visiting_cst_vars) > 0:
have_visiting_ct_var = True
except AttributeError:
pass
if have_visiting_ct_var:
var_list = []
for l in self.ordered_visiting_cst_vars:
for var in l._conf_connect_vars:
var_list.append(var)
db_visiting_ct_var = solver.Phase(var_list, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
resource_intervals = []
for var in self.ordered_visiting_cst_vars:
resource_intervals.append(var._spanning_visit_interval)
var_list = []
for l in self.ordered_visiting_cst_vars: # type: OrderedVisitingConstraintVar
for var in l._visit_interval_vars:
var_list.append(var)
for var in [l._spanning_visit_interval]:
var_list.append(var)
db_visiting_intervals_ct_var = solver.Phase(var_list + resource_intervals, solver.INTERVAL_DEFAULT)
intervalvars += var_list
# db_1 = solver.Phase(self.intvar_lists["opt_time_goal"] + conf_vars,
# solver.CHOOSE_MIN_SIZE, solver.ASSIGN_RANDOM_VALUE)
db_1 = solver.Phase(conf_vars,
solver.CHOOSE_MIN_SIZE, solver.ASSIGN_RANDOM_VALUE)
# db_1b = solver.Phase(self.intvar_lists["opt_time_goal"],
# solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MAX_VALUE)
# db_2 = solver.Phase([self.intervalvar_lists["make_span_interval"]], solver.INTERVAL_DEFAULT)
# db_3 = solver.Phase([self.intervalvar_lists["make_span_interval"]] + interval_vars, solver.INTERVAL_DEFAULT)
db_3 = solver.Phase(interval_vars, solver.INTERVAL_DEFAULT)
resource_intervals = []
for intervals in self.resource_intervals:
resource_intervals += intervals
intervalvars += resource_intervals
if have_visit_ct:
var_list = []
for l in visit_intervals:
for var in l:
var_list.append(var)
db_visit_intervals = solver.Phase(var_list + resource_intervals, solver.INTERVAL_DEFAULT)
db_4a = solver.Phase(self.intervalvar_lists["make_span_interval"], solver.INTERVAL_DEFAULT)
intervalvars += self.intervalvar_lists["make_span_interval"]
db_4 = solver.Phase(self.intvar_lists["opt_time_goal"],
solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
# self.db = solver.Compose([db_0a, db_1, db_3])
# self.db = solver.Compose([db_1, db_3])
# self.db = solver.Compose([db_1, db_1b, db_3])
# all interval variables db
db_all_intervals = solver.Phase(intervalvars, solver.INTERVAL_DEFAULT)
# we estimate the horizon when after we know how many configurations are needed
horizons = []
for key in self.roadmaps.keys():
horizons += self.intvar_lists["horizon_" + key]
db_horizon = solver.Phase(horizons, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
if have_visit_ct:
decision_builders.append(db_visit_confs)
if have_visiting_ct_var:
pass
# decision_builders.append(db_visiting_ct_var) # we should not search over the OVC variables
# adding the horizon
decision_builders.append(db_horizon)
decision_builders.append(db_1) # configurations
####################
# limit = solver.FailuresLimit(20)
limit = solver.TimeLimit(2000)
# limit = solver.BranchesLimit(100)
db_interval_once = solver.SolveOnce(db_all_intervals, [limit])
decision_builders.append(db_interval_once)
# decision_builders.append(db_all_intervals)
####################
# TODO: comment in ?
# decision_builders.append(db_3) # interval vars
# if have_visit_ct:
# decision_builders.append(db_visit_intervals)
# if have_visiting_ct_var:
# pass
# # decision_builders.append(db_visiting_intervals_ct_var) # TODO: we should not search over the OVC intervals
# decision_builders.append(db_4a) # make span interval
decision_builders.append(db_4) # opt time goal
# decision_builders.append(db_horizon)
self.db = solver.Compose(decision_builders)
# self.db = solver.Compose([db_1, db_3, db_4a, db_4])
return
def define_search_monitors(self):
optimize = self.solver.Minimize(self.intvar_lists["opt_time_goal"][0], 1)
search_log = self.solver.SearchLog(10000, self.intvar_lists["opt_time_goal"][0])
self.search_monitors = [optimize, search_log]
def print_model(self):
pmv = self.solver.PrintModelVisitor()
psv = self.solver.StatisticsModelVisitor()
print pmv
# self.solver.Accept(pmv)
self.solver.Accept(psv)
return
def get_last_interval_vars(self):
last_intervals = []
for group in self.roadmaps.keys():
# conf_vars = self.intvar_lists["c_" + group]
interval_vars = self.intervalvar_lists["i_" + group]
last_intervals.append(interval_vars[-1])
return last_intervals
def get_vars_for_group(self, group):
conf_vars = self.intvar_lists["c_" + group]
interval_vars = self.intervalvar_lists["i_" + group]
return conf_vars, interval_vars
def get_object_name(self, node_alias=None):
return self.sosm.get_name_for_alias(node_alias)
def constrain_conf_vars(self):
for gn, rm in self.roadmaps.items():
conf_domain = [int(v) for v in rm.get_nodes_of_major_component()]
for conf_var in self.intvar_lists["c_" + gn]: # type: pywrapcp.IntVar
conf_var.SetValues(conf_domain)
def constrain_conn_group(self):
conn_var_dict = {}
group_var_dict = {}
for var in self.ordered_visiting_cst_vars: # type: OrderedVisitingConstraintVar
execution_group, conf_var, loc_var, connect_var, intervals = var.get_var_list()
conn_var_dict[var.get_name()] = connect_var
group_var_dict[var.get_name()] = execution_group
ct = ConnectSameGroupCt(self.solver, conn_var_dict, group_var_dict)
self.solver.Add(ct)
def ovc_search(self, exp=None, luby_constant=5, EXECUTE_MOTION=False, time_limit=5000, seed=None, bc_solution=None,
bc_solver_stats=None, bc_extra_sol_files=None, bc_motion_req=None):
self.constrain_conf_vars()
self.constrain_conn_group()
solver = self.solver
rand = random.Random()
if not seed:
seed = rand.randint(1, 10000)
solver.ReSeed(seed)
# collect the variables and build the decision builders
interval_list = []
e_groups = []
conf_vars = []
connect_vars = []
loc_vars = []
# collect OVC variables
for var in self.ordered_visiting_cst_vars:
execution_group, conf_var, loc_var, connect_var, intervals = var.get_var_list()
e_groups += [execution_group]
conf_vars += conf_var
loc_vars += loc_var
connect_vars += connect_var
interval_list += intervals
# define dbs for OVC variables - VARIABLE AND VALUE SELECTION HEURISTICS!!!
# visiting_intvar_db = self.solver.Phase(visiting_vars, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
execution_group_db = self.solver.Phase(e_groups, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_RANDOM_VALUE)
conf_vars_db = self.solver.Phase(conf_vars, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_RANDOM_VALUE)
loc_vars_db = self.solver.Phase(loc_vars, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_RANDOM_VALUE)
# connect_vars_db = self.solver.Phase(connect_vars, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
# connect_vars_db = self.solver.Phase(connect_vars, solver.CHOOSE_RANDOM, solver.ASSIGN_MIN_VALUE)
connect_vars_db = self.dependencies
# connect_vars_db = self.solver.Phase(connect_vars, solver.CHOOSE_RANDOM, solver.ASSIGN_CENTER_VALUE)
# concat the OVC vars with a relevant order - VARIABLE ORDERING
# visiting_intvar_db = solver.Compose([execution_group_db, conf_vars_db, connect_vars_db])
visiting_intvar_db = solver.Compose([loc_vars_db, execution_group_db, conf_vars_db, connect_vars_db])
# visiting_intvar_db = solver.Compose([connect_vars_db, conf_vars_db, execution_group_db])
visiting_interval_db = self.solver.Phase(interval_list, solver.INTERVAL_DEFAULT)
visiting_vars = []
for var in self.ordered_visiting_cst_vars:
# int_var, intervals = var.get_var_list()
visiting_vars += var._conf_values
visiting_vars += [var._execution_group]
# visiting_vars += var._wt_build
visiting_conf_db = self.solver.Phase(visiting_vars, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_RANDOM_VALUE)
# visiting_intvar_db contains OVC dbs and self.db contains motion model variables
# complete_db = self.solver.Compose([visiting_intvar_db, self.db, visiting_interval_db])
complete_db = self.solver.Compose([visiting_intvar_db, self.db]) # we should not search over OVC intervals
# complete_db = self.solver.Compose([self.db, visiting_intvar_db]) # we should not search over OVC intervals
# define the restart strategy
luby_restart = solver.LubyRestart(luby_constant)
const_restart = solver.ConstantRestart(500)
final_db = solver.Compose([complete_db])
# final_db = solver.SolveOnce(final_db, [])
# debug search tracer
trace = solver.SearchTrace('debug')
# define search
# self.solver.NewSearch(final_db, self.search_monitors + [self.solver.TimeLimit(time_limit)])
self.solver.NewSearch(final_db, self.search_monitors + [self.solver.TimeLimit(time_limit), luby_restart])
# self.solver.NewSearch(final_db, self.search_monitors + [self.solver.TimeLimit(time_limit), const_restart])
# solution variables
count = 0
self.dependencies.DrawDependencies()
solutions = []
solution_end_times = []
start_time = datetime.now()
elapsed_time = []
new_opt_bound = np.Inf
# start solver
while self.solver.NextSolution():
elapsed_time.append((datetime.now() - start_time).total_seconds())
exp.add_time_to_solution(elapsed_time[-1])
count += 1
print "Solution: {}".format(count)
if bc_solver_stats:
bc_solver_stats(SolverStats(solver))
# print solver._Solver__python_constraints[2]
solution = self.print_solution()
# send solution back
if bc_solution:
bc_solution(solution)
# bc(solution)
# gantt = self.save_solution_gantt()
# if bc_extra_sol_files:
# bc_extra_sol_files(gantt)
last_intervals = self.get_last_interval_vars()
earliest_ends = [last_intervals[k].EndMin() for k in range(len(last_intervals))]
# if np.max([i_right[-1].EndMin(), i_left[-1].EndMin()]) < new_opt_bound:
if np.max(earliest_ends) < new_opt_bound:
new_opt_bound = np.max(earliest_ends)
solution_end_times.append(new_opt_bound)
exp.add_solution_quality(new_opt_bound)
if bc_motion_req:
motion_requests = self.build_ros_trajectory_request()
bc_motion_req(motion_requests)
solutions.append(motion_requests)
if EXECUTE_MOTION:
self.send_motion_requests(motion_requests)
start_time = datetime.now()
self.solver.EndSearch()
if len(solutions) == 0:
total_search_time = (datetime.now() - start_time).total_seconds()
else:
total_search_time = np.sum(elapsed_time) + (datetime.now() - start_time).total_seconds()
if count > 0:
if EXECUTE_MOTION:
self.trigger_build_plan()
return elapsed_time, solutions, solution_end_times, total_search_time
def test_solution(self, solution):
if solution.Value(self.allocation_vars["wt_arm_assignment_vars"][0]) < -5:
return False
return True
def make_neighbor(self, solution_old, k):
# copy assignment
assert k > 0
solution = self.solver.Assignment(solution_old)
rand = np.random
for n_op in range(k):
decision = rand.randint(0, 100)
if decision < 30:
# flip arm assignment
flip_index = rand.randint(0, 5)
arm_val = solution.Value(self.allocation_vars["wt_arm_assignment_vars"][flip_index])
if arm_val == 1:
new_val = 2
else:
new_val = 1
solution.SetValue(self.allocation_vars["wt_arm_assignment_vars"][flip_index], new_val)
# return solution
if 30 <= decision < 60:
# switch part assignment
flip_index_1 = rand.randint(0, 5)
flip_index_2 = rand.randint(0, 5)
while (flip_index_1 == flip_index_2):
flip_index_2 = rand.randint(0, 5)
part_1 = solution.Value(self.allocation_vars["wt_brush_assignment_vars"][flip_index_1])
part_2 = solution.Value(self.allocation_vars["wt_brush_assignment_vars"][flip_index_2])
solution.SetValue(self.allocation_vars["wt_brush_assignment_vars"][flip_index_1], part_2)
solution.SetValue(self.allocation_vars["wt_brush_assignment_vars"][flip_index_2], part_1)
# return solution
if 60 <= decision < 100:
# switch manufacturing order
flip_index_1 = rand.randint(0, 5)
flip_index_2 = rand.randint(0, 5)
while (flip_index_1 == flip_index_2):
flip_index_2 = rand.randint(0, 5)
part_1 = solution.Value(self.allocation_vars["wt_build_order_vars"][flip_index_1])
part_2 = solution.Value(self.allocation_vars["wt_build_order_vars"][flip_index_2])
solution.SetValue(self.allocation_vars["wt_build_order_vars"][flip_index_1], part_2)
solution.SetValue(self.allocation_vars["wt_build_order_vars"][flip_index_2], part_1)
# return solution
return solution
def local_search_cost_function(self, solution):
sosm = self.sosm
# WT from left to right for left arm: 3,4,5,9,x
# WT from left to right for right arm: 3,4,5,9,8
# Brushes from left to right for right arm: 6,7,2,1,0
# Brushes from left to right for left arm: 7,8,2,1,0
wt_poses = {"right_arm": {1: 3, 2: 3, 3: 5, 4: 9, 5: 8},
"left_arm": {1: 3, 2: 3, 3: 5, 4: 9}}
brush_poses = {"right_arm": {1: 6, 2: 7, 3: 2, 4: 1, 5: 0},
"left_arm": {1: 7, 2: 8, 3: 2, 4: 1, 5: 0}}
tool_poses = {"right_arm": {2: 10},
"left_arm": {1: 6}}
# available_wts = [1, 2, 3, 4, 5]
available_wts = {"WT_1": 1, "WT_2": 2, "WT_3": 3, "WT_4": 4, "WT_5": 5}
# available_tools = [1, 2]
available_tools = {"Tool_1": 1, "Tool_2": 2}
# available_brushes = [1, 2, 3, 4, 5]
available_brushes = {"Brush_1": 1, "Brush_2": 2, "Brush_3": 3, "Brush_4": 4, "Brush_5": 5}
available_groups = {1: "right_arm", 2: "left_arm"}
wt_poses = sosm.get_alias_to_poses_for_type("plate")
brush_poses = sosm.get_alias_to_poses_for_type("brush")
tool_poses = sosm.get_alias_to_poses_for_type("torpedo")
available_wts = sosm.get_available_objects_of_type("plate")
available_tools = sosm.get_available_objects_of_type("torpedo")
available_brushes = sosm.get_available_objects_of_type("brush")
available_groups = {"right_arm": 2, "left_arm": 1}
alloc = self.allocation_vars
alloc_dicts = self.alloc_dict
# visiting_vars = self.ordered_visiting_cst_vars # type: list[OrderedVisitingConstraintVar]
cost_groups = {}
# for index,
paths = {}
for g in available_groups.keys():
paths[g] = [tool_poses[g].values()[0]]
for wt in alloc["wt_build_order_vars"]:
wt_val = solution.Value(wt)
execution_group_val = solution.Value(alloc_dicts["wt_arm_assignment_vars"][wt_val])
execution_group = available_groups.keys()[available_groups.values().index(execution_group_val)]
brush_val = solution.Value(alloc_dicts["wt_brush_assignment_vars"][wt_val])
try:
wt_pose = wt_poses[execution_group][wt_val]
brush_pose = brush_poses[execution_group][brush_val]
except KeyError:
return np.Inf
paths[execution_group].append(wt_pose)
paths[execution_group].append(brush_pose)
paths[execution_group].append(wt_pose)
print "still here"
cost_conflict = 0
for gn, path in paths.items():
dist = self.roadmaps[gn].calc_path_distance(path)
group_tuple = (gn, list(self.clash.groups.difference([gn]))[0])
conflicts = np.array([len(self.clash.clashes[group_tuple][loc]) for loc in path])
average_conflict = 1.0 * sum(conflicts * conflicts) / (len(conflicts) * 120 * 120)
try:
cost_groups[gn] += dist
except KeyError:
cost_groups[gn] = dist
cost_conflict += 100 / average_conflict
# conf_WT_0_0 (5) | conf_WT_0_1 (3) | conf_WT_0_2 (2) | conf_WT_0_3 (3) | execution_groupWT_1 (1)
return max(cost_groups.values()) + cost_conflict
def print_solution(self):
solution_dict = {}
for k, v in self.intvar_lists.items():
print k
print v
for value in v:
variable = IntVariable(value)
solution_dict[variable.name] = variable
for k, v in self.intervalvar_lists.items():
print k
print v
for value in v:
ivar = IntervalVariable(value)
solution_dict[value.Name()] = ivar
# def StartExpr(self):
# return _pywrapcp.IntervalVar_StartExpr(self)
#
# def DurationExpr(self):
# return _pywrapcp.IntervalVar_DurationExpr(self)
#
# def EndExpr(self):
# return _py
for k, v in self.resource_intervals.items():
print k
print v
for value in v:
ivar = IntervalVariable(value)
solution_dict[value.Name()] = ivar
try:
for var in self.ordered_visiting_cst_vars:
ovc = OrderedVisitingConstraint(var)
solution_dict[ovc.name] = ovc
for v in var.get_var_list():
print v
except AttributeError:
pass
# self.save_solution_gantt()
return solution_dict
def save_solution_gantt(self):
df = []
for group in self.roadmaps.keys():
conf_vars, interval_vars = self.get_vars_for_group(group)
for index, var in enumerate(interval_vars):
start = var.StartExpr()
start_in_seconds = str(start)
start_m, start_s = divmod(int(start_in_seconds), 60)
start_h, start_m = divmod(start_m, 60)
finish = var.EndExpr()
finish_in_seconds = str(finish)
finish_m, finish_s = divmod(int(finish_in_seconds), 60)
finish_h, finish_m = divmod(finish_m, 60)
# finish =
s_str = "2017-01-01 {:02}:{:02}:{:02}".format(start_h, start_m, start_s)
f_str = "2017-01-01 {:02}:{:02}:{:02}".format(finish_h, finish_m, finish_s)
if finish_in_seconds == start_in_seconds:
continue
if index % 2 == 1:
resource = "Waiting"
descr = "Waiting at node {}".format(conf_vars[index / 2].Value())
else:
resource = "Travelling"
descr = "Travelling from node {} to {}".format(conf_vars[(index - 1) / 2].Value(),
conf_vars[(index + 1) / 2].Value())
df.append(dict(Task=group, Start="2017-01-01 {:02}:{:02}:{:02}".format(start_h, start_m, start_s),
Finish="2017-01-01 {:02}:{:02}:{:02}".format(finish_h, finish_m, finish_s),
Resource=resource,
Description=descr))
for visit_ct in self.ordered_visiting_cst_vars:
var = visit_ct._spanning_visit_interval
start = var.StartExpr()
start_in_seconds = str(start)
start_m, start_s = divmod(int(start_in_seconds), 60)
start_h, start_m = divmod(start_m, 60)
finish = var.EndExpr()
finish_in_seconds = str(finish)
finish_m, finish_s = divmod(int(finish_in_seconds), 60)
finish_h, finish_m = divmod(finish_m, 60)
# finish =
s_str = "2017-01-01 {:02}:{:02}:{:02}".format(start_h, start_m, start_s)
f_str = "2017-01-01 {:02}:{:02}:{:02}".format(finish_h, finish_m, finish_s)
if visit_ct._execution_group.Value() == 1:
resource = "left_arm_gripper"
if visit_ct._execution_group.Value() == 2:
resource = "right_arm_gripper"
# resource = str(visit_ct._execution_group.Value())
descr = str(var.__repr__())
df.append(dict(Task=resource, Start="2017-01-01 {:02}:{:02}:{:02}".format(start_h, start_m, start_s),
Finish="2017-01-01 {:02}:{:02}:{:02}".format(finish_h, finish_m, finish_s),
Resource=resource,
Description=descr))
for key, var_list in self.resource_intervals.items():
for var in var_list:
start = var.StartExpr()
start_in_seconds = str(start)
start_m, start_s = divmod(int(start_in_seconds), 60)
start_h, start_m = divmod(start_m, 60)
finish = var.EndExpr()
finish_in_seconds = str(finish)
finish_m, finish_s = divmod(int(finish_in_seconds), 60)
finish_h, finish_m = divmod(finish_m, 60)
# finish =
s_str = "2017-01-01 {:02}:{:02}:{:02}".format(start_h, start_m, start_s)
f_str = "2017-01-01 {:02}:{:02}:{:02}".format(finish_h, finish_m, finish_s)
resource = key
descr = str(var.__repr__())
df.append(dict(Task=key, Start="2017-01-01 {:02}:{:02}:{:02}".format(start_h, start_m, start_s),
Finish="2017-01-01 {:02}:{:02}:{:02}".format(finish_h, finish_m, finish_s),
Resource=resource,
Description=descr))
colors = dict(Travelling='rgb(46, 137, 205)',
Waiting='rgb(114, 44, 121)',
right_arm_gripper='rgb(198, 47, 105)',
left_arm_gripper='rgb(58, 149, 136)',
Rest='rgb(107, 127, 135)')
fig = ff.create_gantt(df, colors=colors, index_col='Resource', title='Manipulation Plan',
show_colorbar=True, showgrid_x=True, showgrid_y=True, group_tasks=True)
# py.iplot(fig, filename="gantt_solution", world_readable=True)
plot = py.plot(fig, filename="gantt_solution_{}.html".format(datetime.now()), auto_open=False)
return plot
def load_roadmap(self, file_name, group_name="right_arm"):
"""
:param file_name: file name of roadmap file
:param group_name: deprecated - has no effect and will be removed
"""
# if group_name in self.roadmaps.keys():
# del self.roadmaps[group_name]
# rospy.loginfo("Deleted RoadMap for {} to load another one for same group.".format(group_name))
roadmapfactory = RoadMapFactory()
rm = roadmapfactory.load_from_file(file_name)
self.roadmaps[rm.get_group_name()] = rm
# self.roadmaps[group_name] = RoadMap(file_name, group_name)
rospy.loginfo("Loaded RoadMap for {}.".format(group_name))
def check_planning_requirements(self):
# for key in self.roadmaps.keys():
# assert key in self.clash.keys()
self.clash.verify_clash_roadmap_combinations(self.roadmaps)
def trigger_build_plan(self):
TRIGGER_ROS_SERVICE = True
if TRIGGER_ROS_SERVICE:
self.trigger_build_plan_client(EmptyRequest())
def build_ros_trajectory_request(self):
TRIGGER_ROS_SERVICE = True
requests = []
if TRIGGER_ROS_SERVICE:
try:
pass
# rospy.init_node("CSP_MOTION_SOLVER")
except rospy.exceptions.ROSException:
pass
for group, rm in self.roadmaps.items():
conf_vars, interval_vars = self.get_vars_for_group(group)
req = BuildMotionAddTrajectoryRequest()
import cPickle as pickle
req.prm_pickle = pickle.dumps(rm, pickle.HIGHEST_PROTOCOL)
req.move_group = group
req.prm_name = self.roadmaps[group].get_fingerprint()
index = 0
for interval in interval_vars:
req.time.append(interval.StartMin())
req.time.append(interval.EndMin())
if np.mod(index, 2) == 0:
# interval of point
req.prm_pos.append(conf_vars[index / 2].Value())
req.prm_pos.append(conf_vars[index / 2].Value())
elif np.mod(index, 2) == 1:
# interval of travel
req.prm_pos.append(conf_vars[(index - 1) / 2].Value())
req.prm_pos.append(conf_vars[(index + 1) / 2].Value())
index += 1
print req.move_group
print(req.time)
print(req.prm_pos)
requests.append(req)
return requests
def send_motion_requests(self, requests):
for req in requests:
self.send_trajectory_data_client(req)
def trigger_build_plan_client(self, request):
# type: (EmptyRequest)->EmptyResponse
rospy.wait_for_service('MOTION_DISPATCHER/BUILD_MOTION_PLAN')
try:
build_plan_srv = rospy.ServiceProxy('MOTION_DISPATCHER/BUILD_MOTION_PLAN', Empty)
req = request
res = build_plan_srv(req) # type: EmptyResponse
return res
except rospy.ServiceException, e:
print "Service call failed: %s" % e
def send_trajectory_data_client(self, request):
rospy.wait_for_service('MOTION_DISPATCHER/ADD_TRAJECTORY')
try:
send_trajectory_srv = rospy.ServiceProxy('MOTION_DISPATCHER/ADD_TRAJECTORY', BuildMotionAddTrajectory)
req = request # type: BuildMotionAddTrajectoryRequest
res = send_trajectory_srv(req) # type: BuildMotionAddTrajectoryResponse
print res.msg
print res.success
return res
except rospy.ServiceException, e:
print "Service call failed: %s" % e
```
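The `ovc_search` method above wires several decision builders together with `Compose` and runs them under a Luby restart strategy and a time limit. The following is a minimal, self-contained sketch of that same Phase/Compose/LubyRestart pattern on a toy model using or-tools' legacy CP solver; the toy variables and the all-different constraint are illustrative only and are not part of the planner.
```python
# Toy model only; assumes the ortools package with the legacy CP solver is installed.
from ortools.constraint_solver import pywrapcp
solver = pywrapcp.Solver("toy_search")
x = [solver.IntVar(0, 9, "x_%d" % i) for i in range(4)]
solver.Add(solver.AllDifferent(x))
# Two decision builders composed into one fixed variable ordering, as in ovc_search.
db_a = solver.Phase(x[:2], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
db_b = solver.Phase(x[2:], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
final_db = solver.Compose([db_a, db_b])
# Restart and time-limit monitors, mirroring the LubyRestart/TimeLimit setup above.
luby_restart = solver.LubyRestart(5)
solver.NewSearch(final_db, [solver.TimeLimit(5000), luby_restart])
while solver.NextSolution():
    print([v.Value() for v in x])
solver.EndSearch()
```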
#### File: roadmap_planner_tools/scripts/pi_manager_example.py
```python
from geometry_msgs.msg import TransformStamped
from roadmap_planner_tools.planner_input_manager import PlannerInputManager
from roadmap_planning_common_msgs.msg import OrderedVisitingConstraint, StringList, ConstraintType
from roadmap_planning_common_msgs.srv import AddObjectRequest
from rospy_message_converter import json_message_converter
def get_pose_dict():
# type: () -> dict[str, PoseStamped]
PosesDict = {}
s = '{"object_type": "gluepoint", "object_name": "loc_8", "pose": {"header": {"stamp": {"secs": 1527494081, "nsecs": 245750904}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.0673123623248813, "x": -0.11819957737525943, "z": -0.000529293203694906}, "orientation": {"y": -0.09199954920780186, "x": -0.02204239273911617, "z": -0.9826223619036331, "w": -0.15969818331722338}}}}' + '\n'
s += '{"object_type": "gluepoint", "object_name": "loc_6", "pose": {"header": {"stamp": {"secs": 1527494075, "nsecs": 379102230}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.11369306929267338, "x": -0.1261289291850433, "z": 0.0007951176021754491}, "orientation": {"y": 0.07187094200286825, "x": -0.061023926496261725, "z": 0.9873831660085665, "w": 0.12722079850990872}}}}' + '\n'
s += '{"object_type": "gluepoint", "object_name": "loc_7", "pose": {"header": {"stamp": {"secs": 1527494078, "nsecs": 595736504}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.1149274695384531, "x": 0.06208364635543662, "z": -0.005476238253788906}, "orientation": {"y": 0.1316436407714954, "x": 0.019739166149056388, "z": 0.9750055991865761, "w": 0.17788872566576838}}}}' + '\n'
s += '{"object_type": "gluepoint", "object_name": "loc_4", "pose": {"header": {"stamp": {"secs": 1527494065, "nsecs": 979056120}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.06856865760540547, "x": 0.4478018813158141, "z": -0.000679487573898074}, "orientation": {"y": -0.050516132689598016, "x": 0.014163494691613031, "z": -0.878984408924756, "w": -0.47395561461323}}}}' + '\n'
s += '{"object_type": "gluepoint", "object_name": "loc_5", "pose": {"header": {"stamp": {"secs": 1527494071, "nsecs": 795750141}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.06760627153697468, "x": 0.06349269911330815, "z": -0.0007470379806025116}, "orientation": {"y": -0.010168248374561623, "x": 0.04411559477008324, "z": -0.9325496611657705, "w": -0.3581920580954878}}}}' + '\n'
s += '{"object_type": "gluepoint", "object_name": "loc_2", "pose": {"header": {"stamp": {"secs": 1527494059, "nsecs": 112413883}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.06838950251450462, "x": 0.6409328063798745, "z": 0.00015782094835932174}, "orientation": {"y": 0.05237392498219545, "x": 0.02846261965189043, "z": 0.8403338301717435, "w": 0.53878187157086}}}}' + '\n'
s += '{"object_type": "gluepoint", "object_name": "loc_3", "pose": {"header": {"stamp": {"secs": 1527494063, "nsecs": 79089880}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.11665509017783073, "x": 0.6388356663857032, "z": 0.001613388200793883}, "orientation": {"y": -0.10153267056716704, "x": -0.0370089029955912, "z": -0.9240133314356973, "w": -0.3667708020489993}}}}' + '\n'
s += '{"object_type": "gluepoint", "object_name": "loc_1", "pose": {"header": {"stamp": {"secs": 1527494055, "nsecs": 529085398}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.11632455207600452, "x": 0.4453907194544092, "z": 0.0016855318552673815}, "orientation": {"y": -0.11786933993294174, "x": -0.08813291134398896, "z": -0.9653377546033448, "w": -0.21555145135020032}}}}' + '\n'
    json_strs = s.splitlines()
    for json in json_strs:
        if not json.strip():
            continue
req = json_message_converter.convert_json_to_ros_message('roadmap_planning_common_msgs/AddObjectRequest', json) # type: AddObjectRequest
PosesDict[req.object_name] = req.pose
return PosesDict
if __name__ == "__main__":
pi_manager = PlannerInputManager()
trans = TransformStamped()
trans.child_frame_id = 'board'
trans.header.frame_id = 'world'
board_quat = [-0.6646584989424609, 0.7469166744613165, 0.009387090228191897, -0.016013860629187193]
board_trans = [0.6, 0.3, 0.02]
trans.transform.translation.x = board_trans[0]
trans.transform.translation.y = board_trans[1]
trans.transform.translation.z = board_trans[2]
trans.transform.rotation.x = board_quat[0]
trans.transform.rotation.y = board_quat[1]
trans.transform.rotation.z = board_quat[2]
trans.transform.rotation.w = board_quat[3]
pi_manager.add_frame(transform=trans)
PosesDict = get_pose_dict()
pi_manager.add_loc(PosesDict.values(), PosesDict.keys(), len(PosesDict) * ["gluepoint"])
myOVC = OrderedVisitingConstraint()
myOVC.name = 'ovc_1'
loc_names_1 = StringList()
loc_names_1.values.append('loc_1')
myOVC.location_names.append(loc_names_1)
pi_manager.add_ovc([myOVC])
myOVC_2 = OrderedVisitingConstraint()
myOVC_2.name = 'ovc_2'
loc_names_1 = StringList()
loc_names_1.values.append('loc_2')
myOVC_2.location_names.append(loc_names_1)
    pi_manager.add_ovc([myOVC_2])
# ct = Constraints[0]
pi_manager.add_ovc_ct(constraint_type=ConstraintType.StartsAfterEnd, first_ovcs=['ovc_1'], second_ovcs=['ovc_2'])
pi_manager.write_planner_input_file('test')
```
#### File: roadmap_tools/scripts/calc_clash.py
```python
from roadmap_tools.roadmap_clash import RoadMapClash
from roadmap_tools.kinematics_interface_kuka import StateValidity
from roadmap_tools.prm_factory import RoadMapFactory
import time
def create_clash(group_names, roadmap_names):
sv = StateValidity()
rmc = RoadMapClash(group_names)
# prm_factory = RoadMapFactory()
for group_name in group_names:
r = RoadMapFactory.load_prm_from_database(roadmap_names[group_name])
rmc.add_roadmap(r)
rmc.compute_clash_for_prms(sv=sv)
clash_name = "clash_" + str(time.time()).split(".")[0]
rmc.save_clash(groups=group_names, file_name=clash_name)
# rmc.save_clash(groups=["left_arm", "right_arm"], file_name="demo_build_clash")
print roadmap_names, clash_name
return clash_name
if __name__ == "__main__":
rm_names = {'r2_arm': 'prm_r2_arm_2018-05-30 15:09:33.898024', 'r1_arm': 'prm_r1_arm_2018-05-30 14:57:07.705629'}
clash = create_clash(rm_names.keys(), rm_names)
print(clash)
```
#### File: roadmap_tools/scripts/prm_viewer.py
```python
from roadmap_tools.prm_factory import RoadMapFactory
import rospy
import visualization_msgs.msg
from moveit_msgs.msg import geometry_msgs
from roadmap_tools.prm import RoadMap
import copy
import datetime
class RoadMapVisualizer:
def __init__(self, rm_name=None, rm_path=None):
self.rm_name = rm_name
if rm_path:
self.prm = RoadMapFactory.load_from_file(rm_path)
elif rm_name:
self.prm = RoadMapFactory.load_prm_from_database(rm_name) # type: RoadMap
self.group_name = self.prm.get_group_name()
rospy.init_node("RoadMapVisualizer" + self.group_name)
self.group_name = self.prm.get_group_name()
self.marker_publisher = rospy.Publisher('prm_markers', visualization_msgs.msg.MarkerArray, queue_size=1000)
self.V_viz_open = set()
self.E_viz_open = set()
self.no_subscribers_viz = 0
self.V_poses = {}
self.last_visualized = datetime.datetime.now()
self.highlights = []
self.timer = rospy.Timer(rospy.Duration(0.1), self.handle_visualize_timer)
rospy.on_shutdown(self.shutdown_hook)
rospy.spin()
def handle_visualize_timer(self, e):
rospy.logdebug("In handle_visualize_timer")
no_subscribers = self.marker_publisher.get_num_connections()
if no_subscribers < 1:
self.no_subscribers_viz = no_subscribers
return
# rospy.loginfo("There is " + str(no_subscribers) + " Subscriber. Before: " + str(self.no_subscribers_viz))
if self.no_subscribers_viz < no_subscribers or (
datetime.datetime.now() - self.last_visualized).total_seconds() > 25.0:
self.last_visualized = datetime.datetime.now()
self.add_all_nodes_for_viz()
self.add_all_edges_for_viz()
# self.V_viz_open.clear()
# for key in self.V.keys():
# self.V_viz_open.add(key)
# print self.V_viz_open
# self.E_viz_open.clear()
# for edge in self.edges:
# self.E_viz_open.add(self.edges.index(edge))
# print self.E_viz_open
self.no_subscribers_viz = no_subscribers
rospy.wait_for_service('compute_fk')
self.visualize_edges(self.highlights)
self.visualize_prm(self.highlights)
def visualize_prm(self, highlight=[], action=visualization_msgs.msg.Marker.ADD):
marker_array = visualization_msgs.msg.MarkerArray()
node_set = self.V_viz_open.copy()
# for conf in self.V.keys():
for conf in node_set: # type: Vertex
self.V_viz_open.remove(conf)
# TODO: only vis new configurations and cache poses
marker = visualization_msgs.msg.Marker()
marker.header.frame_id = self.prm._base_frame # _"WAIST"
# marker.header.frame_id = "world"
marker.header.stamp = rospy.Time.now()
marker.ns = "prm_poses_" + self.group_name
marker.id = int(conf)
marker.type = visualization_msgs.msg.Marker.ARROW
marker.action = action
# eef_pose = self.get_pose(self.V[conf])
eef_pose = self.prm.get_eef_pose(conf)
pos = eef_pose.pose_stamped[0]
marker.pose.position.x = pos.pose.position.x
marker.pose.position.y = pos.pose.position.y
marker.pose.position.z = pos.pose.position.z
marker.pose.orientation.x = pos.pose.orientation.x
marker.pose.orientation.y = pos.pose.orientation.y
marker.pose.orientation.z = pos.pose.orientation.z
marker.pose.orientation.w = pos.pose.orientation.w
marker.scale.x = 0.05
marker.scale.y = 0.01
marker.scale.z = 0.01
if conf in highlight:
marker.color.a = 0.9
marker.color.r = 1.0
marker.color.g = 0.0
marker.color.b = 0.0
elif len(self.prm.get_vertex_name(conf)) > 0:
# for confs with name
marker.color.a = 0.8
marker.color.r = 1.0
marker.color.g = 0.2
marker.color.b = 0.0
marker.ns = "prm_object_" + self.group_name
# elif conf in self.E.keys():
elif conf.out_degree() + conf.in_degree() > 0:
marker.color.a = 0.3
marker.color.r = 0.0
marker.color.g = 0.0
marker.color.b = 1.0
else:
marker.color.a = 0.6
marker.color.r = 0.0
marker.color.g = 1.0
marker.color.b = 0.0
# make a marker for the node number
text_marker = copy.deepcopy(marker)
text_marker.ns = "prm_poses_numbers" + self.group_name
text_marker.type = visualization_msgs.msg.Marker.TEXT_VIEW_FACING
text_marker.text = str(conf)
# Text position
text_marker.pose.position.z = text_marker.pose.position.z - 0.02
# Text scale
text_marker.scale.z = 0.02
# Text color
text_marker.color.a = 0.6
text_marker.color.r = 1.0
text_marker.color.g = 1.0
text_marker.color.b = 1.0
marker_array.markers.append(text_marker)
# make marker for the vertice name
text_marker2 = copy.deepcopy(text_marker)
text_marker2.ns = "prm_vert_names" + self.group_name
text_marker2.type = visualization_msgs.msg.Marker.TEXT_VIEW_FACING
text_marker2.text = self.prm.get_vertex_name(conf)
# Text position
text_marker2.pose.position.z = text_marker2.pose.position.z - 0.02
# Text scale
text_marker2.scale.z = 0.02
# Text color
text_marker2.color.a = 0.6
text_marker2.color.r = 1.0
text_marker2.color.g = 1.0
text_marker2.color.b = 1.0
marker_array.markers.append(marker)
marker_array.markers.append(text_marker2)
self.marker_publisher.publish(marker_array)
def visualize_edges(self, highlight=[], action=visualization_msgs.msg.Marker.ADD):
marker_array = visualization_msgs.msg.MarkerArray()
edge_set = self.E_viz_open.copy()
# rospy.loginfo("edge_set: {}".format(edge_set))
if len(edge_set) > 0:
pass
# rospy.loginfo("edge_set: {}".format(edge_set))
for edge in edge_set:
self.E_viz_open.remove(edge)
marker = visualization_msgs.msg.Marker()
marker.header.frame_id = self.prm._base_frame #_"WAIST"
marker.header.stamp = rospy.Time.now()
marker.ns = "prm_lines_" + self.group_name
# marker.id = self.edges.index(edge)
marker.id = int(edge.target()) + int(edge.source()) * 10000 # int(edge)
marker.type = visualization_msgs.msg.Marker.LINE_LIST
marker.action = action
# edge_data = self.edges[edge]
source_node = edge.source()
destination_node = edge.target()
for node in [source_node, destination_node]:
eef_pose = self.prm.get_eef_pose(node)
pos = eef_pose.pose_stamped[0]
pt = geometry_msgs.msg.Point()
pt.x = pos.pose.position.x
pt.y = pos.pose.position.y
pt.z = pos.pose.position.z
marker.points.append(pt)
# marker.pose.orientation.x = pos.pose.orientation.x
# marker.pose.orientation.y = pos.pose.orientation.y
# marker.pose.orientation.z = pos.pose.orientation.z
# marker.pose.orientation.w = pos.pose.orientation.w
marker.scale.x = 0.005
marker.scale.y = 0.1
marker.scale.z = 0.1
marker.color.a = 0.4
marker.color.r = 1.0
marker.color.g = 0.0
marker.color.b = 0.0
# TODO: reactivate highlighting of edges
# change color for highlighted edges
# if edge_data[0] in highlight and edge_data[1] in highlight and numpy.abs(highlight.index(edge_data[0]) - highlight.index(edge_data[1])) == 1:
# marker.scale.x = 0.007
# marker.scale.y = 0.1
# marker.scale.z = 0.1
# marker.color.a = 0.4
# marker.color.r = 0.0
# marker.color.g = 1.0
# marker.color.b = 0.0
marker_array.markers.append(marker)
self.marker_publisher.publish(marker_array)
def shutdown_hook(self):
self.timer.shutdown()
while self.timer.isAlive():
rospy.sleep(0.5)
rospy.sleep(0.5)
self.delete_vis_markers()
rospy.sleep(1.0)
rospy.loginfo("Shutdown complete")
def delete_vis_markers(self):
self.add_all_nodes_for_viz()
self.add_all_edges_for_viz()
self.visualize_prm(action=visualization_msgs.msg.Marker.DELETE)
self.visualize_edges(action=visualization_msgs.msg.Marker.DELETE)
def add_all_nodes_for_viz(self):
self.V_viz_open.clear()
for vert in self.prm.vertices():
self.V_viz_open.add(vert)
# for key in self.V.keys():
# self.V_viz_open.add(key)
def add_all_edges_for_viz(self):
self.E_viz_open.clear()
for edge in self.prm.get_edges():
self.E_viz_open.add(edge)
# for edge in self.edges:
# self.E_viz_open.add(self.edges.index(edge))
if __name__ == "__main__":
# r = RoadMapVisualizer('prm_r1_arm_2018-05-30 12:27:28.598489')
# r = RoadMapVisualizer(rm_path="/home/beh2rng/cp_planning_experiments/run_9999/created_in_subprocess_2018-06-12 01:42:35.879541/data_files/roadmaps/prm_r1_arm")
# r = RoadMapVisualizer("prm_left_arm_2018-08-27 12:09:41.998069")
# r = RoadMapVisualizer("prm_r2_arm_2018-09-07 11:59:45.851925")
r = RoadMapVisualizer("prm_r1_arm_2018-09-07 11:59:46.622950")
print "still here"
```
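The edge markers in `visualize_edges` encode a directed (source, target) vertex pair into a single marker id as `source * 10000 + target`, which only stays collision-free while vertex ids are below 10000. A standalone illustration of that encoding (the vertex ids are made up):
```python
def edge_marker_id(source, target, stride=10000):
    # unique per directed edge as long as vertex ids stay below `stride`
    return int(source) * stride + int(target)
print(edge_marker_id(3, 42))   # 30042
print(edge_marker_id(42, 3))   # 420003 - direction matters
```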
#### File: src/roadmap_tools/roadmap_clash.py
```python
import cPickle as pickle
import errno
import os
from prm import RoadMap
import time
import itertools
from graph_tool.all import Vertex
from moveit_msgs.msg import RobotState
import copy
import rospy
class RoadMapClash:
def __init__(self, groups=[], roadmap_fingerprints={}):
self.groups = set(groups)
self.roadmap_fingerprints = {} # type: dict[str, str]
self.clashes = {} # type: dict[tuple(str, str), dict[int, set[int]]]
self.roadmaps = {} # type: dict[str, RoadMap]
def add_roadmap(self, roadmap):
# type: (RoadMap) -> bool
assert isinstance(roadmap, RoadMap)
self.roadmaps[roadmap.get_group_name()] = roadmap
        return isinstance(self.roadmaps[roadmap.get_group_name()], RoadMap)
def get_clash(self, from_group, to_group):
try:
return self.clashes[tuple([from_group, to_group])]
except KeyError:
rospy.logerr("No clash from {} to {} available.".format(from_group, to_group))
return False
def load_clash(self, file_name="prm_clash", groups=[], file_format=None):
if file_name == 'empty':
self.clear_clash()
rospy.loginfo("Cleared clash.")
return
version = None
if not file_format:
try:
path = rospy.get_param("/SolverSetup/data_path", "")
path = os.path.expanduser(path)
file_name = os.path.join(path, file_name)
if os.path.exists(file_name):
with open(file_name, 'rb') as output:
loaded_clash = pickle.load(output)
version = loaded_clash.file_format_version
else:
with open(file_name + '.pkl', 'rb') as output:
loaded_clash = pickle.load(output)
version = loaded_clash.file_format_version
except AttributeError:
rospy.logwarn("File {} has no version tag. Try to guess content format.".format(file_name))
else:
version = file_format
if version == 0.1:
with open(file_name, 'rb') as output:
loaded_clash = pickle.load(output) # type: RoadMapClash
# assert isinstance(loaded_clash, RoadMapClash)
# print type(loaded_clash)
for group in groups:
if group not in loaded_clash.groups:
rospy.logwarn(
"Information about group {} is not available in file {}.pkl.".format(group, file_name))
continue
self.groups.add(group)
try:
self.roadmap_fingerprints[group] = loaded_clash.roadmap_fingerprints[group]
except KeyError:
print "No Roadmap fingerprint for group {} in file {}.pkl.".format(group, file_name)
for group_combo in loaded_clash.clashes.keys():
if group_combo[0] in groups and group_combo[-1] in groups:
self.clashes[group_combo] = loaded_clash.clashes[group_combo]
else:
print "Clash information for group combo {} in file {}.pkl not used. Not specified in groups: {}".format(
group_combo, file_name, groups)
def save_clash(self, groups=[], file_name="prm_clash"):
"""
Saves the computed or modified clashes to the disk.
:param groups: group names for which we want to save the information
:param file_name: will be saved to <filename-or-path>.pkl
"""
data_path = rospy.get_param("SolverSetup/data_path", default="")
data_path = os.path.expanduser(data_path)
file_format_version = 0.1
# get a copy of the whole roadmapclash object
clash_save = copy.deepcopy(self)
# strip out unwanted clashes
for group_combo in self.clashes.keys():
if group_combo[0] not in groups or group_combo[-1] not in groups:
del clash_save.clashes[group_combo]
# strips out unwanted groups
for group in self.groups:
if group not in groups:
clash_save.groups.remove(group)
# strips out unwanted fingerprints
for group in self.roadmap_fingerprints.keys():
if group not in groups:
del clash_save.roadmap_fingerprints[group]
# strips out all roadmaps
for group in self.roadmaps.keys():
del clash_save.roadmaps[group]
clash_save.file_format_version = file_format_version
assert clash_save.file_format_version == file_format_version
filename = os.path.join(data_path, file_name + ".pkl")
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(filename, 'wb') as output:
pickle.dump(clash_save, output, pickle.HIGHEST_PROTOCOL)
rospy.loginfo("Saved clash {} to file.".format(filename))
def clear_clash(self, groups=[]):
if len(groups) == 0:
self.clashes = {}
self.groups = set()
else:
del_clash_keys = []
for group_combo in self.clashes.keys():
if group_combo[0] in groups or group_combo[-1] in groups:
del_clash_keys.append(group_combo)
for key in del_clash_keys:
del self.clashes[key]
for group in groups:
self.groups.remove(group)
return
def check_clash_roadmap_and_copy_fingerprint(self, groups=[]):
# TODO: change to new clash key tuple
for group in groups:
if group in self.clashes.keys() and group in self.roadmaps.keys():
if len(self.clashes[group].keys()) == self.roadmaps[group].get_number_of_vertices():
self.roadmap_fingerprints[group] = self.roadmaps[group].get_fingerprint()
self.groups.add(group)
def verify_clash_roadmap_combinations(self, roadmaps):
# type: (dict[str, RoadMap]) -> void
for group in self.groups:
            if len(self.groups) > 1:
assert group in [clash_name[0] for clash_name in self.clashes.keys()]
assert group in roadmaps.keys()
for clash_key in self.clashes.keys():
gn_0 = clash_key[0]
assert len(self.clashes[clash_key].keys()) == roadmaps[gn_0].get_number_of_vertices()
return
def build_robot_state(self, prm_1, v1, prm_2, v2):
# type: (RoadMap, Vertex, RoadMap, Vertex) -> RobotState
rs1 = prm_1.get_robot_state_for_vertex(v1) # type: RobotState
rs2 = prm_2.get_robot_state_for_vertex(v2) # type: RobotState
joint_names_2 = prm_2.get_joint_names_of_group()
rs = rs1
joint_state_pos_lst = list(rs.joint_state.position)
for joint_name in joint_names_2:
i_prm = rs2.joint_state.name.index(joint_name)
i_rs = rs.joint_state.name.index(joint_name)
joint_state_pos_lst[i_rs] = rs2.joint_state.position[i_prm]
rs.joint_state.position = tuple(joint_state_pos_lst)
return rs
# TODO: test compute clash refactoring
def compute_clash_for_prms(self, sv=None):
start_time = time.time()
prm_it = itertools.product(self.roadmaps.keys(), self.roadmaps.keys())
for prm_combo in prm_it:
prm_combo_l = list(prm_combo)
prm_combo_l.reverse()
if prm_combo[0] == prm_combo[1]:
continue
elif prm_combo in self.clashes.keys():
continue
elif tuple(prm_combo_l) in self.clashes.keys():
continue
else:
prm_1 = self.roadmaps[prm_combo[0]]
prm_2 = self.roadmaps[prm_combo[1]]
it = itertools.product(prm_1.vertices(), prm_2.vertices())
prm_1_comp = {}
for i in prm_1.vertices():
prm_1_comp[int(i)] = set()
prm_2_comp = {}
for i in prm_2.vertices():
prm_2_comp[int(i)] = set()
elapsed_time = time.time() - start_time
print elapsed_time
count = 0
for t in it: # type: tuple[Vertex, Vertex]
rs = self.build_robot_state(prm_1, t[0], prm_2, t[-1])
# TODO: group name for group including all checked groups
stateVal = sv.getStateValidity(rs, group_name="upperbody")
# stateVal = self.SV_SRV.getStateValidity(rs, group_name="upperbody")
if stateVal:
prm_1_comp[int(t[0])].add(int(t[-1]))
prm_2_comp[int(t[-1])].add(int(t[0]))
count += 1
if count > 1000000:
break
if count % 1000 == 0:
print count
print count
self.clashes[tuple([prm_1.get_group_name(), prm_2.get_group_name()])] = prm_1_comp
self.clashes[tuple([prm_2.get_group_name(), prm_1.get_group_name()])] = prm_2_comp
self.roadmap_fingerprints[prm_1.get_group_name()] = prm_1.get_fingerprint()
self.roadmap_fingerprints[prm_2.get_group_name()] = prm_2.get_fingerprint()
elapsed_time = time.time() - start_time
print elapsed_time
if __name__ == "__main__":
rmc = RoadMapClash()
# rmc.load_clash()
rmc.load_clash(groups=["left_arm"], file_name="test_save_load_clash")
```
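A short usage sketch (an assumed workflow, not a script from the repository): load a previously saved clash for two groups and query the compatible-vertex mapping through the `(from_group, to_group)` key used by `get_clash`. The group names and file name below are placeholders, and a running ROS master is assumed because `load_clash` reads a `rospy` parameter for the data path.
```python
from roadmap_tools.roadmap_clash import RoadMapClash
groups = ["left_arm", "right_arm"]  # placeholder group names
rmc = RoadMapClash(groups)
rmc.load_clash(groups=groups, file_name="some_saved_clash")  # hypothetical file name
clash = rmc.get_clash("left_arm", "right_arm")
if clash:
    # clash maps a vertex id of the left_arm roadmap to the set of right_arm
    # vertex ids that were found collision-free in combination with it
    print(clash.get(0, set()))
```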
#### File: src/roadmap_tools/roadmap_util.py
```python
import itertools
import time
from graph_tool import Vertex
from moveit_msgs.msg import RobotTrajectory
from roadmap_tools.prm import RoadMap
import math
import moveit_commander
import rospy
import numpy as np
from trajectory_msgs.msg import JointTrajectoryPoint
class RoadmapUtils:
def __init__(self, sv, fk, ik):
self.sv = sv # type: StateValidity
self.fk = fk # type: ForwardKinematics
self.ik = ik # type: InverseKinematics
self.mgroups = {} # type: dict[str, moveit_commander.MoveGroupCommander]
def get_mgroup(self, name):
try:
return self.mgroups[name]
except KeyError:
self.mgroups[name] = moveit_commander.MoveGroupCommander(name)
return self.mgroups[name]
def get_robot_state(self, eef_pose, link, group):
        resp = self.ik.getIK(group, link, eef_pose)
        return resp
# returns a list of vertices with ascending distances to the given RobotState
def get_neighborhood(self, prm, q1, dist_max=1.0):
# type: (RoadMap, Vertex) -> (list[Vertex], list[float])
assert isinstance(q1, Vertex)
assert isinstance(prm, RoadMap)
res = []
fin_res = []
for s in prm.vertices():
if prm.get_robot_state_for_vertex(s) is prm.get_robot_state_for_vertex(q1):
continue
# dist = self.distance(self.prm.get_robot_state_for_vertex(s), self.prm.get_robot_state_for_vertex(q1),
# "time")
dist = self.point_point_dist(prm, q1, s)
# dist = self.distance(self.prm.get_robot_state_for_vertex(s), self.prm.get_robot_state_for_vertex(q1),
# "ed")
# dist = self.distance(self.prm.get_robot_state_for_vertex(s), self.prm.get_robot_state_for_vertex(q1),
# "eef")
if dist < dist_max:
res.append((s, dist))
res = sorted(res, key=lambda comp: comp[-1])
# print res
dists = []
for r in res:
fin_res.append(r[0])
dists.append(r[1])
return fin_res, dists
def point_point_dist(self, prm, v1, v2):
assert isinstance(prm, RoadMap)
assert isinstance(v1, (int, Vertex))
assert isinstance(v2, (int, Vertex))
p1 = prm.get_eef_pose(v1, self.fk)
p2 = prm.get_eef_pose(v2, self.fk)
assert p1.pose_stamped[0].header.frame_id == p2.pose_stamped[0].header.frame_id
dist = math.sqrt((p2.pose_stamped[0].pose.position.x - p1.pose_stamped[0].pose.position.x)**2 +
(p2.pose_stamped[0].pose.position.y - p1.pose_stamped[0].pose.position.y)**2 +
(p2.pose_stamped[0].pose.position.z - p1.pose_stamped[0].pose.position.z)**2
)
return dist
def connect_4(self, prm, q1, q2, only_transit=False):
# type: (RoadMap, Vertex, Vertex) -> (bool, RobotTrajectory)
assert isinstance(prm, RoadMap)
if only_transit:
type_q1 = prm.get_type_for_vertex(q1)
type_q2 = prm.get_type_for_vertex(q2)
if type_q1 != "travel" and type_q2 != "travel":
return False, RobotTrajectory()
q1_robot_state = prm.get_robot_state_for_vertex(q1)
q2_robot_state = prm.get_robot_state_for_vertex(q2)
mc = self.get_mgroup(prm.get_group_name())
mc.set_start_state(q1_robot_state)
plan = mc.plan(q2_robot_state.joint_state)
if len(plan.joint_trajectory.points) >= 2:
return True, plan
else:
return False, plan
def connect_4_cart(self, prm, q1, q2):
# type: (RoadMap, Vertex, Vertex) -> (bool, RobotTrajectory)
assert isinstance(prm, RoadMap)
# q1_robot_state = prm.get_robot_state_for_vertex(q1)
pose_1 = prm.get_eef_pose(q1, fk=self.fk).pose_stamped[0].pose
pose_2 = prm.get_eef_pose(q2, fk=self.fk).pose_stamped[0].pose
# q2_robot_state = prm.get_robot_state_for_vertex(q2)
mc = self.get_mgroup(prm.get_group_name())
waypoints = [pose_1, pose_2]
path, fraction = mc.compute_cartesian_path(waypoints, 0.05, 0.0)
if fraction >= 0.9:
return True, path
else:
return False, path
def plan_dist(self, plan):
assert isinstance(plan, RobotTrajectory)
if len(plan.joint_trajectory.points) >= 2:
jpt = plan.joint_trajectory.points[-1] # type: JointTrajectoryPoint
dist = jpt.time_from_start.to_sec()
# print("dist: {}".format(dist))
return True, plan, dist
else:
return False, plan, np.inf
def mp_dist(self, prm, conf_1, conf_2, plan=None):
'''
This function uses moveit to plan a motion and returns the time needed for the planned motion.
:param prm: Roadmap to retrieve the right move_group_name
:param conf_1: starting configuration
:param conf_2: goal configuration
:param plan: if a plan is provided, only its length is calculated
:return: success, plan, dist
'''
if plan is None:
mc = self.get_mgroup(prm.get_group_name())
mc.set_start_state(conf_1)
plan = mc.plan(conf_2.joint_state)
assert isinstance(plan, RobotTrajectory)
if len(plan.joint_trajectory.points) >= 2:
jpt = plan.joint_trajectory.points[-1] # type: JointTrajectoryPoint
dist = jpt.time_from_start.to_sec()
return True, plan, dist
else:
return False, plan, np.inf
def add_edge(self, prm, q1, q2, plan=None):
# type: (RoadMap, Vertex, Vertex, RobotTrajectory) -> None
if q1 is q2:
return
edge = prm.add_edge(q1, q2)
if plan is None:
connect, plan = self.connect_4(prm, q1, q2)
assert isinstance(plan, RobotTrajectory)
if plan is not None:
jpt = plan.joint_trajectory.points[-1] # type: JointTrajectoryPoint
prm.set_edge_distance(edge, jpt.time_from_start.to_sec())
prm.set_edge_traj(edge, plan)
rospy.loginfo("Adding edge no {} with source {} and target {}".format(prm.get_number_of_edges(), q1, q2))
return edge
@staticmethod
def same_component(prm, q1, q2):
"""
        Method to check whether two nodes are connected in the roadmap.
:param prm:
:param q1: first node
:param q2: second node
:return: True if connected or identical, False if not yet connected
"""
assert isinstance(prm, RoadMap)
if int(q2) in prm.get_nodes_of_component(q1):
return True
else:
return False
def get_connection_distance(self, prm, q1, q2, cutoff=np.inf):
if q1 is q2:
return 0.0
path = RoadmapUtils.find_shortest_path(prm, q1, q2)
if len(path) == 0:
print "Path len is 0!"
return np.inf
# assert len(path) != 0
dist_min = self.calc_path_length(prm, path, cutoff=cutoff)
return dist_min
@staticmethod
def find_shortest_path(prm, start, end):
path = prm.find_path_prm(start, end)
return path
def calc_path_length(self, prm, path=[], cutoff=np.inf):
assert len(path) != 0
dist = 0
for i in range(0, len(path) - 1, 1):
dist += self.distance_for_nodes(prm, path[i], path[i + 1])
if dist > cutoff:
return np.inf
return dist
def distance_for_nodes(self, prm, v1, v2, dist_type="time"):
conf_1 = prm.get_robot_state_for_vertex(v1)
conf_2 = prm.get_robot_state_for_vertex(v2)
dist = self.distance(prm, conf_1, conf_2, dist_type)
return dist
def distance(self, prm, conf_1, conf_2, type="ed", plan=None):
# type: (RoadMap, RobotState, RobotState, str) -> float
sq_sum = 0
dist = 0
if type is "point_dist":
# names = self.robot_commander.get_joint_names(self.mgroup.get_name())
names = prm.get_joint_names_of_group()
for n in names:
idx = conf_1.joint_state.name.index(n)
sq_sum += np.square(conf_1.joint_state.position[idx] - conf_2.joint_state.position[idx])
dist = np.sqrt(sq_sum)
if type is "ed":
# names = self.robot_commander.get_joint_names(self.mgroup.get_name())
names = prm.get_joint_names_of_group()
for n in names:
idx = conf_1.joint_state.name.index(n)
sq_sum += np.square(conf_1.joint_state.position[idx] - conf_2.joint_state.position[idx])
dist = np.sqrt(sq_sum)
elif type is "abs":
names = prm.get_joint_names_of_group()
for n in names:
idx = conf_1.joint_state.name.index(n)
val1 = conf_1.joint_state.position[idx]
val2 = conf_2.joint_state.position[idx]
sq_sum += np.abs(conf_1.joint_state.position[idx] - conf_2.joint_state.position[idx])
dist = sq_sum
elif type is "vec_abs":
c1 = np.array(conf_1.joint_state.position)
c2 = np.array(conf_2.joint_state.position)
dist = np.sum(np.abs(c1 - c2))
# diffs = numpy.array(numpy.abs(list(conf_1.robot_state.joint_state.position) - list(conf_2.robot_state.joint_state.position)))
# dist = numpy.sum(diffs)
elif type is "time":
c1 = np.array(conf_1.joint_state.position)
c2 = np.array(conf_2.joint_state.position)
assert len(c1) == len(c2)
dists = np.abs(c1 - c2)
# dist = numpy.sum(numpy.abs(c1 - c2))
# TODO: for nxo, the joint speeds are all 0.5. Quick hack!!!
times = np.divide(dists, len(conf_1.joint_state.position) * [0.5])
dist = np.max(times)
# diffs = numpy.array(numpy.abs(list(conf_1.robot_state.joint_state.position) - list(conf_2.robot_state.joint_state.position)))
# dist = numpy.sum(diffs)
elif type is "mp":
# use a motion planner to find time distance
con, plan, dist = self.mp_dist(prm, conf_1, conf_2, plan=plan)
return dist
# TODO: test compute clash refactoring
def amend_clash_for_prms(self, prm1, prm2, clash1, clash2, it=None, robot_info=None):
# type: (RoadMap, RoadMap, dict[int, list[int]], dict[int, list[int]], None, RobotInfo) -> None
sv = self.sv
start_time = time.time()
elapsed_time = time.time() - start_time
# when method is called without iterator, we complete the whole clash with recursive calls
if it is None:
# find all vertices which are not in handled in clash
not_in_clash_1 = set(prm1.vertices()).difference(clash1.keys())
not_in_clash_2 = set(prm2.vertices()).difference(clash2.keys())
if len(not_in_clash_1) > 0 or len(not_in_clash_2) > 0:
if len(not_in_clash_1) > 0:
it = itertools.product(not_in_clash_1, prm2.vertices())
self.amend_clash_for_prms(prm1, prm2, clash1, clash2, it, robot_info)
self.amend_clash_for_prms(prm1, prm2, clash1, clash2, None, robot_info)
return
if len(not_in_clash_2) > 0:
it = itertools.product(not_in_clash_2, prm1.vertices())
self.amend_clash_for_prms(prm2, prm1, clash2, clash1, it, robot_info)
return
return
print elapsed_time
count = 0
for t in it: # type: tuple[Vertex, Vertex]
rs = RoadmapUtils.build_robot_state(prm1, t[0], prm2, t[-1])
if False:
pub = rospy.Publisher("display_robot_state", DisplayRobotState, queue_size=10)
# rospy.sleep(1.0)
msg = DisplayRobotState()
msg.state = rs
# msg.state.joint_state.header = "blah"
pub.publish(msg)
gn = robot_info.getCompleteGroup()
stateVal = sv.getStateValidity(rs, group_name=gn)
# stateVal = self.SV_SRV.getStateValidity(rs, group_name="upperbody")
# TODO: decide for a consistent semantics of stateval: Response vs bool - It's bool atm
# if stateVal.valid:
if stateVal is True:
try:
clash1[int(t[0])].add(int(t[-1]))
except KeyError:
clash1[int(t[0])] = set([int(t[-1])])
try:
clash2[int(t[-1])].add(int(t[0]))
except KeyError:
clash2[int(t[-1])] = set([int(t[0])])
count += 1
if count > 1000000:
break
if count % 1000 == 0:
print count
elapsed_time = time.time() - start_time
# print elapsed_time
@staticmethod
def build_robot_state(prm_1, v1, prm_2, v2):
# type: (RoadMap, Vertex, RoadMap, Vertex) -> RobotState
# rs = RobotState()
rs1 = prm_1.get_robot_state_for_vertex(v1) # type: RobotState
rs2 = prm_2.get_robot_state_for_vertex(v2) # type: RobotState
joint_names_2 = prm_2.get_joint_names_of_group()
rs = rs1
joint_state_pos_lst = list(rs.joint_state.position)
for joint_name in joint_names_2:
i_prm = rs2.joint_state.name.index(joint_name)
i_rs = rs.joint_state.name.index(joint_name)
joint_state_pos_lst[i_rs] = rs2.joint_state.position[i_prm]
rs.joint_state.position = tuple(joint_state_pos_lst)
return rs
```
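The `"time"` metric in `RoadmapUtils.distance` estimates travel time as the time of the slowest joint moving at a fixed 0.5 rad/s (the hack noted in the code). A standalone numpy sketch of that computation, with made-up joint values:
```python
import numpy as np
# Hypothetical joint positions of two configurations (radians), for illustration only.
c1 = np.array([0.0, 0.3, -1.2, 0.8])
c2 = np.array([0.4, 0.1, -0.2, 0.8])
JOINT_SPEED = 0.5  # rad/s, the fixed joint speed assumed by the "time" metric
times = np.abs(c1 - c2) / JOINT_SPEED  # per-joint travel times
dist = np.max(times)                   # the metric: time of the slowest joint
print(dist)  # 2.0 seconds, dominated by the third joint
```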
#### File: src/roadmap_tools/rs_vertex.py
```python
import moveit_commander
import moveit_msgs.msg
import moveit_msgs.srv
import cPickle as pickle
import random
from roadmap_tools.robot_info import RobotInfo
import copy
import rospkg
import os
class rs_vertex:
joint_state_default = None
rand = random.Random()
sv = None
robot_com = None
rospack = rospkg.RosPack()
ri = None
def __init__(self, group_name, joint_names, robot_com=None, robot_name="kawada"):
# self.rand = random.Random()
self.joint_names = joint_names # robot_com.get_joint_names(group_name)
self.robot_state = None
self.checked = False # type: bool
self.validity_last_check = None
self.id = None
self.group_name = group_name
self.robot_name = robot_name
def check_vertex_compatibility(self, other, sv=None):
# type: (rs_vertex) -> bool
if rs_vertex.robot_com is None:
rs_vertex.robot_com = moveit_commander.RobotCommander()
for jn in self.joint_names:
assert jn not in rs_vertex.robot_com.get_joint_names(other.group_name)
robot_state = copy.copy(self.robot_state)
joint_state_position_lst = list(robot_state.joint_state.position)
for name in other.joint_names:
i = robot_state.joint_state.name.index(name)
joint_state_position_lst[i] = other.robot_state.joint_state.position[i]
robot_state.joint_state.position = tuple(joint_state_position_lst)
val = rs_vertex.getStateValidity(robot_state, sv=sv)
# val = rs_vertex.sv.getStateValidity(robot_state, group_name="upperbody")
return val.valid
@staticmethod
def getStateValidity(robot_state, sv=None):
if rs_vertex.ri is None:
rs_vertex.ri = RobotInfo.getRobotInfo() # type: RobotInfo
groupname = rs_vertex.ri.getCompleteGroup()
if sv is not None:
            return sv.getStateValidity(robot_state, group_name=groupname)
if rs_vertex.sv is None:
rs_vertex.sv = rs_vertex.ri.getSV()
return rs_vertex.sv.getStateValidity(robot_state, group_name=groupname)
def set_robot_state(self, group_name, robot, set_joint_state=None):
# load joint state into memory and do not load it from the file every time.
js = self.get_off_joint_state(robot_name=self.robot_name)
robot_state = moveit_msgs.msg.RobotState()
# TODO: get rid of the base state
js = js.joint_state
joint_state_position_lst = list(js.position)
if set_joint_state is None:
robot_state.joint_state = js
else:
robot_state.joint_state = set_joint_state
for name in self.joint_names:
if set_joint_state is None:
i = robot_state.joint_state.name.index(name)
                joint_state_position_lst[i] = robot.joint_map[name].limit.lower + rs_vertex.rand.random() * (
                    robot.joint_map[name].limit.upper - robot.joint_map[name].limit.lower)
else:
try:
i2 = set_joint_state.name.index(name)
except ValueError:
print("Joint {} not in state".format(name))
continue
i = i2
joint_state_position_lst[i] = set_joint_state.position[i2]
robot_state.joint_state.position = tuple(joint_state_position_lst)
robot_state.is_diff = False
self.robot_state = robot_state
@staticmethod
def get_off_joint_state(robot_name="kawada", file_name_suffix=None):
path = rs_vertex.rospack.get_path("roadmap_tools")
file_name = os.path.join(path, "data", robot_name + "_joint_state")
if file_name_suffix is not None:
file_name += "_" + file_name_suffix + ".pkl"
else:
file_name += ".pkl"
if rs_vertex.joint_state_default is None:
with open(file_name, 'rb') as output:
js = pickle.load(output)
rs_vertex.joint_state_default = js
else:
js = copy.copy(rs_vertex.joint_state_default)
print(js)
return js
@staticmethod
def load_off_joint_state(robot_name="kawada", file_name_suffix=""):
path = rs_vertex.rospack.get_path("roadmap_tools")
file_name = os.path.join(path, "data", robot_name + "_joint_state")
file_name += "_" + file_name_suffix + ".pkl"
with open(file_name, 'rb') as output:
js = pickle.load(output)
rs_vertex.joint_state_default = js
print(js)
return js
@staticmethod
def save_off_joint_state(robot_name="kuka", file_name_suffix=""):
path = rs_vertex.rospack.get_path("roadmap_tools")
file_name = os.path.join(path, "data", robot_name + "_joint_state")
file_name = file_name + "_" + file_name_suffix + ".pkl"
print(file_name)
if rs_vertex.robot_com is None:
rs_vertex.robot_com = moveit_commander.RobotCommander()
rs = rs_vertex.robot_com.get_current_state()
print(rs.joint_state)
with open(file_name, 'wb') as output:
output.write(pickle.dumps(rs, pickle.HIGHEST_PROTOCOL))
return
```
#### File: src/roadmap_tools/SceneObjectSearchMapping.py
```python
import copy
from roadmap_tools.prm import RoadMap
class SceneObjectSearchMapping:
def __init__(self, roadmaps=[], old_sosm=None):
# self._groups = []
self._rm = {} # type: dict[str, RoadMap]
for rm in roadmaps:
self._rm[rm.get_group_name()] = rm
self._object_names = set()
self._rm_node = {}
for gn in self._rm.keys():
self._rm_node[gn] = {}
self._name_int_alias = {}
self._object_type = {}
self.lookup_object_names_from_roadmaps()
self.lookup_rm_nodes_for_objects()
self.lookup_object_types()
if old_sosm is None:
self.create_int_alias()
else:
assert isinstance(old_sosm, SceneObjectSearchMapping)
self.create_int_alias(old_sosm.get_name_int_alias())
print "still here"
def get_groups(self):
available_groups = {}
        for idx, gn in enumerate(self._rm.keys()):
            available_groups[gn] = idx
return available_groups
def get_name_for_alias(self, alias):
if alias in self._name_int_alias.values():
index = self._name_int_alias.values().index(alias)
name = self._name_int_alias.keys()[index]
return name
return None
def get_alias_to_poses(self):
objs = self.get_available_objects()
av_objs = {}
for gn in self._rm.keys():
av_objs[gn] = {}
for on, alias in objs.items():
for gn in self._rm.keys():
try:
av_objs[gn][alias] = self._rm_node[gn][on]
except KeyError:
pass
return av_objs
def get_alias_to_poses_for_type(self, obj_type):
objs = self.get_available_objects_of_type(obj_type)
av_objs = {}
for gn in self._rm.keys():
av_objs[gn] = {}
for on, alias in objs.items():
for gn in self._rm.keys():
try:
av_objs[gn][alias] = self._rm_node[gn][on]
except KeyError:
pass
return av_objs
def get_alias_to_poses_for_types(self, obj_types=[]):
objs = self.get_available_objects_of_types(obj_types)
av_objs = {}
for gn in self._rm.keys():
av_objs[gn] = {}
for on, alias in objs.items():
for gn in self._rm.keys():
try:
av_objs[gn][alias] = self._rm_node[gn][on]
except KeyError:
pass
return av_objs
def get_available_objects(self):
objs = {}
for obj, o_type in self._object_type.items():
objs[obj] = self._name_int_alias[obj]
return objs
def get_available_objects_of_type(self, obj_type):
objs = {}
for obj, o_type in self._object_type.items():
if obj_type == o_type:
objs[obj] = self._name_int_alias[obj]
return objs
def get_available_objects_of_types(self, obj_type=[]):
objs = {}
for obj, o_type in self._object_type.items():
if o_type in obj_type:
objs[obj] = self._name_int_alias[obj]
return objs
def create_int_alias(self, old_int_alias={}):
if len(old_int_alias.values()) == 0:
next_id = 0
else:
next_id = max(old_int_alias.values()) + 1
for on in self._object_names:
try:
self._name_int_alias[on] = old_int_alias[on]
except KeyError:
self._name_int_alias[on] = next_id
next_id += 1
def lookup_object_names_from_roadmaps(self, obj_types=["block"]):
for gn in self._rm.keys():
names = self._rm[gn].get_vertices_for_property(obj_types)
self._object_names = self._object_names.union(set(names))
def lookup_rm_nodes_for_objects(self):
for gn in self._rm.keys():
for on in self._object_names:
vertex = self._rm[gn].get_vertex_for_name(on)
if vertex:
self._rm_node[gn][on] = int(str(vertex))
def lookup_object_types(self):
for on in self._object_names:
for gn in self._rm.keys():
if on in self._rm_node[gn].keys():
vertex = self._rm_node[gn][on]
self._object_type[on] = self._rm[gn].get_type_for_vertex(vertex)
def get_pose_for_name(self, on, default=None):
for gn, node_dict in self._rm_node.items():
if on in node_dict.keys():
vert = node_dict[on]
pose = self._rm[gn].get_eef_pose(vert)
return pose
def get_poses(self):
poses = {}
for name, alias in self._name_int_alias.items():
pose = None
pose = self.get_pose_for_name(name)
if pose:
poses[alias] = pose
return poses
def get_name_int_alias(self):
return copy.copy(self._name_int_alias)
```
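`create_int_alias` keeps the integer ids of scene objects stable across re-mappings: names already present in the old mapping keep their id, and new names receive the next free integer. A minimal plain-Python illustration of that rule (the object names are made up):
```python
# Old mapping from a previous SceneObjectSearchMapping instance (illustrative names).
old_alias = {"block_a": 0, "block_b": 1}
object_names = {"block_b", "block_c"}  # objects currently present in the roadmaps
new_alias = {}
next_id = max(old_alias.values()) + 1 if old_alias else 0
for name in object_names:
    if name in old_alias:
        new_alias[name] = old_alias[name]  # reuse the stable id
    else:
        new_alias[name] = next_id          # assign the next free id
        next_id += 1
print(new_alias)  # e.g. {'block_b': 1, 'block_c': 2}
```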
#### File: roadmap_tools/test/test_prm.py
```python
import unittest
import rostest
class TestRoadmap(unittest.TestCase):
def test_create_roadmap_with_parameters(self):
from roadmap_tools.prm import RoadMap
active_joints = ['RARM_JOINT0',
'RARM_JOINT1',
'RARM_JOINT2',
'RARM_JOINT3',
'RARM_JOINT4',
'RARM_JOINT5']
rm = RoadMap(group_name="right_arm", base_frame="WAIST", eef_link="RARM_JOINT5_Link", active_joints=active_joints)
self.assertEqual(rm.get_joint_names_of_group(), active_joints)
def test_add_nodes_and_vertices(self):
from roadmap_tools.prm import RoadMap
from roadmap_tools.prm import RobotState
active_joints = ['RARM_JOINT0',
'RARM_JOINT1',
'RARM_JOINT2',
'RARM_JOINT3',
'RARM_JOINT4',
'RARM_JOINT5']
rm = RoadMap(group_name="right_arm", base_frame="WAIST", eef_link="RARM_JOINT5_Link", active_joints=active_joints)
v1 = rm.add_vertex(RobotState(), "my_node_1", "test_node")
v2 = rm.add_vertex(RobotState(), "my_node_2", "test_node")
e = rm.add_edge(v1, v2)
self.assertEqual(rm.get_number_of_vertices(), 2)
self.assertEqual(rm.get_number_of_edges(), 1)
PKG = 'roadmap_tools'
NAME = 'test_prm'
if __name__ == '__main__':
rostest.unitrun(PKG, NAME, TestRoadmap)
``` |
{
"source": "jkbgbr/Srep3",
"score": 3
} |
#### File: Srep3/tests/test_SRLine.py
```python
from geometry.SR_line import LineSegment, divide_line, add_point_on_line, point_on_line_at_ratio
from geometry.SR_point import Point, EPS, PRECISION
import unittest
import math
SMALL_DISTANCE = 1./(10.**(PRECISION-5))
class TestLines(unittest.TestCase):
"""
"""
def setUp(self):
self.l0 = LineSegment(Point((0, 0)), Point((0, 0)))
self.l1 = LineSegment(Point((0, 1)), Point((0, 2)))
self.l2 = LineSegment(Point((0, 1)), Point((0, 2)))
self.l3 = LineSegment(Point((0, 1)), Point((0, 3)))
self.l4 = LineSegment(Point((0, 0)), Point((2, 2)))
self.l5 = LineSegment(Point((0, 2)), Point((0, 1)))
def test_repr(self):
self.assertEqual(self.l1, eval(self.l1.__repr__()))
self.assertEqual(self.l1.__str__(), 'LineSegment(Point((0.0, 1.0)), Point((0.0, 2.0)))')
def test_if_null(self):
# checking if LineSegment is null
l = LineSegment()
self.assertEqual(l.isnull(), False)
self.assertEqual(self.l1.isnull(), False)
def test_endpoint_Setting(self):
import copy
l1 = copy.deepcopy(self.l1)
p1 = Point((0, 56))
p2 = Point((48, 56))
l1._set_endpoints([p1, p2])
self.assertEqual(l1, LineSegment(p1, p2))
self.assertEqual(l1.loose_equal(LineSegment(p2, p1)), True)
self.assertEqual(l1.loose_equal(LineSegment(p1, p2)), True)
def test_equality(self):
# equality
self.assertEqual(self.l1, self.l2)
self.assertNotEqual(self.l1, self.l3)
self.assertNotEqual(self.l1, self.l5)
self.assertEqual(self.l1.ij, (self.l1.i, self.l1.j))
def test_deltas(self):
self.assertEqual(self.l1.delta_x, 0)
self.assertEqual(self.l1.delta_y, 1)
self.assertEqual(self.l4.delta_x, 2)
self.assertEqual(self.l4.delta_y, 2)
def test_norm(self):
self.assertEqual(self.l1.norm, 1)
self.assertEqual(self.l3.norm, 2)
self.assertEqual(self.l4.norm, math.sqrt(8))
def test_unitvector(self):
self.assertTupleEqual(self.l1.unitvector, (0, 1))
self.assertTupleEqual(self.l0.unitvector, (0, 0))
self.assertTupleEqual(self.l4.unitvector, (1 / math.sqrt(2), 1 / math.sqrt(2)))
def test_direction(self):
l5 = LineSegment(Point((0, 0)), Point((1, 0)))
l6 = LineSegment(Point((0, 0)), Point((1, 1)))
l7 = LineSegment(Point((0, 0)), Point((0, 1)))
l8 = LineSegment(Point((0, 0)), Point((-1, 1)))
l9 = LineSegment(Point((0, 0)), Point((-1, 0)))
l10 = LineSegment(Point((0, 0)), Point((-1, -1)))
l11 = LineSegment(Point((0, 0)), Point((0, -1)))
l12 = LineSegment(Point((0, 0)), Point((1, -1)))
l13 = LineSegment(Point((0, 0)), Point((0, 0)))
self.assertEqual(l5.direction, 0)
self.assertEqual(l6.direction, 45)
self.assertEqual(l7.direction, 90)
self.assertEqual(l8.direction, 135)
self.assertEqual(l9.direction, 180)
self.assertEqual(l10.direction, 225)
self.assertEqual(l11.direction, 270)
self.assertEqual(l12.direction, 315)
self.assertEqual(l13.direction, 0)
def test_validity(self):
# testing for validity
l1 = LineSegment(Point((0, 0)), Point((0, 0)))
self.assertEqual(l1.isvalid, True)
l1 = LineSegment((0, 0), (0, 0))
self.assertEqual(l1.isvalid, False)
l1 = LineSegment(Point(), Point())
self.assertEqual(l1.isvalid, False)
l1 = LineSegment(Point((0, 0)), Point((0, 1)))
self.assertEqual(l1.isvalid, True)
def test_is_internal_point(self):
# checking if a point is internal
l1 = LineSegment(Point((0, 0)), Point((0, 1)))
self.assertEqual(l1.is_internal_point(Point((0, 2))), False)
self.assertEqual(l1.is_internal_point(Point((0, 0.5))), True)
def test_point_on_line(self):
l1 = LineSegment(Point((0, 0)), Point((0, 1)))
self.assertEqual(l1.is_point_on_line(Point((1, 2))), False)
self.assertEqual(l1.is_point_on_line(Point((0, 2))), True)
self.assertEqual(l1.is_point_on_line(Point((0, 0.5))), True)
self.assertEqual(l1.is_point_on_line(Point((0, 1))), True)
self.assertEqual(l1.is_point_on_line(Point((0, -1))), True)
def test_angle_from_start_point(self):
l1 = LineSegment(Point((0, 0)), Point((0, 1)))
self.assertEqual(l1.angle_from_start_point(Point((1, 0))), 90)
# self.assertEqual(l1.angle_from_end_point(Point((1, 0))), 225)
def test_line_point_distance(self):
l1 = LineSegment(Point((0, 0)), Point((0, 1)))
self.assertEqual(l1.line_point_distance(Point((1, 1))), 1)
l1 = LineSegment(Point((0, 0)), Point((1, 1)))
self.assertAlmostEqual(l1.line_point_distance(Point((0, 1))), math.sqrt(2) / 2, delta=EPS)
def test_segments_parallel(self):
l1 = LineSegment(Point((0, 0)), Point((1, 1)))
l2 = LineSegment(Point((1, 2)), Point((2, 3)))
self.assertEqual(l1.is_parallel(other=l2), True)
l2 = LineSegment(Point((0, 0)), Point((1, 1)))
self.assertEqual(l1.is_parallel(other=l2), True)
l2 = LineSegment(Point((5, 5)), Point((8, 9)))
self.assertEqual(l1.is_parallel(other=l2), False)
self.assertEqual(l1.is_intersecting(other=l2), True)
l2 = LineSegment(Point((5, 5)), Point((4, 4)))
self.assertEqual(l1.is_parallel(other=l2), True)
self.assertEqual(l1.is_intersecting(other=l2), False)
def test_intersection_of_lines(self):
l1 = LineSegment(Point((0, 0)), Point((2, 2)))
l2 = LineSegment(Point((0, 2)), Point((2, 0)))
self.assertEqual(l1.intersection_point(l2), Point((1, 1)))
l1 = LineSegment(Point((0, 0)), Point((2, 2)))
l2 = LineSegment(Point((1, 0)), Point((3, 2)))
self.assertEqual(l1.intersection_point(l2), None)
l1 = LineSegment(Point((0, 0)), Point((2, 2)))
l2 = LineSegment(Point((3, 0)), Point((4, 1)))
self.assertEqual(l1.intersection_point(l2), None)
l1 = LineSegment(Point((0, 0)), Point((15, 4)))
l2 = LineSegment(Point((0, 2)), Point((2, 0)))
self.assertEqual(l1.intersection_point(l2), Point((1.5789473684210527, 0.42105263157894735)))
def test_common_part(self):
# parallel cases
# not collinear
l1 = LineSegment(Point((0, 0)), Point((2, 2)))
l2 = LineSegment(Point((1, 0)), Point((3, 2)))
self.assertIsNone(l1.common_part(l2))
# loose equal A
l1 = LineSegment(Point((0, 0)), Point((2, 2)))
l2 = LineSegment(Point((2, 2)), Point((0, 0)))
self.assertEqual(l1.common_part(l2), l1)
# equal
l1 = LineSegment(Point((0, 0)), Point((2, 2)))
l2 = LineSegment(Point((0, 0)), Point((2, 2)))
self.assertEqual(l1.common_part(l2), l1)
# 1a: both are internal, as l2 is inside l1
l1 = LineSegment(Point((0, 0)), Point((5, 5)))
l2 = LineSegment(Point((2, 2)), Point((3, 3)))
self.assertEqual(l1.common_part(l2), l2)
# 1b: both are internal, as l1 is inside l2
l1 = LineSegment(Point((0, 0)), Point((5, 5)))
l2 = LineSegment(Point((-2, -2)), Point((13, 13)))
self.assertEqual(l1.common_part(l2), l1)
# 2a: one endpoint touches and there is overlap
l1 = LineSegment(Point((0, 0)), Point((5, 5)))
l2 = LineSegment(Point((0, 0)), Point((3, 3)))
self.assertEqual(l1.common_part(l2), l2)
# 2b: one endpoint touches and there is NO overlap
l1 = LineSegment(Point((0, 0)), Point((5, 5)))
l2 = LineSegment(Point((0, 0)), Point((-3, -3)))
self.assertEqual(l1.common_part(l2), Point((0, 0)))
# 3a: both endpoints of l1 are inside l2
l1 = LineSegment(Point((0, 0)), Point((5, 5)))
l2 = LineSegment(Point((1, 1)), Point((2, 2)))
self.assertEqual(l1.common_part(l2), l2)
# 3b: both endpoints of l2 are inside l1
l1 = LineSegment(Point((0, 0)), Point((5, 5)))
l2 = LineSegment(Point((-1, -1)), Point((12, 12)))
self.assertEqual(l1.common_part(l2), l1)
# 4a: partial overlap, one point inside, one outside
l1 = LineSegment(Point((0, 0)), Point((5, 5)))
l2 = LineSegment(Point((-1, -1)), Point((3, 3)))
self.assertEqual(l1.common_part(l2), LineSegment(Point((0, 0)), Point((3, 3))))
# 4b: partial overlap, one point inside, one outside
l1 = LineSegment(Point((0, 0)), Point((5, 5)))
l2 = LineSegment(Point((4, 4)), Point((13, 13)))
self.assertEqual(l1.common_part(l2), LineSegment(Point((4, 4)), Point((5, 5))))
self.assertNotEqual(l1.common_part(l2), LineSegment(Point((5, 5)), Point((4, 4))))
# non-parallel cases
# intersection on the segments
l1 = LineSegment(Point((0, 0)), Point((5, 5)))
l2 = LineSegment(Point((0, 5)), Point((5, 0)))
self.assertEqual(l1.common_part(l2), Point((2.5, 2.5)))
# intersection on the endpoints
l1 = LineSegment(Point((0, 0)), Point((5, 5)))
l2 = LineSegment(Point((5, 5)), Point((8, 0)))
self.assertEqual(l1.common_part(l2), Point((5, 5)))
# intersection somewhere
l1 = LineSegment(Point((0, 0)), Point((5, 5)))
l2 = LineSegment(Point((6, 6)), Point((6, 10)))
self.assertEqual(l1.common_part(l2), None)
def test_common_part_type(self):
# parallel but not collinear - no common part
l1 = LineSegment(Point((0, 0)), Point((0, 5)))
l2 = LineSegment(Point((1, 0)), Point((1, 6)))
self.assertEqual(l1.common_part_type(l2), None)
# total overlap - loose equal - common part is a LineSegment
l1 = LineSegment(Point((0, 0)), Point((0, 5)))
l2 = LineSegment(Point((0, 5)), Point((0, 0)))
self.assertEqual(l1.common_part_type(l2), 'LineSegment')
# intersection on line - common part is a Point
l1 = LineSegment(Point((0, 0)), Point((0, 5)))
l2 = LineSegment(Point((-1, 3)), Point((1, 4)))
self.assertEqual(l1.common_part_type(l2), 'Point')
def test_point_which_side(self):
l1 = LineSegment(Point((0, 0)), Point((2, 2)))
self.assertEqual(l1.is_point_to_right(p=Point((1, 1))), False)
self.assertEqual(l1.is_point_to_right(p=Point((2, 0))), True)
self.assertEqual(l1.is_point_to_right(p=Point((2.1, 1.9))), True)
self.assertEqual(l1.is_point_to_right(p=Point((2.+SMALL_DISTANCE, 2-SMALL_DISTANCE))), True)
self.assertEqual(l1.is_point_to_right(p=Point((2.-SMALL_DISTANCE, 2))), False)
self.assertEqual(l1.is_point_to_right(p=Point((-2, 0))), False)
self.assertEqual(l1.is_point_on_line(p=Point((1, 1))), True)
self.assertEqual(l1.is_point_to_left(p=Point((1, 1))), False)
self.assertEqual(l1.is_point_to_left(p=Point((2, 0))), False)
self.assertEqual(l1.is_point_to_left(p=Point((2.1, 2))), False)
self.assertEqual(l1.is_point_to_left(p=Point((1.5, 2))), True)
self.assertEqual(l1.is_point_to_left(p=Point((-2, 0))), True)
l1 = LineSegment(Point((0, 0)), Point((0, 2)))
self.assertEqual(l1.is_point_to_right(p=Point((1, 1))), True)
self.assertEqual(l1.is_point_to_right(p=Point((-2, 0))), False)
self.assertEqual(l1.is_point_to_right(p=Point((SMALL_DISTANCE, 2))), True)
self.assertEqual(l1.is_point_to_right(p=Point((-SMALL_DISTANCE, 2))), False)
self.assertEqual(l1.is_point_to_right(p=Point((0, 3))), False)
self.assertEqual(l1.is_point_to_left(p=Point((1, 1))), False)
self.assertEqual(l1.is_point_to_left(p=Point((-2, 0))), True)
self.assertEqual(l1.is_point_to_left(p=Point((SMALL_DISTANCE, 2))), False)
# self.assertEqual(l1.is_point_to_left(p=Point((-EPS, 2))), True)
self.assertEqual(l1.is_point_to_left(p=Point((0, 3))), False)
def test_cross_product(self):
l1 = LineSegment(Point((0, 0)), Point((0, 2)))
l2 = LineSegment(Point((0, 0)), Point((2, 0)))
self.assertEqual(l1.crossproduct(l2), -4)
self.assertEqual(l1.crossproduct_commonstart(l2), -4)
self.assertEqual(l2.crossproduct_commonstart(l1), 4)
self.assertEqual(l2.crossproduct_commonstart(l2), 0)
def test_check_overlapping(self):
# testing lines are overlapping
l1 = LineSegment(Point((0, 0)), Point((0, 2)))
l2 = LineSegment(Point((0, 0)), Point((2, 0)))
self.assertTupleEqual(l1.overlap(l2), (False, None))
l1 = LineSegment(Point((0, 0)), Point((0, 2)))
l2 = LineSegment(Point((0, 3)), Point((0, 4)))
self.assertTupleEqual(l1.overlap(l2), (False, None))
l1 = LineSegment(Point((0, -99)), Point((0, 99)))
l2 = LineSegment(Point((0, -4)), Point((0, -2)))
# self.assertTupleEqual(l1.overlap(l2), (True, 2))
self.assertTupleEqual(l1.overlap(l2), (True, LineSegment(Point((0.0, -4.0)), Point((0.0, -2.0)))))
l1 = LineSegment(Point((0, 0)), Point((0, 99)))
l2 = LineSegment(Point((0, -4)), Point((0, 101)))
# self.assertTupleEqual(l1.overlap(l2), (True, 99))
self.assertTupleEqual(l1.overlap(l2), (True, LineSegment(Point((0.0, 0.0)), Point((0.0, 99.0)))))
l1 = LineSegment(Point((0, 0)), Point((0, 99)))
l2 = LineSegment(Point((0, 80)), Point((0, 101)))
# self.assertTupleEqual(l1.overlap(l2), (True, 19))
self.assertTupleEqual(l1.overlap(l2), (True, LineSegment(Point((0.0, 80.0)), Point((0.0, 99.0)))))
l1 = LineSegment(Point((0, 0)), Point((0, 99)))
l2 = LineSegment(Point((0, 100)), Point((0, 101)))
self.assertTupleEqual(l1.overlap(l2), (False, None))
l1 = LineSegment(Point((0, 0)), Point((0, 9)))
l2 = LineSegment(Point((0, 1)), Point((0, -2)))
# self.assertTupleEqual(l1.overlap(l2), (True, 1))
self.assertTupleEqual(l1.overlap(l2), (True, LineSegment(Point((0.0, 0.0)), Point((0.0, 1.0)))))
# parallel but not collinear
l1 = LineSegment(Point((0, 0)), Point((0, 5)))
l2 = LineSegment(Point((1, 0)), Point((1, 6)))
self.assertTupleEqual(l1.overlap(l2), (False, None))
# total overlap - loose equal
l1 = LineSegment(Point((0, 0)), Point((0, 5)))
l2 = LineSegment(Point((0, 5)), Point((0, 0)))
self.assertTupleEqual(l1.overlap(l2), (True, l1))
def test_if_segments_of_opposite_direction(self):
# testing opposite direction
l1 = LineSegment(Point((0, 0)), Point((0, 2)))
l2 = LineSegment(Point((0, 0)), Point((0, -3)))
self.assertEqual(l1.opposite_direction(l2), True)
l2 = LineSegment(Point((0, 0)), Point((0, 5)))
self.assertEqual(l1.opposite_direction(l2), False)
l2 = LineSegment(Point((0, 0)), Point((1, 5)))
self.assertEqual(l1.opposite_direction(l2), False)
def test_angle_between_lines(self):
# testing angle between lines
l1 = LineSegment(Point((0, 0)), Point((0, 2)))
l2 = LineSegment(Point((0, -1)), Point((0, -3)))
self.assertAlmostEqual(l1.angle_between_lines(l2), 180, delta=EPS)
l1 = LineSegment(Point((0, 0)), Point((0, 1)))
l2 = LineSegment(Point((0, 0)), Point((0, -1)))
self.assertAlmostEqual(l1.angle_between_lines(l2), 180, delta=EPS)
l2 = LineSegment(Point((0, 0)), Point((1, 1)))
self.assertAlmostEqual(l1.angle_between_lines(l2), 45, delta=EPS)
l2 = LineSegment(Point((0, 0)), Point((1, -1)))
self.assertAlmostEqual(l1.angle_between_lines(l2), 135, delta=EPS)
def test_continuity(self):
# testing continuity
l1 = LineSegment(Point((0, 0)), Point((0, 2)))
l2 = LineSegment(Point((0, 2)), Point((0, -3)))
self.assertEqual(l1.continuous(l2), True)
l2 = LineSegment(Point((0, 0)), Point((0, 5)))
self.assertEqual(l1.continuous(l2), False)
l2 = LineSegment(Point((0, 1)), Point((1, 5)))
self.assertEqual(l1.continuous(l2), False)
def test_intersection(self):
# testing intersection_point on segment
# l1 and l2 overlap
l1 = LineSegment(Point((0., 0.)), Point((0., 2.)))
l2 = LineSegment(Point((0., 2.)), Point((0., -3.)))
self.assertEqual(l1.intersection_on_segment(l2), False)
# l4 ends on l3
l3 = LineSegment(Point((-2.0, 2.0)), Point((4.0, 2.0)))
l4 = LineSegment(Point((-0.5, 2.0)), Point((-0.5, 1.9)))
self.assertEqual(l3.intersection_on_segment(l4), True)
# l3.i == l4.j
l3 = LineSegment(Point((-2.0, 2.0)), Point((4.0, 2.0)))
l4 = LineSegment(Point((-2.0, 1.9)), Point((-2.0, 2.0)))
self.assertEqual(l3.intersection_on_endpoint(l4), True)
# l1 and l2 intersect
l1 = LineSegment(Point((0., 0.)), Point((0., 2.))) # same as previously
l2 = LineSegment(Point((-1, 1)), Point((1, 2)))
self.assertEqual(l1.intersection_on_segment(l2), True)
# l1 and l2 intersect but not on the segment
l2 = LineSegment(Point((-1, 10)), Point((1, 10)))
self.assertEqual(l1.intersection_point(l2), Point((-0.0, 10.0)))
self.assertEqual(l1.intersection_on_segment(l2), False)
# l1 and l2 are collinear but do not overlap or touch
l2 = LineSegment(Point((0, 3)), Point((0, 5)))
self.assertEqual(l1.intersection_point(l2), None)
self.assertEqual(l1.intersection_on_segment(l2), False)
# l1 and l2 are parallel but not collinear
l2 = LineSegment(Point((1, 3)), Point((1, 5)))
self.assertEqual(l1.intersection_point(l2), None)
self.assertEqual(l1.intersection_on_segment(l2), False)
# l1.i == l2.i
l2 = LineSegment(Point((0, 0)), Point((1, 5)))
self.assertEqual(l1.intersection_point(l2), Point((0.0, 0.0)))
self.assertEqual(l1.intersection_on_segment(l2), False)
self.assertEqual(l1.intersection_on_endpoint(l2), True)
self.assertEqual(l1.touch(l2), True)
def test_reversal(self):
# testing line reversal
self.assertTupleEqual(self.l1.line_reversed.ij, LineSegment(self.l1.j, self.l1.i).ij)
l1 = LineSegment(Point((0, 3)), Point((0, 5)))
l2 = LineSegment(Point((0, 5)), Point((0, 3)))
l1.reverse() # reverses
self.assertEqual(l1, l2)
def test_move(self):
# testing line move
l1 = LineSegment(Point((0, 3)), Point((0, 5)))
l2 = LineSegment(Point((1, 4)), Point((1, 6)))
l1.move(x0=1, y0=1)  # moves by (1, 1)
self.assertEqual(l1, l2)
def test_midpoint(self):
l1 = LineSegment(Point((0, 3)), Point((0, 5)))
self.assertEqual(l1.midpoint, (0, 11/3.))
def test_bounding_box(self):
l1 = LineSegment(Point((-2, 3)), Point((0, 5)))
self.assertEqual(l1.bounding_box, ((-2, 0), (3, 5)))
def test_loose_equality(self):
l1 = LineSegment(Point((0, 3)), Point((0, 5)))
l2 = LineSegment(Point((0, 5)), Point((0, 3)))
l3 = LineSegment(Point((0, 3)), Point((0, 5)))
self.assertEqual(l1.loose_equal(l2), True)
self.assertEqual(l1.loose_equal(l3), True)
def test_line_division(self):
# line division test
self.line = LineSegment(Point((0, 0)), Point((4, 3)))
_retval = tuple([LineSegment(Point((0.0, 0.0)), Point((1.33333333, 1.0))),
LineSegment(Point((1.33333333, 1.0)), Point((2.66666667, 2.0))),
LineSegment(Point((2.66666667, 2.0)), Point((4.0, 3.0)))])
self.assertTupleEqual(_retval, tuple(divide_line(self.line, 3)), True)
def test_line_division_by_ratio(self):
_line = LineSegment(Point((0, 0)), Point((4, 3)))
by_05 = Point((2, 1.5))
self.assertEqual(point_on_line_at_ratio(line=_line, ratio=0.5), by_05)
by_025 = Point((1, 0.75))
self.assertEqual(point_on_line_at_ratio(line=_line, ratio=0.25), by_025)
by_01 = Point((0.4, 0.3))
self.assertEqual(point_on_line_at_ratio(line=_line, ratio=0.1), by_01)
def test_insert_point(self):
# Point addition test
self.line = LineSegment(Point((0, 0)), Point((4, 3)))
_retval = add_point_on_line(self.line, Point((2, 1.5)))
self.assertTupleEqual((LineSegment(Point((0, 0)), Point((2, 1.5))), LineSegment(Point((2, 1.5)), Point((4, 3)))),
_retval, True)
``` |
{
"source": "jkbgbr/wxmplot",
"score": 3
} |
#### File: wxmplot/examples/rgb_image.py
```python
import wx
from numpy import exp, random, arange, outer, array
from wxmplot import ImageFrame
def gauss2d(x, y, x0, y0, sx, sy):
return outer( exp( -(((y-y0)/float(sy))**2)/2),
exp( -(((x-x0)/float(sx))**2)/2) )
if __name__ == '__main__':
app = wx.App()
frame = ImageFrame(mode='rgb')
ny, nx = 350, 400
x = arange(nx)
y = arange(ny)
ox = x / 100.0
oy = -1 + y / 200.0
red = 0.02 * random.random(size=nx*ny).reshape(ny, nx)
red = red + (6.0*gauss2d(x, y, 90, 76, 5, 6) +
3.0*gauss2d(x, y, 165, 190, 70, 33) +
2.0*gauss2d(x, y, 180, 100, 12, 6))
green = 0.3 * random.random(size=nx*ny).reshape(ny, nx)
green = green + (5.0*gauss2d(x, y, 173, 98, 4, 9) +
3.2*gauss2d(x, y, 270, 230, 78, 63))
blue = 0.1 * random.random(size=nx*ny).reshape(ny, nx)
blue = blue + (2.9*gauss2d(x, y, 240, 265, 78, 23) +
3.5*gauss2d(x, y, 185, 95, 22, 11) +
7.0*gauss2d(x, y, 220, 310, 40, 133))
dat = array([red, green, blue]).swapaxes(2, 0)
frame.display(dat, x=ox, y=oy,
subtitles={'red':'Red Image', 'green': 'Green Blob', 'blue': 'other'})
frame.Show()
app.MainLoop()
```
#### File: wxmplot/examples/stripchart.py
```python
import time
import numpy as np
import sys
import wx
from wx.lib import masked
from floatcontrol import FloatCtrl
from wxmplot import PlotPanel
def next_data():
"simulated data"
t0 = time.time()
lt = time.localtime(t0)
tmin, tsec = lt[4],lt[5]
u = np.random.random()
v = np.random.random()
x = np.sin( (u + tsec)/3.0) + tmin/30. + v/5.0
return t0, x
class StripChartFrame(wx.Frame):
def __init__(self, parent, ID, **kws):
kws["style"] = wx.DEFAULT_FRAME_STYLE|wx.RESIZE_BORDER|wx.TAB_TRAVERSAL
wx.Frame.__init__(self, parent, ID, '',
wx.DefaultPosition, wx.Size(-1,-1), **kws)
self.SetTitle("wxmplot StripChart Demo")
self.tmin = 15.0
self.SetFont(wx.Font(12,wx.SWISS,wx.NORMAL,wx.BOLD,False))
menu = wx.Menu()
menu_exit = menu.Append(-1, "E&xit", "Terminate the program")
menuBar = wx.MenuBar()
menuBar.Append(menu, "&File");
self.SetMenuBar(menuBar)
self.Bind(wx.EVT_MENU, self.OnExit, menu_exit)
self.Bind(wx.EVT_CLOSE, self.OnExit)
sbar = self.CreateStatusBar(2,wx.CAPTION)
sfont = sbar.GetFont()
sfont.SetWeight(wx.BOLD)
sfont.SetPointSize(11)
sbar.SetFont(sfont)
self.SetStatusWidths([-3,-1])
self.SetStatusText('',0)
mainsizer = wx.BoxSizer(wx.VERTICAL)
btnpanel = wx.Panel(self, -1)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
b_on = wx.Button(btnpanel, -1, 'Start', size=(-1,-1))
b_off = wx.Button(btnpanel, -1, 'Stop', size=(-1,-1))
b_on.Bind(wx.EVT_BUTTON, self.onStartTimer)
b_off.Bind(wx.EVT_BUTTON, self.onStopTimer)
tlabel = wx.StaticText(btnpanel, -1, ' Time range:')
self.time_range = FloatCtrl(btnpanel, size=(100, -1),
value=abs(self.tmin), precision=1)
btnsizer.Add(b_on, 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER|wx.LEFT, 0)
btnsizer.Add(b_off, 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER|wx.LEFT, 0)
btnsizer.Add(tlabel, 1, wx.GROW|wx.ALL|wx.ALIGN_LEFT|wx.ALIGN_CENTER|wx.LEFT, 0)
btnsizer.Add(self.time_range, 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER|wx.LEFT, 0)
btnpanel.SetSizer(btnsizer)
btnsizer.Fit(btnpanel)
self.plotpanel = PlotPanel(self, messenger=self.write_message)
self.plotpanel.BuildPanel()
self.plotpanel.set_xlabel('Time from Present (s)')
mainsizer.Add(btnpanel, 0,
wx.GROW|wx.ALIGN_LEFT|wx.ALIGN_CENTER|wx.LEFT, 0)
mainsizer.Add(self.plotpanel, 1,
wx.GROW|wx.ALL|wx.ALIGN_LEFT|wx.ALIGN_CENTER|wx.LEFT, 0)
self.SetSizer(mainsizer)
mainsizer.Fit(self)
self.Bind(wx.EVT_TIMER, self.onTimer)
self.timer = wx.Timer(self)
self.count = 0
self.Refresh()
wx.CallAfter(self.onStartTimer)
def write_message(self, msg, panel=0):
"""write a message to the Status Bar"""
self.SetStatusText(msg, panel)
def onStartTimer(self,event=None):
self.count = 0
t0,y0 = next_data()
self.ylist = [y0]
self.tlist = [t0]
self.tmin_last = -10000
self.time0 = time.time()
self.timer.Start(25)
def onStopTimer(self,event=None):
self.timer.Stop()
def onTimer(self, event):
self.count += 1
etime = time.time() - self.time0
self.tmin = float(self.time_range.GetValue())
t1, y1 = next_data()
self.tlist.append(t1)
self.ylist.append(y1)
tdat = np.array(self.tlist) - t1
mask = np.where(tdat > -abs(self.tmin))
ydat = np.array(self.ylist)
n = len(self.ylist)
if n <= 2:
self.plotpanel.plot(tdat, ydat)
else:
self.plotpanel.update_line(0, tdat, ydat, draw=True)
self.write_message("update %i points in %8.4f s" % (n,etime))
lims = self.plotpanel.get_viewlimits()
try:
ymin, ymax = ydat[mask].min(), ydat[mask].max()
except:
ymin, ymax = ydat.min(), ydat.max()
yrange = abs(ymax-ymin)
ymin -= yrange*0.05
ymax += yrange*0.05
if (ymin < lims[2] or ymax > lims[3] ):
self.plotpanel.set_xylims((-self.tmin, 0, ymin, ymax))
def OnAbout(self, event):
dlg = wx.MessageDialog(self, "wxmplot example: stripchart app",
"About WXMPlot test", wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def OnExit(self, event):
self.Destroy()
app = wx.App()
f = StripChartFrame(None,-1)
f.Show(True)
app.MainLoop()
#
```
#### File: wxmplot/tests/basic_test.py
```python
from numpy import linspace, sin, cos, random
def test_import():
success = False
try:
import wxmplot
success = True
except:
pass
assert(success)
```
#### File: wxmplot/wxmplot/imageconf.py
```python
import os
import wx
import wx.lib.agw.flatnotebook as flat_nb
import wx.lib.scrolledpanel as scrolled
import wx.lib.colourselect as csel
from math import log10
import numpy as np
import matplotlib.cm as cmap
from matplotlib.ticker import FuncFormatter
from .colors import register_custom_colormaps, hexcolor, hex2rgb, mpl_color
from .config import bool_ifnotNone, ifnotNone
from .plotconfigframe import FNB_STYLE, autopack
from .utils import LabeledTextCtrl, SimpleText, Check, Choice, HLine, pack, FloatSpin, MenuItem
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
cm_names = register_custom_colormaps()
ColorMap_List = []
for cm in ('gray', 'coolwarm', 'viridis', 'inferno', 'plasma', 'magma', 'red',
'green', 'blue', 'magenta', 'yellow', 'cyan', 'Reds', 'Greens',
'Blues', 'cool', 'hot', 'copper', 'red_heat', 'green_heat',
'blue_heat', 'spring', 'summer', 'autumn', 'winter', 'ocean',
'terrain', 'jet', 'stdgamma', 'hsv', 'Accent', 'Spectral', 'PiYG',
'PRGn', 'Spectral', 'YlGn', 'YlGnBu', 'RdBu', 'RdPu', 'RdYlBu',
'RdYlGn'):
if cm in cm_names or hasattr(cmap, cm):
ColorMap_List.append(cm)
Contrast_List = ['None', '0.01', '0.02', '0.05', '0.1', '0.2', '0.5', '1.0',
'2.0', '5.0']
Contrast_NDArray = np.array((-1.0, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1., 2, 5.))
Interp_List = ('nearest', 'bicubic', 'quadric', 'gaussian', 'kaiser',
'bessel', 'mitchell', 'catrom', 'spline16', 'spline36',
'bilinear', 'hanning', 'hamming', 'hermite', 'sinc', 'lanczos')
Slices_List = ('None', 'X', 'Y')
RGB_COLORS = ('red', 'green', 'blue')
class ImageConfig:
def __init__(self, axes=None, fig=None, canvas=None):
self.axes = axes
self.fig = fig
self.canvas = canvas
self.cmap = [cmap.gray, cmap.gray, cmap.gray]
self.cmap_reverse = False
self.interp = 'nearest'
self.show_axis = False
self.show_grid = False
self.grid_color = '#807030'
self.grid_alpha = 0.25
self.log_scale = False
self.flip_ud = False
self.flip_lr = False
self.rot_level = 0
self.contrast_level = 0
self.datalimits = [None, None, None, None]
self.cmap_lo = [0, 0, 0]
self.cmap_range = 1000
self.cmap_hi = [1000, 1000, 1000]
self.tricolor_bg = 'black'
self.tricolor_mode = 'rgb'
self.int_lo = [0, 0, 0]
self.int_hi = [1, 1, 1]
self.data = None
self.xdata = None
self.ydata = None
self.xlab = 'X'
self.ylab = 'Y'
self.indices = None
self.title = 'image'
self.style = 'image'
self.highlight_areas = []
self.ncontour_levels = 10
self.contour_levels = None
self.contour_labels = True
self.cursor_mode = 'zoom'
self.zoombrush = wx.Brush('#040410', wx.SOLID)
self.zoompen = wx.Pen('#101090', 3, wx.SOLID)
self.zoom_lims = []
self.slices = Slices_List[0]
self.slice_xy = -1, -1
self.slice_width = 1
self.slice_onmotion = False
self.scalebar_show = False
self.scalebar_showlabel = False
self.scalebar_label = ''
self.scalebar_pos = 5, 5
self.scalebar_size = 1, 1
self.scalebar_pixelsize = None, None
self.scalebar_units = 'mm'
self.scalebar_color = '#EEEE99'
self.set_formatters()
def set_colormap(self, name, reverse=False, icol=0):
self.cmap_reverse = reverse
if reverse and not name.endswith('_r'):
name = name + '_r'
elif not reverse and name.endswith('_r'):
name = name[:-2]
self.cmap[icol] = _cmap_ = cmap.get_cmap(name)
if hasattr(self, 'contour'):
xname = 'gray'
if name == 'gray_r':
xname = 'Reds_r'
elif name == 'gray':
xname = 'Reds'
elif name.endswith('_r'):
xname = 'gray_r'
self.contour.set_cmap(getattr(cmap, xname))
if hasattr(self, 'image'):
self.image.set_cmap(self.cmap[icol])
if hasattr(self, 'highlight_areas'):
if hasattr(self.cmap[icol], '_lut'):
rgb = [int(i*240)^255 for i in self.cmap[icol]._lut[0][:3]]
col = '#%02x%02x%02x' % (rgb[0], rgb[1], rgb[2])
for area in self.highlight_areas:
for w in area.collections + area.labelTexts:
w.set_color(col)
def flip_vert(self):
"flip image along vertical axis (up/down)"
self.data = np.flipud(self.data)
if self.ydata is not None:
self.ydata = self.ydata[::-1]
self.flip_ud = not self.flip_ud
def flip_horiz(self):
"flip image along horizontal axis (left/right)"
self.data = np.fliplr(self.data)
if self.xdata is not None:
self.xdata = self.xdata[::-1]
self.flip_lr = not self.flip_lr
def rotate90(self, event=None):
"rotate 90 degrees, CW"
if self.xdata is not None:
self.xdata = self.xdata[::-1]
if self.ydata is not None:
self.ydata = self.ydata[:]
self.xdata, self.ydata = self.ydata, self.xdata
self.xlab, self.ylab = self.ylab, self.xlab
self.data = np.rot90(self.data)
self.rot_level += 1
if self.rot_level == 4:
self.rot_level = 0
def set_formatters(self):
if self.axes is not None:
self.axes.xaxis.set_major_formatter(FuncFormatter(self.xformatter))
self.axes.yaxis.set_major_formatter(FuncFormatter(self.yformatter))
def xformatter(self, x, pos):
" x-axis formatter "
return self._format(x, pos, dtype='x')
def yformatter(self, y, pos):
" y-axis formatter "
return self._format(y, pos, dtype='y')
def _format(self, x, pos, dtype='x'):
""" home built tick formatter to use with FuncFormatter():
x value to be formatted
type 'x' or 'y' or 'y2' to set which list of ticks to get
also sets self._yfmt/self._xfmt for statusbar
"""
fmt = '%1.5g'
if dtype == 'y':
ax = self.axes.yaxis
dat = self.ydata
if dat is None:
dat = np.arange(self.data.shape[0])
else:
ax = self.axes.xaxis
dat = self.xdata
if dat is None:
dat = np.arange(self.data.shape[1])
ticks = [0,1]
onep = 1.00001
try:
dtick = 0.1 * onep * (dat.max() - dat.min())
except:
dtick = 0.2 * onep
try:
ticks = ax.get_major_locator()()
except:
ticks = [0, 1]
try:
dtick = abs(dat[int(ticks[1])] - dat[int(ticks[0])]) * onep
except:
pass
if dtick > 89999:
fmt = '%.2e'
else:
fmt = '%%1.%df' % max(0, -round(log10(0.75*dtick)))
try:
s = fmt % dat[int(x)]
except:
s = ''
s = s.strip()
s = s.replace('+', '')
while s.find('e0')>0:
s = s.replace('e0','e')
while s.find('-0')>0:
s = s.replace('-0','-')
return s
def relabel(self):
" re draw labels (title, x,y labels)"
pass
def set_zoombrush(self,color, style):
self.zoombrush = wx.Brush(color, style)
def set_zoompen(self,color, style):
self.zoompen = wx.Pen(color, 3, style)
def tricolor_white_bg(self, img):
"""transforms image from RGB with (0,0,0)
showing black to RGB with 0,0,0 showing white
takes the Red intensity and sets
the new intensity to go
from (0, 0.5, 0.5) (for Red=0) to (0, 0, 0) (for Red=1)
and so on for the Green and Blue maps.
Thus the image will be transformed from
old intensity new intensity
(0.0, 0.0, 0.0) (black) (1.0, 1.0, 1.0) (white)
(1.0, 1.0, 1.0) (white) (0.0, 0.0, 0.0) (black)
(1.0, 0.0, 0.0) (red) (1.0, 0.5, 0.5) (red)
(0.0, 1.0, 0.0) (green) (0.5, 1.0, 0.5) (green)
(0.0, 0.0, 1.0) (blue) (0.5, 0.5, 1.0) (blue)
"""
tmp = 0.5*(1.0 - (img - img.min())/(img.max() - img.min()))
out = tmp*0.0
out[:,:,0] = tmp[:,:,1] + tmp[:,:,2]
out[:,:,1] = tmp[:,:,0] + tmp[:,:,2]
out[:,:,2] = tmp[:,:,0] + tmp[:,:,1]
return out
def rgb2cmy(self, img, whitebg=False):
"""transforms image from RGB to CMY"""
tmp = img*1.0
if whitebg:
tmp = (1.0 - (img - img.min())/(img.max() - img.min()))
out = tmp*0.0
out[:,:,0] = (tmp[:,:,1] + tmp[:,:,2])/2.0
out[:,:,1] = (tmp[:,:,0] + tmp[:,:,2])/2.0
out[:,:,2] = (tmp[:,:,0] + tmp[:,:,1])/2.0
return out
def set_config(self, interp=None, colormap=None, reverse_colormap=None,
contrast_level=None, flip_ud=None, flip_lr=None,
rot=None, tricolor_bg=None, ncontour_levels=None,
title=None, style=None):
"""set configuration options:
interp, colormap, reverse_colormap, contrast_levels, flip_ud,
flip_lr, rot, tricolor_bg, ncontour_levels, title, style
"""
if interp is not None:
interp = interp.lower()
self.interp = interp if interp in Interp_List else self.interp
if colormap is not None:
colormap = colormap.lower()
if colormap.endswith('_r'):
reverse_colormap = True
colormap = colormap[:-2]
self.colormap = colormap if colormap in ColorMap_List else self.colormap
if contrast_level is not None:
self.contrast_level = float(contrast_level)
self.cmap_reverse = bool_ifnotNone(reverse_colormap, self.cmap_reverse)
self.flip_ud = bool_ifnotNone(flip_ud, self.flip_ud)
self.flip_lr = bool_ifnotNone(flip_lr, self.flip_lr)
self.rot = bool_ifnotNone(rot, self.rot)
if tricolor_bg is not None:
tricolor_bg = tricolor_bg.lower()
if tricolor_bg in ('black', 'white'):
self.tricolor_bg = tricolor_bg
if ncontour_levels is not None:
self.ncontour_levels = int(ncontour_levels)
if style is not None:
style = style.lower()
if style in ('image', 'contour'):
self.style = style
self.title = ifnotNone(title, self.title)
def get_config(self):
"""get dictionary of configuration options"""
out = {'reverse_colormap': self.cmap_reverse}
for attr in ('interp', 'colormap', 'contrast_levels', 'flip_ud',
'flip_lr', 'rot', 'tricolor_bg', 'ncontour_levels',
'title', 'style'):
out[attr] = getattr(self, attr)
return out
labstyle= wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ALL
class ImageConfigFrame(wx.Frame):
""" GUI Configure Frame for Images"""
def __init__(self, parent=None, config=None, trace_color_callback=None):
if config is None:
config = ImageConfig()
self.conf = config
self.parent = parent
self.canvas = self.conf.canvas
self.axes = self.canvas.figure.get_axes()
self.DrawPanel()
mbar = wx.MenuBar()
fmenu = wx.Menu()
MenuItem(self, fmenu, "Save Configuration\tCtrl+S",
"Save Configuration",
self.save_config)
MenuItem(self, fmenu, "Load Configuration\tCtrl+R",
"Load Configuration",
self.load_config)
mbar.Append(fmenu, 'File')
self.SetMenuBar(mbar)
def save_config(self, evt=None, fname='wxmplot.yaml'):
if not HAS_YAML:
return
file_choices = 'YAML Config File (*.yaml)|*.yaml'
dlg = wx.FileDialog(self, message='Save image configuration',
defaultDir=os.getcwd(),
defaultFile=fname,
wildcard=file_choices,
style=wx.FD_SAVE|wx.FD_CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
conf = self.conf.get_current_config()
ppath = os.path.abspath(dlg.GetPath())
with open(ppath, 'w') as fh:
fh.write("%s\n" % yaml.dump(conf))
def load_config(self, evt=None):
if not HAS_YAML:
return
file_choices = 'YAML Config File (*.yaml)|*.yaml'
dlg = wx.FileDialog(self, message='Read image configuration',
defaultDir=os.getcwd(),
wildcard=file_choices,
style=wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
conf = yaml.safe_load(open(os.path.abspath(dlg.GetPath()), 'r').read())
self.conf.load_config(conf)
def DrawPanel(self):
style = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, self.parent, -1, 'Configure Image', style=style)
conf = self.conf
self.SetFont(wx.Font(12,wx.SWISS,wx.NORMAL,wx.NORMAL,False))
self.SetBackgroundColour(hex2rgb('#FEFEFE'))
sizer = wx.GridBagSizer(2, 2)
irow = 0
bstyle=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ST_NO_AUTORESIZE
# contours
ctitle = SimpleText(self, 'Contours:', colour='#DD0000')
label = SimpleText(self, "# Levels:")
self.ncontours = FloatSpin(self, value=conf.ncontour_levels,
min_val=0, max_val=5000,
increment=1, digits=0, size=(60, -1),
action=self.onContourEvents)
self.showlabels = Check(self, label='Show Labels?',
default=conf.contour_labels,
action=self.onContourEvents)
sizer.Add(ctitle, (irow, 0), (1, 2), labstyle, 2)
irow += 1
sizer.Add(label, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.ncontours, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(self.showlabels, (irow, 2), (1, 1), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
# Grid
title = SimpleText(self, 'Image Grid:', colour='#DD0000')
label_gcolor = SimpleText(self, "Color:")
label_galpha = SimpleText(self, "Alpha:")
self.show_grid = Check(self, label='Show Grid with Labeled Axes?',
default=conf.show_grid,
action=self.onGridEvents)
self.grid_alpha = FloatSpin(self, value=conf.grid_alpha,
min_val=0, max_val=1,
increment=0.05, digits=3, size=(130, -1),
action=self.onGridEvents)
self.grid_color = csel.ColourSelect(self, -1, "",
mpl_color(conf.grid_color),
size=(50, -1))
self.grid_color.Bind(csel.EVT_COLOURSELECT, self.onGridEvents)
irow += 1
sizer.Add(title, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.show_grid, (irow, 1), (1, 1), labstyle, 2)
irow += 1
sizer.Add(label_gcolor, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.grid_color, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(label_galpha, (irow, 2), (1, 1), labstyle, 2)
sizer.Add(self.grid_alpha, (irow, 3), (1, 1), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
# X/Y Slices
title = SimpleText(self, 'X/Y Slices:', colour='#DD0000')
label_dir = SimpleText(self, "Direction:")
label_wid = SimpleText(self, "Width (pixels):")
self.slice_width = FloatSpin(self, value=conf.slice_width,
min_val=0, max_val=5000,
increment=1, digits=0, size=(60, -1),
action=self.onSliceEvents)
self.slice_dir = Choice(self, size=(90, -1),
choices=Slices_List,
action=self.onSliceEvents)
self.slice_dir.SetStringSelection(conf.slices)
self.slice_dynamic = Check(self,label='Slices Follow Mouse Motion?',
default=conf.slice_onmotion,
action=self.onSliceEvents)
irow += 1
sizer.Add(title, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.slice_dynamic, (irow, 1), (1, 2), labstyle, 2)
irow += 1
sizer.Add(label_dir, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.slice_dir, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(label_wid, (irow, 2), (1, 1), labstyle, 2)
sizer.Add(self.slice_width, (irow, 3), (1, 1), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
# Scalebar
ypos, xpos = conf.scalebar_pos
ysiz, xsiz = conf.scalebar_size
units = conf.scalebar_units
dshape = conf.data.shape
ystep, xstep = conf.scalebar_pixelsize
if xstep is None or ystep is None:
ystep, xstep = 1, 1
if conf.xdata is not None:
xstep = abs(np.diff(conf.xdata).mean())
if conf.ydata is not None:
ystep = abs(np.diff(conf.ydata).mean())
conf.scalebar_pixelsize = ystep, xstep
title = SimpleText(self, 'Scalebar:', colour='#DD0000')
lab_opts = dict(size=(120, -1))
color_label = SimpleText(self, 'Color: ')
xpos_label = SimpleText(self, 'X Position: ')
ypos_label = SimpleText(self, 'Y Position: ')
size_label = SimpleText(self, 'Scalebar Size: ')
pos_label = SimpleText(self, "Scalebar Position (pixels from lower left):")
width_label = SimpleText(self, 'Width (%s): ' % units)
height_label = SimpleText(self, 'Height (pixels): ')
pixsize_label = SimpleText(self, 'Pixel Size: ')
xpix_label = SimpleText(self, 'X pixelsize: ')
ypix_label = SimpleText(self, 'Y pixelsize: ')
self.pixunits = LabeledTextCtrl(self, value=conf.scalebar_units,
size=(80, -1),
labeltext='Units:',
action=self.onScalebarEvents)
self.show_scalebar = Check(self, label='Show Scalebar',
default=conf.scalebar_show,
action=self.onScalebarEvents)
self.show_label = Check(self, label='Show Label?',
default=conf.scalebar_showlabel,
action=self.onScalebarEvents)
stext = "Image Size: X=%d, Y=%d pixels" % (dshape[1], dshape[0])
scale_text = SimpleText(self, label=stext)
self.label = LabeledTextCtrl(self, value=conf.scalebar_label,
size=(150, -1),
labeltext='Label:',
action=self.onScalebarEvents)
self.color = csel.ColourSelect(self, -1, "",
mpl_color(conf.scalebar_color),
size=(50, -1))
self.color.Bind(csel.EVT_COLOURSELECT, self.onScalebarEvents)
opts = dict(min_val=0, increment=1, digits=0, size=(100, -1),
action=self.onScalebarEvents)
self.xpos = FloatSpin(self, value=xpos, max_val=dshape[1], **opts)
self.ypos = FloatSpin(self, value=ypos, max_val=dshape[0], **opts)
self.height = FloatSpin(self, value=ysiz, max_val=dshape[0], **opts)
opts['increment'] = xstep
opts['digits'] = max(1, 2 - int(np.log10(abs(xstep))))
self.width = FloatSpin(self, value=xsiz, max_val=dshape[1]*xstep, **opts)
opts['increment'] = 0.001
opts['digits'] = 5
self.xpix = FloatSpin(self, value=xstep, **opts)
self.ypix = FloatSpin(self, value=ystep, **opts)
irow += 1
sizer.Add(title, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(scale_text, (irow, 1), (1, 4), labstyle, 2)
irow += 1
sizer.Add(pixsize_label, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.pixunits.label, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(self.pixunits, (irow, 2), (1, 1), labstyle, 2)
irow += 1
sizer.Add(xpix_label, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.xpix, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(ypix_label, (irow, 2), (1, 1), labstyle, 2)
sizer.Add(self.ypix, (irow, 3), (1, 1), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
irow += 1
sizer.Add(size_label, (irow, 0), (1, 3), labstyle, 2)
irow += 1
sizer.Add(width_label, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.width, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(height_label, (irow, 2), (1, 1), labstyle, 2)
sizer.Add(self.height, (irow, 3), (1, 1), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
irow += 1
sizer.Add(pos_label, (irow, 0), (1, 3), labstyle, 2)
irow += 1
sizer.Add(xpos_label, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.xpos, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(ypos_label, (irow, 2), (1, 1), labstyle, 2)
sizer.Add(self.ypos, (irow, 3), (1, 1), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
irow += 1
sizer.Add(self.label.label, (irow, 0), (1, 1), labstyle, 2)
sizer.Add(self.label, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(color_label, (irow, 2), (1, 1), labstyle, 2)
sizer.Add(self.color, (irow, 3), (1, 1), labstyle, 2)
irow += 1
sizer.Add(self.show_scalebar, (irow, 1), (1, 1), labstyle, 2)
sizer.Add(self.show_label, (irow, 2), (1, 2), labstyle, 2)
irow += 1
sizer.Add(HLine(self, size=(500, -1)), (irow, 0), (1, 4), labstyle, 2)
autopack(self, sizer)
self.SetMinSize((500, 350))
xsiz, ysiz = self.GetBestSize()
self.SetSize((25*(1 + int(xsiz/25)), 25*(2 + int(ysiz/25))))
self.Show()
self.Raise()
def onGridEvents(self, event=None):
self.conf.show_grid = self.show_grid.IsChecked()
self.conf.grid_color = hexcolor(self.grid_color.GetValue()[:3])
self.conf.grid_alpha = self.grid_alpha.GetValue()
self.parent.panel.autoset_margins()
self.parent.panel.redraw()
def onContourEvents(self, event=None):
self.conf.ncontour_levels = self.ncontours.GetValue()
self.conf.contour_labels = self.showlabels.IsChecked()
self.parent.onContourToggle()
def onSliceEvents(self, event=None):
self.conf.slice_width = self.slice_width.GetValue()
self.conf.slices = self.slice_dir.GetStringSelection()
self.conf.slice_onmotion = self.slice_dynamic.IsChecked()
self.parent.onSliceChoice()
def onScalebarEvents(self, event=None):
self.conf.scalebar_show = self.show_scalebar.IsChecked()
self.conf.scalebar_showlabel = self.show_label.IsChecked()
self.conf.scalebar_label = self.label.GetValue()
self.conf.scalebar_pos = self.ypos.GetValue(), self.xpos.GetValue()
self.conf.scalebar_size = self.height.GetValue(), self.width.GetValue()
self.conf.scalebar_color = col = hexcolor(self.color.GetValue()[:3])
self.conf.scalebar_units = self.pixunits.GetValue()
self.conf.scalebar_pixelsize = self.ypix.GetValue(), self.xpix.GetValue()
self.parent.panel.redraw()
``` |
{
"source": "jkbgbr/xls_address",
"score": 4
} |
#### File: jkbgbr/xls_address/xlddss.py
```python
import re
import xlwt
LETTERS = tuple([chr(x) for x in range(65, 65+26)]) # A to Z
def _column_adress(addr='A'):
"""returns the zero-based column number for a column address"""
return _cell_address(''.join([addr, '1']))[1]
def _row_adress(addr='1'):
"""returns the zero-based row number for a row address"""
return _cell_address(''.join(['A', addr]))[0]
def _parse_address(addr='XFD1048576') -> str:
"""
Parses the address given using a regexp and returns a cleaned string that can be split
Accepts any valid excel cell address definition, incl. absolute addresses with $.
:param addr:
:return:
"""
patt = re.match(r'^(\$?[A-Z]{1,3}\$?\d{1,7})$', addr)
if patt is None:
raise ValueError('Could not parse the address {}'.format(addr))
else:
return patt.group(0).replace('$', '')
def _cell_address(addr='XFD1048576', rev=False):
"""parses the input address and returns the column, row tuple corresponding"""
# check the address. Expected is something between 'A1' and 'XFD1048576'
try:
_ret = _parse_address(addr)
except ValueError:
raise
_row, _col = None, None
addr = _ret
_letters = ''.join([x for x in addr if x in LETTERS])
_numbers = ''.join([x for x in addr if x not in _letters])
# getting the row number
# try:
_row = int(_numbers) - 1
if _row < 0:
raise ValueError('Incorrect row position in the address: {}!'.format(_numbers))
# getting the column. len(LETTERS)-base arithmetic
_col = 0
for col in range(len(_letters), 0, -1):
he = len(_letters) - col # position, 1, 2 ...
val = _letters[col - 1] # value at position
_col += (LETTERS.index(val) + 1) * (len(LETTERS) ** he)
_col -= 1
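# e.g. 'AB': (index('A')+1)*26**1 + (index('B')+1)*26**0 - 1 = 26 + 2 - 1 = 27 (zero-based)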
if rev:
_col, _row = _row, _col
return _row, _col
def _cell_range(rnge='A1:XFD1048576') -> tuple:
"""Returns the addresses from range"""
# splitting
rnge = rnge.split(':')
# the split results a single value - fall back to cell
if len(rnge) == 1:
return _cell_address(addr=rnge[0])
elif len(rnge) > 2 or len(rnge) <= 0:
raise ValueError('The provided range "{}" is not correct'.format(rnge))
else: # len(rnge) == 2
return _cell_address(rnge[0]), _cell_address(rnge[1])
def _get_cell_range(sheet, start_row, start_col, end_row, end_col):
"""Returns the values from a range
https://stackoverflow.com/a/33938163
"""
return [sheet.row_slice(row, start_colx=start_col, end_colx=end_col+1) for row in range(start_row, end_row+1)]
def get_value(sheet, addr, value_only=True):
"""Use this to retreive stuff"""
# this does not work at all
try:
_addr = _cell_range(addr)
except ValueError:
raise
try:
_addr = _addr[0][0], _addr[0][1], _addr[1][0], _addr[1][1]
# checking a range definition validity: start_ is smaller of same as end_
if not (_addr[0] <= _addr[2]) or not(_addr[1] <= _addr[3]):
raise ValueError('The range definition {} is not correct'.format(addr))
if value_only:
_ret = []
for r in _get_cell_range(sheet, *_addr):
_ret.append([x.value for x in r])
return _ret
else:
return _get_cell_range(sheet, *_addr)
except TypeError:
if value_only:
return sheet.cell_value(*_addr)
else:
return sheet.cell(*_addr)
if __name__ == '__main__':
pass
import xlrd
import os
wb = xlrd.open_workbook('test.xls')
sheet = wb.sheet_by_index(0)
print(get_value(sheet, addr='C4:D5', value_only=True))
print(get_value(sheet, addr='C4', value_only=True))
print(get_value(sheet, addr='C4', value_only=False))
os.remove('test.xls')
``` |
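A minimal usage sketch for the address helpers above (illustrative only; it assumes the module is importable as `xlddss` and is not part of the original repository):
```python
from xlddss import _cell_address, _cell_range  # assumed import path

# addresses are parsed into zero-based (row, column) tuples
assert _cell_address('A1') == (0, 0)
assert _cell_address('$C$4') == (3, 2)   # '$' absolute markers are stripped
assert _cell_range('A1:B3') == ((0, 0), (2, 1))
```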
{
"source": "jkbjh/sacreddata",
"score": 2
} |
#### File: src/sacreddata/filereporter.py
```python
import os
try:
import ujson as json
except ImportError:
import json
import dictor
import datetime
import io
import shutil
import pandas as pd
import warnings
class BuildCommandMixin(object):
def build_command(self):
vals = dict(self.run["experiment"])
vals.update(self.run["meta"])
vals = {k: v for k, v in vals.items() if v}
vals["options"] = {k: v for k, v in vals["options"].items() if v}
update = vals["options"].pop("UPDATE", {})
updater = ""
if vals["options"].pop("with", False):
updater += " with "
updater += " ".join(update)
options = vals.pop("options", {})
option_str = " ".join(["%s %s" % (k, v) for k, v in options.items()])
vals["use_options"] = option_str
vals["cfg_updates"] = updater
command = "{base_dir}/{mainfile} {command} {use_options} {cfg_updates}".format(**vals)
return command
def _slurp_json(filename):
with open(filename) as fp:
return json.loads(fp.read())
class lazy_property(object):
def __init__(self, func):
self._func = func
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __get__(self, obj, klass=None):
if obj is None:
return None
result = obj.__dict__[self.__name__] = self._func(obj)
return result
class JSONObj(object):
@classmethod
def slurp(cls, filename):
return cls(_slurp_json(filename))
def __init__(self, json_data):
self._json = json_data
def __getitem__(self, value_path):
return dictor.dictor(self._json, value_path)
@property
def raw(self):
return self._json
def keys(self):
return self._json.keys()
def items(self):
return self._json.items()
def __repr__(self):
return "%s %r>" % (super(JSONObj, self).__repr__()[:-1],
self.keys())
class FileRun(BuildCommandMixin, object):
def __init__(self, base_directory, run_directory, run_json):
self._base_directory = os.path.expanduser(base_directory)
self._run_directory = os.path.expanduser(run_directory)
self._run_json = run_json
self._artifacts = set(self["artifacts"])
@lazy_property
def config(self):
return JSONObj.slurp(os.path.join(self._run_directory, "config.json"))
@lazy_property
def metrics(self):
return JSONObj.slurp(os.path.join(self._run_directory, "metrics.json"))
@property
def run(self):
return JSONObj(self._run_json)
def __getitem__(self, value_path):
return dictor.dictor(self._run_json, value_path)
def keys(self):
return self._run_json.keys()
def info(self):
str_format = "%Y-%m-%dT%H:%M:%S.%f"
start_time = datetime.datetime.strptime(self["start_time"], str_format)
stop_time = datetime.datetime.strptime(self['stop_time'], str_format) if self['stop_time'] else None
return dict(
run_directory=self._run_directory,
name=self["experiment.name"],
start_time=start_time,
duration=(stop_time - start_time) if stop_time is not None else None)
@property
def artifacts(self):
return self._artifacts
def __artifact_path(self, artifact):
return os.path.join(self._run_directory, artifact)
def open(self, artifact, *a):
assert artifact in self._artifacts
return io.open(self.__artifact_path(artifact), *a)
def __repr__(self):
return "%s info=%r>" % (
super(FileRun, self).__repr__()[:-1],
self.info()
)
def extract_artifacts(self, output_path, artifacts, create_output_path=True):
unknown_artifacts = set(artifacts) - self.artifacts
if unknown_artifacts:
raise RuntimeError("Unknown artifacts requested: %r" % (sorted(list(unknown_artifacts))))
if not os.path.exists(output_path) and create_output_path:
os.makedirs(output_path)
targets = []
for artifact in artifacts:
target_path = os.path.join(output_path, artifact)
shutil.copyfile(self.__artifact_path(artifact), target_path)
targets.append(target_path)
return targets
class FileReporter(object):
def __init__(self, directory):
self.base_directory = os.path.expanduser(directory)
self.sources_directory = os.path.join(self.base_directory, "_sources")
if not os.path.exists(self.sources_directory):
raise RuntimeError(("_sources directory not found, probably "
"not a sacred %r results directory!") %
(self.base_directory,))
self._run_json = {}
self.update()
def update(self):
self._runs = [run for run in os.listdir(self.base_directory) if run.isdigit()]
self._runs.sort(key=lambda x: int(x))
old_json = self._run_json
self._run_json = {}
for run in self._runs:
if run in old_json:
self._run_json[run] = old_json[run] # use already loaded version
def _get_run_json(self, run):
assert run in self._runs
json_filename = os.path.join(self.base_directory, run, "run.json")
if os.path.exists(json_filename):
self._run_json[run] = _slurp_json(json_filename)
return self._run_json[run]
def __getitem__(self, run_key):
if not isinstance(run_key, str):
conv_key = str(run_key)
warnings.warn("Got item %r as run_key but expected a string, will be converted to: %r" % (run_key, conv_key))
run_key = conv_key
return FileRun(self.base_directory, os.path.join(self.base_directory, run_key), self._get_run_json(run_key))
def keys(self):
return self._runs
def as_df(self, keyfilter=None):
result = []
keys = self.keys()
if keyfilter is not None:
keys = keyfilter(keys)
for key in keys:
tr = self[key]
info = tr.info()
values = dict(run_key=key,
name=info["name"],
status=tr["status"],
start_time=info["start_time"],
duration=info["duration"],
)
values.update(dict(tr.config.items()))
result.append(values)
return pd.DataFrame(result)
```
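A hedged usage sketch of the reporter above (illustrative only; the results directory and run key are assumptions, not taken from the source):
```python
from sacreddata.filereporter import FileReporter  # assumed import path

rep = FileReporter("~/sacred_results")  # directory must contain a "_sources" subdirectory
print(rep.keys())                       # run directories with numeric names, e.g. ['1', '2']
run = rep["1"]
print(run.info())                       # run directory, experiment name, start time, duration
print(sorted(run.artifacts))
df = rep.as_df()                        # one row per run, merged with each run's config
```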
#### File: src/sacreddata/gym_recording_playback.py
```python
from gym_recording import playback
import tempfile
import os
import itertools
import numpy as np
def scan_recorded_traces(run, callback, tmp_directory=None):
if tmp_directory is None:
tmp_directory = tempfile.mkdtemp(prefix="sacristan_tmp")
trace_artifacts = [x for x in run.artifacts if x.startswith("openaigym.trace")]
tmp_files = run.extract_artifacts(tmp_directory, trace_artifacts)
playback.scan_recorded_traces(tmp_directory, callback)
for f in tmp_files:
os.remove(f)
class AllTraces(object):
@classmethod
def all_traces_from_run(cls, run):
all_traces = AllTraces()
scan_recorded_traces(run, all_traces.add_trace)
return all_traces
def __init__(self):
self.i = 0
self._observations = []
self._actions = []
self._rewards = []
self.rewards = None
self.observations = None
self.observations1 = None
self.returns = None
self.episode_lengths = None
self.actions = None
self.last_incomplete = None
def stack(self):
"""vstack the data (potentially excluding the last episode if it is incomplete)"""
if (len(self._observations) >= 2) and (self._observations[-1].shape != self._observations[-2].shape):
self.last_incomplete = True
else:
self.last_incomplete = False
stacker = lambda data: np.concatenate(data, axis=0)
self.observations = stacker([o[:-1] for o in self._observations])
self.actions = stacker(self._actions)
self.rewards = stacker(self._rewards)
self.observations1 = stacker([o[1:] for o in self._observations])
self.returns = np.array([np.sum(x) for x in self._rewards])
self.episode_lengths = np.array([len(x) for x in self._rewards])
def add_trace(self, observations, actions, rewards):
observations, actions, rewards = map(np.array, (observations, actions, rewards))
if not (rewards.size and actions.size and observations.size):
return
self._observations.append(observations)
self._actions.append(actions)
self._rewards.append(rewards)
self.i += 1
``` |
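Illustrative only: how the helpers above can be combined with a `FileRun` holding `openaigym.trace.*` artifacts (the `run` object is an assumption, not part of the source):
```python
from sacreddata.gym_recording_playback import AllTraces  # assumed import path

traces = AllTraces.all_traces_from_run(run)  # scans the run's recorded traces
traces.stack()                               # builds stacked observation/action/reward arrays
print(traces.returns, traces.episode_lengths)
```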
{
"source": "jkblume/cosmic-ray",
"score": 3
} |
#### File: cosmic-ray/cosmic_ray/cli.py
```python
import itertools
import json
import logging
import os
import pprint
import sys
import docopt_subcommands as dsc
import transducer.eager
from transducer.functional import compose
import transducer.lazy
from transducer.transducers import filtering, mapping
import cosmic_ray.commands
import cosmic_ray.counting
import cosmic_ray.modules
import cosmic_ray.plugins
import cosmic_ray.worker
from cosmic_ray.testing.test_runner import TestOutcome
from cosmic_ray.timing import Timer
from cosmic_ray.util import redirect_stdout
from cosmic_ray.work_db import use_db, WorkDB
LOG = logging.getLogger()
REMOVE_COMMENTS = mapping(lambda x: x.split('#')[0])
REMOVE_WHITESPACE = mapping(str.strip)
NON_EMPTY = filtering(bool)
CONFIG_FILE_PARSER = compose(REMOVE_COMMENTS,
REMOVE_WHITESPACE,
NON_EMPTY)
def _load_file(config_file):
"""Read configuration from a file.
This reads `config_file`, yielding each non-empty line with
whitespace and comments stripped off.
"""
with open(config_file, 'rt', encoding='utf-8') as f:
yield from transducer.lazy.transduce(CONFIG_FILE_PARSER, f)
@dsc.command('load')
def handle_load(config):
"""usage: cosmic-ray load <config-file>
Load a command configuration from <config-file> and run it.
A "command configuration" is simply a command-line invocation for cosmic-ray,
where each token of the command is on a separate line.
"""
filename = config['<config-file>']
argv = _load_file(filename)
return main(argv=list(argv))
@dsc.command('baseline')
def handle_baseline(configuration):
"""usage: cosmic-ray baseline [options] <top-module> [-- <test-args> ...]
Run an un-mutated baseline of <top-module> using the tests in <test-dir>.
This is largely like running a "worker" process, with the difference
that a baseline run doesn't mutate the code.
options:
--no-local-import Disallow importing module from the current directory
--test-runner=R Test-runner plugin to use [default: unittest]
"""
sys.path.insert(0, '')
test_runner = cosmic_ray.plugins.get_test_runner(
configuration['--test-runner'],
configuration['<test-args>']
)
work_record = test_runner()
# note: test_runner() results are meant to represent
# status codes when executed against mutants.
# SURVIVED means that the test suite executed without any error
# hence CR thinks the mutant survived. However when running the
# baseline execution we don't have mutations and really want the
# test suite to report PASS, hence the comparison below!
if work_record.test_outcome != TestOutcome.SURVIVED:
# baseline failed, print whatever was returned
# from the test runner and exit
LOG.error('baseline failed')
print(''.join(work_record.data))
sys.exit(2)
def _get_db_name(session_name):
if session_name.endswith('.json'):
return session_name
else:
return '{}.json'.format(session_name)
@dsc.command('init')
def handle_init(configuration):
"""usage: cosmic-ray init [options] [--exclude-modules=P ...] (--timeout=T | --baseline=M) <session-name> <top-module> [-- <test-args> ...]
Initialize a mutation testing run. The primarily creates a database of "work to
be done" which describes all of the mutations and test runs that need to be
executed for a full mutation testing run. The testing run will mutate
<top-module> (and submodules) using the tests in <test-dir>. This doesn't
actually run any tests. Instead, it scans the modules-under-test and simply
generates the work order which can be executed with other commands.
The session-name argument identifies the run you're creating. Its most
important role is that it's used to name the database file.
options:
--no-local-import Disallow importing module from the current directory
--test-runner=R Test-runner plugin to use [default: unittest]
--exclude-modules=P Pattern of module names to exclude from mutation
"""
# This lets us import modules from the current directory. Should probably
# be optional, and needs to also be applied to workers!
sys.path.insert(0, '')
if configuration['--timeout'] is not None:
timeout = float(configuration['--timeout'])
else:
baseline_mult = float(configuration['--baseline'])
assert baseline_mult is not None
with Timer() as t:
handle_baseline(configuration)
timeout = baseline_mult * t.elapsed.total_seconds()
LOG.info('timeout = %f seconds', timeout)
modules = set(
cosmic_ray.modules.find_modules(
cosmic_ray.modules.fixup_module_name(configuration['<top-module>']),
configuration['--exclude-modules']))
LOG.info('Modules discovered: %s', [m.__name__ for m in modules])
db_name = _get_db_name(configuration['<session-name>'])
with use_db(db_name) as db:
cosmic_ray.commands.init(
modules,
db,
configuration['--test-runner'],
configuration['<test-args>'],
timeout)
@dsc.command('exec')
def handle_exec(configuration):
"""usage: cosmic-ray exec [--dist] <session-name>
Perform the remaining work to be done in the specified session. This requires
that the rest of your mutation testing infrastructure (e.g. worker processes)
are already running.
options:
--dist Distribute tests to remote workers
"""
db_name = _get_db_name(configuration['<session-name>'])
dist = configuration['--dist']
with use_db(db_name, mode=WorkDB.Mode.open) as db:
cosmic_ray.commands.execute(db, dist)
@dsc.command('run')
def handle_run(configuration):
"""usage: cosmic-ray run [options] [--dist] [--exclude-modules=P ...] (--timeout=T | --baseline=M) <session-name> <top-module> [-- <test-args> ...]
This simply runs the "init" command followed by the "exec" command.
It's important to remember that "init" clears the session database, including
any results you may have already received. So DO NOT USE THIS COMMAND TO
CONTINUE EXECUTION OF AN INTERRUPTED SESSION! If you do this, you will lose any
existing results.
options:
--no-local-import Disallow importing module from the current directory
--test-runner=R Test-runner plugin to use [default: unittest]
--exclude-modules=P Pattern of module names to exclude from mutation
"""
handle_init(configuration)
handle_exec(configuration)
@dsc.command('report')
def handle_report(configuration):
"""usage: cosmic-ray report [--full-report] [--show-pending] <session-name>
Print a nicely formatted report of test results and some basic statistics.
options:
--show-pending Include work items that are still pending (not yet executed)
--full-report Show test output and mutation diff for killed mutants
"""
db_name = _get_db_name(configuration['<session-name>'])
show_pending = configuration['--show-pending']
full_report = configuration['--full-report']
with use_db(db_name, WorkDB.Mode.open) as db:
for line in cosmic_ray.commands.create_report(db, show_pending, full_report):
print(line)
@dsc.command('survival-rate')
def handle_survival_rate(configuration):
"""usage: cosmic-ray survival-rate <session-name>
Print the session's survival rate.
"""
db_name = _get_db_name(configuration['<session-name>'])
with use_db(db_name, WorkDB.Mode.open) as db:
rate = cosmic_ray.commands.survival_rate(db)
print('{:.2f}'.format(rate))
@dsc.command('counts')
def handle_counts(configuration):
"""usage: cosmic-ray counts [options] [--exclude-modules=P ...] <top-module>
Count the number of tests that would be run for a given testing configuration.
This is mostly useful for estimating run times and keeping track of testing
statistics.
options:
--no-local-import Disallow importing module from the current directory
--test-runner=R Test-runner plugin to use [default: unittest]
--exclude-modules=P Pattern of module names to exclude from mutation
"""
sys.path.insert(0, '')
modules = cosmic_ray.modules.find_modules(
cosmic_ray.modules.fixup_module_name(configuration['<top-module>']),
configuration['--exclude-modules'])
operators = cosmic_ray.plugins.operator_names()
counts = cosmic_ray.counting.count_mutants(modules, operators)
print('[Counts]')
pprint.pprint(counts)
print('\n[Total test runs]\n',
sum(itertools.chain(
*(d.values() for d in counts.values()))))
@dsc.command('test-runners')
def handle_test_runners(config):
"""usage: cosmic-ray test-runners
List the available test-runner plugins.
"""
print('\n'.join(cosmic_ray.plugins.test_runner_names()))
return 0
@dsc.command('operators')
def handle_operators(config):
"""usage: cosmic-ray operators
List the available operator plugins.
"""
print('\n'.join(cosmic_ray.plugins.operator_names()))
return 0
@dsc.command('worker')
def handle_worker(config):
"""usage: cosmic-ray worker [options] <module> <operator> <occurrence> <test-runner> [-- <test-args> ...]
Run a worker process which performs a single mutation and test run. Each
worker does a minimal, isolated chunk of work: it mutates the <occurrence>-th
instance of <operator> in <module>, runs the test suite defined by
<test-runner> and <test-args>, prints the results, and exits.
Normally you won't run this directly. Rather, it will be launched by celery
worker tasks.
options:
--no-local-import Disallow importing module from the current directory
--keep-stdout Do not squelch stdout
"""
if not config['--no-local-import']:
sys.path.insert(0, '')
operator = cosmic_ray.plugins.get_operator(config['<operator>'])
test_runner = cosmic_ray.plugins.get_test_runner(
config['<test-runner>'],
config['<test-args>'])
with open(os.devnull, 'w') as devnull,\
redirect_stdout(sys.stdout if config['--keep-stdout'] else devnull):
work_record = cosmic_ray.worker.worker(
config['<module>'],
operator,
int(config['<occurrence>']),
test_runner)
sys.stdout.write(
json.dumps(work_record))
DOC_TEMPLATE = """{program}
Usage: {program} [options] <command> [<args> ...]
Options:
-h --help Show this screen.
-v --verbose Use verbose logging
Available commands:
{available_commands}
See '{program} help <command>' for help on specific commands.
"""
def common_option_handler(config):
if config['--verbose']:
logging.basicConfig(level=logging.INFO)
def main(argv=None):
dsc.main(
'cosmic-ray',
'cosmic-ray v.2',
argv=argv,
doc_template=DOC_TEMPLATE,
common_option_handler=common_option_handler)
if __name__ == '__main__':
main()
```
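The subcommands above are normally driven from the shell, but because `main()` accepts an explicit `argv`, the init/exec/report workflow they document can also be sketched programmatically. In the hedged sketch below, the session name, package name, and timeout are placeholder values, and the import path assumes this file is installed as `cosmic_ray.cli`.
```python
# Minimal sketch of the init -> exec -> report workflow via main(argv).
# 'my-session', 'mypackage', and the timeout are placeholders.
from cosmic_ray import cli

cli.main(['init', '--timeout=30', 'my-session', 'mypackage'])
cli.main(['exec', 'my-session'])
cli.main(['report', 'my-session'])
```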
#### File: cosmic_ray/commands/execute.py
```python
import cosmic_ray.tasks.celery
import cosmic_ray.tasks.worker
from cosmic_ray.work_record import WorkRecord
# TODO: These should be put into plugins. Callers of execute() should pass an
# executor.
def local_executor(test_runner, test_args, timeout, pending_work):
for work_record in pending_work:
yield cosmic_ray.tasks.worker.worker_task(
work_record,
test_runner,
test_args,
timeout)
class CeleryExecutor:
def __init__(self, purge_queue=True):
self.purge_queue = purge_queue
def __call__(self, test_runner, test_args, timeout, pending_work):
try:
results = cosmic_ray.tasks.worker.execute_work_records(
test_runner,
test_args,
timeout,
pending_work)
for r in results:
yield WorkRecord(r.get())
finally:
if self.purge_queue:
cosmic_ray.tasks.celery.app.control.purge()
def execute(work_db, dist=True):
"""Execute any pending work in `work_db`, recording the results.
This looks for any work in `work_db` which has no results, schedules it to be
executed, and records any results that arrive.
If `dist` is `True` then this uses Celery to distribute tasks to remote
workers; of course you need to make sure that these are running if you want
tests to actually run! If `dist` is `False` then all tests will be run
locally.
"""
test_runner, test_args, timeout = work_db.get_work_parameters()
executor = CeleryExecutor() if dist else local_executor
work_records = executor(test_runner,
test_args,
timeout,
work_db.pending_work)
for work_record in work_records:
work_db.update_work_record(work_record)
```
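As the TODO above notes, `execute()` only needs a callable with the signature `(test_runner, test_args, timeout, pending_work)` that yields updated records, so a caller could in principle supply something other than `local_executor` or `CeleryExecutor`. Below is a hedged sketch of such an executor; the `worker_outcome` value it writes is purely illustrative and not one of the library's real result codes.
```python
# Hypothetical executor with the same call signature as the two executors
# above. It tags every pending record with a placeholder outcome instead of
# actually running tests.
def dry_run_executor(test_runner, test_args, timeout, pending_work):
    for work_record in pending_work:
        work_record['worker_outcome'] = 'DRY_RUN'  # placeholder value
        yield work_record
```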
#### File: cosmic_ray/commands/init.py
```python
import logging
import uuid
import cosmic_ray.counting
import cosmic_ray.modules
import cosmic_ray.plugins
from cosmic_ray.work_record import WorkRecord
LOG = logging.getLogger()
def init(modules,
work_db,
test_runner,
test_args,
timeout):
"""Clear and initialize a work-db with work items.
Any existing data in the work-db will be cleared and replaced with entirely
new work orders. In particular, this means that any results in the db are
removed.
"""
operators = cosmic_ray.plugins.operator_names()
counts = cosmic_ray.counting.count_mutants(modules, operators)
work_db.set_work_parameters(
test_runner=test_runner,
test_args=test_args,
timeout=timeout)
work_db.clear_work_records()
work_db.add_work_records(
WorkRecord(
job_id=uuid.uuid4().hex,
module=module.__name__,
operator=opname,
occurrence=occurrence)
for module, ops in counts.items()
for opname, count in ops.items()
for occurrence in range(count))
```
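The `init()` function above is normally reached through the `cosmic-ray init` command, but its arguments mirror what `handle_init` in cli.py gathers. A hedged sketch of calling it directly follows; the session filename, timeout, and `mypackage` module are placeholders.
```python
# Hedged sketch of driving init() directly, mirroring handle_init in cli.py.
# 'my-session.json', the timeout, and mypackage are placeholder values.
import cosmic_ray.commands
from cosmic_ray.work_db import use_db

import mypackage  # hypothetical package under test

with use_db('my-session.json') as db:
    cosmic_ray.commands.init({mypackage}, db, 'unittest', [], 30.0)
```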
#### File: cosmic_ray/operators/comparison_operator_replacement.py
```python
import ast
from .operator import Operator
from ..util import build_mutations
OPERATORS = (ast.Eq, ast.NotEq, ast.Lt, ast.LtE, ast.Gt, ast.GtE,
ast.Is, ast.IsNot, ast.In, ast.NotIn)
def _to_ops(from_op):
"""The sequence of operators which `from_op` could be mutated to.
There are a number of potential replacements which we avoid because they
almost always produce equivalent mutants. We encode those in the logic
here.
NB: This is an imperfect, stop-gap solution to the problem of certain
equivalent mutants. Obviously `==` is not generally the same as `is`, but
that mutation is also a source of a good number of equivalents. The real
solution to this problem will probably come in the form of real exception
declarations or something.
See https://github.com/sixty-north/cosmic-ray/pull/162 for some more
discussion of this issue.
"""
for to_op in OPERATORS:
if isinstance(from_op, ast.Eq) and to_op is ast.Is:
pass
elif isinstance(from_op, ast.NotEq) and to_op is ast.IsNot:
pass
else:
yield to_op
class MutateComparisonOperator(Operator):
"""An operator that modifies comparisons."""
def visit_Compare(self, node):
"""
http://greentreesnakes.readthedocs.io/en/latest/nodes.html#Compare
"""
return self.visit_mutation_site(
node,
len(build_mutations(node.ops, _to_ops)))
def mutate(self, node, idx):
from_idx, to_op = build_mutations(node.ops, _to_ops)[idx]
node.ops[from_idx] = to_op()
return node
```
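To make the equivalent-mutant filtering in `_to_ops` concrete, the short check below shows what it yields for an `==` node: every operator in `OPERATORS` except `ast.Is`, which the docstring above singles out. It assumes `_to_ops` is available from the module above.
```python
# Illustration of _to_ops on an ast.Eq node: every comparison operator is
# yielded except ast.Is (for ast.NotEq the excluded operator would be IsNot).
import ast

yielded = [op.__name__ for op in _to_ops(ast.Eq())]
assert 'Is' not in yielded
print(yielded)
```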
#### File: cosmic-ray/cosmic_ray/work_db.py
```python
import contextlib
from enum import Enum
import os
# This db may well not scale very well. We need to be ready to switch it out
# for something quicker if not. But for now it's *very* convenient.
import tinydb
from .work_record import WorkRecord
class WorkDB:
class Mode(Enum):
# Open existing files, creating if necessary
create = 1
# Open only existing files, failing if it doesn't exist
open = 2
def __init__(self, path, mode):
"""Open a DB in file `path` in mode `mode`.
Args:
path: The path to the DB file.
mode: The mode in which to open the DB. See the `Mode` enum for
details.
Raises:
FileNotFoundError: If `mode` is `Mode.open` and `path` does not
exist.
"""
if (mode == WorkDB.Mode.open) and (not os.path.exists(path)):
raise FileNotFoundError(
'Requested file {} not found'.format(path))
self._db = tinydb.TinyDB(path)
def close(self):
self._db.close()
@property
def _work_parameters(self):
"""The table of work parameters."""
return self._db.table('work-parameters')
@property
def _work_items(self):
"""The table of work items."""
return self._db.table('work-items')
def set_work_parameters(self, test_runner, test_args, timeout):
"""Set (replace) the work parameters for the session.
Args:
test_runner: The name of the test runner plugin to use.
test_args: The arguments to pass to the test runner.
timeout: The timeout for tests.
"""
table = self._work_parameters
table.purge()
table.insert({
'test-runner': test_runner,
'test-args': test_args,
'timeout': timeout,
})
def get_work_parameters(self):
"""Get the work parameters (if set) for the session.
Returns: a tuple of `(test-runner, test-args, timeout)`.
Raises:
ValueError: If there are no work parameters set for the session.
"""
table = self._work_parameters
try:
record = table.all()[0]
except IndexError:
raise ValueError('work-db has no work parameters')
return (record['test-runner'],
record['test-args'],
record['timeout'])
def add_work_records(self, records):
"""Add a sequence of WorkRecords.
Args:
records: An iterable of `WorkRecord` objects to add to the session.
"""
self._work_items.insert_multiple(records)
def clear_work_records(self):
"""Clear all work items from the session.
This removes any associated results as well.
"""
self._work_items.purge()
@property
def work_records(self):
"""The sequence of `WorkItem`s in the session.
This include both complete and incomplete items.
Each work item is a dict with the keys `module-name`, `op-name`, and
`occurrence`. Items with results will also have the keys `results-type`
and `results-data`.
"""
return (WorkRecord(r) for r in self._work_items.all())
def update_work_record(self, work_record):
"""Updates an existing WorkRecord by job_id.
Args:
work_record: A WorkRecord representing the new state of a job.
Raises:
KeyError: If there is no existing record with the same job_id.
"""
self._work_items.update(
work_record,
tinydb.Query().job_id == work_record.job_id)
@property
def pending_work(self):
"""The sequence of pending `WorkItem`s in the session."""
table = self._work_items
work_item = tinydb.Query()
pending = table.search(work_item.worker_outcome == None)
return (WorkRecord(r) for r in pending)
@contextlib.contextmanager
def use_db(path, mode=WorkDB.Mode.create):
"""
Open a DB in file `path` in mode `mode` as a context manager.
On exiting the context the DB will be automatically closed.
Args:
path: The path to the DB file.
mode: The mode in which to open the DB. See the `Mode` enum for
details.
Raises:
FileNotFoundError: If `mode` is `Mode.open` and `path` does not
exist.
"""
db = WorkDB(path, mode)
try:
yield db
finally:
# Close the DB whether the block exits normally or raises.
db.close()
```
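A hedged end-to-end sketch of the API above: open a session file with `use_db`, store the work parameters, add a single record, and list what is stored. The filename and field values are placeholders.
```python
# Hedged usage sketch of WorkDB via use_db; filename and values are placeholders.
from cosmic_ray.work_record import WorkRecord

with use_db('demo-session.json') as db:
    db.set_work_parameters(test_runner='unittest', test_args=[], timeout=10.0)
    db.add_work_records([WorkRecord(job_id='0001',
                                    module='mypackage.core',
                                    operator='number_replacer',
                                    occurrence=0)])
    for record in db.work_records:
        print(record)
```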
#### File: jkblume/cosmic-ray/setup.py
```python
import io
import os
import re
from setuptools import setup, find_packages
import sys
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
long_description = read('README.md', mode='rt')
operators = [
'number_replacer = '
'cosmic_ray.operators.number_replacer:NumberReplacer',
'mutate_comparison_operator = '
'cosmic_ray.operators.comparison_operator_replacement:MutateComparisonOperator',
'replace_true_false = '
'cosmic_ray.operators.boolean_replacer:ReplaceTrueFalse',
'replace_and_with_or = '
'cosmic_ray.operators.boolean_replacer:ReplaceAndWithOr',
'replace_or_with_and = '
'cosmic_ray.operators.boolean_replacer:ReplaceOrWithAnd',
'add_not = '
'cosmic_ray.operators.boolean_replacer:AddNot',
'mutate_unary_operator ='
'cosmic_ray.operators.unary_operator_replacement:MutateUnaryOperator',
'mutate_binary_operator ='
'cosmic_ray.operators.binary_operator_replacement:MutateBinaryOperator',
'break_continue_replacement ='
'cosmic_ray.operators.break_continue:ReplaceBreakWithContinue',
]
INSTALL_REQUIRES = [
'astunparse',
'decorator',
'docopt_subcommands',
'nose',
'pathlib',
'pytest>=3.0',
'stevedore',
'tinydb>=3.2.1',
'transducer',
'celery<4',
]
if sys.version_info < (3, 4):
INSTALL_REQUIRES.append('enum34')
setup(
name='cosmic_ray',
version=find_version('cosmic_ray/version.py'),
packages=find_packages(),
author='<NAME>',
author_email='<EMAIL>',
description='Mutation testing',
license='MIT License',
keywords='testing',
url='http://github.com/sixty-north/cosmic-ray',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
],
platforms='any',
include_package_data=True,
install_requires=INSTALL_REQUIRES,
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require={
'test': ['hypothesis']
},
entry_points={
'console_scripts': [
'cosmic-ray = cosmic_ray.cli:main',
],
'cosmic_ray.test_runners': [
'nose = cosmic_ray.testing.nose_runner:NoseRunner',
'unittest = cosmic_ray.testing.unittest_runner:UnittestRunner',
'pytest = cosmic_ray.testing.pytest_runner:PytestRunner',
],
'cosmic_ray.operators': operators,
},
long_description=long_description,
)
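# Note: the 'cosmic_ray.operators' and 'cosmic_ray.test_runners' entry points
# above are how plugins are discovered (via stevedore). A third-party package
# could, hypothetically, register its own operator from its setup.py with:
#
#     entry_points={
#         'cosmic_ray.operators': [
#             'my_operator = my_package.my_module:MyOperator',
#         ],
#     },
#
# where 'my_package.my_module:MyOperator' is a placeholder for your own class.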
``` |
{
"source": "jkbm/django_blog",
"score": 2
} |
#### File: django_blog/blog/models.py
```python
from django.db import models
# Create your models here.
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(
default=timezone.now)
published_date = models.DateTimeField(
blank=True, null=True)
edited_date = models.DateTimeField(
blank=True, null=True)
tags = models.CharField(
max_length=400, blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
class Comment(models.Model):
post = models.ForeignKey(Post)
name = models.CharField(max_length=50)
comment = models.TextField()
published_date = models.DateTimeField(
blank=True, null=True)
edited_date = models.DateTimeField(
blank=True, null=True)
def __str__(self):
return self.comment
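# Hedged usage sketch (e.g. in a Django shell; 'user' is an existing auth.User):
#   post = Post.objects.create(author=user, title="Hello", text="First post")
#   post.publish()  # stamps published_date via timezone.now() and saves
#   Comment.objects.create(post=post, name="Reader", comment="Nice post!")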
``` |
{
"source": "jkbm/esports",
"score": 2
} |
#### File: esports/hsapp/forms.py
```python
from django import forms
from django.core.validators import EmailValidator
from django.db.models import Q
from .models import Game, Match, Tournament, Group, Player, Post
class MatchForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
tournament = kwargs.pop('tournament')
super(MatchForm, self).__init__(*args, **kwargs)
try:
self.fields['player1'].queryset = tournament.players.all()
self.fields['player2'].queryset = tournament.players.all()
except:
self.fields['player1'].queryset = Player.objects.all()
self.fields['player2'].queryset = Player.objects.all()
class Meta:
model = Match
fields = ('tournament', 'date', 'player1', 'player2',
'score', 'format', 'stage', 'finished')
class GameForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
match = kwargs.pop('match')
super(GameForm, self).__init__(*args, **kwargs)
try:
self.initial['player1'] = Player.objects.get(pk=match.player1.pk)
self.initial['player2'] = Player.objects.get(pk=match.player2.pk)
self.fields['winner'].queryset = Player.objects.filter(
pk__in=(match.player1.pk, match.player2.pk))
except:
self.fields['player1'].queryset = Player.objects.all()
self.fields['player2'].queryset = Player.objects.all()
class Meta:
model = Game
fields = ('match', 'player1', 'player2', 'class1', 'class2', 'winner')
class PlayerForm(forms.ModelForm):
class Meta:
model = Player
exclude = ['pk']
class TournamentForm(forms.ModelForm):
class Meta:
model = Tournament
fields = ('title', 'start_date', 'end_date', 'winner', 'players', 'groups')
class GroupForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
tournament = kwargs.pop('tournament')
super(GroupForm, self).__init__(*args, **kwargs)
self.fields['players'].queryset = tournament.players.all()
class Meta:
model = Group
fields = ('tournament', 'letter', 'players')
class ControlPanelForm(forms.Form):
tournament = forms.ModelChoiceField(queryset=Tournament.objects.all())
class FeedbackForm(forms.Form):
name = forms.CharField(max_length=30)
email = forms.EmailField()
feedback = forms.CharField(widget=forms.Textarea)
class AddPostForm(forms.Form):
title = forms.CharField()
tags = forms.CharField()
article = forms.CharField(widget=forms.Textarea)
```
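The `__init__` overrides above all pop an extra keyword (`tournament` or `match`) before calling the parent constructor and then use it to narrow the querysets. A hedged sketch of how a view might pass that keyword follows; the view name and URL parameter are illustrative, and rendering/redirect handling is omitted.
```python
# Hypothetical view showing how MatchForm receives the extra 'tournament'
# keyword that its __init__ pops to filter the player querysets.
from django.shortcuts import get_object_or_404

def add_match(request, tournament_pk):
    tournament = get_object_or_404(Tournament, pk=tournament_pk)
    form = MatchForm(request.POST or None, tournament=tournament)
    if form.is_valid():
        form.save()
    # ...render a template or redirect here...
```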
#### File: esports/hsapp/misc.py
```python
import requests
import urllib.request
from datetime import datetime
import time
from bs4 import BeautifulSoup
from .models import Match, Tournament, Player, Game, Group, Deck, Deckset
from django.db.models import Q
from django.core.files import File
import json
import re
from xml.etree import ElementTree
def fill_from_text(tpk):
t = Tournament.objects.get(pk=tpk)
players = ['Chakki', 'Nostam', 'Talion', 'AlSkyHigh', 'chessdude123', 'Snail', 'wtybill']
casters = []
for p in players:
obj, created = Player.objects.get_or_create(name=p)
t.players.add(obj)
t.save()
```
#### File: esports/hsapp/tests.py
```python
from __future__ import unicode_literals
from django.test import TestCase
from .models import Player
from django.utils import timezone
from django.core.urlresolvers import reverse
# Create your tests here.
class HsappTest(TestCase):
def create_player(self, name="Jack", birthday='1994-04-19', country='UA', team='CFC'):
return Player.objects.create(name=name, birthday=birthday, country=country, team=team)
def test_player_creation(self):
w = self.create_player()
self.assertTrue(isinstance(w, Player))
self.assertEqual(w.__str__(), w.name)
def test_hsapp_index_view(self):
w = self.create_player()
url = reverse("hsapp.views.tournament_list")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(w.name, resp.content.decode())
```
#### File: esports/hsapp/utils.py
```python
from django.db.models import Q
import requests
import urllib.request
import json
import re
import time
from datetime import datetime
from bs4 import BeautifulSoup
from .models import Match, Tournament, Player, Game, Group, Deck, Deckset
from django.core.files import File
class Uploader():
"""
Class for automatic data upload from given source.
"""
def __init__(self, link, tpk, groups=True):
self.link = link
self.tpk = tpk
self.tournament = Tournament.objects.get(pk=tpk)
self.groups = groups
def get_data(self):
"""Get data from web-source"""
page = requests.get(self.link)
soup = BeautifulSoup(page.text, 'html.parser')
scripts = soup.find_all('script', type="text/javascript")
data = scripts[6].string
result = re.search('\tdata:(.*?)"1.0.0"},', data).groups()
result = result[0]+'"1.0.0"}'
jresult = json.loads(result)
if self.groups:
playoffs = jresult['stages'][1]['brackets'][0]['matches']
self.playoffs = sorted(playoffs, key=lambda d: (d['round'], d['ordinal']))
self.groups = jresult['stages'][0]['brackets']
self.players = jresult['competitors']
return self.playoffs, self.groups, self.players
else:
playoffs = jresult['stages'][0]['brackets'][0]['matches']
self.playoffs = sorted(playoffs, key=lambda d: (d['round'], d['ordinal']))
self.players = jresult['competitors']
return self.playoffs, self.players
def add_group_matches(self, tpk=15):
"""Add group stage matches to DB"""
for group in self.groups:
self.add_matches(group['matches'], bracket_stage="Groups")
def add_group(self, group):
"""Define groups for the tournament"""
cgroup = Group.objects.create(tournament=self.tournament, letter=group['name'][-1])
for p in group['rankings']['content']:
player = Player.objects.get(name=p['competitor']['name'])
cgroup.players.add(player)
def add_playoff_matches(self, tpk=15):
"""Add playoff matches to DB"""
self.add_matches(self.playoffs)
def add_matches(self, matches, bracket_stage="Playoffs"):
"""Add matches and games from given data to DB"""
# Add or update matches
for match in matches:
try:
competitors = match['competitors']
#timestamp = match.get('startDate') or match.get('endDate') or match.get('dateFinished') or time.mktime(self.tournament.start_date.timetuple())*1e3
#date = datetime.fromtimestamp(timestamp / 1e3)
date = self.tournament.start_date
for idx, val in enumerate(competitors):
if val is None:
competitors[idx] = {'name': 'None'}
player1 = Player.objects.get(name=competitors[0]['name'])
player2 = Player.objects.get(name=competitors[1]['name'])
score = "{0}-{1}".format(match['scores'][0]['value'], match['scores'][1]['value'])
form = "Best of " + str(match['bestOf'])
round = match['round']
if match['state'] == 'CONCLUDED':
finished = True
else:
finished = False
rounds = {1: 'Quarterfinals', 2: 'Semifinals', 3: 'Finals'}
if bracket_stage == "Playoffs":
stage = rounds[match['round']]
else:
stage = bracket_stage
try:
vod_link = match['vodLink']
except:
vod_link = ""
#Create the object if doesnt exist
obj, created = Match.objects.get_or_create(player1=player1, player2=player2,
stage=stage, tournament=self.tournament,
round=round)
obj.score = score
obj.format = form
obj.date = date
if finished:
obj.winner = Player.objects.get(name=match['winner']['name'])
obj.finished = finished
obj.save()
print('Created: {0}-{1}'.format(obj, created))
self.add_games(obj, match)
except Exception as e:
print(match, e)
print('\n')
def add_games(self, match, data):
"""Add games from the match to DB"""
p1 = match.player1
p2 = match.player2
for g in data['games']:
c1 = g['attributes']['competitor1Class']
c2 = g['attributes']['competitor2Class']
if g['points'] == [0, 1]:
winner = p2
elif g['points'] == [1, 0]:
winner = p1
else:
# Skip games without a decided winner; otherwise 'winner' would be undefined below.
continue
game = Game.objects.get_or_create(match=match, player1=p1, player2=p2,
class1=c1, class2=c2, winner=winner)
def get_players(self):
"""Add players that participate in the tournament,
create them if they are not in the DB"""
base = "esports\media\HSapp\players\\"
for p in self.players:
try:
obj, created = Player.objects.get_or_create(name=p['competitor']['name'])
file_name = "{0}.jpg".format(p['competitor']['name'])
try:
r = requests.get(p['competitor']['headshot'])
except Exception as e:
print(e)
r = requests.get("https://d2q63o9r0h0ohi.cloudfront.net/images/media/artwork/artwork1-full-e2b8aa5b1470484b8f8a67022ac2364830e8a5511ca56d6ab00dbe1785413e46fbb919bd95be8df710a6d411bb332cd212ec31190e1d3a7a2d7acc58fc1149fb.jpg")
with open(base+"/tmp/temp.png", "wb") as f:
f.write(r.content)
reopen = open(base+"/tmp/temp.png", "rb")
django_file = File(reopen)
try:
obj.country = p['competitor']['nationality']
except:
print("No country")
obj.image.save(file_name, django_file, save=True)
obj.save()
self.tournament.players.add(obj)
print(obj, created)
except Exception as e:
print('Failed: {0}'.format(e))
def clear_nones(self):
"""Delete matches with undecided opponents
"""
none_player = Player.objects.get(name="None")
none_matches = Match.objects.filter(Q(player1=none_player) | Q(player2=none_player),
tournament=self.tournament)
for m in none_matches:
m.delete()
def get_decks(self):
base = "esports\media\HSapp\decks\\"
html = requests.get("https://playhearthstone.com/en-us/blog/21101731/here-are-your-hct-summer-championship-deck-lists-10-5-17")
soup = BeautifulSoup(html.text, 'html.parser')
tabs = soup.find_all(class_="tab-pane")
for t in range(0, len(tabs), 4):
name = self.longestSubstringFinder(tabs[t].get('id'), tabs[t+1].get('id'))
player = Player.objects.get(name__iexact=name)
deckset = Deckset.objects.create(player=player, tournament=self.tournament)
for c in range(0, 4):
deck_class = tabs[t+c].get('id').replace(name, "").upper()
file_name = "{0}{1}.png".format(self.tournament.pk, tabs[t+c].get('id'))
deck = Deck.objects.create(player=player, deck_class=deck_class,
tournament=self.tournament)
r = requests.get(tabs[t+c].img.get('src'))
with open(base+"/tmp/temp.png", "wb") as f:
f.write(r.content)
reopen = open(base+"/tmp/temp.png", "rb")
django_file = File(reopen)
deck.image.save(file_name, django_file, save=True)
deck.save()
deckset.decks.add(deck)
deckset.save()
def longestSubstringFinder(self, string1, string2):
answer = ""
len1, len2 = len(string1), len(string2)
for i in range(len1):
match = ""
for j in range(len2):
if (i + j < len1 and string1[i + j] == string2[j]):
match += string2[j]
else:
if (len(match) > len(answer)): answer = match
match = ""
return answer
def get_stats(player):
classes = {'Mage': 0, 'Warrior':0, 'Warlock': 0, 'Hunter': 0, 'Priest': 0,
'Druid': 0, 'Rogue': 0, 'Paladin': 0, 'Shaman': 0}
stats = {'wins': 0, 'loses': 0, 'gwins': 0, 'gloses': 0, 'classes': classes}
matches = Match.objects.filter(Q(player1=player) | Q(player2=player))
for match in matches:
games = Game.objects.filter(match=match)
if match.winner == player:
stats['wins'] += 1
elif match.winner != player:
stats['loses'] += 1
for game in games:
if game.winner == player:
stats['gwins'] += 1
if game.winner == game.player1:
class__= game.class1.lower().capitalize()
classes[class__] += 1
elif game.winner != player:
stats['gloses'] += 1
return stats
``` |
{
"source": "jkbm/vkdump",
"score": 3
} |
#### File: jkbm/vkdump/full.py
```python
from time import time
from sys import stdout
import urllib.request
import requests
import json
def get_photos(folder, **kwargs):
"""
Fetching API request results
"""
base = "https://api.vk.com/method/messages.getHistoryAttachments"
params = {}
for key, value in kwargs.items():
params[key] = str(value)
print(key, value)
jsons = []
time_then = time()
response = requests.get(base, params=params)
jresponse = response.json()
jsons.append(jresponse)
with open('{0}.json'.format(jresponse['response']["next_from"]), 'w') as outfile:
json.dump(jresponse, outfile)
while "next_from" in jresponse['response']:
start_from = jresponse['response']["next_from"]
params['start_from'] = start_from
response = requests.get(base, params=params)
jresponse = response.json()
jsons.append(jresponse)
with open('{0}.json'.format(jresponse['response']["next_from"]), 'w') as outfile:
json.dump(jresponse, outfile)
print("Data created in %ds" % round(time()-time_then, 3))
return jsons
def download(data):
"""
Downloading, naming and saving photos locally
"""
time_then = time()
count = 0
for part in data:
for item in part['response']:
if part['response'] != [0] and item != "next_from" and item != '0':
link = part['response'][item]['photo']["src_big"]
count += 1
urllib.request.urlretrieve(link, '{0}/{1}.jpg'.format(folder, count))
stdout.write("\r%d done" % int(count))
stdout.flush()
stdout.write("\r \r\n")
print("Files downloaded in %ds" % round(time()-time_then, 3))
if __name__ == "__main__":
access_token = "<GENERATED APP ACCESS TOKEN HERE>"
peer_id = input("Enter dialog id: ") #Enter dialog id from prompt
# peer_id = "<DIALOG ID HERE>" or directly in code
folder = input("Enter folder name to save files into: ")
data = get_photos(folder=folder,
peer_id=peer_id,
access_token=access_token,
count=200,
media_type="photo"
)
download(data)
``` |
{
"source": "jkbngl/YADA",
"score": 3
} |
#### File: YADA/backend/main.py
```python
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from starlette.middleware.cors import CORSMiddleware
from pytube import YouTube
import os
import logging
some_file_path = "Tesla reversing sound.mp4"
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/")
def read_root():
return {"Hello": "World"}
@app.get("/download/")
def main(url: str, format: str):
print(f"DOWNLOADING {url} in format {format}")
# Download the highest-resolution stream and reuse its default filename.
yt = YouTube(url)
stream = yt.streams.get_highest_resolution()
stream.download()
download_path = stream.default_filename
print(download_path)
file_like = open(download_path, mode="rb")
return StreamingResponse(file_like, media_type="video/mp4")
``` |
{
"source": "jkbockstael/adventofcode-2015",
"score": 4
} |
#### File: jkbockstael/adventofcode-2015/day09_part1.py
```python
import sys
import itertools
# Parse the input and return a map
def parse_graph(distances):
graph = {}
for line in distances:
edge, length = line.split(' = ')
length = int(length)
origin, destination = edge.split(' to ')
graph[(origin, destination)] = length
graph[(destination, origin)] = length
return graph
# Compute tour distances through all the cities
# The problem input size is small enought to allow this rather inelegant brute force approach
def tour_distances(graph):
tours = itertools.permutations(set([x for x, y in graph.keys()]))
lengths = []
for tour in tours:
length = 0
for i in range(len(tour) - 1):
length += graph[(tour[i], tour[i + 1])]
lengths.append(length)
return lengths
# Main
if __name__ == '__main__':
graph = parse_graph(sys.stdin.readlines())
print(min(tour_distances(graph)))
```
#### File: jkbockstael/adventofcode-2015/day13_part1.py
```python
import sys
import itertools
# Parse the guest list as a map
def parse_guest_list(guest_list):
guests = {}
for line in guest_list:
tokens = line.split(' ')
guest = tokens[0]
other_guest = tokens[10][:-2]
happiness_value = int(tokens[3]) * (-1 if tokens[2] == 'lose' else 1)
if guest not in guests:
guests[guest] = {}
guests[guest][other_guest] = happiness_value
return guests
# Return the total happiness for the best seating arrangement
def best_seating_outcome(guests):
return max([sum(happiness(seating, guests)) for seating in itertools.permutations(guests)])
# Calculate the happiness value for each guest in a seating
def happiness(seating, guests):
outcome = []
for i in range(len(seating)):
guest = seating[i]
previous_guest = seating[i - 1]
next_guest = seating[(i + 1) % len(seating)]
outcome.append(guests[guest][previous_guest] + guests[guest][next_guest])
return outcome
# Main
if __name__ == '__main__':
print(best_seating_outcome(parse_guest_list(sys.stdin.readlines())))
```
#### File: jkbockstael/adventofcode-2015/day16_part2.py
```python
from day16_part1 import *
def filter_aunt(key, value):
if key in ['cats', 'trees']:
return lambda aunt: aunt[key] is None or aunt[key] > value
elif key in ['pomeranians', 'goldfish']:
return lambda aunt: aunt[key] is None or aunt[key] < value
else:
return lambda aunt: aunt[key] in [None, value]
# Main
known = {'children':3, 'cats':7, 'samoyeds':2, 'pomeranians':3, 'akitas':0, 'vizslas':0, 'goldfish':5, 'trees':3, 'cars':2, 'perfumes':1}
print(aunt_number(filter_aunts(filter_aunt, known, parse_input(sys.stdin.readlines()))))
``` |
{
"source": "jkbockstael/adventofcode-2017",
"score": 4
} |
#### File: jkbockstael/adventofcode-2017/day05_part1.py
```python
import sys
def run(change_jump, program):
steps = 0
pc = 0
while pc >= 0 and pc < len(program):
target = pc + program[pc]
program[pc] = change_jump(program[pc])
pc = target
steps = steps + 1
return steps
def change_jump(jump):
return jump + 1
if __name__ == '__main__':
program = [int(line) for line in sys.stdin.readlines()]
print(run(change_jump, program))
```
#### File: jkbockstael/adventofcode-2017/day06_part1.py
```python
import re
def reallocate(memory):
cell = memory.index(max(memory))
value = memory[cell]
memory[cell] = 0
while value > 0:
cell = (cell + 1) % len(memory)
memory[cell] = memory[cell] + 1
value = value - 1
return memory
def reallocate_until_cycle(memory):
steps = 0
visited = []
while memory not in visited:
visited.append(memory)
memory = reallocate(memory[:])
steps = steps + 1
return steps, memory
if __name__ == '__main__':
memory = [int(x) for x in re.split(r'\s+', input())]
steps, _ = reallocate_until_cycle(memory)
print(steps)
```
#### File: jkbockstael/adventofcode-2017/day08_part1.py
```python
import sys
def run_program(source):
registers = {}
for line in source:
registers = run_instruction(line, registers)
return registers
def run_instruction(line, registers):
tokens = line.rstrip().split(' ')
register, operation, amount, _, guard, comp, value = tokens
amount = int(amount)
if register not in registers:
registers[register] = 0
if guard not in registers:
registers[guard] = 0
if eval(str(registers[guard]) + comp + value):
if operation == 'inc':
registers[register] += amount
else:
registers[register] -= amount
return registers
def largest_register_value(registers):
return max(registers.values())
if __name__ == '__main__':
program = sys.stdin.readlines()
print(largest_register_value(run_program(program)))
``` |
{
"source": "jkbockstael/adventofcode-2020",
"score": 3
} |
#### File: jkbockstael/adventofcode-2020/day01_part1.py
```python
import sys
def parse_input(lines):
return [int(line.strip()) for line in lines]
def part1(expenses):
return [a * b for a in expenses for b in expenses if a + b == 2020][0]
if __name__ == "__main__":
print(part1(parse_input(sys.stdin.readlines())))
```
#### File: jkbockstael/adventofcode-2020/day03_part1.py
```python
import sys
def parse_input(lines):
return [[char for char in line.strip()] for line in lines]
def count_trees(trees_map, slope_col, slope_row):
row = 0
col = 0
trees = 0
height = len(trees_map)
width = len(trees_map[0])
while row < height - 1:
row = row + slope_row
col = (col + slope_col) % width
if trees_map[row][col] == "#":
trees += 1
return trees
def part1(trees_map):
return count_trees(trees_map, 3, 1)
if __name__ == "__main__":
print(part1(parse_input(sys.stdin.readlines())))
```
#### File: jkbockstael/adventofcode-2020/day03_part2.py
```python
import sys
from operator import mul
from functools import reduce
from day03_part1 import parse_input, count_trees
def part2(trees_map):
slopes = [(1,1),(3,1),(5,1),(7,1),(1,2)]
return reduce(
mul,
map(lambda s: count_trees(trees_map, s[0], s[1]), slopes))
if __name__ == "__main__":
print(part2(parse_input(sys.stdin.readlines())))
```
#### File: jkbockstael/adventofcode-2020/day04_part1.py
```python
import sys
def parse_input(lines):
passports = []
passport = {}
for line in lines:
if len(line.strip()) == 0:
passports.append(passport)
passport = {}
else:
pairs = line.split(" ")
for pair in pairs:
key, value = pair.split(":")
passport[key] = value.strip()
passports.append(passport)
return passports
def has_required_keys(passport):
required_keys = ["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"]
for key in required_keys:
if key not in passport.keys():
return False
return True
def part1(passports):
return len(list(filter(has_required_keys, passports)))
if __name__ == "__main__":
print(part1(parse_input(sys.stdin.readlines())))
``` |
{
"source": "jkbockstael/leetcode",
"score": 4
} |
#### File: leetcode/2020-04-month-long-challenge/day03.py
```python
class Solution:
def maxSubArray(self, nums: [int]) -> int:
total = 0
best_total = float("-inf")
for num in nums:
total = max(total + num, num)
best_total = max(total, best_total)
return best_total
# Tests
assert Solution().maxSubArray([-2,1,-3,4,-1,2,1,-5,4]) == 6
assert Solution().maxSubArray([-1]) == -1
```
#### File: leetcode/2020-04-month-long-challenge/day07.py
```python
class Solution:
def countElements(self, arr: [int]) -> int:
return sum(1 for x in arr if x + 1 in arr)
# Tests
assert Solution().countElements([1,2,3]) == 2
assert Solution().countElements([1,1,3,3,5,5,7,7]) == 0
assert Solution().countElements([1,3,2,3,5,0]) == 3
assert Solution().countElements([1,1,2,2]) == 2
```
#### File: leetcode/2020-04-month-long-challenge/day22.py
```python
class Solution:
def subarraySum(self, nums: [int], k: int) -> int:
count = 0
ways_to_sum = {}
current_sum = 0
for number in nums:
current_sum += number
if current_sum == k:
count += 1
if current_sum - k in ways_to_sum:
count += ways_to_sum[current_sum - k]
if current_sum not in ways_to_sum:
ways_to_sum[current_sum] = 1
else:
ways_to_sum[current_sum] += 1
return count
# Tests
assert Solution().subarraySum([1,1,1], 2) == 2
assert Solution().subarraySum([1,1], 2) == 1
assert Solution().subarraySum([1,1], 2) == 1
assert Solution().subarraySum([1], 2) == 0
assert Solution().subarraySum([1,2,3], 3) == 2
assert Solution().subarraySum([1,2,3,4,5], 3) == 2
assert Solution().subarraySum([1], 0) == 0
assert Solution().subarraySum([-1,-1,1], 0) == 1
```
#### File: leetcode/2020-04-month-long-challenge/day25.py
```python
class Solution:
def canJump(self, nums: [int]) -> bool:
leftmost = len(nums) - 1
for i in range(len(nums) - 1, -1, -1):
if nums[i] + i >= leftmost:
leftmost = i
return leftmost == 0
# Tests
assert Solution().canJump([2,3,1,1,4]) == True
assert Solution().canJump([3,2,1,0,4]) == False
```
#### File: leetcode/2020-04-month-long-challenge/day28.py
```python
import collections
class FirstUnique:
def __init__(self, nums: [int]):
# The actual queue
self.queue = collections.deque()
# A map of the number of occurences of each number in the queue
self.map = {}
for num in nums:
self.add(num)
def showFirstUnique(self) -> int:
while len(self.queue) > 0 and self.map[self.queue[0]] > 1:
self.queue.popleft()
if len(self.queue) == 0:
return -1
else:
return self.queue[0]
def add(self, value: int) -> None:
if value in self.map:
self.map[value] += 1
else:
self.map[value] = 1
self.queue.append(value)
# Tests
test = FirstUnique([2,3,5])
assert test.showFirstUnique() == 2
test.add(5)
assert test.showFirstUnique() == 2
test.add(2)
assert test.showFirstUnique() == 3
test.add(3)
assert test.showFirstUnique() == -1
```
#### File: leetcode/2020-05-month-long-challenge/day01.py
```python
class Solution:
def firstBadVersion(self, n:int) -> int:
# Yup, this is a binary search
left = 1 # There is no version zero
right = n
while left < right:
middle = left + (right - left) // 2
if isBadVersion(middle):
right = middle
else:
left = middle + 1
return left
# Test
def isBadVersion(version: int) -> bool:
return version >= 4
assert Solution().firstBadVersion(5) == 4
```
#### File: leetcode/2020-05-month-long-challenge/day02.py
```python
class Solution:
def numJewelsInStones(self, J: str, S: str) -> int:
return sum(1 for x in S if x in J)
# Tests
assert Solution().numJewelsInStones("aA", "aAAbbbb") == 3
assert Solution().numJewelsInStones("z", "ZZ") == 0
```
#### File: leetcode/2020-05-month-long-challenge/day09.py
```python
class Solution:
def isPerfectSquare(self, num: int) -> bool:
# Zero and one are squares of themselves
if num == 0 or num == 1:
return True
# Squares can't have any last digit, this allows ruling out almost half
# the candidates
if num % 10 not in [0, 1, 4, 5, 6, 9]:
return False
# Even numbers produce even squares, odd numbers produce odd squares
if num % 2 == 0:
start = 2
else:
start = 1
# Straightforward linear search as it is fast enough for the input size
for candidate in range(start, num // 2 + 1, 2):
if candidate ** 2 == num:
return True
return False
# Tests
assert Solution().isPerfectSquare(16) == True
assert Solution().isPerfectSquare(14) == False
assert Solution().isPerfectSquare(0) == True
assert Solution().isPerfectSquare(1) == True
assert Solution().isPerfectSquare(4) == True
assert Solution().isPerfectSquare(3481) == True
assert Solution().isPerfectSquare(2147483647) == False
```
#### File: leetcode/2020-05-month-long-challenge/day18.py
```python
class Solution:
def checkInclusion(self, s1: str, s2: str) -> bool:
# This is almost the same code as Day 17, with early returns
if len(s2) < len(s1) or s1 == "":
return False
ALPHABET_SIZE = 26
# Character to index
c2i = lambda c: ord(c) - ord('a')
frequencies_s2 = [0 for _ in range(ALPHABET_SIZE)]
frequencies_s1 = [0 for _ in range(ALPHABET_SIZE)]
positions = []
# First window
for i in range(len(s1)):
frequencies_s1[c2i(s1[i])] += 1
frequencies_s2[c2i(s2[i])] += 1
# Next windows
for i in range(len(s1), len(s2)):
if frequencies_s2 == frequencies_s1:
return True
positions.append(i-len(s1))
frequencies_s2[c2i(s2[i])] += 1
frequencies_s2[c2i(s2[i-len(s1)])] -= 1
# Last window
if frequencies_s2 == frequencies_s1:
return True
positions.append(len(s2)-len(s1))
return False
# Tests
assert Solution().checkInclusion("ab", "eidbaooo") == True
assert Solution().checkInclusion("ab", "eidboaoo") == False
```
#### File: leetcode/2020-05-month-long-challenge/day22.py
```python
import collections
class Solution:
def frequencySort(self, s: str) -> str:
return "".join(map(
lambda t: t[0] * t[1],
collections.Counter(s).most_common(len(s))))
# Tests
assert Solution().frequencySort("tree") in ["eert", "eetr"]
assert Solution().frequencySort("cccaaa") in ["cccaaa", "aaaccc"]
assert Solution().frequencySort("Aabb") in ["bbAa", "bbaA"]
```
#### File: leetcode/2020-06-month-long-challenge/day07.py
```python
class Solution:
def change(self, amount: int, coins: [int]) -> int:
# combinations[amount] := number of ways to get `amount` using `coins`
combinations = [0 for _ in range(0, amount + 1)]
combinations[0] = 1
for coin in coins:
for value in range(1, amount + 1):
if value >= coin:
combinations[value] += combinations[value - coin]
return combinations[amount]
# Tests
assert Solution().change(5, [1,2,5]) == 4
assert Solution().change(3, [2]) == 0
assert Solution().change(10, [10]) == 1
```
#### File: leetcode/2020-06-month-long-challenge/day11.py
```python
class Solution:
def sortColors(self, nums: [int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
def swap(a, b):
nums[a], nums[b] = nums[b], nums[a]
sorted_left = -1
sorted_right = len(nums)
current = 0
while current < sorted_right:
if nums[current] == 0:
sorted_left += 1
swap(current, sorted_left)
current += 1
elif nums[current] == 1:
current += 1
elif nums[current] == 2:
sorted_right -= 1
swap(current, sorted_right)
# Tests
test_array = [2,0,2,1,1,0]
Solution().sortColors(test_array)
assert test_array == [0,0,1,1,2,2]
test_array = [2,0,1]
Solution().sortColors(test_array)
assert test_array == [0,1,2]
```
#### File: leetcode/2020-06-month-long-challenge/day20.py
```python
import math
class Solution:
def getPermutation(self, n: int, k: int) -> str:
digits = [str(x) for x in range(1, n + 1)]
permutation = ""
k = k - 1
perms = math.factorial(n)
while digits != []:
perms = perms // len(digits)
digit = k // perms
k = k % perms
permutation += digits.pop(digit)
return permutation
# Tests
assert Solution().getPermutation(3, 3) == "213"
assert Solution().getPermutation(4, 9) == "2314"
```
#### File: leetcode/2020-06-month-long-challenge/day22.py
```python
class Solution:
def singleNumber(self, nums: [int]) -> int:
# Yes this is pretty naive, but it beats 83.79% of python3 submissions
counts = {}
for number in nums:
if number not in counts:
counts[number] = 1
else:
counts[number] += 1
for number in counts:
if counts[number] == 1:
return number
# Tests
assert Solution().singleNumber([2,2,3,2]) == 3
assert Solution().singleNumber([0,1,0,1,0,1,99]) == 99
```
#### File: leetcode/2020-07-month-long-challenge/day03.py
```python
class Solution:
def prisonAfterNDays(self, cells: [int], N: int) -> [int]:
N = N % 14 # there's a period of 14 days
N = 14 if N == 0 else N # zero is a nasty value
for day in range(N):
next_cells = [0 for _ in cells]
for cell in range(1, len(cells) - 1): # first and last won't change
next_cells[cell] = (cells[cell - 1] ^ cells[cell + 1]) ^ 1
cells = next_cells
return cells
# Tests
assert Solution().prisonAfterNDays([0,1,0,1,1,0,0,1], 7) == [0,0,1,1,0,0,0,0]
assert Solution().prisonAfterNDays([1,0,0,1,0,0,1,0], 1000000000) == [0,0,1,1,1,1,1,0]
assert Solution().prisonAfterNDays([1,0,0,1,0,0,0,1], 826) == [0,1,1,0,1,1,1,0]
```
#### File: leetcode/2020-07-month-long-challenge/day04.py
```python
class Solution:
def nthUglyNumber(self, n: int) -> int:
uglies = set([1]) # 1 is ugly by convention
for _ in range(n):
# Grab the next number in the sequence
ugly = min(uglies)
uglies.remove(ugly)
# Create the next numbers in that sequence
uglies.add(ugly * 2)
uglies.add(ugly * 3)
uglies.add(ugly * 5)
return ugly
# Tests
assert Solution().nthUglyNumber(10) == 12
assert Solution().nthUglyNumber(1) == 1
assert Solution().nthUglyNumber(10000) == 288325195312500000
```
#### File: leetcode/2020-07-month-long-challenge/day26.py
```python
class Solution:
def addDigits(self, num: int) -> int:
if num == 0:
return 0
elif num % 9 == 0:
return 9
else:
return num % 9
# Tests
assert Solution().addDigits(38) == 2
assert Solution().addDigits(0) == 0
```
#### File: leetcode/2020-07-month-long-challenge/day29.py
```python
class Solution:
def maxProfit(self, prices: [int]) -> int:
if len(prices) == 0:
return 0
buy = [0 for _ in prices]
sell = [0 for _ in prices]
buy[0] = -prices[0]
for day in range(1, len(prices)):
buy[day] = max(buy[day - 1], \
(sell[day - 2] if day >= 2 else 0) - prices[day])
sell[day] = max(sell[day - 1], buy[day - 1] + prices[day])
return sell[-1]
# Test
assert Solution().maxProfit([1,2,3,0,2]) == 3
```
#### File: leetcode/2020-08-month-long-challenge/day06.py
```python
class Solution:
def findDuplicates(self, nums: [int]) -> [int]:
# We have an array of length N that contains values from 1 to n, n ≤ N
# We need to keep track of the number we've already seen, for this we
# would need a list of m elements, m < ≤ n ≤ N
# This means we can actually use the input array as it is large enough,
# given that all values are positive we can flip them to negative to
# encode the seen values
duplicates = []
for number in nums:
value = abs(number) # Maybe this position has been used as a marker
seen = abs(number) - 1 # indices start at 0, values at 1
if nums[seen] < 0:
# We already found this number before
duplicates.append(value)
else:
# Mark the array for this number
nums[seen] *= -1
return duplicates
# Test
assert Solution().findDuplicates([4,3,2,7,8,2,3,1]) == [2,3]
```
#### File: leetcode/2020-08-month-long-challenge/day14.py
```python
import collections
class Solution:
def longestPalindrome(self, s: str) -> int:
# Counting frequencies isn't the interesting part here
frequencies = collections.Counter(s)
length = 0
# We want to keep track of how many letters appear in odd numbers
odd_letters = 0
for letter in frequencies:
count = frequencies[letter]
length += count # Include ALL THE THINGS
# ... but keep track of the extra letters we should not have
# included
if count % 2 == 1:
odd_letters += 1
# Now we have to substract the odd ones, except for one that can be put
# in the middle of our palindrome (that is, if they exist)
if odd_letters > 0:
return length - odd_letters + 1
else:
return length
# Tests
assert Solution().longestPalindrome("") == 0
assert Solution().longestPalindrome("abc") == 1
assert Solution().longestPalindrome("ABba") == 1
assert Solution().longestPalindrome("abba") == 4
assert Solution().longestPalindrome("abccccdd") == 7
```
#### File: leetcode/2020-08-month-long-challenge/day15.py
```python
class Solution:
def eraseOverlapIntervals(self, intervals: [[int]]) -> int:
# Edge case
if len(intervals) == 0:
return 0
# Convenience functions for code clarity
start = lambda interval: interval[0]
end = lambda interval: interval[1]
# Sort intervals by their end
intervals = sorted(intervals, key = end)
# Greedy!
intervals_to_remove = 0
previous_start = start(intervals[0])
previous_end = end(intervals[0])
for interval in intervals[1:]:
if start(interval) < previous_end:
intervals_to_remove += 1
else:
previous_start = start(interval)
previous_end = end(interval)
return intervals_to_remove
# Tests
assert Solution().eraseOverlapIntervals([[1,2],[2,3],[3,4],[1,3]]) == 1
assert Solution().eraseOverlapIntervals([[1,2],[1,2],[1,2]]) == 2
assert Solution().eraseOverlapIntervals([[1,2],[2,3]]) == 0
```
#### File: leetcode/2020-08-month-long-challenge/day17.py
```python
class Solution:
def distributeCandies(self, candies: int, num_people: int) -> [int]:
distribution = [0 for _ in range(num_people)]
# How many full rounds can we do?
# The first round will be 1, 2, 3, ..., n
# The second one will be n + 1, n + 2, n + 3, ... 2n
# The third one will be 2n + 1, 2n + 2, 2n + 3, ... 3n
# This means a total of n * (n + 1) / 2 + (round - 1) * n^2 for each
# round
rounds = 0
while True:
round_candies = num_people * (num_people + 1) // 2 \
+ rounds * num_people ** 2
if round_candies > candies:
break
else:
candies -= round_candies
rounds += 1
# Now that we know how many full rounds can be done, we can give all
# these rounds worth of candies
if rounds > 0:
distribution = [sum(round * num_people + position
for round in range(rounds))
for position in range(1, num_people + 1)]
# Then distribute the leftovers
position = 0
while candies > 0:
handout = rounds * num_people + position + 1
distribution[position] += min(candies, handout)
candies -= handout
position += 1
return distribution
# Tests
assert Solution().distributeCandies(10, 3) == [5,2,3]
assert Solution().distributeCandies(7, 4) == [1,2,3,1]
assert Solution().distributeCandies(10, 3) == [5,2,3]
assert Solution().distributeCandies(60, 4) == [15,18,15,12]
```
#### File: leetcode/2020-08-month-long-challenge/day21.py
```python
class Solution:
def sortArrayByParity(self, A: [int]) -> [int]:
# One-liner solution
# return sorted(A, key=lambda x: x % 2)
# Faster solution, as this is a simple partition
# evens = []
# odds = []
# for number in A:
# if number % 2 == 0:
# evens.append(number)
# else:
# odds.append(number)
# return evens + odds
# Even faster solution, thanks to Python list comprehensions
# (runtime beats 99% of submissions, memory usage beats 93%)
return [n for n in A if n % 2 == 0] + [n for n in A if n % 2 != 0]
# Tests
assert Solution().sortArrayByParity([]) == []
assert Solution().sortArrayByParity([1,2]) == [2,1]
assert Solution().sortArrayByParity([3,1,2,4]) in [[2,4,3,1], [4,2,3,1], [2,4,1,3], [4,2,1,3]]
```
#### File: leetcode/2020-09-month-long-challenge/day15.py
```python
class Solution:
def lengthOfLastWord(self, s: str) -> int:
return len(s.strip().split(' ')[-1])
# Tests
assert Solution().lengthOfLastWord("Hello World") == 5
assert Solution().lengthOfLastWord("a ") == 1
```
#### File: leetcode/2020-09-month-long-challenge/day16.py
```python
class Solution:
def findMaximumXOR(self, nums: [int]) -> int:
# This is beautiful, but too slow for LeetCode
# return max(a ^ b for a in nums for b in nums)
# An iterative approach is harder to read but faster
best = 0
mask = 0
prefixes = set()
# Input numbers are guaranteed to be < 2^31
for i in range(30, -1, -1):
mask |= (1 << i)
current_best = best | (1 << i)
for number in nums:
prefixes.add(number & mask)
for prefix in prefixes:
if (current_best ^ prefix) in prefixes:
best = current_best
break
prefixes.clear()
return best
# Tests
assert Solution().findMaximumXOR([3,10,5,25,2,8]) == 28
assert Solution().findMaximumXOR([0]) == 0
assert Solution().findMaximumXOR([2,4]) == 6
assert Solution().findMaximumXOR([8,10,2]) == 10
assert Solution().findMaximumXOR([14,70,53,83,49,91,36,80,92,51,66,70]) == 127
``` |
{
"source": "jkbockstael/projecteuler",
"score": 4
} |
#### File: jkbockstael/projecteuler/euler005-proc.py
```python
import sys
def divided_by_all_up_to(bound, number):
for divisor in range(1, bound + 1):
if number % divisor != 0:
return False
return True
def euler005(bound):
number = bound
while True:
if divided_by_all_up_to(bound, number):
return number
else:
number += 2
def parse_input(lines):
return int(lines[0].strip())
if __name__ == "__main__":
print(euler005(parse_input(sys.stdin.readlines())))
```
#### File: jkbockstael/projecteuler/euler006-proc.py
```python
import sys
def euler006(bound):
numbers_sum = 0
squares_sum = 0
for number in range(1, bound + 1):
numbers_sum += number
squares_sum += number ** 2
return numbers_sum ** 2 - squares_sum
def parse_input(lines):
return int(lines[0].strip())
if __name__ == "__main__":
print(euler006(parse_input(sys.stdin.readlines())))
```
#### File: jkbockstael/projecteuler/euler009-proc.py
```python
import sys
def euler009(total):
a = 1
while a <= total - (2 * a + 3):
b = a + 1
while b <= total - (b + 1):
c = total - (a + b)
if a ** 2 + b ** 2 == c ** 2:
return a * b * c
b += 1
a += 1
def parse_input(lines):
return int(lines[0].strip())
if __name__ == "__main__":
print(euler009(parse_input(sys.stdin.readlines())))
```
#### File: jkbockstael/projecteuler/euler011-proc.py
```python
import sys
def largest_product(count, grid):
size = len(grid)
largest_product = 0
# Lines
for line in range(0, size):
for column in range(0, size - count + 1):
product = 1
for offset in range(0, count):
product *= grid[line][column + offset]
if product > largest_product:
largest_product = product
# Columns
for line in range(0, size - count + 1):
for column in range(0, size):
product = 1
for offset in range(0, count):
product *= grid[line + offset][column]
if product > largest_product:
largest_product = product
# Diagonals
for line in range(0, size - count + 1):
for column in range(0, size - count + 1):
product = 1
for offset in range(0, count):
product *= grid[line + offset][column + offset]
if product > largest_product:
largest_product = product
# The other diagonals
for line in range(count - 1, size):
for column in range(0, size - count + 1):
product = 1
for offset in range(0, count):
product *= grid[line - offset][column + offset]
if product > largest_product:
largest_product = product
return largest_product
def euler011(grid):
return largest_product(4, grid)
def parse_input(lines):
output = []
for line in lines:
parsed_line = []
numbers = line.split(" ")
for number in numbers:
parsed_line.append(int(number))
output.append(parsed_line)
return output
if __name__ == "__main__":
print(euler011(parse_input(sys.stdin.readlines())))
```
#### File: jkbockstael/projecteuler/euler014-proc.py
```python
import sys
def euler014(bound):
best_start = 0
best_length = 0
for start in range(1, bound):
number = start
length = 0
while number != 1:
number = number // 2 if number % 2 == 0 else 3 * number + 1
length += 1
if length > best_length:
best_length = length
best_start = start
return best_start
def parse_input(lines):
return int(lines[0].strip())
if __name__ == "__main__":
print(euler014(parse_input(sys.stdin.readlines())))
``` |
{
"source": "jkboyce/hawkeye",
"score": 3
} |
#### File: hawkeye/hawkeye/tracker.py
```python
import sys
import os
import pickle
import copy
from math import sqrt, exp, isnan, atan, degrees, sin, cos, floor, ceil
from statistics import median, mean
import numpy as np
import cv2
from hawkeye.types import Balltag, Ballarc
class VideoScanner:
"""
This class uses OpenCV to process juggling video, determining ball
movements, juggler positions, and other high-level features. A typical use
of this is something like:
scanner = VideoScanner('video.mp4')
scanner.process()
notes = scanner.notes
print('found {} runs in video'.format(notes['runs']))
Scanning occurs in six distinct steps, and optionally you can specify
which steps to do (default is all), and whether to write results to
disk after processing.
The 'notes' dictionary contains all scan results, with the data recorded as
follows:
notes['version']:
sequential number incremented whenever notes dictionary format
changes (int)
notes['step']:
step number of last processing step completed (int)
notes['source']:
full path to source video (str)
notes['scanvideo']:
full path to scanned video, or None if identical to source video (str)
notes['scanner_params']:
parameters to configure the scanner; see default_scanner_params() for
format (dict)
notes['fps']:
source video frames per second (float)
notes['frame_width']:
source video frame width in pixels (int)
notes['frame_height']:
source video frame height in pixels (int)
notes['camera_tilt']:
inferred camera tilt angle in source video, in radians (float).
Positive camera tilt equates to a counterclockwise rotation of the
juggler in the video.
notes['frame_count']:
actual count of frames in source video (int)
notes['frame_count_estimate']:
estimated count of frames in source video, from metadata (int)
notes['meas'][framenum]:
list of Balltag objects in frame 'framenum' (list)
notes['body'][framenum]:
tuple describing body bounding box
(body_x, body_y, body_w, body_h, was_detected)
observed in frame 'framenum', where all values are in camera
coordinates and pixel units, and was_detected is a boolean indicating
whether the bounding box was a direct detection (True) or was inferred
(False) (tuple)
notes['origin'][framenum]:
tuple (origin_x, origin_y) of body origin in screen coordinates and
pixel units.
notes['arcs']:
list of Ballarc objects detected in video (list)
notes['g_px_per_frame_sq']:
inferred value of g (gravitational constant) in video, in units of
pixels/frame^2 (float)
notes['cm_per_pixel']:
inferred scale of video in juggling plane, in centimeters per pixel
(float)
notes['runs']:
number of runs detected in video (int)
notes['run'][run_num]:
run dictionary describing run number run_num (dict) -- SEE BELOW
The 'run_dict' dictionary for each run is defined as:
run_dict['balls']:
        estimated number of balls in the pattern (int)
run_dict['throws']:
number of throws in run (int)
run_dict['throw']:
list of Ballarc objects for throws in run (list)
run_dict['throws per sec']:
inferred throws per second for run, or None (float)
run_dict['frame range']:
tuple (frame_start, frame_end) of run's extent in source video (tuple
of ints)
run_dict['duration']:
duration in seconds of run (float)
run_dict['height']:
height of the pattern, in centimeters (float)
run_dict['width']:
width of the pattern, in centimeters (float)
run_dict['target throw point cm']:
ideal throw location from centerline (float)
run_dict['target catch point cm']:
ideal catch location from centerline (float)
"""
CURRENT_NOTES_VERSION = 4
def __init__(self, filename, scanvideo=None, params=None, notes=None):
"""
Initialize the video scanner. This doesn't do any actual processing;
see the process() method.
Args:
filename(string):
Filename of video to process. May be absolute path, or path
relative to the executable.
scanvideo(string, optional):
Filename of video to do image detection on. This is assumed to
be a rescaled version of the video in the 'filename' argument,
with the same frame rate. If provided, the object detector
in step 2 will use this version of the video and translate
coordinates to the original.
params(dict, optional):
Parameters to configure the scanner. The function
VideoScanner.default_scanner_params() returns a dict of the
expected format.
notes(dict, optional):
Notes dictionary for recording data into
Returns:
None
"""
if notes is None:
self.notes = dict()
self.notes['version'] = VideoScanner.CURRENT_NOTES_VERSION
self.notes['source'] = os.path.abspath(filename)
self.notes['scanvideo'] = (os.path.abspath(scanvideo)
if scanvideo is not None else None)
self.notes['scanner_params'] = (
VideoScanner.default_scanner_params()
if params is None else params)
self.notes['step'] = 0
else:
self.notes = notes
def process(self, steps=(1, 6), readnotes=False, writenotes=False,
notesdir=None, callback=None, verbosity=0):
"""
Process the video. Processing occurs in six distinct steps. The
default is to do all processing steps sequentially, but processing may
be broken up into multiple calls to this method if desired -- see the
'steps' argument.
All output is recorded in the self.notes dictionary. Optionally the
notes dictionary can be read in from disk prior to processing, and/or
written to disk after processing.
Args:
steps((int, int) tuple, optional):
Starting and finishing step numbers to execute. Default is
(1, 6), or all steps.
readnotes(bool, optional):
Should the notes dictionary be read from disk prior to
processing.
writenotes(bool, optional):
Should the notes dictionary be written to disk after the
final step of processing.
notesdir(string, optional):
Directory for the optional notes files. Can be an absolute
path, or a path relative to the video file. Default is the
same directory as the video file. Note: upon writing, if the
notes directory doesn't exist then it will be created.
callback(callable, optional):
A callable with call signature func([int], [int]) that may
be provided to update the caller on progress. If the
optional integer arguments are included, they are the step #
and estimated total # of steps in processing.
verbosity(int, optional):
Verbosity level for printing progress to standard output.
0 = no output, 1 = key steps, 2 = full output. Default is 0.
Returns:
None
"""
self._callback = callback
self._verbosity = verbosity
if self._verbosity >= 1:
print('Video scanner starting...')
if readnotes or writenotes:
dirname = os.path.dirname(self.notes['source'])
basename = os.path.basename(self.notes['source'])
basename_noext = os.path.splitext(basename)[0]
if notesdir is None:
_notesdir = dirname
elif os.path.isabs(notesdir):
_notesdir = notesdir
else:
_notesdir = os.path.join(dirname, notesdir)
step_start, step_end = steps
if step_start in (2, 3, 4, 5, 6):
if readnotes:
_notespath = os.path.join(_notesdir, '{}_notes{}.pkl'.format(
basename_noext, step_start - 1))
self.notes = VideoScanner.read_notes(_notespath)
else:
step_start = 1
for step in range(step_start, step_end + 1):
if step == 1:
self.get_video_metadata()
elif step == 2:
self.detect_objects(display=False)
elif step == 3:
self.build_initial_arcs()
elif step == 4:
self.EM_optimize()
elif step == 5:
self.detect_juggler(display=False)
elif step == 6:
self.analyze_juggling()
self.notes['step'] = step
if writenotes:
if step_end in (1, 2, 3, 4, 5):
_notespath = os.path.join(_notesdir,
'{}_notes{}.pkl'.format(
basename_noext, step_end))
else:
_notespath = os.path.join(_notesdir,
'{}_notes.pkl'.format(
basename_noext))
VideoScanner.write_notes(self.notes, _notespath)
if self._verbosity >= 1:
print('Video scanner done')
# --------------------------------------------------------------------------
# Step 1: Get video metadata
# --------------------------------------------------------------------------
def get_video_metadata(self):
"""
Find basic metadata about the video: dimensions, fps, and frame count.
Args:
None
Returns:
None
"""
notes = self.notes
if self._verbosity >= 1:
print('Getting metadata for video {}'.format(notes['source']))
cap = cv2.VideoCapture(notes['source'])
if not cap.isOpened():
raise ScannerException("Error opening video file {}".format(
notes['source']))
fps = cap.get(cv2.CAP_PROP_FPS)
framecount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
framewidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frameheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
cap.release()
notes['fps'] = fps
notes['frame_width'] = framewidth
notes['frame_height'] = frameheight
notes['frame_count_estimate'] = framecount
if self._verbosity >= 2:
print('width = {}, height = {}, fps = {}'.format(
framewidth, frameheight, fps))
print(f'estimated frame count = {framecount}\n')
# --------------------------------------------------------------------------
# Step 2: Extract moving features from video
# --------------------------------------------------------------------------
def detect_objects(self, display=False):
"""
Find coordinates of moving objects in each frame of a video, and store
in the self.notes data structure.
Args:
display(bool, optional):
if True then show video in a window while processing
Returns:
None
"""
notes = self.notes
notes['meas'] = dict()
if self._verbosity >= 1:
print('Object detection starting...')
fps = notes['fps']
framewidth = notes['frame_width']
frameheight = notes['frame_height']
framecount = notes['frame_count_estimate']
scanvideo = notes['scanvideo']
if scanvideo is None:
# no substitute scan video provided
if self._verbosity >= 2:
print('Scanning from video {}'.format(notes['source']))
cap = cv2.VideoCapture(notes['source'])
if not cap.isOpened():
raise ScannerException("Error opening video file {}".format(
notes['source']))
scan_framewidth, scan_frameheight = framewidth, frameheight
else:
if self._verbosity >= 2:
print(f'Scanning from video {scanvideo}')
cap = cv2.VideoCapture(scanvideo)
if not cap.isOpened():
raise ScannerException(f'Error opening video file {scanvideo}')
scan_framewidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
scan_frameheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
if self._verbosity >= 2:
print('width = {}, height = {}'.format(
scan_framewidth, scan_frameheight))
scan_scaledown = frameheight / scan_frameheight
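        # Convert coordinates in the (possibly rescaled) scan video back to
        # the original video's frame. This assumes the scan video covers the
        # same vertical extent, scaled to scan_frameheight, and is centered
        # horizontally within the original frame.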
def scan_to_video_coord(scan_x, scan_y):
orig_cropwidth = frameheight * (scan_framewidth / scan_frameheight)
orig_padleft = (framewidth - orig_cropwidth) / 2
orig_x = orig_padleft + scan_x * scan_scaledown
orig_y = scan_y * scan_scaledown
return orig_x, orig_y
notes['camera_tilt'] = 0.0
notes['scanner_params']['min_tags_per_arc'] = (
notes['scanner_params']['min_tags_per_arc_high_fps']
if fps >= 29
else notes['scanner_params']['min_tags_per_arc_low_fps'])
notes['scanner_params']['max_distance_pixels'] = (
notes['scanner_params']['max_distance_pixels_480'] *
frameheight / 480)
notes['scanner_params']['radius_window'] = (
notes['scanner_params']['radius_window_high_res']
if scan_frameheight >= 480
else notes['scanner_params']['radius_window_low_res'])
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
params = cv2.SimpleBlobDetector_Params()
params.filterByColor = True
params.blobColor = 255
params.filterByCircularity = True
params.minCircularity = 0.3
params.maxCircularity = 1.1
params.filterByInertia = False
params.filterByConvexity = False
params.filterByArea = True
params.minArea = (notes['scanner_params']['min_blob_area_high_res']
if scan_frameheight >= 480 else
notes['scanner_params']['min_blob_area_low_res'])
params.maxArea = (notes['scanner_params']['max_blob_area_high_res']
if scan_frameheight >= 480 else
notes['scanner_params']['max_blob_area_low_res'])
detector = cv2.SimpleBlobDetector_create(params)
framenum = framereads = 0
tag_count = 0
if display:
cv2.namedWindow(notes['source'])
while cap.isOpened():
ret, frame = cap.read()
framereads += 1
if not ret:
if self._verbosity >= 2:
print('VideoCapture.read() returned False '
'on frame read {}'.format(framereads))
if framereads > framecount:
break
continue
# run the background subtraction + blob detector to find balls
fgmask = fgbg.apply(frame)
keypoints = detector.detect(fgmask)
# process any ball detections
notes['meas'][framenum] = []
for kp in keypoints:
tag_total_weight = 1.0
"""
if body_average is not None:
if kp.pt[1] > body_average[1] + body_average[3]:
continue # skip point entirely
if kp.pt[1] > body_average[1]:
tag_total_weight = exp(2.0 *
(body_average[1] - kp.pt[1])
/ body_average[3])
"""
tag_x, tag_y = scan_to_video_coord(kp.pt[0], kp.pt[1])
tag_size = kp.size * scan_scaledown
notes['meas'][framenum].append(
Balltag(framenum, tag_x, tag_y, tag_size,
tag_total_weight))
tag_count += 1
if display:
cv2.circle(frame, (int(round(kp.pt[0])),
int(round(kp.pt[1]))),
int(kp.size), (0, 0, 255), 1)
if display:
cv2.imshow(notes['source'], frame)
# cv2.imshow('FG Mask MOG 2', fgmask)
if cv2.waitKey(10) & 0xFF == ord('q'): # Q on keyboard exits
break
framenum += 1
if self._callback is not None:
self._callback(framenum, framecount)
notes['frame_count'] = framenum
if self._verbosity >= 2:
print('actual frame count = {}'.format(notes['frame_count']))
cap.release()
if display:
cv2.destroyAllWindows()
if self._verbosity >= 1:
print(f'Object detection done: {tag_count} detections\n')
# --------------------------------------------------------------------------
# Step 3: Build initial set of arcs
# --------------------------------------------------------------------------
def build_initial_arcs(self):
"""
Create an initial set of Ballarcs from the measurements. Do this by
attempting to chain together neighboring measurements into paths with
the right parabolic shape.
Args:
None
Returns:
None
"""
notes = self.notes
if self._verbosity >= 1:
print('Build initial arcs starting...')
# Scan once to get a small number of arcs, to make a preliminary
# estimate of gravity
if self._verbosity >= 2:
print('building trial arcs...')
arcs = self.construct_arcs(maxcount=5)
self.find_global_params(arcs)
# Scan again to get all arcs
if self._verbosity >= 2:
print('building all arcs...')
arcs = self.construct_arcs()
self.find_global_params(arcs)
arcs.sort(key=lambda x: x.f_peak)
for id_, arc in enumerate(arcs, start=1):
arc.id_ = id_
notes['arcs'] = arcs
if self._verbosity >= 1:
print(f'Build initial arcs done: {len(arcs)} arcs created\n')
def construct_arcs(self, maxcount=None):
"""
Piece together neighboring measurements to build parabolic arcs.
We make two passes through this function, once to find a small number
(5) of high-confidence arcs that we use to estimate gravity. Then a
second pass finds all arcs.
Args:
maxcount(int):
maximum number of arcs to build
Returns:
list of Ballarcs
"""
notes = self.notes
if self._verbosity >= 3:
print('construct_arcs(): building neighbor lists...')
self.build_neighbor_lists()
if self._verbosity >= 3:
print('done')
arclist = []
done_making_arcs = False
        # Build a list of all tags that will be the starting points of new
        # arcs. Start from the top of the frame and move down.
tagqueue = []
for frame in range(notes['frame_count']):
tagqueue.extend(notes['meas'][frame])
tagqueue.sort(key=lambda t: t.y)
for tag in tagqueue:
tag.done = False
for tag1 in tagqueue:
if tag1.done:
continue
made_good_arc = False
for tag2 in tag1.neighbors:
if tag2.done:
continue
made_bad_arc = False
# Try to build an arc through points {tag1, tag2}.
# Maintain a frontier set of tags reachable in one step.
try:
arc = Ballarc()
arc.tags = {tag1, tag2}
taken_frames = {tag1.frame, tag2.frame}
frontier = set(tag1.neighbors) | set(tag2.neighbors)
frontier = {
t
for t in frontier
if t.frame not in taken_frames and t.done is False
}
tag1.weight = {arc: 1.0}
tag2.weight = {arc: 1.0}
default_cm_per_pixel = (
notes['scanner_params']['default_frame_height_cm']
/ notes['frame_height'])
if 'g_px_per_frame_sq' in notes:
arc.e = 0.5 * notes['g_px_per_frame_sq']
elif 'fps' in notes:
arc.e = 0.5 * 980.7 / (default_cm_per_pixel *
(notes['fps'])**2)
else:
arc.e = 0.5 * 980.7 / (
default_cm_per_pixel *
notes['scanner_params']['default_fps']**2)
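                    # Parabola model fit below (f = frame number, screen
                    # pixels): x(f) = a + b*(f - f_peak),
                    #          y(f) = c + e*(f - f_peak)**2.
                    # Camera tilt is handled later, in fit_arcs().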
# Initialize arc parameters to fit the first two points
arc.f_peak = ((tag1.y - tag2.y) / (2.0 * arc.e * (
tag2.frame - tag1.frame)) +
0.5 * (tag1.frame + tag2.frame))
arc.c = tag1.y - arc.e * (tag1.frame - arc.f_peak)**2
arc.b = (tag1.x - tag2.x) / (tag1.frame - tag2.frame)
arc.a = tag1.x - arc.b * (tag1.frame - arc.f_peak)
while True:
if len(frontier) == 0:
break
# Pick the tag in the frontier closest to the arc
temp = [(t, arc.get_distance_from_tag(t, notes))
for t in frontier]
nexttag, dist = min(temp, key=lambda x: x[1])
if (dist > notes['scanner_params']
['max_distance_pixels']):
break
# Update the frontier and other data structures, then
# optionally re-fit the arc including the new point
arc.tags.add(nexttag)
taken_frames.add(nexttag.frame)
frontier |= set(nexttag.neighbors)
frontier = {
t
for t in frontier
if t.frame not in taken_frames and t.done is False
}
nexttag.weight = {arc: 1.0}
if (len(arc.tags) >
notes['scanner_params']
['min_tags_to_curve_fit']):
self.fit_arcs([arc])
if isnan(arc.e) or (arc.e <= 0) or any(
arc.get_distance_from_tag(t, notes) >
notes['scanner_params']['max_distance_pixels']
for t in arc.tags):
made_bad_arc = True
break
# Arc is finished. Decide whether we want to keep it
if not made_bad_arc and self.eval_arc(
arc, requirepeak=False) == 0:
for t in arc.tags:
t.done = True
arclist.append(arc)
made_good_arc = True
if maxcount is not None and len(arclist) >= maxcount:
done_making_arcs = True
if self._verbosity >= 2 and maxcount is not None:
print(f' made arc number {len(arclist)}, '
f'frame_peak = {arc.f_peak:.1f}, '
f'accel = {arc.e:.3f}')
else:
for t in arc.tags:
t.weight = None
except RuntimeWarning:
made_good_arc = False
continue
if made_good_arc:
break # break out of tag2 loop
if done_making_arcs:
break # break out of tag1 loop
if self._callback is not None:
self._callback()
tag1.done = True # so we don't visit again in tag2 loop
if self._verbosity >= 2:
# Flag the tags that were assigned to arcs:
for tag in tagqueue:
tag.done = False
for arc in arclist:
for tag in arc.tags:
tag.done = True
print(
' done: {} of {} detections attached to {} arcs'.format(
sum(1 for t in tagqueue if t.done is True), len(tagqueue),
len(arclist)))
# Clean up
for tag in tagqueue:
del tag.neighbors
del tag.done
return arclist
def build_neighbor_lists(self):
"""
For each Balltag, build a list of its neighbors. This mapping is used
for building arcs efficiently.
Args:
None
Returns:
None
"""
notes = self.notes
frame_count = notes['frame_count']
v_max = None
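        # Cap candidate neighbor links at the speed (in pixels/frame) a ball
        # would reach free-falling through the full frame height:
        # v_max**2 = 2 * g * frame_height.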
if 'g_px_per_frame_sq' in notes:
v_max = (sqrt(2 * notes['g_px_per_frame_sq'] *
notes['frame_height']))
for frame in range(frame_count):
for tag in notes['meas'][frame]:
tag.neighbors = []
for frame2 in range(
max(0, frame -
notes['scanner_params']['max_frame_gap_in_arc']
- 1),
min(frame_count, frame +
notes['scanner_params']['max_frame_gap_in_arc']
+ 2)):
if frame2 == frame:
continue
tag.neighbors.extend(notes['meas'][frame2])
# sort by velocity needed to get from A to B, with an optional
# cap on velocity
def velocity(t):
return (sqrt((t.x - tag.x)**2 + (t.y - tag.y)**2) /
abs(t.frame - tag.frame))
temp = sorted([(t, velocity(t)) for t in tag.neighbors],
key=lambda t: t[1])
if v_max is not None:
temp = [(t, v) for t, v in temp if v <= v_max]
tag.neighbors = [t for t, v in temp]
def fit_arcs(self, arcs):
"""
Do a weighted least-squares fit of each arc (assumed to be a parabolic
trajectory) to the measured points.
Args:
arcs(list of Ballarc):
list of Ballarc objects to fit
Returns:
None
"""
notes = self.notes
s, c = sin(notes['camera_tilt']), cos(notes['camera_tilt'])
for arc in arcs:
if len(arc.tags) < 3:
continue
T0 = T1 = T2 = T3 = T4 = X1 = T1X1 = Y1 = T1Y1 = T2X1 = T2Y1 = 0
for tag in arc.tags:
t = tag.frame - arc.f_peak
x = tag.x
y = tag.y
try:
w = tag.weight[arc]
except AttributeError:
w = 1.0
T0 += w
T1 += w * t
T2 += w * t**2
T3 += w * t**3
T4 += w * t**4
X1 += w * x
T1X1 += w * t * x
T2X1 += w * t**2 * x
Y1 += w * y
T1Y1 += w * t * y
T2Y1 += w * t**2 * y
"""
numpy code for the next section:
Ax = np.array([[T2, T1], [T1, T0]])
Bx = np.array([[c * T1X1 - s * T1Y1], [c * X1 - s * Y1]])
X_new = np.dot(np.linalg.inv(Ax), Bx)
b_new = X_new[0, 0]
a_new = X_new[1, 0]
Ay = np.array([[T4, T3, T2], [T3, T2, T1], [T2, T1, T0]])
By = np.array([[c * T2Y1 + s * T2X1], [c * T1Y1 + s * T1X1],
[c * Y1 + s * X1]])
Y_new = np.dot(np.linalg.inv(Ay), By)
e_new = Y_new[0, 0]
d_new = Y_new[1, 0]
c_new = Y_new[2, 0]
"""
Ax_det = T0 * T2 - T1**2
Ay_det = 2*T1*T2*T3 + T0*T2*T4 - T0*T3**2 - T1**2*T4 - T2**3
if abs(Ax_det) < 1e-3 or abs(Ay_det) < 1e-3:
continue
Ax_inv_11 = T0
Ax_inv_12 = -T1
Ax_inv_21 = Ax_inv_12
Ax_inv_22 = T2
Bx_1 = c * T1X1 - s * T1Y1
Bx_2 = c * X1 - s * Y1
b_new = (Ax_inv_11 * Bx_1 + Ax_inv_12 * Bx_2) / Ax_det
a_new = (Ax_inv_21 * Bx_1 + Ax_inv_22 * Bx_2) / Ax_det
Ay_inv_11 = T0 * T2 - T1**2
Ay_inv_12 = T1 * T2 - T0 * T3
Ay_inv_13 = T1 * T3 - T2**2
Ay_inv_21 = Ay_inv_12
Ay_inv_22 = T0 * T4 - T2**2
Ay_inv_23 = T2 * T3 - T1 * T4
Ay_inv_31 = Ay_inv_13
Ay_inv_32 = Ay_inv_23
Ay_inv_33 = T2 * T4 - T3**2
By_1 = c * T2Y1 + s * T2X1
By_2 = c * T1Y1 + s * T1X1
By_3 = c * Y1 + s * X1
e_new = (Ay_inv_11*By_1 + Ay_inv_12*By_2 + Ay_inv_13*By_3) / Ay_det
d_new = (Ay_inv_21*By_1 + Ay_inv_22*By_2 + Ay_inv_23*By_3) / Ay_det
c_new = (Ay_inv_31*By_1 + Ay_inv_32*By_2 + Ay_inv_33*By_3) / Ay_det
if (isnan(a_new) or isnan(b_new) or isnan(c_new) or isnan(d_new)
or isnan(e_new) or (e_new == 0)):
continue
# Adjust the value of f_peak to make the new d parameter = 0
f_peak_delta = -d_new / (2.0 * e_new)
arc.f_peak += f_peak_delta
a_new += b_new * f_peak_delta
c_new += d_new * f_peak_delta + e_new * f_peak_delta**2
arc.a = a_new
arc.b = b_new
arc.c = c_new
arc.e = e_new
def eval_arc(self, arc, requirepeak=False, checkaccel=True):
"""
Decide whether an arc meets our quality standards.
Args:
arc(Ballarc):
arc to test
requirepeak(boolean):
True requires the arc to include tags on either side of the
arc's peak
checkaccel(boolean):
True requires the arc to have an acceleration that lies within
allowed bounds
Returns:
int:
Zero if arc is good and should be kept, failure code otherwise
"""
notes = self.notes
if (isnan(arc.a) or isnan(arc.b) or isnan(arc.c) or isnan(arc.e)
or isnan(arc.f_peak)):
return 1
if len(arc.tags) == 0:
return 5
if requirepeak:
f_min, f_max = arc.get_tag_range()
if not (f_min <= arc.f_peak <= f_max):
return 3
if checkaccel and not self.is_acceleration_good(arc.e):
return 4
close_tag_count = sum(1 for t in arc.tags if
arc.get_distance_from_tag(t, notes) <
notes['scanner_params']['max_distance_pixels'])
if close_tag_count < notes['scanner_params']['min_tags_per_arc']:
return 2
return 0
def is_acceleration_good(self, e):
"""
Decide whether the quadratic component of the arc's y-motion falls
within the allowed range.
Args:
e(float):
coefficient of frame**2 in arc's motion
Returns:
boolean:
True if acceleration is allowed, False otherwise
"""
notes = self.notes
if e < 0:
return False
# Criterion is based on how much we know (or can guess) about gravity
if 'g_px_per_frame_sq' in notes:
if (2 * e) < ((1 - notes['scanner_params']['g_window']) *
notes['g_px_per_frame_sq']):
return False
if (2 * e) > ((1 + notes['scanner_params']['g_window']) *
notes['g_px_per_frame_sq']):
return False
else:
max_cm_per_pixel = (notes['scanner_params']['max_frame_height_cm']
/ notes['frame_height'])
if 'fps' in notes:
if (2 * e) < 980.7 / (max_cm_per_pixel * (notes['fps'])**2):
return False
else:
if (2 * e) < 980.7 / (max_cm_per_pixel *
notes['scanner_params']['max_fps']**2):
return False
return True
def find_global_params(self, arcs):
"""
Calculate the acceleration of gravity and the physical scale from a set
of arc measurements, and add them to notes.
Args:
arcs(list of Ballarc)
arcs fitted to measurements
Returns:
None
"""
notes = self.notes
if len(arcs) == 0:
return
most_tagged_arcs = sorted(arcs, key=lambda a: len(a.tags),
reverse=True)
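        # The fit gives y(f) = c + e*(f - f_peak)**2, so the acceleration is
        # 2*e pixels/frame^2; use the median over the most-tagged arcs for
        # robustness. Physical scale follows from g = 980.7 cm/s^2.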
g_px_per_frame_sq = 2 * median(a.e for a in most_tagged_arcs[:10])
notes['g_px_per_frame_sq'] = g_px_per_frame_sq
if 'fps' in notes:
fps = notes['fps']
notes['cm_per_pixel'] = 980.7 / (g_px_per_frame_sq * fps**2)
if self._verbosity >= 2:
print('g (pixels/frame^2) = {:.6f}, cm/pixel = {:.6f}'.format(
notes['g_px_per_frame_sq'], notes['cm_per_pixel']))
# --------------------------------------------------------------------------
# Step 4: Refine arcs with EM algorithm
# --------------------------------------------------------------------------
def EM_optimize(self):
"""
Run the Expectation Maximization (EM) algorithm to optimize the set of
parabolas. This alternates between calculating weights for each tag's
affiliation with each arc (E step), and using weighted least-squares
fitting to refine the parabolas (M step). Try to merge and prune out
bad arcs as we go.
References for EM algorithm:
        - <NAME>., "The Expectation Maximization Algorithm", IEEE Signal
Processing Magazine, vol. 13, no. 6, pp. 47–60, November 1996.
- <NAME>. et al, "Detection of Thrown Objects in Indoor and
Outdoor Scenes", Proceedings of the 2007 IEEE/RSJ International
Conference on Intelligent Robots and Systems, IROS 2007.
Args:
None
Returns:
None
"""
notes = self.notes
arcs = notes['arcs']
keep_iterating = True
if self._verbosity >= 1:
print('EM optimization starting...')
arcs_before = len(arcs)
"""
It's important to do these steps in a certain order. In particular
we want to do a merge step before we calculate weights, since the
latter can attach a lot of spurious detection events to arcs and make
the "obvious" mergers harder to detect. We also want to always follow
camera tilt estimation by a least-squares fit, to adapt arc parameters
to the new tilt angle.
Under most circumstances the first merge and prune steps will do nearly
all of the work, and the EM steps will make final tweaks.
"""
while keep_iterating:
self.estimate_camera_tilt(arcs)
if self._verbosity >= 2:
print('fitting arcs...')
self.fit_arcs(arcs)
keep_iterating = False
if self._verbosity >= 2:
print('merging arcs...')
for arc in arcs:
arc.done = False
while self.merge_arcs(arcs):
keep_iterating = True
if self._verbosity >= 2:
print('pruning arcs...')
while self.prune_arcs(arcs):
keep_iterating = True
if self._verbosity >= 2:
print('calculating weights...')
self.calculate_weights(arcs)
if self._verbosity >= 2:
print('fitting arcs...')
self.fit_arcs(arcs)
self.clean_notes()
# camera tilt estimation using final tag/arc assignments
self.estimate_camera_tilt(arcs)
if self._verbosity >= 2:
print('fitting arcs...')
self.fit_arcs(arcs)
arcs.sort(key=lambda x: x.f_peak)
if self._verbosity >= 1:
print(f'EM done: {arcs_before} arcs before, {len(arcs)} after\n')
def calculate_weights(self, arcs):
"""
For each measured point, calculate a set of normalized weights for each
arc. This is used for least-squares fitting in the EM algorithm.
Args:
arcs(list of Ballarc):
list of Ballarc objects
Returns:
None
"""
notes = self.notes
for frame in notes['meas']:
for tag in notes['meas'][frame]:
tag.weight = dict()
for arc in arcs:
# Tag must be within a certain size range to attach to an arc
arc_mradius = arc.get_median_tag_radius()
r_min, r_max = (arc_mradius * (1 - notes['scanner_params']
['radius_window']),
arc_mradius * (1 + notes['scanner_params']
['radius_window']))
arc.tags = set()
f_min, f_max = arc.get_frame_range(notes)
for frame in range(f_min, f_max):
x, y = arc.get_position(frame, notes)
for tag in notes['meas'][frame]:
if not (r_min <= tag.radius <= r_max):
continue
distsq_norm = (((x - tag.x)**2 + (y - tag.y)**2) /
notes['scanner_params']['sigmasq'])
if distsq_norm < 5.0:
tag.weight[arc] = exp(-distsq_norm)
for frame in notes['meas']:
for tag in notes['meas'][frame]:
weight_sum = sum(tag.weight.values())
if weight_sum > 1e-5:
for arc in tag.weight:
tag.weight[arc] = (tag.weight[arc] * tag.total_weight /
weight_sum)
arc.tags.add(tag)
def estimate_camera_tilt(self, arcs):
"""
        Estimate the camera tilt angle of the video (stored in radians), based
        on estimates of the x- and y-components of gravity. The acceleration in
        each direction is found with a weighted least-squares fit.
Our sign convention is that a positive camera tilt angle corresponds to
an apparent counterclockwise rotation of the juggler in the video. We
transform from juggler coordinates to screen coordinates with:
x_screen = x_juggler * cos(t) + y_juggler * sin(t)
y_screen = -x_juggler * sin(t) + y_juggler * cos(t)
Args:
arcs(list of Ballarc):
list of Ballarc objects in video
Returns:
None
"""
if self._verbosity >= 2:
print('estimating camera tilt...')
notes = self.notes
tilt_sum = 0.0
tilt_count = 0
for arc in arcs:
if len(arc.tags) < 3:
continue
T0 = T1 = T2 = T3 = T4 = X1 = T1X1 = T2X1 = Y1 = T1Y1 = T2Y1 = 0
for tag in arc.tags:
t = tag.frame - arc.f_peak
x = tag.x
y = tag.y
try:
w = tag.weight[arc]
except AttributeError:
w = 1.0
T0 += w
T1 += w * t
T2 += w * t**2
T3 += w * t**3
T4 += w * t**4
X1 += w * x
T1X1 += w * t * x
T2X1 += w * t**2 * x
Y1 += w * y
T1Y1 += w * t * y
T2Y1 += w * t**2 * y
"""
numpy code for the next section:
A = np.array([[T4, T3, T2], [T3, T2, T1], [T2, T1, T0]])
A_inv = np.linalg.inv(A)
B_y = np.array([[T2Y1], [T1Y1], [Y1]])
coefs_y = np.dot(A_inv, B_y)
e_y = coefs_y[0, 0] # acceleration along y direction
B_x = np.array([[T2X1], [T1X1], [X1]])
coefs_x = np.dot(A_inv, B_x)
e_x = coefs_x[0, 0] # acceleration along x direction
"""
A_det = 2*T1*T2*T3 + T0*T2*T4 - T0*T3**2 - T1**2*T4 - T2**3
if abs(A_det) < 1e-3:
continue
A_inv_11 = T0 * T2 - T1**2
A_inv_12 = T1 * T2 - T0 * T3
A_inv_13 = T1 * T3 - T2**2
e_y = (A_inv_11*T2Y1 + A_inv_12*T1Y1 + A_inv_13*Y1) / A_det
e_x = (A_inv_11*T2X1 + A_inv_12*T1X1 + A_inv_13*X1) / A_det
if self.is_acceleration_good(e_y):
tilt = atan(e_x / e_y)
tilt_sum += tilt
tilt_count += 1
notes['camera_tilt'] = ((tilt_sum / tilt_count) if tilt_count > 0
else 0.0)
if self._verbosity >= 2:
print(' camera tilt = {:.6f} degrees'.format(
degrees(notes['camera_tilt'])))
def merge_arcs(self, arcs):
"""
Find arcs that are duplicates -- where one arc adequately describes the
tags assigned to another arc -- and merge them. Return after a single
merger, i.e., call this repeatedly to merge all arcs.
Args:
arcs(list of Ballarc):
list of Ballarc objects to merge
Returns:
boolean:
True if an arc was eliminated, False otherwise
"""
notes = self.notes
for arc1 in arcs:
if arc1.done:
continue
if len(arc1.tags) == 0:
arc1.done = True
continue
f_min, f_max = arc1.get_frame_range(notes)
arc1_mradius = arc1.get_median_tag_radius()
r_min1, r_max1 = (arc1_mradius * (1 - notes['scanner_params']
['radius_window']),
arc1_mradius * (1 + notes['scanner_params']
['radius_window']))
taglist1 = [t for t in arc1.tags
if (arc1.get_distance_from_tag(t, notes) <
notes['scanner_params']['max_distance_pixels']
and t.total_weight > 0.95
and (r_min1 <= t.radius <= r_max1))]
if len(taglist1) < 3:
arc1.done = True
continue
for arc2 in arcs:
if arc2 is arc1:
continue
if len(arc2.tags) == 0:
arc2.done = True
continue
f2_min, f2_max = arc2.get_frame_range(notes)
if f2_max < f_min or f2_min > f_max:
continue
# debug_focus = (arc1.id_ == 32 and arc2.id_ == 39)
debug_focus = False
if debug_focus:
print(' trying to merge arc1={} and arc2={}'.format(
arc1.id_, arc2.id_))
"""
Try to build a new arc that merges the tags for each of
{arc1, arc2}. If the new arc adequately fits the combined
tags then the arcs can be merged.
"""
arc2_mradius = arc2.get_median_tag_radius()
r_min2, r_max2 = (arc2_mradius * (1 - notes['scanner_params']
['radius_window']),
arc2_mradius * (1 + notes['scanner_params']
['radius_window']))
taglist2 = [t for t in arc2.tags
if (arc2.get_distance_from_tag(t, notes) <
notes['scanner_params']['max_distance_pixels']
and t.total_weight > 0.95
and (r_min2 <= t.radius <= r_max2))]
if len(taglist2) < 3:
continue
new_arc = copy.copy(arc1)
new_arc.tags = set(taglist1 + taglist2)
if (len(new_arc.tags) <
notes['scanner_params']['min_tags_per_arc']):
continue
if debug_focus:
print(' arc1 tags = {}, arc2 tags = {}, '
'combined tags = {}'.format(
len(taglist1), len(taglist2), len(new_arc.tags)))
for tag in new_arc.tags:
tag.weight[new_arc] = tag.total_weight
self.fit_arcs([new_arc])
tags_poorly_fitted = sum(
new_arc.get_distance_from_tag(tag, notes) >
notes['scanner_params']['max_distance_pixels']
for tag in new_arc.tags)
if debug_focus:
arc1_tags = sorted([(t.frame, round(t.x), round(t.y),
arc1.id_) for t in taglist1],
key=lambda x: x[0])
print(' arc1 good tags = {}'.format(arc1_tags))
arc2_tags = sorted([(t.frame, round(t.x), round(t.y),
arc2.id_) for t in taglist2],
key=lambda x: x[0])
print(' arc2 good tags = {}'.format(arc2_tags))
print(' tags poorly fitted = {}'.format(
tags_poorly_fitted))
if tags_poorly_fitted > 2:
# merging didn't work
for tag in new_arc.tags:
del tag.weight[new_arc]
if debug_focus:
print(' # tags poorly fitted = {}...exiting'.format(
tags_poorly_fitted))
poor_tags1 = [(t.frame, round(t.x), round(t.y),
round(new_arc.get_distance_from_tag(
t, notes)),
arc1.id_) for t in taglist1 if
new_arc.get_distance_from_tag(t, notes) >
notes['scanner_params']
['max_distance_pixels']]
poor_tags2 = [(t.frame, round(t.x), round(t.y),
round(new_arc.get_distance_from_tag(
t, notes)),
arc2.id_) for t in taglist2 if
new_arc.get_distance_from_tag(t, notes) >
notes['scanner_params']
['max_distance_pixels']]
poor_tags = poor_tags1 + poor_tags2
poor_tags.sort(key=lambda x: x[0])
print(poor_tags)
continue
# arcs can be merged. Remove the second arc, and retain
# parameters of the new merged arc.
arcs.remove(arc2)
for tag in arc2.tags:
try:
del tag.weight[arc2]
except (AttributeError, TypeError, KeyError):
pass
arc1.f_peak = new_arc.f_peak
arc1.a = new_arc.a
arc1.b = new_arc.b
arc1.c = new_arc.c
arc1.e = new_arc.e
arc1.tags = new_arc.tags
if self._verbosity >= 2:
f1_min, _ = arc1.get_tag_range()
print(" merged arc {} at frame {} "
"into arc {} at frame {}".format(
arc2.id_, f2_min, arc1.id_, f1_min))
return True
arc1.done = True # mark so we don't revisit
if self._callback is not None:
self._callback()
return False
def prune_arcs(self, arcs):
"""
Eliminate arcs that don't meet the quality standard.
Args:
arcs(list of Ballarc):
list of Ballarc objects to prune
Returns:
boolean:
True if an arc was pruned, False otherwise
"""
notes = self.notes
for arc in arcs:
res = self.eval_arc(arc, requirepeak=False)
if res > 0:
arcs.remove(arc)
for tag in arc.tags:
try:
del tag.weight[arc]
except (AttributeError, TypeError, KeyError):
pass
if self._verbosity >= 2:
f_min, f_max = arc.get_frame_range(notes)
if res == 1:
cause = 'numerics'
elif res == 2:
cause = 'too few close tags'
elif res == 3:
cause = 'no peak'
elif res == 4:
cause = 'accel'
else:
cause = 'unknown reason'
print(' removed arc {} starting at frame {}: {}'.format(
arc.id_, f_min, cause))
if self._callback is not None:
self._callback()
return True
return False
def clean_notes(self):
"""
Clean up the notes structure. Toss out tags that don't fit to arcs,
then delete arcs that don't meet the quality standard. Make a final
assignment of tags to arcs.
Args:
None
Returns:
None
"""
notes = self.notes
if self._verbosity >= 2:
print('cleaning notes...')
for frame in notes['meas']:
for tag in notes['meas'][frame]:
tag.done = False
for arc in notes['arcs']:
arc.done = False
tags_removed = tags_remaining = arcs_removed = 0
keep_cleaning = True
while keep_cleaning:
for frame in notes['meas']:
tags_to_kill = []
for tag in notes['meas'][frame]:
if tag.done:
continue
if not self.is_tag_good(tag):
tags_to_kill.append(tag)
continue
tag.done = True
for tag in tags_to_kill:
notes['meas'][frame].remove(tag)
for arc in tag.weight:
arc.tags.remove(tag)
arc.done = False
tags_removed += 1
if self._callback is not None:
self._callback()
arcs_to_kill = []
keep_cleaning = False
for arc in notes['arcs']:
if arc.done:
continue
if self.eval_arc(arc, requirepeak=True) > 0:
arcs_to_kill.append(arc)
continue
arc.done = True
for arc in arcs_to_kill:
notes['arcs'].remove(arc)
for tag in arc.tags:
try:
del tag.weight[arc]
except (AttributeError, TypeError, KeyError):
pass
tag.done = False
keep_cleaning = True
arcs_removed += 1
if self._verbosity >= 2:
f_min, _ = arc.get_frame_range(notes)
print(' removed arc {} starting at frame {}'.format(
arc.id_, f_min))
# Final cleanup: Delete unneeded data and make final assignments of
# tags to arcs.
for arc in notes['arcs']:
del arc.done
arc.tags = set()
for frame in notes['meas']:
for tag in notes['meas'][frame]:
temp = [(arc, arc.get_distance_from_tag(tag, notes))
for arc in tag.weight]
final_arc, _ = min(temp, key=lambda x: x[1])
tag.arc = final_arc
final_arc.tags.add(tag)
del tag.weight
del tag.done
tags_remaining += 1
if self._verbosity >= 2:
print(f'cleaning done: {tags_removed} detections removed, '
f'{tags_remaining} remaining, {arcs_removed} arcs removed')
def is_tag_good(self, tag):
if tag.arc is not None:
return True
if tag.weight is None:
return False
notes = self.notes
return any(arc.get_distance_from_tag(tag, notes) <
notes['scanner_params']['max_distance_pixels']
for arc in tag.weight)
# --------------------------------------------------------------------------
# Step 5: Find juggler location in video
# --------------------------------------------------------------------------
def detect_juggler(self, display=False):
"""
Find coordinates of the juggler's body in each frame of the video
containing juggling, and store in the self.notes dictionary.
Args:
display(bool, optional):
if True then show video in a window while processing
Returns:
None
"""
notes = self.notes
notes['body'] = dict()
if self._verbosity >= 1:
print('Juggler detection starting...')
# Figure out which frame numbers to scan. To save time we'll only
# process frames that contain juggling.
arc_count = [0] * notes['frame_count']
arc_xaverage = [0] * notes['frame_count']
for arc in notes['arcs']:
start, end = arc.get_frame_range(notes)
for framenum in range(start, end + 1):
x, _ = arc.get_position(framenum, notes)
arc_xaverage[framenum] += x
arc_count[framenum] += 1
for framenum in range(notes['frame_count']):
if arc_count[framenum] > 0:
arc_xaverage[framenum] /= arc_count[framenum]
body_frames_total = sum(1 for count in arc_count if count > 0)
# For speed we don't run the detector on every frame. Aim for a
# detection rate of around 15 Hertz.
stride = max(1, int(round(notes['fps'] / 15)))
if self._verbosity >= 2:
print(f'Processing {body_frames_total} out of '
f'{notes["frame_count"]} frames containing juggling '
f'(stride = {stride})')
if body_frames_total == 0:
if self._verbosity >= 2:
print('Nothing to scan, exiting...')
if self._verbosity >= 1:
print('Juggler detection done\n')
return
# Open the capture stream and work out scaling function to map
# the (potentially rescaled) scanning video back to the original
# video's coordinates.
framewidth = notes['frame_width']
frameheight = notes['frame_height']
scanvideo = notes['scanvideo']
if scanvideo is None:
cap = cv2.VideoCapture(notes['source'])
if not cap.isOpened():
raise ScannerException("Error opening video file {}".format(
notes['source']))
scan_framewidth, scan_frameheight = framewidth, frameheight
else:
cap = cv2.VideoCapture(scanvideo)
if not cap.isOpened():
raise ScannerException(f'Error opening video file {scanvideo}')
scan_framewidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
scan_frameheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
scan_scaledown = frameheight / scan_frameheight
def scan_to_video_coord(scan_x, scan_y):
orig_cropwidth = frameheight * (scan_framewidth / scan_frameheight)
orig_padleft = (framewidth - orig_cropwidth) / 2
orig_x = orig_padleft + scan_x * scan_scaledown
orig_y = scan_y * scan_scaledown
return orig_x, orig_y
# variables for scanning loop
body_frames_to_average = int(round(
notes['fps']
* notes['scanner_params']['body_averaging_time_window_secs']
/ stride))
if body_frames_to_average < 1:
body_frames_to_average = 1
body_average = None
body_frames_averaged = 0
body_frames_processed = 0
body_frames_with_detections = 0
framenum = 0
for det_bbox, det_framenum in self.get_detections(cap, arc_count,
arc_xaverage,
stride=stride):
if det_framenum > framenum + body_frames_to_average*stride:
# skipping too far ahead; reset average
body_average = None
while framenum <= det_framenum:
if arc_count[framenum] == 0:
framenum += 1
continue # nothing to do this frame
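                # Smooth the detected bounding box with an incremental running
                # average; once body_frames_averaged reaches its cap this acts
                # like an exponential moving average with
                # alpha = 1 / body_frames_to_average.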
if framenum == det_framenum:
if body_average is None:
body_average = det_bbox
body_frames_averaged = 1
else:
body_frames_averaged = min(body_frames_to_average,
body_frames_averaged + 1)
temp2 = 1 / body_frames_averaged
temp1 = 1.0 - temp2
x, y, w, h = det_bbox
body_average = (body_average[0] * temp1 + x * temp2,
body_average[1] * temp1 + y * temp2,
body_average[2] * temp1 + w * temp2,
body_average[3] * temp1 + h * temp2)
body_frames_with_detections += 1
if body_average is not None:
body_x, body_y = scan_to_video_coord(body_average[0],
body_average[1])
body_w = body_average[2] * scan_scaledown
body_h = body_average[3] * scan_scaledown
notes['body'][framenum] = (body_x, body_y, body_w, body_h,
framenum == det_framenum)
body_frames_processed += 1
framenum += 1
if self._callback is not None:
self._callback(body_frames_processed, body_frames_total)
cap.release()
if self._verbosity >= 1:
print(f'Juggler detection done: Found juggler in '
f'{body_frames_with_detections} out of '
f'{body_frames_total} frames\n')
def get_detections(self, cap, arc_count, arc_xaverage, stride=1,
display=False):
"""
Iterate over successive juggler detections from the video.
The YOLOv2-tiny neural network is used to recognize the juggler.
Args:
cap(OpenCV VideoCapture object):
video stream of frames
arc_count(list of ints):
number of arcs present in a given frame number of the video
arc_xaverage(list of floats):
when arc_count>0, average x-value of arcs present
stride(int, optional):
spacing between successive frames during detection
display(bool, optional):
if True then show video in a window while processing
Yields:
Tuples of the form ((x, y, w, h), framenum), where the first part
is the bounding box in the video frame
"""
notes = self.notes
if display:
cv2.namedWindow(notes['source'])
def draw_YOLO_detection(img, class_id, color,
x, y, x_plus_w, y_plus_h, confidence):
label = f'{str(classes[class_id])} {confidence:.2f}'
cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
cv2.putText(img, label, (x-10, y-10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
# Initialize YOLO network
if getattr(sys, 'frozen', False):
# we are running in a bundle
base_dir = sys._MEIPASS
else:
# we are running in a normal Python environment
base_dir = os.path.dirname(os.path.realpath(__file__))
YOLO_classes_file = os.path.join(base_dir,
'resources/yolo-classes.txt')
YOLO_weights_file = os.path.join(base_dir,
'resources/yolov2-tiny.weights')
YOLO_config_file = os.path.join(base_dir,
'resources/yolov2-tiny.cfg')
classes = None
with open(YOLO_classes_file, 'r') as f:
classes = [line.strip() for line in f.readlines()]
net = cv2.dnn.readNet(YOLO_weights_file, YOLO_config_file)
# net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL)
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1]
for i in net.getUnconnectedOutLayers()]
conf_threshold = 0.5
nms_threshold = 0.4
yolo_scale = 0.00392 # scale RGB from 0-255 to 0.0-1.0
# Number of images to send through the network at a time.
#
# Quick benchmark on a quad-core desktop PC shows a small benefit
# to a batch size of 4:
# batch_size fps
# 1 20.3
# 2 22.1
# 4 24.1*
# 8 23.9
# 16 23.2
yolo_batch_size = 4
framecount = notes['frame_count']
last_frame_to_scan = max(frame for frame in range(framecount)
if arc_count[frame] > 0)
framenum = framereads = 0
prev_frame_scanned = None
frames = []
metadata = []
while cap.isOpened():
ret, frame = cap.read()
framereads += 1
if not ret:
if framereads > framecount:
return
continue
scan_this_frame = (arc_count[framenum] > 0 and
(prev_frame_scanned is None
or (framenum - prev_frame_scanned) >= stride))
if scan_this_frame:
frames.append(frame)
metadata.append((frame.shape[1], frame.shape[0], framenum))
prev_frame_scanned = framenum
run_batch = (len(frames) == yolo_batch_size or
(framenum == last_frame_to_scan and len(frames) > 0))
if run_batch:
# run the YOLO network to identify objects
blob = cv2.dnn.blobFromImages(frames, yolo_scale, (416, 416),
(0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
# DNN module returns a differently-shaped output for
# batch sizes > 1:
if len(frames) > 1:
outs = outs[0]
# print(f'blob shape: {blob.shape}, '
# f'outs shape: {np.shape(outs)}')
# Process network outputs. The first four elements of
# `detection` define a bounding box, the rest is a
# vector of class probabilities.
for index, out in enumerate(outs):
class_ids = []
confidences = []
boxes = []
bf_width, bf_height, bf_framenum = metadata[index]
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > conf_threshold:
center_x = int(detection[0] * bf_width)
center_y = int(detection[1] * bf_height)
w = int(detection[2] * bf_width)
h = int(detection[3] * bf_height)
x = center_x - w / 2
y = center_y - h / 2
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([x, y, w, h])
# Do non-maximum suppression on boxes we detected.
# This in effect de-duplicates the detection events
# and produces a single bounding box for each.
kept_indices = cv2.dnn.NMSBoxes(boxes, confidences,
conf_threshold,
nms_threshold)
# Pick out the people detections. For some reason
# NMSBoxes wraps each index into a single-element list.
person_indices = [elem[0] for elem in kept_indices
if str(classes[class_ids[elem[0]]])
== 'person']
best_person = None
if len(person_indices) == 1:
best_person = person_indices[0]
elif len(person_indices) > 1:
# multiple people, pick one closest to centerline
# of juggling
def dist(i):
return abs(boxes[i][0] + 0.5 * boxes[i][2]
                                           - arc_xaverage[bf_framenum])
best_person = min(person_indices, key=dist)
if display:
frame = frames[index]
for elem in kept_indices:
index = elem[0]
class_id = class_ids[index]
color = ((0, 255, 255) if index == best_person
else (0, 255, 0))
x, y, w, h = boxes[index]
confidence = confidences[index]
draw_YOLO_detection(frame, class_id, color,
round(x), round(y),
round(x+w), round(y+h),
confidence)
cv2.imshow(notes['source'], frame)
if cv2.waitKey(10) & 0xFF == ord('q'):
return
if best_person is not None:
yield (boxes[best_person], bf_framenum)
frames = []
metadata = []
framenum += 1
if display:
cv2.destroyAllWindows()
# --------------------------------------------------------------------------
# Step 6: Analyze juggling patterns
# --------------------------------------------------------------------------
def analyze_juggling(self):
"""
Build out a higher-level description of the juggling using the
individual throw arcs we found in steps 1-5.
Args:
None
Returns:
None
"""
notes = self.notes
if self._verbosity >= 1:
print('Juggling analyzer starting...')
self.set_body_origin()
self.remove_tags_below_hands()
self.compile_arc_data()
runs = self.find_runs()
if self._verbosity >= 2:
print(f'Number of runs detected = {notes["runs"]}')
# Analyze each run in turn. All run-related information is stored in
# a dictionary called run_dict.
notes['run'] = list()
neworigin = dict()
for run_id, run in enumerate(runs, start=1):
# assign sequence numbers
for throw_id, arc in enumerate(sorted(
run, key=lambda x: x.f_throw), start=1):
arc.run_id = run_id
arc.throw_id = throw_id
run_dict = dict()
run_dict['throw'] = run
run_dict['throws'] = len(run)
if self._verbosity >= 2:
print(f'--- Analyzing run {run_id} ------------------------')
print(f'Number of arcs detected = {run_dict["throws"]}')
f_firstthrow = min(arc.f_throw for arc in run)
f_lastthrow = max(arc.f_throw for arc in run)
f_lastcatch = max(arc.f_catch for arc in run)
run_dict['frame range'] = (f_firstthrow, f_lastcatch)
run_dict['duration'] = (f_lastcatch - f_firstthrow) / notes['fps']
if len(run) > 2:
run_dict['height'] = notes['cm_per_pixel'] * mean(
arc.height for arc in run[2:])
else:
run_dict['height'] = None
if f_lastthrow != f_firstthrow:
run_dict['throws per sec'] = (
(run_dict['throws'] - 1) /
((f_lastthrow - f_firstthrow) / notes['fps']))
else:
# likely just a single throw in the run
run_dict['throws per sec'] = None
self.assign_hands(run_dict)
self.connect_arcs(run_dict)
self.estimate_ball_count(run_dict)
self.analyze_run_form(run_dict)
notes['run'].append(run_dict)
# keep body origin coordinates only for frames in a run
for f in range(floor(f_firstthrow), ceil(f_lastcatch) + 1):
if f in notes['origin']:
neworigin[f] = notes['origin'][f]
if self._callback is not None:
self._callback()
notes['origin'] = neworigin
if self._verbosity >= 2:
print('--------------------------------------------')
if self._verbosity >= 1:
print('Juggling analyzer done')
def set_body_origin(self):
"""
        Define a centerpoint for each frame containing juggling: a point
        (in screen coordinates) on the midline of the body, at the usual
        throwing/catching elevation.
First we fill in any missing body measurements with an estimate, in
case the detector didn't work.
"""
notes = self.notes
notes['origin'] = dict()
if self._verbosity >= 2:
print('setting hand levels...')
arc_count = [0] * notes['frame_count']
for arc in notes['arcs']:
start, end = arc.get_frame_range(notes)
for framenum in range(start, end + 1):
arc_count[framenum] += 1
last_body = None
bodies_added = 0
for framenum in range(0, notes['frame_count']):
if arc_count[framenum] == 0:
continue
if framenum in notes['body']:
last_body = notes['body'][framenum]
else:
"""
Body was not detected for this frame. Estimate the body box
based on tagged ball positions. Find the most extremal tags
nearby in time.
"""
f_min = max(framenum - 120, 0)
f_max = min(framenum + 120, notes['frame_count'])
nearby_tags = []
for frame in range(f_min, f_max):
nearby_tags.extend(notes['meas'][frame])
if len(nearby_tags) > 0:
x_sorted_tags = sorted(nearby_tags, key=lambda t: t.x)
x_min = median(t.x for t in x_sorted_tags[:5])
x_max = median(t.x for t in x_sorted_tags[-5:])
if last_body is not None:
_, y, w, h, _ = last_body
x = 0.5 * (x_min + x_max - w)
else:
y_sorted_tags = sorted(nearby_tags, key=lambda t: t.y)
y_max = median(t.y for t in y_sorted_tags[-5:])
w = 0.7 * (x_max - x_min) # make educated guesses
h = 0.8 * w
x, y = 0.5 * (x_min + x_max - w), y_max - h
notes['body'][framenum] = (x, y, w, h, False)
bodies_added += 1
# print(f'added body to frame {framenum}')
elif last_body is not None:
notes['body'][framenum] = last_body
bodies_added += 1
else:
if self._verbosity >= 2:
print(f' problem adding body location to frame {framenum}')
x, y, w, _, _ = notes['body'][framenum]
# Assume a hand position 50 centimeters below the top of the head
x_origin = x + 0.5 * w
y_origin = y + 50.0 / notes['cm_per_pixel']
notes['origin'][framenum] = (x_origin, y_origin)
if self._verbosity >= 2 and bodies_added > 0:
print(' added missing body measurements '
f'to {bodies_added} frames')
def remove_tags_below_hands(self):
"""
Delete any tags that are below the hand height defined above. If
this renders any arcs unviable then delete those as well.
"""
notes = self.notes
if self._verbosity >= 2:
print('removing detections below hand level...')
arcs_to_kill = []
tags_removed = 0
arcs_removed = 0
for arc in notes['arcs']:
# delete any tags attached to the arc that are below hand height
_, y_origin = notes['origin'][round(arc.f_peak)]
tags_to_kill = [tag for tag in arc.tags if tag.y > y_origin]
for tag in tags_to_kill:
arc.tags.remove(tag)
notes['meas'][tag.frame].remove(tag)
tags_removed += 1
# check if the arc is still viable
if len(tags_to_kill) > 0:
if self.eval_arc(arc, requirepeak=True) > 0:
arcs_to_kill.append(arc)
for arc in arcs_to_kill:
notes['arcs'].remove(arc)
tags_to_kill = list(arc.tags)
for tag in tags_to_kill:
arc.tags.remove(tag)
notes['meas'][tag.frame].remove(tag)
tags_removed += 1
if self._verbosity >= 2:
f_min, _ = arc.get_frame_range(notes)
print(' removed arc {} starting at frame {}'.format(
arc.id_, f_min))
arcs_removed += 1
if self._verbosity >= 2:
print(f' done: {tags_removed} detections '
f'removed, {arcs_removed} arcs removed')
def compile_arc_data(self):
"""
Work out some basic information about each arc in the video: Throw
height, throw position relative to centerline, etc.
"""
notes = self.notes
s, c = sin(notes['camera_tilt']), cos(notes['camera_tilt'])
for arc in notes['arcs']:
x_origin, y_origin = notes['origin'][round(arc.f_peak)]
# Body origin in juggler coordinates:
x_origin_jc = x_origin * c - y_origin * s
y_origin_jc = x_origin * s + y_origin * c
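            # Solve y_origin_jc = c + e*df**2 for df, the frame offset from
            # the peak at which the parabola crosses hand height; throw and
            # catch are then symmetric about f_peak.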
df2 = (y_origin_jc - arc.c) / arc.e
if df2 > 0:
df = sqrt(df2)
else:
# throw peak is below hand height (should never happen!)
arc_fmin, arc_fmax = arc.get_tag_range()
df = max(abs(arc.f_peak - arc_fmin),
abs(arc.f_peak - arc_fmax))
arc.f_throw = arc.f_peak - df
arc.f_catch = arc.f_peak + df
arc.x_throw = (arc.a - arc.b * df) - x_origin_jc
arc.x_catch = (arc.a + arc.b * df) - x_origin_jc
arc.height = y_origin_jc - arc.c
def find_runs(self):
"""
Separate arcs into a list of runs, by assuming that two arcs that
overlap in time are part of the same run.
Args:
None
Returns:
runs(list):
List of runs, each of which is a list of Ballarc objects
"""
notes = self.notes
arcs = notes['arcs']
if len(arcs) == 0:
notes['runs'] = 0
return []
runs = list()
sorted_arcs = sorted(arcs, key=lambda a: a.f_throw)
first_arc = sorted_arcs[0]
current_run = [first_arc]
current_max_frame = first_arc.f_catch
for arc in sorted_arcs[1:]:
if arc.f_throw < current_max_frame:
current_run.append(arc)
current_max_frame = max(current_max_frame, arc.f_catch)
else:
# got a gap in time -> start a new run
runs.append(current_run)
current_run = [arc]
current_max_frame = arc.f_catch
runs.append(current_run)
# filter out any runs that are too short
good_runs, bad_arcs = [], []
        for run in runs:
            if len(run) >= 2:
                good_runs.append(run)
            else:
                bad_arcs.extend(run)
notes['arcs'] = [a for a in notes['arcs'] if a not in bad_arcs]
for arc in bad_arcs:
for tag in arc.tags:
notes['meas'][tag.frame].remove(tag)
runs = good_runs
notes['runs'] = len(runs)
return runs
def assign_hands(self, run_dict):
"""
Assign throwing and catching hands to every arc in a given run. This
algorithm starts by assigning throws/catches far away from the
centerline and chaining from there, using the fact that two events in
close succession probably involve opposite hands.
Args:
run_dict(dict):
dictionary of information for a given run
Returns:
None
"""
notes = self.notes
run = run_dict['throw']
debug = False
if self._verbosity >= 3:
print('Assigning hands to arcs...')
# Start by making high-probability assignments of catches and throws.
# Assume that the 25% of throws with the largest x_throw values are
# from the left hand, and similarly for the right, and for catches
# as well.
for arc in run:
arc.hand_throw = arc.hand_catch = None
arcs_throw_sort = sorted(run, key=lambda a: a.x_throw)
for arc in arcs_throw_sort[:int(len(arcs_throw_sort)/4)]:
arc.hand_throw = 'right'
for arc in arcs_throw_sort[int(3*len(arcs_throw_sort)/4):]:
arc.hand_throw = 'left'
arcs_catch_sort = sorted(run, key=lambda a: a.x_catch)
for arc in arcs_catch_sort[:int(len(arcs_catch_sort)/4)]:
arc.hand_catch = 'right'
for arc in arcs_catch_sort[int(3*len(arcs_catch_sort)/4):]:
arc.hand_catch = 'left'
"""
Now the main algorithm. Our strategy is to maintain a queue of arcs
that have had hand_throw assigned. We will try to use these arcs to
assign hand_throw for nearby arcs that are unassigned, at which point
they are added to the queue. Continue this process recursively for as
long as we can.
"""
arc_queue = [arc for arc in run if arc.hand_throw is not None]
if len(arc_queue) == 0:
# Nothing assigned yet; assign something to get started
arc = max(run, key=lambda a: abs(a.x_throw))
arc.hand_throw = 'right' if arc.x_throw < 0 else 'left'
arc_queue = [arc]
if debug:
print(f'no throws assigned; set arc {arc.throw_id} '
f'to throw from {arc.hand_throw}')
# arcs that originate within 0.05s and 10cm of one another are
# assumed to be a multiplex throw from the same hand:
mp_window_frames = 0.05 * notes['fps']
mp_window_pixels = 10.0 / notes['cm_per_pixel']
        # assume a hand can't make two distinct throws within 1.3 throw
        # periods of each other (or within 0.23s if the rate is unknown):
if run_dict['throws per sec'] is None:
min_cycle_frames = 0.23 * notes['fps']
else:
min_cycle_frames = ((1.0 / run_dict['throws per sec']) * 1.3 *
notes['fps'])
while True:
while len(arc_queue) > 0:
assigned_arc = arc_queue.pop()
"""
Two cases for other arcs that can have throw hand assigned
based on assigned_arc:
1. arcs that are very close in time and space, which must
be from the same hand (a multiplex throw)
2. arcs thrown within min_cycle_frames of its throw time,
which should be from the opposite hand
"""
mp_arcs = [arc for arc in run if arc.hand_throw is None
and (assigned_arc.f_throw - mp_window_frames) <
arc.f_throw <
(assigned_arc.f_throw + mp_window_frames)
and (assigned_arc.x_throw - mp_window_pixels) <
arc.x_throw <
(assigned_arc.x_throw + mp_window_pixels)]
for arc in mp_arcs:
arc.hand_throw = assigned_arc.hand_throw
arc_queue.append(arc)
if debug:
print(f'multiplex throw; set arc {arc.throw_id} '
f'to throw from {arc.hand_throw}')
close_arcs = [arc for arc in run if arc.hand_throw is None
and (assigned_arc.f_throw - min_cycle_frames)
< arc.f_throw <
(assigned_arc.f_throw + min_cycle_frames)]
for arc in close_arcs:
arc.hand_throw = 'right' if (assigned_arc.hand_throw
== 'left') else 'left'
arc_queue.append(arc)
if debug:
print(f'close timing; set arc {arc.throw_id} '
f'to throw from {arc.hand_throw}')
# If there are still unassigned throws, find the one that is
# closest in time to one that is already assigned.
unassigned_arcs = [arc for arc in run if arc.hand_throw is None]
if len(unassigned_arcs) == 0:
break
assigned_arcs = [arc for arc in run if arc.hand_throw is not None]
closest_assigned = [(arc, min(assigned_arcs, key=lambda a:
abs(arc.f_throw - a.f_throw)))
for arc in unassigned_arcs]
arc_toassign, arc_assigned = min(closest_assigned,
key=lambda p: abs(p[0].f_throw -
p[1].f_throw))
# We want to assign a throw hand to arc_toassign. First
# check if it's part of a synchronous throw pair, in which
# case we'll assign hands based on locations.
sync_arcs = [arc for arc in run if
abs(arc.f_throw - arc_toassign.f_throw) <
mp_window_frames and arc is not arc_toassign]
if len(sync_arcs) > 0:
arc_toassign2 = sync_arcs[0]
if arc_toassign.x_throw > arc_toassign2.x_throw:
arc_toassign.hand_throw = 'left'
arc_toassign2.hand_throw = 'right'
else:
arc_toassign.hand_throw = 'right'
arc_toassign2.hand_throw = 'left'
arc_queue.append(arc_toassign)
arc_queue.append(arc_toassign2)
if debug:
print(f'sync pair; set arc {arc_toassign.throw_id} '
f'to throw from {arc_toassign.hand_throw}')
print(f'sync pair; set arc {arc_toassign2.throw_id} '
f'to throw from {arc_toassign2.hand_throw}')
else:
arc_toassign.hand_throw = 'right' if (
arc_assigned.hand_throw == 'left') else 'left'
arc_queue.append(arc_toassign)
if debug:
print(f'alternating (from arc {arc_assigned.throw_id}); '
f'set arc {arc_toassign.throw_id} to throw from '
f'{arc_toassign.hand_throw}')
# Do the same process for catching hands
arc_queue = [arc for arc in run if arc.hand_catch is not None]
if len(arc_queue) == 0:
# Nothing assigned yet; assign something to get started
arc = max(run, key=lambda a: abs(a.x_catch))
arc.hand_catch = 'right' if arc.x_catch < 0 else 'left'
arc_queue = [arc]
if debug:
print(f'no catches assigned; set arc {arc.throw_id} '
f'to catch from {arc.hand_catch}')
        # assume a hand can't make two distinct catches within one throw
        # period of each other (or within 0.18s if the rate is unknown):
if run_dict['throws per sec'] is None:
min_cycle_frames = 0.18 * notes['fps']
else:
min_cycle_frames = ((1.0 / run_dict['throws per sec']) * 1.0 *
notes['fps'])
while True:
while len(arc_queue) > 0:
assigned_arc = arc_queue.pop()
close_arcs = [arc for arc in run if arc.hand_catch is None
and (assigned_arc.f_catch - min_cycle_frames)
< arc.f_catch <
(assigned_arc.f_catch + min_cycle_frames)]
for arc in close_arcs:
arc.hand_catch = 'right' if (assigned_arc.hand_catch
== 'left') else 'left'
arc_queue.append(arc)
if debug:
print(f'close timing; set arc {arc.throw_id} '
f'to catch in {arc.hand_catch}')
# If there are still unassigned catches, find the one that is
# closest in time to one that is already assigned.
unassigned_arcs = [arc for arc in run if arc.hand_catch is None]
if len(unassigned_arcs) == 0:
break
assigned_arcs = [arc for arc in run if arc.hand_catch is not None]
closest_assigned = [(arc, min(assigned_arcs, key=lambda a:
abs(arc.f_catch - a.f_catch)))
for arc in unassigned_arcs]
arc_toassign, arc_assigned = min(closest_assigned,
key=lambda p: abs(p[0].f_catch -
p[1].f_catch))
# We want to assign a catch hand to arc_toassign. First
# check if it's part of a synchronous catch pair, in which
# case we'll assign hands based on locations.
sync_arcs = [arc for arc in run if
abs(arc.f_catch - arc_toassign.f_catch) <
mp_window_frames and arc is not arc_toassign]
if len(sync_arcs) > 0:
arc_toassign2 = sync_arcs[0]
if arc_toassign.x_catch > arc_toassign2.x_catch:
arc_toassign.hand_catch = 'left'
arc_toassign2.hand_catch = 'right'
else:
arc_toassign.hand_catch = 'right'
arc_toassign2.hand_catch = 'left'
arc_queue.append(arc_toassign)
arc_queue.append(arc_toassign2)
if debug:
print(f'sync pair; set arc {arc_toassign.throw_id} '
f'to catch in {arc_toassign.hand_catch}')
print(f'sync pair; set arc {arc_toassign2.throw_id} '
f'to catch in {arc_toassign2.hand_catch}')
else:
arc_toassign.hand_catch = 'right' if (
arc_assigned.hand_catch == 'left') else 'left'
arc_queue.append(arc_toassign)
if debug:
print(f'alternating (from arc {arc_assigned.throw_id}); '
f'set arc {arc_toassign.throw_id} to catch in '
f'{arc_toassign.hand_catch}')
if self._verbosity >= 3:
for arc in run:
print(f'arc {arc.throw_id} throwing from {arc.hand_throw}, '
f'catching in {arc.hand_catch}')
def connect_arcs(self, run_dict):
"""
Try to connect arcs together that represent subsequent throws for
a given ball. Do this by filling in arc.prev and arc.next for each
arc in a given run, forming a linked list for each ball in the pattern.
A value of None for arc.prev or arc.next signifies the first or last
arc for that ball, within the current run.
Since some arcs are not detected (e.g. very low throws), this process
can often make mistakes.
Args:
run_dict(dict):
dictionary of information for a given run
Returns:
None
"""
run = run_dict['throw']
for arc in run:
arc.next = None
for arc in run:
# try to find the last arc caught by the hand `arc` throws with
arc.prev = max((arc_prev for arc_prev in run
if (arc_prev.f_catch < arc.f_throw
and arc_prev.hand_catch == arc.hand_throw
and arc_prev.next is None)),
key=lambda a: a.f_catch, default=None)
if arc.prev is not None:
arc.prev.next = arc
def estimate_ball_count(self, run_dict):
"""
Use some heuristics to estimate the number of balls in the pattern.
        This can't be done by counting the number of objects in the air since
there is usually at least one in the hands that won't be seen by
the tracker.
"""
run = run_dict['throw']
duration = run_dict['duration']
tps = run_dict['throws per sec']
height = self.notes['cm_per_pixel'] * mean(arc.height for arc in run)
if tps is None:
# should never happen
N_round = N_est = 1
else:
# estimate using physics, from the height of the pattern
g = 980.7 # gravity in cm/s^2
dwell_ratio = 0.63 # assumed fraction of time hand is filled
N_est = 2 * dwell_ratio + tps * sqrt(8 * height / g)
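            # Where this comes from (explanatory note): a ball thrown to
            # height h is airborne for sqrt(8h/g) seconds, and each hand
            # (throwing at tps/2 Hz) holds it for dwell_ratio * (2/tps)
            # seconds, so N = tps * (flight + dwell)
            #               = tps * sqrt(8h/g) + 2 * dwell_ratio.
            # Illustrative numbers: tps = 5.0 and height = 50 cm give
            # sqrt(8*50/980.7) ~= 0.64 s and N_est ~= 1.26 + 3.19 ~= 4.5.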
same_side_throws = sum(1 for arc in run if
arc.hand_catch == arc.hand_throw)
total_throws = sum(1 for arc in run if arc.hand_catch is not None
and arc.hand_throw is not None)
if total_throws > 0:
if same_side_throws > 0.5 * total_throws:
# assume a fountain pattern with even number ->
# round N to the nearest even number
N_round = 2 * int(round(0.5 * N_est))
else:
# assume a cascade pattern with odd number ->
# round N to the nearest odd number
N_round = 1 + 2 * int(round(0.5 * (N_est - 1)))
else:
N_round = int(round(N_est))
# maximum possible value based on connections between arcs:
N_max = sum(1 for arc in run if arc.prev is None)
run_dict['balls'] = N = min(N_round, N_max)
if self._verbosity >= 2:
print(f'duration = {duration:.1f} sec, tps = {tps:.2f} Hz, '
f'height = {height:.1f} cm')
print(f'N_est = {N_est:.2f}, N_round = {N_round}, '
f'N_max = {N_max} --> N = {N}')
def analyze_run_form(self, run_dict):
"""
Based on the number of balls, determine ideal throw and catch
locations. Also calculate any asymmetry in throw/catch timing and
translate this to a delta in y-position (cm).
Add to run_dict:
(1) width of the pattern (cm)
(2) target throw points, right and left (cm)
(3) target catch points, right and left (cm)
Add to individual arcs in run (third throw in run and later):
(4) timing error in throw (seconds)
(5) timing error in throw, translated to Delta-y (cm)
(6) timing error in catch (seconds)
(7) timing error in catch, translated to Delta-y (cm)
"""
notes = self.notes
run = run_dict['throw']
# find pattern width
catch_left_avg = mean(t.x_catch for t in run
if t.hand_catch == 'left')
catch_right_avg = mean(t.x_catch for t in run
if t.hand_catch == 'right')
width = notes['cm_per_pixel'] * (catch_left_avg - catch_right_avg)
run_dict['width'] = width
# Find ideal amount of scoop (P), as a fraction of width. These
# values are from an analytical model that minimizes the probability
# of collisions under an assumption of normally-distributed throw
# errors.
balls = run_dict['balls']
PoverW_ideal = [0.0, 0.5, 0.5, 0.4,
0.4, 0.38, 0.45, 0.32,
0.45, 0.26, 0.45, 0.23,
0.45, 0.19, 0.45]
PoverW_target = PoverW_ideal[balls] if balls < 15 else 0.3
run_dict['target throw point cm'] = (0.5 - PoverW_target) * width
run_dict['target catch point cm'] = 0.5 * width
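        # (illustrative numbers: for a 5-ball run, PoverW_target = 0.38, so a
        # 60 cm wide pattern targets throws (0.5 - 0.38) * 60 = 7.2 cm from
        # the centerline and catches at 0.5 * 60 = 30 cm)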
# Figure out the errors in throw and catch timing. Do this by
# defining a window of neighboring arcs and doing a linear
# regression to interpolate an ideal time for the arc in question.
sorted_run = sorted(run, key=lambda a: a.throw_id)
for throw_idx in range(2, len(run)):
arc = sorted_run[throw_idx]
# calculate error in throw timing; window is the set of other arcs
# likeliest to collide with this one:
if balls % 2 == 0:
window = [-2, 2]
else:
window = [-2, -1, 1, 2]
window = [x for x in window if 0 <= (throw_idx + x) < len(run)]
if len(window) > 2:
# do linear regression over the throwing window
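                # The accumulators below are the usual least-squares sums
                # (N = count, X = sum(x), X2 = sum(x^2), F = sum(f),
                # XF = sum(x*f)); the ideal throw frame is the fitted line
                # evaluated at x = 0, i.e. the OLS intercept
                # (sum(x^2)*sum(f) - sum(x)*sum(xf)) / (N*sum(x^2) - sum(x)^2)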
N = X = X2 = F = XF = 0
for x in window:
arc2 = sorted_run[throw_idx + x]
N += 1
X += x
X2 += x * x
F += arc2.f_throw
XF += x * arc2.f_throw
f_throw_ideal = (X2 * F - X * XF) / (X2 * N - X * X)
arc.throw_error_s = ((arc.f_throw - f_throw_ideal)
/ notes['fps'])
# sign convention: positive delta-y corresponds to throws
# that are thrown early (i.e. negative delta-t)
arc.throw_error_cm = ((2.0 * arc.e * notes['fps']**2 *
notes['cm_per_pixel'])
* arc.throw_error_s)
# same thing but for catching; window is neighboring catches into
# the same hand
arc_prev = max((arc2 for arc2 in run
if (arc2.f_catch < arc.f_catch
and arc2.hand_catch == arc.hand_catch)),
key=lambda a: a.f_catch, default=None)
arc_next = min((arc2 for arc2 in run
if (arc2.f_catch > arc.f_catch
and arc2.hand_catch == arc.hand_catch)),
key=lambda a: a.f_catch, default=None)
if arc_prev is not None and arc_next is not None:
f_catch_ideal = 0.5 * (arc_prev.f_catch + arc_next.f_catch)
arc.catch_error_s = ((arc.f_catch - f_catch_ideal)
/ notes['fps'])
# sign convention: negative delta-y corresponds to throws
# that are caught early (i.e. negative delta-t)
arc.catch_error_cm = ((-2.0 * arc.e * notes['fps']**2 *
notes['cm_per_pixel'])
* arc.catch_error_s)
if self._verbosity >= 3:
output = f'arc {arc.throw_id}: '
if arc.throw_error_s is not None:
output += (f'throw {arc.throw_error_s:.3f} s '
f'({arc.throw_error_cm:.1f} cm), ')
else:
output += 'throw None, '
if arc.catch_error_s is not None:
output += (f'catch {arc.catch_error_s:.3f} s '
f'({arc.catch_error_cm:.1f} cm)')
else:
output += 'catch None'
print(output)
# --------------------------------------------------------------------------
# Non-member functions
# --------------------------------------------------------------------------
def default_scanner_params():
"""
Returns a dictionary with constants that configure Hawkeye's video
scanner. Optionally you can pass a dictionary of this type to the
VideoScanner initializer as 'params'. In most cases the defaults
should work pretty well.
The 'high res' values apply when the frame height is greater than or
equal to 480 pixels.
"""
params = {
# duration (in seconds) over which body positions are averaged
'body_averaging_time_window_secs': 0.2,
# area (square pixels) of smallest blobs detected
'min_blob_area_high_res': 7.0,
'min_blob_area_low_res': 1.0,
# area (square pixels) of largest blobs detected
'max_blob_area_high_res': 1000.0,
'max_blob_area_low_res': 150.0,
# maximum height of the frame in the juggling plane, in centimeters
'max_frame_height_cm': 1000,
# default height of the frame in the juggling plane, in centimeters
'default_frame_height_cm': 300,
# assumed maximum frames per second
'max_fps': 60,
# default frames per second
'default_fps': 30,
# assumed uncertainty in measured locations, in pixels^2
'sigmasq': 15.0,
# when building initial arcs from data, largest allowed gap (in
# frames) between tags for a given arc
'max_frame_gap_in_arc': 2,
# closeness to arc to associate a tag with an arc
'max_distance_pixels_480': 5,
# how close (fractionally) an arc's acceleration must be to
# calculated value of g, to be accepted
'g_window': 0.25,
# how close (fractionally) a tag's radius needs to be to the median
# tag radius of an arc, to be allowed to attach to that arc
'radius_window_high_res': 0.65, # was 0.3
'radius_window_low_res': 0.75,
# minimum number of tags needed for an arc to be considered valid
'min_tags_per_arc_high_fps': 10, # was 6 JKB
'min_tags_per_arc_low_fps': 5,
# number of tags needed to start curve fitting arc to the data
'min_tags_to_curve_fit': 4
}
return params
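def example_scanner_params():
    """
    Illustrative sketch only (not part of the original scanner): tweak one of
    the defaults above before handing the dictionary to VideoScanner via the
    'params' argument described in default_scanner_params()'s docstring. The
    override value here is a hypothetical example.
    """
    params = default_scanner_params()
    params['min_blob_area_low_res'] = 2.0  # hypothetical override
    return params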
def read_notes(filename):
"""
Read in the notes data structure from a pickle file.
Args:
filename(string):
filename to read
Returns:
notes(dict):
record of all raw detection events
"""
with open(filename, 'rb') as handle:
notes = pickle.load(handle)
return notes
def write_notes(notes, filename):
"""
Write the notes data structure to a pickle file.
Args:
notes(dict):
record of all raw detection events
filename(string):
filename to write
Returns:
None
"""
_filepath = os.path.abspath(filename)
_dirname = os.path.dirname(_filepath)
if not os.path.exists(_dirname):
os.makedirs(_dirname)
if os.path.exists(_filepath):
os.remove(_filepath)
with open(_filepath, 'wb') as handle:
pickle.dump(notes, handle, protocol=pickle.HIGHEST_PROTOCOL)
# -----------------------------------------------------------------------------
class ScannerException(Exception):
def __init__(self, message=None):
super().__init__(message)
# -----------------------------------------------------------------------------
def play_video(filename, notes=None, outfilename=None, startframe=0,
keywait=False):
"""
This is not part of the scanner per se but is helpful for testing and
debugging. It plays a video using OpenCV, including overlays based on
data in the optional 'notes' dictionary. If 'outfilename' is specified
then it will write the annotated video to a file on disk.
Keyboard 'q' quits, 'i' toggles arc number labels.
"""
cap = cv2.VideoCapture(filename)
if not cap.isOpened():
print('Error opening video stream or file')
return
# cap.set(1, startframe)
framecount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
if outfilename is not None:
fps = cap.get(cv2.CAP_PROP_FPS)
framewidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frameheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'avc1')
out = cv2.VideoWriter(outfilename, fourcc, fps, (framewidth,
frameheight))
tags = dict()
body = dict()
arcs = []
if notes is not None:
if 'meas' in notes:
tags = notes['meas']
if 'body' in notes:
body = notes['body']
if 'arcs' in notes:
arcs = notes['arcs']
cv2.namedWindow(filename)
font = cv2.FONT_HERSHEY_SIMPLEX
framenum = framereads = 0
show_arc_id = False
while cap.isOpened():
ret, frame = cap.read()
framereads += 1
if not ret:
print('VideoCapture.read() returned False '
'on frame read {}'.format(framereads))
if framereads > framecount:
break
continue
if framenum >= startframe:
if framenum in body:
# draw body bounding box
x, y, w, h, detected = notes['body'][framenum]
x = int(round(x))
y = int(round(y))
w = int(round(w))
h = int(round(h))
color = (255, 0, 0) if detected else (0, 0, 255)
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
if framenum in tags:
for tag in tags[framenum]:
color = ((0, 255, 0) if tag.arc is not None
else (0, 0, 255))
cv2.circle(frame, (int(round(tag.x)), int(round(tag.y))),
int(round(tag.radius)), color, 1)
for arc in arcs:
if notes['step'] < 6:
start, end = arc.get_frame_range(notes)
# print('start = {}, end = {}'.format(start, end))
else:
start, end = arc.f_throw, arc.f_catch
if start <= framenum <= end:
x, y = arc.get_position(framenum, notes)
x = int(x + 0.5)
y = int(y + 0.5)
if (notes is not None and len(arc.tags) <
notes['scanner_params']['min_tags_per_arc']):
temp, _ = arc.get_tag_range()
print('error, arc {} at {} has only {} tags'.format(
arc.id_, temp, len(arc.tags)))
arc_has_tag = any(
arc.get_distance_from_tag(tag, notes) <
notes['scanner_params']['max_distance_pixels']
for tag in arc.tags if tag.frame == framenum)
color = (0, 255, 0) if arc_has_tag else (0, 0, 255)
cv2.circle(frame, (x, y), 2, color, -1)
if show_arc_id:
arc_id = (arc.id_ if notes['step'] < 6
else arc.throw_id)
cv2.rectangle(frame, (x+10, y+5),
(x+40, y-4), (0, 0, 0), -1)
cv2.putText(frame, format(arc_id, ' 4d'),
(x+13, y+4),
font, 0.3, (255, 255, 255), 1,
cv2.LINE_AA)
cv2.rectangle(frame, (3, 3), (80, 27), (0, 0, 0), -1)
cv2.putText(frame, format(framenum, ' 7d'), (10, 20), font, 0.5,
(255, 255, 255), 1, cv2.LINE_AA)
(h, w) = frame.shape[:2]
r = 640 / float(h)
dim = (int(w * r), 640)
frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
cv2.imshow(filename, frame)
if outfilename is not None:
out.write(frame)
stop_playback = False
while not stop_playback:
keycode = cv2.waitKey(1) & 0xFF
if keycode == ord('q'): # Q on keyboard exits
stop_playback = True
break
elif keycode == ord('i'): # I toggles throw IDs
show_arc_id = not show_arc_id
break
if not keywait or keycode != 0xFF:
break
if stop_playback:
break
framenum += 1
cap.release()
if outfilename is not None:
out.release()
cv2.destroyAllWindows()
```
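A minimal usage sketch for the tracker module above (illustrative only): it
assumes a notes pickle already produced by a prior scan, stored in the
`__Hawkeye__` directory that the worker below creates next to the source
video; both paths here are placeholders.

```python
from hawkeye.tracker import read_notes, play_video

# placeholder paths; the __Hawkeye__ layout comes from
# HEWorker.make_product_fileinfo() in worker.py below
notes = read_notes('videos/__Hawkeye__/juggling_notes.pkl')
play_video('videos/juggling.mp4', notes=notes, startframe=0, keywait=False)
```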
#### File: hawkeye/hawkeye/worker.py
```python
import os
import sys
import io
import time
import subprocess
import platform
import json
from math import gcd
from PySide2.QtCore import QObject, QThread, Signal, Slot
from hawkeye.tracker import VideoScanner, ScannerException
class HEWorker(QObject):
"""
Worker that processes videos. Processing consists of two parts:
1. Transcoding the video into a format that supports smooth cueing
2. Scanning the video file to analyze the juggling and produce the
`notes` dictionary with object detections, arc parameters, etc.
Video transcoding (step 1) is done first and reported back to the UI
thread, so that the user can view the video while scanning is still
underway.
The worker also handles requests to extract a clip from a source video.
See on_extract_clip().
We put this worker in a separate QObject and use signals and slots to
communicate with it, so that we can do these time-consuming operations on a
thread separate from the main event loop. The signal/slot mechanism is a
thread-safe way to communicate in Qt. Look in HEMainWindow.start_worker()
to see how this thread is initiated and signals and slots connected.
"""
# signal output from processing
# arg1(str) = video file_id
# arg2(str) = processing output (should be appended to any prev. output)
sig_output = Signal(str, str)
# progress indicator signal for work-in-process
# arg1(str) = video file_id
# arg2(int) = step #
# arg3(int) = max # of steps
sig_progress = Signal(str, int, int)
# signal that processing failed with an error
# arg1(str) = video file_id
# arg2(str) = error message
sig_error = Signal(str, str)
# signal video transcoding task is done
# arg1(str) = video file_id
# arg2(dict) = fileinfo dictionary with file path names
# arg3(int) = resolution (vertical pixels) of converted video
    # arg4(dict) = preliminary version of notes dictionary for video
# arg5(bool) = successful completion
sig_video_done = Signal(str, dict, int, dict, bool)
# signal juggling analysis task is done
# arg1(str) = video file_id
# arg2(dict) = notes dictionary
# arg3(bool) = successful completion
sig_analyze_done = Signal(str, dict, bool)
# signal that video clipping task is done
    # arg1(str) = video file_id
    # arg2(bool) = successful completion
sig_clipping_done = Signal(str, bool)
def __init__(self, app=None):
super().__init__()
self._app = app
self._resolution = 0
self._abort = False
@Slot(dict)
def on_new_prefs(self, prefs: dict):
"""
This slot gets signaled when there is a change to the display
preferences for output videos. The worker uses these preferences to
create a video with the given vertical pixel dimension.
"""
if prefs['resolution'] == 'Actual size':
self._resolution = 0
else:
self._resolution = int(prefs['resolution'])
@Slot(str, bool)
def on_process_video(self, file_id: str, analyze: bool):
"""
Signaled when the worker should process a video to make the version
suitable for single-frame stepping.
When `analyze` is true, subsequently analyze the video for juggling
content.
"""
if self.abort():
return
fileinfo = self.make_product_fileinfo(file_id)
notes = None
try:
# check if the file exists and is readable
file_path = fileinfo['file_path']
errorstring = ''
if not os.path.exists(file_path):
errorstring = f'File {file_path} does not exist'
elif not os.path.isfile(file_path):
errorstring = f'File {file_path} is not a file'
if errorstring != '':
self.sig_error.emit(file_id, errorstring)
raise HEProcessingException()
# check if a valid notes file already exists, and if so load it
need_notes = True
notes_path = fileinfo['notes_path']
if os.path.isfile(notes_path):
notes = VideoScanner.read_notes(notes_path)
if notes['version'] == VideoScanner.CURRENT_NOTES_VERSION:
need_notes = False
else:
# old version of notes file -> delete and create a new one
self.sig_output.emit(file_id,
'Notes file is old...deleting\n\n')
os.remove(notes_path)
if need_notes:
scanner = VideoScanner(file_path,
scanvideo=fileinfo['scanvid_path'])
notes = scanner.notes
try:
retcode = self.get_video_metadata(fileinfo, notes)
except Exception as e:
                    print(f'metadata exception: {e}')
                    retcode = 1  # treat a metadata exception as a failure
if retcode != 0:
raise HEProcessingException()
"""
OpenCV-based alternative for line above:
if self.run_scanner(fileinfo, scanner, steps=(1, 1),
writenotes=False) != 0:
self.sig_error.emit(file_id,
'Error getting video metadata')
raise HEProcessingException()
"""
if not self.abort():
# check if the target directory exists, and if not create it
hawkeye_dir = fileinfo['hawkeye_dir']
if not os.path.exists(hawkeye_dir):
self.sig_output.emit(
file_id, f'Creating directory {hawkeye_dir}\n\n')
os.makedirs(hawkeye_dir)
if not os.path.exists(hawkeye_dir):
self.sig_error.emit(
file_id, f'Error creating directory {hawkeye_dir}')
return
resolution = self.make_display_video(fileinfo, notes)
self.sig_video_done.emit(file_id, fileinfo, resolution, notes,
resolution >= 0)
except HEProcessingException:
self.sig_video_done.emit(file_id, fileinfo, 0, notes, False)
if analyze:
self.sig_analyze_done.emit(file_id, notes, False)
return
if analyze and not self.abort():
self.on_analyze_juggling(file_id, notes)
@Slot(str, dict)
def on_analyze_juggling(self, file_id: str, notes: dict):
"""
Signaled when the user wants to analyze the juggling in a video. This
fills in most of the fields in the `notes` dictionary.
"""
if self.abort():
return
# UI_thread = (QThread.currentThread() == self._app.thread())
# print(f'starting analysis..., UI thread = {UI_thread}')
try:
need_analysis = (notes['step'] < 6)
first_step = notes['step'] + 1
except KeyError:
need_analysis = True
first_step = 1
if need_analysis:
fileinfo = self.make_product_fileinfo(file_id)
try:
# the following two checks are already done in
# on_process_video() but do them again in case the filesystem
# has changed in the interim.
# check if the file exists and is readable
file_path = fileinfo['file_path']
errorstring = ''
if not os.path.exists(file_path):
errorstring = f'File {file_path} does not exist'
elif not os.path.isfile(file_path):
errorstring = f'File {file_path} is not a file'
if errorstring != '':
self.sig_error.emit(file_id, errorstring)
raise HEProcessingException()
# check if the target directory exists, and if not create it
hawkeye_dir = fileinfo['hawkeye_dir']
if not os.path.exists(hawkeye_dir):
self.sig_output.emit(
file_id, f'Creating directory {hawkeye_dir}\n\n')
os.makedirs(hawkeye_dir)
if not os.path.exists(hawkeye_dir):
self.sig_error.emit(
file_id, f'Error creating directory {hawkeye_dir}')
raise HEProcessingException()
if self.make_scan_video(fileinfo) != 0:
raise HEProcessingException()
scanner = VideoScanner(file_path,
scanvideo=fileinfo['scanvid_path'],
notes=notes)
if self.run_scanner(fileinfo, scanner, steps=(first_step, 6),
writenotes=True) != 0:
raise HEProcessingException()
try:
os.remove(fileinfo['scanvid_path'])
except OSError:
pass
except HEProcessingException:
self.sig_analyze_done.emit(file_id, notes, False)
return
self.sig_analyze_done.emit(file_id, notes, True)
@Slot(str, dict, int)
def on_extract_clip(self, file_id: str, notes: dict, run_num: int):
"""
Signaled when the user wants to extract a clip from a video.
Clip the given run number from the video and save it in the same
directory as the source video.
"""
if self.abort():
return
run_dict = notes['run'][run_num]
balls = run_dict['balls']
throws = run_dict['throws']
# construct absolute path to the clip we will create
file_path = os.path.abspath(file_id)
file_dir = os.path.dirname(file_path)
file_basename = os.path.basename(file_path)
file_root, file_ext = os.path.splitext(file_basename)
clip_basename = (f'{file_root}_run{run_num+1:03}'
f'_{balls}b_{throws}t.mp4')
clip_path = os.path.join(file_dir, clip_basename)
if os.path.isfile(clip_path):
self.sig_clipping_done.emit(file_id, True) # clip already exists
return
# start clip 3 secs before first throw, end 3 secs after last catch
fps = notes['fps']
startframe, endframe = run_dict['frame range']
starttime = max(0.0, startframe / fps - 3.0)
endtime = min((notes['frame_count'] - 4) / fps, endframe / fps + 3.0)
duration = endtime - starttime
sm, ss = divmod(starttime, 60)
sh, sm = divmod(sm, 60)
starttime_str = f'{sh:02.0f}:{sm:02.0f}:{ss:02.3f}'
dm, ds = divmod(duration, 60)
dh, dm = divmod(dm, 60)
duration_str = f'{dh:02.0f}:{dm:02.0f}:{ds:02.3f}'
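        # (e.g. a start time of 83.5 s formats as '00:01:23.500')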
        rotation_str = ('rotate=' +
                        str(round(notes['camera_tilt'] * 10000)) + '/10000')
# run FFmpeg to make the clip. An alternative approach is to use the
# "copy" codec to avoid re-encoding the streams, but we would have to
# take care to start on a keyframe. I haven't found an easy way to do
# that while keeping the audio in sync.
args = ['-i', file_path,
'-ss', starttime_str,
'-t', duration_str,
'-vf', rotation_str,
'-c:a', 'aac', # re-encode audio as AAC
'-c:v', 'libx264', # re-encode video as H.264/mp4
'-preset', 'veryfast',
'-crf', '20',
clip_path]
retcode = self.run_ffmpeg(args, None)
if retcode != 0 or self.abort():
try:
os.remove(clip_path)
except OSError:
pass
if not self.abort():
self.sig_error.emit(file_id, 'Error saving clip: FFmpeg failed'
f' with return code {retcode}')
self.sig_clipping_done.emit(file_id, retcode == 0)
def abort(self):
"""
Return True if the user is trying to quit the app.
"""
if self._abort:
return True
self._abort = QThread.currentThread().isInterruptionRequested()
if self._abort:
QThread.currentThread().quit() # stop thread's event loop
return self._abort
def make_product_fileinfo(self, file_id):
"""
Make a dictionary of file paths for all work products, the exception
being the path to the display video which is added in
make_display_video() below.
"""
file_path = os.path.abspath(file_id)
file_dir = os.path.dirname(file_path)
file_basename = os.path.basename(file_path)
file_basename_noext = os.path.splitext(file_basename)[0]
hawkeye_dir = os.path.join(file_dir, '__Hawkeye__')
scanvid_basename = file_basename_noext + '_640x480.mp4'
scanvid_path = os.path.join(hawkeye_dir, scanvid_basename)
notes_basename = file_basename_noext + '_notes.pkl'
notes_path = os.path.join(hawkeye_dir, notes_basename)
csvfile_path = os.path.join(file_dir, file_basename + '.csv')
result = dict()
result['file_id'] = file_id
result['file_path'] = file_path
result['file_dir'] = file_dir
result['file_basename'] = file_basename
result['file_basename_noext'] = file_basename_noext
result['hawkeye_dir'] = hawkeye_dir
result['scanvid_basename'] = scanvid_basename
result['scanvid_path'] = scanvid_path
result['notes_basename'] = notes_basename
result['notes_path'] = notes_path
result['csvfile_path'] = csvfile_path
return result
def get_video_metadata(self, fileinfo, notes):
"""
Run FFprobe to get some source video metadata. In particular we need:
1. width in pixels (int)
2. height in pixels (int)
3. frames per second (float)
4. frame count (int)
This function replaces step 1 in VideoScanner, which gets this data
using OpenCV. We use FFprobe here because it may handle a wider
variety of input formats, and it reports information not accessible
through the OpenCV API such as display aspect ratio (DAR).
Returns 0 on success, nonzero on failure. This should signal sig_error
on every nonzero return value that isn't an abort.
"""
file_id = fileinfo['file_id']
file_path = fileinfo['file_path']
self.sig_output.emit(file_id, 'Getting metadata for video {}...\n'
.format(notes['source']))
if getattr(sys, 'frozen', False):
# running in a bundle
ffprobe_dir = sys._MEIPASS
else:
# running in a normal Python environment
if platform.system() == 'Windows':
ffprobe_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'resources\\windows')
elif platform.system() == 'Darwin':
ffprobe_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'resources/osx')
elif platform.system() == 'Linux':
ffprobe_dir = '/usr/local/bin' # fill this in JKB
ffprobe_executable = os.path.join(ffprobe_dir, 'ffprobe')
args = ['-v', 'error', '-of', 'json', '-select_streams', 'v:0',
'-show_streams', '-i', file_path]
args = [ffprobe_executable] + args
message = 'Running FFprobe with arguments:\n{}\n'.format(
' '.join(args))
self.sig_output.emit(file_id, message)
try:
kwargs = {}
if platform.system() == 'Windows':
# Python 3.7:
# CREATE_NO_WINDOW = subprocess.CREATE_NO_WINDOW
CREATE_NO_WINDOW = 0x08000000
kwargs['creationflags'] = CREATE_NO_WINDOW
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, **kwargs)
outputhandler = io.TextIOWrapper(p.stdout, encoding='utf-8')
output = ''
while p.poll() is None:
time.sleep(0.1)
for line in outputhandler:
output += line
if self.abort():
p.terminate()
p.wait()
return 1
results = f'FFprobe process ended, return code {p.returncode}\n'
self.sig_output.emit(file_id, results)
if p.returncode != 0:
self.sig_output.emit(
file_id, '\n####### Error running FFprobe #######\n')
self.sig_error.emit(file_id, 'Error getting video metadata')
return p.returncode
parsed_output = json.loads(output)
video_metadata = parsed_output['streams'][0]
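            # parsed_output has the shape {'streams': [{...}]}; because of
            # '-select_streams v:0' the single entry describes the video
            # stream, with keys such as 'width', 'height', 'avg_frame_rate',
            # and (for some containers) 'nb_frames'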
width_orig = int(video_metadata['width'])
height = int(video_metadata['height'])
# The input video might have non-square pixels, which is a problem
# for us in the visual analysis. Later we will use FFmpeg's scaler
# to create scan and display videos with square pixels (i.e.
# SAR=1:1). Here we use the DAR to calculate what the pixel width
# will be when we scale to square pixels.
if 'display_aspect_ratio' in video_metadata:
dar = video_metadata['display_aspect_ratio']
dar_parts = dar.split(':')
width = height * int(dar_parts[0]) // int(dar_parts[1])
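                # e.g. an anamorphic 1440x1080 stream tagged DAR 16:9 maps to
                # a square-pixel width of 1080 * 16 // 9 = 1920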
else:
# assume square pixels
width = width_orig
k = gcd(width, height)
dar = f'{width // k}:{height // k}'
# the fps response can either be in numeric form like '30' or
# '59.94', or in rational form like '60000/1001'. In any case we
# want fps as a float.
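            # (e.g. '30000/1001' parses to 29.97, '60000/1001' to 59.94)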
fps_raw = str(video_metadata['avg_frame_rate'])
if '/' in fps_raw:
fps_parts = fps_raw.split('/')
fps = float(fps_parts[0]) / float(fps_parts[1])
else:
fps = float(fps_raw)
if 'nb_frames' in video_metadata:
framecount = int(video_metadata['nb_frames'])
else:
framecount = round(float(video_metadata['duration']) * fps)
self.sig_output.emit(file_id, f'height = {height}\n')
self.sig_output.emit(
file_id,
f'width = {width_orig}, '
f'scaling to width = {width} (DAR = {dar})\n')
self.sig_output.emit(file_id, f'fps = {fps_raw} = {fps}\n')
self.sig_output.emit(file_id,
f'estimated frame count = {framecount}\n\n')
# fill in the same notes fields as VideoScanner's step 1
notes['fps'] = fps
notes['frame_width'] = width
notes['frame_height'] = height
notes['frame_count_estimate'] = framecount
notes['step'] = 1
return 0
except subprocess.SubprocessError as err:
self.sig_output.emit(
file_id, '\n####### Error running FFprobe #######\n')
self.sig_output.emit(
file_id, f'Error message: {err}\n\n\n')
except KeyError as err:
self.sig_output.emit(
file_id, '\n####### Error running FFprobe #######\n')
self.sig_output.emit(
file_id, f'Key error accessing returned data: {err}\n\n\n')
self.sig_error.emit(file_id, 'Error getting video metadata')
return 1
def make_display_video(self, fileinfo, notes):
"""
The video we display in the UI is not the original video, but a version
transcoded with FFmpeg. We transcode for four reasons:
1. The video player can't smoothly step backward a frame at a time
unless every frame is coded as a keyframe. This is rarely the case
for source video. We use the x264 encoder's keyint=1 option to
specify the keyframe interval to be a single frame.
2. For performance reasons we may want to limit the display resolution
to a maximum value, in which case we want to rescale.
3. The video player on Windows gives an error when it loads a video
with no audio track. Fix this by adding a null audio track.
4. FFmpeg reads more video formats/codecs than QMediaPlayer, so
transcoding into standard H.264/mp4 allows us to be compatible with
a wider range of source video formats.
Returns the resolution of the transcoded video (vertical scanlines, or
if 0 then identical to the source video). Return value of -1 indicates
failure.
"""
file_id = fileinfo['file_id']
file_path = fileinfo['file_path']
hawkeye_dir = fileinfo['hawkeye_dir']
file_basename_noext = fileinfo['file_basename_noext']
displayvid_resolution = self._resolution if (
notes is not None and self._resolution < notes['frame_height']
) else 0
if displayvid_resolution == 0:
displayvid_basename = file_basename_noext + '_keyint1.mp4'
else:
displayvid_basename = (file_basename_noext + '_keyint1_' +
str(displayvid_resolution) + '.mp4')
displayvid_path = os.path.join(hawkeye_dir, displayvid_basename)
fileinfo['displayvid_basename'] = displayvid_basename
fileinfo['displayvid_path'] = displayvid_path
if os.path.isfile(displayvid_path):
return displayvid_resolution
self.sig_output.emit(file_id, 'Video conversion starting...\n')
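        # (explanatory note) In both FFmpeg invocations below,
        # '-f lavfi -i anullsrc=...' synthesizes a silent stereo audio track
        # that '-map 0:a -map 1:v' pairs with the source video, '-shortest'
        # ends it when the video ends (reason 3 above), 'keyint=1' makes
        # every frame a keyframe for smooth stepping (reason 1), and the
        # scale/setsar filters yield square pixels, with the second branch
        # also rescaling to the requested height (reason 2).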
if displayvid_resolution == 0:
# FFmpeg args for native resolution
args = ['-f', 'lavfi',
'-i', 'anullsrc=channel_layout=stereo:sample_rate=44100',
'-i', file_path, '-shortest', '-c:v', 'libx264', '-preset',
'veryfast', '-tune', 'fastdecode', '-crf', '20', '-vf',
'scale=trunc(ih*dar):ih,setsar=1,format=yuv420p',
'-x264-params', 'keyint=1', '-c:a', 'aac', '-map', '0:a',
'-map', '1:v', displayvid_path]
else:
# reduced resolution
args = ['-f', 'lavfi',
'-i', 'anullsrc=channel_layout=stereo:sample_rate=44100',
'-i', file_path, '-shortest', '-c:v', 'libx264', '-preset',
'veryfast', '-tune', 'fastdecode', '-crf', '20', '-vf',
'scale=trunc(ih*dar):ih,scale=-2:'
+ str(displayvid_resolution) + ',setsar=1,format=yuv420p',
'-x264-params', 'keyint=1', '-c:a', 'aac', '-map', '0:a',
'-map', '1:v', displayvid_path]
retcode = self.run_ffmpeg(args, file_id)
if retcode != 0 or self.abort():
try:
os.remove(displayvid_path)
except OSError:
# print("got exception: {}".format(err))
pass
if not self.abort():
self.sig_output.emit(
file_id, '\nError converting video {}'.format(
fileinfo['file_basename']))
self.sig_error.emit(
file_id, 'Error converting video {}'.format(
fileinfo['file_basename']))
return -1
return displayvid_resolution
def make_scan_video(self, fileinfo):
"""
Create an H.264/mp4 video at 640x480 resolution to use as input for the
feature detector. We use a fixed resolution because (a) the feature
detector performs well on videos of this scale, and (b) we may want to
experiment with a neural network-based feature detector in the future,
which would need a fixed input dimension.
Also OpenCV's support for codecs may be more limited than FFmpeg's, so
transcoding allows us to process any video format FFmpeg can read.
Lastly we want to ensure the input to the video scanner is video with
square pixels. Some source video uses non-square pixels so we use
FFmpeg's scaler to transform it.
Returns 0 on success, 1 on failure. This must signal sig_error on every
nonzero return value that isn't an abort.
"""
file_id = fileinfo['file_id']
file_path = fileinfo['file_path']
scanvid_path = fileinfo['scanvid_path']
if os.path.isfile(scanvid_path):
return 0 # video already exists
args = ['-hide_banner',
'-i', file_path,
'-c:v', 'libx264', # encode with libx264 (H.264)
'-crf', '20', # quality factor (high)
'-vf', # video filters:
'scale=trunc(ih*dar):ih,' # scale to square pixels
'scale=-1:480,' # scale to height 480
'crop=min(iw\\,640):480,' # crop to width 640, if needed
'pad=640:480:(ow-iw)/2:0,' # pad to width 640, if needed
'setsar=1', # set SAR=1:1 (square pixels)
'-an', # no audio
scanvid_path]
retcode = self.run_ffmpeg(args, file_id)
if retcode != 0 or self.abort():
try:
os.remove(scanvid_path)
except OSError:
# print("got exception: {}".format(err))
pass
if not self.abort():
self.sig_output.emit(
file_id, '\nError converting video {}'.format(
fileinfo['file_basename']))
self.sig_error.emit(
file_id, 'Error converting video {}'.format(
fileinfo['file_basename']))
return 1
return 0
def run_ffmpeg(self, args, file_id):
"""
Run FFmpeg as a separate process, optionally sending console output
back to the UI thread.
Args:
args(list):
Argument list for FFmpeg, minus the executable name
file_id(str):
Filename for directing FFmpeg console output back to UI thread
using sig_output signal. If None then output is ignored.
Returns:
retcode(int):
FFmpeg return code
"""
if getattr(sys, 'frozen', False):
# running in a bundle
ffmpeg_dir = sys._MEIPASS
else:
# running in a normal Python environment
if platform.system() == 'Windows':
ffmpeg_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'resources\\windows')
elif platform.system() == 'Darwin':
ffmpeg_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'resources/osx')
elif platform.system() == 'Linux':
ffmpeg_dir = '/usr/local/bin' # fill this in JKB
ffmpeg_executable = os.path.join(ffmpeg_dir, 'ffmpeg')
args = [ffmpeg_executable] + args
if file_id is not None:
message = 'Running FFmpeg with arguments:\n{}\n\n'.format(
' '.join(args))
self.sig_output.emit(file_id, message)
try:
kwargs = {}
if platform.system() == 'Windows':
# Python 3.7:
# CREATE_NO_WINDOW = subprocess.CREATE_NO_WINDOW
CREATE_NO_WINDOW = 0x08000000
kwargs['creationflags'] = CREATE_NO_WINDOW
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, **kwargs)
outputhandler = io.TextIOWrapper(p.stdout, encoding='utf-8')
while p.poll() is None:
time.sleep(0.1)
for line in outputhandler:
if file_id is not None:
self.sig_output.emit(file_id, line)
if self.abort():
p.terminate()
p.wait()
return 1
if file_id is not None:
self.sig_output.emit(file_id, 'FFmpeg process ended, return '
f'code {p.returncode}\n\n')
return p.returncode
except subprocess.SubprocessError as err:
if file_id is not None:
self.sig_output.emit(
file_id, '\n####### Error running FFmpeg #######\n')
self.sig_output.emit(
file_id, "Error message: {}\n\n\n".format(err))
return 1
def run_scanner(self, fileinfo, scanner, steps, writenotes):
"""
Run the video scanner. Capture stdout and send the output to our UI.
Returns 0 on success, 1 on failure. This must signal sig_error on every
nonzero return value that isn't an abort.
"""
file_id = fileinfo['file_id']
hawkeye_dir = fileinfo['hawkeye_dir']
# Install our own output handler at sys.stdout to capture printed text
# from the scanner and send it to our UI thread.
def output_callback(s):
self.sig_output.emit(file_id, s)
sys.stdout = HEOutputHandler(callback=output_callback)
# Define a callback function to pass in to VideoScanner.process()
# below. Processing takes a long time (seconds to minutes) and the
# scanner will call this function at irregular intervals.
def processing_callback(step=0, maxsteps=0):
# so UI thread can update progress bar:
self.sig_progress.emit(file_id, step, maxsteps)
# Release the GIL periodically so that video drawing operations
# don't get blocked for too long during processing.
# VideoScanner's step 2 isn't a problem because most of that time
# is spent in OpenCV so the GIL is mostly free. Steps 3+ though are
# all Python code (and those also happen to correspond to
# maxsteps==0), so we periodically sleep during those steps.
if maxsteps == 0:
time.sleep(0.001)
if self.abort():
# raise exception to bail us out of whereever we are in
# processing:
raise HEAbortException()
try:
scanner.process(steps=steps,
writenotes=writenotes, notesdir=hawkeye_dir,
callback=processing_callback, verbosity=2)
except ScannerException as err:
self.sig_output.emit(file_id,
'\n####### Error during scanning #######\n')
self.sig_output.emit(file_id,
"Error message: {}\n\n\n".format(err))
self.sig_error.emit(file_id, f'Error: {err}')
return 1
except HEAbortException:
# worker thread got an abort signal during processing;
# the scanner writes the notes file as an atomic operation after
# processing is complete, so no need to delete a partially-written
# file here.
return 1
finally:
sys.stdout.close()
sys.stdout = sys.__stdout__
self.sig_output.emit(file_id, '\n')
return 0
# -----------------------------------------------------------------------------
class HEOutputHandler(io.StringIO):
"""
Simple output handler we install at sys.stdout to capture printed output
and send it through a callback function instead of printing to the console.
"""
def __init__(self, callback=None):
super().__init__()
self._callback = callback
def write(self, s: str):
if self._callback is not None:
self._callback(s)
# -----------------------------------------------------------------------------
class HEProcessingException(Exception):
def __init__(self, message=None):
super().__init__(message)
# -----------------------------------------------------------------------------
class HEAbortException(Exception):
pass
# -----------------------------------------------------------------------------
``` |
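The HEWorker docstring above points to `HEMainWindow.start_worker()` (not part
of this file) for the real thread wiring. Below is a minimal, hypothetical
console sketch of that wiring; the `Driver` class, its slots, and the video
path are inventions for this illustration, and it assumes the `hawkeye`
package is importable and the bundled FFmpeg/FFprobe binaries are available.

```python
import sys
from PySide2.QtCore import QCoreApplication, QObject, QThread, Signal, Slot

from hawkeye.worker import HEWorker


class Driver(QObject):
    """Lives on the main thread; talks to the worker via queued signals."""
    sig_process = Signal(str, bool)

    @Slot(str, str)
    def on_output(self, file_id, text):
        print(text, end='')

    @Slot(str, dict, bool)
    def on_analyze_done(self, file_id, notes, ok):
        print(f'analysis finished, success = {ok}')
        QCoreApplication.quit()


app = QCoreApplication(sys.argv)

worker = HEWorker(app=app)
thread = QThread()
worker.moveToThread(thread)
thread.start()

driver = Driver()
# cross-thread signal/slot connections default to queued delivery
driver.sig_process.connect(worker.on_process_video)
worker.sig_output.connect(driver.on_output)
worker.sig_analyze_done.connect(driver.on_analyze_done)

# transcode the video, then analyze the juggling (placeholder path)
driver.sig_process.emit('videos/juggling.mp4', True)
sys.exit(app.exec_())
```

A real application would also request interruption and join the worker thread
on shutdown, as HEWorker.abort() anticipates.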