| metadata (dict) | text (string, 60 to 3.49M chars) |
|---|---|
{
"source": "jlmadurga/python_selenium_astride",
"score": 3
} |
#### File: python_selenium_astride/tests/pages.py
```python
from selenium_astride import BasePage, BasePageCSSElement, BasePageNamedElement
from .locators import HomeLocators, LoginLocators
class HomePage(BasePage):
def go_login(self):
self._click_action(HomeLocators.LOGIN)
def first_entry(self):
element = self._wait_for_element(HomeLocators.FIRST_ENTRY_TITLE, 1)
return element.text
class UsernameElement(BasePageCSSElement):
locator = "#username"
class PasswordElement(BasePageNamedElement):
locator = "password"
class LoginPage(BasePage):
username = UsernameElement()
    password = PasswordElement()
def title_page(self):
element = self._wait_for_element(LoginLocators.TITLE, 1)
return element.text
def login(self):
self._click_action(LoginLocators.SUBMIT)
def get_error(self):
element = self._wait_for_element(LoginLocators.ERROR, 1)
return element.text
``` |
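For orientation, here is a minimal, hypothetical usage sketch of the page objects above. It assumes `BasePage` accepts the WebDriver instance in its constructor and that the element descriptors write into the located fields (the usual page-object convention); the URL, credentials, and import path are placeholders.

```python
# Hypothetical usage sketch; BasePage(driver) and the import path are assumptions.
from selenium import webdriver

from tests.pages import HomePage, LoginPage  # module path assumed from the header above

driver = webdriver.Firefox()
driver.get("http://localhost:8000/")  # placeholder URL

home = HomePage(driver)
home.go_login()

login = LoginPage(driver)
login.username = "alice"    # BasePageCSSElement descriptor -> CSS selector "#username"
login.password = "secret"   # BasePageNamedElement descriptor -> field named "password"
login.login()
print(login.get_error())

driver.quit()
```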
{
"source": "jlmadurga/yowsup-gateway",
"score": 2
} |
#### File: yowsup-gateway/tests/test_gateway_layer.py
```python
import unittest
import time
from yowsup.layers import YowProtocolLayerTest
from yowsup_gateway.layer import GatewayLayer
from yowsup.layers.protocol_messages.protocolentities import TextMessageProtocolEntity
from yowsup.layers.protocol_receipts.protocolentities import IncomingReceiptProtocolEntity
from yowsup.layers import YowLayerEvent
from yowsup.layers.protocol_acks.protocolentities.test_ack_incoming import entity as incomingAckEntity
from yowsup.layers.network import YowNetworkLayer
from yowsup_gateway.exceptions import ConnectionError
from yowsup_gateway.layer import ExitGateway
from yowsup_gateway import YowsupGateway
from . import success_protocol_entity
try:
import Queue
except ImportError:
import queue as Queue
class DummyStack(YowsupGateway):
def __init__(self):
self.detached_queue = Queue.Queue()
self.result = None
self._props = {}
class GatewayLayerTest(YowProtocolLayerTest, GatewayLayer):
def setUp(self):
GatewayLayer.__init__(self)
self.connected = True
self.setStack(DummyStack())
def tearDown(self):
pass
def send_message(self):
content = "Hello world"
number = "341111111"
message = (number, content)
self.onEvent(YowLayerEvent(GatewayLayer.EVENT_SEND_MESSAGES, messages=[message]))
return message
def receive_ack(self):
incoming_ack_entity = incomingAckEntity
self.ack_pending.append(incoming_ack_entity.getId())
self.receive(incoming_ack_entity)
return incoming_ack_entity
def receive_message(self):
content = "Received message"
jid = "<EMAIL>"
msg = TextMessageProtocolEntity(content, _from=jid)
self.receive(msg)
return msg
def receive_receipt(self):
receipt = IncomingReceiptProtocolEntity("123", "sender", int(time.time()))
self.receive(receipt)
return receipt
def test_connection_successful(self):
self.connected = False
self.on_success(success_protocol_entity())
self.assertTrue(self.connected)
def test_connection_failure(self):
self.connected = False
self.assertFalse(self.connected)
def test_send_message_ok(self):
message = self.send_message()
msg_sent = self.lowerSink.pop()
self.assertEqual(msg_sent.getBody(), message[1])
self.assertEqual(msg_sent.getTo(), message[0] + "@s.whatsapp.net")
self.assertEqual(msg_sent.getId(), self.ack_pending.pop())
self.assertEqual(self.outbox, [msg_sent])
def test_send_message_not_connected(self):
self.connected = False
with self.assertRaises(ConnectionError):
self.send_message()
def test_receive_ack_message_ok(self):
ack = self.receive_ack()
self.assertEqual(self.ack_pending, [])
self.assertEqual(self.inbox, [ack])
self.assert_broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_DISCONNECT))
def test_receive_message(self):
msg = self.receive_message()
self.assertEqual(self.inbox, [msg])
ack = self.lowerSink.pop()
self.assertEqual(ack.getId(), msg.getId())
self.assertEqual(self.outbox, [ack])
def test_receive_receipt(self):
receipt = self.receive_receipt()
self.assertEqual(self.inbox, [receipt])
ack = self.lowerSink.pop()
self.assertEqual(ack.getId(), receipt.getId())
self.assertEqual(self.outbox, [ack])
def test_disconnect(self):
with self.assertRaises(ExitGateway):
self.onEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_DISCONNECTED))
self.assertFalse(self.connected)
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
``` |
{
"source": "jlmaners/core",
"score": 2
} |
#### File: components/senz/api.py
```python
from typing import cast
from aiosenz import AbstractSENZAuth
from httpx import AsyncClient
from homeassistant.helpers import config_entry_oauth2_flow
class SENZConfigEntryAuth(AbstractSENZAuth):
"""Provide nVent RAYCHEM SENZ authentication tied to an OAuth2 based config entry."""
def __init__(
self,
httpx_async_client: AsyncClient,
oauth_session: config_entry_oauth2_flow.OAuth2Session,
) -> None:
"""Initialize SENZ auth."""
super().__init__(httpx_async_client)
self._oauth_session = oauth_session
async def get_access_token(self) -> str:
"""Return a valid access token."""
await self._oauth_session.async_ensure_token_valid()
return cast(str, self._oauth_session.token["access_token"])
```
#### File: components/senz/config_flow.py
```python
import logging
from homeassistant.helpers import config_entry_oauth2_flow
from .const import DOMAIN
class OAuth2FlowHandler(
config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN
):
"""Config flow to handle SENZ OAuth2 authentication."""
DOMAIN = DOMAIN
@property
def logger(self) -> logging.Logger:
"""Return logger."""
return logging.getLogger(__name__)
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
return {"scope": "restapi offline_access"}
```
#### File: components/tractive/diagnostics.py
```python
from __future__ import annotations
from homeassistant.components.diagnostics import async_redact_data
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from homeassistant.core import HomeAssistant
from .const import DOMAIN, TRACKABLES
TO_REDACT = {CONF_PASSWORD, CONF_EMAIL, "title", "_id"}
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, config_entry: ConfigEntry
) -> dict:
"""Return diagnostics for a config entry."""
trackables = hass.data[DOMAIN][config_entry.entry_id][TRACKABLES]
diagnostics_data = async_redact_data(
{
"config_entry": config_entry.as_dict(),
"trackables": [item.trackable for item in trackables],
},
TO_REDACT,
)
return diagnostics_data
``` |
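As a quick, hedged illustration of what the redaction step above produces: to my understanding, `async_redact_data` recursively masks the values of the listed keys and leaves everything else untouched. The sample dictionary below is invented.

```python
# Illustrative only: invented sample data showing the effect of async_redact_data.
from homeassistant.components.diagnostics import async_redact_data

sample = {
    "config_entry": {"data": {"email": "user@example.com", "password": "hunter2"}},
    "trackables": [{"_id": "abc123", "details": {"title": "Rex"}}],
}
redacted = async_redact_data(sample, {"email", "password", "title", "_id"})
# Values stored under "email", "password", "title" and "_id" come back masked
# (Home Assistant uses a "**REDACTED**" placeholder); the rest is untouched.
print(redacted)
```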
{
"source": "jlmarrugom/Dashapp",
"score": 3
} |
#### File: Dashapp/ControlBox/mapa.py
```python
import numpy as np
import pandas as pd
import plotly.graph_objects as go
def mun_to_coord(full_ser):
"""
    Receives a DataFrame with municipalities,
    adds their coordinates,
    and returns a DataFrame.
"""
full_ser['MUNICIPIO'] = full_ser['MUNICIPIO'].astype(object).replace({1:'Lorica',
2:'Planeta Rica',
3:'Tierralta',
4:'Sahagun',
5:'Montelibano',
6:'Montería'})
    try:  # for the bats (Murcielagos) dataset, which uses a 'Lugar' column instead
        full_ser['MUNICIPIO'] = full_ser['Lugar']
    except KeyError:
        pass
    full_ser['lat'] = 0
    full_ser['lon'] = 0
    coords = {
        'Montería': (8.7558921, -75.887029),
        'Lorica': (9.2394583, -75.8139786),
        'Planeta Rica': (8.4076739, -75.5840456),
        'Tierralta': (8.1717342, -76.059376),
        'Sahagun': (8.9472964, -75.4434972),
        'Montelibano': (7.9800534, -75.4167198),
        'Cereté': (8.8852282, -75.7922421),
        'San Antero': (9.373016, -75.7595056),
    }
    # Assign with .loc (mask, column) to avoid pandas chained-assignment issues
    for municipio, (lat, lon) in coords.items():
        full_ser.loc[full_ser['MUNICIPIO'] == municipio, 'lat'] = lat
        full_ser.loc[full_ser['MUNICIPIO'] == municipio, 'lon'] = lon
return full_ser
def table_prueba(pat_df,prueba):
"""
    `prueba` is the test type, Serologia or PCR; they are different data sets.
"""
df = pat_df.copy(deep=True)
df = df[['lat','lon','MUNICIPIO']].groupby('MUNICIPIO').max()
if prueba=='PCR':
# df = pat_df[pat_df['RESULTADO PCR']=='POSITIVO']
# df = df.dropna()
# max_amount = float(df['export_val'].max())
df = df.merge(pat_df[['RESULTADO PCR','MUNICIPIO']].loc[pat_df['RESULTADO PCR']=='POSITIVO'].groupby(['MUNICIPIO']).count() ,how='outer',on='MUNICIPIO')
df = df.merge(pat_df[['RESULTADO PCR','MUNICIPIO']].groupby(['MUNICIPIO']).count() ,how='inner',on='MUNICIPIO')
#df = df.merge(pat_df[['RESULTADO PCR','MUNICIPIO']].loc[pat_df['RESULTADO PCR']=='POSITIVO'].groupby(['MUNICIPIO']).agg(lambda x:x.value_counts().index[0])
df = df.rename(columns={'RESULTADO PCR_x':'POSITIVOS PCR',
'RESULTADO PCR_y':'No DE PRUEBAS PCR'})
df = df.reset_index()
else:
df = df.merge(pat_df[['RESULTADO SEROLOGIA','MUNICIPIO']].loc[pat_df['RESULTADO SEROLOGIA']==1].groupby(['MUNICIPIO']).count() ,how='outer',on='MUNICIPIO')
df = df.merge(pat_df[['RESULTADO SEROLOGIA','MUNICIPIO']].groupby(['MUNICIPIO']).count() ,how='inner',on='MUNICIPIO')
#df = df.merge(full_ser[['RESULTADO PCR','MUNICIPIO']].loc[full_ser['RESULTADO PCR']=='POSITIVO'].groupby(['MUNICIPIO']).agg(lambda x:x.value_counts().index[0])
df['VULNERABILIDAD (%)'] = round(100*(1-(df['RESULTADO SEROLOGIA_x']/df['RESULTADO SEROLOGIA_y'])))
df = df.rename(columns={'RESULTADO SEROLOGIA_x':'POSITIVOS SEROLOGIA',
'RESULTADO SEROLOGIA_y':'No DE PRUEBAS SEROLOGIA'})
df = df.reset_index()
return df
def mapping_df(full_ser,prueba):
"""
    Receives a DataFrame with coordinates and plots it
    on a map. Returns a figure.
    `prueba` is the test type, Serologia or PCR.
"""
df = table_prueba(full_ser, prueba)
print(df.head())
    # Map:
import folium
folium_hmap = folium.Figure(width=500, height=500)
m = folium.Map(location=[8.3344713,-75.6666238],
width='100%',
height='100%',
                   zoom_start=8,  # default is 10
                   tiles="OpenStreetMap"  # OpenStreetMap, Stamen Toner (Terrain, Watercolor)
).add_to(folium_hmap)
data = df
if prueba=='Serologia':
for i in range(0,len(data)):
html = f"""
<head>
<link rel="stylesheet" href="https://codepen.io/chriddyp/pen/dZVMbK.css">
        </head>
        <h4> {data.iloc[i]['MUNICIPIO']}</h4>
<p> Serología: </p>
<p>Positivas: {data.iloc[i]['POSITIVOS SEROLOGIA']}</p>
<p> Total: {data.iloc[i]['No DE PRUEBAS SEROLOGIA']}</p>
"""
iframe = folium.IFrame(html=html,width=160, height=160)
popup = folium.Popup(iframe, max_width=2650)
folium.Circle(
location=[data.iloc[i]['lat'], data.iloc[i]['lon']],
popup=popup,
radius=float(data.iloc[i]['No DE PRUEBAS SEROLOGIA'])*100,
color='lightgray',
fill=True,
fill_color='gray'
).add_to(m)
for i in range(0,len(data)):
html = f"""
<head>
<link rel="stylesheet" href="https://codepen.io/chriddyp/pen/dZVMbK.css">
        </head>
        <h4> {data.iloc[i]['MUNICIPIO']}</h4>
<p> Serología: </p>
<p>Positivas: {data.iloc[i]['POSITIVOS SEROLOGIA']}</p>
<p> Total: {data.iloc[i]['No DE PRUEBAS SEROLOGIA']}</p>
"""
iframe = folium.IFrame(html=html,width=160, height=160)
popup = folium.Popup(iframe, max_width=2650)
folium.Circle(
location=[data.iloc[i]['lat'], data.iloc[i]['lon']],
popup=popup,
radius=float(data.iloc[i]['POSITIVOS SEROLOGIA'])*100,
color='cadetblue',
fill=True,
fill_color='blue'
).add_to(m)
folium_hmap.save('prueba_por_municipios.html')
else:
for i in range(0,len(data)):
html = f"""
<head>
<link rel="stylesheet" href="https://codepen.io/chriddyp/pen/dZVMbK.css">
        </head>
        <h4> {data.iloc[i]['MUNICIPIO']}</h4>
<p> PCR: </p>
<p>Positivas: {data.iloc[i]['POSITIVOS PCR']}</p>
<p> Total: {data.iloc[i]['No DE PRUEBAS PCR']}</p>
"""
iframe = folium.IFrame(html=html,width=160, height=160)
popup = folium.Popup(iframe, max_width=2650)
folium.Circle(
location=[data.iloc[i]['lat'], data.iloc[i]['lon']],
popup=popup,
radius=float(data.iloc[i]['No DE PRUEBAS PCR'])*150,
color='lightgray',
fill=True,
fill_color='lightgray'
).add_to(m)
for i in range(0,len(data)):
html = f"""
<head>
<link rel="stylesheet" href="https://codepen.io/chriddyp/pen/dZVMbK.css">
        </head>
        <h4> {data.iloc[i]['MUNICIPIO']}</h4>
<p> PCR: </p>
<p>Positivas: {data.iloc[i]['POSITIVOS PCR']}</p>
<p> Total: {data.iloc[i]['No DE PRUEBAS PCR']}</p>
"""
iframe = folium.IFrame(html=html,width=160, height=160)
popup = folium.Popup(iframe, max_width=2650)
folium.Circle(
location=[data.iloc[i]['lat'], data.iloc[i]['lon']],
popup=popup,
radius=float(data.iloc[i]['POSITIVOS PCR'])*150,
color='crimson',
fill=True,
fill_color='crimson'
).add_to(m)
folium_hmap.save('prueba_por_municipios.html')
return folium_hmap
# mapping_df(mun_to_coord(df))
```
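A small, hypothetical usage sketch of the two helpers above. The records and coded municipality values are invented, and pandas/folium are assumed to be installed.

```python
# Hypothetical usage of mun_to_coord() and mapping_df(); data values are invented.
import pandas as pd

pat_df = pd.DataFrame({
    'MUNICIPIO': [1, 6, 6],                                  # 1 = Lorica, 6 = Montería
    'RESULTADO PCR': ['POSITIVO', 'NEGATIVO', 'POSITIVO'],
    'RESULTADO SEROLOGIA': [1, 0, 1],
})
pat_df = mun_to_coord(pat_df)      # adds 'lat'/'lon' columns per municipality
fig = mapping_df(pat_df, 'PCR')    # renders the circles and writes prueba_por_municipios.html
```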
#### File: Dashapp/ControlBox/radial_plot.py
```python
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
from module_selection import *
def radial_plot(df):
    data = module_selection(df,1).copy(deep=True) # basic info
    data.loc[:,'HA ESTADO ENFERMO A HA TENIDO SÍNTOMAS LOS ÚLTIMOS TRES MESES':'DIARREA'] = data.loc[:,'HA ESTADO ENFERMO A HA TENIDO SÍNTOMAS LOS ÚLTIMOS TRES MESES':'DIARREA'].replace({2:0,3:np.nan}) # recode to the convention 0: No, 1: Yes
    # Keep only people with symptoms:
    data = data.loc[data['HA ESTADO ENFERMO A HA TENIDO SÍNTOMAS LOS ÚLTIMOS TRES MESES']!=0].dropna() # NaNs drag the average down
    data = data.groupby('MUNICIPIO').mean() # average of each symptom per municipality
#Radial plot
data = data.loc[:,'TOS':'DIARREA']
categories = ['TOS','DIFICULTAD PARA RESPIRAR','FATIGA','DOLORES MUSCULARES Y CORPORALES',
'DOLOR DE CABEZA','PERDIDAD DEL OLFATO O DEL GUSTO','DOLOR DE GARGANTA',
'CONGESTION DE LA NARIZ','NÁUSEAS O VÓMITOS','DIARREA']
fig = go.Figure()
fig.add_trace(go.Scatterpolar(
r=data.iloc[0],
theta=categories,
name='Lorica'
))
fig.add_trace(go.Scatterpolar(
r=data.iloc[1],
theta=categories,
name='Planeta Rica'
))
fig.add_trace(go.Scatterpolar(
r=data.iloc[2],
theta=categories,
name='Montelibano'
))
fig.add_trace(go.Scatterpolar(
r=data.iloc[3],
theta=categories,
name='Tierralta'
))
fig.add_trace(go.Scatterpolar(
r=data.iloc[4],
theta=categories,
name='Monteria'
))
fig.add_trace(go.Scatterpolar(
r=data.iloc[5],
theta=categories,
name='Sahagún'
))
fig.update_layout(
polar=dict(
radialaxis=dict(
visible=True,
range=[0, 1]
)),
showlegend=True
)
fig.update_layout(
title={
'text': "Sintomas por Municipio",
'y':0.95,
'x':0.45,
'xanchor': 'center',
'yanchor': 'top'}
# legend=dict(
# yanchor="top",
# y=0.99,
# xanchor="left",
# x=0.10)
)
#fig.show()
return fig
``` |
{
"source": "jlmaurer/pygmt",
"score": 3
} |
#### File: pygmt/tests/test_datasets.py
```python
import numpy as np
import numpy.testing as npt
import pytest
from pygmt.datasets import (
load_earth_relief,
load_japan_quakes,
load_ocean_ridge_points,
load_sample_bathymetry,
load_usgs_quakes,
)
from pygmt.exceptions import GMTInvalidInput
def test_japan_quakes():
"""
Check that the dataset loads without errors.
"""
data = load_japan_quakes()
assert data.shape == (115, 7)
summary = data.describe()
assert summary.loc["min", "year"] == 1987
assert summary.loc["max", "year"] == 1988
assert summary.loc["min", "month"] == 1
assert summary.loc["max", "month"] == 12
assert summary.loc["min", "day"] == 1
assert summary.loc["max", "day"] == 31
def test_ocean_ridge_points():
"""
Check that the @ridge.txt dataset loads without errors.
"""
data = load_ocean_ridge_points()
assert data.shape == (4146, 2)
summary = data.describe()
assert summary.loc["min", "longitude"] == -179.9401
assert summary.loc["max", "longitude"] == 179.935
assert summary.loc["min", "latitude"] == -65.6182
assert summary.loc["max", "latitude"] == 86.8
def test_sample_bathymetry():
"""
Check that the @tut_ship.xyz dataset loads without errors.
"""
data = load_sample_bathymetry()
assert data.shape == (82970, 3)
summary = data.describe()
assert summary.loc["min", "longitude"] == 245.0
assert summary.loc["max", "longitude"] == 254.705
assert summary.loc["min", "latitude"] == 20.0
assert summary.loc["max", "latitude"] == 29.99131
assert summary.loc["min", "bathymetry"] == -7708.0
assert summary.loc["max", "bathymetry"] == -9.0
def test_usgs_quakes():
"""
Check that the dataset loads without errors.
"""
data = load_usgs_quakes()
assert data.shape == (1197, 22)
def test_earth_relief_fails():
"""
Make sure earth relief fails for invalid resolutions.
"""
resolutions = "1m 1d bla 60d 001m 03".split()
resolutions.append(60)
for resolution in resolutions:
with pytest.raises(GMTInvalidInput):
load_earth_relief(resolution=resolution)
# Only test 01d and 30m to avoid downloading large datasets in CI
def test_earth_relief_01d():
"""
Test some properties of the earth relief 01d data.
"""
data = load_earth_relief(resolution="01d", registration="gridline")
assert data.shape == (181, 361)
npt.assert_allclose(data.lat, np.arange(-90, 91, 1))
npt.assert_allclose(data.lon, np.arange(-180, 181, 1))
npt.assert_allclose(data.min(), -8592.5)
npt.assert_allclose(data.max(), 5559.0)
def test_earth_relief_01d_with_region():
"""
Test loading low-resolution earth relief with 'region'.
"""
with pytest.raises(NotImplementedError):
load_earth_relief("01d", region=[0, 180, 0, 90])
def test_earth_relief_30m():
"""
Test some properties of the earth relief 30m data.
"""
data = load_earth_relief(resolution="30m", registration="gridline")
assert data.shape == (361, 721)
npt.assert_allclose(data.lat, np.arange(-90, 90.5, 0.5))
npt.assert_allclose(data.lon, np.arange(-180, 180.5, 0.5))
npt.assert_allclose(data.min(), -9460.5)
npt.assert_allclose(data.max(), 5887.5)
def test_earth_relief_05m_with_region():
"""
Test loading a subregion of high-resolution earth relief grid.
"""
data = load_earth_relief(
resolution="05m", region=[120, 160, 30, 60], registration="gridline"
)
assert data.coords["lat"].data.min() == 30.0
assert data.coords["lat"].data.max() == 60.0
assert data.coords["lon"].data.min() == 120.0
assert data.coords["lon"].data.max() == 160.0
assert data.data.min() == -9633.0
assert data.data.max() == 2532.0
assert data.sizes["lat"] == 361
assert data.sizes["lon"] == 481
def test_earth_relief_05m_without_region():
"""
Test loading high-resolution earth relief without passing 'region'.
"""
with pytest.raises(GMTInvalidInput):
load_earth_relief("05m")
def test_earth_relief_incorrect_registration():
"""
Test loading earth relief with incorrect registration type.
"""
with pytest.raises(GMTInvalidInput):
load_earth_relief(registration="improper_type")
``` |
{
"source": "jlmaurer/PyRate",
"score": 2
} |
#### File: PyRate/pyrate/config.py
```python
from __future__ import print_function
import os
from os.path import splitext
import warnings
from pyrate import compat
from pyrate import mpiops
# TODO: add regex column to check if some values are within bounds? Potential
# problem with the checking being done in the middle of the runs, as bad values
# could cause crashes & destroying some of the results.
# general constants
NO_MULTILOOKING = 1
# constants for lookups
#: STR; Name of input interferogram list file
IFG_FILE_LIST = 'ifgfilelist'
#: BOOL (0/1); The interferogram processor used (0==ROIPAC, 1==GAMMA)
PROCESSOR = 'processor'
#: STR; Name of directory containing input interferograms.
OBS_DIR = 'obsdir'
#: STR; Name of directory for saving output products
OUT_DIR = 'outdir'
#: STR; Name of Digital Elevation Model file
DEM_FILE = 'demfile'
#: STR; Name of the header for the DEM
DEM_HEADER_FILE = 'demHeaderFile'
#: STR; Name of directory containing GAMMA SLC parameter files
SLC_DIR = 'slcFileDir'
#: STR; The projection of the input interferograms.
INPUT_IFG_PROJECTION = 'projection'
#: FLOAT; The no data value in the interferogram files.
NO_DATA_VALUE = 'noDataValue'
#: FLOAT; No data averaging threshold for prepifg
NO_DATA_AVERAGING_THRESHOLD = 'noDataAveragingThreshold'
#: BOOL (1/2/3); Re-project data from Line of sight, 1 = vertical,
# 2 = horizontal, 3 = no conversion
REPROJECTION = 'prjflag' # NOT CURRENTLY USED
#: BOOL (0/1); Select MST algorithm, 0 = Matlab Pirate algorithm, 1 = NetworkX
NETWORKX_OR_MATLAB_FLAG = 'networkx_or_matlab'
#: BOOL (0/1): Convert no data values to Nan
NAN_CONVERSION = 'nan_conversion'
# Prepifg parameters
#: BOOL (1/2/3/4); Method for cropping interferograms, 1 = minimum overlapping area (intersection), 2 = maximum area (union), 3 = customised area, 4 = all ifgs already same size
IFG_CROP_OPT = 'ifgcropopt'
#: INT; Multi look factor for interferogram preparation in x dimension
IFG_LKSX = 'ifglksx'
#: INT; Multi look factor for interferogram preparation in y dimension
IFG_LKSY = 'ifglksy'
#: REAL; Minimum longitude for cropping with method 3
IFG_XFIRST = 'ifgxfirst'
#: REAL; Maximum longitude for cropping with method 3
IFG_XLAST = 'ifgxlast'
#: REAL; Minimum latitude for cropping with method 3
IFG_YFIRST = 'ifgyfirst'
#: REAL; Maximum latitude for cropping with method 3
IFG_YLAST = 'ifgylast'
# reference pixel parameters
#: INT; Coordinate in x of reference pixel OR -1 = perform search
REFX = 'refx'
#: INT; Coordinate in y of reference pixel OR -1 = perform search
REFY = 'refy'
#: INT; Number of reference pixel grid search nodes in x dimension
REFNX = "refnx"
#: INT; Number of reference pixel grid search nodes in y dimension
REFNY = "refny"
#: INT; Dimension of reference pixel search window
REF_CHIP_SIZE = 'refchipsize'
#: REAL; Minimum fraction of observations required in search window for pixel to be a viable reference pixel
REF_MIN_FRAC = 'refminfrac'
#: BOOL (1/2); Reference phase estimation method
REF_EST_METHOD = 'refest'
#atmospheric error correction parameters NOT CURRENTLY USED
APS_CORRECTION = 'apscorrect'
APS_METHOD = 'apsmethod'
APS_INCIDENCE_MAP = 'incidencemap'
APS_INCIDENCE_EXT = 'APS_INCIDENCE_EXT'
APS_ELEVATION_MAP = 'elevationmap'
APS_ELEVATION_EXT = 'APS_ELEVATION_EXT'
# orbital error correction/parameters
#: BOOL (1/0); Flag controlling whether to apply orbital error correction
ORBITAL_FIT = 'orbfit'
#: BOOL (1/2); Method for orbital error correction, 1: independent, 2: network
ORBITAL_FIT_METHOD = 'orbfitmethod'
#: BOOL (1/2/3) Order of orbital error model, 1 = planar in x and y (2 parameter model), 2 = quadratic in x and y (5 parameter model), 3 = quadratic in x and cubic in y (part-cubic 6 parameter model)
ORBITAL_FIT_DEGREE = 'orbfitdegrees'
#: INT; Multi look factor for orbital error calculation in x dimension
ORBITAL_FIT_LOOKS_X = 'orbfitlksx'
#: INT; Multi look factor for orbital error calculation in y dimension
ORBITAL_FIT_LOOKS_Y = 'orbfitlksy'
# Linear rate/stacking parameters
#: REAL; Threshold ratio between 'model minus observation' residuals and a-priori observation standard deviations for linear rate estimate acceptance (otherwise remove furthest outlier and re-iterate)
LR_NSIG = 'nsig'
#: INT; Number of required observations per pixel for the linear rate inversion
LR_PTHRESH = 'pthr'
#: REAL; Maximum allowable standard error for pixels in linear rate inversion.
LR_MAXSIG = 'maxsig'
# atmospheric delay errors fitting parameters NOT CURRENTLY USED
# atmfitmethod = 1: interferogram by interferogram; atmfitmethod = 2, epoch by epoch
#ATM_FIT = 'atmfit'
#ATM_FIT_METHOD = 'atmfitmethod'
#: BOOL (0/1) Do spatio-temporal filter
APSEST = 'apsest'
# temporal low-pass filter parameters
TLPF_METHOD = 'tlpfmethod'
TLPF_CUTOFF = 'tlpfcutoff'
TLPF_PTHR = 'tlpfpthr'
# spatially correlated noise low-pass filter parameters
SLPF_METHOD = 'slpfmethod'
SLPF_CUTOFF = 'slpfcutoff'
SLPF_ORDER = 'slpforder'
SLPF_NANFILL = 'slpnanfill'
SLPF_NANFILL_METHOD = 'slpnanfill_method'
# Time series parameters
#: BOOL (1/0); Do Time series calculation
TIME_SERIES_CAL = 'tscal'
#: INT (1/2); Method for time series inversion (1: Laplacian Smoothing; 2: SVD)
TIME_SERIES_METHOD = 'tsmethod'
#: INT; Number of required input observations per pixel for time series inversion
TIME_SERIES_PTHRESH = 'ts_pthr'
#: INT (1/2); Order of Laplacian smoothing operator, first or second order
TIME_SERIES_SM_ORDER = 'smorder'
#: REAL; Laplacian smoothing factor (value used is 10**smfactor)
TIME_SERIES_SM_FACTOR = 'smfactor'
# tsinterp is automatically assigned in the code; not needed in conf file
#TIME_SERIES_INTERP = 'tsinterp'
#: BOOL (0/1/2); Use parallelisation/Multi-threading
PARALLEL = 'parallel'
#: INT; Number of processes for multi-threading
PROCESSES = 'processes'
#: BOOL (0/1); Switch for using Luigi to perform prepifg step
LUIGI = 'use_luigi'
# Orbital error correction constants for conversion to readable flags
INDEPENDENT_METHOD = 'INDEPENDENT'
NETWORK_METHOD = 'NETWORK'
PLANAR = 'PLANAR'
QUADRATIC = 'QUADRATIC'
PART_CUBIC = 'PART_CUBIC'
# dir for temp files
TMPDIR = 'tmpdir'
def _orb_degree_conv(deg):
"""
Convenience: convert numerical degree flag to human readable string
"""
degree = int(deg)
if degree == 1:
return PLANAR
if degree == 2:
return QUADRATIC
if degree == 3:
return PART_CUBIC
raise ValueError("Orbital fit polynomial degree option not recognised")
def _orb_method_conv(meth):
"""
Convenience: convert numerical method flag to human readable string
"""
method = int(meth)
if method == 1:
return INDEPENDENT_METHOD
if method == 2:
return NETWORK_METHOD
raise ValueError("Orbital fit method not recognised")
# Lookup to help convert args to correct type/defaults
# format is key : (conversion, default value)
# None = no conversion
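# For example, _parse_pars() below applies this table as follows:
#   _parse_pars({'ifglksx': '2'})['ifglksx']  -> 2          (string converted with int)
#   _parse_pars({})['orbfitmethod']           -> 'NETWORK'  (missing key -> default)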
PARAM_CONVERSION = {
REPROJECTION : (int, 3), # Default no conversion, CONVERSION NOT IMPLEMENTED
IFG_CROP_OPT : (int, 1), # default to area 'intersection' option
IFG_LKSX : (int, NO_MULTILOOKING),
IFG_LKSY : (int, NO_MULTILOOKING),
IFG_XFIRST : (float, None),
IFG_XLAST : (float, None),
IFG_YFIRST : (float, None),
IFG_YLAST : (float, None),
NO_DATA_VALUE: (float, 0.0),
REFX: (int, -1),
REFY: (int, -1),
REFNX: (int, 50),
REFNY: (int, 50),
REF_CHIP_SIZE: (int, 21),
REF_MIN_FRAC: (float, 0.8),
REF_EST_METHOD: (int, 1), # default to average of whole image
ORBITAL_FIT: (int, 0),
ORBITAL_FIT_METHOD: (_orb_method_conv, NETWORK_METHOD),
ORBITAL_FIT_DEGREE: (_orb_degree_conv, QUADRATIC),
ORBITAL_FIT_LOOKS_X: (int, NO_MULTILOOKING),
ORBITAL_FIT_LOOKS_Y: (int, NO_MULTILOOKING),
LR_NSIG: (int, 3),
# pixel thresh based on nepochs? not every project may have 20 epochs
LR_PTHRESH: (int, 20),
LR_MAXSIG: (int, 2),
#ATM_FIT: (int, 0), NOT CURRENTLY USED
#ATM_FIT_METHOD: (int, 2),
APSEST: (int, 0),
TLPF_METHOD: (int, 1),
TLPF_CUTOFF: (float, 0.0),
TLPF_PTHR: (int, 1),
SLPF_METHOD: (int, 1),
SLPF_CUTOFF: (float, 0.0),
SLPF_ORDER: (int, 1),
SLPF_NANFILL: (int, 0),
TIME_SERIES_CAL: (int, 0),
# pixel thresh based on nepochs? not every project may have 20 epochs
TIME_SERIES_PTHRESH: (int, 20),
TIME_SERIES_SM_FACTOR: (float, None),
TIME_SERIES_SM_ORDER: (int, None),
TIME_SERIES_METHOD: (int, 2), # Default to SVD method
PARALLEL: (int, 0),
PROCESSES: (int, 8),
PROCESSOR: (int, None),
NETWORKX_OR_MATLAB_FLAG: (int, 1), # Default to NetworkX
LUIGI: (int, 0),
NAN_CONVERSION: (int, 0),
NO_DATA_AVERAGING_THRESHOLD: (float, 0.0),
APS_CORRECTION: (int, 0),
APS_METHOD: (int, 1)
}
PATHS = [OBS_DIR, IFG_FILE_LIST, DEM_FILE,
DEM_HEADER_FILE, OUT_DIR,
SLC_DIR,
APS_INCIDENCE_MAP,
APS_ELEVATION_MAP]
INT_KEYS = [APS_CORRECTION, APS_METHOD]
def get_config_params(path):
"""
Returns a dictionary of key:value parameter pairs from the
configuration file
:param str path: path of config file
:return: params
:rtype: dict
"""
txt = ''
with open(path, 'r') as inputFile:
for line in inputFile:
if any(x in line for x in PATHS):
pos = line.find('~')
if pos != -1:
# create expanded line
line = line[:pos] + os.environ['HOME'] + line[(pos+1):]
txt += line
params = _parse_conf_file(txt)
params[TMPDIR] = os.path.join(os.path.abspath(params[OUT_DIR]), 'tmpdir')
if mpiops.size > 1 and params[LUIGI] == 1:
raise ConfigException('LUIGI with MPI not supported. Please '
'turn off LUIGI in config file or '
'use LUIGI without MPI')
return params
def _parse_conf_file(content):
"""
Parser for converting text content into a dictionary of parameters
"""
def _is_valid(line):
"""
Check if line is not empty or has % or #
"""
return line != "" and line[0] not in "%#"
lines = [ln.split() for ln in content.split('\n') if _is_valid(ln)]
# convert "field: value" lines to [field, value]
kvpair = [(e[0].rstrip(":"), e[1]) for e in lines if len(e) == 2] \
+ [(e[0].rstrip(":"), None) for e in lines if len(e) == 1]
parameters = dict(kvpair)
for p in PATHS:
if p not in parameters:
parameters[p] = None
for p in INT_KEYS:
if p not in parameters:
parameters[p] = '0' # insert dummies
parameters = _handle_extra_parameters(parameters)
if not parameters:
raise ConfigException('Cannot parse any parameters from config file')
return _parse_pars(parameters)
def _handle_extra_parameters(params):
"""
Function to check if requirements for weather model correction are given
"""
params[APS_INCIDENCE_EXT] = None
params[APS_ELEVATION_EXT] = None
if compat.PyAPS_INSTALLED:
# define APS_INCIDENCE_EXT for gamma prepifg
if ((params[APS_INCIDENCE_MAP] is not None) and
(params[APS_ELEVATION_MAP] is not None)):
warnings.warn('Both incidence and elevation map supplied. Using '
'the incidence map and ignoring elevation map')
if (int(params[APS_CORRECTION]) and
(int(params[APS_METHOD]) == 2) and
((params[APS_INCIDENCE_MAP] is None) and
(params[APS_ELEVATION_MAP] is None))):
raise ConfigException('When doing APS correction using method 2,'
'the incidence/elevation map method,'
'one of incidence or elevation map must be '
'provided')
if params[APS_INCIDENCE_MAP] is not None:
params[APS_INCIDENCE_EXT] = \
os.path.basename(params[APS_INCIDENCE_MAP]).split('.')[-1]
params[APS_ELEVATION_MAP] = None
params[APS_ELEVATION_EXT] = None
return params
    # define APS_ELEVATION_EXT for gamma prepifg
if params[APS_ELEVATION_MAP] is not None:
params[APS_ELEVATION_EXT] = \
os.path.basename(params[APS_ELEVATION_MAP]).split('.')[-1]
return params
def _parse_pars(pars):
"""
Parses and converts config file params from text
"""
for k in PARAM_CONVERSION:
if k in pars:
# if option value is blank/missing revert to default
if pars[k] is None:
pars[k] = PARAM_CONVERSION[k][1]
conversion_func = PARAM_CONVERSION[k][0]
if conversion_func:
pars[k] = conversion_func(pars[k])
else:
# revert missing options to default value
if k in PARAM_CONVERSION:
pars[k] = PARAM_CONVERSION[k][1]
return pars
def parse_namelist(nml):
"""
Parses name list file into array of paths
:param str nml: interferogram file list
:return: list of interferogram file names
:rtype: list
"""
with open(nml) as f_in:
lines = [line.rstrip() for line in f_in]
    return list(filter(None, lines))
class ConfigException(Exception):
"""
Default exception class for configuration errors.
"""
pass
def write_config_file(params, output_conf_file):
"""
Takes a param object and writes the config file. Reverse of get_conf_params.
:param dict params: parameter dictionary
:param str output_conf_file: output file name
:return: config file
:rtype: list
"""
with open(output_conf_file, 'w') as f:
for k, v in params.items():
if k == ORBITAL_FIT_DEGREE:
v = _reverse_orb_degree_conv(v)
if k == ORBITAL_FIT_METHOD:
v = _reverse_orb_method_conv(v)
if v is not None:
f.write(''.join([k, ':\t', str(v), '\n']))
else:
f.write(''.join([k, ':\t', '', '\n']))
def _reverse_orb_degree_conv(v):
"""
Convenience: convert degree to integer for config file
"""
if v == PLANAR:
return 1
if v == QUADRATIC:
return 2
if v == PART_CUBIC:
return 3
else:
raise ValueError(
"Orbital fit polynomial degree option not recognised")
def _reverse_orb_method_conv(v):
"""
Convenience: convert method to integer for config file
"""
if v == INDEPENDENT_METHOD:
return 1
if v == NETWORK_METHOD:
return 2
else:
raise ValueError(
"Orbital fit method option not recognised")
def transform_params(params):
"""
Returns subset of all parameters for cropping and multilooking.
:param dict params: Parameter dictionary
:return: xlooks, ylooks, crop
:rtype: int
"""
t_params = [IFG_LKSX, IFG_LKSY, IFG_CROP_OPT]
xlooks, ylooks, crop = [params[k] for k in t_params]
return xlooks, ylooks, crop
def original_ifg_paths(ifglist_path):
"""
Returns sequence of paths to files in given ifglist file.
:param str ifglist_path: full path to interferogram file list
:return: full path to ifg files
:rtype: list
"""
basedir = os.path.dirname(ifglist_path)
ifglist = parse_namelist(ifglist_path)
return [os.path.join(basedir, p) for p in ifglist]
def mlooked_path(path, looks, crop_out):
"""
Adds suffix to ifg path, for creating a new path for multilooked files.
:param str path: original interferogram path
:param int looks: number of range looks applied
:param int crop_out: crop option applied
:return: multilooked file name
:rtype: str
"""
base, ext = splitext(path)
return "{base}_{looks}rlks_{crop_out}cr{ext}".format(
base=base, looks=looks, crop_out=crop_out, ext=ext)
def get_dest_paths(base_paths, crop, params, looks):
"""
Determines the full path names for the destination multilooked files
:param list base_paths: original interferogram paths
:param int crop: Crop option applied
:param dict params: Parameters dictionary
:param int looks: number of range looks applied
:return: full path names for destination files
:rtype: list
"""
dest_mlooked_ifgs = [mlooked_path(os.path.basename(q).split('.')[0] + '_'
+ os.path.basename(q).split('.')[1] +
'.tif', looks=looks, crop_out=crop)
for q in base_paths]
return [os.path.join(params[OUT_DIR], p) for p in dest_mlooked_ifgs]
def get_ifg_paths(config_file):
"""
Read the configuration file, extract interferogram file list and determine
input and output interferogram path names.
:param str config_file: Configuration file path
:return: base_unw_paths: List of unwrapped inteferograms
:return: dest_paths: List of multi-looked and cropped geotifs
:return: params: Dictionary corresponding to the config file
:rtype: list
:rtype: list
:rtype: dict
"""
params = get_config_params(config_file)
ifg_file_list = params.get(IFG_FILE_LIST)
params[IFG_FILE_LIST] = ifg_file_list
if ifg_file_list is None:
emsg = 'Error {code}: Interferogram list file name not provided ' \
'or does not exist'.format(code=2)
raise IOError(2, emsg)
xlks, _, crop = transform_params(params)
# base_unw_paths need to be geotiffed and multilooked by run_prepifg
base_unw_paths = original_ifg_paths(ifg_file_list)
# dest_paths are tifs that have been geotif converted and multilooked
dest_paths = get_dest_paths(base_unw_paths, crop, params, xlks)
return base_unw_paths, dest_paths, params
```
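To make the parsing flow above concrete, here is an illustrative sketch with hypothetical paths and values; it assumes the usual PyRate runtime dependencies (e.g. mpiops) are importable. It writes a minimal config file and reads it back with the helpers defined above.

```python
# Illustrative sketch only: hypothetical paths/values for the pyrate.config helpers.
from pyrate import config as cf

conf_text = """\
obsdir:       /data/obs
ifgfilelist:  /data/obs/ifgs.list
outdir:       /data/out
processor:    1
ifglksx:      2
ifglksy:      2
ifgcropopt:   4
"""

with open("pyrate_example.conf", "w") as fh:
    fh.write(conf_text)

params = cf.get_config_params("pyrate_example.conf")  # values converted via PARAM_CONVERSION
xlooks, ylooks, crop = cf.transform_params(params)    # -> (2, 2, 4)
```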
#### File: pyrate/tasks/prepifg.py
```python
import os
import pickle
import luigi
import pyrate.config as cf
from pyrate.prepifg import (
Ifg,
get_analysis_extent,
prepare_ifg,
PreprocessError)
from pyrate.tasks.converttogeotif import ConvertToGeotiff
from pyrate.tasks.utils import (
IfgListMixin,
InputParam,
RasterParam)
from pyrate.shared import warp_required
class GetAnalysisExtents(IfgListMixin, luigi.Task):
"""
Class used to gather analysis extents used during Luigi tasks
"""
crop_opt = luigi.IntParameter(config_path=InputParam(cf.IFG_CROP_OPT))
ifgx_first = luigi.FloatParameter(default=None,
config_path=InputParam(cf.IFG_XFIRST))
ifgy_first = luigi.FloatParameter(default=None,
config_path=InputParam(cf.IFG_YFIRST))
ifgx_last = luigi.FloatParameter(default=None,
config_path=InputParam(cf.IFG_XLAST))
ifgy_last = luigi.FloatParameter(default=None,
config_path=InputParam(cf.IFG_YLAST))
xlooks = luigi.IntParameter(config_path=InputParam(cf.IFG_LKSX))
ylooks = luigi.IntParameter(config_path=InputParam(cf.IFG_LKSY))
def requires(self):
return [ConvertToGeotiff()]
def run(self):
user_exts = (self.ifgx_first, self.ifgy_first,
self.ifgx_last, self.ifgy_last)
if not all(user_exts):
if self.crop_opt == 3:
raise PreprocessError('No custom cropping extents specified')
user_exts = None
ifgs = [Ifg(path) for path in self.ifg_tiff_list()]
extents = get_analysis_extent(
self.crop_opt,
ifgs,
self.xlooks,
self.ylooks,
user_exts)
with open(self.extents_file_name, 'wb') as ext_file:
pickle.dump(extents, ext_file)
def output(self):
return luigi.LocalTarget(self.extents_file_name)
class PrepareInterferogram(IfgListMixin, luigi.WrapperTask):
"""
Wrapper function to prepare an interferogram file for PyRate analysis
using a Luigi task. See pyrate.prepifg.prepare_ifg() for further
documentation.
"""
# pylint: disable=bad-super-call, no-member
ifg = RasterParam()
thresh = luigi.FloatParameter(config_path=InputParam(
cf.NO_DATA_AVERAGING_THRESHOLD))
crop_opt = luigi.IntParameter(config_path=InputParam(cf.IFG_CROP_OPT))
xlooks = luigi.IntParameter(config_path=InputParam(cf.IFG_LKSX))
ylooks = luigi.IntParameter(config_path=InputParam(cf.IFG_LKSY))
# verbose = luigi.BooleanParameter(default=True, significant=False)
def requires(self):
return [GetAnalysisExtents()]
def run(self):
with open(self.extents_file_name, 'rb') as ext_file:
extents = pickle.load(ext_file)
prepare_ifg(
self.ifg.data_path,
self.xlooks,
self.ylooks,
extents,
self.thresh,
self.crop_opt
)
self.ifg.close()
def output(self):
if warp_required(self.xlooks, self.ylooks, self.crop_opt):
return luigi.LocalTarget(
cf.mlooked_path(self.ifg.data_path,
self.ylooks,
self.crop_opt))
else:
return []
def complete(self):
if self.output():
return super(luigi.WrapperTask, self).complete()
else:
# then this didn't do anything, so check that
            # the requires() are complete
# ... which is exactly what luigi.WrapperTask does.
# TODO: This requires knowledge of prepare_ifg,
# that is opaque. Address with refactoring.
return super(PrepareInterferogram, self).complete()
class PrepareInterferograms(IfgListMixin, luigi.WrapperTask):
"""
Wrapper function to prepare a sequence of interferogram files for
PyRate analysis using Luigi tasks. See pyrate.prepifg.prepare_ifgs() and
pyrate.tasks.prepifg.PrepareInterferogram() for further documentation.
"""
def __init__(self, *args, **kwargs):
super(PrepareInterferograms, self).__init__(*args, **kwargs)
self.extents_removed = False
def requires(self):
return [PrepareInterferogram(ifg=Ifg(path))
for path in self.ifg_tiff_list()]
def run(self):
try:
if os.path.exists(self.extents_file_name):
os.remove(self.extents_file_name)
        except OSError:
raise PrepifgException(
'Extents file was not found in the desired '
'location: {}'.format(self.extents_file_name),
'Make sure your paths are setup correctly in config file')
self.extents_removed = True
def complete(self):
return self.extents_removed and \
super(PrepareInterferograms, self).complete()
class PrepifgException(Exception):
"""
Prepifg exception class
"""
```
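As a rough sketch of how these wrapper tasks could be kicked off programmatically: PyRate's run scripts normally do this wiring, and the config hookup via `InputParam` is assumed to already be in place, so treat this as illustrative rather than the supported entry point.

```python
# Illustrative sketch: building the Luigi task graph with the local scheduler.
# Assumes the PyRate/Luigi configuration (paths, crop/looks options) has already
# been registered, as PyRate's prepifg run script normally does.
import luigi

from pyrate.tasks.prepifg import PrepareInterferograms

if __name__ == "__main__":
    luigi.build([PrepareInterferograms()], local_scheduler=True)
```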
#### File: PyRate/tests/test_gamma.py
```python
from __future__ import print_function
import glob
import os
import re
import shutil
import sys
import tempfile
import unittest
from datetime import date, time
from os.path import join
import numpy as np
from numpy.testing import assert_array_almost_equal
from osgeo import gdal
import pyrate.ifgconstants as ifc
from pyrate import config as cf
from pyrate import gamma
from pyrate import shared
from pyrate.config import (
DEM_HEADER_FILE,
NO_DATA_VALUE,
OBS_DIR,
IFG_FILE_LIST,
PROCESSOR,
OUT_DIR,
SLC_DIR,
LUIGI,
IFG_LKSX,
IFG_LKSY,
IFG_CROP_OPT,
NO_DATA_AVERAGING_THRESHOLD,
DEM_FILE,
APS_INCIDENCE_MAP,
APS_ELEVATION_MAP)
from pyrate.scripts import run_prepifg
from pyrate.scripts.converttogtif import main as gammaMain
from pyrate.shared import write_geotiff, GeotiffException
from tests import common
from tests.common import GAMMA_TEST_DIR, SML_TEST_GAMMA
from tests.common import TEST_CONF_GAMMA, TEMPDIR
from tests.common import small_data_setup
gdal.UseExceptions()
LIGHTSPEED = 3e8 # approx
class GammaCommandLineTests(unittest.TestCase):
def setUp(self):
self.base = join(os.environ['PYRATEPATH'],
'tests', 'test_data', 'gamma')
self.hdr = join(self.base, 'dem16x20raw.dem.par')
temp_text = tempfile.mktemp()
self.confFile = os.path.join(TEMPDIR,
'{}/gamma_test.cfg'.format(temp_text))
self.ifgListFile = os.path.join(
TEMPDIR, '{}/gamma_ifg.list'.format(temp_text))
self.base_dir = os.path.dirname(self.confFile)
shared.mkdir_p(self.base_dir)
def tearDown(self):
try:
os.remove(self.exp_path)
except:
pass
shutil.rmtree(self.base_dir)
def makeInputFiles(self, data):
with open(self.confFile, 'w') as conf:
conf.write('{}: {}\n'.format(DEM_HEADER_FILE, self.hdr))
conf.write('{}: {}\n'.format(NO_DATA_VALUE, '0.0'))
conf.write('{}: {}\n'.format(OBS_DIR, self.base_dir))
conf.write('{}: {}\n'.format(IFG_FILE_LIST, self.ifgListFile))
conf.write('{}: {}\n'.format(PROCESSOR, '1'))
conf.write('{}: {}\n'.format(OUT_DIR, self.base_dir))
conf.write('{}: {}\n'.format(SLC_DIR, ''))
with open(self.ifgListFile, 'w') as ifgl:
ifgl.write(data)
def test_cmd_ifg(self):
data = join(self.base, '16x20_20090713-20090817_VV_4rlks_utm.unw')
self.exp_path = os.path.join(
self.base_dir, '16x20_20090713-20090817_VV_4rlks_utm_unw.tif')
self.common_check(data)
def test_cmd_dem(self):
data = join(self.base, 'dem16x20raw.dem')
self.exp_path = os.path.join(self.base_dir, 'dem16x20raw_dem.tif')
self.common_check(data)
def common_check(self, data):
self.makeInputFiles(data)
sys.argv = ['gamma.py', self.confFile]
gammaMain()
self.assertTrue(os.path.exists(self.exp_path))
class GammaToGeoTiffTests(unittest.TestCase):
"""Tests conversion of GAMMA rasters to custom PyRate GeoTIFF"""
@classmethod
def setUpClass(cls):
# create common combined header obj so the headers are only read once
        # tricky: needs both ifg headers, and DEM one for the extents
filenames = ['r20090713_VV.slc.par', 'r20090817_VV.slc.par']
hdr_paths = [join(GAMMA_TEST_DIR, f) for f in filenames]
hdrs = [gamma.parse_epoch_header(p) for p in hdr_paths]
dem_hdr_path = join(GAMMA_TEST_DIR, 'dem16x20raw.dem.par')
cls.DEM_HDR = gamma.parse_dem_header(dem_hdr_path)
cls.COMBINED = gamma.combine_headers(*hdrs, dem_hdr=cls.DEM_HDR)
def tearDown(self):
if os.path.exists(self.dest):
os.remove(self.dest)
def test_to_geotiff_dem(self):
hdr_path = join(GAMMA_TEST_DIR, 'dem16x20raw.dem.par')
hdr = gamma.parse_dem_header(hdr_path)
data_path = join(GAMMA_TEST_DIR, 'dem16x20raw.dem')
self.dest = os.path.join(TEMPDIR, "tmp_gamma_dem.tif")
write_geotiff(hdr, data_path, self.dest, nodata=0)
exp_path = join(GAMMA_TEST_DIR, 'dem16x20_subset_from_gamma.tif')
exp_ds = gdal.Open(exp_path)
ds = gdal.Open(self.dest)
# compare data and geographic headers
# HACK: round expected to nearest integer
assert_array_almost_equal(np.rint(exp_ds.ReadAsArray()),
ds.ReadAsArray())
self.compare_rasters(exp_ds, ds)
md = ds.GetMetadata()
self.assertTrue(md['AREA_OR_POINT'] == 'Area')
def test_to_geotiff_ifg(self):
self.dest = os.path.join(TEMPDIR, 'tmp_gamma_ifg.tif')
data_path = join(GAMMA_TEST_DIR,
'16x20_20090713-20090817_VV_4rlks_utm.unw')
write_geotiff(self.COMBINED, data_path, self.dest, nodata=0)
ds = gdal.Open(self.dest)
exp_path = join(GAMMA_TEST_DIR,
'16x20_20090713-20090817_VV_4rlks_utm.tif')
exp_ds = gdal.Open(exp_path)
# compare data and geographic headers
assert_array_almost_equal(exp_ds.ReadAsArray(), ds.ReadAsArray())
self.compare_rasters(ds, exp_ds)
md = ds.GetMetadata()
self.assertEqual(len(md), 11) # 11 metadata items
self.assertTrue(md[ifc.MASTER_DATE] == str(date(2009, 7, 13)))
self.assertTrue(md[ifc.SLAVE_DATE] == str(date(2009, 8, 17)))
self.assertTrue(md[ifc.PYRATE_TIME_SPAN] == str(35 / ifc.DAYS_PER_YEAR))
        self.assertTrue(md[ifc.MASTER_TIME] == str(12))
        self.assertTrue(md[ifc.SLAVE_TIME] == str(time(12)))
wavelen = float(md[ifc.PYRATE_WAVELENGTH_METRES])
self.assertAlmostEqual(wavelen, 0.05627457792190739)
def test_to_geotiff_wrong_input_data(self):
# use TIF, not UNW for data
self.dest = os.path.join(TEMPDIR, 'tmp_gamma_ifg.tif')
data_path = join(GAMMA_TEST_DIR,
'16x20_20090713-20090817_VV_4rlks_utm.tif')
self.assertRaises(GeotiffException, write_geotiff,
self.COMBINED, data_path, self.dest, nodata=0)
def test_mismatching_cell_resolution(self):
hdrs = self.DEM_HDR.copy()
hdrs[ifc.PYRATE_X_STEP] = 0.1 # fake a mismatch
data_path = join(GAMMA_TEST_DIR,
'16x20_20090713-20090817_VV_4rlks_utm.unw')
self.dest = os.path.join(TEMPDIR, 'fake')
self.assertRaises(GeotiffException, write_geotiff, hdrs,
data_path, self.dest, 0)
def compare_rasters(self, ds, exp_ds):
band = ds.GetRasterBand(1)
exp_band = exp_ds.GetRasterBand(1)
nodata = band.GetNoDataValue()
self.assertFalse(nodata is None)
self.assertEqual(exp_band.GetNoDataValue(), nodata)
pj = ds.GetProjection()
self.assertTrue('WGS 84' in pj)
self.assertEqual(exp_ds.GetProjection(), pj)
for exp, act in zip(exp_ds.GetGeoTransform(), ds.GetGeoTransform()):
self.assertAlmostEqual(exp, act, places=4)
def test_bad_projection(self):
hdr = self.DEM_HDR.copy()
hdr[ifc.PYRATE_DATUM] = 'nonexistent projection'
data_path = join(GAMMA_TEST_DIR, 'dem16x20raw.dem')
self.dest = os.path.join(TEMPDIR, 'tmp_gamma_dem2.tif')
self.assertRaises(GeotiffException, write_geotiff, hdr,
data_path, self.dest, nodata=0)
class GammaHeaderParsingTests(unittest.TestCase):
'Tests conversion of GAMMA headers to Py dicts'
def test_parse_gamma_epoch_header(self):
# minimal required headers are:
# date: 2009 7 13
# radar_frequency: 5.3310040e+09 Hz
path = join(GAMMA_TEST_DIR, 'r20090713_VV.slc.par')
hdrs = gamma.parse_epoch_header(path)
exp_date = date(2009, 7, 13)
self.assertEqual(hdrs[ifc.MASTER_DATE], exp_date)
exp_wavelen = LIGHTSPEED / 5.3310040e+09
self.assertEqual(hdrs[ifc.PYRATE_WAVELENGTH_METRES], exp_wavelen)
incidence_angle = 22.9671
self.assertEqual(hdrs[ifc.PYRATE_INCIDENCE_DEGREES], incidence_angle)
def test_parse_gamma_dem_header(self):
path = join(GAMMA_TEST_DIR, 'dem16x20raw.dem.par')
hdrs = gamma.parse_dem_header(path)
self.assertEqual(hdrs[ifc.PYRATE_NCOLS], 16)
self.assertEqual(hdrs[ifc.PYRATE_NROWS], 20)
self.assertEqual(hdrs[ifc.PYRATE_LAT], -33.3831945)
self.assertEqual(hdrs[ifc.PYRATE_LONG], 150.3870833)
self.assertEqual(hdrs[ifc.PYRATE_X_STEP], 6.9444445e-05)
self.assertEqual(hdrs[ifc.PYRATE_Y_STEP], -6.9444445e-05)
# Test data for the epoch header combination
H0 = {ifc.MASTER_DATE : date(2009, 7, 13),
ifc.MASTER_TIME : time(12),
ifc.PYRATE_WAVELENGTH_METRES: 1.8,
ifc.PYRATE_INCIDENCE_DEGREES: 35.565,
}
H1 = {ifc.MASTER_DATE : date(2009, 8, 17),
ifc.MASTER_TIME : time(12, 10, 10),
ifc.PYRATE_WAVELENGTH_METRES: 1.8,
ifc.PYRATE_INCIDENCE_DEGREES: 35.56,
}
H1_ERR1 = {ifc.MASTER_DATE : date(2009, 8, 17),
ifc.MASTER_TIME : time(12),
ifc.PYRATE_WAVELENGTH_METRES: 2.4,
ifc.PYRATE_INCIDENCE_DEGREES: 35.56,
}
H1_ERR2 = {ifc.MASTER_DATE : date(2009, 8, 17),
ifc.MASTER_TIME : time(12),
ifc.PYRATE_WAVELENGTH_METRES: 1.8,
ifc.PYRATE_INCIDENCE_DEGREES: 35.76,
}
class HeaderCombinationTests(unittest.TestCase):
'Tests GAMMA epoch and DEM headers can be combined into a single Py dict'
def setUp(self):
self.err = gamma.GammaException
dem_hdr_path = join(GAMMA_TEST_DIR, 'dem16x20raw.dem.par')
self.dh = gamma.parse_dem_header(dem_hdr_path)
def test_combine_headers(self):
filenames = ['r20090713_VV.slc.par', 'r20090817_VV.slc.par']
paths = [join(GAMMA_TEST_DIR, p) for p in filenames]
hdr0, hdr1 = [gamma.parse_epoch_header(p) for p in paths]
chdr = gamma.combine_headers(hdr0, hdr1, self.dh)
exp_timespan = (18 + 17) / ifc.DAYS_PER_YEAR
self.assertEqual(chdr[ifc.PYRATE_TIME_SPAN], exp_timespan)
exp_date = date(2009, 7, 13)
self.assertEqual(chdr[ifc.MASTER_DATE], exp_date)
exp_date2 = date(2009, 8, 17)
self.assertEqual(chdr[ifc.SLAVE_DATE], exp_date2)
exp_wavelen = LIGHTSPEED / 5.3310040e+09
self.assertEqual(chdr[ifc.PYRATE_WAVELENGTH_METRES], exp_wavelen)
def test_fail_non_dict_header(self):
self.assertRaises(self.err, gamma.combine_headers, H0, '', self.dh)
self.assertRaises(self.err, gamma.combine_headers, '', H0, self.dh)
self.assertRaises(self.err, gamma.combine_headers, H0, H1, None)
self.assertRaises(self.err, gamma.combine_headers, H0, H1, '')
def test_fail_mismatching_wavelength(self):
self.assertRaises(self.err, gamma.combine_headers, H0, H1_ERR1, self.dh)
def test_fail_mismatching_incidence(self):
self.assertRaises(self.err, gamma.combine_headers, H0, H1_ERR2, self.dh)
def test_fail_same_date(self):
self.assertRaises(self.err, gamma.combine_headers, H0, H0, self.dh)
def test_fail_bad_date_order(self):
self.assertRaises(self.err, gamma.combine_headers, H1, H0, self.dh)
class TestGammaLuigiEquality(unittest.TestCase):
@classmethod
def setUpClass(cls):
luigi_dir = tempfile.mktemp()
non_luigi_dir = tempfile.mkdtemp()
cls.luigi_confFile = os.path.join(
TEMPDIR,
'{}/gamma_test.conf'.format(luigi_dir)
)
cls.luigi_ifgListFile = os.path.join(
TEMPDIR,
'{}/gamma_ifg.list'.format(luigi_dir)
)
cls.non_luigi_confFile = os.path.join(
TEMPDIR,
'{}/gamma_test.conf'.format(non_luigi_dir)
)
cls.non_luigi_ifgListFile = os.path.join(
TEMPDIR,
'{}/gamma_ifg.list'.format(non_luigi_dir)
)
cls.luigi_base_dir = os.path.dirname(cls.luigi_confFile)
cls.non_luigi_base_dir = os.path.dirname(cls.non_luigi_confFile)
shared.mkdir_p(cls.luigi_base_dir)
shared.mkdir_p(cls.non_luigi_base_dir)
@classmethod
def tearDownClass(cls):
try:
shutil.rmtree(cls.luigi_base_dir)
except OSError:
print('Failed to remove temp directory: %s'
% cls.luigi_base_dir)
try:
shutil.rmtree(cls.non_luigi_base_dir)
except OSError:
print('Failed to remove temp directory: %s' %
cls.non_luigi_base_dir)
def make_input_files(self, data):
with open(self.conf_file, 'w') as conf:
conf.write('{}: {}\n'.format(NO_DATA_VALUE, '0.0'))
conf.write('{}: {}\n'.format(OBS_DIR, self.base_dir))
conf.write('{}: {}\n'.format(OUT_DIR, self.base_dir))
conf.write('{}: {}\n'.format(IFG_FILE_LIST, self.ifgListFile))
conf.write('{}: {}\n'.format(PROCESSOR, '1'))
conf.write('{}: {}\n'.format(LUIGI, self.LUIGI))
conf.write('{}: {}\n'.format(
DEM_HEADER_FILE, os.path.join(
SML_TEST_GAMMA, '20060619_utm_dem.par')))
conf.write('{}: {}\n'.format(IFG_LKSX, '1'))
conf.write('{}: {}\n'.format(IFG_LKSY, '1'))
conf.write('{}: {}\n'.format(IFG_CROP_OPT, '1'))
conf.write('{}: {}\n'.format(NO_DATA_AVERAGING_THRESHOLD, '0.5'))
conf.write('{}: {}\n'.format(SLC_DIR, ''))
conf.write('{}: {}\n'.format(DEM_FILE, common.SML_TEST_DEM_GAMMA))
conf.write('{}: {}\n'.format(APS_INCIDENCE_MAP,
common.SML_TEST_INCIDENCE))
conf.write('{}: {}\n'.format(APS_ELEVATION_MAP, ''))
with open(self.ifgListFile, 'w') as ifgl:
ifgl.write('\n'.join(data))
def test_cmd_ifg_luigi_files_created(self):
self.LUIGI = '1' # luigi or no luigi
self.conf_file = self.luigi_confFile
self.base_dir = self.luigi_base_dir
self.ifgListFile = self.luigi_ifgListFile
self.common_check(self.luigi_confFile)
def test_cmd_ifg_no_luigi_files_created(self):
self.LUIGI = '0' # luigi or no luigi
self.conf_file = self.non_luigi_confFile
self.base_dir = self.non_luigi_base_dir
self.ifgListFile = self.non_luigi_ifgListFile
self.common_check(self.non_luigi_confFile)
def common_check(self, conf_file):
data_paths = glob.glob(
os.path.join(SML_TEST_GAMMA, "*_utm.unw"))
self.make_input_files(data_paths)
base_ifg_paths, dest_paths, params = cf.get_ifg_paths(conf_file)
dest_base_ifgs = [os.path.join(
params[cf.OUT_DIR], os.path.basename(q).split('.')[0] + '_' +
os.path.basename(q).split('.')[1] + '.tif')
for q in base_ifg_paths]
sys.argv = ['pyrate', 'prepifg', conf_file]
run_prepifg.main()
for p, q in zip(dest_base_ifgs, dest_paths):
self.assertTrue(os.path.exists(p),
'{} does not exist'.format(p))
self.assertTrue(os.path.exists(q),
'{} does not exist'.format(q))
def test_luigi_vs_no_luigi_phase_data(self):
all_luigi_ifgs, all_non_luigi_ifgs = self.shared_setup()
self.assertEqual(len(all_luigi_ifgs),
len(glob.glob(os.path.join(
self.luigi_base_dir, "*.tif"))))
        self.assertEqual(len(all_luigi_ifgs), len(all_non_luigi_ifgs))
c = 0
for c, (i, j) in enumerate(zip(all_luigi_ifgs, all_non_luigi_ifgs)):
np.testing.assert_array_equal(i.phase_data, j.phase_data)
        self.assertEqual(c + 1, len(all_luigi_ifgs))
def test_equality_of_meta_data(self):
all_luigi_ifgs, all_non_luigi_ifgs = self.shared_setup()
c = 0
for c, (i, j) in enumerate(zip(all_luigi_ifgs, all_non_luigi_ifgs)):
self.assertEqual(os.path.dirname(i.data_path), self.luigi_base_dir)
# check meta data equal
self.assertDictEqual(i.meta_data, j.meta_data)
# test that DATA_TYPE exists in metadata
self.assertIn(ifc.DATA_TYPE, i.meta_data.keys())
md = i.meta_data
for k in [ifc.SLAVE_TIME, ifc.MASTER_TIME, ifc.MASTER_DATE,
ifc.SLAVE_DATE, ifc.PYRATE_WAVELENGTH_METRES,
ifc.PYRATE_TIME_SPAN, ifc.PYRATE_INSAR_PROCESSOR]:
self.assertIn(k, md)
            if '_{looks}rlks_{crop}cr'.format(looks=1, crop=1) in i.data_path:
# these are multilooked tifs
# test that DATA_TYPE is MULTILOOKED
self.assertEqual(md[ifc.DATA_TYPE], ifc.MULTILOOKED)
# else:
# # others tifs are just geotiffs
# self.assertEqual(md[ifc.DATA_TYPE], ifc.ORIG)
        self.assertEqual(c + 1, len(all_luigi_ifgs))
def shared_setup(self):
self.test_cmd_ifg_no_luigi_files_created()
self.test_cmd_ifg_luigi_files_created()
all_luigi_ifgs = small_data_setup(
glob.glob(os.path.join(self.luigi_base_dir, "*.tif")))
all_non_luigi_files = []
gamma_PTN = re.compile(r'\d{8}')
for i in glob.glob(os.path.join(self.non_luigi_base_dir,
"*.tif")):
if len(gamma_PTN.findall(i)) == 2:
all_non_luigi_files.append(i)
all_non_luigi_ifgs = small_data_setup(all_non_luigi_files)
return all_luigi_ifgs, all_non_luigi_ifgs
class TestGammaParallelVsSerial(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.serial_dir = tempfile.mkdtemp()
cls.parallel_dir = tempfile.mkdtemp()
unw_paths = glob.glob(os.path.join(SML_TEST_GAMMA, "*_utm.unw"))
# read in the params
_, _, params = cf.get_ifg_paths(TEST_CONF_GAMMA)
params[cf.OUT_DIR] = cls.serial_dir
params[cf.PARALLEL] = False
shared.mkdir_p(cls.serial_dir)
run_prepifg.gamma_prepifg(unw_paths, params)
params[cf.OUT_DIR] = cls.parallel_dir
params[cf.PARALLEL] = True
shared.mkdir_p(cls.parallel_dir)
run_prepifg.gamma_prepifg(unw_paths, params)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.parallel_dir)
shutil.rmtree(cls.serial_dir)
def test_equality(self):
serial_ifgs = small_data_setup(
datafiles=glob.glob(os.path.join(self.serial_dir, "*_1cr.tif")))
parallel_ifgs = small_data_setup(
datafiles=glob.glob(os.path.join(self.parallel_dir, "*_1cr.tif")))
for s, p in zip(serial_ifgs, parallel_ifgs):
np.testing.assert_array_almost_equal(s.phase_data, p.phase_data)
def test_meta_data_exist(self):
serial_ifgs = small_data_setup(
datafiles=glob.glob(os.path.join(self.serial_dir, "*_1cr.tif")))
parallel_ifgs = small_data_setup(
datafiles=glob.glob(os.path.join(self.parallel_dir, "*_1cr.tif")))
for s, p in zip(serial_ifgs, parallel_ifgs):
# all metadata equal
self.assertDictEqual(s.meta_data, p.meta_data)
# test that DATA_TYPE exists in metadata
self.assertIn(ifc.DATA_TYPE, s.meta_data.keys())
# test that DATA_TYPE is MULTILOOKED
self.assertEqual(s.meta_data[ifc.DATA_TYPE], ifc.MULTILOOKED)
if __name__ == "__main__":
unittest.main()
```
#### File: PyRate/tests/test_matlab_mst.py
```python
import glob
import os
import shutil
import tempfile
import unittest
import numpy as np
from pyrate import mst
from pyrate.matlab_mst import _IfgListPyRate as IfgList
from pyrate.matlab_mst import _calculate_connect_and_ntrees, DTYPE
from pyrate.matlab_mst import _matlab_mst, _matlab_mst_bool
from pyrate.matlab_mst import _matlab_mst_kruskal
from pyrate.matlab_mst import _get_sub_structure
from tests.common import small_data_setup
from tests.common import small_ifg_file_list
from tests.common import get_nml
class IfgListTest(unittest.TestCase):
def setUp(self):
self.matlab_n = [1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8,
9, 10, 11, 3, 5, 7, 9, 5, 6, 8, 11, 12, 8, 13,
9, 10, 13, 10, 11, 12]
self.matlab_masnum = [1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6,
7, 7, 8, 9, 10, 11]
self.matlab_slvnum = [3, 5, 7, 9, 5, 6, 8, 11, 12, 8, 13, 9,
10, 13, 10, 11, 12]
ifg_instance = IfgList(small_ifg_file_list())
self.ifg_list, self.epoch_list = get_nml(ifg_instance, nodata_value=0)
def test_matlab_n(self):
# add 1 to ifg_list.n as matlab indices start from 1.
np.testing.assert_array_equal(self.matlab_n, self.ifg_list.n + 1)
def test_matlab_masnum(self):
# add 1 to ifg_list.masnum as matlab indices start from 1.
np.testing.assert_array_equal(self.matlab_masnum,
self.ifg_list.master_num + 1)
def test_matlab_slvnum(self):
# add 1 to ifg_list.slvnum as matlab indices start from 1.
np.testing.assert_array_equal(self.matlab_slvnum,
self.ifg_list.slave_num + 1)
# SB: this is not used anywhere now
def sort_list(id_l, master_l, slave_l, nan_frac_l):
"""
sort list based on nan_frac
"""
sorted_list = [(i, m, s, n) for i, m, s, n in
zip(id_l, master_l, slave_l, nan_frac_l)]
sorted_list = np.array(sorted_list, dtype=DTYPE)
return np.sort(sorted_list, order=['nan_frac'])
class MatlabMstKruskalTest(unittest.TestCase):
def setUp(self):
ifg_instance = IfgList(small_ifg_file_list())
self.ifg_list, _ = get_nml(ifg_instance, nodata_value=0)
self.sorted_list = sort_list(self.ifg_list.id,
self.ifg_list.master_num,
self.ifg_list.slave_num,
self.ifg_list.nan_frac)
self.matlab_sorted_list_zero_nan_frac = [(1, 1, 3, 0.0),
(2, 2, 5, 0.0),
(3, 3, 7, 0.0),
(4, 3, 9, 0.0),
(5, 4, 5, 0.0),
(6, 4, 6, 0.0),
(7, 4, 8, 0.0),
(8, 5, 11, 0.0),
(9, 5, 12, 0.0),
(10, 6, 8, 0.0),
(11, 6, 13, 0.0),
(12, 7, 9, 0.0),
(13, 7, 10, 0.0),
(14, 8, 13, 0.0),
(15, 9, 10, 0.0),
(16, 10, 11, 0.0),
(17, 11, 12, 0.0)]
self.ifg_list_mst_matlab = [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 13, 16]
def test_sorted_list_equal_matlab(self):
for s, m in zip(self.sorted_list,
self.matlab_sorted_list_zero_nan_frac):
self.assertEqual((s[0]+1, s[1]+1, s[2]+1, s[3]), m)
def test_mst_kruskal_matlab(self):
edges = _get_sub_structure(self.ifg_list,
np.zeros(len(self.ifg_list.id), dtype=bool))
ifg_list_mst = _matlab_mst_kruskal(edges)
ifg_list_mst = [i + 1 for i in ifg_list_mst] # add 1 to each index
self.assertSequenceEqual(ifg_list_mst, self.ifg_list_mst_matlab)
class MSTKruskalCalcConnectAndNTrees(unittest.TestCase):
def test_calculate_connect_and_ntrees(self):
connect_start = np.ones(shape=(10, 10), dtype=bool)
connect_start[1, :-1] = False
_, connect, ntrees = _calculate_connect_and_ntrees(connect_start, [])
self.assertEqual(ntrees, 9)
np.testing.assert_array_equal(connect,
np.ones(shape=(9, 10), dtype=bool))
connect_start[2:5, :-1] = False
_, connect, ntrees = _calculate_connect_and_ntrees(connect_start, [])
self.assertEqual(ntrees, 6)
np.testing.assert_array_equal(connect,
np.ones(shape=(6, 10), dtype=bool))
def test_calculate_connect_and_ntrees_raise_1(self):
connect_start = np.eye(10, dtype=bool)
connect_start[1, :] = False
with self.assertRaises(ValueError):
_calculate_connect_and_ntrees(connect_start, [])
def test_calculate_connect_and_ntrees_raise_2(self):
connect_start = np.eye(10, dtype=bool)
with self.assertRaises(ValueError):
_calculate_connect_and_ntrees(connect_start, [])
class MSTKruskalConnectAndTreesSmallData(unittest.TestCase):
def test_calculate_connect_and_ntrees_small_data(self):
ifg_instance = IfgList(small_ifg_file_list())
ifg_list, _ = get_nml(ifg_instance, nodata_value=0)
edges = _get_sub_structure(ifg_list, np.zeros(len(ifg_list.id), dtype=bool))
mst, connected, ntrees = _matlab_mst_kruskal(edges,
ntrees=True
)
self.assertTrue(connected[0].all())
self.assertEqual(ntrees, 1)
self.assertEqual(len(connected[0]), len(mst) + 1)
def test_assert_is_not_tree(self):
non_overlapping = [1, 2, 5, 6, 12, 13, 14, 15, 16, 17]
small_files = small_ifg_file_list()
datafiles = [f for i, f in enumerate(small_files)
if i+1 in non_overlapping]
non_overlapping_ifg_instance = IfgList(datafiles)
ifg_list, _ = get_nml(non_overlapping_ifg_instance, nodata_value=0)
edges = _get_sub_structure(ifg_list,
np.zeros(len(ifg_list.id), dtype=bool))
mst, connected, ntrees = _matlab_mst_kruskal(edges, ntrees=True)
self.assertEqual(connected.shape[0], 4)
self.assertEqual(ntrees, 4)
def test_assert_is_tree(self):
overlapping = [1, 2, 3, 4, 6, 7, 10, 11, 16, 17]
small_files = small_ifg_file_list()
datafiles = [f for i, f in enumerate(small_files)
if i+1 in overlapping]
overlapping_ifg_instance = IfgList(datafiles)
ifg_list, _ = get_nml(overlapping_ifg_instance, nodata_value=0)
edges = _get_sub_structure(ifg_list,
np.zeros(len(ifg_list.id), dtype=bool))
_, connected, ntrees = _matlab_mst_kruskal(edges, ntrees=True)
self.assertEqual(ntrees, 4)
self.assertEqual(connected.shape[0], 4)
def test_assert_two_trees_overlapping(self):
overlapping = [3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 17]
small_files = small_ifg_file_list()
datafiles = [f for i, f in enumerate(small_files)
if i+1 in overlapping]
overlapping_ifg_instance = IfgList(datafiles)
ifg_list, _ = get_nml(overlapping_ifg_instance, nodata_value=0)
edges = _get_sub_structure(ifg_list,
np.zeros(len(ifg_list.id), dtype=bool))
mst, connected, ntrees = _matlab_mst_kruskal(edges, ntrees=True)
self.assertEqual(connected.shape[0], 2)
self.assertEqual(ntrees, 2)
def test_assert_two_trees_non_overlapping(self):
non_overlapping = [2, 5, 6, 12, 13, 15]
small_files = small_ifg_file_list()
datafiles = [f for i, f in enumerate(small_files)
if i+1 in non_overlapping]
non_overlapping_ifg_instance = IfgList(datafiles)
ifg_list, _ = get_nml(non_overlapping_ifg_instance, nodata_value=0)
edges = _get_sub_structure(ifg_list,
np.zeros(len(ifg_list.id), dtype=bool))
_, connected, ntrees = _matlab_mst_kruskal(edges, ntrees=True)
self.assertEqual(ntrees, 2)
self.assertEqual(connected.shape[0], 2)
class MatlabMSTTests(unittest.TestCase):
"""
Tests to ensure matlab and python mst outputs are the same.
"""
def setUp(self):
self.ifgs = small_data_setup()
self.ifg_file_list = small_ifg_file_list()
self.matlab_mst_list = ['geo_060619-061002_unw.tif',
'geo_060828-061211_unw.tif',
'geo_061002-070219_unw.tif',
'geo_061002-070430_unw.tif',
'geo_061106-061211_unw.tif',
'geo_061106-070115_unw.tif',
'geo_061106-070326_unw.tif',
'geo_061211-070709_unw.tif',
'geo_061211-070813_unw.tif',
'geo_070115-070917_unw.tif',
'geo_070219-070604_unw.tif',
'geo_070604-070709_unw.tif']
# reorder ifgs as per the matlab list
self.ifgs = small_data_setup()
def test_matlab_mst_kruskal(self):
"""
test that the matlab and python mst algos outputs are the same
"""
ifg_instance = IfgList(datafiles=self.ifg_file_list)
ifg_list, _ = get_nml(ifg_instance, nodata_value=0)
edges = _get_sub_structure(ifg_list,
np.zeros(len(ifg_list.id), dtype=bool))
ifg_list_mst_id = _matlab_mst_kruskal(edges)
self.assertEqual(len(self.matlab_mst_list),
len(ifg_list_mst_id))
for i in ifg_list_mst_id:
self.assertIn(os.path.split(ifg_list.nml[i])[-1],
self.matlab_mst_list)
def test_matlab_make_mstmat(self):
"""
tests equality of boolean mst arrays of both python and matlab.
"""
ifg_instance = IfgList(datafiles=self.ifg_file_list)
ifg_list, _ = get_nml(ifg_instance, nodata_value=0)
mst_mat = _matlab_mst(ifg_list, p_threshold=1)
# path to csv folders from matlab output
from tests.common import SML_TEST_MATLAB_MST_DIR
onlyfiles = [f for f in os.listdir(SML_TEST_MATLAB_MST_DIR)
if os.path.isfile(os.path.join(SML_TEST_MATLAB_MST_DIR, f))]
for i, f in enumerate(onlyfiles):
mst_f = np.genfromtxt(os.path.join(SML_TEST_MATLAB_MST_DIR, f),
delimiter=',')
for k, j in enumerate(self.ifg_file_list):
if f.split('matlab_')[-1].split('.')[0] == \
os.path.split(j)[-1].split('.')[0]:
np.testing.assert_array_equal(mst_f, mst_mat[k, :, :])
def test_matlab_make_mstmat_boolean_array(self):
"""
tests equality of boolean mst arrays of both python and matlab.
"""
ifg_instance = IfgList(datafiles=self.ifg_file_list)
ifg_list, _ = get_nml(ifg_instance, nodata_value=0)
mst_mat = _matlab_mst_bool(ifg_list, p_threshold=1)
# path to csv folders from matlab output
from tests.common import SML_TEST_MATLAB_MST_DIR
onlyfiles = [f for f in os.listdir(SML_TEST_MATLAB_MST_DIR)
if os.path.isfile(os.path.join(SML_TEST_MATLAB_MST_DIR, f))]
for i, f in enumerate(onlyfiles):
mst_f = np.genfromtxt(os.path.join(SML_TEST_MATLAB_MST_DIR, f),
delimiter=',')
for k, j in enumerate(self.ifg_file_list):
if f.split('matlab_')[-1].split('.')[0] == \
os.path.split(j)[-1].split('.')[0]:
np.testing.assert_array_equal(mst_f, mst_mat[k, :, :])
def test_mas_mat_vs_mst_mat_generator(self):
ifg_instance = IfgList(datafiles=self.ifg_file_list)
ifg_list, _ = get_nml(ifg_instance, nodata_value=0,
nan_conversion=True)
mst_mat1 = _matlab_mst(ifg_list)
mst_mat2 = _matlab_mst_bool(ifg_list)
np.testing.assert_array_equal(mst_mat2, mst_mat1)
class TestMSTBooleanArray(unittest.TestCase):
def setUp(self):
self.ifg_dir = tempfile.mkdtemp()
small_files = small_ifg_file_list()
for sf in small_files:
dest = os.path.join(self.ifg_dir, os.path.basename(sf))
shutil.copy(sf, dest)
os.chmod(dest, 0o660)
self.small_files = glob.glob(os.path.join(self.ifg_dir, "*.tif"))
self.small_ifgs = small_data_setup(self.small_files)
def tearDown(self):
shutil.rmtree(self.ifg_dir)
def test_mst_boolean_array(self):
nan_conversion = 1
for i in self.small_ifgs:
if not i.is_open:
i.open(readonly=False)
if nan_conversion: # nan conversion happens here in networkx mst
i.nodata_value = 0
i.convert_to_nans()
if not i.mm_converted:
i.convert_to_mm()
i.write_modified_phase()
mst_nx = mst.mst_boolean_array(self.small_ifgs)
small_ifg_instance = IfgList(datafiles=self.small_files)
ifgs = small_ifg_instance.ifgs
for i in ifgs:
if not i.mm_converted:
i.convert_to_mm()
i.write_modified_phase()
ifg_instance_updated, epoch_list = \
get_nml(small_ifg_instance, nodata_value=0,
nan_conversion=nan_conversion)
mst_matlab = _matlab_mst_bool(ifg_instance_updated)
np.testing.assert_array_equal(mst_matlab, mst_nx)
# close ifgs for windows
for i in self.small_ifgs:
i.close()
if __name__ == '__main__':
unittest.main()
```
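The MST comparisons above validate PyRate's matlab-style Kruskal routines against the networkx-backed `pyrate.mst` module. As a rough, self-contained illustration of what is being compared (epoch labels and weights below are made up and are not PyRate data), a minimum spanning tree over a weighted interferogram graph can be built with networkx directly:
```python
# Illustrative sketch only: epochs as nodes, interferograms as weighted edges;
# the weights stand in for the nan_frac values used by the tests above.
import networkx as nx

edges = [(1, 3, 0.10), (2, 5, 0.05), (3, 7, 0.20),
         (5, 7, 0.15), (3, 5, 0.30)]

g = nx.Graph()
for master, slave, nan_frac in edges:
    g.add_edge(master, slave, weight=nan_frac)

tree = nx.minimum_spanning_tree(g)  # Kruskal's algorithm by default
print(sorted(tree.edges(data='weight')))
```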
#### File: PyRate/tests/test_mpi.py
```python
from __future__ import print_function
import glob
import shutil
import numpy as np
import pytest
import os
import tempfile
import random
import string
from subprocess import check_output
import pyrate.orbital
import pyrate.shared
import tests.common
from pyrate import ref_phs_est as rpe
from pyrate import covariance
from pyrate import refpixel
from pyrate.scripts import run_pyrate, run_prepifg, postprocessing
from tests.common import small_data_setup, reconstruct_mst, \
reconstruct_linrate, SML_TEST_DEM_HDR_GAMMA, pre_prepare_ifgs
from tests import common
from tests.test_covariance import matlab_maxvar
from pyrate import config as cf
from pyrate import mpiops
from pyrate import algorithm
TRAVIS = True if 'TRAVIS' in os.environ else False
PYTHON3P5 = True if ('TRAVIS_PYTHON_VERSION' in os.environ and
os.environ['TRAVIS_PYTHON_VERSION'] == '3.5') else False
GDAL_VERSION = check_output(["gdal-config", "--version"]).decode(
encoding="utf-8").split('\n')[0]
MPITEST = TRAVIS and GDAL_VERSION == '2.0.0'
@pytest.fixture()
def tempdir():
"""
tempdir for tests
"""
def tmpdir():
return tempfile.mkdtemp()
return tmpdir
@pytest.fixture
def random_filename(tmpdir_factory):
def make_random_filename(ext=''):
dir = str(tmpdir_factory.mktemp('pyrate').realpath())
fname = ''.join(random.choice(string.ascii_lowercase)
for _ in range(10))
return os.path.join(dir, fname + ext)
return make_random_filename
@pytest.fixture()
def get_config():
"""
Parameters
----------
conf_file: str
config file
Returns
-------
params: dict
dict of params
"""
def params(conf_file):
return cf.get_config_params(conf_file)
return params
# Make sure all MPI tests use this fixure
@pytest.fixture()
def mpisync(request):
mpiops.comm.barrier()
def fin():
mpiops.comm.barrier()
request.addfinalizer(fin)
return mpiops.comm
@pytest.fixture(params=[0, 1])
def roipac_or_gamma(request):
return request.param
@pytest.fixture(params=[1, 2])
def ref_est_method(request):
return request.param
@pytest.fixture(params=[1, 2, 5])
def row_splits(request):
return request.param
@pytest.fixture(params=[1, 2, 5])
def col_splits(request):
return request.param
@pytest.fixture(params=[1, 2, 5])
def modify_config(request, tempdir, get_config):
test_conf = common.TEST_CONF_ROIPAC
params_dict = get_config(test_conf)
params_dict[cf.IFG_LKSX] = request.param
params_dict[cf.IFG_LKSY] = request.param
params_dict[cf.OBS_DIR] = tempdir()
common.copytree(common.SML_TEST_GAMMA, params_dict[cf.OBS_DIR])
params_dict[cf.IFG_FILE_LIST] = os.path.join(
params_dict[cf.OBS_DIR], 'ifms_17')
params_dict[cf.PARALLEL] = False
params_dict[cf.APS_CORRECTION] = 0
yield params_dict
# clean up
shutil.rmtree(params_dict[cf.OBS_DIR])
@pytest.fixture(params=range(1, 6))
def get_lks(request):
return request.param
@pytest.fixture(params=range(1, 3))
def get_crop(request):
return request.param
def test_vcm_matlab_vs_mpi(mpisync, tempdir, get_config):
from tests.common import SML_TEST_DIR, TEST_CONF_ROIPAC
params_dict = get_config(TEST_CONF_ROIPAC)
MATLAB_VCM_DIR = os.path.join(SML_TEST_DIR, 'matlab_vcm')
matlab_vcm = np.genfromtxt(os.path.join(MATLAB_VCM_DIR,
'matlab_vcmt.csv'), delimiter=',')
if mpiops.rank == 0:
outdir = tempdir()
else:
outdir = None
outdir = mpiops.comm.bcast(outdir, root=0)
params_dict[cf.OUT_DIR] = outdir
params_dict[cf.PARALLEL] = False
xlks, ylks, crop = cf.transform_params(params_dict)
base_unw_paths = cf.original_ifg_paths(params_dict[cf.IFG_FILE_LIST])
# dest_paths are tifs that have been geotif converted and multilooked
dest_paths = cf.get_dest_paths(base_unw_paths, crop, params_dict, xlks)
# run prepifg, create the dest_paths files
if mpiops.rank == 0:
run_prepifg.roipac_prepifg(base_unw_paths, params_dict)
mpiops.comm.barrier()
tiles = pyrate.shared.get_tiles(dest_paths[0], rows=1, cols=1)
preread_ifgs = run_pyrate._create_ifg_dict(dest_paths,
params=params_dict,
tiles=tiles)
refpx, refpy = run_pyrate._ref_pixel_calc(dest_paths, params_dict)
run_pyrate._orb_fit_calc(dest_paths, params_dict)
run_pyrate._ref_phase_estimation(dest_paths, params_dict, refpx, refpy)
maxvar, vcmt = run_pyrate._maxvar_vcm_calc(dest_paths, params_dict,
preread_ifgs)
np.testing.assert_array_almost_equal(maxvar, matlab_maxvar, decimal=4)
np.testing.assert_array_almost_equal(matlab_vcm, vcmt, decimal=3)
if mpiops.rank == 0:
shutil.rmtree(outdir)
@pytest.fixture(params=[1, 2, 5])
def orbfit_lks(request):
return request.param
@pytest.fixture(params=[cf.INDEPENDENT_METHOD, cf.NETWORK_METHOD])
def orbfit_method(request):
return request.param
@pytest.fixture(params=[cf.PLANAR, cf.QUADRATIC, cf.PART_CUBIC])
def orbfit_degrees(request):
return request.param
@pytest.mark.skipif(not MPITEST, reason='skipping mpi tests in travis except '
'in TRAVIS and GDAL=2.0.0')
def test_timeseries_linrate_mpi(mpisync, tempdir, modify_config,
ref_est_method, row_splits, col_splits,
get_crop, orbfit_lks, orbfit_method,
orbfit_degrees):
params = modify_config
outdir = mpiops.run_once(tempdir)
params[cf.OUT_DIR] = outdir
params[cf.TMPDIR] = os.path.join(params[cf.OUT_DIR], cf.TMPDIR)
params[cf.DEM_HEADER_FILE] = SML_TEST_DEM_HDR_GAMMA
params[cf.REF_EST_METHOD] = ref_est_method
params[cf.IFG_CROP_OPT] = get_crop
params[cf.ORBITAL_FIT_LOOKS_Y] = orbfit_lks
params[cf.ORBITAL_FIT_LOOKS_X] = orbfit_lks
params[cf.ORBITAL_FIT_METHOD] = orbfit_method
params[cf.ORBITAL_FIT_DEGREE] = orbfit_degrees
xlks, ylks, crop = cf.transform_params(params)
if xlks * col_splits > 45 or ylks * row_splits > 70:
print('skipping test because lks and col_splits are not compatible')
return
# skip some tests in travis to run CI faster
if TRAVIS and (xlks % 2 or row_splits % 2 or col_splits % 2
or orbfit_lks % 2):
print('Skipping in travis env for faster CI run')
return
print("xlks={}, ref_est_method={}, row_splits={}, col_splits={}, "
"get_crop={}, orbfit_lks={}, orbfit_method={}, "
"orbfit_degrees={}, rank={}".format(xlks, ref_est_method, row_splits, col_splits,
get_crop, orbfit_lks, orbfit_method, orbfit_degrees,
mpiops.rank))
base_unw_paths = cf.original_ifg_paths(params[cf.IFG_FILE_LIST])
# dest_paths are tifs that have been geotif converted and multilooked
dest_paths = cf.get_dest_paths(base_unw_paths, crop, params, xlks)
# run prepifg, create the dest_paths files
if mpiops.rank == 0:
run_prepifg.gamma_prepifg(base_unw_paths, params)
mpiops.comm.barrier()
(refpx, refpy), maxvar, vcmt = run_pyrate.process_ifgs(
ifg_paths=dest_paths, params=params, rows=row_splits, cols=col_splits)
tiles = mpiops.run_once(pyrate.shared.get_tiles, dest_paths[0],
rows=row_splits, cols=col_splits)
postprocessing._postprocess_linrate(row_splits, col_splits, params)
postprocessing._postprocess_timeseries(row_splits, col_splits, params)
ifgs_mpi_out_dir = params[cf.OUT_DIR]
ifgs_mpi = small_data_setup(datafiles=dest_paths)
# single process timeseries/linrate calculation
if mpiops.rank == 0:
params_old = modify_config
params_old[cf.OUT_DIR] = tempdir()
params_old[cf.REF_EST_METHOD] = ref_est_method
params_old[cf.IFG_CROP_OPT] = get_crop
params_old[cf.ORBITAL_FIT_LOOKS_Y] = orbfit_lks
params_old[cf.ORBITAL_FIT_LOOKS_X] = orbfit_lks
params_old[cf.ORBITAL_FIT_METHOD] = orbfit_method
params_old[cf.ORBITAL_FIT_DEGREE] = orbfit_degrees
xlks, ylks, crop = cf.transform_params(params_old)
base_unw_paths = cf.original_ifg_paths(
params_old[cf.IFG_FILE_LIST])
dest_paths = cf.get_dest_paths(
base_unw_paths, crop, params_old, xlks)
run_prepifg.gamma_prepifg(base_unw_paths, params_old)
ifgs = pre_prepare_ifgs(dest_paths, params_old)
mst_grid = tests.common.mst_calculation(dest_paths, params_old)
refy, refx = refpixel.ref_pixel(ifgs, params_old)
assert (refx == refpx) and (refy == refpy) # both must match
pyrate.orbital.remove_orbital_error(ifgs, params_old)
ifgs = common.prepare_ifgs_without_phase(dest_paths, params_old)
rpe.estimate_ref_phase(ifgs, params_old, refx, refy)
ifgs = pre_prepare_ifgs(dest_paths, params_old)
r_dist = covariance.RDist(ifgs[0])()
maxvar_s = [covariance.cvd(i, params_old, r_dist)[0] for i in ifgs]
vcmt_s = covariance.get_vcmt(ifgs, maxvar)
tsincr, tscum, _ = tests.common.compute_time_series(
ifgs, mst_grid, params, vcmt)
rate, error, samples = tests.common.calculate_linear_rate(
ifgs, params_old, vcmt, mst_grid)
mst_mpi = reconstruct_mst(ifgs[0].shape, tiles, params[cf.TMPDIR])
np.testing.assert_array_almost_equal(mst_grid, mst_mpi)
tsincr_mpi, tscum_mpi = reconstruct_times_series(ifgs[0].shape,
tiles,
params[cf.TMPDIR])
rate_mpi, error_mpi, samples_mpi = \
[reconstruct_linrate(ifgs[0].shape, tiles, params[cf.TMPDIR], t)
for t in ['linrate', 'linerror', 'linsamples']]
np.testing.assert_array_almost_equal(maxvar, maxvar_s)
np.testing.assert_array_almost_equal(vcmt, vcmt_s)
for i, j in zip(ifgs, ifgs_mpi):
np.testing.assert_array_almost_equal(i.phase_data, j.phase_data)
np.testing.assert_array_almost_equal(tsincr, tsincr_mpi, decimal=4)
np.testing.assert_array_almost_equal(tscum, tscum_mpi, decimal=4)
np.testing.assert_array_almost_equal(rate, rate_mpi, decimal=4)
np.testing.assert_array_almost_equal(error, error_mpi, decimal=4)
np.testing.assert_array_almost_equal(samples, samples_mpi, decimal=4)
# assert linear rate output tifs are same
_tifs_same(ifgs_mpi_out_dir, params_old[cf.OUT_DIR], 'linrate.tif')
_tifs_same(ifgs_mpi_out_dir, params_old[cf.OUT_DIR], 'linerror.tif')
_tifs_same(ifgs_mpi_out_dir, params_old[cf.OUT_DIR], 'linsamples.tif')
# assert time series output tifs are same
epochlist = algorithm.get_epochs(ifgs)[0]
for i in range(tsincr.shape[2]):
_tifs_same(ifgs_mpi_out_dir, params_old[cf.OUT_DIR],
'tsincr' + '_' + str(epochlist.dates[i + 1]) + ".tif")
# 12 timeseries outputs
assert i + 1 == tsincr.shape[2]
shutil.rmtree(ifgs_mpi_out_dir) # remove mpi out dir
shutil.rmtree(params_old[cf.OUT_DIR]) # remove serial out dir
def _tifs_same(dir1, dir2, tif):
linrate_tif_s = os.path.join(dir1, tif)
linrate_tif_m = os.path.join(dir2, tif)
common.assert_ifg_phase_equal(linrate_tif_m, linrate_tif_s)
@pytest.mark.skipif(TRAVIS, reason='skipping mpi tests in travis')
def reconstruct_times_series(shape, tiles, output_dir):
tsincr_file_0 = os.path.join(output_dir, 'tsincr_{}.npy'.format(0))
shape3 = np.load(tsincr_file_0).shape[2]
tsincr_mpi = np.empty(shape=(shape + (shape3,)), dtype=np.float32)
tscum_mpi = np.empty_like(tsincr_mpi, dtype=np.float32)
for i, t in enumerate(tiles):
tsincr_file_n = os.path.join(output_dir,
'tsincr_{}.npy'.format(i))
tsincr_mpi[t.top_left_y:t.bottom_right_y,
t.top_left_x: t.bottom_right_x, :] = np.load(tsincr_file_n)
tscum_file_n = os.path.join(output_dir, 'tscuml_{}.npy'.format(i))
tscum_mpi[t.top_left_y:t.bottom_right_y,
t.top_left_x: t.bottom_right_x, :] = np.load(tscum_file_n)
return tsincr_mpi, tscum_mpi
def test_prepifg_mpi(mpisync, get_config, tempdir,
roipac_or_gamma, get_lks, get_crop):
from tests.common import TEST_CONF_ROIPAC, TEST_CONF_GAMMA
from os.path import join, basename
if roipac_or_gamma == 1:
params = get_config(TEST_CONF_GAMMA)
else:
params = get_config(TEST_CONF_ROIPAC)
outdir = mpiops.run_once(tempdir)
params[cf.OUT_DIR] = outdir
params[cf.PARALLEL] = False
params[cf.IFG_LKSX], params[cf.IFG_LKSY] = get_lks, get_lks
params[cf.IFG_CROP_OPT] = get_crop
if roipac_or_gamma == 1:
params[cf.IFG_FILE_LIST] = join(common.SML_TEST_GAMMA, 'ifms_17')
params[cf.OBS_DIR] = common.SML_TEST_GAMMA
params[cf.DEM_FILE] = common.SML_TEST_DEM_GAMMA
params[cf.DEM_HEADER_FILE] = common.SML_TEST_DEM_HDR_GAMMA
run_prepifg.main(params)
if mpiops.rank == 0:
if roipac_or_gamma == 1:
params_s = get_config(TEST_CONF_GAMMA)
else:
params_s = get_config(TEST_CONF_ROIPAC)
params_s[cf.OUT_DIR] = tempdir()
params_s[cf.PARALLEL] = True
params_s[cf.IFG_LKSX], params_s[cf.IFG_LKSY] = get_lks, get_lks
params_s[cf.IFG_CROP_OPT] = get_crop
if roipac_or_gamma == 1:
base_unw_paths = glob.glob(join(common.SML_TEST_GAMMA,
"*_utm.unw"))
run_prepifg.gamma_prepifg(base_unw_paths, params_s)
else:
base_unw_paths = glob.glob(join(common.SML_TEST_OBS, "*.unw"))
run_prepifg.roipac_prepifg(base_unw_paths, params_s)
mpi_tifs = glob.glob(join(outdir, "*.tif"))
serial_tifs = glob.glob(join(params[cf.OUT_DIR], "*.tif"))
mpi_tifs.sort()
serial_tifs.sort()
# 17 geotifs, and 17 mlooked tifs
assert len(mpi_tifs) == len(serial_tifs)
for m_f, s_f in zip(mpi_tifs, serial_tifs):
assert basename(m_f) == basename(s_f)
shutil.rmtree(outdir)
shutil.rmtree(params_s[cf.OUT_DIR])
```
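The `reconstruct_times_series` and `reconstruct_linrate` helpers above stitch per-tile `.npy` chunks back into full-size arrays. A stripped-down sketch of that stitching pattern follows; the `Tile` stand-in only mimics the attribute names used in the test and is not PyRate's actual tile class:
```python
# Minimal sketch of re-assembling tiled results into one array.
from collections import namedtuple
import numpy as np

# Stand-in for pyrate.shared's tile objects; only the attributes used above.
Tile = namedtuple('Tile', 'top_left_y bottom_right_y top_left_x bottom_right_x data')

def stitch(shape, tiles):
    """Place each tile's data block into a full-size output array."""
    out = np.empty(shape, dtype=np.float32)
    for t in tiles:
        out[t.top_left_y:t.bottom_right_y,
            t.top_left_x:t.bottom_right_x] = t.data
    return out

tiles = [Tile(0, 2, 0, 3, np.zeros((2, 3))),
         Tile(2, 4, 0, 3, np.ones((2, 3)))]
full = stitch((4, 3), tiles)  # rows 0-1 are zeros, rows 2-3 are ones
```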
#### File: PyRate/tests/test_pyaps.py
```python
import copy
import glob
import os
import re
import shutil
import subprocess
import tempfile
import unittest
import pytest
import gdal
import numpy as np
from pyrate import config as cf
from pyrate import ifgconstants as ifc
from pyrate.compat import PyAPS_INSTALLED
from pyrate.scripts import run_pyrate, run_prepifg
from tests import common
if PyAPS_INSTALLED:
from pyrate import pyaps
@unittest.skipUnless(PyAPS_INSTALLED, 'PyAPS must be available for this test')
@pytest.mark.skipif(not PyAPS_INSTALLED,
reason='PyAPS must be available for this test')
class TestMethod1VsMethod2AndMetaData(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tif_dir = tempfile.mkdtemp()
cls.tif_dir_method2 = tempfile.mkdtemp()
cls.test_conf = common.TEST_CONF_ROIPAC
# change the required params
cls.params = cf.get_config_params(cls.test_conf)
cls.params[cf.OBS_DIR] = common.SML_TEST_GAMMA
cls.params[cf.PROCESSOR] = 1 # gamma
file_list = cf.parse_namelist(os.path.join(common.SML_TEST_GAMMA,
'ifms_17'))
cls.params[cf.IFG_FILE_LIST] = tempfile.mktemp(dir=cls.tif_dir)
# write a short filelist with only the first 2 gamma unws
with open(cls.params[cf.IFG_FILE_LIST], 'w') as fp:
for f in file_list[:2]:
fp.write(os.path.join(common.SML_TEST_GAMMA, f) + '\n')
cls.params[cf.OUT_DIR] = cls.tif_dir
cls.params[cf.PARALLEL] = True
cls.params[cf.REF_EST_METHOD] = 1
cls.params[cf.DEM_FILE] = common.SML_TEST_DEM_GAMMA
cls.params[cf.APS_INCIDENCE_MAP] = common.SML_TEST_INCIDENCE
# base_unw_paths need to be geotiffed and multilooked by run_prepifg
base_unw_paths = run_pyrate.original_ifg_paths(
cls.params[cf.IFG_FILE_LIST])
# add dem
base_unw_paths.append(common.SML_TEST_DEM_GAMMA)
# add incidence
base_unw_paths.append(common.SML_TEST_INCIDENCE)
xlks, ylks, crop = run_pyrate.transform_params(cls.params)
import copy
cls.params_method2 = copy.copy(cls.params)
cls.params_method2[cf.OUT_DIR] = cls.tif_dir_method2
cls.params_method2[cf.APS_METHOD] = 2
# dest_paths are tifs that have been geotif converted and multilooked
run_prepifg.gamma_prepifg(base_unw_paths, cls.params)
run_prepifg.gamma_prepifg(base_unw_paths, cls.params_method2)
# removed incidence as we don't want it in ifgs list
base_unw_paths.pop()
# removed dem as we don't want it in ifgs list
base_unw_paths.pop()
dest_paths = run_pyrate.get_dest_paths(
base_unw_paths, crop, cls.params, xlks)
cls.ifgs = common.small_data_setup(datafiles=dest_paths)
dest_paths_m2 = run_pyrate.get_dest_paths(
base_unw_paths, crop, cls.params_method2, xlks)
cls.ifgs_method2 = common.small_data_setup(datafiles=dest_paths_m2)
pyaps.remove_aps_delay(cls.ifgs, cls.params)
pyaps.remove_aps_delay(cls.ifgs_method2, cls.params_method2)
@classmethod
def tearDownClass(cls):
for i in cls.ifgs:
i.close()
for i in cls.ifgs_method2:
i.close()
shutil.rmtree(cls.tif_dir)
shutil.rmtree(cls.tif_dir_method2)
def test_metadata_was_copied(self):
for i in self.ifgs:
md = i.meta_data
i.close()
self.assertIn(ifc.PYRATE_WEATHER_ERROR, md.keys())
self.assertIn(pyaps.APS_STATUS, md.values())
def test_meta_data_was_written(self):
for i in self.ifgs:
md = i.meta_data
i.close()
ds = gdal.Open(i.data_path)
md_w = ds.GetMetadata()
self.assertDictEqual(md, md_w)
ds = None
def test_dem_tifs_present(self):
# geotiffed dem
os.path.exists(os.path.join(self.params[cf.OUT_DIR],
os.path.splitext(common.SML_TEST_DEM_GAMMA)[0]
+ '.tif'))
# multilooked dem
os.path.exists(os.path.join(self.params[cf.OUT_DIR],
os.path.splitext(common.SML_TEST_DEM_GAMMA)[0]
+ '_{looks}rlks_{crop}cr.tif'.format(
looks=self.params[cf.IFG_LKSX],
crop=self.params[cf.IFG_CROP_OPT])))
def test_method1_method2_equal_with_uniform_incidence_map(self):
for i, j in zip(self.ifgs, self.ifgs_method2):
np.testing.assert_array_almost_equal(i.phase_data, j.phase_data,
decimal=4)
@unittest.skipUnless(PyAPS_INSTALLED, 'PyAPS must be available for this test')
@pytest.mark.skipif(not PyAPS_INSTALLED,
reason='PyAPS must be available for this test')
class TestOriginalVsEfficientAps(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tif_dir = tempfile.mkdtemp()
cls.tif_dir_original = tempfile.mkdtemp()
cls.test_conf = common.TEST_CONF_ROIPAC
# change the required params
cls.params = cf.get_config_params(cls.test_conf)
cls.params[cf.OBS_DIR] = common.SML_TEST_GAMMA
cls.params[cf.PROCESSOR] = 1 # gamma
file_list = cf.parse_namelist(os.path.join(common.SML_TEST_GAMMA,
'ifms_17'))
cls.params[cf.IFG_FILE_LIST] = tempfile.mktemp(dir=cls.tif_dir)
# write a short filelist with only the first 2 gamma unws
with open(cls.params[cf.IFG_FILE_LIST], 'w') as fp:
for f in file_list[:2]:
fp.write(os.path.join(common.SML_TEST_GAMMA, f) + '\n')
cls.params[cf.OUT_DIR] = cls.tif_dir
cls.params[cf.PARALLEL] = True
cls.params[cf.REF_EST_METHOD] = 2
cls.params[cf.DEM_FILE] = common.SML_TEST_DEM_GAMMA
cls.params[cf.APS_INCIDENCE_MAP] = common.SML_TEST_INCIDENCE
# base_unw_paths need to be geotiffed and multilooked by run_prepifg
base_unw_paths = run_pyrate.original_ifg_paths(
cls.params[cf.IFG_FILE_LIST])
# add dem
base_unw_paths.append(common.SML_TEST_DEM_GAMMA)
# add incidence
base_unw_paths.append(common.SML_TEST_INCIDENCE)
xlks, ylks, crop = run_pyrate.transform_params(cls.params)
import copy
cls.params_original = copy.copy(cls.params)
cls.params_original[cf.OUT_DIR] = cls.tif_dir_original
cls.params_original[cf.APS_METHOD] = 2
# dest_paths are tifs that have been geotif converted and multilooked
run_prepifg.gamma_prepifg(base_unw_paths, cls.params)
run_prepifg.gamma_prepifg(base_unw_paths, cls.params_original)
# removed incidence as we don't want it in ifgs list
base_unw_paths.pop()
# removed dem as we don't want it in ifgs list
base_unw_paths.pop()
dest_paths = run_pyrate.get_dest_paths(
base_unw_paths, crop, cls.params, xlks)
cls.ifgs = common.small_data_setup(datafiles=dest_paths)
dest_paths_orig = run_pyrate.get_dest_paths(
base_unw_paths, crop, cls.params_original, xlks)
cls.ifgs_orig = common.small_data_setup(datafiles=dest_paths_orig)
pyaps.remove_aps_delay(cls.ifgs, cls.params)
pyaps.remove_aps_delay_original(cls.ifgs_orig, cls.params_original)
@classmethod
def tearDownClass(cls):
for i in cls.ifgs:
i.close()
for i in cls.ifgs_orig:
i.close()
shutil.rmtree(cls.tif_dir)
shutil.rmtree(cls.tif_dir_original)
def test_metadata_was_copied(self):
for i in self.ifgs:
md = i.meta_data
i.close()
self.assertIn(ifc.PYRATE_WEATHER_ERROR, md.keys())
self.assertIn(pyaps.APS_STATUS, md.values())
def test_meta_data_was_written(self):
for i in self.ifgs:
i.close()
md = i.meta_data
ds = gdal.Open(i.data_path)
md_w = ds.GetMetadata()
self.assertDictEqual(md, md_w)
ds = None
def test_dem_tifs_present(self):
# geotiffed dem
os.path.exists(os.path.join(self.params[cf.OUT_DIR],
os.path.splitext(common.SML_TEST_DEM_GAMMA)[0]
+ '.tif'))
# multilooked dem
os.path.exists(os.path.join(self.params[cf.OUT_DIR],
os.path.splitext(common.SML_TEST_DEM_GAMMA)[0]
+ '_{looks}rlks_{crop}cr.tif'.format(
looks=self.params[cf.IFG_LKSX],
crop=self.params[cf.IFG_CROP_OPT])))
def test_method1_method2_equal_with_uniform_incidence_map(self):
for i, j in zip(self.ifgs, self.ifgs_orig):
np.testing.assert_array_almost_equal(i.phase_data, j.phase_data,
decimal=4)
@unittest.skipUnless(PyAPS_INSTALLED, 'PyAPS must be available for this test')
@pytest.mark.skipif(not PyAPS_INSTALLED,
reason='PyAPS must be available for this test')
class TestAPSIncidenceVsElevationVsParallel(unittest.TestCase):
"""
This class tests APS method when incidence map is provided vs elevation map
"""
@classmethod
def setUpClass(cls):
cls.tif_dir_inc = tempfile.mkdtemp()
cls.tif_dir_ele = tempfile.mkdtemp()
cls.tif_dir_ele_par = tempfile.mkdtemp()
cls.test_conf = common.TEST_CONF_ROIPAC
# change the required params
cls.params_inc = cf.get_config_params(cls.test_conf)
cls.params_inc[cf.OBS_DIR] = common.SML_TEST_GAMMA
cls.params_inc[cf.PROCESSOR] = 1 # gamma
file_list = cf.parse_namelist(os.path.join(common.SML_TEST_GAMMA,
'ifms_17'))
# config file
cls.params_inc[cf.IFG_FILE_LIST] = tempfile.mktemp(dir=cls.tif_dir_inc)
# write a short filelist with only the first 2 gamma unws
with open(cls.params_inc[cf.IFG_FILE_LIST], 'w') as fp:
for f in file_list[:2]:
fp.write(os.path.join(common.SML_TEST_GAMMA, f) + '\n')
cls.params_inc[cf.OUT_DIR] = cls.tif_dir_inc
cls.params_inc[cf.PARALLEL] = 0
cls.params_inc[cf.REF_EST_METHOD] = 1
cls.params_inc[cf.APS_METHOD] = 2
cls.params_inc[cf.DEM_FILE] = common.SML_TEST_DEM_GAMMA
cls.params_inc[cf.APS_INCIDENCE_MAP] = common.SML_TEST_INCIDENCE
run_prepifg.main(cls.params_inc)
# now create the config for the elevation_map case
cls.params_ele = copy.copy(cls.params_inc)
cls.params_ele[cf.OUT_DIR] = cls.tif_dir_ele
cls.params_ele[cf.APS_METHOD] = 2
cls.params_ele[cf.APS_INCIDENCE_MAP] = None
cls.params_ele[cf.APS_INCIDENCE_EXT] = None
cls.params_ele[cf.APS_ELEVATION_MAP] = common.SML_TEST_ELEVATION
cls.params_ele[cf.APS_ELEVATION_EXT] = 'lv_theta'
run_prepifg.main(cls.params_ele)
ptn = re.compile(r'\d{8}')
dest_paths_inc = [f for f in
glob.glob(os.path.join(cls.tif_dir_inc, '*.tif'))
if ("cr" in f) and ("rlks" in f) and
(len(re.findall(ptn, os.path.basename(f))) == 2)]
cls.ifgs_inc = common.small_data_setup(datafiles=dest_paths_inc)
dest_paths_ele = [f for f in
glob.glob(os.path.join(cls.tif_dir_ele, '*.tif'))
if "cr" in f and "rlks" in f and
(len(re.findall(ptn, os.path.basename(f))) == 2)]
cls.ifgs_ele = common.small_data_setup(datafiles=dest_paths_ele)
# now create the config for the elevation map parallel case
cls.params_ele_par = copy.copy(cls.params_ele)
cls.params_ele_par[cf.OUT_DIR] = cls.tif_dir_ele_par
cls.params_ele_par[cf.PARALLEL] = True
run_prepifg.main(cls.params_ele_par)
dest_paths_ele_par = \
[f for f in glob.glob(os.path.join(cls.tif_dir_ele_par, '*.tif'))
if "cr" in f and "rlks" in f and
(len(re.findall(ptn, os.path.basename(f))) == 2)]
cls.ifgs_ele_par = common.small_data_setup(datafiles=dest_paths_ele_par)
pyaps.remove_aps_delay(cls.ifgs_inc, cls.params_inc)
pyaps.remove_aps_delay(cls.ifgs_ele, cls.params_ele)
pyaps.remove_aps_delay(cls.ifgs_ele_par, cls.params_ele_par)
@classmethod
def tearDownClass(cls):
for i in cls.ifgs_ele:
i.close()
for i in cls.ifgs_ele_par:
i.close()
for i in cls.ifgs_inc:
i.close()
shutil.rmtree(cls.tif_dir_inc)
shutil.rmtree(cls.tif_dir_ele)
shutil.rmtree(cls.tif_dir_ele_par)
def test_inc_vs_ele_equal_with_uniform_incidence_map(self):
for i, j in zip(self.ifgs_inc, self.ifgs_ele):
np.testing.assert_array_almost_equal(i.phase_data, j.phase_data,
decimal=4)
def test_inc_serial_vs_parallel(self):
for i, j in zip(self.ifgs_ele_par, self.ifgs_ele):
np.testing.assert_array_almost_equal(i.phase_data, j.phase_data,
decimal=4)
@unittest.skipUnless(PyAPS_INSTALLED, 'PyAPS must be available for this test')
class MPITests(unittest.TestCase):
# TODO: add tests for looks > 1
@classmethod
def setUpClass(cls):
cls.tif_dir_serial = tempfile.mkdtemp()
cls.tif_dir_mpi = tempfile.mkdtemp()
cls.test_conf = common.TEST_CONF_ROIPAC
# change the required params
cls.params = cf.get_config_params(cls.test_conf)
cls.params[cf.OBS_DIR] = common.SML_TEST_GAMMA
cls.params[cf.PROCESSOR] = 1 # gamma
file_list = cf.parse_namelist(os.path.join(common.SML_TEST_GAMMA,
'ifms_17'))
cls.params[cf.IFG_FILE_LIST] = tempfile.mktemp(dir=cls.tif_dir_serial)
# write a short filelist with only the first 2 gamma unws
with open(cls.params[cf.IFG_FILE_LIST], 'w') as fp:
for f in file_list[:2]:
fp.write(os.path.join(common.SML_TEST_GAMMA, f) + '\n')
cls.params[cf.OUT_DIR] = cls.tif_dir_serial
cls.params[cf.PARALLEL] = 0
cls.params[cf.REF_EST_METHOD] = 2
cls.params[cf.IFG_LKSX] = 1
cls.params[cf.IFG_LKSY] = 1
cls.params[cf.DEM_FILE] = common.SML_TEST_DEM_GAMMA
cls.params[cf.APS_INCIDENCE_MAP] = common.SML_TEST_INCIDENCE
# base_unw_paths need to be geotiffed and multilooked by run_prepifg
base_unw_paths = run_pyrate.original_ifg_paths(
cls.params[cf.IFG_FILE_LIST])
# add dem
base_unw_paths.append(common.SML_TEST_DEM_GAMMA)
# add incidence
base_unw_paths.append(common.SML_TEST_INCIDENCE)
xlks, ylks, crop = run_pyrate.transform_params(cls.params)
cls.params_mpi = copy.copy(cls.params)
cls.params_mpi[cf.OUT_DIR] = cls.tif_dir_mpi
# dest_paths are tifs that have been geotif converted and multilooked
run_prepifg.gamma_prepifg(base_unw_paths, cls.params)
run_prepifg.gamma_prepifg(base_unw_paths, cls.params_mpi)
# removed incidence as we don't want it in ifgs list
base_unw_paths.pop()
# removed dem as we don't want it in ifgs list
base_unw_paths.pop()
dest_paths = run_pyrate.get_dest_paths(
base_unw_paths, crop, cls.params, xlks)
dest_paths_mpi = run_pyrate.get_dest_paths(
base_unw_paths, crop, cls.params_mpi, xlks)
run_pyrate.process_ifgs(dest_paths, cls.params)
cls.ifgs_serial = common.small_data_setup(datafiles=dest_paths)
cls.conf_mpi = tempfile.mktemp('.conf', dir=cls.tif_dir_mpi)
cf.write_config_file(cls.params_mpi, cls.conf_mpi)
str = 'mpirun -np 4 python pyrate/nci/run_pyrate_pypar.py ' + \
cls.conf_mpi
cmd = str.split()
subprocess.check_call(cmd)
str = 'mpirun -np 4 python pyrate/nci/run_pyrate_pypar_2.py ' + \
cls.conf_mpi
cmd = str.split()
subprocess.check_call(cmd)
str = 'mpirun -np 4 python pyrate/nci/run_pyrate_pypar_3.py ' + \
cls.conf_mpi
cmd = str.split()
subprocess.check_call(cmd)
cls.ifgs_mpi = common.small_data_setup(datafiles=dest_paths_mpi)
@classmethod
def tearDownClass(cls):
for i in cls.ifgs_serial:
i.close()
for i in cls.ifgs_mpi:
i.close()
shutil.rmtree(cls.tif_dir_serial)
shutil.rmtree(cls.tif_dir_mpi)
def test_metadata_was_copied(self):
for j, i in zip(self.ifgs_serial, self.ifgs_mpi):
md = i.meta_data
md_s = j.meta_data
self.assertIn(ifc.PYRATE_WEATHER_ERROR, md_s.keys())
self.assertIn(ifc.PYRATE_WEATHER_ERROR, md.keys())
self.assertIn(pyaps.APS_STATUS, md.values())
def test_meta_data_was_written(self):
for i in self.ifgs_mpi:
md = i.meta_data
ds = gdal.Open(i.data_path)
md_w = ds.GetMetadata()
self.assertDictEqual(md, md_w)
self.assertIn(ifc.PYRATE_WEATHER_ERROR, md_w.keys())
ds = None
def test_dem_tifs_present(self):
# geotiffed dem
os.path.exists(os.path.join(self.params_mpi[cf.OUT_DIR],
os.path.splitext(common.SML_TEST_DEM_GAMMA)[0]
+ '.tif'))
# multilooked dem
os.path.exists(os.path.join(self.params_mpi[cf.OUT_DIR],
os.path.splitext(common.SML_TEST_DEM_GAMMA)[0]
+ '_{looks}rlks_{crop}cr.tif'.format(
looks=self.params_mpi[cf.IFG_LKSX],
crop=self.params_mpi[cf.IFG_CROP_OPT])))
def test_serial_vs_mpi_equal(self):
for i, j in zip(self.ifgs_serial, self.ifgs_mpi):
np.testing.assert_array_almost_equal(i.phase_data, j.phase_data,
decimal=4)
if __name__ == '__main__':
unittest.main()
```
#### File: PyRate/utils/gdaldem.py
```python
import subprocess
import sys
import os
import tempfile
import numpy as np
from pyrate.shared import Ifg, DEM
def main(input_file, color_file, output_file):
cmd = "gdaldem color-relief " + input_file \
+ ' ' + color_file + ' ' + output_file
subprocess.check_call(cmd, shell=True)
def gen_color_file(input_file):
fp, temp_file = tempfile.mkstemp(suffix='.txt')
dem = DEM(input_file)
dem.open()
phase_data = dem.height_band.ReadAsArray()
max_ph = np.nanmax(phase_data)
min_ph = np.nanmin(phase_data)
range_ph = max_ph-min_ph
colors = ['black', 'blue', 'yellow', 'orange', 'red', 'white']
with open(temp_file, 'w') as f:
for i, c in enumerate(colors[:-1]):
f.write(str(int(min_ph + (i + 1)*range_ph/len(colors))) +
' ' + c + '\n')
f.write(str(int(max_ph - range_ph/len(colors))) +
' ' + colors[-1] + '\n')
os.close(fp)
return temp_file
if __name__ == '__main__':
input_file = sys.argv[1]
color_file = sys.argv[2]
output_file = sys.argv[3]
if color_file == 'auto':
print('\nauto generating color file')
color_file = gen_color_file(input_file)
with open(color_file, 'r') as f:
print('\ncolor file contents')
print('='*50)
for l in f.readlines():
print(l)
print('='*50)
main(input_file, color_file, output_file)
``` |
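A hedged usage sketch for the helper above; the file names are placeholders and the import assumes the `utils` directory is importable from the current path:
```python
# Hypothetical invocation; equivalent CLI: python utils/gdaldem.py dem.tif auto out.tif
from utils.gdaldem import gen_color_file, main  # assumes utils/ is on the Python path

color_table = gen_color_file('dem.tif')          # writes "<height> <color>" lines
main('dem.tif', color_table, 'dem_colored.tif')  # shells out to gdaldem color-relief
```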
{
"source": "jlmaurer/pyre",
"score": 2
} |
#### File: pyre/flow/Status.py
```python
import pyre
# declaration
class Status(pyre.tracker):
"""
A helper that watches over a component's traits and records value changes
"""
# public data
@property
def stale(self):
"""
Return my current status
"""
# easy enough
return self._stale
@stale.setter
def stale(self, status):
"""
Adjust my status
"""
# if i'm being marked as stale
if status is True:
# flush
return self.flush()
# otherwise, just update the status
self._stale = status
# all done
return self
# interface
def playback(self, node, alias):
"""
Go through the history of the trait named {alias}
"""
# find its trait by this name
trait = node.pyre_trait(alias=alias)
# get the key
key = node.pyre_inventory[trait].key
# chain up
yield from super().playback(key=key)
# all done
return
# input binding
def addInputBinding(self, **kwds):
"""
The given {product} is now an input to {factory}
"""
# show me
# self.log(activity="adding input", **kwds)
# all done
return self
def removeInputBinding(self, **kwds):
"""
The given {product} is no longer an input to {factory}
"""
# show me
# self.log(activity="removing input", **kwds)
# all done
return self
# output binding
def addOutputBinding(self, **kwds):
"""
The given {product} is now an output of {factory}
"""
# show me
# self.log(activity="adding output", **kwds)
# all done
return self
def removeOutputBinding(self, **kwds):
"""
The given {product} is no longer an output of {factory}
"""
# show me
self.log(activity="removing output", **kwds)
# all done
return self
# meta-methods
def __init__(self, node, stale=False, **kwds):
# chain up
super().__init__(**kwds)
# initialize my flag
self._stale = stale
# enable tracking
self.track(component=node)
# all done
return
# hooks
def flush(self, **kwds):
"""
Handler of the notification that the value of an {observable} has changed
"""
# update my state
self._stale = True
# chain up
return super().flush(**kwds)
# implementation details
def log(self, activity, factory, product):
# show me
print(f"pyre.flow.Status: {activity}")
print(f" status: {self}")
print(f" factory: {factory}")
print(f" product: {product}")
print(f"")
# all done
return
# private data
_stale = None
# end of file
``` |
{
"source": "jlmaurer/scikit-gstat",
"score": 3
} |
#### File: scikit-gstat/skgstat/DirectionalVariogram.py
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import squareform, pdist
import matplotlib.collections
from .Variogram import Variogram
class DirectionalVariogram(Variogram):
"""DirectionalVariogram Class
Calculates a variogram of the separating distances in the given
coordinates and relates them to one of the semi-variance measures of the
given dependent values.
The direcitonal version of a Variogram will only form paris of points
that share a specified spatial relationship.
"""
def __init__(self,
coordinates=None,
values=None,
estimator='matheron',
model='spherical',
dist_func='euclidean',
bin_func='even',
normalize=False,
fit_method='trf',
fit_sigma=None,
directional_model='triangle',
azimuth=0,
tolerance=45.0,
bandwidth='q33',
use_nugget=False,
maxlag=None,
n_lags=10,
verbose=False
):
r"""Variogram Class
Directional Variogram. The calculation is not performant and not
tested yet.
Parameters
----------
coordinates : numpy.ndarray
Array of shape (m, n). Will be used as m observation points of
n-dimensions. This variogram can be calculated on 1 - n
dimensional coordinates. In case a 1-dimensional array is passed,
a second array of same length containing only zeros will be
stacked to the passed one.
values : numpy.ndarray
Array of values observed at the given coordinates. The length of
the values array has to match the m dimension of the coordinates
array. Will be used to calculate the dependent variable of the
variogram.
estimator : str, callable
String identifying the semi-variance estimator to be used.
Defaults to the Matheron estimator. Possible values are:
* matheron [Matheron, default]
* cressie [Cressie-Hawkins]
* dowd [Dowd-Estimator]
* genton [Genton]
* minmax [MinMax Scaler]
* entropy [Shannon Entropy]
If a callable is passed, it has to accept an array of absolute
differences, aligned to the 1D distance matrix (flattened upper
triangle) and return a scalar, that converges towards small
values for similarity (high covariance).
model : str
String identifying the theoretical variogram function to be used
to describe the experimental variogram. Can be one of:
* spherical [Spherical, default]
* exponential [Exponential]
* gaussian [Gaussian]
* cubic [Cubic]
* stable [Stable model]
* matern [Matérn model]
* nugget [nugget effect variogram]
dist_func : str
String identifying the distance function. Defaults to
'euclidean'. Can be any metric accepted by
scipy.spatial.distance.pdist. Additional parameters are not (yet)
passed through to pdist. These are accepted by pdist for some of
the metrics. In these cases the default values are used.
bin_func : str
String identifying the binning function used to find lag class
edges. At the moment there are two possible values: 'even'
(default) or 'uniform'. Even will find n_lags bins of same width
in the interval [0,maxlag[. 'uniform' will identfy n_lags bins on
the same interval, but with varying edges so that all bins count
the same amount of observations.
normalize : bool
Defaults to False. If True, the independent and dependent
variable will be normalized to the range [0,1].
fit_method : str
String identifying the method to be used for fitting the
theoretical variogram function to the experimental. More info is
given in the Variogram.fit docs. Can be one of:
* 'lm': Levenberg-Marquardt algorithm for unconstrained
problems. This is the faster algorithm, yet the fitting of
a variogram is not an unconstrained problem.
* 'trf': Trust Region Reflective function for non-linear
constrained problems. The class will set the boundaries
itself. This is the default function.
fit_sigma : numpy.ndarray, str
Defaults to None. The sigma is used as measure of uncertainty
during variogram fit. If fit_sigma is an array, it has to hold
n_lags elements, giving the uncertainty for all lags classes. If
fit_sigma is None (default), it will give no weight to any lag.
Higher values indicate higher uncertainty and will lower the
influence of the corresponding lag class for the fit.
If fit_sigma is a string, a pre-defined function of separating
distance will be used to fill the array. Can be one of:
* 'linear': Linear loss with distance. Small bins will have
higher impact.
* 'exp': The weights decrease by a e-function of distance
* 'sqrt': The weights decrease by the squareroot of distance
* 'sq': The weights decrease by the squared distance.
More info is given in the Variogram.fit_sigma documentation.
directional_model : string, function
The model used for selecting all points fulfilling the
directional constraint of the Variogram. A predefined
model can be selected by passing the model name as string.
Optionally a callable accepting the difference vectors
between points in polar form as angles and distances and
returning a mask array can be passed. In this case, the
azimuth, tolerance and bandwidth has to be incorporated by
hand into the model.
* 'compass': includes points in the direction of the
azimuth at given tolerance. The bandwidth parameter will be
ignored.
* 'triangle': constructs a triangle with an angle of
tolerance at the point of interest, unioned with a rectangle
parallel to azimuth, once the hypotenuse length reaches
bandwidth.
* 'circle': constructs a half circle touching the point of
interest, dislocating the center at the distance of
bandwidth in the direction of azimuth. The half circle is
unioned with a rectangle parallel to azimuth.
Visual representations, usage hints and implementation specifics
are given in the documentation.
azimuth : float
The azimuth of the directional dependence for this Variogram,
given as an angle in **degree**. The East of the coordinate
plane is set to be at 0° and is counted clockwise to 180° and
counter-clockwise to -180°. Only Points lying in the azimuth of a
specific point will be used for forming point pairs.
tolerance : float
The tolerance is given as an angle in **degree**. Points being
dislocated from the exact azimuth by half the tolerance will be
accepted as well. It's half the tolerance as the point may be
dislocated in the positive and negative direction from the azimuth.
bandwidth : float
Maximum tolerance acceptable in **coordinate units**, which is
usually meter. Points at higher distances may be far dislocated
from the azimuth in terms of coordinate distance, as the
tolerance is defined as an angle. The bandwidth defines a maximum
width for the search window. It will be perpendicular to and
bisected by the azimuth.
use_nugget : bool
Defaults to False. If True, a nugget effect will be added to all
Variogram.models as a third (or fourth) fitting parameter. A
nugget is essentially the y-axis intercept of the theoretical
variogram function.
maxlag : float, str
Can specify the maximum lag distance directly by giving a value
larger than 1. The binning function will not find any lag class
with an edge larger than maxlag. If 0 < maxlag < 1, then maxlag
is relative and maxlag * max(Variogram.distance) will be used.
In case maxlag is a string it has to be one of 'median', 'mean'.
Then the median or mean of all Variogram.distance will be used.
Note maxlag=0.5 will use half the maximum separating distance,
this is not the same as 'median', which is the median of all
separating distances
n_lags : int
Specify the number of lag classes to be defined by the binning
function.
verbose : bool
Set the Verbosity of the class. Not Implemented yet.
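Examples
--------
A minimal usage sketch with synthetic data; the coordinates, values and
parameter choices below are purely illustrative and are not taken from
the package's test data.
>>> import numpy as np
>>> from skgstat import DirectionalVariogram
>>> coords = np.random.rand(60, 2) * 100.
>>> vals = np.random.rand(60)
>>> DV = DirectionalVariogram(coords, vals, azimuth=90, tolerance=30,
...                           directional_model='triangle')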
"""
# FIXME: Call __init__ of baseclass?
self._direction_mask_cache = None
# Set coordinates
self._X = np.asarray(coordinates)
# pairwise difference
self._diff = None
# set verbosity
self.verbose = verbose
# set values
self._values = None
# calc_diff = False here, because it will be calculated by fit() later
self.set_values(values=values, calc_diff=False)
# distance matrix
self._dist = None
# set distance calculation function
self._dist_func = None
self.set_dist_function(func=dist_func)
# Angles and euclidean distances used for direction mask calculation
self._angles = None
self._euclidean_dist = None
# lags and max lag
self.n_lags = n_lags
self._maxlag = None
self.maxlag = maxlag
# estimator can be function or a string
self._estimator = None
self.set_estimator(estimator_name=estimator)
# model can be function or a string
self._model = None
self.set_model(model_name=model)
# azimuth direction
self._azimuth = None
self.azimuth = azimuth
# azimuth tolerance
self._tolerance = None
self.tolerance = tolerance
# tolerance bandwidth
self._bandwidth = None
self.bandwidth = bandwidth
# set the directional model
self._directional_model = None
self.set_directional_model(model_name=directional_model)
# the binning settings
self._bin_func = None
self._groups = None
self._bins = None
self.set_bin_func(bin_func=bin_func)
# specify if the lag should be given absolute or relative to the maxlag
self._normalized = normalize
# set the fitting method and sigma array
self.fit_method = fit_method
self._fit_sigma = None
self.fit_sigma = fit_sigma
# set if nugget effect shall be used
self.use_nugget = use_nugget
# set attributes to be filled during calculation
self.cov = None
self.cof = None
# settings, not reachable by init (not yet)
self._cache_experimental = False
# do the preprocessing and fitting upon initialization
# Note that fit() calls preprocessing
self.fit(force=True)
def preprocessing(self, force=False):
self._calc_distances(force=force)
self._calc_direction_mask_data(force)
self._calc_diff(force=force)
self._calc_groups(force=force)
def _calc_direction_mask_data(self, force=False):
r"""
Calculate directional mask data.
For this, the angle between the vector connecting the two
points and east (see comment about self.azimuth) is calculated.
The result is stored in self._angles and contains the angle of each
point pair vector to the x-axis in radians.
Parameters
----------
force : bool
If True, a new calculation of all angles is forced, even if they
are already in the cache.
Notes
-----
The masked data is in radians, while azimuth is given in degrees.
For the Vector between a point pair A,B :math:`\overrightarrow{AB}=u` and the
x-axis, represented by vector :math:`\overrightarrow{e} = [1,0]`, the angle
:math:`\Theta` is calculated as:
.. math::
\cos(\Theta) = \frac{u \cdot e}{|u| \cdot |e|}
See Also
--------
`azimuth <skgstat.DirectionalVariogram.azimuth>`_
"""
# check if already calculated
if self._angles is not None and not force:
return
# if self._X is of just one dimension, concat zeros.
if self._X.ndim == 1:
_x = np.vstack(zip(self._X, np.zeros(len(self._X))))
elif self._X.ndim == 2:
_x = self._X
else:
raise NotImplementedError('N-dimensional coordinates cannot be handled')
# for angles, we need Euclidean distance,
# no matter which distance function is used
if self._dist_func == "euclidean":
self._euclidean_dist = self._dist
else:
self._euclidean_dist = pdist(_x, "euclidean")
# Calculate the angles
# (a - b).[1,0] = ||a - b|| * ||[1,0]|| * cos(v)
# cos(v) = (a - b).[1,0] / ||a - b||
# cos(v) = (a.[1,0] - b.[1,0]) / ||a - b||
scalar = pdist(np.array([np.dot(_x, [1, 0])]).T, np.subtract)
pos_angles = np.arccos(scalar / self._euclidean_dist)
# cos(v) for [2,1] and [2, -1] is the same,
# but v is not (v vs -v), fix that.
ydiff = pdist(np.array([np.dot(_x, [0, 1])]).T, np.subtract)
# store the angle or the negative angle, depending on the
# sign of the y coordinate difference
self._angles = np.where(ydiff >= 0, pos_angles, -pos_angles)
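# Illustrative check of the formula above (numbers are made up): for a
# difference vector u = (1, 1), cos(theta) = u.[1, 0] / |u| = 1/sqrt(2),
# so theta = pi/4 (45 degrees); for u = (1, -1) the y-difference is
# negative and the stored angle becomes -pi/4.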
@property
def azimuth(self):
"""Direction azimuth
Main direction for the selection of points in the formation of point
pairs. East of the coordinate plane is defined to be 0° and the
azimuth is counted clockwise up to 180° and counter-clockwise to -180°.
Parameters
----------
angle : float
New azimuth angle in **degree**.
Raises
------
ValueError : in case angle < -180° or angle > 180
"""
return self._azimuth
@azimuth.setter
def azimuth(self, angle):
if angle < -180 or angle > 180:
raise ValueError('The azimuth is an angle in degree and has to '
'meet -180 <= angle <= 180')
else:
self._azimuth = angle
# reset groups and mask cache on azimuth change
self._direction_mask_cache = None
self._groups = None
@property
def tolerance(self):
"""Azimuth tolerance
Tolerance angle specifying how far a point may be off the azimuth and
still be counted as directional. The tolerance angle is applied to
the azimuth angle symmetrically.
Parameters
----------
angle : float
New tolerance angle in **degree**. Has to meet 0 <= angle <= 360.
Raises
------
ValueError : in case angle < 0 or angle > 360
"""
return self._tolerance
@tolerance.setter
def tolerance(self, angle):
if angle < 0 or angle > 360:
raise ValueError('The tolerance is an angle in degree and has to '
'meet 0 <= angle <= 360')
else:
self._tolerance = angle
# reset groups and mask on tolerance change
self._direction_mask_cache = None
self._groups = None
@property
def bandwidth(self):
"""Tolerance bandwidth
New bandwidth parameter. As the tolerance from azimuth is given as an
angle, point pairs at high distances can be far off the azimuth in
coordinate distance. The bandwidth limits this distance and has the
unit of the coordinate system.
Parameters
----------
width : float
Positive coordinate distance.
Raises
------
ValueError : in case width is negative
"""
if self._bandwidth is None:
return 0
else:
return self._bandwidth
@bandwidth.setter
def bandwidth(self, width):
# check if quantiles is given
if isinstance(width, str):
# TODO document and handle more exceptions
q = int(width[1:])
self._bandwidth = np.percentile(self.distance, q)
elif width < 0:
raise ValueError('The bandwidth cannot be negative.')
elif width > np.max(self.distance):
print('The bandwidth is larger than the maximum separating '
'distance. Thus it will have no effect.')
else:
self._bandwidth = width
# reset groups and direction mask cache on bandwidth change
self._direction_mask_cache = None
self._groups = None
def set_directional_model(self, model_name):
"""Set new directional model
The model used for selecting all points fulfilling the
directional constraint of the Variogram. A predefined model
can be selected by passing the model name as string.
Optionally a callable accepting the difference vectors between
points in polar form as angles and distances and returning a
mask array can be passed. In this case, the azimuth, tolerance
and bandwidth has to be incorporated by hand into the model.
The predefined options are:
* 'compass': includes points in the direction of the azimuth at given
tolerance. The bandwidth parameter will be ignored.
* 'triangle': constructs a triangle with an angle of tolerance at the
point of interest, unioned with a rectangle parallel to azimuth,
once the hypotenuse length reaches bandwidth.
* 'circle': constructs a half circle touching the point of interest,
dislocating the center at the distance of bandwidth in the
direction of azimuth. The half circle is unioned with a rectangle
parallel to azimuth.
Visual representations, usage hints and implementation specifics
are given in the documentation.
Parameters
----------
model_name : string, callable
The name of the predefined model (string) or a function
that accepts angle and distance arrays and returns a mask
array.
"""
# handle predefined models
if isinstance(model_name, str):
if model_name.lower() == 'compass':
self._directional_model = self._compass
elif model_name.lower() == 'triangle':
self._directional_model = self._triangle
elif model_name.lower() == 'circle':
self._directional_model = self._circle
else:
raise ValueError('%s is not a valid model.' % model_name)
# handle callable
elif callable(model_name):
self._directional_model = model_name
else:
raise ValueError('The directional model has to be identified by a '
'model name, or it has to be the search area '
'itself')
# reset the groups as the directional model changed
self._groups = None
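# A commented-out sketch of a user-supplied directional model, as described
# in the docstring above (names are hypothetical): the callable receives the
# pair angles in radians and the pair distances, and must return a boolean
# mask aligned to scipy.spatial.distance.pdist output, e.g.
#
#     def my_model(angles, dists):
#         return (np.abs(angles) <= 0.5) & (dists < 50.)
#
#     DV.set_directional_model(my_model)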
@property
def bins(self):
if self._bins is None:
# get the distances
d = self.distance.copy()
d[np.where(~self._direction_mask())] = np.nan
self._bins = self.bin_func(d, self.n_lags, self.maxlag)
return self._bins.copy()
def _calc_groups(self, force=False):
super(DirectionalVariogram, self)._calc_groups(force=force)
# set to outside maxlag group
self._groups[np.where(~self._direction_mask())] = -1
# @jit
def _direction_mask(self, force=False):
"""Directional Mask
Array aligned to self.distance masking all point pairs which shall be
ignored for binning and grouping. The one dimensional array contains
all row-wise point pair combinations from the upper triangle of the
distance matrix and flags whether each pair fulfills the directional
criterion.
Returns
-------
mask : numpy.array
Array aligned to self.distance giving for each point pair
combination a boolean value whether the point are directional or
not.
"""
if force or self._direction_mask_cache is None:
self._direction_mask_cache = self._directional_model(self._angles, self._euclidean_dist)
return self._direction_mask_cache
def pair_field(self, ax=None, cmap="gist_rainbow", points='all', add_points=True, alpha=0.3):  # pragma: no cover
"""
Plot a pair field.
Plot a network graph for all point pairs that fulfill the direction
filter and lie within each others search area.
Parameters
----------
ax : matplotlib.Subplot
A matplotlib Axes object to plot the pair field onto.
If ``None``, a new matplotlib figure will be created.
cmap : string
Any color-map name that is supported by matplotlib
points : 'all', int, list
If not ``'all'``, only the given coordinate (int) or
list of coordinates (list) will be plotted. Recommended, if
the input data is quite large.
add_points : bool
If True (default) The coordinates will be added as black points.
alpha : float
Alpha value for the colors, making overlapping vertices
easier to distinguish. Defaults to ``0.3``.
"""
# get the direction mask
mask = squareform(self._direction_mask())
# build a coordinate meshgrid
r = np.arange(len(self._X))
x1, x2 = np.meshgrid(r, r)
start = self._X[x1[mask]]
end = self._X[x2[mask]]
# handle lesser points
if isinstance(points, int):
points = [points]
if isinstance(points, list):
_start, _end = list(), list()
for p in self._X[points]:
_start.extend(start[np.where(end == p)[0]])
_end.extend(end[np.where(end == p)[0]])
start = np.array(_start)
end = np.array(_end)
# extract all lines and align colors
lines = np.column_stack((start.reshape(len(start), 1, 2), end.reshape(len(end), 1, 2)))
#colors = plt.cm.get_cmap(cmap)(x2[mask] / x2[mask].max())
colors = plt.cm.get_cmap(cmap)(np.linspace(0, 1, len(lines)))
colors[:, 3] = alpha
# get the figure and ax object
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(8,8))
else:
fig = ax.get_figure()
# plot
lc = matplotlib.collections.LineCollection(lines, colors=colors, linewidths=1)
ax.add_collection(lc)
# add coordinates
if add_points:
ax.scatter(self._X[:, 0], self._X[:, 1], 15, c='k')
if isinstance(points, list):
ax.scatter(self._X[:, 0][points], self._X[:, 1][points], 25, c='r')
# finish plot
ax.autoscale()
ax.margins(0.1)
return fig
def _triangle(self, angles, dists):
r"""Triangular Search Area
Construct a triangular bounded search area for building directional
dependent point pairs. The Search Area will be located onto the
current point of interest and the local x-axis is rotated onto the
azimuth angle.
Parameters
----------
angles, dists : numpy.array
Vectors between point pairs in polar form (angle relative
to east in radians, length in coordinate space units)
Returns
-------
mask : numpy.array(bool)
Point pair mask, indexed as the results of
scipy.spatial.distance.pdist are.
Notes
-----
.. code-block:: text
C
/|\
a / | \ a
/__|h_\
A c B
The point of interest is C and c is the bandwidth. The angle at C
(gamma) is the tolerance. From this, a and then h can be calculated.
When rotated into the local coordinate system, the two points needed
to build the search area A,B are A := (h, 1/2 c) and B:= (h, -1/2 c)
a can be calculated like:
.. math::
a = \frac{c}{2 * sin\left(\frac{\gamma}{2}\right)}
See Also
--------
DirectionalVariogram._compass
DirectionalVariogram._circle
"""
absdiff = np.abs(angles + np.radians(self.azimuth))
absdiff = np.where(absdiff > np.pi, absdiff - np.pi, absdiff)
absdiff = np.where(absdiff > np.pi / 2, np.pi - absdiff, absdiff)
in_tol = absdiff <= np.radians(self.tolerance / 2)
in_band = self.bandwidth / 2 >= np.abs(dists * np.sin(np.abs(angles + np.radians(self.azimuth))))
return in_tol & in_band
def _circle(self, angles, dists):
r"""Circular Search Area
Construct a half-circled bounded search area for building directional
dependent point pairs. The Search Area will be located onto the
current point of interest and the local x-axis is rotated onto the
azimuth angle.
The radius of the half-circle is set to half the bandwidth.
Parameters
----------
angles, dists : numpy.array
Vectors between point pairs in polar form (angle relative
to east in radians, length in coordinate space units)
Returns
-------
mask : numpy.array(bool)
Point pair mask, indexed as the results of
scipy.spatial.distance.pdist are.
Raises
------
ValueError : In case the DirectionalVariogram.bandwidth is None or 0.
See Also
--------
DirectionalVariogram._triangle
DirectionalVariogram._compass
"""
raise NotImplementedError
def _compass(self, angles, dists):
r"""Compass direction direction mask
Construct a search area for building directional dependent point
pairs. The compass search area will **not** be bounded by the
bandwidth. It will include all point pairs at the azimuth direction
with a given tolerance. The Search Area will be located onto the
current point of interest and the local x-axis is rotated onto the
azimuth angle.
Parameters
----------
angles, dists : numpy.array
Vectors between point pairs in polar form (angle relative
to east in radians, length in coordinate space units)
Returns
-------
mask : numpy.array(bool)
Point pair mask, indexed as the results of
scipy.spatial.distance.pdist are.
See Also
--------
DirectionalVariogram._triangle
DirectionalVariogram._circle
"""
absdiff = np.abs(angles + np.radians(self.azimuth))
absdiff = np.where(absdiff > np.pi, absdiff - np.pi, absdiff)
absdiff = np.where(absdiff > np.pi / 2, np.pi - absdiff, absdiff)
return absdiff <= np.radians(self.tolerance / 2)
```
#### File: scikit-gstat/skgstat/models.py
```python
import math
from functools import wraps
import numpy as np
from scipy import special
from numba import jit
def variogram(func):
@wraps(func)
def wrapper(*args, **kwargs):
if hasattr(args[0], '__iter__'):
new_args = args[1:]
mapping = map(lambda h: func(h, *new_args, **kwargs), args[0])
return np.fromiter(mapping, dtype=float)
else:
return func(*args, **kwargs)
return wrapper
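# Added usage note: the @variogram decorator lets every model below accept
# either a scalar lag or an iterable of lags, e.g. (illustrative):
#   spherical(5.0, 10.0, 2.0)               # -> single gamma value
#   spherical([1.0, 5.0, 15.0], 10.0, 2.0)  # -> numpy array of gamma values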
@variogram
@jit
def spherical(h, r, c0, b=0):
r"""Spherical Variogram function
Implementation of the spherical variogram function. Calculates the
dependent variable for a given lag (h). The nugget (b) defaults to be 0.
Parameters
----------
h : float
Specifies the lag of separating distances that the dependent variable
shall be calculated for. It has to be a positive real number.
r : float
The effective range. Note this is not the range parameter! However,
for the spherical variogram the range and effective range are the same.
c0 : float
The sill of the variogram, where it will flatten out. The function
will not return a value higher than C0 + b.
b : float
The nugget of the variogram. This is the value of independent
variable at the distance of zero. This is usually attributed to
non-spatial variance.
Returns
-------
gamma : numpy.float64
Unlike in most variogram function formulas, which define the function
for :math:`2*\gamma`, this function will return :math:`\gamma` only.
Notes
-----
The implementation follows [6]_:
.. math::
\gamma = b + C_0 * \left({1.5*\frac{h}{r} - 0.5*\frac{h}{r}^3}\right)
if :math:`h < r`, and
.. math::
\gamma = b + C_0
    else. r is the effective range, which, in the case of the spherical
    variogram, equals the range parameter a.
References
----------
.. [6] Burgess, <NAME>., & Webster, R. (1980). Optimal interpolation
and isarithmic mapping of soil properties. I.The semi-variogram and
punctual kriging. Journal of Soil and Science, 31(2), 315–331,
http://doi.org/10.1111/j.1365-2389.1980.tb02084.x
"""
# prepare parameters
a = r / 1.
if h <= r:
return b + c0 * ((1.5 * (h / a)) - (0.5 * ((h / a) ** 3.0)))
else:
return b + c0
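# Added worked example (not part of the original file): with r=10, c0=2, b=1
# the spherical model gives
#   spherical(1, 10, 2, 1)  -> 1 + 2 * (0.15 - 0.0005) = 1.299
#   spherical(10, 10, 2, 1) -> 1 + 2 * (1.5 - 0.5)     = 3.0
#   spherical(15, 10, 2, 1) -> 3.0  (flat beyond the effective range)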
@variogram
@jit
def exponential(h, r, c0, b=0):
r"""Exponential Variogram function
Implementation of the exponential variogram function. Calculates the
dependent variable for a given lag (h). The nugget (b) defaults to be 0.
Parameters
----------
h : float
Specifies the lag of separating distances that the dependent variable
shall be calculated for. It has to be a positive real number.
r : float
The effective range. Note this is not the range parameter! For the
exponential variogram function the range parameter a is defined to be
:math:`a=\frac{r}{3}`. The effective range is the lag where 95% of the
        sill is exceeded. This is needed as the sill is only approached
asymptotically by an exponential function.
c0 : float
The sill of the variogram, where it will flatten out. The function
will not return a value higher than C0 + b.
b : float
The nugget of the variogram. This is the value of independent
variable at the distance of zero. This is usually attributed to
non-spatial variance.
Returns
-------
gamma : numpy.float64
Unlike in most variogram function formulas, which define the function
for :math:`2*\gamma`, this function will return :math:`\gamma` only.
Notes
-----
The implementation following [7]_ and [8]_ is as:
.. math::
\gamma = b + C_0 * \left({1 - e^{-\frac{h}{a}}}\right)
a is the range parameter, that can be calculated from the
effective range r as: :math:`a = \frac{r}{3}`.
References
----------
.. [7] <NAME>. (1993): Statistics for spatial data.
Wiley Interscience.
.. [8] <NAME>., <NAME>. (1999). Geostatistics. Modeling Spatial
Uncertainty. Wiley Interscience.
"""
# prepare parameters
a = r / 3.
return b + c0 * (1. - math.exp(-(h / a)))
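# Added note: with a = r / 3 the model reaches b + c0 * (1 - exp(-3)), i.e.
# roughly 95% of the sill, at the effective range h = r, matching the
# convention described in the docstring above.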
@variogram
@jit
def gaussian(h, r, c0, b=0):
r""" Gaussian Variogram function
Implementation of the Gaussian variogram function. Calculates the
dependent variable for a given lag (h). The nugget (b) defaults to be 0.
Parameters
----------
h : float
Specifies the lag of separating distances that the dependent variable
shall be calculated for. It has to be a positive real number.
r : float
        The effective range. Note this is not the range parameter! For the
        Gaussian variogram function the range parameter a is defined to be
        :math:`a=\frac{r}{2}`. The effective range is the lag where the
        sill is practically reached. This is needed as the sill is only
        approached asymptotically by a Gaussian function.
c0 : float
The sill of the variogram, where it will flatten out. The function
will not return a value higher than C0 + b.
b : float
The nugget of the variogram. This is the value of independent
variable at the distance of zero. This is usually attributed to
non-spatial variance.
Returns
-------
gamma : numpy.float64
Unlike in most variogram function formulas, which define the function
for :math:`2*\gamma`, this function will return :math:`\gamma` only.
Notes
-----
This implementation follows [9]_:
.. math::
\gamma = b + c_0 * \left({1 - e^{-\frac{h^2}{a^2}}}\right)
a is the range parameter, that can be calculated from the
effective range r as:
.. math::
a = \frac{r}{2}
References
----------
.. [9] <NAME>., <NAME>. (1999). Geostatistics. Modeling Spatial
Uncertainty. Wiley Interscience.
"""
# prepare parameters
a = r / 2.
return b + c0 * (1. - math.exp(- (h ** 2 / a ** 2)))
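# Added note: with a = r / 2 the model reaches b + c0 * (1 - exp(-4)), i.e.
# about 98% of the sill, at the effective range h = r.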
@variogram
@jit
def cubic(h, r, c0, b=0):
r"""Cubic Variogram function
Implementation of the Cubic variogram function. Calculates the
dependent variable for a given lag (h). The nugget (b) defaults to be 0.
Parameters
----------
h : float
Specifies the lag of separating distances that the dependent variable
shall be calculated for. It has to be a positive real number.
r : float
The effective range. Note this is not the range parameter! However,
for the cubic variogram the range and effective range are the same.
c0 : float
The sill of the variogram, where it will flatten out. The function
will not return a value higher than C0 + b.
b : float
The nugget of the variogram. This is the value of independent
variable at the distance of zero. This is usually attributed to
non-spatial variance.
Returns
-------
gamma : numpy.float64
Unlike in most variogram function formulas, which define the function
for :math:`2*\gamma`, this function will return :math:`\gamma` only.
Notes
-----
This implementation is like:
.. math::
\gamma = b + c_0 * \left[{7 * \left(\frac{h^2}{a^2}\right) -
\frac{35}{4} * \left(\frac{h^3}{a^3}\right) +
\frac{7}{2} * \left(\frac{h^5}{a^5}\right) -
\frac{3}{4} * \left(\frac{h^7}{a^7}\right)}\right]
a is the range parameter. For the cubic function, the effective range and
range parameter are the same.
"""
# prepare parameters
a = r / 1.
if h < r:
return b + c0 * ((7 * (h ** 2 / a ** 2)) -
((35 / 4) * (h ** 3 / a ** 3)) +
((7 / 2) * (h ** 5 / a ** 5)) -
((3 / 4) * (h ** 7 / a ** 7)))
else:
return b + c0
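# Added note: at h = r the bracketed polynomial evaluates to
# 7 - 35/4 + 7/2 - 3/4 = 1, so both branches agree and the model meets the
# sill b + c0 exactly at the effective range.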
@variogram
@jit
def stable(h, r, c0, s, b=0):
r"""Stable Variogram function
Implementation of the stable variogram function. Calculates the
dependent variable for a given lag (h). The nugget (b) defaults to be 0.
Parameters
----------
h : float
Specifies the lag of separating distances that the dependent variable
shall be calculated for. It has to be a positive real number.
r : float
The effective range. Note this is not the range parameter! For the
stable variogram function the range parameter a is defined to be
:math:`a = \frac{r}{3^{\frac{1}{s}}}`. The effective range is the lag
        where 95% of the sill is exceeded. This is needed as the sill is
only approached asymptotically by the e-function part of the stable
model.
c0 : float
The sill of the variogram, where it will flatten out. The function
will not return a value higher than C0 + b.
s : float
        Shape parameter. For s <= 2 the model will be shaped more like an
        exponential or spherical model; for s > 2 it will be shaped more like
        a Gaussian function.
b : float
The nugget of the variogram. This is the value of independent
variable at the distance of zero. This is usually attributed to
non-spatial variance.
Returns
-------
gamma : numpy.float64
Unlike in most variogram function formulas, which define the function
for :math:`2*\gamma`, this function will return :math:`\gamma` only.
Notes
-----
The implementation is:
.. math::
\gamma = b + C_0 * \left({1. - e^{- {\frac{h}{a}}^s}}\right)
a is the range parameter and is calculated from the effective range r as:
.. math::
a = \frac{r}{3^{\frac{1}{s}}}
"""
# prepare parameters
a = r / np.power(3, 1 / s)
# if s > 2:
# s = 2
return b + c0 * (1. - math.exp(- math.pow(h / a, s)))
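# Added note: for s = 1 the range parameter becomes a = r / 3 and the model
# reduces exactly to the exponential model above; for s = 2 it takes a
# Gaussian-like shape with a = r / sqrt(3).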
@variogram
@jit(forceobj=True)
def matern(h, r, c0, s, b=0):
r"""Matérn Variogram function
Implementation of the Matérn variogram function. Calculates the
dependent variable for a given lag (h). The nugget (b) defaults to be 0.
Parameters
----------
h : float
Specifies the lag of separating distances that the dependent variable
shall be calculated for. It has to be a positive real number.
r : float
The effective range. Note this is not the range parameter! For the
Matérn variogram function the range parameter a is defined to be
:math:`a = \frac{r}{2}`. The effective range is the lag
        where 95% of the sill is exceeded. This is needed as the sill is
        only approached asymptotically by the Matérn model.
c0 : float
The sill of the variogram, where it will flatten out. The function
will not return a value higher than C0 + b.
s : float
Smoothness parameter. The smoothness parameter can shape a smooth or
rough variogram function. A value of 0.5 will yield the exponential
function, while a smoothness of +inf is exactly the Gaussian model.
Typically a value of 10 is close enough to Gaussian shape to simulate
        its behaviour. Low values describe a 'rough' random field, while
        larger values describe a 'smooth' one.
b : float
The nugget of the variogram. This is the value of independent
variable at the distance of zero. This is usually attributed to
non-spatial variance.
Returns
-------
gamma : numpy.float64
Unlike in most variogram function formulas, which define the function
for :math:`2*\gamma`, this function will return :math:`\gamma` only.
Notes
-----
The formula and references will follow.
"""
# prepare parameters
# TODO: depend a on s, for 0.5 should be 3, above 10 should be 3
a = r / 2.
# calculate
return b + c0 * (1. - (2 / special.gamma(s)) *
np.power((h * np.sqrt(s)) / a, s) *
special.kv(s, 2 * ((h * np.sqrt(s)) / a))
)
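# Added caveat: as written, evaluating the Matérn model at h = 0 multiplies
# zero by the diverging Bessel term special.kv(s, 0), which yields nan in
# numpy; callers may need to special-case the zero lag.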
``` |
{
"source": "jlmaurer/tectosaur",
"score": 2
} |
#### File: tectosaur/examples/basin.py
```python
import numpy as np
import matplotlib.pyplot as plt
import okada_wrapper
import tectosaur.mesh as mesh
import tectosaur.constraints as constraints
import tectosaur.mass_op as mass_op
from tectosaur.sparse_integral_op import SparseIntegralOp
import tectosaur.geometry as geometry
from tectosaur.mass_op import MassOp
from tectosaur.composite_op import CompositeOp
from tectosaur.combined_mesh import CombinedMesh
import solve
def make_free_surface(w, n):
corners = [[-w, -w, 0], [-w, w, 0], [w, w, 0], [w, -w, 0]]
return mesh.make_rect(n, n, corners)
def make_fault(L, top_depth, n):
return mesh.make_rect(n, n, [
[-L, 0, top_depth], [-L, 0, top_depth - 1],
[L, 0, top_depth - 1], [L, 0, top_depth]
])
def build_meshes():
fault_L = 1.0
fault_top_depth = -0.5
w = 6
basin_center = [0.0, 2.0, -2.1]
basin_r = 2.0
# n_flt = 8
# n_surf = 50
# basin_refine = 3
n_flt = 8
n_surf = 30
basin_refine = 3
# n_flt = 4
# n_surf = 10
# basin_refine = 1
surf = make_free_surface(w, n_surf)
fault = make_fault(fault_L, fault_top_depth, n_flt)
basin = mesh.make_sphere(basin_center, basin_r, basin_refine)
# basin = mesh.refine_to_size(mesh.make_ellipse(basin_center, 6.0, 1.0, 1.0), 0.5)
country_mesh = CombinedMesh([('surf', surf), ('fault', fault), ('basin', mesh.flip_normals(basin))])
basin_mesh = CombinedMesh([('basin', mesh.flip_normals((country_mesh.pts, country_mesh.get_piece_tris('basin'))))])
return country_mesh, basin_mesh
def plot_surf_disp(country_mesh, soln):
obs_pts, vals = country_mesh.extract_pts_vals('surf', soln)
vmax = np.max(vals)
for d in range(3):
plt.figure()
plt.tripcolor(
obs_pts[:,0], obs_pts[:, 1], country_mesh.get_piece_tris('surf'),
vals[:,d], #shading='gouraud',
cmap = 'PuOr', vmin = -vmax, vmax = vmax
)
plt.title('u ' + ['x', 'y', 'z'][d])
plt.colorbar()
plt.show()
def couple_domains(mesh1, mesh2):
np.testing.assert_almost_equal(mesh1.pts, mesh2.pts)
all_tris = np.vstack((mesh1.tris, mesh2.tris))
initial_cs_u = constraints.continuity_constraints(all_tris, np.array([]), mesh1.pts)
# TODO: Refactor to have a shared find-touching-triangle-vertex-pairs
# function with continuity_constraints
cs_u = []
cs_t = []
for c in initial_cs_u:
dof1, dof2 = c.terms[0].dof, c.terms[1].dof
if dof1 > mesh1.n_dofs():
dof1 += mesh1.n_dofs()
cs_u.append(constraints.ConstraintEQ([
constraints.Term(1.0, dof1),
constraints.Term(-1.0, dof2)
], 0.0
))
second_dof_factor = 1.0
if (dof1 < mesh1.n_dofs()) == (dof2 < mesh1.n_dofs()):
second_dof_factor = -1.0
if dof1 < mesh1.n_dofs():
dof1 += mesh1.n_dofs()
else:
dof1 += mesh2.n_dofs()
if dof2 < mesh1.n_dofs():
dof2 += mesh1.n_dofs()
else:
dof2 += mesh2.n_dofs()
cs_t.append(constraints.ConstraintEQ([
constraints.Term(1.0, dof1),
constraints.Term(second_dof_factor, dof2)
], c.rhs
))
return cs_u + cs_t
def main():
sm = 1.0
pr = 0.25
basin_sm = 0.00000001
country_mesh, basin_mesh = build_meshes()
n_country_dofs = country_mesh.n_dofs() * 2
n_basin_dofs = basin_mesh.n_dofs() * 2
country_csU = constraints.continuity_constraints(
country_mesh.tris, np.array([]), country_mesh.pts
)
country_csU.extend(constraints.constant_bc_constraints(
country_mesh.get_start('fault'), country_mesh.get_past_end('fault'), [1.0, 0.0, 0.0]
))
country_csU.extend(constraints.free_edge_constraints(country_mesh.get_piece_tris('surf')))
country_csT = constraints.constant_bc_constraints(
country_mesh.get_start('surf'), country_mesh.get_past_end('surf'), [0.0, 0.0, 0.0]
)
# country_csT.extend(constraints.constant_bc_constraints(
# country_mesh.get_start('fault'), country_mesh.get_past_end('fault'), [0.0, 0.0, 0.0]
# ))
country_cs = constraints.build_composite_constraints(
(country_csU, 0), (country_csT, country_mesh.n_dofs())
)
basin_csU = constraints.continuity_constraints(
basin_mesh.tris, np.array([]), basin_mesh.pts
)
# basin_csT = constraints.constant_bc_constraints(
# 0, basin_mesh.n_total_tris(), [0.0, 0.0, 0.0],
# )
basin_csT = []
basin_cs = constraints.build_composite_constraints(
(basin_csU, 0), (basin_csT, basin_mesh.n_dofs())
)
cs = constraints.build_composite_constraints(
(country_cs, 0), (basin_cs, n_country_dofs)
)
cs.extend(couple_domains(country_mesh, basin_mesh))
Hop = SparseIntegralOp(
[], 0, 0, (8, 16, 8), 3, 6, 4.0, 'H', sm, pr, country_mesh.pts, country_mesh.tris,
use_tables = True, remove_sing = True
)
Aop = SparseIntegralOp(
[], 0, 0, 6, 3, 6, 4.0, 'A', sm, pr, country_mesh.pts, country_mesh.tris,
use_tables = True, remove_sing = False
)
country_mass = MassOp(3, country_mesh.pts, country_mesh.tris)
country_mass.mat *= -1
country_op = CompositeOp(
(Hop, 0, 0),
(Aop, 0, country_mesh.n_dofs()),
(country_mass, 0, country_mesh.n_dofs()),
shape = (n_country_dofs, n_country_dofs)
)
Uop = SparseIntegralOp(
[], 0, 0, 6, 3, 6, 4.0, 'U', basin_sm, pr, basin_mesh.pts, basin_mesh.tris,
use_tables = True, remove_sing = False
)
Top = SparseIntegralOp(
[], 0, 0, 6, 3, 6, 4.0, 'T', basin_sm, pr, basin_mesh.pts, basin_mesh.tris,
use_tables = True, remove_sing = False
)
basin_mass = MassOp(3, basin_mesh.pts, basin_mesh.tris)
basin_op = CompositeOp(
(Top, 0, 0),
(basin_mass, 0, 0),
(Uop, 0, basin_mesh.n_dofs()),
shape = (n_basin_dofs, n_basin_dofs)
)
op = CompositeOp(
(country_op, 0, 0),
(basin_op, n_country_dofs, n_country_dofs)
)
soln = solve.iterative_solve(op, cs)
plot_surf_disp(country_mesh, soln)
# soln = solve.iterative_solve(Hop, csU)
# plot_surf_disp(country_mesh, soln)
if __name__ == '__main__':
main()
```
#### File: tectosaur/examples/okada.py
```python
import sys
import logging
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import okada_wrapper
import scipy.spatial
import tectosaur
import tectosaur.mesh.refine as mesh_refine
import tectosaur.mesh.modify as mesh_modify
import tectosaur.mesh.mesh_gen as mesh_gen
import tectosaur.constraints as constraints
from tectosaur.continuity import continuity_constraints, get_side_of_fault
from tectosaur.constraint_builders import all_bc_constraints, free_edge_constraints
from tectosaur.util.timer import Timer
from tectosaur.ops.dense_integral_op import RegularizedDenseIntegralOp
from tectosaur.ops.sparse_integral_op import RegularizedSparseIntegralOp
from tectosaur.ops.sparse_farfield_op import FMMFarfieldOp, \
TriToTriDirectFarfieldOp
from tectosaur.ops.mass_op import MassOp
from tectosaur.ops.neg_op import MultOp
from tectosaur.ops.sum_op import SumOp
from tectosaur.check_for_problems import check_for_problems
import solve
from tectosaur.util.logging import setup_root_logger
logger = setup_root_logger(__name__)
class Okada:
def __init__(self, n_surf, n_fault, top_depth = 0.0, surface_w = 5.0,
fault_L = 1.0, gauss_z = 0.0, verbose = False):
log_level = logging.INFO
if verbose:
log_level = logging.DEBUG
tectosaur.logger.setLevel(log_level)
solve.logger.setLevel(log_level)
logger.setLevel(log_level)
self.k_params = [1.0, 0.25]
self.surface_w = surface_w
self.gauss_z = gauss_z
self.fault_L = fault_L
self.top_depth = top_depth
self.load_soln = False
self.float_type = np.float32
self.n_surf = n_surf
self.n_fault = n_fault#max(2, n_surf // 5)
self.all_mesh, self.surface_tris, self.fault_tris = make_meshes(
self.surface_w, self.fault_L, self.top_depth, self.n_surf, self.n_fault
)
logger.info('n_elements: ' + str(self.all_mesh[1].shape[0]))
self.n_surf_tris = self.surface_tris.shape[0]
self.n_fault_tris = self.fault_tris.shape[0]
self.n_tris = self.all_mesh[1].shape[0]
self.surf_tri_idxs = np.arange(self.n_surf_tris)
self.fault_tri_idxs = np.arange(self.n_surf_tris, self.n_tris)
self.n_surf_dofs = self.n_surf_tris * 9
self.n_dofs = self.n_tris * 9
# _,_,_,_ = check_for_problems(self.all_mesh, check = True)
def plot_mesh(self):
mesh_gen.plot_mesh3d(*self.all_mesh)
def run(self, build_and_solve = None):
if build_and_solve is None:
build_and_solve = build_and_solve_T
if not self.load_soln:
soln = build_and_solve(self)
np.save('okada.npy', soln)
else:
soln = np.load('okada.npy')
return soln
def okada_exact(self):
obs_pts = self.all_mesh[0]
sm, pr = self.k_params
lam = 2 * sm * pr / (1 - 2 * pr)
alpha = (lam + sm) / (lam + 2 * sm)
print(lam, sm, pr, alpha)
n_pts = obs_pts.shape[0]
u = np.zeros((n_pts, 3))
NX = 20
NY = 20
X_vals = np.linspace(-self.fault_L, self.fault_L, NX + 1)
        Y_vals = np.linspace(-1.0, 0.0, NY + 1)
for i in range(n_pts):
pt = obs_pts[i, :]
for j in range(NX):
X1 = X_vals[j]
X2 = X_vals[j+1]
midX = (X1 + X2) / 2.0
for k in range(NY):
Y1 = Y_vals[k]
Y2 = Y_vals[k+1]
midY = (Y1 + Y2) / 2.0
slip = gauss_slip_fnc(midX, midY + self.top_depth, self.gauss_z)
[suc, uv, grad_uv] = okada_wrapper.dc3dwrapper(
alpha, pt, -self.top_depth, 90.0,
[X1, X2], [Y1, Y2], [slip, 0.0, 0.0]
)
if suc != 0:
u[i, :] = 0
else:
u[i, :] += uv
return u
def xsec_plot(self, solns, okada_soln = None, show = True):
xsec_pts = []
xsec_idxs = []
xsec_vals = [[] for j in range(len(solns))]
xsec_vals_okada = []
for i in range(self.surface_tris.shape[0]):
for pt_idx in range(3):
p = self.all_mesh[0][self.surface_tris[i,pt_idx],:]
if np.abs(p[0]) > 0.001:
continue
xsec_pts.append(p)
for j in range(len(solns)):
xsec_vals[j].append([solns[j][i * 9 + pt_idx * 3 + d] for d in range(3)])
if okada_soln is not None:
xsec_vals_okada.append([
okada_soln[self.all_mesh[1][i,pt_idx]][d] for d in range(3)
])
xsec_idxs.append([i * 9 + pt_idx * 3 + d for d in range(3)])
xsec_pts = np.array(xsec_pts)
xsec_vals = np.array(xsec_vals)
xsec_vals_okada = np.array(xsec_vals_okada)
plt.figure()
for j in range(len(solns)):
plt.plot(xsec_pts[:,1], xsec_vals[j,:,0], 'o-', label = str(j))
if okada_soln is not None:
plt.plot(xsec_pts[:,1], xsec_vals_okada[:,0], 'o-', label = 'okada')
# plt.savefig('okada_xsec.pdf', bbox_inches = 'tight')
plt.legend()
if show:
plt.show()
def plot_interior_displacement(self, soln):
nxy = 40
nz = 40
d = 0
xs = np.linspace(-10, 10, nxy)
zs = np.linspace(-0.1, -4.0, nz)
X, Y, Z = np.meshgrid(xs, xs, zs)
obs_pts = np.array([X.flatten(), Y.flatten(), Z.flatten()]).T.copy()
t = Timer(output_fnc = logger.debug)
interior_disp = -interior_integral(
obs_pts, obs_pts, self.all_mesh, soln, 'elasticT3',
3, 8, self.k_params, self.float_type,
fmm_params = None#[100, 3.0, 3000, 25]
).reshape((nxy, nxy, nz, 3))
t.report('eval %.2E interior pts' % obs_pts.shape[0])
for i in range(nz):
plt.figure()
plt.pcolor(xs, xs, interior_disp[:,:,i,d])
plt.colorbar()
plt.title('at z = ' + ('%.3f' % zs[i]) + ' u' + ['x', 'y', 'z'][d])
plt.show()
def get_pt_soln(self, soln):
pt_soln = np.empty((self.all_mesh[0].shape[0], 3))
pt_soln[self.all_mesh[1]] = soln.reshape((-1, 3, 3))
return pt_soln
def plot_results(self, soln, okada_soln):
pts, tris = self.all_mesh
est = self.get_pt_soln(soln)
vmax = np.max(okada_soln)
for d in range(3):
plt.figure()
plt.tripcolor(
pts[:,0], pts[:, 1], tris,
est[:,d], #shading='gouraud',
cmap = 'PuOr', vmin = -vmax, vmax = vmax
)
plt.title("u " + ['x', 'y', 'z'][d])
plt.colorbar()
for d in range(3):
plt.figure()
plt.tripcolor(
pts[:, 0], pts[:, 1], tris,
okada_soln[:, d], #shading='gouraud',
cmap = 'PuOr', vmin = -vmax, vmax = vmax
)
plt.title("Okada u " + ['x', 'y', 'z'][d])
plt.colorbar()
for d in range(3):
plt.figure()
plt.tripcolor(
pts[:, 0], pts[:, 1], tris,
okada_soln[:, d] - est[:,d], #shading='gouraud',
cmap = 'PuOr'
)
plt.title("Diff u " + ['x', 'y', 'z'][d])
plt.colorbar()
plt.show()
def print_error(self, soln, okada_soln):
est = self.get_pt_soln(soln)
pts = self.all_mesh[0]
close = np.sqrt(np.sum(pts ** 2, axis = 1)) < 4.0
not0 = np.abs(pts[:,1]) > 1e-5
test = np.logical_and(close, not0)
diff = okada_soln[test,:] - est[test,:]
l2diff = np.sum(diff ** 2)
l2correct = np.sum(okada_soln[test,:] ** 2)
linferr = np.max(np.abs(diff))
print("L2diff: " + str(l2diff))
print("L2correct: " + str(l2correct))
print("L2relerr: " + str(l2diff / l2correct))
print("maxerr: " + str(linferr))
return linferr
def make_free_surface(w, n):
corners = [[-w, -w, 0], [-w, w, 0], [w, w, 0], [w, -w, 0]]
return mesh_gen.make_rect(n, n, corners)
def make_fault(L, top_depth, n_fault):
m = mesh_gen.make_rect(n_fault, n_fault, [
[-L, 0, top_depth], [-L, 0, top_depth - 1],
[L, 0, top_depth - 1], [L, 0, top_depth]
])
return m
def make_meshes(surf_w, fault_L, top_depth, n_surf, n_fault):
t = Timer(output_fnc = logger.debug)
surface = make_free_surface(surf_w, n_surf)
t.report('make free surface')
fault = make_fault(fault_L, top_depth, n_fault)
t.report('make fault')
all_mesh = mesh_modify.concat(surface, fault)
t.report('concat meshes')
surface_tris = all_mesh[1][:surface[1].shape[0]]
fault_tris = all_mesh[1][surface[1].shape[0]:]
return all_mesh, surface_tris, fault_tris
def gauss_slip_fnc(x, z, gauss_z):
return np.exp(-(x ** 2 + (z - gauss_z) ** 2) * 8.0)
def gauss_fault_slip(pts, fault_tris, gauss_z):
dof_pts = pts[fault_tris]
x = dof_pts[:,:,0]
z = dof_pts[:,:,2]
mean_z = np.mean(z)
slip = np.zeros((fault_tris.shape[0], 3, 3))
# slip[:,:,0] = (1 - np.abs(x)) * (1 - np.abs((z - mean_z) * 2.0))
# slip[:,:,1] = (1 - np.abs(x)) * (1 - np.abs((z - mean_z) * 2.0))
# slip[:,:,2] = (1 - np.abs(x)) * (1 - np.abs((z - mean_z) * 2.0))
slip[:,:,0] = gauss_slip_fnc(x, z, gauss_z)
# slip_pts = np.zeros(pts.shape[0])
# # slip_pts[fault_tris] = np.log10(np.abs(slip[:,:,0]))
# slip_pts[fault_tris] = slip[:,:,0]
# plt.tricontourf(pts[:,0], pts[:,2], fault_tris, slip_pts)
# plt.triplot(pts[:,0], pts[:,2], fault_tris)
# dof_pts = pts[fault_tris]
# plt.xlim([np.min(dof_pts[:,:,0]), np.max(dof_pts[:,:,0])])
# plt.ylim([np.min(dof_pts[:,:,2]), np.max(dof_pts[:,:,2])])
# plt.colorbar()
# plt.show()
# idxs = np.where(pts[:,2] == 0.0)[0]
# idxs = np.intersect1d(idxs, fault_tris.flatten())
# x = pts[idxs,0]
# s = slip_pts[idxs]
# plt.plot(x, s, '.')
# plt.show()
# for I in idxs:
# tri_idxs, basis_idxs = np.where(fault_tris == I)
# slip[tri_idxs, basis_idxs,0] = 0.0
return slip
def build_constraints(surface_tris, fault_tris, pts, tris, gauss_z):
n_surf_tris = surface_tris.shape[0]
n_fault_tris = fault_tris.shape[0]
side = get_side_of_fault(pts, tris, surface_tris.shape[0])
# plt.tripcolor(pts[:,0], pts[:,1], tris[:surface_tris.shape[0]], side[:surface_tris.shape[0]])
# plt.triplot(pts[:,0], pts[:,1], tris[surface_tris.shape[0]:], 'k-')
# plt.show()
from tectosaur.constraints import build_constraint_matrix
cs = continuity_constraints(pts, tris, surface_tris.shape[0])
slip = gauss_fault_slip(pts, fault_tris, gauss_z).flatten()
cs.extend(all_bc_constraints(
n_surf_tris, n_surf_tris + n_fault_tris, slip
))
# cm = build_constraint_matrix(cs, tris.shape[0] * 9)
# import ipdb
# ipdb.set_trace()
# import copy
# cs2 = copy.copy(cs)
# csf_idxs = [i for i in range(len(cs2)) if len(cs2[i].terms) == 3]
# from tectosaur.constraints import ConstraintEQ, Term
# xs = []
# ys = []
# for i in csf_idxs:
# old_c = cs2[i]
# fault_dof = old_c.terms[2].dof
# slip_val = slip[fault_dof - n_surf_tris * 9]
# if fault_dof % 3 == 0:
# corner_idx = (fault_dof // 3) % 3
# tri_idx = (fault_dof - corner_idx * 3) // 9 - n_surf_tris
# x = pts[fault_tris[tri_idx, corner_idx], 0]
# xs.append(x)
# ys.append(slip_val)
# new_c = ConstraintEQ(old_c.terms[:2], slip_val * -old_c.terms[2].val)
# cs2[i] = new_c
# from tectosaur.constraints import build_constraint_matrix
# cm, c_rhs = build_constraint_matrix(cs, tris.shape[0] * 9)
# cm2, c_rhs2 = build_constraint_matrix(cs2, tris.shape[0] * 9)
# plt.plot(c_rhs, 'k-')
# plt.plot(c_rhs2, 'b-')
# plt.show()
# import ipdb
# ipdb.set_trace()
return cs
def build_and_solve_T_regularized(data):
timer = Timer(output_fnc = logger.debug)
cs = build_constraints(
data.surface_tris, data.fault_tris, data.all_mesh[0],
data.all_mesh[1], data.gauss_z
)
timer.report("Constraints")
T_op = RegularizedSparseIntegralOp(
6, 6, 6, 2, 5, 2.5,
'elasticRT3', 'elasticRT3', data.k_params, data.all_mesh[0], data.all_mesh[1],
data.float_type,
# farfield_op_type = TriToTriDirectFarfieldOp
farfield_op_type = FMMFarfieldOp(mac = 2.5, pts_per_cell = 100, order = 2)
)
import tectosaur.fmm.tsfmm as tsfmm
tsfmm.report_interactions(T_op.farfield.fmm)
timer.report("Integrals")
mass_op = MultOp(MassOp(3, data.all_mesh[0], data.all_mesh[1]), 0.5)
iop = SumOp([T_op, mass_op])
timer.report('mass op/sum op')
soln = solve.iterative_solve(iop, cs, tol = 1e-5)
timer.report("Solve")
return soln
def build_and_solve_H_regularized(data):
timer = Timer(output_fnc = logger.debug)
cs = build_constraints(
data.surface_tris, data.fault_tris, data.all_mesh[0],
data.all_mesh[1], data.gauss_z
)
# For H, we need to constrain the edges of the surface to have 0 displacement.
cs.extend(free_edge_constraints(data.surface_tris))
timer.report("Constraints")
H_op = RegularizedSparseIntegralOp(
6, 6, 6, 2, 5, 2.5,
'elasticRH3', 'elasticRH3',
data.k_params, data.all_mesh[0], data.all_mesh[1], data.float_type,
# farfield_op_type = TriToTriDirectFarfieldOp
farfield_op_type = FMMFarfieldOp(mac = 2.5, pts_per_cell = 100, order = 2)
)
iop = SumOp([H_op])
timer.report("Integrals")
soln = solve.iterative_solve(iop, cs, tol = 1e-5)
timer.report("Solve")
return soln
def main():
#python examples/okada.py 31 7 5.0 0.0 0.0 T
#python examples/okada.py 31 7 5.0 0.0 0.0 H
#python examples/okada.py 31 7 5.0 -0.5 -1.0 T
#python examples/okada.py 31 7 5.0 -0.5 -1.0 H
t = Timer(output_fnc = logger.info)
obj = Okada(
int(sys.argv[1]), n_fault = int(sys.argv[2]),
surface_w = float(sys.argv[3]), top_depth = float(sys.argv[4]),
gauss_z = float(sys.argv[5]), verbose = True
)
# obj.plot_mesh()
if sys.argv[6] == 'T':
soln = obj.run(build_and_solve = build_and_solve_T_regularized)
else:
soln = obj.run(build_and_solve = build_and_solve_H_regularized)
t.report('tectosaur')
okada_soln = obj.okada_exact()
# np.save('okada_soln_for_plot.npy', [obj.all_mesh, obj.surface_tris, obj.fault_tris, soln, okada_soln])
t.report('okada')
obj.xsec_plot([soln], okada_soln)
# obj.plot_interior_displacement(soln)
obj.print_error(soln, okada_soln)
t.report('check')
# obj.plot_results(soln, okada_soln)
if __name__ == '__main__':
main()
```
#### File: tectosaur/examples/progressive_refine.py
```python
# NOTE: this excerpt relies on imports from elsewhere in the original file;
# the following module paths are assumed and may need adjusting:
import numpy as np
import scipy.spatial
import matplotlib.pyplot as plt
import tectosaur.mesh as mesh
def refined_free_surface():
w = 10
minsize = 0.02
slope = 400
maxsize = 0.02
pts = np.array([[-w, -w, 0], [w, -w, 0], [w, w, 0], [-w, w, 0]])
tris = np.array([[0, 1, 3], [3, 1, 2]])
addedpts = 4
it = 0
while addedpts > 0:
pts, tris = mesh.remove_duplicate_pts((pts, tris))
print(it, addedpts)
it += 1
newpts = pts.tolist()
newtris = []
addedpts = 0
# size = np.linalg.norm(np.cross(
# pts[tris[:, 1]] - pts[tris[:, 0]],
# pts[tris[:, 2]] - pts[tris[:, 0]]
# ), axis = 1) / 2.0
# centroid = np.mean(pts[tris], axis = 2)
# r2 = np.sum(centroid ** 2, axis = 1)
for i, t in enumerate(tris):
size = np.linalg.norm(np.cross(
pts[t[1]] - pts[t[0]],
pts[t[2]] - pts[t[0]]
)) / 2.0
centroid = np.mean(pts[t], axis = 0)
A = (centroid[0] / 1.5) ** 2 + (centroid[1] / 1.0) ** 2
# A = np.sum(centroid ** 2)
if (A < slope * size and size > minsize) or size > maxsize:
# print(r2[i], size[i])
# if r2[i] < size[i] and size[i] > minsize:
newidx = len(newpts)
newpts.extend([
(pts[t[0]] + pts[t[1]]) / 2,
(pts[t[1]] + pts[t[2]]) / 2,
(pts[t[2]] + pts[t[0]]) / 2
])
newtris.extend([
[t[0], newidx, newidx + 2],
[newidx, t[1], newidx + 1],
[newidx + 1, t[2], newidx + 2],
[newidx + 1, newidx + 2, newidx]
])
addedpts += 3
else:
newtris.append(t)
pts = np.array(newpts)
tris = np.array(newtris)
final_tris = scipy.spatial.Delaunay(np.array([pts[:,0],pts[:,1]]).T).simplices
plt.triplot(pts[:, 0], pts[:, 1], final_tris, linewidth = 0.5)
plt.show()
print('npts: ' + str(pts.shape[0]))
print('ntris: ' + str(final_tris.shape[0]))
return pts, final_tris
```
#### File: tectosaur/tectosaur/check_for_problems.py
```python
import numpy as np
import tectosaur.mesh.find_near_adj as find_near_adj
import cppimport.import_hook
import tectosaur.util.tri_tri_intersect as tri_tri_intersect
import tectosaur._check_for_problems as _check_for_problems
# The functions below also use the following names; these import paths are
# assumed here and may differ in the original source tree:
import tectosaur.nearfield.standardize as standardize
import tectosaur.nearfield.edge_adj_setup as edge_adj_setup
from tectosaur.nearfield.table_params import min_intersect_angle, table_min_internal_angle
def check_min_adj_angle(m, ea = None):
pts, tris = m
close_or_touch_pairs = find_near_adj.find_close_or_touching(pts, tris, pts, tris, 2.0)
nearfield_pairs, va, ea = find_near_adj.split_adjacent_close(close_or_touch_pairs, tris, tris)
bad_pairs = []
lower_lim = min_intersect_angle
upper_lim = (2 * np.pi - min_intersect_angle)
for pair in ea:
obs_clicks, obs_tri, src_clicks, src_flip, src_tri = \
edge_adj_setup.orient_adj_tris(pts, tris, pair[0], pair[1])
phi = edge_adj_setup.calc_adjacent_phi(obs_tri, src_tri)
if lower_lim <= phi <= upper_lim:
continue
bad_pairs.append(pair)
return np.array(bad_pairs, dtype = np.int64)
def check_for_slivers(m):
pts, tris = m
bad_tris = []
for i, t in enumerate(tris):
t_list = pts[t].tolist()
try:
out = standardize.standardize(t_list, table_min_internal_angle, True)
except standardize.BadTriangleException as e:
bad_tris.append(i)
return np.array(bad_tris, dtype = np.int64)
def check_tris_tall_enough(m):
pts, tris = m
bad_tris = []
for i, t in enumerate(tris):
t_list = pts[t].tolist()
split_pt = edge_adj_setup.get_split_pt(t_list);
xyhat = edge_adj_setup.xyhat_from_pt(split_pt, t_list)
if not edge_adj_setup.check_xyhat(xyhat):
bad_tris.append(i)
return np.array(bad_tris, dtype = np.int64)
def check_for_intersections_nearfield(pts, tris, nearfield_pairs):
if nearfield_pairs.shape[0] == 0:
return []
unique_near_pairs = np.unique(np.sort(nearfield_pairs, axis = 1), axis = 0)
bad_pairs = []
for pair in unique_near_pairs:
tri1 = pts[tris[pair[0]]].tolist()
tri2 = pts[tris[pair[1]]].tolist()
I = tri_tri_intersect.tri_tri_intersect(tri1, tri2)
if I:
bad_pairs.append(pair)
return bad_pairs
def check_for_intersections_va(pts, tris, va):
if va.shape[0] == 0:
return []
bad_pairs = []
for pair in va:
assert(tris[pair[0]][pair[2]] == tris[pair[1]][pair[3]])
tri1 = pts[tris[pair[0]]]
for edge in range(3):
if edge == pair[2]:
continue
tri1_moved = tri1.copy()
tri1_moved[pair[2]] += (tri1[edge] - tri1[pair[2]]) * 0.001
tri2 = pts[tris[pair[1]]]
I = tri_tri_intersect.tri_tri_intersect(
tri1_moved.tolist(), tri2.tolist()
)
if I:
bad_pairs.append(pair[:2])
break
return bad_pairs
def check_for_intersections_ea(pts, tris, ea):
if ea.shape[0] == 0:
return []
bad_pairs = []
for pair in ea:
for d in range(3):
if tris[pair[0],d] in tris[pair[1]]:
continue
dist = np.sqrt(np.sum((pts[tris[pair[1]]] - pts[tris[pair[0],d]]) ** 2, axis = 1))
if np.any(dist == 0):
bad_pairs.append(pair)
return bad_pairs
import logging
logger = logging.getLogger(__name__)
from tectosaur.util.timer import Timer
def check_for_intersections(m):
pts, tris = m
close_or_touch_pairs = find_near_adj.find_close_or_touching(pts, tris, pts, tris, 2.0)
nearfield_pairs, va, ea = find_near_adj.split_adjacent_close(close_or_touch_pairs, tris, tris)
bad_pairs = []
t = Timer(output_fnc = logger.info)
# Three situations:
# 1) Nearfield pair is intersection
bad_pairs.extend(check_for_intersections_nearfield(pts, tris, nearfield_pairs))
t.report('nearfield')
# 2) Vertex adjacent pair actually intersects beyond just the vertex
# We can test for this by moving the shared vertex short distance
# along one of the edges of the first triangle. If there is still
# an intersection, then the triangles intersect at locations besides
# just the shared vertex.
bad_pairs.extend(check_for_intersections_va(pts, tris, va))
t.report('va')
# 3) Edge adjacent pair is actually coincident. <-- Easy!
bad_pairs.extend(check_for_intersections_ea(pts, tris, ea))
t.report('ea')
return np.array(bad_pairs, dtype = np.int64)
def check_for_problems(m, check = False):
intersections = check_for_intersections(m)
slivers = check_for_slivers(m)
short_tris = check_tris_tall_enough(m)
sharp_angles = check_min_adj_angle(m)
if check and (intersections.shape[0] > 0
or slivers.shape[0] > 0
or short_tris.shape[0] > 0
or sharp_angles.shape[0] > 0):
raise Exception('Mesh has problems. Run with check = False to see the issues.')
return intersections, slivers, short_tris, sharp_angles
```
#### File: tectosaur/tectosaur/continuity.py
```python
import numpy as np
import scipy.sparse.csgraph
from tectosaur.util.geometry import tri_normal, unscaled_normals, normalize
from tectosaur.constraints import ConstraintEQ, Term
from tectosaur.stress_constraints import stress_constraints, stress_constraints2, \
equilibrium_constraint, constant_stress_constraint
def find_touching_pts(tris):
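    # Added note: returns a list indexed by point id; each entry collects the
    # (triangle index, local corner index) pairs of every triangle touching
    # that point.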
max_pt_idx = np.max(tris)
out = [[] for i in range(max_pt_idx + 1)]
for i, t in enumerate(tris):
for d in range(3):
out[t[d]].append((i, d))
return out
def tri_connectivity_graph(tris):
n_tris = tris.shape[0]
touching = [[] for i in range(np.max(tris) + 1)]
for i in range(n_tris):
for d in range(3):
touching[tris[i,d]].append(i)
rows = []
cols = []
for i in range(len(touching)):
for row in touching[i]:
for col in touching[i]:
rows.append(row)
cols.append(col)
rows = np.array(rows)
cols = np.array(cols)
connectivity = scipy.sparse.coo_matrix((np.ones(rows.shape[0]), (rows, cols)), shape = (n_tris, n_tris))
return connectivity
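# Added note: tri_side reports which side of tri1's plane the centroid of tri2
# lies on: 0 for the side the normal points to, 1 for the opposite side, and 2
# if the center-to-center direction is (numerically) parallel to the plane.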
def tri_side(tri1, tri2, threshold = 1e-12):
tri1_normal = tri_normal(tri1, normalize = True)
tri1_center = np.mean(tri1, axis = 0)
tri2_center = np.mean(tri2, axis = 0)
direction = tri2_center - tri1_center
direction /= np.linalg.norm(direction)
dot_val = direction.dot(tri1_normal)
if dot_val > threshold:
return 0
elif dot_val < -threshold:
return 1
else:
return 2
def get_side_of_fault(pts, tris, fault_start_idx):
connectivity = tri_connectivity_graph(tris)
fault_touching_pair = np.where(np.logical_and(
connectivity.row < fault_start_idx,
connectivity.col >= fault_start_idx
))[0]
side = np.zeros(tris.shape[0])
shared_verts = np.zeros(tris.shape[0])
fault_surf_tris = pts[tris[connectivity.col[fault_touching_pair]]]
for i in range(fault_touching_pair.shape[0]):
surf_tri_idx = connectivity.row[fault_touching_pair[i]]
surf_tri = tris[surf_tri_idx]
fault_tri = tris[connectivity.col[fault_touching_pair[i]]]
which_side = tri_side(pts[fault_tri], pts[surf_tri])
n_shared_verts = 0
for d in range(3):
if surf_tri[d] in fault_tri:
n_shared_verts += 1
if shared_verts[surf_tri_idx] < 2:
side[surf_tri_idx] = int(which_side) + 1
shared_verts[surf_tri_idx] = n_shared_verts
return side
#TODO: this function needs to know the idxs of the surface_tris and fault_tris, so use
# idx lists and pass the full tris array, currently using the (n_surf_tris * 9) hack!
#TODO: refactor and merge this with the traction continuity constraints
def continuity_constraints(pts, tris, fault_start_idx, tensor_dim = 3):
surface_tris = tris[:fault_start_idx]
fault_tris = tris[fault_start_idx:]
touching_pt = find_touching_pts(surface_tris)
side = get_side_of_fault(pts, tris, fault_start_idx)
constraints = []
for i, tpt in enumerate(touching_pt):
if len(tpt) == 0:
continue
for independent_idx in range(len(tpt)):
independent = tpt[independent_idx]
independent_tri_idx = independent[0]
independent_corner_idx = independent[1]
independent_tri = surface_tris[independent_tri_idx]
for dependent_idx in range(independent_idx + 1, len(tpt)):
dependent = tpt[dependent_idx]
dependent_tri_idx = dependent[0]
dependent_corner_idx = dependent[1]
dependent_tri = surface_tris[dependent_tri_idx]
# Check for anything that touches across the fault.
side1 = side[independent_tri_idx]
side2 = side[dependent_tri_idx]
crosses = (side1 != side2) and (side1 != 0) and (side2 != 0)
fault_tri_idx = None
if crosses:
fault_tri_idxs, fault_corner_idxs = np.where(
fault_tris == dependent_tri[dependent_corner_idx]
)
if fault_tri_idxs.shape[0] != 0:
fault_tri_idx = fault_tri_idxs[0]
fault_corner_idx = fault_corner_idxs[0]
# plt_pts = np.vstack((
# pts[independent_tri],
# pts[dependent_tri],
# pts[fault_tris[fault_tri_idx]]
# ))
# import matplotlib.pyplot as plt
# plt.tripcolor(pts[:,0], pts[:,1], tris[:surface_tris.shape[0]], side[:surface_tris.shape[0]])
# plt.triplot(plt_pts[:,0], plt_pts[:,1], np.array([[0,1,2]]), 'b-')
# plt.triplot(plt_pts[:,0], plt_pts[:,1], np.array([[3,4,5]]), 'k-')
# plt.triplot(pts[:,0], pts[:,1], tris[fault_start_idx:], 'r-')
# plt.show()
for d in range(tensor_dim):
independent_dof = (independent_tri_idx * 3 + independent_corner_idx) * tensor_dim + d
dependent_dof = (dependent_tri_idx * 3 + dependent_corner_idx) * tensor_dim + d
if dependent_dof <= independent_dof:
continue
diff = 0.0
terms = [Term(1.0, dependent_dof), Term(-1.0, independent_dof)]
if fault_tri_idx is not None:
fault_dof = (
fault_start_idx * 9 +
fault_tri_idx * 9 + fault_corner_idx * 3 + d
)
if side1 < side2:
terms.append(Term(-1.0, fault_dof))
else:
terms.append(Term(1.0, fault_dof))
constraints.append(ConstraintEQ(terms, 0.0))
return constraints
def traction_admissibility_constraints(pts, tris, fault_start_idx):
# At each vertex, there should be three remaining degrees of freedom.
# Initially, there are n_tris*3 degrees of freedom.
# So, we need (n_tris-1)*3 constraints.
touching_pt = find_touching_pts(tris)
ns = normalize(unscaled_normals(pts[tris]))
side = get_side_of_fault(pts, tris, fault_start_idx)
continuity_cs = []
admissibility_cs = []
for tpt in touching_pt:
if len(tpt) == 0:
continue
# Separate the triangles touching at the vertex into a groups
# by the normal vectors for each triangle.
normal_groups = []
for i in range(len(tpt)):
tri_idx = tpt[i][0]
n = ns[tri_idx]
joined = False
for j in range(len(normal_groups)):
if np.allclose(normal_groups[j][0], n):
tri_idx2 = tpt[normal_groups[j][1][0]][0]
side1 = side[tri_idx]
side2 = side[tri_idx2]
crosses = (side1 != side2) and (side1 != 0) and (side2 != 0)
fault_tri_idx = None
# if crosses:
# continue
normal_groups[j][1].append(i)
joined = True
break
if not joined:
normal_groups.append((n, [i]))
# Continuity within normal group
for i in range(len(normal_groups)):
group = normal_groups[i][1]
independent_idx = group[0]
independent = tpt[independent_idx]
independent_tri_idx = independent[0]
independent_corner_idx = independent[1]
independent_dof_start = independent_tri_idx * 9 + independent_corner_idx * 3
for j in range(1, len(group)):
dependent_idx = group[j]
dependent = tpt[dependent_idx]
dependent_tri_idx = dependent[0]
dependent_corner_idx = dependent[1]
dependent_dof_start = dependent_tri_idx * 9 + dependent_corner_idx * 3
for d in range(3):
terms = [
Term(1.0, dependent_dof_start + d),
Term(-1.0, independent_dof_start + d)
]
continuity_cs.append(ConstraintEQ(terms, 0.0))
if len(normal_groups) == 1:
# Only continuity needed!
continue
# assert(len(normal_groups) == 2)
# Add constant stress constraints
for i in range(len(normal_groups)):
tpt_idx1 = normal_groups[i][1][0]
tri_idx1 = tpt[tpt_idx1][0]
corner_idx1 = tpt[tpt_idx1][1]
tri1 = pts[tris[tri_idx1]]
tri_data1 = (tri1, tri_idx1, corner_idx1)
for j in range(i + 1, len(normal_groups)):
tpt_idx2 = normal_groups[j][1][0]
tri_idx2 = tpt[tpt_idx2][0]
# print(tri_idx1, tri_idx2)
corner_idx2 = tpt[tpt_idx2][1]
tri2 = pts[tris[tri_idx2]]
tri_data2 = (tri2, tri_idx2, corner_idx2)
# for c in new_cs:
# print(', '.join(['(' + str(t.val) + ',' + str(t.dof) + ')' for t in c.terms]) + ' rhs: ' + str(c.rhs))
admissibility_cs.append(constant_stress_constraint(tri_data1, tri_data2))
admissibility_cs.append(equilibrium_constraint(tri_data1))
admissibility_cs.append(equilibrium_constraint(tri_data2))
return continuity_cs, admissibility_cs
```
#### File: tectosaur/faultea/slip_vectors.py
```python
import numpy as np
from tectosaur.util.geometry import tri_normal
def get_slip_vectors(tri, vertical = [0, 0, 1]):
n = tri_normal(tri, normalize = True)
is_normal_vertical = n.dot(vertical) >= 1.0
if is_normal_vertical: # this means the fault plane is horizontal, so there is no "strike" and "dip"
raise Exception("fault plane is horizontal. strike and dip make no sense")
v1 = np.cross(n, vertical)
v1 /= np.linalg.norm(v1)
v2 = np.cross(n, v1)
v2 /= np.linalg.norm(v2)
return v1, v2
def test_slip_vec_easy():
v1, v2 = get_slip_vectors(np.array([[0,0,0],[1,0,0],[0,1,0]]))
def test_slip_vec_hard():
v1, v2 = get_slip_vectors(np.array([[0,0,0],[0,1,0],[0,0,1]]))
np.testing.assert_almost_equal(v1, [0,0,1])
np.testing.assert_almost_equal(v2, [0,-1,0])
def test_slip_vec_harder():
for i in range(10):
# random triangles should still follow these rules:
        # vecs should be perpendicular to each other and the normal
# and be normalized to unit length
tri = np.random.rand(3,3)
v1, v2 = get_slip_vectors(tri)
n = tri_normal(tri, normalize = True)
np.testing.assert_almost_equal(np.linalg.norm(v1), 1.0)
np.testing.assert_almost_equal(np.linalg.norm(v2), 1.0)
np.testing.assert_almost_equal(v1.dot(v2), 0.0)
np.testing.assert_almost_equal(v1.dot(n), 0.0)
np.testing.assert_almost_equal(v2.dot(n), 0.0)
```
#### File: tectosaur/fmm/builder.py
```python
import numpy as np
import tectosaur.util.gpu as gpu
from tectosaur.fmm.c2e import build_c2e
import logging
logger = logging.getLogger(__name__)
def make_tree(m, cfg, max_pts_per_cell):
tri_pts = m[0][m[1]]
centers = np.mean(tri_pts, axis = 1)
pt_dist = tri_pts - centers[:,np.newaxis,:]
Rs = np.max(np.linalg.norm(pt_dist, axis = 2), axis = 1)
tree = cfg.traversal_module.Tree.build(centers, Rs, max_pts_per_cell)
return tree
class FMM:
def __init__(self, obs_tree, obs_m, src_tree, src_m, cfg):
self.cfg = cfg
self.obs_tree = obs_tree
self.obs_m = obs_m
self.src_tree = src_tree
self.src_m = src_m
self.gpu_data = dict()
self.setup_interactions()
self.collect_gpu_ops()
self.setup_output_sizes()
self.params_to_gpu()
self.tree_to_gpu(obs_m, src_m)
self.interactions_to_gpu()
self.d2e_u2e_ops_to_gpu()
def setup_interactions(self):
self.interactions = self.cfg.traversal_module.fmmmm_interactions(
self.obs_tree, self.src_tree, self.cfg.inner_r, self.cfg.outer_r,
self.cfg.order, self.cfg.treecode
)
def collect_gpu_ops(self):
self.gpu_ops = dict()
for a in ['s', 'p']:
for b in ['s', 'p']:
name = a + '2' + b
self.gpu_ops[name] = getattr(self.cfg.gpu_module, name + '_' + self.cfg.K.name)
self.gpu_ops['c2e1'] = self.cfg.gpu_module.c2e_kernel1
self.gpu_ops['c2e2'] = self.cfg.gpu_module.c2e_kernel2
def setup_output_sizes(self):
self.n_surf_tris = self.cfg.surf[1].shape[0]
self.n_surf_dofs = self.n_surf_tris * 9
self.n_multipoles = self.n_surf_dofs * self.src_tree.n_nodes
self.n_locals = self.n_surf_dofs * self.obs_tree.n_nodes
self.n_input = self.src_m[1].shape[0] * 9
self.n_output = self.obs_m[1].shape[0] * 9
def float_gpu(self, arr):
return gpu.to_gpu(arr, self.cfg.float_type)
def int_gpu(self, arr):
return gpu.to_gpu(arr, np.int32)
def params_to_gpu(self):
self.gpu_data['params'] = self.float_gpu(self.cfg.params)
def tree_to_gpu(self, obs_m, src_m):
gd = self.gpu_data
gd['obs_pts'] = self.float_gpu(obs_m[0])
gd['obs_tris'] = self.int_gpu(obs_m[1][self.obs_tree.orig_idxs])
gd['src_pts'] = self.float_gpu(src_m[0])
gd['src_tris'] = self.int_gpu(src_m[1][self.src_tree.orig_idxs])
obs_tree_nodes = self.obs_tree.nodes
src_tree_nodes = self.src_tree.nodes
for name, tree in [('src', self.src_tree), ('obs', self.obs_tree)]:
gd[name + '_n_C'] = self.float_gpu(tree.node_centers)
gd[name + '_n_R'] = self.float_gpu(tree.node_Rs)
for name, tree in [('src', src_tree_nodes), ('obs', obs_tree_nodes)]:
gd[name + '_n_start'] = self.int_gpu(np.array([n.start for n in tree]))
gd[name + '_n_end'] = self.int_gpu(np.array([n.end for n in tree]))
def interactions_to_gpu(self):
op_names = ['p2p', 'p2m', 'p2l', 'm2p', 'm2m', 'm2l', 'l2p', 'l2l']
for name in op_names:
op = getattr(self.interactions, name)
if type(op) is list:
for i, op_level in enumerate(op):
self.op_to_gpu(name + str(i), op_level)
else:
self.op_to_gpu(name, op)
def op_to_gpu(self, name, op):
for data_name in ['obs_n_idxs', 'obs_src_starts', 'src_n_idxs']:
self.gpu_data[name + '_' + data_name] = self.int_gpu(
np.array(getattr(op, data_name), copy = False)
)
def d2e_u2e_ops_to_gpu(self):
gd = self.gpu_data
gd['u2e_obs_n_idxs'] = [
self.int_gpu(np.array(self.interactions.u2e[level].obs_n_idxs, copy = False))
for level in range(len(self.interactions.m2m))
]
gd['d2e_obs_n_idxs'] = [
self.int_gpu(np.array(self.interactions.d2e[level].obs_n_idxs, copy = False))
for level in range(len(self.interactions.l2l))
]
u2e_UT, u2e_E, u2e_V = build_c2e(
self.src_tree, self.cfg.outer_r, self.cfg.inner_r, self.cfg
)
gd['u2e_V'] = self.float_gpu(u2e_V)
gd['u2e_E'] = self.float_gpu(u2e_E)
gd['u2e_UT'] = self.float_gpu(u2e_UT)
d2e_UT, d2e_E, d2e_V = build_c2e(
self.obs_tree, self.cfg.inner_r, self.cfg.outer_r, self.cfg
)
gd['d2e_V'] = self.float_gpu(d2e_V)
gd['d2e_E'] = self.float_gpu(d2e_E)
gd['d2e_UT'] = self.float_gpu(d2e_UT)
def to_tree(self, input_orig):
orig_idxs = np.array(self.src_tree.orig_idxs)
input_orig = input_orig.reshape((-1,9))
return input_orig[orig_idxs,:].flatten()
def to_orig(self, output_tree):
orig_idxs = np.array(self.obs_tree.orig_idxs)
output_tree = output_tree.reshape((-1, 9))
output_orig = np.empty_like(output_tree)
output_orig[orig_idxs,:] = output_tree
return output_orig.flatten()
def report_interactions(fmm_obj):
dim = fmm_obj.obs_m[1].shape[1]
order = fmm_obj.cfg.surf[1].shape[0]
def count_interactions(op_name, op):
obs_surf = False if op_name[2] == 'p' else True
src_surf = False if op_name[0] == 'p' else True
return fmm_obj.cfg.traversal_module.count_interactions(
op, fmm_obj.obs_tree, fmm_obj.src_tree,
obs_surf, src_surf, order
)
n_obs_tris = fmm_obj.obs_m[1].shape[0]
n_src_tris = fmm_obj.src_m[1].shape[0]
level_ops = ['m2m', 'l2l']
ops = ['p2m', 'p2l', 'm2l', 'p2p', 'm2p', 'l2p']
interactions = dict()
for op_name in ops:
op = getattr(fmm_obj.interactions, op_name)
interactions[op_name] = count_interactions(op_name, op)
for op_name in level_ops:
ops = getattr(fmm_obj.interactions, op_name)
for op in ops:
if op_name not in interactions:
interactions[op_name] = 0
interactions[op_name] += count_interactions(op_name, op)
direct_i = n_obs_tris * n_src_tris
fmm_i = sum([v for k,v in interactions.items()])
logger.info('compression factor: ' + str(fmm_i / direct_i))
logger.info('# obs tris: ' + str(n_obs_tris))
logger.info('# src tris: ' + str(n_src_tris))
logger.info('total tree interactions: %e' % fmm_i)
for k, v in interactions.items():
logger.info('total %s interactions: %e' % (k, v))
```
#### File: tectosaur/kernels/sympy_to_cpp.py
```python
import sympy
import mpmath
def cpp_binop(op):
def _cpp_binop(expr):
out = '(' + to_cpp(expr.args[0])
for arg in expr.args[1:]:
out += op + to_cpp(arg)
out += ')'
return out
return _cpp_binop
def cpp_func(f_name):
def _cpp_func(expr):
return f_name + cpp_binop(',')(expr)
return _cpp_func
def cpp_pow(expr):
if expr.args[1] == -1:
return '(1 / ' + to_cpp(expr.args[0]) + ')'
elif expr.args[1] == 2:
a = to_cpp(expr.args[0])
return '({a} * {a})'.format(a = a)
elif expr.args[1] == -2:
a = to_cpp(expr.args[0])
return '(1 / ({a} * {a}))'.format(a = a)
elif expr.args[1] == -0.5:
a = to_cpp(expr.args[0])
return '(1.0 / sqrt({a}))'.format(a = a)
elif expr.args[1] == 0.5:
a = to_cpp(expr.args[0])
return '(sqrt({a}))'.format(a = a)
elif expr.args[1] == -1.5:
a = to_cpp(expr.args[0])
return '(1.0 / (sqrt({a}) * sqrt({a}) * sqrt({a})))'.format(a = a)
else:
return cpp_func('pow')(expr)
to_cpp_map = dict()
to_cpp_map[sympy.Mul] = cpp_binop('*')
to_cpp_map[sympy.Add] = cpp_binop('+')
to_cpp_map[sympy.Symbol] = lambda e: str(e)
to_cpp_map[sympy.Number] = lambda e: mpmath.nstr(float(e), 17)
to_cpp_map[sympy.numbers.Pi] = lambda e: 'M_PI'
to_cpp_map[sympy.NumberSymbol] = lambda e: str(e)
to_cpp_map[sympy.Function] = lambda e: cpp_func(str(e.func))(e)
to_cpp_map[sympy.Pow] = cpp_pow
def to_cpp(expr):
if expr.func in to_cpp_map:
return to_cpp_map[expr.func](expr)
for k in to_cpp_map:
if issubclass(expr.func, k):
return to_cpp_map[k](expr)
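# Added usage sketch (illustrative, not part of the original file):
#   x = sympy.symbols('x')
#   to_cpp(x ** 2)    # -> '(x * x)'
#   to_cpp(x ** -1)   # -> '(1 / x)'
#   to_cpp(x ** 0.5)  # -> '(sqrt(x))'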
```
#### File: tectosaur/nearfield/nearfield_op.py
```python
import scipy.sparse
import numpy as np
import tectosaur.mesh.find_near_adj as find_near_adj
from tectosaur.nearfield.pairs_integrator import PairsIntegrator
from tectosaur.util.timer import Timer
import tectosaur.util.sparse as sparse
import tectosaur.util.gpu as gpu
import logging
logger = logging.getLogger(__name__)
def any_nearfield(pts, tris, obs_subset, src_subset, near_threshold):
close_or_touch_pairs = find_near_adj.find_close_or_touching(
pts, tris[obs_subset], pts, tris[src_subset], near_threshold
)
nearfield_pairs_dofs, va_dofs, ea_dofs = find_near_adj.split_adjacent_close(
close_or_touch_pairs, tris[obs_subset], tris[src_subset]
)
return nearfield_pairs_dofs.shape[0] > 0
def to_dof_space(tri_indices, obs_subset, src_subset):
dof_space_indices = []
for pair in tri_indices:
try:
dof_space_indices.append([
np.where(obs_subset == pair[0])[0][0],
np.where(src_subset == pair[1])[0][0]
])
except:
import ipdb
ipdb.set_trace()
dof_space_indices = np.array(dof_space_indices)
if dof_space_indices.shape[0] == 0:
        dof_space_indices = np.empty((0, 2), dtype = int)
return dof_space_indices
def to_tri_space(dof_indices, obs_subset, src_subset):
tri_idxs = np.array([obs_subset[dof_indices[:,0]], src_subset[dof_indices[:,1]]]).T
return np.concatenate((tri_idxs, dof_indices[:,2:]), axis = 1)
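# Added note: edge_adj_orient maps the two local vertex indices that form the
# shared edge of a triangle to the rotation ("clicks") that brings that edge
# to the front of the triangle: {0,1} -> 0, {1,2} -> 1, {0,2} -> 2.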
def edge_adj_orient(touching_verts):
tv = sorted(touching_verts)
if tv[0] == 0:
if tv[1] == 2:
return 2
return 0
return 1
def resolve_ea_rotation(tris, ea):
out = []
for i in range(ea.shape[0]):
obs_clicks = edge_adj_orient([ea[i,2], ea[i,4]])
src_clicks = edge_adj_orient([ea[i,3], ea[i,5]])
src_flip = False
if tris[ea[i,0], (0 + obs_clicks) % 3] != tris[ea[i,1], (1 + src_clicks) % 3] or \
tris[ea[i,0], (1 + obs_clicks) % 3] != tris[ea[i,1], (0 + src_clicks) % 3]:
src_flip = True
out.append((ea[i,0], ea[i,1], obs_clicks, src_clicks, src_flip))
return np.array(out)
def build_nearfield(shape, *mats):
out = []
for entries, pairs in mats:
if entries.shape[0] == 0:
entries = np.empty((0, 9, 9))
else:
entries = entries.reshape((-1, 9, 9))
bcoo = sparse.BCOOMatrix(pairs[:, 0], pairs[:, 1], entries, shape)
out.append(bcoo)
return out
class RegularizedNearfieldIntegralOp:
def __init__(self, pts, tris, obs_subset, src_subset,
nq_coincident, nq_edge_adj, nq_vert_adjacent,
nq_far, nq_near, near_threshold,
K_near_name, K_far_name, params, float_type):
n_obs_dofs = obs_subset.shape[0] * 9
n_src_dofs = src_subset.shape[0] * 9
self.shape = (n_obs_dofs, n_src_dofs)
timer = Timer(output_fnc = logger.debug, tabs = 1)
pairs_int = PairsIntegrator(
K_near_name, params, float_type, nq_far, nq_near, pts, tris
)
correction_pairs_int = PairsIntegrator(
K_far_name, params, float_type, nq_far, nq_near, pts, tris
)
timer.report('setup pairs integrator')
co_tris = np.intersect1d(obs_subset, src_subset)
co_indices = np.array([co_tris, co_tris]).T.copy()
co_dofs = to_dof_space(co_indices, obs_subset, src_subset)
co_mat = pairs_int.coincident(nq_coincident, co_indices)
timer.report("Coincident")
co_mat_correction = correction_pairs_int.correction(co_indices, True)
timer.report("Coincident correction")
close_or_touch_pairs = find_near_adj.find_close_or_touching(
pts, tris[obs_subset], pts, tris[src_subset], near_threshold
)
nearfield_pairs_dofs, va_dofs, ea_dofs = find_near_adj.split_adjacent_close(
close_or_touch_pairs, tris[obs_subset], tris[src_subset]
)
nearfield_pairs = to_tri_space(nearfield_pairs_dofs, obs_subset, src_subset)
va = to_tri_space(va_dofs, obs_subset, src_subset)
va = np.hstack((va, np.zeros((va.shape[0], 1))))
ea = resolve_ea_rotation(tris, to_tri_space(ea_dofs, obs_subset, src_subset))
timer.report("Find nearfield/adjacency")
ea_mat_rot = pairs_int.edge_adj(nq_edge_adj, ea)
timer.report("Edge adjacent")
if ea.shape[0] == 0:
ea_mat_correction = 0 * ea_mat_rot
else:
ea_mat_correction = correction_pairs_int.correction(ea[:,:2], False)
timer.report("Edge adjacent correction")
va_mat_rot = pairs_int.vert_adj(nq_vert_adjacent, va)
timer.report("Vert adjacent")
va_mat_correction = correction_pairs_int.correction(va[:,:2], False)
timer.report("Vert adjacent correction")
nearfield_mat = pairs_int.nearfield(nearfield_pairs)
timer.report("Nearfield")
nearfield_correction = correction_pairs_int.correction(nearfield_pairs, False)
timer.report("Nearfield correction")
self.mat = build_nearfield(
self.shape,
(co_mat - co_mat_correction, co_dofs),
(ea_mat_rot - ea_mat_correction, ea_dofs[:,:2]),
(va_mat_rot - va_mat_correction, va_dofs[:,:2]),
(nearfield_mat - nearfield_correction, nearfield_pairs_dofs)
)
timer.report("Assemble matrix")
self.mat_no_correction = build_nearfield(
self.shape,
(co_mat, co_dofs),
(ea_mat_rot, ea_dofs[:,:2]),
(va_mat_rot, va_dofs[:,:2]),
(nearfield_mat, nearfield_pairs_dofs),
)
timer.report("Assemble uncorrected matrix")
def full_scipy_mat(self):
return sum([m.to_bsr().to_scipy() for m in self.mat])
def full_scipy_mat_no_correction(self):
return sum([m.to_bsr().to_scipy() for m in self.mat_no_correction])
def dot(self, v):
return sum(arr.dot(v) for arr in self.mat)
def nearfield_no_correction_dot(self, v):
return sum(arr.dot(v) for arr in self.mat_no_correction)
def to_dense(self):
return sum([mat.to_bsr().to_scipy().todense() for mat in self.mat])
def no_correction_to_dense(self):
return sum([mat.to_bsr().to_scipy().todense() for mat in self.mat_no_correction])
class NearfieldIntegralOp:
def __init__(self, pts, tris, obs_subset, src_subset,
nq_vert_adjacent, nq_far, nq_near, near_threshold,
kernel, params, float_type):
n_obs_dofs = obs_subset.shape[0] * 9
n_src_dofs = src_subset.shape[0] * 9
self.shape = (n_obs_dofs, n_src_dofs)
timer = Timer(output_fnc = logger.debug, tabs = 1)
pairs_int = PairsIntegrator(kernel, params, float_type, nq_far, nq_near, pts, tris)
timer.report('setup pairs integrator')
co_tris = np.intersect1d(obs_subset, src_subset)
co_indices = np.array([co_tris, co_tris]).T.copy()
co_dofs = to_dof_space(co_indices, obs_subset, src_subset)
co_mat = coincident_table(kernel, params, pts[tris[co_tris]], float_type)
timer.report("Coincident")
co_mat_correction = pairs_int.correction(co_indices, True)
timer.report("Coincident correction")
close_or_touch_pairs = find_near_adj.find_close_or_touching(
pts, tris[obs_subset], pts, tris[src_subset], near_threshold
)
nearfield_pairs_dofs, va_dofs, ea_dofs = find_near_adj.split_adjacent_close(
close_or_touch_pairs, tris[obs_subset], tris[src_subset]
)
nearfield_pairs = to_tri_space(nearfield_pairs_dofs, obs_subset, src_subset)
va = to_tri_space(va_dofs, obs_subset, src_subset)
ea = to_tri_space(ea_dofs, obs_subset, src_subset)
timer.report("Find nearfield/adjacency")
ea_mat_rot = adjacent_table(nq_vert_adjacent, kernel, params, pts, tris, ea, float_type)
timer.report("Edge adjacent")
ea_mat_correction = pairs_int.correction(ea, False)
timer.report("Edge adjacent correction")
va_mat_rot = pairs_int.vert_adj(nq_vert_adjacent, va)
timer.report("Vert adjacent")
va_mat_correction = pairs_int.correction(va[:,:2], False)
timer.report("Vert adjacent correction")
nearfield_mat = pairs_int.nearfield(nearfield_pairs)
timer.report("Nearfield")
nearfield_correction = pairs_int.correction(nearfield_pairs, False)
timer.report("Nearfield correction")
self.mat = build_nearfield(
self.shape,
(co_mat - co_mat_correction, co_dofs),
(ea_mat_rot - ea_mat_correction, ea_dofs[:,:2]),
(va_mat_rot - va_mat_correction, va_dofs[:,:2]),
(nearfield_mat - nearfield_correction, nearfield_pairs_dofs)
)
timer.report("Assemble matrix")
self.mat_no_correction = build_nearfield(
self.shape,
(co_mat, co_dofs),
(ea_mat_rot, ea_dofs[:,:2]),
(va_mat_rot, va_dofs[:,:2]),
(nearfield_mat, nearfield_pairs_dofs),
)
timer.report("Assemble uncorrected matrix")
def dot(self, v):
return sum(arr.dot(v) for arr in self.mat)
def nearfield_no_correction_dot(self, v):
return sum(arr.dot(v) for arr in self.mat_no_correction)
def to_dense(self):
return sum([mat.to_bsr().to_scipy().todense() for mat in self.mat])
def no_correction_to_dense(self):
return sum([mat.to_bsr().to_scipy().todense() for mat in self.mat_no_correction])
```
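`to_dof_space` above looks up each triangle pair inside `obs_subset`/`src_subset` with a per-pair `np.where` search. Below is a hedged sketch of a vectorized equivalent via `np.searchsorted`; it assumes, as the call sites above do, that every queried triangle is actually contained in the subset, and the helper name is illustrative.
```python
import numpy as np

def subset_positions(values, subset):
    # Index of each entry of `values` inside `subset` (every value must be present).
    order = np.argsort(subset)
    pos = np.searchsorted(subset, values, sorter=order)
    idxs = order[pos]
    assert np.all(subset[idxs] == values), 'some values are missing from the subset'
    return idxs

obs_subset = np.array([7, 3, 11])
pairs = np.array([[3, 11], [7, 7]])
print(subset_positions(pairs[:, 0], obs_subset))   # [1 0]
print(subset_positions(pairs[:, 1], obs_subset))   # [2 0]
```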
#### File: tectosaur/nearfield/triangle_rules.py
```python
import tectosaur.util.quadrature as quad
from tectosaur.util.paget import paget
import numpy as np
def vertex_interior_quad(n_theta, n_rho, a):
# return quad.gauss2d_tri(10)
theta_quad = quad.map_to(quad.gaussxw(n_theta), [0, np.pi / 2])
if a == 0:
# rho_quad = quad.telles_singular(n_rho, -1)
# rho_quad = quad.gaussxw(n_rho)
rho_quad = quad.tanh_sinh(n_rho)
else:
rho_quad = paget(n_rho, a)
pts = []
wts = []
for t, tw in zip(*theta_quad):
rho_max = 1.0 / (np.cos(t) + np.sin(t))
q = quad.map_to(rho_quad, [0, rho_max])
for r, rw in zip(*q):
assert(r > 0)
srcxhat = r * np.cos(t)
srcyhat = r * np.sin(t)
pts.append((srcxhat, srcyhat))
# The r**2 comes from two factors:
# 1) The first r comes from the jacobian for the polar transform
# 2) The second r corrects for the implicit 1/r in the quad rule
weight = tw * rw * (r ** (a + 1))
wts.append(weight)
return np.array(pts), np.array(wts)
def vertex_adj_quad(n_theta, n_beta, n_alpha):
theta_quad = quad.gaussxw(n_theta)
beta_quad = quad.gaussxw(n_beta)
alpha_quad = quad.gaussxw(n_alpha)
def rho_max(theta):
return 1.0 / (np.cos(theta) + np.sin(theta))
pts = []
wts = []
def alpha_integral(w, tp, tq, b, alpha_max):
q = quad.map_to(alpha_quad, [0, alpha_max])
for a, aw in zip(*q):
jacobian = a ** 3 * np.cos(b) * np.sin(b)
rho_p = a * np.cos(b)
rho_q = a * np.sin(b)
obsxhat = rho_p * np.cos(tp)
obsyhat = rho_p * np.sin(tp)
srcxhat = rho_q * np.cos(tq)
srcyhat = rho_q * np.sin(tq)
pts.append((obsxhat, obsyhat, srcxhat, srcyhat))
wts.append(jacobian * aw * w)
def beta_integral(w, tp, tq, beta_min, beta_max, alpha_max_fnc):
q = quad.map_to(beta_quad, [beta_min, beta_max])
for b, bw in zip(*q):
alpha_max = alpha_max_fnc(b)
alpha_integral(bw * w, tp, tq, b, alpha_max)
def theta_q_integral(w, tp):
q = quad.map_to(theta_quad, [0, np.pi / 2])
for tq, tqw in zip(*q):
beta_split = np.arctan(rho_max(tq) / rho_max(tp))
beta_integral(w * tqw, tp, tq, 0, beta_split,
lambda b: rho_max(tp) / np.cos(b))
beta_integral(w * tqw, tp, tq, beta_split, np.pi / 2,
lambda b: rho_max(tq) / np.sin(b))
def theta_p_integral():
q = quad.map_to(theta_quad, [0, np.pi / 2])
for tp, tpw in zip(*q):
theta_q_integral(tpw, tp)
theta_p_integral()
return np.array(pts), np.array(wts)
def coincident_quad(nq):
"""
Coincident quadrature rule from <NAME> Sauter (1998):
Efficient automatic quadrature in 3D Galerkin BEM
"""
qtri = quad.gauss2d_tri(nq)
qline = quad.map_to(quad.gaussxw(nq), [0.0, 1.0])
mappings = [
lambda eta, w1, w2, w3: (
w1 + w2 + w3,
w1 + w2,
w1 * (1 - eta) + w2 + w3,
w2
),
lambda eta, w1, w2, w3: (
w1 * (1 - eta) + w2 + w3,
w1 * (1 - eta) + w2,
w1 + w2 + w3,
w2
),
lambda eta, w1, w2, w3: (
w1 + w2 + w3,
w1 * eta + w2,
w2 + w3,
w2
),
lambda eta, w1, w2, w3: (
w1 * (1 - eta) + w2 + w3,
w2,
w1 + w2 + w3,
w1 + w2,
),
lambda eta, w1, w2, w3: (
w1 + w2 + w3,
w2,
w1 * (1 - eta) + w2 + w3,
w1 * (1 - eta) + w2
),
lambda eta, w1, w2, w3: (
w2 + w3,
w2,
w1 + w2 + w3,
w1 * eta + w2
)
]
pts = []
wts = []
for i in range(len(mappings)):
m = mappings[i]
for w12_idx in range(qtri[0].shape[0]):
w1 = qtri[0][w12_idx,0]
w2 = qtri[0][w12_idx,1]
f12 = qtri[1][w12_idx]
for x3, f3 in zip(*qline):
w3 = x3 * (1 - w1 - w2)
f3 *= (1 - w1 - w2)
for eta, feta in zip(*qline):
F = f12 * f3 * feta * w1
x1, x2, y1, y2 = m(eta, w1, w2, w3)
obsxhat = 1 - x1
obsyhat = x2
srcxhat = 1 - y1
srcyhat = y2
pts.append((obsxhat, obsyhat, srcxhat, srcyhat))
wts.append(F)
return np.array(pts), np.array(wts)
def edge_adj_quad(nq):
"""
Edge adjacent quadrature rule from Sauter and Schwab book.
"""
qtri = quad.gauss2d_tri(nq)
qline = quad.map_to(quad.gaussxw(nq), [0.0, 1.0])
mappings = [
lambda e1, e2, e3, P: (
P,
P * e1 * e3,
P * (1 - e1 * e2),
P * e1 * (1 - e2),
P ** 3 * e1 ** 2
),
lambda e1, e2, e3, P: (
P,
P * e1,
P * (1 - e1 * e2 * e3),
P * e1 * e2 * (1 - e3),
P ** 3 * e1 ** 2 * e2
),
lambda e1, e2, e3, P: (
P * (1 - e1 * e2),
P * e1 * (1 - e2),
P,
P * e1 * e2 * e3,
P ** 3 * e1 ** 2 * e2
),
lambda e1, e2, e3, P: (
P * (1 - e1 * e2 * e3),
P * e1 * e2 * (1 - e3),
P,
P * e1,
P ** 3 * e1 ** 2 * e2
),
lambda e1, e2, e3, P: (
P * (1 - e1 * e2 * e3),
P * e1 * (1 - e2 * e3),
P,
P * e1 * e2,
P ** 3 * e1 ** 2 * e2
),
]
pts = []
wts = []
for i in range(len(mappings)):
m = mappings[i]
for e1, f1 in zip(*qline):
for e2, f2 in zip(*qline):
for e3, f3 in zip(*qline):
for P, f4 in zip(*qline):
F = f1 * f2 * f3 * f4
x1, x2, y1, y2, jac = m(e1, e2, e3, P)
F *= jac
obsxhat = 1 - x1
obsyhat = x2
srcxhat = 1 - y1
srcyhat = y2
srcxhat = (1 - srcyhat) - srcxhat
pts.append((obsxhat, obsyhat, srcxhat, srcyhat))
wts.append(F)
return np.array(pts), np.array(wts)
```
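`vertex_interior_quad` above relies on the polar-coordinate jacobian `r` cancelling one power of the `1/r` vertex singularity (see the comment inside its loop). The snippet below is a self-contained numpy check of that cancellation for the integral of `1/r` over the unit triangle, using plain Gauss-Legendre rules rather than the library's quadrature helpers.
```python
import numpy as np

def gauss_on(n, a, b):
    # n-point Gauss-Legendre rule mapped from [-1, 1] to [a, b].
    x, w = np.polynomial.legendre.leggauss(n)
    return 0.5 * (b - a) * x + 0.5 * (a + b), 0.5 * (b - a) * w

# Integrate 1/r over the unit triangle {x >= 0, y >= 0, x + y <= 1}.
# In polar coordinates the area element r dr dtheta cancels the 1/r singularity,
# leaving the integral over theta of rho_max(theta) = 1 / (cos(theta) + sin(theta)).
total = 0.0
theta, theta_w = gauss_on(20, 0.0, np.pi / 2)
for t, tw in zip(theta, theta_w):
    rho_max = 1.0 / (np.cos(t) + np.sin(t))
    r, rw = gauss_on(20, 0.0, rho_max)
    total += tw * np.sum(rw * (1.0 / r) * r)   # the explicit r is the polar jacobian

exact = np.sqrt(2) * np.log(1 + np.sqrt(2))
print(total, exact)   # both ~1.2465
```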
#### File: tectosaur/ops/composite_op.py
```python
import numpy as np
class CompositeOp:
def __init__(self, *ops_and_starts, shape = None):
self.ops = [el[0] for el in ops_and_starts]
self.row_start = [el[1] for el in ops_and_starts]
self.col_start = [el[2] for el in ops_and_starts]
n_rows = max([el[1] + el[0].shape[0] for el in ops_and_starts])
n_cols = max([el[2] + el[0].shape[1] for el in ops_and_starts])
if shape is None:
self.shape = (n_rows, n_cols)
else:
self.shape = shape
def generic_dot(self, v, dot_name):
out = np.zeros([self.shape[0]] + list(v.shape[1:]))
for i in range(len(self.ops)):
op = self.ops[i]
start_row_idx = self.row_start[i]
end_row_idx = start_row_idx + op.shape[0]
start_col_idx = self.col_start[i]
end_col_idx = start_col_idx + op.shape[1]
input_v = v[start_col_idx:end_col_idx]
out[start_row_idx:end_row_idx] += getattr(op, dot_name)(input_v)
return out
def nearfield_dot(self, v):
return self.generic_dot(v, "nearfield_dot")
def nearfield_no_correction_dot(self, v):
return self.generic_dot(v, "nearfield_no_correction_dot")
def dot(self, v):
return self.generic_dot(v, "dot")
def farfield_dot(self, v):
return self.generic_dot(v, "farfield_dot")
```
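`CompositeOp` places sub-operators at row/column offsets and accumulates their contributions during a matvec. Below is a hedged usage sketch of that accumulation with two dense numpy blocks standing in for the real operators; only `.shape` and `.dot` are required of each block.
```python
import numpy as np

class DenseOp:
    # Minimal stand-in operator: wraps a dense matrix and exposes shape/dot.
    def __init__(self, mat):
        self.mat = mat
        self.shape = mat.shape
    def dot(self, v):
        return self.mat.dot(v)

A = np.random.rand(3, 3)
B = np.random.rand(2, 2)

# Block-diagonal layout: A occupies rows/cols 0..2, B occupies rows/cols 3..4.
ops_and_starts = [(DenseOp(A), 0, 0), (DenseOp(B), 3, 3)]
x = np.random.rand(5)

out = np.zeros(5)
for op, row0, col0 in ops_and_starts:
    out[row0:row0 + op.shape[0]] += op.dot(x[col0:col0 + op.shape[1]])

expected = np.block([[A, np.zeros((3, 2))], [np.zeros((2, 3)), B]]).dot(x)
np.testing.assert_allclose(out, expected)
```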
#### File: tectosaur/ops/mass_op.py
```python
import tectosaur.util.geometry as geometry
from tectosaur.util.quadrature import gauss2d_tri
import numpy as np
import scipy.sparse
from tectosaur.util.cpp import imp
_mass_op = imp("tectosaur.ops._mass_op")
class MassOp:
def __init__(self, nq, pts, tris, tensor_dim = 3):
qx, qw = gauss2d_tri(nq)
tri_pts = pts[tris]
basis = geometry.linear_basis_tri_arr(qx)
unscaled_normals = geometry.unscaled_normals(tri_pts)
jacobians = geometry.jacobians(unscaled_normals)
basis_factors = []
for b1 in range(3):
for b2 in range(3):
basis_factors.append(np.sum(qw * (basis[:,b1]*basis[:,b2])))
basis_factors = np.array(basis_factors)
rows, cols, vals = _mass_op.build_op(basis_factors, jacobians, tensor_dim)
n_rows = tris.shape[0] * 3 * tensor_dim
self.shape = (n_rows, n_rows)
self.mat = scipy.sparse.csr_matrix((vals, (rows, cols)), shape = self.shape)
def dot(self, v):
return self.mat.dot(v)
def nearfield_dot(self, v):
return self.dot(v)
def nearfield_no_correction_dot(self, v):
return self.dot(v)
def farfield_dot(self, v):
shape = [self.shape[0]]
shape.extend(v.shape[1:])
return np.zeros(shape)
```
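`MassOp` integrates products of the linear basis functions over each triangle; for a single flat triangle of area `|T|` the resulting 3x3 block is the classic `|T|/12 * [[2,1,1],[1,2,1],[1,1,2]]`. The following numpy check verifies that pattern on the unit reference triangle with an edge-midpoint rule, which is exact for the degree-2 integrands involved; it is independent of the library code.
```python
import numpy as np

# Edge-midpoint rule on the reference triangle {x >= 0, y >= 0, x + y <= 1}:
# exact for polynomials up to degree 2, with weights summing to the area 1/2.
qpts = np.array([[0.5, 0.0], [0.0, 0.5], [0.5, 0.5]])
qwts = np.full(3, 0.5 / 3.0)

def basis(p):
    x, y = p
    return np.array([1 - x - y, x, y])

M = np.zeros((3, 3))
for p, w in zip(qpts, qwts):
    b = basis(p)
    M += w * np.outer(b, b)

# Expected: area / 12 * [[2, 1, 1], [1, 2, 1], [1, 1, 2]] with area = 1/2.
expected = (0.5 / 12.0) * (np.ones((3, 3)) + np.eye(3))
np.testing.assert_allclose(M, expected)
print(M)
```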
#### File: tectosaur/ops/neg_op.py
```python
class MultOp:
def __init__(self, op, factor):
self.op = op
self.shape = op.shape
self.factor = factor
def nearfield_dot(self, v):
return self.factor * self.op.nearfield_dot(v)
def nearfield_no_correction_dot(self, v):
return self.factor * self.op.nearfield_no_correction_dot(v)
def dot(self, v):
return self.factor * self.op.dot(v)
def farfield_dot(self, v):
return self.factor * self.op.farfield_dot(v)
#TODO: Deprecate
def NegOp(op):
return MultOp(op, -1)
```
#### File: tectosaur/qd/plot_config.py
```python
import os
from IPython import get_ipython
import matplotlib.pyplot as plt
meade03_socket_idx = 0
def configure_meade03(idx):
global meade03_socket_idx
meade03_socket_idx = idx
configure(gpu_idx = idx, fast_plot = True, socket = idx)
def configure(gpu_idx = 0, fast_plot = True, socket = None):
set_gpu(gpu_idx)
if fast_plot:
configure_mpl_fast()
else:
configure_mpl_pretty()
configure_omp(socket)
def configure_omp(socket):
if socket is None:
if 'OMP_NUM_THREADS' in os.environ:
del os.environ['OMP_NUM_THREADS']
else:
first_core = socket * 20
last_core = (socket + 1) * 20
OMP_PLACES='{' + str(first_core) + ':' + str(last_core) + ':1}'
os.environ['OMP_NUM_THREADS'] = str(20)
os.environ['OMP_PLACES']=OMP_PLACES
def set_gpu(idx):
os.environ['CUDA_DEVICE'] = str(idx)
def configure_mpl_fast():
#TODO: try pdf or svg?
get_ipython().magic('config InlineBackend.figure_format = \'png\'')
configure_mpl()
def configure_mpl_pretty():
get_ipython().magic('config InlineBackend.figure_format = \'retina\'')
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.preamble'] = '\\usepackage{amsmath}'
configure_mpl()
def configure_mpl():
plt.style.use('dark_background')
plt.rcParams['font.size'] = 20
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
plt.rcParams['legend.fontsize'] = 20
plt.rcParams['figure.titlesize'] = 22
plt.rcParams['savefig.transparent'] = False
```
#### File: tests/fmm/test_tri_fmm.py
```python
import logging
import numpy as np
import matplotlib.pyplot as plt
import tectosaur as tct
from tectosaur.ops.sparse_farfield_op import (
TriToTriDirectFarfieldOp, FMMFarfieldOp, PtToPtDirectFarfieldOp)
from tectosaur.ops.sparse_integral_op import RegularizedSparseIntegralOp
from tectosaur.ops.dense_integral_op import farfield_tris
import tectosaur.mesh.mesh_gen as mesh_gen
from tectosaur.kernels import kernels
from tectosaur.mesh.modify import concat
import tectosaur.util.gpu as gpu
from tectosaur.util.quadrature import gauss4d_tri
from tectosaur.ops.dense_integral_op import RegularizedDenseIntegralOp
from tectosaur.fmm.fmm import make_tree, make_config, FMM, FMMEvaluator, report_interactions
from tectosaur.fmm.c2e import reg_lstsq_inverse
from tectosaur.constraint_builders import continuity_constraints
from tectosaur.constraints import build_constraint_matrix
from tectosaur.util.timer import Timer
from tectosaur.util.test_decorators import golden_master
# TODO: dim
def test_tri_ones():
from test_farfield import make_meshes
m, surf1_idxs, surf2_idxs = make_meshes(n_m = 2, sep = 5, w = 1)
ops = [
C(
3, 'tensor_one3', [], m[0], m[1], np.float64,
surf1_idxs, surf2_idxs
) for C in [
TriToTriDirectFarfieldOp,
PtToPtDirectFarfieldOp,
# PtToPtFMMFarfieldOp(250, 3.0, 250)
]
]
x = np.random.rand(ops[0].shape[1])
x = np.ones(ops[0].shape[1])
ys = [op.dot(x) for op in ops]
for y in ys:
print(y)
def op(obs_m, src_m, K, nq = 2):
new_pts, new_tris = concat(obs_m, src_m)
n_obs_tris = obs_m[1].shape[0]
obs_tris = new_tris[:n_obs_tris]
src_tris = new_tris[n_obs_tris:]
save = False
if save:
np.save('ptspts.npy', [new_pts, obs_tris, src_tris, nq])
mat = farfield_tris(
K, [1.0, 0.25], new_pts, obs_tris, src_tris, nq, np.float32
)
nrows = mat.shape[0] * 9
ncols = mat.shape[3] * 9
return mat.reshape((nrows, ncols))
def test_tri_fmm_p2p():
np.random.seed(100)
n = 10
m1 = mesh_gen.make_rect(n, n, [[-1, 0, 1], [-1, 0, -1], [1, 0, -1], [1, 0, 1]])
m2 = mesh_gen.make_rect(n, n, [[-3, 0, 1], [-3, 0, -1], [-2, 0, -1], [-2, 0, 1]])
K = 'elasticRH3'
cfg = make_config(K, [1.0, 0.25], 1.1, 2.5, 2, np.float32, treecode = True, force_order = 1000000)
tree1 = make_tree(m1, cfg, 10)
tree2 = make_tree(m2, cfg, 10)
print('n_nodes: ', str(len(tree1.nodes)))
fmm = FMM(tree1, m1, tree2, m2, cfg)
fmmeval = FMMEvaluator(fmm)
full = op(m1, m2, K = K)
x = np.random.rand(full.shape[1])
y1 = full.dot(x)
x_tree = fmm.to_tree(x)
import taskloaf as tsk
async def call_fmm(tsk_w):
return (await fmmeval.eval(tsk_w, x_tree, return_all_intermediates = True))
fmm_res, m_check, multipoles, l_check, locals = tsk.run(call_fmm)
y2 = fmm.to_orig(fmm_res)
np.testing.assert_almost_equal(y1, y2)
def test_tri_fmm_m2p_single():
np.random.seed(100)
n = 10
m1 = mesh_gen.make_rect(n, n, [[-1, 0, 1], [-1, 0, -1], [1, 0, -1], [1, 0, 1]])
m2 = mesh_gen.make_rect(n, n, [[-10, 0, 1], [-10, 0, -1], [-8, 0, -1], [-8, 0, 1]])
K = 'elasticRH3'
cfg = make_config(K, [1.0, 0.25], 1.1, 2.5, 2, np.float32, treecode = True, force_order = 1)
tree1 = make_tree(m1, cfg, 1000)
tree2 = make_tree(m2, cfg, 1000)
src_R = tree2.nodes[0].bounds.R
center = tree2.nodes[0].bounds.center
R_outer = cfg.outer_r
R_inner = cfg.inner_r
scaling = 1.0
check_sphere = mesh_gen.make_sphere(center, scaling * R_outer * src_R, 2)
equiv_sphere = mesh_gen.make_sphere(center, scaling * R_inner * src_R, 2)
src_tri_idxs = tree2.orig_idxs
src_tris = m2[1][src_tri_idxs]
obs_tri_idxs = tree1.orig_idxs
obs_tris = m1[1][obs_tri_idxs]
p2c = op(check_sphere, (m2[0], src_tris), K)
e2c = op(check_sphere, equiv_sphere, K, nq = 4)
c2e = reg_lstsq_inverse(e2c, cfg.alpha)
e2p = op((m1[0], obs_tris), equiv_sphere, K)
p2p = op((m1[0], obs_tris), (m2[0], src_tris), K)
fmm_mat = e2p.dot(c2e.dot(p2c))
full = op(m1, m2, K = K)
fmm = FMM(tree1, m1, tree2, m2, cfg)
fmmeval = FMMEvaluator(fmm)
x = np.random.rand(full.shape[1])
y1 = full.dot(x)
x_tree = fmm.to_tree(x)
import taskloaf as tsk
async def call_fmm(tsk_w):
return (await fmmeval.eval(tsk_w, x_tree, return_all_intermediates = True))
fmm_res, m_check, multipoles, l_check, locals = tsk.run(call_fmm)
y2 = fmm.to_orig(fmm_res)
m_check2 = p2c.dot(x_tree)
np.testing.assert_almost_equal(m_check, m_check2)
np.testing.assert_almost_equal(multipoles, c2e.dot(m_check))
np.testing.assert_almost_equal(fmm_res, e2p.dot(multipoles), 4)
np.testing.assert_almost_equal(y1, y2, 5)
def test_tri_fmm_full():
np.random.seed(100)
n = 40
K = 'elasticRA3'
m1 = mesh_gen.make_rect(n, n, [[-1, 0, 1], [-1, 0, -1], [1, 0, -1], [1, 0, 1]])
m2 = mesh_gen.make_rect(n, n, [[-3, 0, 1], [-3, 0, -1], [-2, 0, -1], [-2, 0, 1]])
x = np.random.rand(m2[1].shape[0] * 9)
t = Timer()
cfg = make_config(K, [1.0, 0.25], 1.1, 4.5, 2, np.float32)
tree1 = make_tree(m1, cfg, 100)
tree2 = make_tree(m2, cfg, 100)
print('n_nodes: ', str(len(tree1.nodes)))
fmm = FMM(tree1, m1, tree2, m2, cfg)
report_interactions(fmm)
fmmeval = FMMEvaluator(fmm)
t.report('setup fmm')
full = op(m1, m2, K = K)
t.report('setup dense')
t.report('gen x')
y1 = full.dot(x)
t.report('dense mv')
x_tree = fmm.to_tree(x)
import taskloaf as tsk
async def call_fmm(tsk_w):
return (await fmmeval.eval(tsk_w, x_tree))
fmm_res = tsk.run(call_fmm)
y2 = fmm.to_orig(fmm_res)
t.report('fmm mv')
np.testing.assert_almost_equal(y1, y2)
@golden_master(digits = 5)
def test_c2e(request):
n = 10
m1 = mesh_gen.make_rect(n, n, [[-1, 0, 1], [-1, 0, -1], [1, 0, -1], [1, 0, 1]])
m2 = mesh_gen.make_rect(n, n, [[-3, 0, 1], [-3, 0, -1], [-2, 0, -1], [-2, 0, 1]])
K = 'elasticRH3'
t = Timer()
cfg = make_config(K, [1.0, 0.25], 1.1, 2.5, 2, np.float64, treecode = True)
tree1 = make_tree(m1, cfg, 100)
tree2 = make_tree(m2, cfg, 100)
print(len(tree1.nodes))
fmm = FMM(tree1, m1, tree2, m2, cfg)
u2e_ops = []
for n in tree2.nodes:
UT, eig, V = fmm.u2e_ops
alpha = cfg.alpha
R = n.bounds.R
inv_eig = R * eig / ((R * eig) ** 2 + alpha ** 2)
u2e_ops.append((V * inv_eig).dot(UT))
return np.array(u2e_ops)
def benchmark():
tct.logger.setLevel(logging.INFO)
m = 1
n = 50
K = 'elasticRH3'
m1 = mesh_gen.make_rect(n, n, [[-1, 0, 1], [-1, 0, -1], [1, 0, -1], [1, 0, 1]])
m2 = mesh_gen.make_rect(n, n, [[-3, 0, 1], [-3, 0, -1], [-2, 0, -1], [-2, 0, 1]])
x = np.random.rand(m2[1].shape[0] * 9)
t = Timer()
new_pts, new_tris = concat(m1, m2)
n_obs_tris = m1[1].shape[0]
sparse_op = RegularizedSparseIntegralOp(
8,8,8,2,5,2.5,K,K,[1.0, 0.25],new_pts,new_tris,
np.float32,TriToTriDirectFarfieldOp,
obs_subset = np.arange(0,n_obs_tris),
src_subset = np.arange(n_obs_tris,new_tris.shape[0])
)
t.report('assemble matrix free')
for i in range(m):
y1 = sparse_op.dot(x)
t.report('matrix free mv x10')
fmm = FMMFarfieldOp(mac = 4.5, pts_per_cell = 300)(
2, K, [1.0, 0.25], new_pts, new_tris, np.float32,
obs_subset = np.arange(0,n_obs_tris),
src_subset = np.arange(n_obs_tris,new_tris.shape[0])
)
report_interactions(fmm.fmm_obj)
t.report('setup fmm')
y2 = fmm.dot(x)
t.report('fmm mv x10')
print(y1, y2)
if __name__ == "__main__":
benchmark()
```
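`test_tri_fmm_m2p_single` above builds the check-to-equivalent operator with `reg_lstsq_inverse(e2c, cfg.alpha)`, and `test_c2e` reconstructs the same operator from an SVD with damped inverse singular values. The snippet below is a hedged numpy sketch of such a Tikhonov-regularized pseudo-inverse, verified against the normal-equations solution; the function name is illustrative and not the library API.
```python
import numpy as np

def regularized_inverse(A, alpha):
    # Tikhonov-regularized pseudo-inverse: minimizes ||A x - b||^2 + alpha^2 ||x||^2.
    U, s, VT = np.linalg.svd(A, full_matrices=False)
    damped = s / (s ** 2 + alpha ** 2)          # damped inverse singular values
    return (VT.T * damped).dot(U.T)

np.random.seed(0)
A = np.random.rand(8, 6)
b = np.random.rand(8)
alpha = 1e-2
x = regularized_inverse(A, alpha).dot(b)

# Same solution via the normal equations (A^T A + alpha^2 I) x = A^T b.
x_ref = np.linalg.solve(A.T.dot(A) + alpha ** 2 * np.eye(6), A.T.dot(b))
np.testing.assert_allclose(x, x_ref)
```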
#### File: tectosaur/tests/laplace.py
```python
from tectosaur.util.geometry import *
import numpy as np
def laplace(tri1, tri2, i, j, eps, pts):
obsxhat = pts[:, 0]
obsyhat = pts[:, 1]
srcxhat = pts[:, 2]
srcyhat = pts[:, 3]
obsbasis = linear_basis_tri(obsxhat, obsyhat)
srcbasis = linear_basis_tri(srcxhat, srcyhat)
obsn = tri_normal(tri1)
srcn = tri_normal(tri2)
obspt = (tri_pt(obsbasis, tri1).T - np.outer(eps, obsn)).T
srcpt = tri_pt(srcbasis, tri2)
R = np.sqrt(
(obspt[0,:] - srcpt[0,:]) ** 2 +
(obspt[1,:] - srcpt[1,:]) ** 2 +
(obspt[2,:] - srcpt[2,:]) ** 2
)
K = (1.0 / 4.0 / np.pi) * ((1 / R ** 3) - (3.0 * eps ** 2 / R ** 5))
return obsbasis[i, :] * srcbasis[j, :] * K
```
#### File: tectosaur/tests/test_constraints.py
```python
from tectosaur.constraints import *
from tectosaur.continuity import *
from tectosaur.constraint_builders import *
import tectosaur.mesh.mesh_gen as mesh_gen
import tectosaur.mesh.modify as mesh_modify
import numpy as np
import tectosaur.util.geometry
import tectosaur as tct
import logging
logger = logging.getLogger(__name__)
def test_rearrange_constraint_eq():
eqtn = ConstraintEQ([Term(3,0),Term(-1,1),Term(4,2)], 13.7)
rearr = isolate_term_on_lhs(eqtn, 2)
assert(rearr.lhs_dof == 2)
assert(rearr.c.terms[0].val == -0.75)
assert(rearr.c.terms[0].dof == 0)
assert(rearr.c.terms[1].val == 0.25)
assert(rearr.c.terms[1].dof == 1)
assert(rearr.c.rhs[0].val == 1.0 / 4.0)
assert(rearr.c.rhs[0].dof == 0)
def subs_test(victim, sub_in, correct):
in_rearr = isolate_term_on_lhs(sub_in, 0)
result = substitute(victim, 0, in_rearr, 1.0)
for i in range(len(result.terms)):
assert(result.terms[i].dof == correct.terms[i].dof)
assert(result.terms[i].val == correct.terms[i].val)
def test_subs_rhs():
eqtn0 = ConstraintEQ([Term(1,1), Term(3,1)], 4.0)
eqtn1 = ConstraintEQ([Term(1,1)], 2.0)
correct = ConstraintEQ([Term(3,1)], 2.0)
subs_test(eqtn0, eqtn1, correct)
def test_combine_terms():
out = combine_terms(ConstraintEQ([Term(1, 1), Term(2, 1)], 0.0))
assert(len(out.terms) == 1)
assert(out.terms[0].dof == 1)
assert(out.terms[0].val == 3)
def test_filter_zero():
out = filter_zero_terms(ConstraintEQ([Term(1, 0), Term(0, 1)], 0.0))
assert(len(out.terms) == 1)
assert(out.terms[0].dof == 0)
assert(out.terms[0].val == 1)
def test_constraint_matrix():
cs = [ConstraintEQ([Term(1, 0), Term(-1, 1)], 0.0)]
cm, rhs, _ = build_constraint_matrix(cs, 3)
assert(cm.shape == (3, 2))
np.testing.assert_almost_equal(cm.todense(), [[1, 0], [1, 0], [0, 1]])
np.testing.assert_almost_equal(rhs, 0)
def test_constraint_matrix_harder():
cs = [
ConstraintEQ([Term(1, 5), Term(-1, 1)], 0.0),
ConstraintEQ([Term(1, 3), Term(0.25, 0)], 0.0),
ConstraintEQ([Term(1, 2), Term(0.5, 3), Term(0.5, 4)], 0.0)
]
cm,rhs, _ = build_constraint_matrix(cs, 7)
assert(cm.shape == (7, 4))
correct = [
[1,0,0,0],[0,1,0,0],[0,0,1,0], [-0.25,0,0,0],
[0.25,0,-2,0],[0,1,0,0],[0,0,0,1]
]
np.testing.assert_almost_equal(cm.todense(), correct)
np.testing.assert_almost_equal(rhs, 0)
def test_constraint_matrix_rhs1():
cs = [
ConstraintEQ([Term(1,0)], 2.0)
]
cm, rhs, rhs_mat = build_constraint_matrix(cs, 1)
np.testing.assert_almost_equal(rhs, [2.0])
def test_constraint_matrix_rhs2():
cs = [
ConstraintEQ([Term(1,0), Term(2,2)], 2.0),
ConstraintEQ([Term(1,1), Term(1,0)], 3.0)
]
cm, rhs, rhs_mat = build_constraint_matrix(cs, 3)
np.testing.assert_almost_equal(rhs, [0, 3.0, 1.0])
def test_constraint_matrix_rhs3():
cs = [
ConstraintEQ([Term(1, 5), Term(-1, 1)], 0.0),
ConstraintEQ([Term(1, 3), Term(0.25, 0)], 1.0),
ConstraintEQ([Term(1, 2), Term(0.5, 3), Term(0.5, 4)], 2.0)
]
cm, rhs, rhs_mat = build_constraint_matrix(cs, 7)
np.testing.assert_almost_equal(rhs, [0,0,0,1.0,3.0,0,0])
def test_constraint_double():
cs = [
ConstraintEQ([Term(1, 0), Term(1, 1), Term(1, 2)], 0.0),
ConstraintEQ([Term(1, 0), Term(-1, 1), Term(1, 2)], 0.0),
]
cm, rhs, _ = build_constraint_matrix(cs, 3)
np.testing.assert_almost_equal(cm.todense(), np.array([[1, 0, -1]]).T)
def test_constraint_triple():
cs = [
ConstraintEQ([Term(1, 0), Term(1, 1), Term(1, 2), Term(1, 3)], 0.0),
ConstraintEQ([Term(1, 0), Term(-1, 1), Term(1, 2), Term(1, 3)], 0.0),
ConstraintEQ([Term(1, 0), Term(1, 1), Term(-1, 2), Term(1, 3)], 0.0),
]
cm, rhs, _ = build_constraint_matrix(cs, 4)
np.testing.assert_almost_equal(cm.todense(), np.array([[1, 0, 0, -1]]).T)
def test_find_free_edges():
tris = np.array([[0,1,2],[2,1,3]])
free_es = find_free_edges(tris)
assert(len(free_es) == 4)
for e in [(0,0), (0,2), (1,1), (1,2)]:
assert(e in free_es)
def simple_rect_mesh(n):
corners = [[-1.0, -1.0, 0], [-1.0, 1.0, 0], [1.0, 1.0, 0], [1.0, -1.0, 0]]
return mesh_gen.make_rect(n, n, corners)
def test_free_edge_constraints():
m = simple_rect_mesh(3)
cs = free_edge_constraints(m[1])
dofs = [c.terms[0].dof for c in cs]
tri_pts = m[0][m[1]].reshape((-1,3))
xyz_near_origin = np.abs(tri_pts[:,:]) < 0.1
near_origin = np.logical_and(xyz_near_origin[:,0], xyz_near_origin[:,1])
correct_pt_idxs = np.where(np.logical_not(near_origin))[0]
correct_dofs = set((
np.tile(correct_pt_idxs * 3, (3,1)) + np.array([0,1,2])[:,np.newaxis]
).reshape(-1).tolist())
assert(correct_dofs == set(dofs))
assert(len(dofs) == 18 * 3)
def test_composite():
cs1 = [ConstraintEQ([Term(1, 0)], 2)]
cs2 = [ConstraintEQ([Term(1, 0)], 3)]
cs = build_composite_constraints((cs1, 2), (cs2, 3))
assert(cs[0].terms[0].dof == 2)
assert(cs[0].rhs == 2)
assert(cs[1].terms[0].dof == 3)
assert(cs[1].rhs == 3)
def test_redundant_continuity():
n = 13
m = simple_rect_mesh(n)
cs = continuity_constraints(m[0], m[1], m[1].shape[0])
n_total_dofs = m[1].size * 3
cm, c_rhs, _ = build_constraint_matrix(cs, n_total_dofs)
assert(cm.shape[1] == 3 * n ** 2)
# def test_faulted_continuity():
# n = 3
# m = simple_rect_mesh(n)
# fault_corners = [[-1.0, 0.0, 0.0], [-1.0, 0.0, -1.0], [1.0, 0.0, -1.0], [1.0, 0.0, 0.0]]
# m2 = mesh_gen.make_rect(n, n, fault_corners)
# all_mesh = mesh_modify.concat(m, m2)
# surface_tris = all_mesh[1][:m[1].shape[0]]
# fault_tris = all_mesh[1][m[1].shape[0]:]
#
# cs = continuity_constraints(all_mesh[0], all_mesh[1], m[1].shape[0])
# n_total_dofs = m[1].size * 3
# rows, cols, vals, rhs, n_unique_cs = fast_constraints.build_constraint_matrix(cs, all_mesh[1].shape[0])
# n_rows = n_total_dofs
# n_cols = n_total_dofs - n_unique_cs
# cm = scipy.sparse.csr_matrix((vals, (rows, cols)), shape = (n_rows, n_cols))
# assert(cm.shape[1] == 36)
# def test_cascadia_continuity():
# pts, tris = np.load('tests/cascadia10000.npy')
# cs = continuity_constraints(tris, np.array([]))
# # dof_pairs = [(c.terms[0].dof, c.terms[1].dof) for c in cs]
# # print(
# # [x for x in dof_pairs if x[0] == 4887 or x[1] == 4887],
# # [x for x in dof_pairs if x[0] == 3045 or x[1] == 3045]
# # )
#
# cm, c_rhs = build_constraint_matrix(cs, tris.shape[0] * 9)
#
# np.random.seed(75)
# field = np.random.rand(tris.shape[0] * 9)
# continuous = cm.dot(cm.T.dot(field)).reshape((-1,3))[:,0]
# assert(check_continuity(tris, continuous) == [])
def benchmark_build_constraint_matrix():
    from tectosaur.util.timer import Timer
from tectosaur.constraints import fast_constraints
import scipy.sparse
t = Timer()
corners = [[-1.0, -1.0, 0], [-1.0, 1.0, 0], [1.0, 1.0, 0], [1.0, -1.0, 0]]
n = 100
m = mesh_gen.make_rect(n, n, corners)
t.report('make mesh')
cs = continuity_constraints(m[1], np.array([]), m[0])
t.report('make constraints')
n_total_dofs = m[1].size * 3
rows, cols, vals, rhs, n_unique_cs = fast_constraints.build_constraint_matrix(cs, n_total_dofs)
t.report('build matrix')
n_rows = n_total_dofs
n_cols = n_total_dofs - n_unique_cs
cm = scipy.sparse.csr_matrix((vals, (rows, cols)), shape = (n_rows, n_cols))
t.report('to csr')
if __name__ == '__main__':
benchmark_build_constraint_matrix()
```
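The tests above exercise `build_constraint_matrix`, which maps a reduced vector of free DOFs to the full DOF vector so that every constraint is satisfied by construction (plus an `rhs` term for inhomogeneous constraints). Below is a hedged, self-contained numpy sketch of that idea for the single constraint `u0 - u1 = 0` used in `test_constraint_matrix`; it illustrates the reduction, not the library's algorithm.
```python
import numpy as np

# Constraint u0 - u1 = 0 on a 3-DOF vector, matching test_constraint_matrix above.
# The free DOFs are (u0, u2) and full = cm.dot(free) forces u1 == u0.
cm = np.array([
    [1.0, 0.0],   # u0 = free[0]
    [1.0, 0.0],   # u1 = free[0]
    [0.0, 1.0],   # u2 = free[1]
])
free = np.array([2.5, -1.0])
full = cm.dot(free)
assert np.isclose(full[0] - full[1], 0.0)   # constraint holds by construction

# A constrained symmetric system A u = b is then solved in the reduced space.
A = np.array([[2.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 2.0]])
b = np.array([1.0, 0.0, 1.0])
reduced = np.linalg.solve(cm.T.dot(A).dot(cm), cm.T.dot(b))
u = cm.dot(reduced)
print(u)   # u[0] == u[1]
```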
#### File: tectosaur/tests/test_integral_op.py
```python
import numpy as np
import tectosaur.nearfield.triangle_rules as triangle_rules
import tectosaur.nearfield.nearfield_op as nearfield_op
import tectosaur.ops.dense_integral_op as dense_integral_op
import tectosaur.ops.sparse_integral_op as sparse_integral_op
from tectosaur.ops.sparse_farfield_op import TriToTriDirectFarfieldOp, \
    FMMFarfieldOp, PtToPtDirectFarfieldOp, PtToPtFMMFarfieldOp
import tectosaur.ops.mass_op as mass_op
import tectosaur.util.quadrature as quad
import tectosaur.mesh.mesh_gen as mesh_gen
import tectosaur.mesh.modify as modify
from tectosaur.interior import interior_integral
from tectosaur.util.test_decorators import slow, golden_master, kernel
from tectosaur.util.timer import Timer
import tectosaur as tct
from laplace import laplace
import logging
logger = logging.getLogger(__name__)
float_type = np.float32
def build_subset_mesh():
n = 10
m = mesh_gen.make_rect(n, n, [[-1, 0, 1], [-1, 0, -1], [1, 0, -1], [1, 0, 1]])
n_tris = m[1].shape[0]
overlap = n_tris // 2
obs_subset = np.arange(n_tris // 2)
src_subset = np.arange(n_tris // 2 - overlap, n_tris)
obs_range = [0, (obs_subset[-1] + 1) * 9]
src_range = [src_subset[0] * 9, (src_subset[-1] + 1) * 9]
# import matplotlib.pyplot as plt
# plt.figure()
# plt.triplot(m[0][:,0], m[0][:,2], m[1], 'k-')
# plt.figure()
# plt.triplot(m[0][:,0], m[0][:,2], m[1][obs_subset], 'b-')
# plt.triplot(m[0][:,0], m[0][:,2], m[1][src_subset], 'r-')
# plt.show()
return m, obs_subset, src_subset, obs_range, src_range
def test_op_subset_dense():
m, obs_subset, src_subset, obs_range, src_range = build_subset_mesh()
k = 'elasticH3'
params = [1.0, 0.25]
subset_op = dense_integral_op.DenseIntegralOp(
7, 4, 3, 2.0, k, params, m[0], m[1], float_type,
obs_subset = obs_subset,
src_subset = src_subset,
).mat
full_op = dense_integral_op.DenseIntegralOp(
7, 4, 3, 2.0, k, params, m[0], m[1], float_type,
).mat
subfull = full_op[obs_range[0]:obs_range[1],src_range[0]:src_range[1]]
np.testing.assert_almost_equal(subfull, subset_op)
def test_op_subset_sparse():
m, obs_subset, src_subset, obs_range, src_range = build_subset_mesh()
k = 'elasticH3'
params = [1.0, 0.25]
subset_op = sparse_integral_op.SparseIntegralOp(
7, 4, 3, 2.0, k, params, m[0], m[1], float_type,
farfield_op_type = PtToPtDirectFarfieldOp,
obs_subset = obs_subset,
src_subset = src_subset,
)
y2 = subset_op.dot(np.ones(subset_op.shape[1]))
full_op = sparse_integral_op.SparseIntegralOp(
7, 4, 3, 2.0, k, params, m[0], m[1], float_type,
farfield_op_type = PtToPtDirectFarfieldOp
)
y1 = full_op.dot(np.ones(full_op.shape[1]))
np.testing.assert_almost_equal(y1[obs_range[0]:obs_range[1]], y2)
@golden_master()
def test_farfield_two_tris(request):
pts = np.array(
[[1, 0, 0], [2, 0, 0], [1, 1, 0],
[5, 0, 0], [6, 0, 0], [5, 1, 0]]
)
    obs_tris = np.array([[0, 1, 2]], dtype = np.int64)
    src_tris = np.array([[3, 4, 5]], dtype = np.int64)
params = [1.0, 0.25]
out = dense_integral_op.farfield_tris(
'elasticH3', params, pts, obs_tris, src_tris, 3, float_type
)
return out
@golden_master()
def test_gpu_vert_adjacent(request):
pts = np.array([[0,0,0],[1,0,0],[0,1,0],[1,-1,0],[2,0,0]]).astype(np.float32)
tris = np.array([[1,2,0],[1,3,4]]).astype(np.int32)
params = [1.0, 0.25]
pairs_int = nearfield_op.PairsIntegrator('elasticH3', params, np.float32, 1, 1, pts, tris)
out = pairs_int.vert_adj(3, np.array([[0,1,0,0]]))
return out
def test_vert_adj_separate_bases():
K = 'elasticH3'
params = [1.0, 0.25]
nq = 6
full_tris = np.array([[0,1,2], [0,4,3]])
pts = np.array([[0,0,0],[1,0,0],[0,1,0],[-0.5,0,0],[0,0,-2],[0.5,0.5,0]])
pairs_int = nearfield_op.PairsIntegrator('elasticH3', params, np.float32, 1, 1, pts, full_tris)
I = pairs_int.vert_adj(nq, np.array([[0,1,0,0]]))
obs_basis_tris = np.array([
[[0,0],[0.5,0.5],[0,1]], [[0,0],[1,0],[0.5,0.5]]
])
src_basis_tris = np.array([
[[0,0],[1,0],[0,1]], [[0,0],[1,0],[0,1]]
])
sep_tris = np.array([[0,5,2], [0,1,5], [0,4,3], [0,4,3]])
pairs_int = nearfield_op.PairsIntegrator('elasticH3', params, np.float32, 1, 1, pts, sep_tris)
I0 = pairs_int.vert_adj(nq, np.array([[0,2,0,0],[1,3,0,0]]))
from tectosaur.nearfield._table_lookup import sub_basis
I1 = np.array([sub_basis(
I0[i].flatten().tolist(), obs_basis_tris[i].tolist(), src_basis_tris[i].tolist()
) for i in range(2)]).reshape((2,3,3,3,3))
np.testing.assert_almost_equal(I[0], I1[0] + I1[1], 6)
def full_integral_op_tester(k, use_fmm, n = 5):
pts = np.array([[0,0,0], [1,1,0], [0, 1, 1], [0,0,2]])
tris = np.array([[0, 1, 2], [2, 1, 3]])
rect_mesh = mesh_gen.make_rect(n, n, [[-1, 0, 1], [-1, 0, -1], [1, 0, -1], [1, 0, 1]])
out = np.zeros(1)
params = [1.0, 0.25]
for m in [(pts, tris), rect_mesh]:
dense_op = dense_integral_op.DenseIntegralOp(
5, 3, 3, 2.0, k, params, m[0], m[1], float_type
)
x = np.ones(dense_op.shape[1])
dense_res = dense_op.dot(x)
if use_fmm:
farfield_op_type = PtToPtFMMFarfieldOp(100, 3.0, 300)
else:
farfield_op_type = PtToPtDirectFarfieldOp
sparse_op = sparse_integral_op.SparseIntegralOp(
5, 3, 3, 2.0, k, params, m[0], m[1],
float_type, farfield_op_type
)
sparse_res = sparse_op.dot(x)
assert(np.max(np.abs(sparse_res - dense_res)) / np.mean(np.abs(dense_res)) < 5e-4)
out = np.hstack((out, sparse_res))
return out
@slow
@golden_master(digits = 5)
def test_full_integral_op_nofmm(request, kernel):
return full_integral_op_tester(kernel, False)
@slow
@golden_master(digits = 7)
def test_full_integral_op_fmm(request):
return full_integral_op_tester('elasticU3', True, n = 30)
@golden_master(digits = 7)
def test_full_integral_op_nofmm_fast(request):
m = mesh_gen.make_rect(5, 5, [[-1, 0, 1], [-1, 0, -1], [1, 0, -1], [1, 0, 1]])
dense_op = dense_integral_op.DenseIntegralOp(
5, 3, 3, 2.0, 'elasticU3', [1.0, 0.25], m[0], m[1], float_type
)
return dense_op.mat
def test_mass_op():
m = mesh_gen.make_rect(2, 2, [[0,0,0],[1,0,0],[1,1,0],[0,1,0]])
op = mass_op.MassOp(3, m[0], m[1])
exact00 = quad.quadrature(
lambda x: (1 - x[:,0] - x[:,1]) * (1 - x[:,0] - x[:,1]),
quad.gauss2d_tri(10)
)
exact03 = quad.quadrature(
lambda x: (1 - x[:,0] - x[:,1]) * x[:,0],
quad.gauss2d_tri(10)
)
np.testing.assert_almost_equal(op.mat[0,0], exact00)
np.testing.assert_almost_equal(op.mat[0,3], exact03)
def test_mass_tensor_dim():
m = mesh_gen.make_rect(2, 2, [[0,0,0],[1,0,0],[1,1,0],[0,1,0]])
op1 = mass_op.MassOp(3, m[0], m[1], tensor_dim = 1)
op3 = mass_op.MassOp(3, m[0], m[1])
x = np.random.rand(op3.shape[1]).reshape((-1,3,3))
x[:,:,1] = 0
x[:,:,2] = 0
y3 = op3.dot(x.flatten())
y1 = op1.dot(x[:,:,0].flatten())
np.testing.assert_almost_equal(y1, y3.reshape((-1,3,3))[:,:,0].flatten())
@golden_master()
def test_interior(request):
np.random.seed(10)
corners = [[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]]
pts, tris = mesh_gen.make_rect(3, 3, corners)
obs_pts = pts.copy()
obs_pts[:,2] += 1.0
obs_ns = np.random.rand(*obs_pts.shape)
obs_ns /= np.linalg.norm(obs_ns, axis = 1)[:,np.newaxis]
input = np.ones(tris.shape[0] * 9)
K = 'elasticH3'
params = [1.0, 0.25]
op = tct.InteriorOp(
obs_pts, obs_ns, (pts, tris), K, 4, params, float_type
)
return op.dot(input)
@golden_master()
def test_interior_nearfield(request):
np.random.seed(10)
corners = [[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]]
src_mesh = mesh_gen.make_rect(30, 30, corners)
xs = np.linspace(-3, 3, 50)
X, Z = np.meshgrid(xs, xs)
Y = np.ones_like(X) * 0.0
obs_pts = np.array([e.flatten() for e in [X, Y, Z]]).T.copy()
obs_ns = np.zeros(obs_pts.shape)
obs_ns[:,2] = 1.0
input = np.zeros(src_mesh[1].shape[0] * 9)
input.reshape((-1,3))[:,0] = 1.0
K = 'elasticT3'
params = [1.0, 0.25]
op = tct.InteriorOp(
obs_pts, obs_ns, src_mesh, K, 4, params, float_type
)
out = op.dot(input)
# import matplotlib.pyplot as plt
# for d in range(3):
# plt.subplot(1, 3, d + 1)
# plt.contourf(X, Z, out.reshape((-1,3))[:,d].reshape(X.shape))
# plt.colorbar()
# plt.show()
return out
@profile
def benchmark_nearfield_construction():
corners = [[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]]
near_threshold = 1.5
n = 80
pts, tris = mesh_gen.make_rect(n, n, corners)
n = nearfield_op.NearfieldIntegralOp(1, 1, 1, 2.0, 'elasticU3', [1.0, 0.25], pts, tris)
@profile
def benchmark_vert_adj():
from tectosaur.util.timer import Timer
import tectosaur.mesh.find_near_adj as find_near_adj
from tectosaur.nearfield.pairs_integrator import PairsIntegrator
kernel = 'elasticH3'
params = [1.0, 0.25]
float_type = np.float32
L = 5
nq_vert_adjacent = 7
nx = ny = int(2 ** L / np.sqrt(2))
t = Timer()
pts, tris = mesh_gen.make_rect(nx, ny, [[-1, -1, 0], [-1, 1, 0], [1, 1, 0], [1, -1, 0]])
logger.debug('n_tris: ' + str(tris.shape[0]))
t.report('make rect')
close_or_touch_pairs = find_near_adj.find_close_or_touching(pts, tris, 1.25)
nearfield_pairs, va, ea = find_near_adj.split_adjacent_close(close_or_touch_pairs, tris)
t.report('find near')
pairs_int = PairsIntegrator(kernel, params, float_type, 1, 1, pts, tris)
t.report('setup integrator')
va_mat_rot = pairs_int.vert_adj(nq_vert_adjacent, va)
t.report('vert adj')
if __name__ == "__main__":
benchmark_vert_adj()
```
#### File: tectosaur/tests/test_sparse.py
```python
import numpy as np
import scipy.sparse
import tectosaur.util.sparse as sparse
import logging
logger = logging.getLogger(__name__)
def test_bsrmv():
A = np.zeros((4,4))
A[:2,:2] = np.random.rand(2,2)
A[2:,2:] = np.random.rand(2,2)
A_bsr = sparse.from_scipy_bsr(scipy.sparse.bsr_matrix(A))
x = np.random.rand(A.shape[1])
correct = A.dot(x)
out = A_bsr.dot(x)
np.testing.assert_almost_equal(out, correct)
def test_dense_bsrmv():
A = np.random.rand(100,100)
A_bsr = sparse.from_scipy_bsr(scipy.sparse.bsr_matrix(A))
x = np.random.rand(A.shape[1])
correct = A.dot(x)
out = A_bsr.dot(x)
np.testing.assert_almost_equal(out, correct)
def dense_to_coo(A, blocksize):
assert(A.shape[0] % blocksize == 0)
assert(A.shape[1] % blocksize == 0)
n_block_rows = A.shape[0] // blocksize
n_block_cols = A.shape[1] // blocksize
data = np.swapaxes(
A.reshape((n_block_rows,blocksize,n_block_cols,blocksize)), 1, 2
).reshape((-1, blocksize, blocksize)).copy()
rows = np.tile(np.arange(n_block_rows)[:,np.newaxis], (1, n_block_cols)).flatten().copy()
cols = np.tile(np.arange(n_block_cols)[np.newaxis, :], (n_block_rows, 1)).flatten().copy()
return rows, cols, data
def dense_to_coo_tester(shape):
A = np.random.rand(*shape)
bs = 4
rows, cols, data = dense_to_coo(A, bs)
A_re = np.empty(A.shape)
for i in range(rows.shape[0]):
A_re[(bs*rows[i]):(bs*rows[i]+bs), (bs*cols[i]):(bs*cols[i]+bs)] = data[i]
np.testing.assert_almost_equal(A, A_re)
def test_dense_to_coo():
dense_to_coo_tester((100, 100))
dense_to_coo_tester((60, 100))
dense_to_coo_tester((100, 60))
def dense_bcoomv_tester(shape):
A = np.random.rand(*shape)
x = np.random.rand(A.shape[1])
correct = A.dot(x)
rows, cols, data = dense_to_coo(A, 4)
A_bcoo = sparse.BCOOMatrix(rows, cols, data, A.shape)
out = A_bcoo.dot(x)
np.testing.assert_almost_equal(out, correct)
def test_dense_bcoomv():
dense_bcoomv_tester((100, 100))
dense_bcoomv_tester((60, 100))
dense_bcoomv_tester((100, 60))
def test_to_bsr():
A = np.random.rand(100,100)
x = np.random.rand(A.shape[1])
rows, cols, data = dense_to_coo(A, 4)
A_bcoo = sparse.BCOOMatrix(rows, cols, data, A.shape)
np.testing.assert_almost_equal(A_bcoo.dot(x), A.dot(x))
A_bsr = A_bcoo.to_bsr()
np.testing.assert_almost_equal(A_bsr.dot(x), A.dot(x))
def benchmark_bsrmv():
from tectosaur.util.timer import Timer
float_type = np.float32
blocksize = 9
nb = 100000
nnzbs = 5000000
t = Timer()
rows = np.random.randint(nb, size = (nnzbs))
cols = np.random.randint(nb, size = (nnzbs))
data = np.ones((nnzbs, blocksize, blocksize))
x = np.random.rand(nb * blocksize).astype(float_type)
t.report('random')
mat_bcoo = sparse.BCOOMatrix(rows, cols, data, (nb * blocksize, nb * blocksize))
t.report('make bcoo')
mat_bsr = mat_bcoo.to_bsr()
t.report('make bsr')
mat_sp = mat_bsr.to_scipy()
t.report('make scipy')
for i in range(3):
y = mat_bsr.dot(x)
t.report('bsr mv')
y2 = mat_bcoo.dot(x)
t.report('bcoo mv')
correct = mat_sp.dot(x)
t.report('scipy mv')
np.testing.assert_almost_equal(y, correct, 2)
np.testing.assert_almost_equal(y2, correct, 2)
if __name__ == "__main__":
benchmark_bsrmv()
``` |
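`dense_to_coo` in the tests above produces exactly the (block rows, block cols, dense blocks) triple that a `BCOOMatrix` stores. The snippet below is a hedged numpy sketch of the corresponding block-sparse matvec, checked against the dense product; it is not the library implementation.
```python
import numpy as np

def bcoo_dot(rows, cols, data, shape, x):
    # Accumulate data[i] @ x_block(cols[i]) into y_block(rows[i]).
    bs = data.shape[1]
    y = np.zeros(shape[0])
    for r, c, block in zip(rows, cols, data):
        y[r * bs:(r + 1) * bs] += block.dot(x[c * bs:(c + 1) * bs])
    return y

np.random.seed(0)
bs, n_block_rows, n_block_cols, nnzb = 3, 4, 5, 7
rows = np.random.randint(n_block_rows, size=nnzb)
cols = np.random.randint(n_block_cols, size=nnzb)
data = np.random.rand(nnzb, bs, bs)
x = np.random.rand(n_block_cols * bs)

# Dense reference: scatter the blocks (duplicate blocks add, as in COO formats).
A = np.zeros((n_block_rows * bs, n_block_cols * bs))
for r, c, block in zip(rows, cols, data):
    A[r * bs:(r + 1) * bs, c * bs:(c + 1) * bs] += block

np.testing.assert_allclose(bcoo_dot(rows, cols, data, A.shape, x), A.dot(x))
```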
{
"source": "jlmaurer/tsfresh",
"score": 2
} |
#### File: tests/feature_extraction/test_ts_features.py
```python
from __future__ import absolute_import, division
import numpy as np
import pandas as pd
from tests.fixtures import DataTestCase
from tsfresh.feature_extraction.extraction import extract_features
from tsfresh.feature_extraction.settings import FeatureExtractionSettings
import six
class FeatureExtractorTestCase(DataTestCase):
"""The unit tests in this module make sure if the time series features are created properly"""
def setUp(self):
self.settings = FeatureExtractionSettings()
self.settings.PROFILING = False
def test_calculate_ts_features(self):
# todo: implement more methods and test more aspects
df = self.create_test_data_sample()
extracted_features = extract_features(df, self.settings, "id", "sort", "kind", "val")
self.assertIsInstance(extracted_features, pd.DataFrame)
self.assertTrue(np.all(extracted_features.a__maximum == np.array([71, 77])))
self.assertTrue(np.all(extracted_features.a__sum_values == np.array([691, 1017])))
self.assertTrue(np.all(extracted_features.a__abs_energy == np.array([32211, 63167])))
self.assertTrue(np.all(extracted_features.b__sum_values == np.array([757, 695])))
self.assertTrue(np.all(extracted_features.b__minimum == np.array([3, 1])))
self.assertTrue(np.all(extracted_features.b__abs_energy == np.array([36619, 35483])))
self.assertTrue(np.all(extracted_features.b__mean == np.array([37.85, 34.75])))
self.assertTrue(np.all(extracted_features.b__median == np.array([39.5, 28.0])))
df_sts = self.create_one_valued_time_series()
extracted_features_sts = extract_features(df_sts, self.settings, "id", "sort", "kind", "val")
self.assertIsInstance(extracted_features_sts, pd.DataFrame)
self.assertTrue(np.all(extracted_features_sts.a__maximum == np.array([1.0, 6.0])))
self.assertTrue(np.all(extracted_features_sts.a__sum_values == np.array([1.0, 11.0])))
self.assertTrue(np.all(extracted_features_sts.a__count_above_mean == np.array([0, 1])))
def test_calculate_ts_features_after_randomisation(self):
df = self.create_test_data_sample()
df_random = df.copy().sample(frac=1)
extracted_features = extract_features(df, self.settings, "id", "sort", "kind", "val").sort_index()
extracted_features_from_random = extract_features(df_random, self.settings,
"id", "sort", "kind", "val").sort_index()
six.assertCountEqual(self, extracted_features.columns, extracted_features_from_random.columns)
for col in extracted_features:
self.assertIsNone(np.testing.assert_array_almost_equal(extracted_features[col],
extracted_features_from_random[col]))
```
#### File: tsfresh/feature_selection/feature_selector.py
```python
from __future__ import absolute_import, division, print_function
from builtins import zip
from builtins import range
import os
import numpy as np
import pandas as pd
import logging
from tsfresh.feature_selection.significance_tests import target_binary_feature_real_test, \
target_real_feature_binary_test, target_real_feature_real_test, target_binary_feature_binary_test
from tsfresh.feature_selection.settings import FeatureSignificanceTestsSettings
_logger = logging.getLogger(__name__)
def check_fs_sig_bh(X, y, settings=None):
"""
The wrapper function that calls the significance test functions in this package.
    For each feature of the input pandas.DataFrame a univariate feature significance test is conducted.
    Those tests generate p-values that are then evaluated by the Benjamini-Hochberg procedure to decide
    which features to keep and which to delete.
We are testing
:math:`H_0` = the Feature is not relevant and can not be added
against
:math:`H_1` = the Feature is relevant and should be kept
or in other words
:math:`H_0` = Target and Feature are independent / the Feature has no influence on the target
:math:`H_1` = Target and Feature are associated / dependent
When the target is binary this becomes
:math:`H_0 = \\left( F_{\\text{target}=1} = F_{\\text{target}=0} \\right)`
:math:`H_1 = \\left( F_{\\text{target}=1} \\neq F_{\\text{target}=0} \\right)`
Where :math:`F` is the distribution of the target.
In the same way we can state the hypothesis when the feature is binary
:math:`H_0 = \\left( T_{\\text{feature}=1} = T_{\\text{feature}=0} \\right)`
:math:`H_1 = \\left( T_{\\text{feature}=1} \\neq T_{\\text{feature}=0} \\right)`
Here :math:`T` is the distribution of the target.
TODO: And for real valued?
:param X: The DataFrame containing all the features and the target
:type X: pandas.DataFrame
:param y: The target vector
:type y: pandas.Series
:param settings: The feature selection settings to use for performing the tests.
:type settings: FeatureSignificanceTestsSettings
:return: A pandas.DataFrame with each column of the input DataFrame X as index with information on the significance
of this particular feature. The DataFrame has the columns
"Feature",
"type" (binary, real or const),
"p_value" (the significance of this feature as a p-value, lower means more significant)
"rejected" (if the Benjamini Hochberg procedure rejected this feature)
:rtype: pandas.DataFrame
"""
if settings is None:
settings = FeatureSignificanceTestsSettings()
target_is_binary = len(set(y)) == 2
    # TODO: solve the multiclass case. For a multiclass target the algorithm currently treats the target as a
    # regression target. Instead one could perform a binary one-versus-all classification per class.
# Only allow entries for which the target is known!
    y = y.astype(np.float64)
    # Note: (y == np.NaN) is always False element-wise, so check for NaN explicitly.
    X = X.copy().loc[~np.isnan(y), :]
# Create the DataFrame df_features containing the information about the different hypotheses
# Every row contains information over one feature column from X
df_features = pd.DataFrame()
    # One row per feature column of X
df_features['Feature'] = list(set(X.columns))
df_features = df_features.set_index('Feature', drop=False)
# Don't process constant features
for feature in df_features['Feature']:
if len(pd.unique(X[feature])) == 1:
df_features = df_features.drop(feature)
_logger.warning("[test_feature_significance] Feature {} is constant".format(feature))
# Add relevant columns to df_features
df_features["type"] = np.nan
df_features["p_value"] = np.nan
df_features["rejected"] = np.nan
# Process the features
for feature in df_features['Feature']:
if target_is_binary:
# Decide if the current feature is binary or not
if len(set(X[feature].values)) == 2:
df_features.loc[df_features.Feature == feature, "type"] = "binary"
p_value = target_binary_feature_binary_test(X[feature], y, settings)
else:
df_features.loc[df_features.Feature == feature, "type"] = "real"
p_value = target_binary_feature_real_test(X[feature], y, settings)
else:
# Decide if the current feature is binary or not
if len(set(X[feature].values)) == 2:
df_features.loc[df_features.Feature == feature, "type"] = "binary"
p_value = target_real_feature_binary_test(X[feature], y, settings)
else:
df_features.loc[df_features.Feature == feature, "type"] = "real"
p_value = target_real_feature_real_test(X[feature], y, settings)
# Add p_values to df_features
df_features.loc[df_features['Feature'] == feature, "p_value"] = p_value
# Check for constant features
for feature in list(set(X.columns)):
if len(pd.unique(X[feature])) == 1:
df_features.loc[feature, "type"] = "const"
df_features.loc[feature, "rejected"] = True
# Perform the real feature rejection
df_features = benjamini_hochberg_test(df_features, settings)
if settings.write_selection_report:
# Write results of BH - Test to file
if not os.path.exists(settings.result_dir):
os.mkdir(settings.result_dir)
with open(os.path.join(settings.result_dir, "fs_bh_results.txt"), 'w') as file_out:
file_out.write(("Performed BH Test to control the false discovery rate(FDR); \n"
"FDR-Level={0};Hypothesis independent={1}\n"
).format(settings.fdr_level, settings.hypotheses_independent))
df_features.to_csv(index=False, path_or_buf=file_out, sep=';', float_format='%.4f')
return df_features
def benjamini_hochberg_test(df_pvalues, settings):
"""
    This is an implementation of the Benjamini-Hochberg procedure that decides which of the hypotheses belonging
    to the p-values in df_pvalues to reject. While doing so, this test controls the false discovery rate,
    which is the expected ratio of false rejections to all rejections:
.. math::
FDR = \\mathbb{E} \\left [ \\frac{ |\\text{false rejections}| }{ |\\text{all rejections}|} \\right]
References
----------
.. [1] Benjamini, Yoav and <NAME> (2001).
The control of the false discovery rate in multiple testing under dependency.
Annals of statistics, 1165--1188
    :param df_pvalues: This DataFrame should contain the p-values of the different hypotheses in a column named
        "p_value".
:type df_pvalues: pandas.DataFrame
    :param settings: The settings object to use for controlling the false discovery rate (fdr_level) and
        whether to treat the hypotheses as independent or not (hypotheses_independent).
:type settings: FeatureSignificanceTestsSettings
:return: The same DataFrame as the input, but with an added boolean column "rejected".
:rtype: pandas.DataFrame
"""
# Get auxiliary variables and vectors
df_pvalues = df_pvalues.sort_values(by="p_value")
m = len(df_pvalues)
K = list(range(1, m + 1))
# Calculate the weight vector C
if settings.hypotheses_independent:
# c(k) = 1
C = [1] * m
else:
# c(k) = \sum_{i=1}^m 1/i
C = [sum([1.0 / i for i in range(1, k + 1)]) for k in K]
# Calculate the vector T to compare to the p_value
T = [settings.fdr_level * k / m * 1.0 / c for k, c in zip(K, C)]
    # Find the first p-value that fails its threshold; all hypotheses before it are rejected
try:
k_max = list(df_pvalues.p_value <= T).index(False)
except ValueError:
k_max = m
# Add the column denoting if hypothesis was rejected
df_pvalues["rejected"] = [True] * k_max + [False] * (m - k_max)
return df_pvalues
``` |
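The docstrings above describe the Benjamini-Hochberg procedure; in the textbook step-up formulation one sorts the p-values, compares `p_(k)` against `fdr_level * k / (m * c(m))` (with `c(m) = sum_i 1/i` under dependence, as in the `C` vector above), and rejects every hypothesis up to the largest rank that passes. Below is a hedged, self-contained numpy sketch of that textbook version; note it is not guaranteed to match the loop above line for line, since that loop stops at the first p-value that fails its threshold.
```python
import numpy as np

def benjamini_hochberg(p_values, fdr_level=0.05, independent=True):
    # Boolean rejection mask for the classic BH (or BY, if not independent) step-up procedure.
    p = np.asarray(p_values, dtype=float)
    m = len(p)
    order = np.argsort(p)
    c = 1.0 if independent else np.sum(1.0 / np.arange(1, m + 1))   # BY correction factor
    thresholds = fdr_level * np.arange(1, m + 1) / (m * c)
    passing = np.nonzero(p[order] <= thresholds)[0]
    rejected = np.zeros(m, dtype=bool)
    if passing.size:
        rejected[order[:passing[-1] + 1]] = True   # reject up to the largest passing rank
    return rejected

p = [0.001, 0.008, 0.039, 0.041, 0.042, 0.06, 0.074, 0.205]
print(benjamini_hochberg(p, fdr_level=0.05))   # [ True  True False ... False]
```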
{
"source": "jlmayfield/Cirq",
"score": 3
} |
#### File: contrib/acquaintance/shift_test.py
```python
import cirq
import cirq.contrib.acquaintance as cca
def test_circular_shift_gate_init():
g = cca.CircularShiftGate(4, 2)
assert g.num_qubits() == 4
assert g.shift == 2
g = cca.CircularShiftGate(4, 1, swap_gate = cirq.CZ)
assert g.swap_gate == cirq.CZ
def test_circular_shift_gate_eq():
equals_tester = cirq.testing.EqualsTester()
equals_tester.add_equality_group(cca.CircularShiftGate(4, 1),
cca.CircularShiftGate(4, 1))
equals_tester.add_equality_group(
cca.CircularShiftGate(4, 1, swap_gate=cirq.CZ))
equals_tester.add_equality_group(cca.CircularShiftGate(4, 2))
equals_tester.add_equality_group(cca.CircularShiftGate(3, 2))
equals_tester.add_equality_group(
cca.CircularShiftGate(3, 2, swap_gate=cirq.CZ))
def test_circular_shift_gate_permutation():
assert (cca.CircularShiftGate(3, 4).permutation() ==
{0: 2, 1: 0, 2: 1})
assert (cca.CircularShiftGate(4, 0).permutation() ==
{0: 0, 1: 1, 2: 2, 3: 3})
assert (cca.CircularShiftGate(5, 2).permutation() ==
{0:3, 1: 4, 2: 0, 3: 1, 4: 2})
def test_circular_shift_gate_repr():
g = cca.CircularShiftGate(3, 2)
cirq.testing.assert_equivalent_repr(g)
def test_circular_shift_gate_decomposition():
qubits = [cirq.NamedQubit(q) for q in 'abcdef']
expander = cirq.ExpandComposite()
circular_shift = cca.CircularShiftGate(2, 1, cirq.CZ)(*qubits[:2])
circuit = cirq.Circuit.from_ops(circular_shift)
expander.optimize_circuit(circuit)
expected_circuit = cirq.Circuit(
(cirq.Moment((cirq.CZ(*qubits[:2]),)),))
assert circuit == expected_circuit
no_decomp = lambda op: (isinstance(op, cirq.GateOperation) and
op.gate == cirq.SWAP)
expander = cirq.ExpandComposite(no_decomp=no_decomp)
circular_shift = cca.CircularShiftGate(6, 3)(*qubits)
circuit = cirq.Circuit.from_ops(circular_shift)
expander.optimize_circuit(circuit)
actual_text_diagram = circuit.to_text_diagram().strip()
expected_text_diagram = """
a: ───────────×───────────
│
b: ───────×───×───×───────
│ │
c: ───×───×───×───×───×───
│ │ │
d: ───×───×───×───×───×───
│ │
e: ───────×───×───×───────
│
f: ───────────×───────────
""".strip()
assert actual_text_diagram == expected_text_diagram
circular_shift = cca.CircularShiftGate(6, 2)(*qubits)
circuit = cirq.Circuit.from_ops(circular_shift)
expander.optimize_circuit(circuit)
actual_text_diagram = circuit.to_text_diagram().strip()
expected_text_diagram = """
a: ───────×───────────────
│
b: ───×───×───×───────────
│ │
c: ───×───×───×───×───────
│ │
d: ───────×───×───×───×───
│ │
e: ───────────×───×───×───
│
f: ───────────────×───────
""".strip()
assert actual_text_diagram == expected_text_diagram
def test_circular_shift_gate_wire_symbols():
qubits = [cirq.NamedQubit(q) for q in 'xyz']
circuit = cirq.Circuit.from_ops(cca.CircularShiftGate(3, 2)(*qubits))
actual_text_diagram = circuit.to_text_diagram().strip()
expected_text_diagram = """
x: ───╲0╱───
│
y: ───╲1╱───
│
z: ───╱2╲───
""".strip()
assert actual_text_diagram == expected_text_diagram
actual_text_diagram = circuit.to_text_diagram(use_unicode_characters=False)
expected_text_diagram = r"""
x: ---\0/---
|
y: ---\1/---
|
z: ---/2\---
""".strip()
assert actual_text_diagram.strip() == expected_text_diagram
```
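`test_circular_shift_gate_permutation` above fixes the permutation convention: for `CircularShiftGate(n, shift)` the asserted mapping is exactly `i -> (i - shift) % n`. A tiny pure-Python sketch reproducing those assertions (illustrative only, not the library implementation):
```python
def circular_shift_permutation(n, shift):
    # Mapping asserted by test_circular_shift_gate_permutation: i -> (i - shift) % n.
    return {i: (i - shift) % n for i in range(n)}

assert circular_shift_permutation(3, 4) == {0: 2, 1: 0, 2: 1}
assert circular_shift_permutation(4, 0) == {0: 0, 1: 1, 2: 2, 3: 3}
assert circular_shift_permutation(5, 2) == {0: 3, 1: 4, 2: 0, 3: 1, 4: 2}
print(circular_shift_permutation(6, 3))   # {0: 3, 1: 4, 2: 5, 3: 0, 4: 1, 5: 2}
```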
#### File: contrib/quirk/export_to_quirk_test.py
```python
import pytest
import cirq
from cirq.contrib.quirk.export_to_quirk import circuit_to_quirk_url
def assert_links_to(circuit: cirq.Circuit, expected: str, **kwargs):
actual = circuit_to_quirk_url(circuit, **kwargs)
actual = actual.replace('\n', '').replace(' ', '').strip()
expected = expected.replace('],[', '],\n[').strip()
expected = expected.replace('\n', '').replace(' ', '')
assert actual == expected
def test_x_z_same_col():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
circuit = cirq.Circuit.from_ops(cirq.X(a), cirq.Z(b))
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[["X","Z"]]}
""", escape_url=False)
assert_links_to(
circuit,
'http://algassert.com/quirk#circuit='
'%7B%22cols%22%3A%5B%5B%22X%22%2C%22Z%22%5D%5D%7D')
def test_x_cnot_split_cols():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
c = cirq.NamedQubit('c')
circuit = cirq.Circuit.from_ops(cirq.CNOT(a, b), cirq.X(c))
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[["•","X"],[1,1,"X"]]}
""", escape_url=False)
def test_cz_cnot_split_cols():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
c = cirq.NamedQubit('c')
circuit = cirq.Circuit.from_ops(cirq.CNOT(a, b), cirq.CZ(b, c))
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[["•","X"],[1,"•","Z"]]}
""", escape_url=False)
def test_various_known_gate_types():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
circuit = cirq.Circuit.from_ops(
cirq.X(a),
cirq.X(a)**0.25,
cirq.X(a)**-0.5,
cirq.Z(a),
cirq.Z(a)**0.5,
cirq.Y(a),
cirq.Y(a)**-0.25,
cirq.Y(a)**cirq.Symbol('t'),
cirq.H(a),
cirq.measure(a),
cirq.measure(a, b, key='not-relevant'),
cirq.SWAP(a, b),
cirq.CNOT(a, b),
cirq.CNOT(b, a),
cirq.CZ(a, b),
)
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[
["X"],
["X^¼"],
["X^-½"],
["Z"],
["Z^½"],
["Y"],
["Y^-¼"],
["Y^t"],
["H"],
["Measure"],
["Measure","Measure"],
["Swap","Swap"],
["•","X"],
["X","•"],
["•","Z"]]}
""", escape_url=False)
class MysteryOperation(cirq.Operation):
def __init__(self, *qubits):
self._qubits = qubits
@property
def qubits(self):
return self._qubits
def with_qubits(self, *new_qubits):
return MysteryOperation(*new_qubits)
class MysteryGate(cirq.SingleQubitGate):
pass
def test_various_unknown_gate_types():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
circuit = cirq.Circuit.from_ops(
MysteryOperation(b),
cirq.SWAP(a, b)**0.5,
cirq.H(a)**0.5,
cirq.SingleQubitCliffordGate.X_sqrt.merged_with(
cirq.SingleQubitCliffordGate.Z_sqrt).on(a),
cirq.X(a)**(1/5),
cirq.Y(a)**(1/5),
cirq.Z(a)**(1/5),
cirq.CZ(a, b)**(1/5),
cirq.PhasedXPowGate(phase_exponent=0.25)(a),
cirq.PhasedXPowGate(exponent=1, phase_exponent=cirq.Symbol('r'))(a),
cirq.PhasedXPowGate(exponent=0.001, phase_exponent=0.1)(a)
)
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[
[1,"UNKNOWN"],
["UNKNOWN", "UNKNOWN"],
[{"id":"?","matrix":"{{0.853553+0.146447i,0.353553-0.353553i},
{0.353553-0.353553i,0.146447+0.853553i}}"}],
[{"id":"?","matrix":"{{0.5+0.5i,0.5+0.5i},{0.5-0.5i,-0.5+0.5i}}"}],
[{"id":"?",
"matrix":"{{0.904508+0.293893i, 0.095492-0.293893i},
{0.095492-0.293893i, 0.904508+0.293893i}}"}],
[{"id":"?",
"matrix":"{{0.904508+0.293893i, 0.293893+0.095492i},
{-0.293893-0.095492i, 0.904508+0.293893i}}"}],
[{"id":"?",
"matrix":"{{1, 0},
{0, 0.809017+0.587785i}}"}],
["UNKNOWN", "UNKNOWN"],
[{"id":"?",
"matrix":"{{0, 0.707107+0.707107i},
{0.707107-0.707107i, 0}}"}],
["UNKNOWN"],
[{"id":"?",
"matrix":"{{0.999998+0.001571i,0.000488-0.001493i},
{-0.000483-0.001495i,0.999998+0.001571i}}"}]
]}
""", escape_url=False, prefer_unknown_gate_to_failure=True)
def test_unrecognized_single_qubit_gate_with_matrix():
a = cirq.NamedQubit('a')
circuit = cirq.Circuit.from_ops(
cirq.X(a)**0.2731,
)
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[[{
"id":"?",
"matrix":"{
{0.826988+0.378258i, 0.173012-0.378258i},
{0.173012-0.378258i, 0.826988+0.378258i}
}"}]]}
""", escape_url=False)
def test_unknown_gate():
class UnknownGate(cirq.SingleQubitGate):
pass
a = cirq.NamedQubit('a')
circuit = cirq.Circuit.from_ops(UnknownGate()(a))
with pytest.raises(TypeError):
_ = circuit_to_quirk_url(circuit)
with pytest.raises(TypeError):
_ = circuit_to_quirk_url(circuit, escape_url=False)
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[["UNKNOWN"]]}
""", prefer_unknown_gate_to_failure=True, escape_url=False)
def test_controlled_gate():
a, b, c, d = cirq.LineQubit.range(4)
circuit = cirq.Circuit.from_ops(
cirq.ControlledGate(cirq.ControlledGate(cirq.CZ)).on(a, d, c, b))
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[["•","Z","•", "•"]]}
""", escape_url=False)
# Doesn't merge.
circuit = cirq.Circuit.from_ops(
cirq.ControlledGate(cirq.X).on(a, b),
cirq.ControlledGate(cirq.Z).on(c, d))
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[["•","X"],[1,1,"•", "Z"]]}
""", escape_url=False)
# Unknown sub-gate.
circuit = cirq.Circuit.from_ops(
cirq.ControlledGate(MysteryGate()).on(a, b))
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[["UNKNOWN","UNKNOWN"]]}
""", escape_url=False, prefer_unknown_gate_to_failure=True)
def test_toffoli():
a, b, c, d = cirq.LineQubit.range(4)
# Raw.
circuit = cirq.Circuit.from_ops(
cirq.TOFFOLI(a, b, c))
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[["•","•","X"]]}
""", escape_url=False)
# With exponent. Doesn't merge with other operation.
circuit = cirq.Circuit.from_ops(
cirq.CCX(a, b, c)**0.5,
cirq.H(d))
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[
["•","•","X^½"],[1,1,1,"H"]]}
""", escape_url=False)
# Unknown exponent.
circuit = cirq.Circuit.from_ops(
cirq.CCX(a, b, c)**0.01)
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[
["UNKNOWN","UNKNOWN","UNKNOWN"]
]}
""", escape_url=False, prefer_unknown_gate_to_failure=True)
def test_fredkin():
a, b, c = cirq.LineQubit.range(3)
circuit = cirq.Circuit.from_ops(
cirq.FREDKIN(a, b, c))
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[["•","Swap","Swap"]]}
""", escape_url=False)
# Doesn't merge.
x, y, z = cirq.LineQubit.range(3, 6)
circuit = cirq.Circuit.from_ops(
cirq.CSWAP(a, b, c),
cirq.CSWAP(x, y, z))
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[
["•","Swap","Swap"],
[1,1,1,"•","Swap","Swap"]
]}
""", escape_url=False)
def test_ccz():
a, b, c, d = cirq.LineQubit.range(4)
# Raw.
circuit = cirq.Circuit.from_ops(
cirq.CCZ(a, b, c))
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[["•","•","Z"]]}
""", escape_url=False)
# Symbolic exponent.
circuit = cirq.Circuit.from_ops(
cirq.CCZ(a, b, c)**cirq.Symbol('t'))
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[["•","•","Z^t"]]}
""", escape_url=False)
# Unknown exponent.
circuit = cirq.Circuit.from_ops(
cirq.CCZ(a, b, c)**0.01)
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[
["UNKNOWN","UNKNOWN","UNKNOWN"]
]}
""", escape_url=False, prefer_unknown_gate_to_failure=True)
# With exponent. Doesn't merge with other operation.
circuit = cirq.Circuit.from_ops(
cirq.CCZ(a, b, c)**0.5,
cirq.H(d))
assert_links_to(circuit, """
http://algassert.com/quirk#circuit={"cols":[
["•","•","Z^½"],[1,1,1,"H"]]}
""", escape_url=False)
```
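The tests above exercise `circuit_to_quirk_url` only through the `assert_links_to` helper. For orientation, a direct call is just as small; the sketch below uses an arbitrary two-qubit circuit and prints the unescaped URL.
```python
import cirq
from cirq.contrib.quirk.export_to_quirk import circuit_to_quirk_url

a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit.from_ops(cirq.H(a), cirq.CNOT(a, b))

# escape_url=False keeps the JSON readable; the default escapes it for sharing.
print(circuit_to_quirk_url(circuit, escape_url=False))
# e.g. http://algassert.com/quirk#circuit={"cols":[["H"],["•","X"]]}
```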
#### File: line/placement/optimization_test.py
```python
import math
import pytest
from cirq.google.line.placement import optimization
from cirq.testing.mock import mock
def test_accept_accepts():
# Cost constant, should be accepted.
assert optimization._accept(0.0, 0.0, 1.0)[0]
# Cost improved, should be accepted.
assert optimization._accept(0.0, -0.1, 1.0)[0]
    # Cost increased, should be accepted if the random sample is low.
assert optimization._accept(0.0, 1.0, 1.0)[0]
    # Cost increased, should be accepted if below the threshold (exp(-1.0))
assert optimization._accept(1.0 / math.e - 1e-9, 1.0, 1.0)[0]
def test_accept_rejects():
    # Cost increased, should be rejected if the random sample is high.
assert not optimization._accept(1.0 - 1e-9, 1.0, 1.0)[0]
    # Cost increased, should be rejected if above the threshold (exp(-1.0))
assert not optimization._accept(1.0 / math.e + 1e-9, 1.0, 1.0)[0]
def test_anneal_minimize_improves_when_better():
assert optimization.anneal_minimize(
'initial', lambda s: 1.0 if s == 'initial' else 0.0,
lambda s: 'better', lambda: 1.0, 1.0, 0.5, 0.5, 1) == 'better'
def test_anneal_minimize_keeps_when_worse_and_discarded():
assert optimization.anneal_minimize(
'initial', lambda s: 0.0 if s == 'initial' else 1.0,
lambda s: 'better', lambda: 0.9, 1.0, 0.5, 0.5, 1) == 'initial'
def test_anneal_minimize_raises_when_wrong_cooling_factor():
with pytest.raises(ValueError):
optimization.anneal_minimize(
'initial', lambda s: 1.0 if s == 'initial' else 0.0,
lambda s: 'better', lambda: 1.0, 1.0, 0.5, 2.0, 1)
def test_anneal_minimize_calls_trace_func():
trace_func = mock.Mock()
optimization.anneal_minimize(
'initial', lambda s: 1.0 if s == 'initial' else 0.0,
lambda s: 'better', lambda: 1.0, 1.0, 0.5, 0.5, 1,
trace_func=trace_func)
trace_func.assert_has_calls([mock.call('initial', 1.0, 1.0, 1.0, True),
mock.call('better', 1.0, 0.0, 1.0, True)])
```
#### File: cirq/google/params_test.py
```python
import pytest
from cirq.google import params
from cirq.study.sweeps import Linspace, Points, Product, UnitSweep, Zip
def test_gen_sweep_points():
points = [0.5, 1.0, 1.5, 2.0, 2.5]
sweep = {
'parameter_key': 'foo',
'points': {
'points': list(points)
}
}
out = params._sweep_from_single_param_sweep_proto_dict(sweep)
assert out == Points('foo', [0.5, 1.0, 1.5, 2.0, 2.5])
def test_gen_sweep_linspace():
sweep = {
'parameter_key': 'foo',
'linspace': {
'first_point': 0,
'last_point': 10,
'num_points': 11
}
}
out = params._sweep_from_single_param_sweep_proto_dict(sweep)
assert out == Linspace('foo', 0, 10, 11)
def test_gen_param_sweep_zip():
s1 = {
'parameter_key': 'foo',
'points': {
'points': [1, 2, 3]
}
}
s2 = {
'parameter_key': 'bar',
'points': {
'points': [4, 5]
}
}
sweep = {
'sweeps': [s1, s2]
}
out = params._sweep_from_param_sweep_zip_proto_dict(sweep)
assert out == Points('foo', [1, 2, 3]) + Points('bar', [4, 5])
def test_gen_empty_param_sweep():
out = params.sweep_from_proto_dict({})
assert out == UnitSweep
def test_gen_param_sweep():
s1 = {
'parameter_key': 'foo',
'points': {
'points': [1, 2, 3]
}
}
s2 = {
'parameter_key': 'bar',
'points': {
'points': [4, 5]
}
}
ps = {
'sweep': {
'factors': [
{
'sweeps': [s1]
},
{
'sweeps': [s2]
}
]
}
}
out = params.sweep_from_proto_dict(ps)
assert out == Product(Zip(Points('foo', [1, 2, 3])),
Zip(Points('bar', [4, 5])))
def test_empty_param_sweep_keys():
assert params.sweep_from_proto_dict({}).keys == []
def test_sweep_from_proto_dict_missing_type():
s1 = {
'parameter_key': 'foo',
}
ps = {
'sweep': {
'factors': [
{
'sweeps': [s1]
},
]
}
}
with pytest.raises(ValueError):
params.sweep_from_proto_dict(ps)
def test_param_sweep_keys():
s11 = {
'parameter_key': 'foo',
'points': {
'points': range(5)
},
}
s12 = {
'parameter_key': 'bar',
'points': {
'points': range(7)
},
}
s21 = {
'parameter_key': 'baz',
'points': {
'points': range(11)
},
}
s22 = {
'parameter_key': 'qux',
'points': {
'points': range(13)
}
}
ps = {
'sweep': {
'factors': [
{
'sweeps': [s11, s12],
},
{
'sweeps': [s21, s22]
}
]
}
}
out = params.sweep_from_proto_dict(ps)
assert out.keys == ['foo', 'bar', 'baz', 'qux']
def test_empty_param_sweep_size():
assert len(params.sweep_from_proto_dict({})) == 1
def test_param_sweep_size():
s11 = {
'parameter_key': '11',
'linspace': {
'first_point': 0,
'last_point': 10,
'num_points': 5
}
}
s12 = {
'parameter_key': '12',
'points': {
'points': range(7)
}
}
s21 = {
'parameter_key': '21',
'linspace': {
'first_point': 0,
'last_point': 10,
'num_points': 11
}
}
s22 = {
'parameter_key': '22',
'points': {
'points': range(13)
}
}
ps = {
'sweep': {
'factors': [
{
'sweeps': [s11, s12],
},
{
'sweeps': [s21, s22]
}
]
}
}
    # Sweeps s11/s12 and s21/s22 are zipped, so each zip uses the smaller
    # number of points (5 and 11). The zips are then producted, so the
    # lengths multiply.
assert len(params.sweep_from_proto_dict(ps)) == 5 * 11
def test_param_sweep_size_no_sweeps():
ps = {
'sweep': {
'factors': [
{
},
{
}
]
}
}
assert len(params.sweep_from_proto_dict(ps)) == 1
def example_sweeps():
empty_sweep = {}
empty_product = {
'sweep': {}
}
empty_zip = {
'sweep': {
'factors': [{}, {}]
}
}
s11 = {
'parameter_key': '11',
'linspace': {
'first_point': 0,
'last_point': 10,
'num_points': 5
}
}
s12 = {
'parameter_key': '12',
'points': {
'points': range(7)
}
}
s21 = {
'parameter_key': '21',
'linspace': {
'first_point': 0,
'last_point': 10,
'num_points': 11
}
}
s22 = {
'parameter_key': '22',
'points': {
'points': range(13)
}
}
full_sweep = {
'sweep': {
'factors': [
{
'sweeps': [s11, s12],
},
{
'sweeps': [s21, s22]
}
]
}
}
return [empty_sweep, empty_product, empty_zip, full_sweep]
@pytest.mark.parametrize('param_sweep', example_sweeps())
def test_param_sweep_size_versus_gen(param_sweep):
sweep = params.sweep_from_proto_dict(param_sweep)
print(sweep)
predicted_size = len(sweep)
out = list(sweep)
assert len(out) == predicted_size
@pytest.mark.parametrize('sweep,expected', [
(
UnitSweep,
UnitSweep
),
(
Linspace('a', 0, 10, 25),
Product(Zip(Linspace('a', 0, 10, 25)))
),
(
Points('a', [1, 2, 3]),
Product(Zip(Points('a', [1, 2, 3])))
),
(
Zip(Linspace('a', 0, 1, 5), Points('b', [1, 2, 3])),
Product(Zip(Linspace('a', 0, 1, 5), Points('b', [1, 2, 3]))),
),
(
Product(Linspace('a', 0, 1, 5), Points('b', [1, 2, 3])),
Product(Zip(Linspace('a', 0, 1, 5)), Zip(Points('b', [1, 2, 3]))),
),
(
Product(
Zip(Points('a', [1, 2, 3]), Points('b', [4, 5, 6])),
Linspace('c', 0, 1, 5),
),
Product(
Zip(Points('a', [1, 2, 3]), Points('b', [4, 5, 6])),
Zip(Linspace('c', 0, 1, 5)),
),
),
(
Product(
Zip(Linspace('a', 0, 1, 5), Points('b', [1, 2, 3])),
Zip(Linspace('c', 0, 1, 8), Points('d', [1, 0.5, 0.25, 0.125])),
),
Product(
Zip(Linspace('a', 0, 1, 5), Points('b', [1, 2, 3])),
Zip(Linspace('c', 0, 1, 8), Points('d', [1, 0.5, 0.25, 0.125])),
),
),
])
def test_sweep_to_proto_dict(sweep, expected):
proto = params.sweep_to_proto_dict(sweep)
out = params.sweep_from_proto_dict(proto)
assert out == expected
@pytest.mark.parametrize('bad_sweep', [
Zip(Product(Linspace('a', 0, 10, 25), Linspace('b', 0, 10, 25))),
])
def test_sweep_to_proto_fail(bad_sweep):
with pytest.raises(ValueError):
params.sweep_to_proto_dict(bad_sweep)
```
#### File: google/sim/xmon_simulator.py
```python
import math
import collections
from typing import cast, Dict, Iterator, List, Set, Union
from typing import Tuple # pylint: disable=unused-import
import numpy as np
from cirq import circuits, ops, study, protocols, optimizers
from cirq.sim import simulator
from cirq.google import convert_to_xmon_gates
from cirq.google.sim import xmon_stepper
class XmonOptions:
"""XmonOptions for the XmonSimulator.
Attributes:
num_prefix_qubits: Sharding of the wave function is performed over 2
raised to this value number of qubits.
min_qubits_before_shard: Sharding will be done only for this number
of qubits or more. The default is 18.
use_processes: Whether or not to use processes instead of threads.
Processes can improve the performance slightly (varies by machine
but on the order of 10 percent faster). However this varies
significantly by architecture, and processes should not be used
for interactive use on Windows.
"""
def __init__(self,
num_shards: int=None,
min_qubits_before_shard: int=18,
use_processes: bool=False) -> None:
"""XmonSimulator options constructor.
Args:
num_shards: sharding will be done for the greatest value of a
power of two less than this value. If None, the default will
be used which is the smallest power of two less than or equal
to the number of CPUs.
min_qubits_before_shard: Sharding will be done only for this number
of qubits or more. The default is 18.
use_processes: Whether or not to use processes instead of threads.
Processes can improve the performance slightly (varies by
machine but on the order of 10 percent faster). However this
varies significantly by architecture, and processes should not
be used for interactive python use on Windows.
"""
assert num_shards is None or num_shards > 0, (
"Num_shards cannot be less than 1.")
if num_shards is None:
self.num_prefix_qubits = None
else:
self.num_prefix_qubits = int(math.log(num_shards, 2))
        assert min_qubits_before_shard >= 0, (
            'Min_qubits_before_shard must be non-negative.')
self.min_qubits_before_shard = min_qubits_before_shard
self.use_processes = use_processes
class XmonSimulator(simulator.SimulatesSamples,
simulator.SimulatesIntermediateWaveFunction):
"""XmonSimulator for Xmon class quantum circuits.
This simulator has different methods for different types of simulations.
For simulations that mimic the quantum hardware, the run methods are
defined in the SimulatesSamples interface:
run
run_sweep
These methods do not return or give access to the full wave function.
To get access to the wave function during a simulation, including being
able to set the wave function, the simulate methods are defined in the
SimulatesFinalWaveFunction interface:
simulate
simulate_sweep
simulate_moment_steps (for stepping through a circuit moment by moment)
"""
def __init__(self, options: XmonOptions = None) -> None:
"""Construct a XmonSimulator.
Args:
options: XmonOptions configuring the simulation.
"""
self.options = options or XmonOptions()
def _run(
self,
circuit: circuits.Circuit,
param_resolver: study.ParamResolver,
repetitions: int,
) -> Dict[str, List[np.ndarray]]:
"""See definition in `cirq.SimulatesSamples`."""
xmon_circuit, keys = self._to_xmon_circuit(
circuit,
param_resolver)
if xmon_circuit.are_all_measurements_terminal():
return self._run_sweep_sample(xmon_circuit, repetitions)
else:
return self._run_sweep_repeat(keys, xmon_circuit, repetitions)
def _run_sweep_repeat(self, keys, circuit, repetitions):
measurements = {k: [] for k in
keys} # type: Dict[str, List[np.ndarray]]
for _ in range(repetitions):
all_step_results = self._base_iterator(
circuit,
qubit_order=ops.QubitOrder.DEFAULT,
initial_state=0)
for step_result in all_step_results:
for k, v in step_result.measurements.items():
measurements[k].append(np.array(v, dtype=bool))
return {k: np.array(v) for k, v in measurements.items()}
def _run_sweep_sample(self, circuit, repetitions):
all_step_results = self._base_iterator(
circuit,
qubit_order=ops.QubitOrder.DEFAULT,
initial_state=0,
perform_measurements=False)
step_result = None
for step_result in all_step_results:
pass
if step_result is None:
return {}
measurement_ops = [op for _, op, _ in
circuit.findall_operations_with_gate_type(
ops.MeasurementGate)]
return step_result.sample_measurement_ops(measurement_ops, repetitions)
def _simulator_iterator(
self,
circuit: circuits.Circuit,
param_resolver: study.ParamResolver,
qubit_order: ops.QubitOrderOrList,
initial_state: Union[int, np.ndarray],
perform_measurements: bool = True,
) -> Iterator['XmonStepResult']:
"""See definition in `cirq.SimulatesIntermediateWaveFunction`."""
param_resolver = param_resolver or study.ParamResolver({})
xmon_circuit, _ = self._to_xmon_circuit(circuit, param_resolver)
return self._base_iterator(xmon_circuit,
qubit_order,
initial_state,
perform_measurements)
def _base_iterator(
self,
circuit: circuits.Circuit,
qubit_order: ops.QubitOrderOrList,
initial_state: Union[int, np.ndarray],
perform_measurements: bool=True,
) -> Iterator['XmonStepResult']:
"""See _simulator_iterator."""
qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(
circuit.all_qubits())
qubit_map = {q: i for i, q in enumerate(reversed(qubits))}
if isinstance(initial_state, np.ndarray):
initial_state = initial_state.astype(dtype=np.complex64,
casting='safe')
with xmon_stepper.Stepper(
num_qubits=len(qubits),
num_prefix_qubits=self.options.num_prefix_qubits,
initial_state=initial_state,
min_qubits_before_shard=self.options.min_qubits_before_shard,
use_processes=self.options.use_processes
) as stepper:
for moment in circuit:
measurements = collections.defaultdict(
list) # type: Dict[str, List[bool]]
phase_map = {} # type: Dict[Tuple[int, ...], float]
for op in moment.operations:
gate = cast(ops.GateOperation, op).gate
if isinstance(gate, ops.ZPowGate):
index = qubit_map[op.qubits[0]]
phase_map[(index,)] = cast(float, gate.exponent)
elif isinstance(gate, ops.CZPowGate):
index0 = qubit_map[op.qubits[0]]
index1 = qubit_map[op.qubits[1]]
phase_map[(index0, index1)] = cast(float,
gate.exponent)
elif isinstance(gate, ops.XPowGate):
index = qubit_map[op.qubits[0]]
stepper.simulate_w(
index=index,
half_turns=gate.exponent,
axis_half_turns=0)
elif isinstance(gate, ops.YPowGate):
index = qubit_map[op.qubits[0]]
stepper.simulate_w(
index=index,
half_turns=gate.exponent,
axis_half_turns=0.5)
elif isinstance(gate, ops.PhasedXPowGate):
index = qubit_map[op.qubits[0]]
stepper.simulate_w(
index=index,
half_turns=gate.exponent,
axis_half_turns=gate.phase_exponent)
elif isinstance(gate, ops.MeasurementGate):
if perform_measurements:
invert_mask = (
gate.invert_mask or len(op.qubits) * (False,))
for qubit, invert in zip(op.qubits, invert_mask):
index = qubit_map[qubit]
result = stepper.simulate_measurement(index)
if invert:
result = not result
measurements[cast(str, gate.key)].append(result)
else:
# coverage: ignore
raise TypeError('{!r} is not supported by the '
'xmon simulator.'.format(gate))
stepper.simulate_phases(phase_map)
yield XmonStepResult(stepper, qubit_map, measurements)
def _to_xmon_circuit(
self,
circuit: circuits.Circuit,
param_resolver: study.ParamResolver
) -> Tuple[circuits.Circuit, Set[str]]:
# TODO: Use one optimization pass.
xmon_circuit = protocols.resolve_parameters(circuit, param_resolver)
convert_to_xmon_gates.ConvertToXmonGates().optimize_circuit(
xmon_circuit)
optimizers.DropEmptyMoments().optimize_circuit(xmon_circuit)
keys = find_measurement_keys(xmon_circuit)
return xmon_circuit, keys
def find_measurement_keys(circuit: circuits.Circuit) -> Set[str]:
keys = set() # type: Set[str]
for _, _, gate in circuit.findall_operations_with_gate_type(
ops.MeasurementGate):
key = gate.key
if key in keys:
raise ValueError('Repeated Measurement key {}'.format(key))
keys.add(key)
return keys
class XmonStepResult(simulator.StepResult):
"""Results of a step of the simulator.
Attributes:
        qubit_map: A map from the Qubits in the Circuit to the index
of this qubit for a canonical ordering. This canonical ordering is
used to define the state (see the state_vector() method).
measurements: A dictionary from measurement gate key to measurement
results, ordered by the qubits that the measurement operates on.
"""
def __init__(
self,
stepper: xmon_stepper.Stepper,
qubit_map: Dict,
measurements: Dict[str, np.ndarray]) -> None:
self.qubit_map = qubit_map or {}
self.measurements = measurements or collections.defaultdict(list)
self._stepper = stepper
def state_vector(self) -> np.ndarray:
"""Return the state (wave function) at this point in the computation.
The state is returned in the computational basis with these basis
states defined by the qubit_map. In particular the value in the
qubit_map is the index of the qubit, and these are translated into
binary vectors where the last qubit is the 1s bit of the index, the
second-to-last is the 2s bit of the index, and so forth (i.e. big
endian ordering).
Example:
qubit_map: {QubitA: 0, QubitB: 1, QubitC: 2}
Then the returned vector will have indices mapped to qubit basis
states like the following table
| QubitA | QubitB | QubitC
:-: | :----: | :----: | :----:
0 | 0 | 0 | 0
1 | 0 | 0 | 1
2 | 0 | 1 | 0
3 | 0 | 1 | 1
4 | 1 | 0 | 0
5 | 1 | 0 | 1
6 | 1 | 1 | 0
7 | 1 | 1 | 1
"""
return self._stepper.current_state
def set_state(self, state: Union[int, np.ndarray]):
"""Updates the state of the simulator to the given new state.
Args:
state: If this is an int, then this is the state to reset
the stepper to, expressed as an integer of the computational basis.
Integer to bitwise indices is little endian. Otherwise if this is
a np.ndarray this must be the correct size and have dtype of
np.complex64.
Raises:
ValueError if the state is incorrectly sized or not of the correct
dtype.
"""
self._stepper.reset_state(state)
def sample(self, qubits: List[ops.QubitId], repetitions: int=1):
"""Samples from the wave function at this point in the computation.
Note that this does not collapse the wave function.
Returns:
Measurement results with True corresponding to the |1> state.
The outer list is for repetitions, and the inner corresponds to
measurements ordered by the supplied qubits.
"""
return self._stepper.sample_measurements(
indices=[self.qubit_map[q] for q in qubits],
repetitions=repetitions)
```
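A minimal usage sketch for the simulator defined above; the circuit and option values are illustrative, not taken from the module.
```python
import cirq
from cirq.google.sim.xmon_simulator import XmonOptions, XmonSimulator

q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit.from_ops(
    cirq.H(q0),
    cirq.CNOT(q0, q1),
    cirq.measure(q0, q1, key='m'))

simulator = XmonSimulator(XmonOptions(min_qubits_before_shard=18))

# Hardware-like sampling: only measurement outcomes come back.
result = simulator.run(circuit, repetitions=100)
print(result.measurements['m'])  # bool array of shape (100, 2)

# Wave-function access: step through the circuit moment by moment.
for step in simulator.simulate_moment_steps(circuit):
    print(step.state_vector())
```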
#### File: cirq/ops/controlled_operation.py
```python
from typing import Union, Any, Optional
import numpy as np
from cirq import protocols, linalg, value
from cirq.ops import raw_types
from cirq.type_workarounds import NotImplementedType
@value.value_equality
class ControlledOperation(raw_types.Operation):
def __init__(self,
control: raw_types.QubitId,
sub_operation: raw_types.Operation):
self.control = control
self.sub_operation = sub_operation
@property
def qubits(self):
return (self.control,) + self.sub_operation.qubits
def with_qubits(self, *new_qubits):
return ControlledOperation(
new_qubits[0],
self.sub_operation.with_qubits(*new_qubits[1:]))
def _decompose_(self):
result = protocols.decompose_once(self.sub_operation, NotImplemented)
if result is NotImplemented:
return NotImplemented
return [ControlledOperation(self.control, op) for op in result]
def _value_equality_values_(self):
return tuple([self.control, self.sub_operation])
def _has_unitary_(self) -> bool:
return protocols.has_unitary(self.sub_operation)
def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:
sub_matrix = protocols.unitary(self.sub_operation, None)
if sub_matrix is None:
return NotImplemented
return linalg.block_diag(np.eye(sub_matrix.shape[0]), sub_matrix)
def __str__(self):
return 'C({}){}'.format(self.control, str(self.sub_operation))
def __repr__(self):
return 'cirq.ControlledOperation(control={!r},' \
' sub_operation={!r})'.format(self.control, self.sub_operation)
def _is_parameterized_(self) -> bool:
return protocols.is_parameterized(self.sub_operation)
def _resolve_parameters_(self, resolver):
new_sub_op = protocols.resolve_parameters(self.sub_operation, resolver)
return ControlledOperation(self.control, new_sub_op)
def _trace_distance_bound_(self) -> float:
return protocols.trace_distance_bound(self.sub_operation)
def __pow__(self, exponent: Any) -> 'ControlledOperation':
new_sub_op = protocols.pow(self.sub_operation,
exponent,
NotImplemented)
if new_sub_op is NotImplemented:
return NotImplemented
return ControlledOperation(self.control, new_sub_op)
def _circuit_diagram_info_(self,
args: protocols.CircuitDiagramInfoArgs
) -> Optional[protocols.CircuitDiagramInfo]:
sub_args = protocols.CircuitDiagramInfoArgs(
known_qubit_count=(args.known_qubit_count - 1
if args.known_qubit_count is not None else None),
known_qubits=(args.known_qubits[1:]
if args.known_qubits is not None else None),
use_unicode_characters=args.use_unicode_characters,
precision=args.precision,
qubit_map=args.qubit_map
)
sub_info = protocols.circuit_diagram_info(self.sub_operation,
sub_args,
None)
if sub_info is None:
return NotImplemented
return protocols.CircuitDiagramInfo(
wire_symbols=('@',) + sub_info.wire_symbols,
exponent=sub_info.exponent)
```
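A short illustration of the class above (qubit names are arbitrary): controlling `X` on one qubit from another reproduces the CNOT unitary via the block-diagonal construction in `_unitary_`.
```python
import cirq
from cirq.ops.controlled_operation import ControlledOperation

a, b = cirq.LineQubit.range(2)
controlled_x = ControlledOperation(a, cirq.X(b))

print(controlled_x.qubits)         # (cirq.LineQubit(0), cirq.LineQubit(1))
print(cirq.unitary(controlled_x))  # block_diag(I, X), i.e. the CNOT matrix
print(controlled_x ** 0.5)         # the exponent is delegated to the sub-operation
```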
#### File: cirq/ops/pauli_string_test.py
```python
import itertools
import numpy as np
import pytest
from cirq.testing import (
EqualsTester,
)
import cirq
def _make_qubits(n):
return [cirq.NamedQubit('q{}'.format(i)) for i in range(n)]
def _sample_qubit_pauli_maps():
qubits = _make_qubits(3)
paulis_or_none = (None, cirq.X, cirq.Y, cirq.Z)
for paulis in itertools.product(paulis_or_none, repeat=len(qubits)):
yield {qubit: pauli for qubit, pauli in zip(qubits, paulis)
if pauli is not None}
def test_eq_ne_hash():
q0, q1, q2 = _make_qubits(3)
eq = EqualsTester()
eq.make_equality_group(
lambda: cirq.PauliString({}),
lambda: cirq.PauliString({}, False))
eq.add_equality_group(cirq.PauliString({}, True))
for q, pauli in itertools.product((q0, q1), (cirq.X, cirq.Y, cirq.Z)):
eq.add_equality_group(cirq.PauliString({q: pauli}, False))
eq.add_equality_group(cirq.PauliString({q: pauli}, True))
for q, p0, p1 in itertools.product((q0, q1), (cirq.X, cirq.Y, cirq.Z),
(cirq.X, cirq.Y, cirq.Z)):
eq.add_equality_group(cirq.PauliString({q: p0, q2: p1}, False))
def test_equal_up_to_sign():
q0, = _make_qubits(1)
assert cirq.PauliString({}, False).equal_up_to_sign(
cirq.PauliString({}, False))
assert cirq.PauliString({}, True).equal_up_to_sign(
cirq.PauliString({}, True))
assert cirq.PauliString({}, False).equal_up_to_sign(
cirq.PauliString({}, True))
assert cirq.PauliString({q0: cirq.X}, False).equal_up_to_sign(
cirq.PauliString({q0: cirq.X}, False))
assert cirq.PauliString({q0: cirq.X}, True).equal_up_to_sign(
cirq.PauliString({q0: cirq.X}, True))
assert cirq.PauliString({q0: cirq.X}, False).equal_up_to_sign(
cirq.PauliString({q0: cirq.X}, True))
assert not cirq.PauliString({q0: cirq.X}, False).equal_up_to_sign(
cirq.PauliString({q0: cirq.Y}, False))
assert not cirq.PauliString({q0: cirq.X}, True).equal_up_to_sign(
cirq.PauliString({q0: cirq.Y}, True))
assert not cirq.PauliString({q0: cirq.X}, False).equal_up_to_sign(
cirq.PauliString({q0: cirq.Y}, True))
assert not cirq.PauliString({q0: cirq.X}, False).equal_up_to_sign(
cirq.PauliString({}, False))
assert not cirq.PauliString({q0: cirq.X}, True).equal_up_to_sign(
cirq.PauliString({}, True))
assert not cirq.PauliString({q0: cirq.X}, False).equal_up_to_sign(
cirq.PauliString({}, True))
@pytest.mark.parametrize('pauli', (cirq.X, cirq.Y, cirq.Z))
def test_from_single(pauli):
q0, = _make_qubits(1)
assert (cirq.PauliString.from_single(q0, pauli)
== cirq.PauliString({q0: pauli}))
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_getitem(qubit_pauli_map):
other = cirq.NamedQubit('other')
pauli_string = cirq.PauliString(qubit_pauli_map)
for key in qubit_pauli_map:
assert qubit_pauli_map[key] == pauli_string[key]
with pytest.raises(KeyError):
_ = qubit_pauli_map[other]
with pytest.raises(KeyError):
_ = pauli_string[other]
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_get(qubit_pauli_map):
other = cirq.NamedQubit('other')
pauli_string = cirq.PauliString(qubit_pauli_map)
for key in qubit_pauli_map:
assert qubit_pauli_map.get(key) == pauli_string.get(key)
assert qubit_pauli_map.get(other) == pauli_string.get(other) == None
# pylint: disable=too-many-function-args
assert qubit_pauli_map.get(other, 5) == pauli_string.get(other, 5) == 5
# pylint: enable=too-many-function-args
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_contains(qubit_pauli_map):
other = cirq.NamedQubit('other')
pauli_string = cirq.PauliString(qubit_pauli_map)
for key in qubit_pauli_map:
assert key in pauli_string
assert other not in pauli_string
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_keys(qubit_pauli_map):
pauli_string = cirq.PauliString(qubit_pauli_map)
assert (len(qubit_pauli_map.keys()) == len(pauli_string.keys())
== len(pauli_string.qubits))
assert (set(qubit_pauli_map.keys()) == set(pauli_string.keys())
== set(pauli_string.qubits))
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_items(qubit_pauli_map):
pauli_string = cirq.PauliString(qubit_pauli_map)
assert len(qubit_pauli_map.items()) == len(pauli_string.items())
assert set(qubit_pauli_map.items()) == set(pauli_string.items())
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_values(qubit_pauli_map):
pauli_string = cirq.PauliString(qubit_pauli_map)
assert len(qubit_pauli_map.values()) == len(pauli_string.values())
assert set(qubit_pauli_map.values()) == set(pauli_string.values())
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_len(qubit_pauli_map):
pauli_string = cirq.PauliString(qubit_pauli_map)
assert len(qubit_pauli_map) == len(pauli_string)
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_iter(qubit_pauli_map):
pauli_string = cirq.PauliString(qubit_pauli_map)
assert len(tuple(qubit_pauli_map)) == len(tuple(pauli_string))
assert set(tuple(qubit_pauli_map)) == set(tuple(pauli_string))
# NamedQubit name repr in Python2 is different: u'q0' vs 'q0'
@cirq.testing.only_test_in_python3
def test_repr():
q0, q1, q2 = _make_qubits(3)
pauli_string = cirq.PauliString({q2: cirq.X, q1: cirq.Y, q0: cirq.Z})
assert (repr(pauli_string) ==
"cirq.PauliString({cirq.NamedQubit('q0'): cirq.Z, "
"cirq.NamedQubit('q1'): cirq.Y, cirq.NamedQubit('q2'): "
"cirq.X}, False)")
assert (repr(pauli_string.negate()) ==
"cirq.PauliString({cirq.NamedQubit('q0'): cirq.Z, "
"cirq.NamedQubit('q1'): cirq.Y, cirq.NamedQubit('q2'): "
"cirq.X}, True)")
def test_str():
q0, q1, q2 = _make_qubits(3)
pauli_string = cirq.PauliString({q2: cirq.X, q1: cirq.Y, q0: cirq.Z})
assert str(pauli_string) == '{+, q0:Z, q1:Y, q2:X}'
assert str(pauli_string.negate()) == '{-, q0:Z, q1:Y, q2:X}'
@pytest.mark.parametrize('map1,map2,out', (lambda q0, q1, q2: (
({}, {}, {}),
({q0: cirq.X}, {q0: cirq.Y}, {q0: (cirq.X, cirq.Y)}),
({q0: cirq.X}, {q1: cirq.X}, {}),
({q0: cirq.Y, q1: cirq.Z}, {q1: cirq.Y, q2: cirq.X},
{q1: (cirq.Z, cirq.Y)}),
({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}, {}, {}),
({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}, {q0: cirq.Y, q1: cirq.Z},
{q0: (cirq.X, cirq.Y), q1: (cirq.Y, cirq.Z)}),
))(*_make_qubits(3)))
def test_zip_items(map1, map2, out):
ps1 = cirq.PauliString(map1)
ps2 = cirq.PauliString(map2)
out_actual = tuple(ps1.zip_items(ps2))
assert len(out_actual) == len(out)
assert dict(out_actual) == out
@pytest.mark.parametrize('map1,map2,out', (lambda q0, q1, q2: (
({}, {}, ()),
({q0: cirq.X}, {q0: cirq.Y}, ((cirq.X, cirq.Y),)),
({q0: cirq.X}, {q1: cirq.X}, ()),
({q0: cirq.Y, q1: cirq.Z}, {q1: cirq.Y, q2: cirq.X},
((cirq.Z, cirq.Y),)),
({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}, {}, ()),
({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}, {q0: cirq.Y, q1: cirq.Z},
# Order not necessary
((cirq.X, cirq.Y), (cirq.Y, cirq.Z)))
))(*_make_qubits(3)))
def test_zip_paulis(map1, map2, out):
ps1 = cirq.PauliString(map1)
ps2 = cirq.PauliString(map2)
out_actual = tuple(ps1.zip_paulis(ps2))
assert len(out_actual) == len(out)
if len(out) <= 1:
assert out_actual == out
assert set(out_actual) == set(out) # Ignore output order
def test_commutes_with():
q0, q1, q2 = _make_qubits(3)
assert cirq.PauliString.from_single(q0, cirq.X).commutes_with(
cirq.PauliString.from_single(q0, cirq.X))
assert not cirq.PauliString.from_single(q0, cirq.X).commutes_with(
cirq.PauliString.from_single(q0, cirq.Y))
assert cirq.PauliString.from_single(q0, cirq.X).commutes_with(
cirq.PauliString.from_single(q1, cirq.X))
assert cirq.PauliString.from_single(q0, cirq.X).commutes_with(
cirq.PauliString.from_single(q1, cirq.Y))
assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
cirq.PauliString({q0: cirq.X, q1: cirq.Y}))
assert not cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
cirq.PauliString({q0: cirq.X, q1: cirq.Z}))
assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
cirq.PauliString({q0: cirq.Y, q1: cirq.X}))
assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
cirq.PauliString({q0: cirq.Y, q1: cirq.Z}))
assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
cirq.PauliString({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}))
assert not cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
cirq.PauliString({q0: cirq.X, q1: cirq.Z, q2: cirq.Z}))
assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
cirq.PauliString({q0: cirq.Y, q1: cirq.X, q2: cirq.Z}))
assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
cirq.PauliString({q0: cirq.Y, q1: cirq.Z, q2: cirq.X}))
assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
cirq.PauliString({q2: cirq.X, q1: cirq.Y}))
assert not cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
cirq.PauliString({q2: cirq.X, q1: cirq.Z}))
assert not cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
cirq.PauliString({q2: cirq.Y, q1: cirq.X}))
assert not cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
cirq.PauliString({q2: cirq.Y, q1: cirq.Z}))
def test_negate():
q0, q1 = _make_qubits(2)
qubit_pauli_map = {q0: cirq.X, q1: cirq.Y}
ps1 = cirq.PauliString(qubit_pauli_map)
ps2 = cirq.PauliString(qubit_pauli_map, True)
assert ps1.negate() == -ps1 == ps2
assert ps1 == ps2.negate() == -ps2
assert ps1.negate().negate() == ps1
def test_pos():
q0, q1 = _make_qubits(2)
qubit_pauli_map = {q0: cirq.X, q1: cirq.Y}
ps1 = cirq.PauliString(qubit_pauli_map)
assert ps1 == +ps1
def test_map_qubits():
a, b = (cirq.NamedQubit(name) for name in 'ab')
q0, q1 = _make_qubits(2)
qubit_pauli_map1 = {a: cirq.X, b: cirq.Y}
qubit_pauli_map2 = {q0: cirq.X, q1: cirq.Y}
qubit_map = {a: q0, b: q1}
ps1 = cirq.PauliString(qubit_pauli_map1)
ps2 = cirq.PauliString(qubit_pauli_map2)
assert ps1.map_qubits(qubit_map) == ps2
def test_to_z_basis_ops():
x0 = np.array([1,1]) / np.sqrt(2)
x1 = np.array([1,-1]) / np.sqrt(2)
y0 = np.array([1,1j]) / np.sqrt(2)
y1 = np.array([1,-1j]) / np.sqrt(2)
z0 = np.array([1,0])
z1 = np.array([0,1])
q0, q1, q2, q3, q4, q5 = _make_qubits(6)
pauli_string = cirq.PauliString({q0: cirq.X, q1: cirq.X,
q2: cirq.Y, q3: cirq.Y,
q4: cirq.Z, q5: cirq.Z})
circuit = cirq.Circuit.from_ops(
pauli_string.to_z_basis_ops())
initial_state = cirq.kron(x0, x1, y0, y1, z0, z1)
z_basis_state = circuit.apply_unitary_effect_to_state(initial_state)
expected_state = np.zeros(2 ** 6)
expected_state[0b010101] = 1
cirq.testing.assert_allclose_up_to_global_phase(
z_basis_state, expected_state, rtol=1e-7, atol=1e-7)
def _assert_pass_over(ops, before, after):
assert before.pass_operations_over(ops[::-1]) == after
assert (after.pass_operations_over(ops, after_to_before=True)
== before)
@pytest.mark.parametrize('shift,t_or_f',
itertools.product(range(3), (True, False)))
def test_pass_operations_over_single(shift, t_or_f):
q0, q1 = _make_qubits(2)
X, Y, Z = (cirq.Pauli.by_relative_index(pauli, shift)
for pauli in (cirq.X, cirq.Y, cirq.Z))
op0 = cirq.SingleQubitCliffordGate.from_pauli(Y)(q1)
ps_before = cirq.PauliString({q0: X}, t_or_f)
ps_after = ps_before
_assert_pass_over([op0], ps_before, ps_after)
op0 = cirq.SingleQubitCliffordGate.from_pauli(X)(q0)
op1 = cirq.SingleQubitCliffordGate.from_pauli(Y)(q1)
ps_before = cirq.PauliString({q0: X, q1: Y}, t_or_f)
ps_after = ps_before
_assert_pass_over([op0, op1], ps_before, ps_after)
op0 = cirq.SingleQubitCliffordGate.from_double_map({Z: (X,False),
X: (Z,False)})(q0)
ps_before = cirq.PauliString({q0: X, q1: Y}, t_or_f)
ps_after = cirq.PauliString({q0: Z, q1: Y}, t_or_f)
_assert_pass_over([op0], ps_before, ps_after)
op1 = cirq.SingleQubitCliffordGate.from_pauli(X)(q1)
ps_before = cirq.PauliString({q0: X, q1: Y}, t_or_f)
ps_after = ps_before.negate()
_assert_pass_over([op1], ps_before, ps_after)
ps_after = cirq.PauliString({q0: Z, q1: Y}, not t_or_f)
_assert_pass_over([op0, op1], ps_before, ps_after)
op0 = cirq.SingleQubitCliffordGate.from_pauli(Z, True)(q0)
op1 = cirq.SingleQubitCliffordGate.from_pauli(X, True)(q0)
ps_before = cirq.PauliString({q0: X}, t_or_f)
ps_after = cirq.PauliString({q0: Y}, not t_or_f)
_assert_pass_over([op0, op1], ps_before, ps_after)
@pytest.mark.parametrize('shift,t_or_f1, t_or_f2,neg',
itertools.product(range(3), *((True, False),)*3))
def test_pass_operations_over_double(shift, t_or_f1, t_or_f2, neg):
q0, q1, q2 = _make_qubits(3)
X, Y, Z = (cirq.Pauli.by_relative_index(pauli, shift)
for pauli in (cirq.X, cirq.Y, cirq.Z))
op0 = cirq.PauliInteractionGate(Z, t_or_f1, X, t_or_f2)(q0, q1)
ps_before = cirq.PauliString({q0: Z, q2: Y}, neg)
ps_after = cirq.PauliString({q0: Z, q2: Y}, neg)
_assert_pass_over([op0], ps_before, ps_after)
op0 = cirq.PauliInteractionGate(Y, t_or_f1, X, t_or_f2)(q0, q1)
ps_before = cirq.PauliString({q0: Z, q2: Y}, neg)
ps_after = cirq.PauliString({q0: Z, q2: Y, q1: X}, neg)
_assert_pass_over([op0], ps_before, ps_after)
op0 = cirq.PauliInteractionGate(Z, t_or_f1, X, t_or_f2)(q0, q1)
ps_before = cirq.PauliString({q0: Z, q1: Y}, neg)
ps_after = cirq.PauliString({q1: Y}, neg)
_assert_pass_over([op0], ps_before, ps_after)
op0 = cirq.PauliInteractionGate(Y, t_or_f1, X, t_or_f2)(q0, q1)
ps_before = cirq.PauliString({q0: Z, q1: Y}, neg)
ps_after = cirq.PauliString({q0: X, q1: Z}, neg ^ t_or_f1 ^ t_or_f2)
_assert_pass_over([op0], ps_before, ps_after)
op0 = cirq.PauliInteractionGate(X, t_or_f1, X, t_or_f2)(q0, q1)
ps_before = cirq.PauliString({q0: Z, q1: Y}, neg)
ps_after = cirq.PauliString({q0: Y, q1: Z}, not neg ^ t_or_f1 ^ t_or_f2)
_assert_pass_over([op0], ps_before, ps_after)
def test_pass_operations_over_cz():
q0, q1 = _make_qubits(2)
op0 = cirq.CZ(q0, q1)
ps_before = cirq.PauliString({q0: cirq.Z, q1: cirq.Y})
ps_after = cirq.PauliString({q1: cirq.Y})
_assert_pass_over([op0], ps_before, ps_after)
def test_pass_operations_over_no_common_qubits():
class DummyGate(cirq.SingleQubitGate):
pass
q0, q1 = _make_qubits(2)
op0 = DummyGate()(q1)
ps_before = cirq.PauliString({q0: cirq.Z})
ps_after = cirq.PauliString({q0: cirq.Z})
_assert_pass_over([op0], ps_before, ps_after)
def test_pass_unsupported_operations_over():
q0, = _make_qubits(1)
pauli_string = cirq.PauliString({q0: cirq.X})
with pytest.raises(TypeError):
pauli_string.pass_operations_over([cirq.X(q0)])
def test_with_qubits():
old_qubits = cirq.LineQubit.range(9)
new_qubits = cirq.LineQubit.range(9, 18)
qubit_pauli_map = {q: cirq.Pauli.by_index(q.x) for q in old_qubits}
pauli_string = cirq.PauliString(qubit_pauli_map, negated=True)
new_pauli_string = pauli_string.with_qubits(*new_qubits)
assert new_pauli_string.qubits == tuple(new_qubits)
for q in new_qubits:
assert new_pauli_string[q] == cirq.Pauli.by_index(q.x)
assert new_pauli_string.negated is True
```
#### File: cirq/ops/reversible_composite_gate_test.py
```python
import pytest
import cirq
class _FlipGate(cirq.SingleQubitGate):
def __init__(self, val):
self.val = val
def __pow__(self, exponent):
assert exponent == -1
return _FlipGate(~self.val)
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.val == other.val
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((_FlipGate, self.val))
def test_inverse():
with pytest.raises(TypeError):
_ = cirq.inverse(
cirq.measure(cirq.NamedQubit('q')))
def rev_freeze(root):
return cirq.freeze_op_tree(cirq.inverse(root))
operations = [
cirq.GateOperation(_FlipGate(i), [cirq.NamedQubit(str(i))])
for i in range(10)
]
expected = [
cirq.GateOperation(_FlipGate(~i), [cirq.NamedQubit(str(i))])
for i in range(10)
]
# Just an item.
assert rev_freeze(operations[0]) == expected[0]
# Flat list.
assert rev_freeze(operations) == tuple(expected[::-1])
# Tree.
assert (
rev_freeze((operations[1:5], operations[0], operations[5:])) ==
(tuple(expected[5:][::-1]), expected[0],
tuple(expected[1:5][::-1])))
# Flattening after reversing is equivalent to reversing then flattening.
t = (operations[1:5], operations[0], operations[5:])
assert (
tuple(cirq.flatten_op_tree(rev_freeze(t))) ==
tuple(rev_freeze(cirq.flatten_op_tree(t))))
def test_child_class():
class Impl(cirq.ReversibleCompositeGate):
def _decompose_(self, qubits):
yield _FlipGate(1)(*qubits)
yield _FlipGate(2)(*qubits), _FlipGate(3)(*qubits)
gate = Impl()
reversed_gate = gate**-1
assert gate is reversed_gate**-1
with pytest.raises(TypeError):
_ = gate**0.5
with pytest.raises(TypeError):
_ = reversed_gate**0.5
q = cirq.NamedQubit('q')
assert (cirq.decompose_once_with_qubits(gate, [q]) ==
[_FlipGate(1)(q), _FlipGate(2)(q), _FlipGate(3)(q)])
assert (cirq.decompose_once_with_qubits(reversed_gate, [q]) ==
[_FlipGate(~3)(q), _FlipGate(~2)(q), _FlipGate(~1)(q)])
def test_enforces_abstract():
with pytest.raises(TypeError):
_ = cirq.ReversibleCompositeGate()
# noinspection PyAbstractClass
class Missing(cirq.ReversibleCompositeGate):
pass
with pytest.raises(TypeError):
_ = Missing()
class Included(cirq.ReversibleCompositeGate):
def _decompose_(self, qubits):
pass
assert isinstance(Included(), cirq.ReversibleCompositeGate)
def test_works_with_basic_gates():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
basics = [cirq.X(a),
cirq.Y(a)**0.5,
cirq.Z(a),
cirq.CZ(a, b)**-0.25,
cirq.CNOT(a, b),
cirq.H(b),
cirq.SWAP(a, b)]
assert list(cirq.inverse(basics)) == [
cirq.SWAP(a, b),
cirq.H(b),
cirq.CNOT(a, b),
cirq.CZ(a, b)**0.25,
cirq.Z(a),
cirq.Y(a)**-0.5,
cirq.X(a),
]
```
#### File: cirq/protocols/approximate_equality_test.py
```python
import cirq
def test_approx_eq_primitives():
assert cirq.approx_eq(1.0, 1.0 + 1e-10, atol=1e-09)
assert not cirq.approx_eq(1.0, 1.0 + 1e-10, atol=1e-11)
assert cirq.approx_eq(0.0, 1e-10, atol=1e-09)
assert not cirq.approx_eq(0.0, 1e-10, atol=1e-11)
assert cirq.approx_eq(complex(1, 1), complex(1.1, 1.2), atol=0.3)
assert not cirq.approx_eq(complex(1, 1), complex(1.1, 1.2), atol=0.1)
def test_approx_eq_special_numerics():
assert not cirq.approx_eq(float('nan'), 0, atol=0.0)
assert not cirq.approx_eq(float('nan'), float('nan'), atol=0.0)
assert not cirq.approx_eq(float('inf'), float('-inf'), atol=0.0)
assert not cirq.approx_eq(float('inf'), 5, atol=0.0)
assert not cirq.approx_eq(float('inf'), 0, atol=0.0)
assert cirq.approx_eq(float('inf'), float('inf'), atol=0.0)
def test_approx_eq_tuple():
assert cirq.approx_eq((1, 1), (1, 1), atol=0.0)
assert not cirq.approx_eq((1, 1), (1, 1, 1), atol=0.0)
assert not cirq.approx_eq((1, 1), (1,), atol=0.0)
assert cirq.approx_eq((1.1, 1.2, 1.3), (1, 1, 1), atol=0.4)
assert not cirq.approx_eq((1.1, 1.2, 1.3), (1, 1, 1), atol=0.2)
def test_approx_eq_list():
assert cirq.approx_eq([], [], atol=0.0)
assert not cirq.approx_eq([], [[]], atol=0.0)
assert cirq.approx_eq([1, 1], [1, 1], atol=0.0)
assert not cirq.approx_eq([1, 1], [1, 1, 1], atol=0.0)
assert not cirq.approx_eq([1, 1], [1,], atol=0.0)
assert cirq.approx_eq([1.1, 1.2, 1.3], [1, 1, 1], atol=0.4)
assert not cirq.approx_eq([1.1, 1.2, 1.3], [1, 1, 1], atol=0.2)
def test_approx_eq_default():
assert cirq.approx_eq(1.0, 1.0 + 1e-9)
assert cirq.approx_eq(1.0, 1.0 - 1e-9)
assert not cirq.approx_eq(1.0, 1.0 + 1e-7)
assert not cirq.approx_eq(1.0, 1.0 - 1e-7)
def test_approx_eq_iterables():
def gen_1_1():
yield 1
yield 1
assert cirq.approx_eq((1, 1), [1, 1], atol=0.0)
assert cirq.approx_eq((1, 1), gen_1_1(), atol=0.0)
assert cirq.approx_eq(gen_1_1(), [1, 1], atol=0.0)
class A:
def __init__(self, val):
self.val = val
def _approx_eq_(self, other, atol):
if not isinstance(self, type(other)):
return NotImplemented
return cirq.approx_eq(self.val, other.val, atol=atol)
class B:
def __init__(self, val):
self.val = val
def _approx_eq_(self, other, atol):
if not isinstance(self.val, type(other)):
return NotImplemented
return cirq.approx_eq(self.val, other, atol=atol)
def test_approx_eq_supported():
assert cirq.approx_eq(A(0.0), A(0.1), atol=0.1)
assert not cirq.approx_eq(A(0.0), A(0.1), atol=0.0)
assert cirq.approx_eq(B(0.0), 0.1, atol=0.1)
assert cirq.approx_eq(0.1, B(0.0), atol=0.1)
class C:
def __init__(self, val):
self.val = val
def __eq__(self, other):
if not isinstance(self, type(other)):
return NotImplemented
return self.val == other.val
def test_approx_eq_uses__eq__():
assert cirq.approx_eq(C(0), C(0), atol=0.0)
assert not cirq.approx_eq(C(1), C(2), atol=0.0)
assert cirq.approx_eq([C(0)], [C(0)], atol=0.0)
assert not cirq.approx_eq([C(1)], [C(2)], atol=0.0)
def test_approx_eq_types_mismatch():
assert not cirq.approx_eq(0, A(0), atol=0.0)
assert not cirq.approx_eq(A(0), 0, atol=0.0)
assert not cirq.approx_eq(B(0), A(0), atol=0.0)
assert not cirq.approx_eq(A(0), B(0), atol=0.0)
assert not cirq.approx_eq(C(0), A(0), atol=0.0)
assert not cirq.approx_eq(A(0), C(0), atol=0.0)
assert not cirq.approx_eq(complex(0, 0), 0, atol=0.0)
assert not cirq.approx_eq(0, complex(0, 0), atol=0.0)
assert not cirq.approx_eq(0, [0], atol=1.0)
assert not cirq.approx_eq([0], 0, atol=0.0)
```
#### File: cirq/protocols/mixture.py
```python
from typing import Any, Sequence, Tuple, Union
import numpy as np
from typing_extensions import Protocol
from cirq.type_workarounds import NotImplementedType
# This is a special indicator value used by the mixture method to determine
# whether or not the caller provided a 'default' argument.
RaiseTypeErrorIfNotProvided = ((0.0, []),) # type: Sequence[Tuple[float, Any]]
class SupportsMixture(Protocol):
"""An object that may be describable as a probabilistic combination.
A mixture is described by an iterable of tuples of the form
(probability of object, object)
The probability components of the tuples must sum to 1.0 and be between
0 and 1 (inclusive).
"""
def _mixture_(self) -> Union[Sequence[Tuple[float, Any]], NotImplementedType]:
pass
def mixture(
val: Any,
default: Any = RaiseTypeErrorIfNotProvided) -> Sequence[Tuple[float, Any]]:
"""Return a iterable of the tuples representing a probabilistic combination.
A mixture is described by an iterable of tuples of the form
(probability of object, object)
The probability components of the tuples must sum to 1.0 and be between
0 and 1 (inclusive).
Args:
val: The value whose mixture is being computed.
default: A default value if val does not support mixture.
Returns:
An iterable of tuples of size 2. The first element of the tuple is a
        probability (between 0 and 1) and the second is the object that occurs
with that probability in the mixture. The probabilities will sum to 1.0.
"""
getter = getattr(val, '_mixture_', None)
result = NotImplemented if getter is None else getter()
if result is not NotImplemented:
return result
if default is not RaiseTypeErrorIfNotProvided:
return default
if getter is None:
raise TypeError(
"object of type '{}' has no _mixture_ method.".format(type(val)))
raise TypeError("object of type '{}' does have a _mixture_ method, "
"but it returned NotImplemented.".format(type(val)))
def validate_mixture(supports_mixture: SupportsMixture):
"""Validates that the mixture's tuple are valid probabilities."""
mixture_tuple = mixture(supports_mixture, None)
if mixture_tuple is None:
        raise TypeError('{} did not have a _mixture_ method'.format(
            supports_mixture))
def validate_probability(p, p_str):
if p < 0:
raise ValueError('{} was less than 0.'.format(p_str))
elif p > 1:
raise ValueError('{} was greater than 1.'.format(p_str))
total = 0.0
for p, val in mixture_tuple:
validate_probability(p, '{}\'s probability'.format(str(val)))
total += p
if not np.isclose(total, 1.0):
raise ValueError('Sum of probabilities of a mixture was not 1.0')
```
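As a sketch of the protocol above, here is a hypothetical object exposing `_mixture_`; the class name and probabilities are made up for illustration.
```python
import numpy as np
from cirq.protocols.mixture import mixture, validate_mixture

class BitFlipLikeChannel:
    """Applies X with probability p and the identity otherwise (illustrative only)."""
    def __init__(self, p: float) -> None:
        self.p = p

    def _mixture_(self):
        return ((1 - self.p, np.eye(2)),
                (self.p, np.array([[0, 1], [1, 0]])))

chan = BitFlipLikeChannel(0.1)
for prob, component in mixture(chan):
    print(prob, component.shape)

validate_mixture(chan)  # passes: probabilities lie in [0, 1] and sum to 1
```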
#### File: cirq/study/sweepable_test.py
```python
import cirq
def test_to_resolvers_single():
resolver = cirq.ParamResolver({})
assert cirq.to_resolvers(resolver) == [resolver]
def test_to_resolvers_sweep():
sweep = cirq.Linspace('a', 0, 1, 10)
assert cirq.to_resolvers(sweep) == list(sweep)
def test_to_resolvers_iterable():
resolvers = [cirq.ParamResolver({'a': 2}), cirq.ParamResolver({'a': 1})]
assert cirq.to_resolvers(resolvers) == resolvers
def test_to_resolvers_iterable_sweeps():
sweeps = [cirq.Linspace('a', 0, 1, 10), cirq.Linspace('b', 0, 1, 10)]
assert cirq.to_resolvers(sweeps) == sum([list(sweeps[0]), list(sweeps[1])],
[])
```
#### File: cirq/testing/lin_alg_utils_test.py
```python
import numpy as np
import pytest
from cirq.testing import (
random_unitary,
assert_allclose_up_to_global_phase,
)
from cirq.linalg import is_unitary
def test_random_unitary():
u1 = random_unitary(2)
u2 = random_unitary(2)
assert is_unitary(u1)
assert is_unitary(u2)
assert not np.allclose(u1, u2)
def test_assert_allclose_up_to_global_phase():
assert_allclose_up_to_global_phase(
np.array([[1]]),
np.array([[1j]]),
atol=0)
with pytest.raises(AssertionError):
assert_allclose_up_to_global_phase(
np.array([[1]]),
np.array([[2]]),
atol=0)
assert_allclose_up_to_global_phase(
np.array([[1e-8, -1, 1e-8]]),
np.array([[1e-8, 1, 1e-8]]),
atol=1e-6)
with pytest.raises(AssertionError):
assert_allclose_up_to_global_phase(
np.array([[1e-4, -1, 1e-4]]),
np.array([[1e-4, 1, 1e-4]]),
atol=1e-6)
assert_allclose_up_to_global_phase(
np.array([[1, 2], [3, 4]]),
np.array([[-1, -2], [-3, -4]]),
atol=0)
```
#### File: Cirq/dev_tools/env_tools.py
```python
import os
import shutil
import sys
from typing import Optional, Iterable, Callable, cast
from dev_tools import shell_tools, git_env_tools
from dev_tools.github_repository import GithubRepository
from dev_tools.prepared_env import PreparedEnv
def get_unhidden_ungenerated_python_files(directory: str) -> Iterable[str]:
"""Iterates through relevant python files within the given directory.
Args:
directory: The top-level directory to explore.
Yields:
File paths.
"""
for dirpath, dirnames, filenames in os.walk(directory, topdown=True):
if os.path.split(dirpath)[-1].startswith('.'):
dirnames.clear()
continue
for filename in filenames:
if filename.endswith('.py') and not filename.endswith('_pb2.py'):
yield os.path.join(dirpath, filename)
def create_virtual_env(venv_path: str,
requirements_paths: Iterable[str],
python_path: str,
verbose: bool) -> None:
"""Creates a new virtual environment and then installs dependencies.
Args:
venv_path: Where to put the virtual environment's state.
requirements_paths: Location of requirements files to -r install.
python_path: The python binary to use.
verbose: When set, more progress output is produced.
"""
shell_tools.run_cmd('virtualenv',
None if verbose else '--quiet',
'-p',
python_path,
venv_path,
out=sys.stderr)
pip_path = os.path.join(venv_path, 'bin', 'pip')
for req_path in requirements_paths:
shell_tools.run_cmd(pip_path,
'install',
None if verbose else '--quiet',
'-r',
req_path,
out=sys.stderr)
def prepare_temporary_test_environment(
destination_directory: str,
repository: GithubRepository,
pull_request_number: Optional[int],
verbose: bool,
env_name: str = '.test_virtualenv',
python_path: str = sys.executable,
commit_ids_known_callback: Callable[[PreparedEnv], None] = None
) -> PreparedEnv:
"""Prepares a temporary test environment at the (existing empty) directory.
Args:
destination_directory: The location to put files. The caller is
responsible for deleting the directory, whether or not this method
succeeds or fails.
repository: The github repository to download content from, if a pull
request number is given.
pull_request_number: If set, test content is fetched from github.
Otherwise copies of local files are used.
verbose: When set, more progress output is produced.
env_name: The name to use for the virtual environment.
python_path: Location of the python binary to use within the
virtual environment.
commit_ids_known_callback: A function to call when the actual commit id
being tested is known, before the virtual environment is ready.
Returns:
Commit ids corresponding to content to test/compare.
"""
# Fetch content.
if pull_request_number is not None:
env = git_env_tools.fetch_github_pull_request(
destination_directory=destination_directory,
repository=repository,
pull_request_number=pull_request_number,
verbose=verbose)
else:
env = git_env_tools.fetch_local_files(
destination_directory=destination_directory,
verbose=verbose)
if commit_ids_known_callback is not None:
commit_ids_known_callback(env)
# Create virtual environment.
base_path = cast(str, env.destination_directory)
env_path = os.path.join(base_path, env_name)
req_path = os.path.join(base_path, 'requirements.txt')
dev_req_path = os.path.join(base_path,
'dev_tools',
'conf',
'pip-list-dev-tools.txt')
contrib_req_path = os.path.join(base_path,
'cirq',
'contrib',
'contrib-requirements.txt')
rev_paths = [req_path, dev_req_path, contrib_req_path]
create_virtual_env(venv_path=env_path,
python_path=python_path,
requirements_paths=rev_paths,
verbose=verbose)
return PreparedEnv(github_repo=env.repository,
actual_commit_id=env.actual_commit_id,
compare_commit_id=env.compare_commit_id,
destination_directory=env.destination_directory,
virtual_env_path=env_path)
def derive_temporary_python2_environment(
destination_directory: str,
python3_environment: PreparedEnv,
verbose: bool,
env_name: str = '.test_virtualenv_py2',
python_path: str = "/usr/bin/python2.7") -> PreparedEnv:
"""Creates a python 2.7 environment starting from a prepared python 3 one.
Args:
destination_directory: Where to put the python 2 environment.
python3_environment: The prepared environment to start from.
verbose: When set, more progress output is produced.
env_name: The name to use for the virtualenv directory.
python_path: The python binary to use.
Returns:
A description of the environment that was prepared.
"""
shutil.rmtree(destination_directory)
input_directory = cast(str, python3_environment.destination_directory)
os.chdir(input_directory)
conversion_script_path = os.path.join(
input_directory,
'dev_tools',
'python2.7-generate.sh')
shell_tools.run_cmd('bash',
conversion_script_path,
destination_directory,
input_directory,
python3_environment.virtual_env_path,
out=sys.stderr)
os.chdir(destination_directory)
# Create virtual environment.
env_path = os.path.join(destination_directory, env_name)
# (These files are output by dev_tools/python2.7-generate.sh.)
req_path = os.path.join(destination_directory, 'requirements.txt')
dev_req_path = os.path.join(destination_directory,
'pip-list-test-tools.txt')
contrib_req_path = os.path.join(destination_directory,
'cirq',
'contrib',
'contrib-requirements.txt')
req_paths = [req_path, dev_req_path, contrib_req_path]
create_virtual_env(venv_path=env_path,
python_path=python_path,
requirements_paths=req_paths,
verbose=verbose)
return PreparedEnv(github_repo=python3_environment.repository,
actual_commit_id=python3_environment.actual_commit_id,
compare_commit_id=python3_environment.compare_commit_id,
destination_directory=destination_directory,
virtual_env_path=env_path)
``` |
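A small usage sketch for the helpers above; the directory and requirements paths are placeholders.
```python
import sys
from dev_tools.env_tools import (
    create_virtual_env,
    get_unhidden_ungenerated_python_files,
)

# Enumerate the repository's Python sources, skipping hidden and generated files.
for path in get_unhidden_ungenerated_python_files('.'):
    print(path)

# Build an isolated environment (the paths below are illustrative).
create_virtual_env(venv_path='.test_virtualenv',
                   requirements_paths=['requirements.txt'],
                   python_path=sys.executable,
                   verbose=False)
```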
{
"source": "jlmbaka/directory-pie",
"score": 3
} |
#### File: jlmbaka/directory-pie/pie.py
```python
__author__ = 'jeanlouis.mbaka'
import dirsize
import matplotlib.pyplot as plt
import unittest
import sys, getopt
def draw_pie_chart(data_dict):
"""
    Display directory and file sizes in a pie chart.
    :param data_dict: dict mapping directory/file names to their sizes
"""
labels = [key for key in data_dict.keys()]
values = [value for value in data_dict.values()]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = [0.1] * len(labels)
plt.pie(values, labels=labels, startangle=90, explode=explode, colors = colors, autopct='%1.1f%%')
plt.axis('equal')
plt.show()
if __name__ == "__main__":
# Read arguments from the second position
argv = sys.argv[1:]
target_directory = ""
try:
        # Options that require an argument must be followed by a colon (:), e.g. d:
opts, args = getopt.getopt(argv, "hd:", ["directory="])
except getopt.GetoptError:
# Error: print usage
print("pie.py -d <directory>")
sys.exit(2)
for opt, arg in opts:
if opt == "-h":
# Help: print usage
print("pie.py -d <directory>")
sys.exit()
elif opt in ("-d", "--directory"):
target_directory = arg
print("Directory is {0}".format(target_directory))
# Get data & draw
data_dict = dirsize.get_dir_size(target_directory)
draw_pie_chart(data_dict)
``` |
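`draw_pie_chart` only needs a flat mapping of labels to sizes. The exact structure returned by `dirsize.get_dir_size` is not shown here, so the dictionary below is a hypothetical stand-in.
```python
from pie import draw_pie_chart

# Hypothetical sizes in bytes; the chart shows percentages, so any consistent unit works.
example_data = {
    "Documents": 1250000,
    "Pictures": 4800000,
    "Music": 2100000,
    "Other": 350000,
}
draw_pie_chart(example_data)
```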
{
"source": "jlmbaka/kindle2notion",
"score": 3
} |
#### File: jlmbaka/kindle2notion/utilities.py
```python
from requests import get
# Get Cover Image
NO_COVER_IMG = "https://via.placeholder.com/150x200?text=No%20Cover"
def getBookCoverUri(title, author):
req_uri = "https://www.googleapis.com/books/v1/volumes?q="
    if title is None:
        return None
    req_uri += "intitle:" + title
    if author is not None:
        req_uri += "+inauthor:" + author
response = get(req_uri).json().get("items", [])
if len(response) > 0:
return response[0].get("volumeInfo", {}).get("imageLinks", {}).get("thumbnail")
return
# Initializing Special chars
BOLD = "__"
ITALIC = "*"
``` |
{
"source": "jlmcgehee21/disterminal",
"score": 3
} |
#### File: disterminal/disterminal/helpers.py
```python
import click
import numpy as np
import sys
from scipy import stats
AUTO_XLIMITS = {
'cdf': (0, 10000, .05),
'pdf': (-10000, 10000, .05),
'ppf': (0, 1, .01)
}
def get_dist_callable(distribution):
try:
return getattr(stats, distribution)
except AttributeError:
click.echo('scipy.stats does not contain distribution "{}"'.format(
distribution))
sys.exit(1)
def get_fun_callable(dist, distname, function):
try:
return getattr(dist, function)
except AttributeError:
click.echo('scipy.stats.{} does not have function "{}"'.format(
distname, function))
sys.exit(1)
def check_nan(main_call):
x = np.arange(-100, 100, 1)
y = main_call(x)
if np.all(np.isnan(y)):
click.echo('all values are NaN, nothing to plot...')
sys.exit(1)
def autorange(main_call, function):
limits = AUTO_XLIMITS.get(function, (-10000, 10000, .05))
x = np.arange(*limits)
y = main_call(x)
min_y = 0.0001
max_y = 0.9999
x = x[np.logical_and(y >= min_y, y < max_y)]
return np.linspace(x.min(), x.max(), 100)
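# Illustrative usage sketch (a hedged example of wiring these helpers together;
# not part of the original module and not executed on import):
#   dist = get_dist_callable('norm')            # scipy.stats.norm
#   fn = get_fun_callable(dist, 'norm', 'pdf')  # scipy.stats.norm.pdf
#   check_nan(fn)                               # exits early if every value is NaN
#   xs = autorange(fn, 'pdf')                   # 100 points covering the non-negligible region
#   ys = fn(xs)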
```
#### File: disterminal/tests/test_cli.py
```python
import pytest
from click.testing import CliRunner
from disterminal import cli
@pytest.fixture
def runner():
return CliRunner()
def test_cli_no_args(runner):
"""Test the CLI."""
result = runner.invoke(cli.main)
assert result.exit_code == 2
def test_cli_help(runner):
for help_opt in ['-h', '--help']:
help_result = runner.invoke(cli.main, [help_opt])
assert help_result.exit_code == 0
assert 'Supported Distributions:' in help_result.output
assert 'norm, pareto' in help_result.output
assert 'https://docs.scipy.org/doc/scipy/reference/stats.html' in help_result.output
def test_bad_distribution(runner):
result = runner.invoke(cli.main, ['foo', 'bar'])
assert 'scipy.stats does not contain distribution "foo"\n' == result.output
def test_bad_function(runner):
result = runner.invoke(cli.main, ['norm', 'bar'])
assert 'scipy.stats.norm does not have function "bar"\n' == result.output
def test_all_nan(runner):
result = runner.invoke(cli.main, ['beta', 'pdf', '0', '0'])
assert 'all values are NaN, nothing to plot...\n' == result.output
```
#### File: disterminal/tests/test_helpers.py
```python
import pytest
from disterminal import helpers
import numpy as np
def main_call(x):
out = np.zeros(x.shape)
out[1] = 0.1
out[-1] = 0.1
return out
def test_autorange():
x = helpers.autorange(main_call, '')
assert x.shape == (100,)
assert x.min() == pytest.approx(-9999.95)
assert x.max() == pytest.approx(9999.95)
``` |
{
"source": "jlmcgehee21/SoCo",
"score": 3
} |
#### File: soco/plugins/talk.py
```python
import sys
import re
import urllib, urllib2
import time
from ..plugins import SoCoPlugin
__all__ = ['Talk']
class TalkerPlugin(SoCoPlugin):
"""
The main use of this plugin is to make your Sonos system speak text. It works by sending a request to the
Google Text To Speech service, downloading an MP3 from the service, then playing the MP3 on the desired Sonos
players. It will pause and resume playback properly if you are listening to music at the time the message is
sent.
SETUP REQUIREMENTS: You must add the path to the Google Text To Speech MP3 to your Sonos music library in order
to obtain the URI for that file. Once this is done, you can find the URI "get_music_library_information()"
method in the soco package.
"""
def __init__(self,soco,mp3Path,sonosURI,zoneNames=None,maxAttempts=5):
"""
:param soco: soco instance per soco plugin instructions
:param mp3Path: The path you wish for the TTS message to be saved.
:param sonosURI: URI of mp3 file. This should point to the same file that exists at mp3Path
:param zoneNames: List of Sonos player names you wish for your message to play on. i.e. ['Kitchen','Office'].
If nothing is passed, the message will play on all Sonos players.
:param maxAttempts: Number of attempts to run soco.discover(). I found that regardless of the timeout passed to
soco.discover(), it may still fail, but multiple attempts usually works.
:return: TalkerPlugin object
"""
self.sonosURI = sonosURI
self.mp3Path = mp3Path
discovered = None
iter=0
while discovered is None and iter < maxAttempts:
discovered = soco.discover(timeout=2)
iter += 1
assert discovered is not None, 'Connection to Sonos system failed.'
zoneList = []
nameList = []
for zone in discovered:
zoneList.append(zone)
nameList.append(zone.player_name)
if zoneNames:
assert type(zoneNames) == list and all([zone in nameList for zone in zoneNames]), \
'Speaker object must be instantiated with a list of existing zone names on your network'
speakingSoCos = [zone for zone in zoneList if zone.player_name in zoneNames]
else:
speakingSoCos = zoneList
self.masterSoCo = speakingSoCos[0]
speakingSoCos.pop(0)
self.slaveSoCos = speakingSoCos
# if setup is True:
# self._setAudioDirectory()
super(TalkerPlugin, self).__init__(soco)
def talk(self,talkString='This is a test. Testing 1 2 3',volume=25):
"""
:param talkString: String you wish your Sonos system to speak
:param volume: Volume you wish for your Sonos system to speak at. The volume will be set back to the previous
value after the message has been spoken
:return: None
"""
self._formGroup()
tts = GoogleTTS()
text_lines = tts.convertTextAsLinesOfText(talkString)
tts.downloadAudioFile(text_lines,'en',open(self.mp3Path,'wb'))
oldvolumes = [self.masterSoCo.volume]
oldtracks = [self.masterSoCo.get_current_track_info()]
oldqueues = [self.masterSoCo.get_queue()]
oldStates = [self.masterSoCo.get_current_transport_info()]
allSoCos = [self.masterSoCo]
for SoCo in self.slaveSoCos:
oldvolumes.append(SoCo.volume)
oldtracks.append(SoCo.get_current_track_info())
oldqueues.append(SoCo.get_queue())
oldStates.append(SoCo.get_current_transport_info())
allSoCos.append(SoCo)
self.masterSoCo.volume = volume
self.masterSoCo.play_uri(self.sonosURI,title=u'Python Talking Script')
# self.masterSoCo.get_current_track_info()['duration']
time.sleep(float(time.strptime(self.masterSoCo.get_current_track_info()['duration'],'%H:%M:%S').tm_sec))
for ind,SoCo in enumerate(allSoCos):
SoCo.volume=oldvolumes[ind]
if oldStates[ind]['current_transport_state'] == 'PLAYING':
SoCo.play_from_queue(int(oldtracks[ind]['playlist_position'])-1)
SoCo.seek(oldtracks[ind]['position'])
self._delGroup()
def _formGroup(self):
for SoCo in self.slaveSoCos:
SoCo.join(self.masterSoCo)
def _delGroup(self):
for SoCo in self.slaveSoCos:
SoCo.unjoin()
class GoogleTTS(object):
"""
Taken from script at https://github.com/JulienD/Google-Text-To-Speech. No license info in repo.
"""
def __init__(self):
pass
def convertTextAsLinesOfText(self,text):
""" This convert a word, a short text, a long text into several parts to
smaller than 100 characters.
"""
# Sanitizes the text.
text = text.replace('\n','')
text_list = re.split('(\,|\.|\;|\:)', text)
# Splits a text into chunks of texts.
text_lines = []
for idx, val in enumerate(text_list):
if (idx % 2 == 0):
text_lines.append(val)
else :
# Combines the string + the punctuation.
joined_text = ''.join((text_lines.pop(),val))
                # Checks if the chunk needs to be split again.
if len(joined_text) < 100:
text_lines.append(joined_text)
else:
subparts = re.split('( )', joined_text)
temp_string = ""
temp_array = []
for part in subparts:
temp_string = temp_string + part
if len(temp_string) > 80:
temp_array.append(temp_string)
temp_string = ""
#append final part
temp_array.append(temp_string)
text_lines.extend(temp_array)
return text_lines
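    # Illustrative behaviour (a hedged example, not from the original code): a
    # 150-character sentence with a comma near its middle comes back as two
    # strings, each under 100 characters, so every chunk stays within the
    # length limit the Google TTS endpoint accepts per request.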
def downloadAudioFile(self,text_lines, language, audio_file):
"""
        Downloads an MP3 from Google Translate based on the given text and
        language code.
"""
for idx, line in enumerate(text_lines):
query_params = {"tl": language, "q": line, "total": len(text_lines), "idx": idx}
url = "http://translate.google.com/translate_tts?ie=UTF-8" + "&" + self.unicode_urlencode(query_params)
headers = {"Host":"translate.google.com", "User-Agent":"Mozilla 5.10"}
req = urllib2.Request(url, '', headers)
sys.stdout.write('.')
sys.stdout.flush()
if len(line) > 0:
try:
response = urllib2.urlopen(req)
audio_file.write(response.read())
time.sleep(.5)
except urllib2.HTTPError as e:
print ('%s' % e)
print 'Saved MP3 to %s' % (audio_file.name)
audio_file.close()
def unicode_urlencode(self,params):
"""
Encodes params to be injected in an url.
"""
if isinstance(params, dict):
params = params.items()
return urllib.urlencode([(k, isinstance(v, unicode) and v.encode('utf-8') or v) for k, v in params])
def testStuff():
import soco
talker = TalkerPlugin(soco,'/Users/Jeff/BitBucket/Personal/Python/SonosExperiments/AudioMessages/talkOutput.mp3',
'x-file-cifs://MACBOOKPRO-5A98/AudioMessages/talkOutput.mp3')
talker.talk(volume='75')
if __name__ == '__main__':
testStuff()
``` |
{
"source": "jlmelville/passaas",
"score": 4
} |
#### File: passaas/models/util.py
```python
def sanitize_id(int_id):
"""
Return int_id as either an integer or None, if it is not convertible.
For use with model find function where either integer or None is acceptable, but
input from the controller is either a string representation of the integer or None.
This handles the conversion and swallows the exception, making for a more "fluent"
interface.
Arguments:
int_id {String or None} -- An id to convert. Can be None.
Returns:
{int or None} -- The converted id.
"""
try:
return int(int_id)
except TypeError:
return None
def find(objects, key, value):
"""
Return a list of all items in objects where the key attribute is equal to value.
If value is None, objects is returned. If no objects match, an empty list is
returned.
"""
if value is None:
return objects
return [o for o in objects if getattr(o, key) == value]
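# Illustrative behaviour (doctest-style, using a hypothetical `users` list of
# objects with a `uid` attribute; not part of the original module):
#   sanitize_id("7")          -> 7
#   sanitize_id(None)         -> None
#   find(users, "uid", None)  -> users (no filtering when value is None)
#   find(users, "uid", 7)     -> only the users whose uid equals 7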
```
#### File: tests/bad_passwd_config/__init__.py
```python
import os
import shutil
import pytest
import webtest as wt
from passaas.app import create_app
from passaas.config import TestConfig, Config
class MissingPasswdConfig(TestConfig):
"""Configuration pointing to a missing passwd file."""
# https://github.com/jarus/flask-testing/issues/21
# and https://stackoverflow.com/a/28139033
PRESERVE_CONTEXT_ON_EXCEPTION = False
PASSWD_PATH = os.path.abspath(
# this file purposely doesn't exist
os.path.join(Config.PROJECT_ROOT, "tests", "test_data", "missing_passwd")
)
@pytest.yield_fixture(scope="function")
def missing_passwd_app():
"""Application with a missing passwd file."""
_app = create_app(MissingPasswdConfig)
ctx = _app.app.test_request_context()
ctx.push()
yield _app.app
ctx.pop()
@pytest.fixture(scope="function")
def test_missing_passwd_app(missing_passwd_app):
"""Webtest app with a missing passwd file."""
return wt.TestApp(missing_passwd_app)
class MalformedPasswdTooFewElementsConfig(TestConfig):
"""Configuration for a malformed passwd file with too few elements in a line."""
PRESERVE_CONTEXT_ON_EXCEPTION = False
PASSWD_PATH = os.path.abspath(
os.path.join(
Config.PROJECT_ROOT, "tests", "test_data", "malformed_passwd_too_few"
)
)
@pytest.yield_fixture(scope="function")
def malformed_passwd_too_few_elements_app():
"""Application with a malformed passwd file."""
_app = create_app(MalformedPasswdTooFewElementsConfig)
ctx = _app.app.test_request_context()
ctx.push()
yield _app.app
ctx.pop()
@pytest.fixture(scope="function")
def test_malformed_passwd_too_few_elements_app(malformed_passwd_too_few_elements_app):
"""Webtest app with a malformed passwd file."""
return wt.TestApp(malformed_passwd_too_few_elements_app)
class MalformedPasswdTooManyElementsConfig(TestConfig):
"""Configuration for a malformed passwd file with too many elements in a line."""
PRESERVE_CONTEXT_ON_EXCEPTION = False
PASSWD_PATH = os.path.abspath(
os.path.join(
Config.PROJECT_ROOT, "tests", "test_data", "malformed_passwd_too_many"
)
)
@pytest.yield_fixture(scope="function")
def malformed_passwd_too_many_elements_app():
"""Application with a malformed passwd file with too many elements in a line."""
_app = create_app(MalformedPasswdTooManyElementsConfig)
ctx = _app.app.test_request_context()
ctx.push()
yield _app.app
ctx.pop()
@pytest.fixture(scope="function")
def test_malformed_passwd_too_many_elements_app(malformed_passwd_too_many_elements_app):
"""Webtest app with a malformed passwd file with too many elements in a line."""
return wt.TestApp(malformed_passwd_too_many_elements_app)
class MalformedPasswdBadUidConfig(TestConfig):
"""Configuration pointing to a malformed passwd file with a non-numeric uid."""
PRESERVE_CONTEXT_ON_EXCEPTION = False
PASSWD_PATH = os.path.abspath(
os.path.join(
Config.PROJECT_ROOT, "tests", "test_data", "malformed_passwd_bad_uid"
)
)
@pytest.yield_fixture(scope="function")
def malformed_passwd_bad_uid_app():
"""Application with a bad uid passwd file."""
_app = create_app(MalformedPasswdBadUidConfig)
ctx = _app.app.test_request_context()
ctx.push()
yield _app.app
ctx.pop()
@pytest.fixture(scope="function")
def test_malformed_passwd_bad_uid_app(malformed_passwd_bad_uid_app):
"""Webtest app with a bad uid passwd file."""
return wt.TestApp(malformed_passwd_bad_uid_app)
class MalformedPasswdBadGidConfig(TestConfig):
"""Configuration pointing to a malformed passwd file with a non-numeric gid."""
PRESERVE_CONTEXT_ON_EXCEPTION = False
PASSWD_PATH = os.path.abspath(
os.path.join(
Config.PROJECT_ROOT, "tests", "test_data", "malformed_passwd_bad_gid"
)
)
@pytest.yield_fixture(scope="function")
def malformed_passwd_bad_gid_app():
"""Application with a bad gid passwd file."""
_app = create_app(MalformedPasswdBadGidConfig)
ctx = _app.app.test_request_context()
ctx.push()
yield _app.app
ctx.pop()
@pytest.fixture(scope="function")
def test_malformed_passwd_bad_gid_app(malformed_passwd_bad_gid_app):
"""Webtest app with a bad gid passwd file."""
return wt.TestApp(malformed_passwd_bad_gid_app)
class EmptyPasswdConfig(TestConfig):
"""Configuration pointing to an empty passwd file."""
PRESERVE_CONTEXT_ON_EXCEPTION = False
PASSWD_PATH = os.path.abspath(
os.path.join(Config.PROJECT_ROOT, "tests", "test_data", "empty_passwd")
)
@pytest.yield_fixture(scope="function")
def empty_passwd_app():
"""Application with an empty passwd file."""
_app = create_app(EmptyPasswdConfig)
ctx = _app.app.test_request_context()
ctx.push()
yield _app.app
ctx.pop()
@pytest.fixture(scope="function")
def test_empty_passwd_app(empty_passwd_app):
"""Webtest app with an empty passwd file."""
return wt.TestApp(empty_passwd_app)
```
#### File: passaas/tests/conftest.py
```python
import os
import shutil
import pytest
import webtest as wt
from passaas.app import create_app
from passaas.config import TestConfig, Config
# These modules contain extra configuration for testing misconfigured apps and map to
# directories in the test folder. You could store them all in this file, but it leads
# to a large file that is hard to navigate.
pytest_plugins = ["bad_passwd_config", "bad_group_config"]
@pytest.yield_fixture(scope="function")
def app():
"""Application for the tests."""
_app = create_app(TestConfig)
ctx = _app.app.test_request_context()
ctx.push()
yield _app.app
ctx.pop()
@pytest.fixture(scope="function")
def testapp(app):
"""Webtest app."""
return wt.TestApp(app)
class PasswdUpdateConfig(TestConfig):
"""Configuration pointing to a passwd file intended to be updated between calls."""
PASSWD_PATH = os.path.abspath(
os.path.join(Config.PROJECT_ROOT, "tests", "test_data", "passwd4")
)
@pytest.yield_fixture(scope="function")
def passwd_update_app():
"""Application with a passwd file intended to be updated."""
_app = create_app(PasswdUpdateConfig)
ctx = _app.app.test_request_context()
ctx.push()
yield _app.app
# Clean up after test
dest = _app.app.config["PASSWD_PATH"]
dest_dir = os.path.dirname(dest)
src = os.path.abspath(os.path.join(dest_dir, "passwd4.orig"))
shutil.copyfile(src, dest)
ctx.pop()
@pytest.fixture(scope="function")
def test_passwd_update_app(passwd_update_app):
"""Webtest app with a passwd file intended to be updated."""
return wt.TestApp(passwd_update_app)
# Groups
class GroupUpdateConfig(TestConfig):
"""Configuration pointing to a group file intended to be updated between calls."""
GROUP_PATH = os.path.abspath(
os.path.join(Config.PROJECT_ROOT, "tests", "test_data", "group4")
)
@pytest.yield_fixture(scope="function")
def group_update_app():
"""Application with a passwd file intended to be updated."""
_app = create_app(GroupUpdateConfig)
ctx = _app.app.test_request_context()
ctx.push()
yield _app.app
# Clean up after test
dest = _app.app.config["GROUP_PATH"]
dest_dir = os.path.dirname(dest)
src = os.path.abspath(os.path.join(dest_dir, "group4.orig"))
shutil.copyfile(src, dest)
ctx.pop()
@pytest.fixture(scope="function")
def test_group_update_app(group_update_app):
"""Webtest app with a group file intended to be updated."""
return wt.TestApp(group_update_app)
``` |
{
"source": "jlmhackaton/Patient2Vec",
"score": 2
} |
#### File: jlmhackaton/Patient2Vec/main.py
```python
from Patient2Vec import *
TIME_COL_HDR = 'Time_inED'
SUBJECT_COL_HDR = 'SUBJECT_ID'
SUBJECT_COLTAG_HDR = 'HADM_ID'
TAG_HDR = 'INTERPRETATION'
def PrepareDataset(features_data_path, \
tags_data_path,\
BATCH_SIZE = 40, \
seq_len = 10, \
pred_len = 1, \
train_propotion = 0.7, \
valid_propotion = 0.2,
shuffle=False):
""" Prepare training and testing datasets and dataloaders.
Convert admissions table to training and testing dataset.
The vertical axis of admissions_pd is the admission time axis and the horizontal axis
is the features axis.
Args:
admissions_pd: a Matrix containing spatial-temporal speed data for a network
seq_len: length of input sequence
pred_len: length of predicted sequence
Returns:
Training dataloader
Testing dataloader
"""
admissions_pd = pd.read_csv(features_data_path) #header = None, names=["Eloss", "entropy", "loss", "tErr", "rotErr", "r1", "r2", "r3", "tx", "ty", "tz" ], index_col=False, skiprows=0, delimiter=" "
admissions_byID = admissions_pd.groupby(SUBJECT_COL_HDR)
tag_pd = pd.read_csv(tags_data_path)
tags_byID = tag_pd.groupby(SUBJECT_COLTAG_HDR)
max_admin_per_patient = 0
features_list = []
tag_list = []
for group, tag_it in zip(admissions_byID, tags_byID):
date_sorted = group[1].sort_values(by=[TIME_COL_HDR])
features = date_sorted[admissions_pd.columns[2:]].values
features_list.append(features)
max_admin_per_patient = max(max_admin_per_patient, features.shape[0])
tag = tag_it[1][[TAG_HDR]].values[0]
tag_list.append(tag)
print('Maximum number of admissions per patient is ' + str(max_admin_per_patient))
sample_size = len(features_list)
features_nd = np.zeros([sample_size,max_admin_per_patient, features_list[0].shape[-1]])
for idx, patient_adm in enumerate(features_list):
h = patient_adm.shape[0]
features_nd[idx, 0:h] = patient_adm
# shuffle = True
if shuffle: # doesn't work !! need to debug
# shuffle and split the dataset to training and testing datasets
print('Start to shuffle and split dataset ...')
index = np.arange(sample_size, dtype = int)
np.random.seed(1024)
np.random.shuffle(index)
features_nd = features_nd[index]
tag_list = np.array(tag_list)
tag_list = tag_list[index]
if False:
X_last_obsv = X_last_obsv[index]
Mask = Mask[index]
Delta = Delta[index]
features_list = np.expand_dims(features_list, axis=1)
X_last_obsv = np.expand_dims(X_last_obsv, axis=1)
Mask = np.expand_dims(Mask, axis=1)
Delta = np.expand_dims(Delta, axis=1)
dataset_agger = np.concatenate((features_list, X_last_obsv, Mask, Delta), axis = 1)
train_index = int(np.floor(sample_size * train_propotion))
valid_index = int(np.floor(sample_size * (train_propotion + valid_propotion)))
train_data, train_label = features_nd[:train_index], tag_list[:train_index]
valid_data, valid_label = features_nd[train_index:valid_index], tag_list[train_index:valid_index]
test_data, test_label = features_nd[valid_index:], tag_list[valid_index:]
train_data, train_label = torch.Tensor(train_data), torch.Tensor(train_label)
valid_data, valid_label = torch.Tensor(valid_data), torch.Tensor(valid_label)
test_data, test_label = torch.Tensor(test_data), torch.Tensor(test_label)
train_dataset = utils.TensorDataset(train_data, train_label)
valid_dataset = utils.TensorDataset(valid_data, valid_label)
test_dataset = utils.TensorDataset(test_data, test_label)
train_dataloader = utils.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
valid_dataloader = utils.DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
test_dataloader = utils.DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
# X_mean = np.mean(features_list, axis = 0)
print('Finished preprocessing')
return train_dataloader, valid_dataloader, test_dataloader
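# Illustrative call (a hedged sketch; the CSV paths match the ones used in
# __main__ below and are assumptions about the local data layout):
#   train_dl, valid_dl, test_dl = PrepareDataset('./data/input.csv',
#                                                './data/output.csv',
#                                                BATCH_SIZE=1, shuffle=False)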
def Train_Model(model, train_dataloader, valid_dataloader, batch_size, num_epochs = 300, patience=10, min_delta = 0.00001):
print('Model Structure: ', model)
print('Start Training ... ')
output_last = True
#model.cuda()
#
# if (type(model) == nn.modules.container.Sequential):
# output_last = model[-1].output_last
# print('Output type dermined by the last layer')
# else:
# output_last = model.output_last
# print('Output type dermined by the model')
# loss_MSE = torch.nn.MSELoss()
# loss_L1 = torch.nn.L1Loss()
criterion = nn.BCELoss()
learning_rate = 0.0001
optimizer = torch.optim.RMSprop(model.parameters(), lr = learning_rate, alpha=0.99)
use_gpu = torch.cuda.is_available()
# interval = 100
losses_train = []
losses_valid = []
losses_epochs_train = []
losses_epochs_valid = []
cur_time = time.time()
pre_time = time.time()
# Variables for Early Stopping
is_best_model = 0
patient_epoch = 0
for epoch in range(num_epochs):
trained_number = 0
valid_dataloader_iter = iter(valid_dataloader)
losses_epoch_train = []
losses_epoch_valid = []
for data in train_dataloader:
inputs, labels = data
if inputs.shape[0] != batch_size:
continue
if use_gpu:
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
model.zero_grad()
# TODO: extract them
inputs_other = []#(age, gender, previous_hospitalization_history)
outputs, alpha, beta = model(inputs, inputs_other, batch_size)
# if output_last:
# # loss_train = loss_MSE(torch.squeeze(outputs), torch.squeeze(labels))
loss_train = get_loss(outputs, labels, criterion=criterion, mtr=beta)
# else:
# full_labels = torch.cat((inputs[:,1:,:], labels), dim = 1)
# loss_train = loss_MSE(outputs, full_labels)
losses_train.append(loss_train.data)
losses_epoch_train.append(loss_train.data)
optimizer.zero_grad()
loss_train.backward()
optimizer.step()
# validation
try:
inputs_val, labels_val = next(valid_dataloader_iter)
except StopIteration:
valid_dataloader_iter = iter(valid_dataloader)
inputs_val, labels_val = next(valid_dataloader_iter)
if use_gpu:
inputs_val, labels_val = Variable(inputs_val.cuda()), Variable(labels_val.cuda())
else:
inputs_val, labels_val = Variable(inputs_val), Variable(labels_val)
model.zero_grad()
# TODO: extract them
inputs_other = [] # (age, gender, previous_hospitalization_history)
inputs_other_val = [] # (age, gender, previous_hospitalization_history)
outputs_val, alpha_val, beta_val = model(inputs_val, inputs_other_val, batch_size)
# if output_last:
# # loss_valid = loss_MSE(torch.squeeze(outputs_val), torch.squeeze(labels_val))
loss_valid = get_loss(outputs_val, labels_val, criterion=criterion, mtr=beta_val)
# else:
# full_labels_val = torch.cat((inputs_val[:,1:,:], labels_val), dim = 1)
# loss_valid = loss_MSE(outputs_val, full_labels_val)
losses_valid.append(loss_valid.data)
losses_epoch_valid.append(loss_valid.data)
# output
trained_number += 1
avg_losses_epoch_train = sum(losses_epoch_train).cpu().numpy() / float(len(losses_epoch_train))
avg_losses_epoch_valid = sum(losses_epoch_valid).cpu().numpy() / float(len(losses_epoch_valid))
losses_epochs_train.append(avg_losses_epoch_train)
losses_epochs_valid.append(avg_losses_epoch_valid)
# Early Stopping
if epoch == 0:
is_best_model = 1
best_model = model
min_loss_epoch_valid = 10000.0
if avg_losses_epoch_valid < min_loss_epoch_valid:
min_loss_epoch_valid = avg_losses_epoch_valid
else:
if min_loss_epoch_valid - avg_losses_epoch_valid > min_delta:
is_best_model = 1
best_model = model
torch.save(model.state_dict(), 'best_model.pt')
min_loss_epoch_valid = avg_losses_epoch_valid
patient_epoch = 0
else:
is_best_model = 0
patient_epoch += 1
if patient_epoch >= patience:
print('Early Stopped at Epoch:', epoch)
break
# Print training parameters
cur_time = time.time()
print('Epoch: {}, train_loss: {}, valid_loss: {}, time: {}, best model: {}'.format( \
epoch, \
np.around(avg_losses_epoch_train, decimals=8),\
np.around(avg_losses_epoch_valid, decimals=8),\
np.around([cur_time - pre_time] , decimals=2),\
is_best_model) )
pre_time = cur_time
return best_model, [losses_train, losses_valid, losses_epochs_train, losses_epochs_valid]
def Test_Model(model, test_dataloader, batch_size):
# if (type(model) == nn.modules.container.Sequential):
# output_last = model[-1].output_last
# else:
# output_last = model.output_last
inputs, labels = next(iter(test_dataloader))
cur_time = time.time()
pre_time = time.time()
use_gpu = torch.cuda.is_available()
# loss_MSE = torch.nn.MSELoss()
# loss_L1 = torch.nn.MSELoss()
criterion = nn.BCELoss()
tested_batch = 0
# losses_mse = []
losses_bce = []
# losses_l1 = []
MAEs = []
MAPEs = []
for data in test_dataloader:
inputs, labels = data
if inputs.shape[0] != batch_size:
continue
if use_gpu:
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
# TODO: extract them
inputs_other = [] # (age, gender, previous_hospitalization_history)
outputs, alpha, beta = model(inputs, inputs_other, batch_size)
# loss_MSE = torch.nn.MSELoss()
# loss_L1 = torch.nn.L1Loss()
loss_bce = get_loss(outputs, labels, criterion, beta)
# if output_last:
# loss_mse = loss_MSE(torch.squeeze(outputs), torch.squeeze(labels))
# loss_l1 = loss_L1(torch.squeeze(outputs), torch.squeeze(labels))
MAE = torch.mean(torch.abs(torch.squeeze(outputs) - torch.squeeze(labels)))
MAPE = torch.mean(torch.abs(torch.squeeze(outputs) - torch.squeeze(labels)) / torch.squeeze(labels))
# else:
# loss_mse = loss_MSE(outputs[:,-1,:], labels)
# loss_l1 = loss_L1(outputs[:,-1,:], labels)
# MAE = torch.mean(torch.abs(outputs[:,-1,:] - torch.squeeze(labels)))
# MAPE = torch.mean(torch.abs(outputs[:,-1,:] - torch.squeeze(labels)) / torch.squeeze(labels))
# losses_mse.append(loss_mse.data)
# losses_l1.append(loss_l1.data)
losses_bce.append(loss_bce.data)
MAEs.append(MAE.data)
MAPEs.append(MAPE.data)
tested_batch += 1
if tested_batch % 1000 == 0:
cur_time = time.time()
print('Tested #: {}, loss_bce: {}, time: {}'.format( \
tested_batch * batch_size, \
np.around([loss_bce.data[0]], decimals=8), \
# np.around([loss_mse.data[0]], decimals=8), \
np.around([cur_time - pre_time], decimals=8) ) )
pre_time = cur_time
losses_bce = np.array(losses_bce)
# losses_l1 = np.array(losses_l1)
# losses_mse = np.array(losses_mse)
MAEs = np.array(MAEs)
MAPEs = np.array(MAPEs)
mean_l1 = np.mean(losses_bce)
std_l1 = np.std(losses_bce)
MAE_ = np.mean(MAEs)
MAPE_ = np.mean(MAPEs) * 100
print('Tested: bce_mean: {}, bce_std: {}, MAE: {} MAPE: {}'.format(mean_l1, std_l1, MAE_, MAPE_))
return [losses_bce, mean_l1, std_l1]
if __name__ == "__main__":
# TODO: pick demographic features and remove them from feature. remove the comment that ignores them
# TODO: add the dimension of the added demographic features to the last linear layer
# TODO: remove the repeat trick
# TODO: adapt the test/train code
# TODO: save & load the model and data
##########################################################3
########### configurations
###########################################################
features_datapath = './data/input.csv'
tags_datapath = './data/output.csv'
load_model = False
batch_size = 1
shuffle = False # shuffle dataset
############################################################
train_dataloader, valid_dataloader, test_dataloader = PrepareDataset(features_data_path=features_datapath,
tags_data_path=tags_datapath, BATCH_SIZE=batch_size, shuffle =shuffle)
inputs, labels = next(iter(train_dataloader))
[batch_size, seq_len, input_dim] = inputs.size()
output_dim = labels.shape[-1]
pat2vec = Patient2Vec(input_size=input_dim, hidden_size=256, n_layers=1, att_dim=1, initrange=1,
output_size=output_dim, rnn_type='GRU', seq_len=seq_len, pad_size=2,
n_filters=3, bi=True)
if not load_model:
best_grud, losses_grud = Train_Model(pat2vec, train_dataloader, valid_dataloader, num_epochs = 40, batch_size=batch_size)
[losses_bce, mean_l1, std_l1] = Test_Model(best_grud, test_dataloader, batch_size=batch_size)
else:
pat2vec.load_state_dict(torch.load('best_model.pt'))
# pat2vec.eval()
[losses_mse, mean_l1, std_l1] = Test_Model(pat2vec, test_dataloader, batch_size=batch_size)
``` |
{
"source": "jlmilton/C2319_cart",
"score": 2
} |
#### File: C2319_cart/home/views.py
```python
from django.shortcuts import render , get_object_or_404, redirect
from django.http import HttpResponse
from .models import About
from django.contrib import messages
from django.views.generic import ListView, DetailView
from django.utils import timezone
from .models import (
Item, Order, OrderItem
)
# Create your views here.
def home(request):
return render(request, '../templates/home.html', {'title': 'Home'})
def about(request):
return render(request, '../templates/about.html', {'title': 'About'})
def milestone_17(request):
return render(request, '../templates/milestone_17.html', {'title': 'Milestone 17'})
def forsale(request):
return render(request, '../templates/forsale.html', {'title': 'For Sale'})
class HomeView(ListView):
model= Item
template_name = "home.html"
class ProductView(DetailView):
model = Item
template_name = "product.html"
def add_to_cart(request, pk):
item = get_object_or_404(Item,pk = pk)
order_item,created= OrderItem.objects.get_or_create(item= item, user=request.user, ordered = False)
order_qs = Order.objects.filter(user = request.user, ordered = False)
if order_qs.exists():
order = order_qs[0]
if order.items.filter(item__pk=item.pk).exists():
order_item.quantity += 1
order_item.save()
messages.info(request, "Added Quantity Item!")
return redirect("home:product", pk = pk)
else:
order.items.add(order_item)
messages.info(request,"ITem added to your cart")
return redirect("home:product",pk=pk)
else:
ordered_date=timezone.now()
        order=Order.objects.create(user=request.user,ordered_date=ordered_date)
order.items.add(order_item)
messages.info(request,"items added to your cart")
return redirect("home:product",pk=pk)
def remove_from_cart(request,pk):
    item = get_object_or_404(Item, pk=pk)
order_qs=Order.objects.filter(user=request.user,ordered=False)
if order_qs.exists():
order=order_qs[0]
if order.items.filter(item__pk= item.pk).exists():
order_item=OrderItem.objects.filter(item=item,user=request.user,ordered=False)[0]
order_item.delete()
messages.info(request,"Item \""+ order_item.item.item_name+"\"removed from your cart")
return redirect("home:product")
else:
messages.info(request,"this item not in your cart")
return redirect("home:product",pk=pk)
else:
messages.info(request,"you do not have an order")
return redirect("home:product",pk=pk)
``` |
{
"source": "JLMin/pyping",
"score": 3
} |
#### File: pyping/pping/result.py
```python
import socket
import statistics
from .session import Response
class Result:
__slots__ = ['responses', 'times', 'all_times',
'hostname', 'hostalias', 'iplist',
'sent', 'recv', 'lost',
'max', 'min', 'avg', 'stdev']
def __init__(self, addr, resps):
(self.hostname,
self.hostalias,
self.iplist) = Result._get_host_info(addr)
self.responses = resps
self.times = [r.rtt for r in resps if r.status == Response.OK]
self.all_times = [r.rtt if r.status == Response.OK else None
for r in resps]
self.sent = len(self.responses)
self.recv = len(self.times)
self.lost = self.sent - self.recv
self.max = max(self.times) if self.times else 0
self.min = min(self.times) if self.times else 0
self.avg = statistics.mean(self.times) if self.times else 0
self.stdev = statistics.pstdev(self.times) if self.times else 0
def __str__(self):
if self.recv > 0:
return '\n'.join(Result._prettify(r) for r in self.responses) + (
f'\n\nPing statistics for {self.hostname}:\n'
f'\tPackets: Sent = {self.sent}, '
f'Received = {self.recv}, '
f'Lost = {self.lost} '
f'({round(self.lost / self.sent * 100)}% loss)\n'
f'Approximate round trip times in milli-seconds:\n'
f'\tAverage = {round(self.avg * 1000)}ms, '
f'Minimum = {round(self.min * 1000)}ms, '
f'Maximum = {round(self.max * 1000)}ms, '
f'Stdev = {round(self.stdev * 1000,1)}'
)
else:
return self.responses[0].error
def __repr__(self):
if self.recv > 0:
return (
f'{self.__class__.__name__} of [{self.hostname}] > '
f'{round(self.avg * 1000)}ms ~ {round(self.stdev * 1000,1)} '
f'[{self.recv}/{self.sent}, L:{self.lost}]'
)
else:
return (
f'{self.__class__.__name__} of [{self.hostname}] error > '
f'{self.responses[0].error}'
)
def __getitem__(self, key):
return self.responses[key]
@staticmethod
def _get_host_info(addr):
try:
return socket.gethostbyname_ex(addr)
except OSError:
return addr, [], []
@staticmethod
def _prettify(resp):
if resp.status == Response.OK:
return (
f'Reply from {resp.src}: bytes={resp.size} '
f'time={round(resp.rtt * 1000)}ms TTL={resp.ttl}'
)
else:
return resp.error
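# Illustrative usage (a hedged sketch; `resps` stands for a list of Response
# objects produced by a finished ping session and is not defined here):
#   result = Result('example.com', resps)
#   print(result)    # per-reply lines plus the statistics summary
#   result.avg       # mean round-trip time of the successful replies
#   result[0]        # __getitem__ exposes the first Response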
```
#### File: pyping/tests/test_session.py
```python
import pytest
from pping.session import Request, Response
class TestSession:
@pytest.mark.parametrize(
'address', (
'-1.0.0.0',
'2192.168.127.12',
'?'
)
)
def test_ping_returns_only_one_result_with_invalid_address(self, address):
# repeat > 1, assert len(result) == 1
result = Request.ping(address=address, repeat=4,
interval=1, size=0,
timeout=1, ttl=64)
assert isinstance(result, list)
assert len(result) == 1
assert result[0].status == Response.ERROR
@pytest.mark.parametrize(
'address, repeat, interval, timeout', (
('127.0.0.1', 4, 0, 0.5),
)
)
def test_reliable_address(self, address, repeat, interval, timeout):
result = Request.ping(address=address, repeat=repeat,
interval=interval, size=32,
timeout=timeout, ttl=64)
assert isinstance(result, list)
assert len(result) == repeat
assert result[0].status == Response.OK
@pytest.mark.parametrize(
'address, repeat, interval, timeout', (
('random.jmsjms.com', 4, 0, 0.2),
)
)
def test_timed_out(self, address, repeat, interval, timeout):
# This is a ping-able address, but I'm pretty sure we won't
# receive reply from it.
# This test will fail if somehow you do get response from it.
result = Request.ping(address=address, repeat=repeat,
interval=interval, size=32,
timeout=timeout, ttl=64)
assert isinstance(result, list)
assert len(result) == repeat
assert result[0].status == Response.TIMEDOUT
``` |
{
"source": "jlmjkfd/CS760_2021S2_PA4",
"score": 3
} |
#### File: CS760_2021S2_PA4/2_TOT/visualize_pnas.py
```python
import fileinput
import random
import scipy.special
import math
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
from scipy.stats import beta
import pprint, pickle
def VisualizeTopics(phi, words, num_topics, viz_threshold=9e-3):
phi_viz = np.transpose(phi)
words_to_display = ~np.all(phi_viz <= viz_threshold, axis=1)
words_viz = [words[i] for i in range(len(words_to_display)) if words_to_display[i]]
phi_viz = phi_viz[words_to_display]
fig, ax = plt.subplots()
heatmap = plt.pcolor(phi_viz, cmap=plt.cm.Blues, alpha=0.8)
plt.colorbar()
#fig.set_size_inches(8, 11)
ax.grid(False)
ax.set_frame_on(False)
ax.set_xticks(np.arange(phi_viz.shape[1]) + 0.5, minor=False)
ax.set_yticks(np.arange(phi_viz.shape[0]) + 0.5, minor=False)
ax.invert_yaxis()
ax.xaxis.tick_top()
#plt.xticks(rotation=45)
for t in ax.xaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
for t in ax.yaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
column_labels = words_viz #['Word ' + str(i) for i in range(1,1000)]
row_labels = ['Topic ' + str(i) for i in range(1,num_topics+1)]
ax.set_xticklabels(row_labels, minor=False)
ax.set_yticklabels(column_labels, minor=False)
plt.show()
def VisualizeEvolution(psi):
xs = np.linspace(0, 1, num=1000)
fig, ax = plt.subplots()
for i in range(len(psi)):
ys = [math.pow(1-x, psi[i][0]-1) * math.pow(x, psi[i][1]-1) / scipy.special.beta(psi[i][0],psi[i][1]) for x in xs]
ax.plot(xs, ys, label='Topic ' + str(i+1))
ax.legend(loc='best', frameon=False)
plt.show()
def main():
resultspath = '../results/pnas_tot/'
tot_pickle_path = resultspath + 'pnas_tot.pickle'
tot_pickle = open(tot_pickle_path, 'rb')
par = pickle.load(tot_pickle)
VisualizeTopics(par['n'], par['word_token'], par['T'])
VisualizeEvolution(par['psi'])
if __name__ == "__main__":
main()
``` |
{
"source": "jlmonge/FlaskDatabaseQuery",
"score": 3
} |
#### File: jlmonge/FlaskDatabaseQuery/analytic_functions.py
```python
from datetime import date
from decimal import Decimal, DecimalException
# ----- check_float() -----
# Helper function for analytics, used for input validation.
# Passes in the value to be tested.
# Makes no external function calls.
# Checks the value can be expressed as a floating point number.
# Returns TRUE if valid, FALSE if invalid.
# -------------
def check_float(potential_float):
try:
float(potential_float)
return True
except ValueError:
return False
# --------------------------
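# Illustrative behaviour (doctest-style, not part of the original file):
#   check_float("12.5")  -> True
#   check_float("12,5")  -> False  (a comma is not a valid float literal)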
# ----- most_funded_category_per_year() -----
# Helper function for analytics, namely... well, analytics_most_funded_category()
# Passes in the year to test for, and the datafile to be read.
# Makes no external function calls.
# Reads each entry, finds the pledged value, and increments it to the corresponding category if the year is correct.
# Returns a list containing the category with the highest amount pledged for the requested year, and that amount.
# ---------------------
def most_funded_category_per_year(year , file_data):
category_dict = { # key=main category, value= total amount pledged for the year
'Games':0, 'Design': 0, 'Technology': 0, 'Film & Video': 0, 'Music': 0,
'Publishing': 0, 'Fashion': 0, 'Food': 0, 'Art': 0,
'Comics': 0, 'Photography': 0, 'Theater': 0, 'Crafts': 0,
'Journalism': 0, 'Dance': 0}
result = []
if len(file_data) == 0 or file_data == [{}]:
return result
for key in file_data:
if key['main_category'] in category_dict.keys():
if check_float(key['pledged']):
str = key['launched']
if str[0:4] == year:
category_dict[key['main_category']] += float(key['pledged'])
list_of_values = category_dict.values()
max_value = max(list_of_values)
result.append(max_value)
max_key = max(category_dict, key=category_dict.get)
result.append(max_key)
return result
# -------------------------------------------
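# Illustrative behaviour (hypothetical rows; note the amount comes first, then the category):
#   rows = [{'main_category': 'Games', 'pledged': '150', 'launched': '2015-06-01 12:00:00'},
#           {'main_category': 'Music', 'pledged': '40',  'launched': '2015-07-01 12:00:00'}]
#   most_funded_category_per_year('2015', rows)  -> [150.0, 'Games']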
# ----- bad_date() -----
# Helper function for analytics, used for input validation.
# Passes in the date to be read, expected to be in the format "yyyy-mm-dd hh:mm:ss", or at least "yyyy-mm-dd"
# Makes no external function calls.
# Reads the date and checks it against various criteria:
# - A project could not be launched before 2008, when Kickstarter was created.
# - A project should not be made after the year 3000, when humans have learned ascension, computers have become obsolete, and the Earth has been reclaimed by nature.
# - A project's month should not be less than 1, for January, or greater than 12, for December.
# - A project's day should not be less than 1 or greater than 31, because those days do not exist.
# Returns a boolean of TRUE indicating invalid date, or FALSE if correct.
# -----------
def bad_date(date):
if(len(date) < 10):
return True
try:
yearNum = int(date[0:4])
monthNum = int(date[5:7])
dayNum = int(date[8:10])
except:
return True
if yearNum < 2008 or yearNum > 3000:
return True
if monthNum < 1 or monthNum > 12:
return True
if dayNum < 1 or dayNum > 31:
return True
return False
# -----------------------
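# Illustrative behaviour (doctest-style, not part of the original file):
#   bad_date('2015-06-15 12:30:00')  -> False  (acceptable launch date)
#   bad_date('2007-01-01')           -> True   (before Kickstarter existed)
#   bad_date('garbage')              -> True   (too short to hold yyyy-mm-dd)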
# ----- average_length_ks() -----
# Helper function for analytics, namely make_length_analytic()
# Passes in the datafile to be read.
# Calls on bad_date() for input validation.
# Reads each entry, collects the start and end date, adds the difference to the entry's year.
# Returns the completed list of years, list of average kickstarter lengths for those years, and the total average across all years.
# ---------------
def average_length_ks(pyfile):
labels = [] #labels for each datapoint
returnData = [] #datapoints(average length per year)
totalAverage = 0
totalDates = 0
dataByMonth = [] #
#listValues = ["year",0.0,0]#"year or total", sum of lengths, number of values
if len(pyfile) == 0 or pyfile == [{}]:
return labels,returnData,totalAverage
for i in pyfile: # For every entry,
if bad_date(i["launched"]) or bad_date(i["deadline"]): # Check if dates are valid,
continue
startDate = date(int(i["launched"][0:4]),int(i["launched"][5:7]),int(i["launched"][8:10])) # Gather the starting time
endDate = date(int(i["deadline"][0:4]),int(i["deadline"][5:7]),int(i["deadline"][8:10])) # and the ending time,
timeBetween = endDate - startDate # Find the difference,
if timeBetween.days < 0:
continue
yearNotInList = True
for val in range(len(dataByMonth)): # Then for all currently collected data,
if dataByMonth[val][0] == i["launched"][0:4]: # Find the year,
yearNotInList = False
dataByMonth[val][1] = dataByMonth[val][1] + timeBetween.days # add this entry's time to the year's total,
dataByMonth[val][2] = dataByMonth[val][2] + 1 # and increment the project count.
if yearNotInList:
dataByMonth.append([i["launched"][0:4],timeBetween.days,1]) # If year is missing, these are the first values for it.
#sort by year
for iteration in dataByMonth: # For every year in the collected data,
labels.append(iteration[0]) # Add the year to labels list,
returnData.append(iteration[1]/iteration[2]) # Add that year's (total length / total projects) average to returnData,
totalDates = iteration[2] + totalDates # and calculate the totals.
totalAverage = iteration[1] + totalAverage
if totalDates == 0:#error check for if there were only bad kickstarters passed in to prevent divide by zero
totalAverage = 0
else:
totalAverage = totalAverage/totalDates
# Finally, return everything.
return labels, returnData,totalAverage
# --------------------------------
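# Illustrative behaviour (one hypothetical row describing a 30-day campaign launched in 2015):
#   average_length_ks([{'launched': '2015-01-01 00:00:00', 'deadline': '2015-01-31 00:00:00'}])
#   -> (['2015'], [30.0], 30.0)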
# ----- countProjects() -----
# Helper function for analytics, namely popularMonth().
# Passes in the datafile to be read.
# Calls on bad_date for input validation.
# Reads each entry, collects the date launched, and increments the corresponding list.
# Returns the completed dictionary.
# ----------------
def countProjects(dataFile):
# list format: {Year}:[Jan,Feb,Mar,Apr,May,Jun,Jul,Aug,Sep,Oct,Nov,Dec]
# each value represents the number of projects launched in that month for that year.
retDict = {}
if len(dataFile) == 0 or dataFile == [{}]:
return retDict
yearList = gatherYears(dataFile)
for year in yearList:
retDict[str(year)] = [0,0,0,0,0,0,0,0,0,0,0,0]
for item in dataFile:
launchTime = item['launched'] # 2012-03-17 03:24:11
if (bad_date(launchTime) == False): #checks to see if launch time is actually a date
launchVals = launchTime.split('-') # ['2012', '03', '17 03:24:11']
retDict[launchVals[0]][(int(launchVals[1]) - 1)] += 1
return retDict
# ----------------------------
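# Illustrative behaviour (a single hypothetical row; index 2 of the list is March):
#   countProjects([{'launched': '2016-03-05 10:00:00'}])
#   -> {'2016': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]}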
# ----- count_cat_fail_success() -----
# Helper function for analytics, namely category_fail()
# Passes in the data file to be read.
# Makes no external function calls.
# Reads each entry and increments the fail or success value for its category, depending on its state.
# Returns the list of category titles, and the completed list of ratios for those categories
# -----------------
def count_cat_fail_success(data):
if len(data) == 0 or data == [{}]:
return [{}]
category_dict = { # key=main category, value=#successful[0],#failed[1]
'Games':[0,0], 'Design':[0,0], 'Technology':[0,0], 'Film & Video':[0,0], 'Music':[0,0],
'Publishing':[0,0], 'Fashion':[0,0], 'Food':[0,0], 'Art':[0,0],
'Comics':[0,0], 'Photography':[0,0], 'Theater':[0,0], 'Crafts':[0,0],
'Journalism':[0,0], 'Dance':[0,0]}
for proj in data:
if proj['state'] == 'successful':
category_dict[proj['main_category']][0] += 1
elif proj['state'] == 'failed' or proj['state'] == 'canceled':
category_dict[proj['main_category']][1] += 1
category_names = list(category_dict.keys())
# FOR DEBUGGING: category_successful = [x[0] for x in list(category_dict.values())]
# FOR DEBUGGING: category_failed = [x[1] for x in list(category_dict.values())]
category_failed_ratio = [x[1] / (x[0] + x[1]) if x[0] or x[1] else 0 for x \
in list(category_dict.values())] # list comprehension
return category_names, category_failed_ratio
# -------------------------------------
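# Illustrative behaviour (hypothetical rows, not part of the original file):
#   rows = [{'main_category': 'Games', 'state': 'successful'},
#           {'main_category': 'Games', 'state': 'failed'}]
#   names, ratios = count_cat_fail_success(rows)
#   ratios[names.index('Games')]  -> 0.5  (categories with no projects report 0)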
# ----- findAmbitious() -----
# Helper function for analytics, namely ambitiousProjects()
# Passes in the data file to be read.
# Calls on bad_date() for input validation.
# Reads each entry, locates which year and month it belongs to, compares goals, keeps the higher one.
# If goals are equal, keeps the project with the highest pledged.
# Returns the completed and sorted-by-date dictionary
# -------------
def findAmbitious(dataFile):
# dictionary format: {year-month}:[ID,goal,pledged]
retDict = {}
if len(dataFile) == 0 or dataFile == [{}]:
return retDict
for item in dataFile:
if (bad_date(item['launched']) == False): # 2012-03-17 03:24:11
date = item['launched'][0:7] # 2012-03
try:
int(item['ID'])
Decimal(item['goal'])
Decimal(item['pledged'])
except (ValueError, DecimalException):
continue
itemVals = [int(item['ID']),int(Decimal(item['goal'])),int(Decimal(item['pledged']))]
try:
compVals = retDict.get(date)
# if goal is higher, or goal is equal and pledged is higher
if ((itemVals[1] > compVals[1]) or ((itemVals[1] == compVals[1]) and (itemVals[2] > compVals[2]))):
retDict[date] = itemVals
except:
retDict.setdefault(date, itemVals)
sortDict = {}
for i in sorted(retDict):
sortDict[i] = retDict[i]
return sortDict
# ---------------------------
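# Illustrative behaviour (two hypothetical rows from the same month; the higher goal wins):
#   findAmbitious([{'ID': '1', 'goal': '500', 'pledged': '100', 'launched': '2016-03-05 10:00:00'},
#                  {'ID': '2', 'goal': '900', 'pledged': '50',  'launched': '2016-03-20 10:00:00'}])
#   -> {'2016-03': [2, 900, 50]}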
# ----- gatherYears() -----
# Helper function for analytics, namely ambitiousProjects() and countProjects().
# Passes in the data file to be read.
# Calls on bad_date for input validation.
# Reads each entry, adds a new year if it is not yet added.
# Returns the completed list of years.
# -------------
def gatherYears(dataFile):
retList = []
if len(dataFile) == 0 or dataFile == [{}]:
return retList
for item in dataFile:
date = item['launched'] # 2012-03-17 03:24:11
if (bad_date(date) == False):
try: retList.index(date[0:4]) # find 2012 in list, if not...
except: retList.append(date[0:4]) # add 2012 to list
retList.sort() # sort years in ascending order
return retList
# -------------------------
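# Illustrative behaviour (hypothetical rows; the result is sorted ascending):
#   gatherYears([{'launched': '2014-01-01 00:00:00'},
#                {'launched': '2012-05-05 00:00:00'}])  -> ['2012', '2014']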
# ----- createDropdown() -----
# Helper function for analytics, namely ambitiousProjects() and countProjects().
# Passes in the figure to edit, the number of bars, the keys for the bar data, the list of tab titles, and the number of bars to be seen on each tab.
# Makes no external function calls.
# Creates a dropdown menu with the desired characteristics, and applies it to the figure.
# Returns the edited figure.
# ----------------------------
def createDropdown(figure,barCount,titleKeys,titleList,barsPerTab):
tabList = []
visList = []
labelList = []
for i in range(barCount): # Add a visual boolean for every bar
visList.append(False)
for key in titleKeys: # Add each desired tab title to a list
labelList.append(key)
for i in range(int(barCount / barsPerTab)): # Add a new tab to tabList (number of tabs = barCount divided by barsPerTab)
tabList.append(
dict(
label=labelList[i],
method="update",
args=[{"visible": []}, # This blank list will be filled later
{"title": titleList[i]}]
)
)
visIndex = 0
for item in tabList: # For every tab to be made,
copyVis = visList.copy() # Create a copy of our visList
try:
for i in range(barsPerTab):
copyVis[(visIndex + i)] = True # and allow only the necessary bars to be seen
except:
print('An error occurred! Graph may not display correctly!') # If something bad happens, don't crash
finally:
item['args'][0]['visible'] = copyVis # Update this bar's visible arguments to the proper values instead of a blank list
visIndex += barsPerTab # Increment visIndex for the next loop
# Update the figure with its new, fancy dropdown menu!
figure.update_layout(
updatemenus=[
dict(
active=0,
buttons=tabList
)
]
)
return figure
# ----------------------------
# ----- count_categories_per_month() -----
# Helper function for analytics, namely category_per_month().
# Passes in the data file to be read.
# Makes no external function calls.
# Counts the number of projects belonging to each month and its corresponding category.
# Returns the completed dictionary of categories for all months.
# ------------------
def count_categories_per_month(data):
# Check if it is necessary to create dictionary
if len(data) == 0 or not data[0]:#quick check to see if pyfile is either empty or has an empty dictionary inside
print("empty file passed into analytic")
return [{}]
# Initialize variables
month_dict = {'01':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], '02':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
'03':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], '04':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], '05':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
'06':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], '07':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], '08':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
'09':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], '10':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], '11':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
'12':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}
categories = ['Games', 'Design', 'Technology', 'Film & Video', 'Music', 'Publishing',
'Fashion', 'Food', 'Art', 'Comics', 'Photography', 'Theater', 'Crafts', 'Journalism',
'Dance']
# Increment each category respectively
for proj in data:
projDate = proj['launched']
if bad_date(projDate):
continue
projMonth = projDate[5:7] # substring of the month
projCat = proj['main_category']
if projCat in categories:
catIndex = categories.index(projCat)
month_dict[projMonth][catIndex] += 1 #increment up that category
return month_dict
# --------------------------------------
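# Illustrative behaviour (one hypothetical row launched in March; index 0 of each list is 'Games'):
#   count_categories_per_month([{'launched': '2016-03-05 10:00:00', 'main_category': 'Games'}])['03']
#   -> [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]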
# ----- get_countrys_category() -----
# Helper function for analytics, namely popular_category_perNation().
# Passes in the data file to be read.
# Makes no external function calls.
# Counts the number of projects belonging to each country, and its corresponding category.
# Returns the completed dictionary of categories for all countries.
# ----------------
def get_countrys_category(data):
# Check if it is necessary to create dictionary
if len(data) == 0 or not data[0]:#quick check to see if pyfile is either empty or has an empty dictionary inside
print("empty file passed into analytic")
return {}
# Initialize variables
categories = ['Games', 'Design', 'Technology', 'Film & Video', 'Music', 'Publishing',
'Fashion', 'Food', 'Art', 'Comics', 'Photography', 'Theater', 'Crafts', 'Journalism',
'Dance']
analyticDict = {}
# Loop through dataset to add entries
for proj in data:
projCountry = proj['country']
projCat = proj['main_category']
if projCat not in categories:
continue
catIndex = categories.index(projCat)
if projCountry in analyticDict.keys(): # no need to create new entry in the dictionary
analyticDict[projCountry][catIndex] += 1
else:
#makes entry for the newly detected country
analyticDict[projCountry] = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
analyticDict[projCountry][catIndex] += 1
return analyticDict
# ---------------------------------
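# Illustrative behaviour (one hypothetical row; index 0 of each count list is 'Games'):
#   get_countrys_category([{'country': 'US', 'main_category': 'Games'}])
#   -> {'US': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}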
##Successful words analytics
def count_words(data):
count_dict = {}
for item in data:
if 'state' in item.keys():
if(item['state'] == "successful"):
res = item['name'].split()
for i in res:
if(len(i) >= 4):
if i in count_dict:
count_dict[i] += 1
else:
count_dict[i] = 1
return count_dict
``` |
{
"source": "JLMorales/slack-bulk-delete",
"score": 2
} |
#### File: JLMorales/slack-bulk-delete/helper_functions.py
```python
import sys
def cli_progress(end_val, bar_length=20):
for i in xrange(0, end_val):
percent = float(i) / end_val
hashes = '#' * int(round(percent * bar_length))
spaces = ' ' * (bar_length - len(hashes))
sys.stdout.write("\rPercent: [{0}] {1}%".format(hashes + spaces, int(round(percent * 100))))
sys.stdout.flush()
``` |
{
"source": "JL-Moriarty/Cargo-handling-robot",
"score": 2
} |
#### File: JL-Moriarty/Cargo-handling-robot/Imgprocess.py
```python
import pyzbar.pyzbar as pyzbar
import cv2
import numpy as np
import picamera as pca
import time
import os
import collections
from luma.core.interface.serial import i2c, spi
from luma.core.render import canvas
from luma.oled.device import ssd1306, ssd1325, ssd1331, sh1106
import serial
def readQRcode(x):
qrdata = ""
img = cv2.imread(x)
qr = pyzbar.decode(img)
for item in qr:
qrdata = item.data.decode()
return(qrdata)
def getColorList():
dict = collections.defaultdict(list)
lower_red = np.array([156, 43, 46])
upper_red = np.array([180, 255, 255])
color_list = []
color_list.append(lower_red)
color_list.append(upper_red)
dict['1']=color_list
    # red, second hue range (red wraps around in HSV)
lower_red = np.array([0, 43, 46])
upper_red = np.array([10, 255, 255])
color_list = []
color_list.append(lower_red)
color_list.append(upper_red)
dict['11'] = color_list
    # green
lower_green = np.array([35, 43, 46])
upper_green = np.array([77, 255, 255])
color_list = []
color_list.append(lower_green)
color_list.append(upper_green)
dict['2'] = color_list
    # blue
lower_blue = np.array([100, 43, 46])
upper_blue = np.array([124, 255, 255])
color_list = []
color_list.append(lower_blue)
color_list.append(upper_blue)
dict['3'] = color_list
return dict
# process the image and return the code of the dominant color
def judgeColor(frame):
#print('go in get_color')
hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
maxsum = -100
color = None
color_dict = getColorList()
for d in color_dict:
mask = cv2.inRange(hsv,color_dict[d][0],color_dict[d][1])
cv2.imwrite(d+'.jpg',mask)
binary = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1]
binary = cv2.dilate(binary,None,iterations=2)
img, cnts, hiera = cv2.findContours(binary.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
sum = 0
for c in cnts:
sum+=cv2.contourArea(c)
if sum > maxsum :
maxsum = sum
color = d
return color
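# Illustrative usage sketch (the image path is the one written by capturePic below;
# treat it as an assumption about the local setup):
#   frame = cv2.imread("/home/pi/Desktop/COLOR.jpg")
#   judgeColor(frame)   # returns '1'/'11' for red, '2' for green, '3' for blue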
def capturePic():
cam = pca.PiCamera()
cam.start_preview()
time.sleep(2)
cam.capture("/home/pi/Desktop/COLOR.jpg")
cam.stop_preview()
cam.close()
def captureQRcode():
cam = pca.PiCamera()
cam.start_preview()
time.sleep(2)
cam.capture("/home/pi/Desktop/QRCODE.jpg")
cam.stop_preview()
cam.close()
'''def captureimgjudgeclr():
cam = pca.PiCamera()
cam.start_preview()
time.sleep(8)
cam.capture("/home/pi/Desktop/test.jpg")
cam.stop_preview()
img = cv2.imread("/home/pi/Desktop/test.jpg")
pxc = img[400,300]
m = max(pxc)
if m == pxc[0]:
flag = 3
elif m ==pxc[1]:
flag = 2
else:
flag = 1
return flag'''
def display(a,b = "ZZULI"):
# rev.1 users set port=0
# substitute spi(device=0, port=0) below if using that interface
serial = i2c(port=1, address=0x3C)
# substitute ssd1331(...) or sh1106(...) below if using that device
device = sh1106(serial)#这里改ssd1306, ssd1325, ssd1331, sh1106
with canvas(device) as draw:
draw.rectangle(device.bounding_box,outline="white", fill="black")
draw.text((55, 16), b, fill="white")
draw.text((40, 36), "QRcode:"+a, fill="white")
#draw.text((8, 40), "color distinguish:"+b, fill="white")
def serialParse():
ser = serial.Serial("/dev/ttyUSB0",9600,timeout = 10)
serialNum = ser.read(1)
return serialNum
'''def serialWrite(serialNumber):
ser = serial.Serial("/dev/ttyUSB0",9600,timeout = 10)
ser.write(serialNumber)'''
def main():
while 1:
ser = serial.Serial("/dev/ttyUSB0",9600,timeout = 10)
#serialCheck = serialParse()
serialCheck = ser.read(1)
if serialCheck == b'\x05':
capturePic()
pic = "/home/pi/Desktop/COLOR.jpg"
img = cv2.imread(pic)
colorNum = judgeColor(img)
#print(colorNum)
ser.write(str.encode(colorNum))
#print("write over")
#print(type(colorNum))
time.sleep(3)
#serialWrite(colorNum)
elif serialCheck == b'\x06':
captureQRcode()
pic = "/home/pi/Desktop/QRCODE.jpg"
qrNum = readQRcode(pic)
#print(qrNum)
#print(type(qrNum))
ser.write(str.encode(qrNum))
display(qrNum)
#serialWrite(str.encode(qrNum))
#print("write over")
time.sleep(3)
main()
``` |
{
"source": "jlmucb/class_notes",
"score": 4
} |
#### File: code/py/gen.py
```python
print "Hello, Python!";
if True:
print "True"
else:
print "False"
#!/usr/bin/python
import sys
try:
# open file stream
file = open(file_name, "w")
except IOError:
print "There was an error writing to", file_name
sys.exit()
print "Enter '", file_finish,
print "' When finished"
while file_text != file_finish:
file_text = raw_input("Enter text: ")
if file_text == file_finish:
# close the file
file.close
break
file.write(file_text)
file.write("\n")
file.close()
#!/usr/bin/python
import sys, getopt
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print 'test.py -i <inputfile> -o <outputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'test.py -i <inputfile> -o <outputfile>'
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
#!/usr/bin/python
tuple = ( 'abcd', 786 , 2.23, 'john', 70.2 )
list = [ 'abcd', 786 , 2.23, 'john', 70.2 ]
tuple[2] = 1000 # Invalid syntax with tuple
list[2] = 1000 # Valid syntax with list
if ( a > b ):
print "Line 5 - a is greater than b"
else:
print "Line 5 - a is not greater than b"
#!/usr/bin/python
a = 10
b = 20
list = [1, 2, 3, 4, 5 ]
if ( a in list ):
print "Line 1 - a is available in the given list"
else:
print "Line 1 - a is not available in the given list"
if ( b not in list ):
print "Line 2 - b is not available in the given list"
else:
print "Line 2 - b is available in the given list"
a = 2
if ( a in list ):
print "Line 3 - a is available in the given list"
else:
print "Line 3 - a is not available in the given list"
#!/usr/bin/python
count = 0
while (count < 9):
print 'The count is:', count
count = count + 1
print "Good bye!"
#!/usr/bin/python
for letter in 'Python': # First Example
print 'Current Letter :', letter
fruits = ['banana', 'apple', 'mango']
for fruit in fruits: # Second Example
print 'Current fruit :', fruit
print "Good bye!"
#!/usr/bin/python
fruits = ['banana', 'apple', 'mango']
for index in range(len(fruits)):
print 'Current fruit :', fruits[index]
print "Good bye!"
#!/usr/bin/python
for letter in 'Python': # First Example
if letter == 'h':
continue
print 'Current Letter :', letter
var = 10 # Second Example
while var > 0:
var = var -1
if var == 5:
continue
print 'Current variable value :', var
print "Good bye!"
#!/usr/bin/python
import random
random.seed( 10 )
print "Random number with seed 10 : ", random.random()
# It will generate same random number
random.seed( 10 )
print "Random number with seed 10 : ", random.random()
# It will generate same random number
random.seed( 10 )
print "Random number with seed 10 : ", random.random()
var1 = 'Hello World!'
var2 = "Python Programming"
print "var1[0]: ", var1[0]
print "var2[1:5]: ", var2[1:5]
#!/usr/bin/python
dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'};
print "dict['Name']: ", dict['Name'];
print "dict['Age']: ", dict['Age'];
#!/usr/bin/python
import time; # This is required to include time module.
ticks = time.time()
print "Number of ticks since 12:00am, January 1, 1970:", ticks
def functionname( parameters ):
"function_docstring"
function_suite
return [expression]
# Arguments in Python are passed by object reference: if a function mutates a
# mutable object (such as a list) passed to it, the change is visible in the
# calling code as well. For example:
#!/usr/bin/python
# Function definition is here
def changeme( mylist ):
"This changes a passed list into this function"
mylist.append([1,2,3,4]);
print "Values inside the function: ", mylist
return
# Now you can call changeme function
mylist = [10,20,30];
changeme( mylist );
print "Values outside the function: ", mylist
#!/usr/bin/python
# Function definition is here
sum = lambda arg1, arg2: arg1 + arg2;
# Now you can call sum as a function
print "Value of total : ", sum( 10, 20 )
print "Value of total : ", sum( 20, 20 )
# The Python code for a module named aname normally resides in a file named
# aname.py. Here's an example of a simple module, hello.py:
def print_func( par ):
print "Hello : ", par
return
# The import statement:
# You can use any Python source file as a module by executing an import
# statement in some other Python source file. The import has the following syntax:
#     import module1[, module2[, ... moduleN]]
# When the interpreter encounters an import statement, it imports the module if
# the module is present in the search path. A search path is a list of directories
# that the interpreter searches before importing a module. For example, to import
# the module hello.py, you need to put the following command at the top of the script:
#!/usr/bin/python
# Import module hello
import hello
# Now you can call defined function that module as follows
hello.print_func("Zara")
# When the above code is executed, it produces the following result:
#     Hello : Zara
#!/usr/bin/python
Money = 2000
def AddMoney():
# Uncomment the following line to fix the code:
# global Money
Money = Money + 1
print Money
AddMoney()
print Money
#!/usr/bin/python
str = raw_input("Enter your input: ");
print "Received input is : ", str
#!/usr/bin/python
# Open a file
fo = open("foo.txt", "r+")
str = fo.read(10);
print "Read String is : ", str
# Close opened file
fo.close()
input(prompt)
len(thing)
open(filename, mode)
print(line)
type(thing)
ord(char)
chr(number) # Not that many!
bool(thing)
float(thing)
int(thing)
iter(list)
list(thing)
range(from, to, stride)
str(thing)
```
#### File: code/py/graphfromfile.linux.py
```python
import sys, getopt
from matplotlib.pyplot import plot, show
import matplotlib.pyplot as plt
class Coords:
def __init__(self, x, y):
self.x = x
self.y = y
def getCoords( line ):
a = ''
b = ''
l = line.strip()
for i in range(0, len(l)):
if l[i] == ' ':
break
a = a + l[i]
if i == 0:
return ""
for j in range(i + 1, len(l)):
if l[j] != ' ':
break
for i in range(j,len(l)):
if l[i] == ' ':
break
b = b + l[i]
c = Coords(float(a), float(b))
return c
def printCoordList( lst ):
c = Coords(0.0, 0.0)
return c
def stripComment( l ):
for i in range(0, len(l)):
if l[i] == '#':
return l[0:i].strip()
return l.strip()
def main(argv=sys.argv):
inputfile = sys.argv[1]
outputfile = sys.argv[2]
f = open(inputfile)
l = []
for line in f:
l.append(line)
f.close()
title= ''
xlabel=''
ylabel=''
for i in range(0, len(l)):
# print 'line ', i, ' ', l[i].rstrip()
nl = stripComment(l[i])
if len(nl) <= 0:
continue
if title == '':
title = nl
elif xlabel == '':
xlabel = nl
elif ylabel == '':
ylabel = nl
break
print('title: ', title, ', xlabel: ', xlabel, ', ylabel: ', ylabel)
coords = []
for j in range(i + 1, len(l)):
nl = stripComment(l[j])
t = getCoords(nl)
if t == '':
continue
coords.append(t)
xlist = []
ylist = []
for i in range(0, len(coords)):
xlist.append(coords[i].x)
ylist.append(coords[i].y)
top = ylist[0]
bottom = ylist[0]
left = xlist[0]
right = xlist[0]
for i in range(0, len(coords)):
if ylist[i] > top:
top = ylist[i]
if ylist[i] < bottom:
bottom = ylist[i]
if xlist[i] > right:
right = xlist[i]
if xlist[i] < left:
left = xlist[i]
left = left - .05 * (right - left)
right = right + .05 * (right - left)
bottom = bottom - .05 * (top - bottom)
top = top + .05 * (top - bottom)
print('left: ', left, ', right: ', right)
print('bottom: ', bottom, ', top: ', top)
print(len(coords), ' points')
fig = plt.figure()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.axes().set_aspect('equal')
plt.axis([left, right, bottom, top])
plt.plot(xlist, ylist)
plt.savefig(outputfile, dpi=25)
plt.show()
main(sys.argv)
```
#### File: code/py/p2.py
```python
total_cost = 100.00
days = 3
cost_per_day = total_cost / days
if cost_per_day > 40:
print("Too expensive")
elif cost_per_day > 30:
print("Reasonable cost")
elif cost_per_day > 20:
print("Excellent cost")
else:
print("Incredible bargain")
#Get information from user
balance = input("Enter how much you want to save: ")
payment = input("Enter how much you will save each period: ")
#Calculate number of payments that will be needed
#Present information to user
#Get information from user
balance = float(input("Enter how much you want to save: "))
while value <= 0:
value = int(input ("Enter a Positive value!"))
myfile = open("Filename", "r")
myfile = open("Filename", "w")
myfile.write("This line is written to the file.")
myfile.close()
# lists
print (daily_high_temps[4])
for i in range(7):
print(daily_high_temps[i])
daily_high_temps = [83, 80, 73, 75, 79, 83, 86]
x = len(daily_high_temps)
list1 += list2
list1.append(3.9)
varname[a:b]
car_tuple = "Buick", "Century", 23498
########## Analyze Data ##########
#Get date of interest
month = int(input("For the date you care about, enter the month: "))
day = int(input("For the date you care about, enter the day: "))
#Find historical data for date
gooddata = []
for singleday in datalist:
if (singleday[0] == day) and (singleday[1] == month):
gooddata.append([singleday[2], singleday[3],
singleday[4], singleday[5]])
#Perform analysis
minsofar = 120
maxsofar = -100
numgooddates = 0
sumofmin=0
sumofmax=0
for singleday in gooddata:
numgooddates += 1
sumofmin += singleday[1]
sumofmax += singleday[2]
if singleday[1] < minsofar:
print(minsofar, singleday[1])
minsofar = singleday[1]
if singleday[2] > maxsofar:
maxsofar = singleday[2]
avglow = sumofmin / numgooddates
avghigh = sumofmax / numgooddates
def getName():
first = input("Enter your first name:")
last = input("Enter your last name:")
full_name = first + ' ' + last
return full_name
name = getName()
```
#### File: code/py/p3.py
```python
def calc_miles(gallons, mpg=20.0):
return gallons*mpg
print( calc_miles(10.0, 15.0) )
print( calc_miles(10.0) )
for line in infile:
#STUFF DELETED HERE
m, d, y = date.split('/')
month = int(m)
day = int(d)
year = int(y)
#Put data into list
datalist.append([day, month, year, lowtemp, hightemp,
rainfall])
#STUFF DELETED HERE
#Find historical data for date
gooddata = []
for singleday in datalist:
if (singleday[0] == month) and (singleday[1] == day):
gooddata.append([singleday[2], singleday[3],
singleday[4], singleday[5]])
from random import choice
import pyglet
window = pyglet.window.Window(width=400, height = 450,
caption="GameWindow")
Im1 = pyglet.image.load('BlueTri.jpg')
Im2 = pyglet.image.load('PurpleStar.jpg')
Im3 = pyglet.image.load('OrangeDiamond.jpg')
Im4 = pyglet.image.load('YellowCircle.jpg')
Im5 = pyglet.image.load('RedHex.jpg')
def InitializeGrid(board):
#Initialize Grid by reading in from file
for i in range(8):
for j in range(8):
board[i][j] = choice(['A', 'B', 'C', 'D', 'E'])
def Initialize(board):
#Initialize game
#Initialize grid
InitializeGrid(board)
#Initialize score
global score
score = 0
#Initialize turn number
global turn
turn = 1
#Set up graphical info
def ContinueGame(current_score, goal_score = 100):
#Return false if game should end, true if game is not over
if (current_score >= goal_score):
return False
else:
return True
def SwapPieces(board, move):
#Swap objects in two positions
temp = board[move[0]][move[1]]
board[move[0]][move[1]] = board[move[2]][move[3]]
board[move[2]][move[3]] = temp
def RemovePieces(board):
#Remove 3-in-a-row and 3-in-a-column pieces
#Create board to store remove-or-not
remove = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0,
0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0,
0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
#Go through rows
for i in range(8):
for j in range(6):
if (board[i][j] == board[i][j+1]) and (board[i][j] ==
board[i][j+2]):
#three in a row are the same!
remove[i][j] = 1;
remove[i][j+1] = 1;
remove[i][j+2] = 1;
#Go through columns
for j in range(8):
for i in range(6):
if (board[i][j] == board[i+1][j]) and (board[i][j] ==
board[i+2][j]):
#three in a row are the same!
remove[i][j] = 1;
remove[i+1][j] = 1;
remove[i+2][j] = 1;
#Eliminate those marked
global score
removed_any = False
for i in range(8):
for j in range(8):
if remove[i][j] == 1:
board[i][j] = 0
score += 1
removed_any = True
return removed_any
def DropPieces(board):
#Drop pieces to fill in blanks
for j in range(8):
#make list of pieces in the column
listofpieces = []
for i in range(8):
if board[i][j] != 0:
listofpieces.append(board[i][j])
#copy that list into colulmn
for i in range(len(listofpieces)):
board[i][j] = listofpieces[i]
#fill in remainder of column with 0s
for i in range(len(listofpieces), 8):
board[i][j] = 0
def FillBlanks(board):
#Fill blanks with random pieces
for i in range(8):
for j in range(8):
if (board[i][j] == 0):
board[i][j] = choice(['A', 'B', 'C', 'D', 'E'])
def Update(board, move):
#Update the board according to move
SwapPieces(board, move)
pieces_eliminated = True
while pieces_eliminated:
pieces_eliminated = RemovePieces(board)
DropPieces(board)
FillBlanks(board)
@window.event
def on_draw():
window.clear()
for i in range(7,-1,-1):
#Draw each row
y = 50+50*i
for j in range(8):
#draw each piece, first getting position
x = 50*j
if board[i][j] == 'A':
Im1.blit(x,y)
elif board[i][j] == 'B':
Im2.blit(x,y)
elif board[i][j] == 'C':
Im3.blit(x,y)
elif board[i][j] == 'D':
Im4.blit(x,y)
elif board[i][j] == 'E':
Im5.blit(x,y)
label = pyglet.text.Label('Turn: '+str(turn)+
'Score: '+str(score), font_name='Arial', font_size=18, x=20,
y = 10)
label.draw()
@window.event
def on_mouse_press(x, y, button, modifiers):
#Get the starting cell
global startx
global starty
startx = x
starty = y
@window.event
def on_mouse_release(x, y, button, modifiers):
#Get starting and ending cell and see if they are adjacent
startcol = startx//50
startrow = (starty-50)//50
endcol = x//50
endrow = (y-50)//50
#Check whether ending is adjacent to starting and if so, make move.
if ((startcol==endcol and startrow==endrow - 1)
or (startcol==endcol and startrow==endrow+1) or
(startrow==endrow and startcol==endcol-1) or
(startrow==endrow and startcol==endcol+1)):
Update(board,[startrow,startcol,endrow,endcol])
global turn
turn += 1
#See if game is over
if not ContinueGame(score):
print("You won in", turn, "turns!")
exit()
#State main variables
score = 100
turn = 100
goalscore = 100
board = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
#Initialize game
Initialize(board)
pyglet.app.run()
from matplotlib import pyplot
pyplot.plot([0,1,2,3,4,5], [0,1,4,9,16,25])
pyplot.axis([0,5,0,25])
pyplot.show()
from matplotlib.pyplot import plot, show
#Set initial conditions
time = 0
balance = 1000
#Set list to store data
timelist=[time]
balancelist=[balance]
while (time < 10):
#Increase balance and time
balance += balance*0.03
time += 1
#Store time and balance in lists
timelist.append(time)
balancelist.append(balance)
#Output the simulation results
for i in range(len(timelist)):
print("Year:", timelist[i], " Balance:", balancelist[i])
plot(timelist, balancelist)
show()
class BankAccount:
balance = 0.0
def __init__(self):
self.deposits = []
checking_account = BankAccount()
savings_account = BankAccount()
checking_account.deposits.append(100.0)
print(savings_account.deposits)
class FootballPlayer:
name = "<NAME>"
team = "None"
years_in_league = 0
def printPlayer(self):
print(self.name+" playing for the "+self.team+":")
class Quarterback(FootballPlayer):
pass_attempts = 0
completions = 0
pass_yards = 0
def completionRate(self):
return self.completions/self.pass_attempts
def yardsPerAttempt(self):
return self.pass_yards/self.pass_attempts
class RunningBack(FootballPlayer):
rushes = 0
rush_yards = 0
def yardsPerRush(self):
return self.rush_yards/self.rushes
class FootballPlayer:
name = "<NAME>"
team = "None"
years_in_league = 0
def printPlayer(self):
print(self.name+" playing for the "+self.team+":")
def isGood(self):
print("Error! isGood is not defined!")
return False
class Quarterback(FootballPlayer):
pass_attempts = 0
completions = 0
pass_yards = 0
def completionRate(self):
return self.completions/self.pass_attempts
def yardsPerAttempt(self):
return self.pass_yards/self.pass_attempts
def isGood(self):
return (self.yardsPerAttempt() > 7)
class RunningBack(FootballPlayer):
rushes = 0
rush_yards = 0
def yardsPerRush(self):
return self.rush_yards/self.rushes
def isGood(self):
return (self.yardsPerRush() > 4)
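# Hedged usage sketch (not in the original notes): the overridden isGood()
# is dispatched based on the concrete class of each player object.
# qb = Quarterback(); qb.pass_attempts = 100; qb.pass_yards = 800
# rb = RunningBack(); rb.rushes = 100; rb.rush_yards = 450
# for player in (qb, rb):
#     print(player.isGood())   # True (8.0 yds/attempt), True (4.5 yds/rush)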
book_queue = []
book_queue.append(medium_book)
book_queue.append(short_book)
book_queue.append(long_book)
next_book = book_queue.pop(0)
def isIn(L, v):
    i = 0
while (i<len(L)):
if L[i] == v:
return True
else:
i += 1
return False
favorite_foods = ['pizza', 'barbeque', 'gumbo', 'chicken and dumplings', 'pecan pie', 'ice cream']
print(isIn(favorite_foods, 'gumbo'))
print(isIn(favorite_foods, 'coconut'))
# OUTPUT:
# True
# False
def mergeSort(L):
n = len(L)
if n <= 1:
return
L1 = L[:n//2]
L2 = L[n//2:]
mergeSort(L1)
mergeSort(L2)
merge(L, L1, L2)
return
def merge(L, L1, L2):
i= 0
j= 0
k= 0
while (j < len(L1)) or (k < len(L2)):
if j < len(L1):
if k < len(L2):
#we are not at the end of L1 or L2, so pull the smaller value
if L1[j] < L2[k]:
L[i] = L1[j]
j += 1
else:
L[i] = L2[k]
k += 1
else:
#we are at the end of L2, so just pull from L1
L[i] = L1[j]
j += 1
else:
#we are at the end of L1, so just pull from L2
L[i] = L2[k]
k += 1
i += 1
return
class node:
def __init__(self, name, parent=-1):
self._name = name
self._parent = parent
self._left = -1
self._right = -1
def getName(self):
return self._name
def getParent(self):
return self._parent
def getLeft(self):
return self._left
def getRight(self):
return self._right
def setParent(self, p):
self._parent = p
def setLeft(self, l):
self._left = l
def setRight(self, r):
self._right = r
class node:
def __init__(self, name):
self._name = name
self._friends = []
self._status = 0
self._discoveredby = 0
def getName(self):
return self._name
def getFriends(self):
return self._friends
def addFriend(self, friend_index):
self._friends.append(friend_index)
def isUnseen(self):
if self._status == 0:
return True
else:
return False
def isSeen(self):
if self._status == 1:
return True
else:
return False
def setUnseen(self):
self._status = 0
def setSeen(self):
self._status = 1
def discover(self, n):
self._discoveredby = n
def discovered(self):
return self._discoveredby
def BFS(nodelist, start, goal):
to_visit = queue()
nodelist[start].setSeen()
to_visit.enqueue(start)
found = False
while (not found) and (not to_visit.isEmpty()):
current = to_visit.dequeue()
neighbors = nodelist[current].getFriends()
for neighbor in neighbors:
if nodelist[neighbor].isUnseen():
nodelist[neighbor].setSeen()
nodelist[neighbor].discover(current)
if neighbor == goal:
found = True
else:
to_visit.enqueue(neighbor)
def retrievePath(nodelist, start, goal):
#Return the path from start to goal
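    # Hedged sketch of a possible body (not in the original notes): walk the
    # discover() chain backwards from goal to start, then reverse it.
    path = [goal]
    current = goal
    while current != start:
        current = nodelist[current].discovered()
        path.append(current)
    path.reverse()
    return path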
def BFS(nodelist, start, goal):
to_visit = queue()
nodelist[start].setSeen()
to_visit.enqueue(start)
found = False
while (not found) and (not to_visit.isEmpty()):
current = to_visit.dequeue()
neighbors = nodelist[current].getFriends()
for neighbor in neighbors:
if nodelist[neighbor].isUnseen():
nodelist[neighbor].setSeen()
nodelist[neighbor].discover(current)
if neighbor == goal:
found = True
else:
to_visit.enqueue(neighbor)
return retrievePath(nodelist, start, goal)
from multiprocessing import Process
def print_process(number):
print("Printing from process", number)
if __name__ == '__main__':
process_list = []
for i in range(20):
p = Process(target=print_process, args=(i,))
process_list.append(p)
for p in process_list:
p.start()
for line in infile:
#STUFF DELETED HERE
m, d, y = date.split('/')
month = int(m)
day = int(d)
year = int(y)
#Put data into list
datalist.append([day, month, year, lowtemp, hightemp,
rainfall])
#STUFF DELETED HERE
#Find historical data for date
gooddata = []
for singleday in datalist:
if (singleday[0] == month) and (singleday[1] == day):
gooddata.append([singleday[2], singleday[3],
singleday[4], singleday[5]])
try:
#Commands to try out
except <name of exception>:
#how to handle that exception
import math
print(math.sin(math.pi/2))
import webbrowser
webbrowser.open("http://www.thegreatcourses.com")
import shutil
shutil.copy("File1.txt", "File2.txt")
``` |
{
"source": "jlmwise/python-docs-samples",
"score": 2
} |
#### File: python-docs-samples/dlp/custom_infotype.py
```python
def omit_name_if_also_email(
project,
content_string,
):
"""Marches PERSON_NAME and EMAIL_ADDRESS, but not both.
Uses the Data Loss Prevention API omit matches on PERSON_NAME if the
EMAIL_ADDRESS detector also matches.
Args:
project: The Google Cloud project id to use as a parent resource.
content_string: The string to inspect.
Returns:
A list of the info type names found in the inspected string.
"""
# Import the client library.
import google.cloud.dlp
# Instantiate a client.
dlp = google.cloud.dlp_v2.DlpServiceClient()
# Construct a list of infoTypes for DLP to locate in `content_string`. See
# https://cloud.google.com/dlp/docs/concepts-infotypes for more information
# about supported infoTypes.
info_types_to_locate = [{"name": "PERSON_NAME"}, {"name": "EMAIL_ADDRESS"}]
# Construct the configuration dictionary that will only match on PERSON_NAME
# if the EMAIL_ADDRESS doesn't also match. This configuration helps reduce
# the total number of findings when there is a large overlap between different
# infoTypes.
inspect_config = {
"info_types":
info_types_to_locate,
"rule_set": [{
"info_types": [{
"name": "PERSON_NAME"
}],
"rules": [{
"exclusion_rule": {
"exclude_info_types": {
"info_types": [{
"name": "EMAIL_ADDRESS"
}]
},
"matching_type": "MATCHING_TYPE_PARTIAL_MATCH"
}
}]
}]
}
# Construct the `item`.
item = {"value": content_string}
# Convert the project id into a full resource id.
parent = dlp.project_path(project)
# Call the API.
response = dlp.inspect_content(parent, inspect_config, item)
return [f.info_type.name for f in response.result.findings]
# [END dlp_omit_name_if_also_email]
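# Hedged usage sketch (not part of the original sample); "my-project" is a
# placeholder GCP project id and the string is illustrative only.
# findings = omit_name_if_also_email("my-project", "Contact John Doe at john.doe@example.com")
# print(findings)  # expected to report EMAIL_ADDRESS but not PERSON_NAME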
# [START inspect_with_person_name_w_custom_hotword]
def inspect_with_person_name_w_custom_hotword(
project,
content_string,
custom_hotword="patient"
):
"""Uses the Data Loss Prevention API increase likelihood for matches on
PERSON_NAME if the user specified custom hotword is present. Only
includes findings with the increased likelihood by setting a minimum
likelihood threshold of VERY_LIKELY.
Args:
project: The Google Cloud project id to use as a parent resource.
content_string: The string to inspect.
custom_hotword: The custom hotword used for likelihood boosting.
Returns:
None; the response from the API is printed to the terminal.
"""
# Import the client library.
import google.cloud.dlp
# Instantiate a client.
dlp = google.cloud.dlp_v2.DlpServiceClient()
# Construct a rule set with caller provided hotword, with a likelihood
# boost to VERY_LIKELY when the hotword are present within the 50 character-
# window preceding the PII finding.
hotword_rule = {
"hotword_regex": {"pattern": custom_hotword},
"likelihood_adjustment": {"fixed_likelihood": "VERY_LIKELY"},
"proximity": {"window_before": 50},
}
rule_set = [
{
"info_types": [{"name": "PERSON_NAME"}],
"rules": [{"hotword_rule": hotword_rule}],
}
]
# Construct the configuration dictionary with the custom regex info type.
inspect_config = {
"rule_set": rule_set,
"min_likelihood": "VERY_LIKELY",
}
# Construct the `item`.
item = {"value": content_string}
# Convert the project id into a full resource id.
parent = dlp.project_path(project)
# Call the API.
response = dlp.inspect_content(parent, inspect_config, item)
# Print out the results.
if response.result.findings:
for finding in response.result.findings:
try:
if finding.quote:
print(f"Quote: {finding.quote}")
except AttributeError:
pass
print(f"Info type: {finding.info_type.name}")
print(f"Likelihood: {finding.likelihood}")
else:
print("No findings.")
# [END inspect_with_person_name_w_custom_hotword]
# [START dlp_inspect_with_medical_record_number_custom_regex_detector]
def inspect_with_medical_record_number_custom_regex_detector(
project,
content_string,
):
"""Uses the Data Loss Prevention API to analyze string with medical record
number custom regex detector
Args:
project: The Google Cloud project id to use as a parent resource.
content_string: The string to inspect.
Returns:
None; the response from the API is printed to the terminal.
"""
# Import the client library.
import google.cloud.dlp
# Instantiate a client.
dlp = google.cloud.dlp_v2.DlpServiceClient()
# Construct a custom regex detector info type called "C_MRN",
# with ###-#-##### pattern, where each # represents a digit from 1 to 9.
# The detector has a detection likelihood of POSSIBLE.
custom_info_types = [
{
"info_type": {"name": "C_MRN"},
"regex": {"pattern": "[1-9]{3}-[1-9]{1}-[1-9]{5}"},
"likelihood": "POSSIBLE",
}
]
# Construct the configuration dictionary with the custom regex info type.
inspect_config = {
"custom_info_types": custom_info_types,
}
# Construct the `item`.
item = {"value": content_string}
# Convert the project id into a full resource id.
parent = dlp.project_path(project)
# Call the API.
response = dlp.inspect_content(parent, inspect_config, item)
# Print out the results.
if response.result.findings:
for finding in response.result.findings:
try:
if finding.quote:
print(f"Quote: {finding.quote}")
except AttributeError:
pass
print(f"Info type: {finding.info_type.name}")
print(f"Likelihood: {finding.likelihood}")
else:
print("No findings.")
# [END dlp_inspect_with_medical_record_number_custom_regex_detector]
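# Hedged usage sketch (not part of the original sample); the project id and
# record number below are made-up illustrations of the ###-#-##### pattern.
# inspect_with_medical_record_number_custom_regex_detector(
#     "my-project", "Patient record 123-4-56789")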
``` |
{
"source": "jlnatalicio/Python-programs",
"score": 3
} |
#### File: res/old/main_2.py
```python
import pygame
from pygame import mixer
import random
import math
# initialize pygame module
pygame.init()
# create screen (height x width)
screen = pygame.display.set_mode((640, 480))
# Title and Icon of window
# "Icon made by Smashicons from www.flaticon.com"
pygame.display.set_caption("Space Invaders PyClone")
icon = pygame.image.load('res/img/space-invaders.png')
pygame.display.set_icon(icon)
# Create Background
# ("Sound made by VABsounds from www.freesound.org")
background_img = pygame.image.load('res/img/background.png')
mixer.music.load('res/sounds/background.wav')
mixer.music.play(-1)
# Create Player
player_img = pygame.image.load('res/img/player.png')
# player initial position
player_x = 304
player_y = 420
player_x_change = 0.0
# Score ("Font made by Geronimo Font Studios from www.dafont.com")
score = 0
font = pygame.font.Font('res/fonts/Gameplay.ttf', 16)
text_x = 10
text_y = 10
game_over_font = pygame.font.Font('res/fonts/Gameplay.ttf', 64)
# Create Bullet
# ("Sounds made by TheDweebMan and kutejnikov from www.freesound.org")
bullet_img = pygame.image.load('res/img/bullet.png')
bullet_x = 0
bullet_y = player_y
bullet_y_change = 0.3
bullet_state = "ready"
# Create Enemy
# "Icon made by Freepik from www.flaticon.com"
enemy_img = []
enemy_x = []
enemy_y = []
enemy_x_change = []
enemy_y_change = []
num_enemies = 6
for i in range(num_enemies):
enemy_img.append(pygame.image.load('res/img/alien1.png'))
enemy_x.append( float(random.randint(0, 608)) )
enemy_y.append( float(random.randint(50, 100)) )
enemy_x_change.append(0.18)
enemy_y_change.append(32.0)
def drawGameOverScreen():
game_over_text = game_over_font.render("GAME OVER", True, (255,255,255))
screen.blit(game_over_text, (128, 128))
def drawScore(x, y):
score_value = font.render("Hi-Score: " + str(score), True, (255,255,255))
screen.blit(score_value, (x, y))
def drawPlayer(pos_x, pos_y):
screen.blit(player_img, (pos_x, pos_y)) # draw player on screen
def drawEnemy(pos_x, pos_y, i):
screen.blit(enemy_img[i], (pos_x, pos_y)) # draw enemy on screen
def fireBullet(x, y):
global bullet_state # you can access this variable in the function using global
bullet_state = "fire"
screen.blit(bullet_img, (x, y))
def checkCollision(x1, y1, x2, y2):
x_coordinate_sqr = (x1 - x2) ** 2
y_coordinate_sqr = (y1 - y2) ** 2
distance = math.sqrt(x_coordinate_sqr + y_coordinate_sqr)
if distance < 16.0:
return True
else:
return False
game_is_running = True
while game_is_running: # sets game loop
# paint screen R G B
screen.fill((0, 0, 0))
#draw background
screen.blit(background_img, (0, 0))
for event in pygame.event.get():
if (event.type == pygame.QUIT or event.type == pygame.WINDOWCLOSE):
game_is_running = False
# if keystroke is pressed, check whether its right or left
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
player_x_change = -0.2
if event.key == pygame.K_RIGHT:
player_x_change = 0.2
if event.key == pygame.K_SPACE:
if bullet_state == "ready":
bullet_sound = mixer.Sound('res/sounds/laser.wav')
bullet_sound.play()
# get current x coordinate of spaceship
bullet_x = player_x
fireBullet(bullet_x, bullet_y)
if event.key == pygame.K_ESCAPE:
game_is_running = False
# if keystroke is released, check whether its right or left
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
player_x_change = 0.0
# update player movement and keeping the player on the boundaries of screen
player_x += player_x_change
if player_x < 0:
player_x = 0
elif player_x > 604:
player_x = 604
# update enemy movement and keeping the enemy on the boundaries of screen
for i in range(num_enemies):
# Game Over
if enemy_y[i] > player_y:
for j in range(num_enemies):
enemy_y[j] = 2000
drawGameOverScreen()
break
enemy_x[i] += enemy_x_change[i]
if enemy_x[i] <= 0.0:
enemy_x_change[i] = 0.18
enemy_y[i] += enemy_y_change[i]
elif enemy_x[i] > 604.0:
enemy_x_change[i] = -0.18
enemy_y[i] += enemy_y_change[i]
bullet_hit = checkCollision(enemy_x[i], enemy_y[i], bullet_x, bullet_y)
if bullet_hit:
bullet_y = player_y
bullet_state = "ready"
score += 1
enemy_x[i] = float(random.randint(0, 608))
enemy_y[i] = float(random.randint(50, 100))
drawEnemy(enemy_x[i], enemy_y[i], i)
# update bullet movement
if bullet_y <= 0: # when bullet hits the top of the screen...
bullet_y = player_y # teleport to player's position
bullet_state = "ready" # and prepare it to be fired again
if bullet_state == "fire":
fireBullet(bullet_x, bullet_y)
bullet_y -= bullet_y_change
# collision
bullet_hit = checkCollision(enemy_x[i], enemy_y[i], bullet_x, bullet_y)
if bullet_hit:
explosion_sound = mixer.Sound('res/sounds/explosion.wav')
explosion_sound.play()
bullet_y = player_y
bullet_state = "ready"
score += 1
enemy_x[i] = float(random.randint(0, 608))
enemy_y[i] = float(random.randint(50, 100))
drawPlayer(player_x, player_y)
drawScore(text_x, text_y)
pygame.display.update() # we need to update the screen to see changes
pygame.font.quit()
pygame.mixer.quit()
pygame.display.quit() # unitialize module and close window
``` |
{
"source": "jlnbtz/DLAS_speech_tokenizer",
"score": 3
} |
#### File: src/util/progress.py
```python
import math
import sys
def print_bar(part, whole, length, prefix='', suffix='', delimiter='\r', end='\n'):
number = part / whole * length
floor = math.floor(number)
sys.stdout.write(prefix + '█' * floor)
rest = number - floor
if part < whole:
if rest < 0.125:
sys.stdout.write(' ')
elif rest < 0.25:
sys.stdout.write('▏')
elif rest < 0.375:
sys.stdout.write('▎')
elif rest < 0.5:
sys.stdout.write('▍')
elif rest < 0.625:
sys.stdout.write('▌')
elif rest < 0.75:
sys.stdout.write('▋')
elif rest < 0.875:
sys.stdout.write('▊')
else:
sys.stdout.write('▉')
print(' ' * (length - floor - 1) + suffix, end=delimiter if part < whole else end)
# progress.py ends here
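# Hedged usage sketch (not in the original file): draw a 40-character bar
# that updates in place over 100 steps.
# for i in range(1, 101):
#     print_bar(i, 100, 40, prefix='progress: ')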
``` |
{
"source": "jlnc/reto-python",
"score": 3
} |
#### File: reto-04/eleloi/configurator.py
```python
import toml
import os
import pydantic
from pathlib import Path
class Configuration(pydantic.BaseModel):
directorio: str
class Configurator:
def __init__(self, config_path: str, config_filename: str) -> None:
self.config_file = os.path.join(config_path, config_filename)
def read(self) -> Configuration:
if not os.path.isfile(self.config_file):
self._write_toml(self._get_default_configuration())
return Configuration(**toml.load(self.config_file))
def _write_toml(self, config: Configuration) -> None:
with open(self.config_file, "w", encoding="utf-8") as f:
f.write(toml.dumps(config.dict()))
@staticmethod
def _get_user_download_directory() -> str:
home_dir = str(Path.home())
download_dir = os.path.join(home_dir, "downloads")
if download_dir:
return download_dir
raise Exception("Could not find user download directory")
def _get_default_configuration(self) -> Configuration:
default_download_directory = self._get_user_download_directory()
return Configuration(directorio=default_download_directory)
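# Hedged usage sketch (not in the original file); the path and filename
# below are illustrative placeholders.
# if __name__ == "__main__":
#     config = Configurator(".", "config.toml").read()
#     print(config.directorio)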
```
#### File: reto-05/eleloi/main.py
```python
import _config
import _utils
CONFIG_PATH = "config.toml"
def main():
config = _config.read(CONFIG_PATH)
for path_name in [x.in_ for x in config.directorios]:
_utils.list_jpg_files_in_dir(path_name)
if __name__ == "__main__":
main()
```
#### File: eleloi/test/test_config.py
```python
import os
import _config
SAMPLE_CONFIG_PATH = "test_config.toml"
def clean_sample_data():
if os.path.isfile(SAMPLE_CONFIG_PATH):
os.remove(SAMPLE_CONFIG_PATH)
assert not os.path.isfile(SAMPLE_CONFIG_PATH)
def test_can_create_default_configuration():
clean_sample_data()
_config.read(SAMPLE_CONFIG_PATH)
assert os.path.isfile(SAMPLE_CONFIG_PATH)
```
#### File: fpellicer/src/utils.py
```python
import shutil
from pathlib import Path
import mimetypes
import functools
def banner(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
"""
Adds the path from which the files are listed as a header.
"""
print(f"=== {args[0]} ===")
function(*args, **kwargs)
return wrapper
@banner
def list_images(path: Path) -> None:
"""
Lists the images in the given directory.
Parameters:
path (Path): Path to the image directory.
Returns:
None
"""
for file in path.iterdir():
mime = mimetypes.guess_type(file)[0]
if file.is_file() and mime == "image/jpeg":
print(file.name)
@banner
def actions(src: Path, dst: Path, action: str) -> None:
"""
Copies or moves the contents of 'src' to 'dst' depending on the value of 'action'.
Parameters:
src (Path): Source directory.
dst (Path): Destination directory.
action (str): Either 'move' or 'copy'.
Returns:
None
"""
if action == "copy":
for item in src.iterdir():
shutil.copy(item, dst)
elif action == "move":
for item in src.iterdir():
src_ = dst / item.name
if src_.exists():
src_.unlink()
# Versions <= 3.8 do NOT accept Path objects as src
shutil.move(item.as_posix(), dst)
else:
pass
```
#### File: joselo/src/configurator.py
```python
import os
from pathlib import Path
import toml
from typing import Dict, Union, MutableMapping, Any
class Configurator:
"""Una clase para generar y verificar la configuración."""
HEADER = "directorios"
DIR_INPUT = "in"
DIR_OUTPUT = "out"
def __init__(self, path: Path, config: str) -> None:
"""Constructor.
Stores the input parameters and checks the configuration.
Parameters
----------
path : Path
The path to the configuration directory.
config : str
The name of the configuration file.
Returns
-------
None
"""
self.directory = path
self.filename = config
self.check()
def check(self) -> None:
"""Chequea la integridad de la configuración.
1) Si no existe el directorio de la configuración, lo crea.
2) Si no existe el fichero de configuración, lo crea y escribe
el nombre de la sección principal: '["directorios"]'.
3) Si existe el fichero de configuración, lo lee y crea todos
los directorios definidos en él.
No verifica si los paths de esos directorios contienen cosas
como ~/directorio, $HOME/directorio, ../../directorio, etc
pero debería, ya que en esos casos se crean paths relativos
al directorio actual: ${PWD}/"~/directorio",
${PWD}/"$HOME/directorio", etc y no es eso lo que queremos.
Returns
-------
None.
"""
if not self.directory.exists():
os.makedirs(self.directory)
config_file = self.directory / self.filename
if not config_file.exists():
conf = {self.__class__.HEADER: {}}
with open(config_file, 'w') as fwrite:
toml.dump(conf, fwrite)
conf = toml.load(config_file)
# print(conf, flush=True)
dirs_inout = (self.__class__.DIR_INPUT, self.__class__.DIR_OUTPUT)
for item in conf[self.__class__.HEADER].values():
# print(item, flush=True)
for path in [Path(item[d]) for d in item if d in dirs_inout]:
# print(path, flush=True)
if not path.exists():
os.makedirs(path)
def read(self) -> MutableMapping[str, Any]:
"""Leer la configuración.
Accede al fichero de configuración usando los atributos que
se definen en el constructor (self.path y self.filename)
Returns
-------
MutableMapping[str, Any]
"""
config_file = self.directory / self.filename
return toml.load(config_file)
def save(self, conf: Union[Dict, MutableMapping[str, Any]]) -> None:
"""Guarda la configuración en el formato TOML.
Parameters
----------
conf : Union[Dict, MutableMapping[str, Any]]
La variable que hemos usado para definir la configuración
de forma interna.
Returns
-------
None
"""
config_file = self.directory / self.filename
with open(config_file, 'w') as fw:
toml.dump(conf, fw)
```
#### File: reto-07/eleloi/_config.py
```python
from enum import Enum
import re
import toml
import os
import pydantic
class InvalidFilterString(ValueError):
def __init__(self, value: str, *args: object) -> None:
self.value = value
super().__init__(*args)
class Action(Enum):
NONE = "none"
MOVE = "move"
COPY = "copy"
class UserDir(pydantic.BaseModel):
in_: str = pydantic.Field(alias="in")
out: str
actions: list[Action]
filter_: str = pydantic.Field(
# regex=r"^\*\.\w+$",
# description="a filter to select which files to process, like *.jpg",
alias="filter",
)
@pydantic.validator("filter_")
@classmethod
def check_filter_regex_match(cls, value):
if not re.search(r"^\*\.\w+$", value):
raise InvalidFilterString(value=value)
return value
class Configuration(pydantic.BaseModel):
directorios: list[UserDir]
def _generate_default_configuration_file(config_path: str) -> None:
default_config = Configuration(directorios=[])
with open(config_path, "w", encoding="utf-8") as f:
f.write(toml.dumps(default_config.dict()))
def read(config_path: str) -> Configuration:
if not os.path.isfile(config_path):
_generate_default_configuration_file(config_path)
return Configuration(**toml.load(config_path))
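# Hedged usage sketch (not in the original module); "config.toml" is an
# illustrative path.
# config = read("config.toml")
# for user_dir in config.directorios:
#     print(user_dir.in_, "->", user_dir.out, user_dir.actions)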
```
#### File: eleloi/test/test_executers.py
```python
import shutil
import os
import _executers
import _config
from .sample_data import SAMPLE_DIR, none_dir, copy_dir, move_dir
def _clean_sample_data_dir():
if os.path.isdir(SAMPLE_DIR):
shutil.rmtree(SAMPLE_DIR)
assert not os.path.isdir(SAMPLE_DIR)
def _create_folders_if_dont_exist(sample_data: _config.UserDir):
_executers._create_folders_if_dont_exists(
[sample_data.in_, sample_data.out]
)
def _fill_with_sample_files_with_extension(
directory: str, num_files: int, extension: str
) -> None:
for filename in [f"sample{x}.{extension}" for x in range(0, num_files)]:
with open(os.path.join(directory, filename), "w") as f:
f.write("sample data")
def test_none_executer_creates_directory():
_clean_sample_data_dir()
executer = _executers.NoneExecuter(
none_dir.in_, none_dir.out, none_dir.filter_
)
executer.run()
assert os.path.isdir(SAMPLE_DIR)
def test_copy_executer_copy_all_files():
_clean_sample_data_dir()
_create_folders_if_dont_exist(copy_dir)
_fill_with_sample_files_with_extension(copy_dir.in_, 3, "jpg")
_fill_with_sample_files_with_extension(copy_dir.in_, 2, "txt")
assert len(os.listdir(copy_dir.in_)) == 5
assert len(os.listdir(copy_dir.out)) == 0
executer = _executers.CopyExecuter(
copy_dir.in_, copy_dir.out, copy_dir.filter_
)
executer.run()
assert len(os.listdir(copy_dir.in_)) == 5
assert len(os.listdir(copy_dir.out)) == 3
def test_copy_executer_copy_existing_files():
_clean_sample_data_dir()
_create_folders_if_dont_exist(copy_dir)
_fill_with_sample_files_with_extension(copy_dir.in_, 3, "jpg")
_fill_with_sample_files_with_extension(copy_dir.out, 1, "jpg")
_fill_with_sample_files_with_extension(copy_dir.in_, 2, "txt")
assert len(os.listdir(copy_dir.in_)) == 5
assert len(os.listdir(copy_dir.out)) == 1
executer = _executers.CopyExecuter(
copy_dir.in_, copy_dir.out, copy_dir.filter_
)
executer.run()
assert len(os.listdir(copy_dir.in_)) == 5
assert len(os.listdir(copy_dir.out)) == 3
def test_move_executer_all_files():
_clean_sample_data_dir()
_create_folders_if_dont_exist(move_dir)
_fill_with_sample_files_with_extension(move_dir.in_, 3, "jpg")
_fill_with_sample_files_with_extension(move_dir.in_, 2, "txt")
assert len(os.listdir(move_dir.in_)) == 5
assert len(os.listdir(move_dir.out)) == 0
executer = _executers.MoveExecuter(
move_dir.in_, move_dir.out, move_dir.filter_
)
executer.run()
assert len(os.listdir(move_dir.in_)) == 2
assert len(os.listdir(move_dir.out)) == 3
def test_copy_executer_move_existing_files():
_clean_sample_data_dir()
_create_folders_if_dont_exist(move_dir)
_fill_with_sample_files_with_extension(move_dir.in_, 3, "jpg")
_fill_with_sample_files_with_extension(move_dir.in_, 2, "txt")
_fill_with_sample_files_with_extension(move_dir.out, 1, "jpg")
assert len(os.listdir(move_dir.in_)) == 5
assert len(os.listdir(move_dir.out)) == 1
executer = _executers.MoveExecuter(
move_dir.in_, move_dir.out, move_dir.filter_
)
executer.run()
assert len(os.listdir(move_dir.in_)) == 2
assert len(os.listdir(move_dir.out)) == 3
def test_copy_over_same_out_path():
_clean_sample_data_dir()
_create_folders_if_dont_exist(copy_dir)
_fill_with_sample_files_with_extension(copy_dir.out, 3, "jpg")
_fill_with_sample_files_with_extension(copy_dir.out, 2, "txt")
assert len(os.listdir(copy_dir.out)) == 5
executer = _executers.CopyExecuter(
copy_dir.out, copy_dir.out, copy_dir.filter_
)
executer.run()
assert len(os.listdir(copy_dir.out)) == 5
def test_move_executer_same_out_path():
_clean_sample_data_dir()
_create_folders_if_dont_exist(move_dir)
_fill_with_sample_files_with_extension(move_dir.out, 3, "jpg")
_fill_with_sample_files_with_extension(move_dir.out, 2, "txt")
assert len(os.listdir(move_dir.out)) == 5
executer = _executers.MoveExecuter(
move_dir.out, move_dir.out, move_dir.filter_
)
executer.run()
assert len(os.listdir(move_dir.out)) == 5
```
#### File: joselo/test/mkdirs.py
```python
import os
from pathlib import Path
import shutil
import sys
sys.path.append(os.path.join("../src")) # noqa
from _constants import (
CONFIG,
CONFIG_APP,
CONFIG_DIR_IN,
CONFIG_DIR_OUT,
CONFIG_FILE,
CONFIG_HEADER,
FILES,
TEST_ROOT,)
from configurator import Configurator
def mkdirs(): # noqa
if TEST_ROOT.exists():
shutil.rmtree(TEST_ROOT)
configurator = Configurator(TEST_ROOT, CONFIG_FILE)
configurator.save(CONFIG)
config = configurator.read()
shutil.copy(TEST_ROOT / CONFIG_FILE, CONFIG_APP)
for item in config[CONFIG_HEADER].values():
for dir_ in (CONFIG_DIR_IN, CONFIG_DIR_OUT):
path = Path(item[dir_])
os.makedirs(path)
if dir_ == CONFIG_DIR_IN:
for f in FILES:
file = path / f
file.touch()
if __name__ == '__main__':
mkdirs()
```
#### File: fpellicer/src/resize_image.py
```python
import mimetypes
from pathlib import Path
from PIL import Image
class ResizeImage:
"""
Resizes an image given a width and a height.
"""
def __init__(self, file_in, file_out, args):
"""
Args:
file_in (Path): Input file (image)
file_out (Path): Directory where the edited file is saved
args (dict): Values for the width and the height
Returns:
None
"""
self.file_in = file_in
self.file_out = file_out
self.width = int(args["width"])
self.height = int(args["height"])
def check(self) -> bool:
"""
Verifies that the input file is an image and that the
output directory exists.
"""
mimetype = mimetypes.guess_type(self.file_in)[0]
return "image" in mimetype and self.file_out.exists()
def execute(self) -> None:
"""Redimensiona una imagen"""
with Image.open(self.file_in) as img:
img.load()
resized_image = img.resize((self.width, self.height))
name = self.file_in.stem + "_resized" + self.file_in.suffix
resized_image.save(self.file_out / name)
print(self.file_out / name)
def thumbnail(self) -> None:
"""Redimensiona una imagen preservando las proporciones"""
with Image.open(self.file_in) as img:
img.load()
img_copy = img.copy()
img_copy.thumbnail((self.width, self.height))
name = self.file_in.stem + "_thumbnail" + self.file_in.suffix
img_copy.save(self.file_out / name)
print(self.file_out / name)
def main():
file_in = Path("/home/fran/Testing/reto-python/ImagesIn_1/tux.png")
file_out = Path("/home/fran/Testing/reto-python/ImagesOut_1/")
args = {"width": 200, "height": 200}
resize_image = ResizeImage(file_in, file_out, args)
if resize_image.check():
resize_image.execute()
# resize_image.thumbnail()
if __name__ == "__main__":
main()
```
#### File: avarez/src/instagram_filters.py
```python
from pathlib import Path
from PIL import Image
import pilgram
class InstagramFilter():
'''
instagram image class
'''
def __init__(self, dir_path, file_name, file_ext):
'''
init method
'''
self.filters = {1: "_1977",
2: "aden",
3: "brannan",
4: "brooklyn",
5: "clarendon",
6: "earlybird",
7: "gingham",
8: "hudson",
9: "inkwell",
10: "kelvin",
11: "lark",
12: "lofi",
13: "maven",
14: "mayfair",
15: "moon",
16: "nashville",
17: "perpetua",
18: "reyes",
19: "rise",
20: "slumber",
21: "stinson",
22: "toaster",
23: "valencia",
24: "walden",
25: "willow",
26: "xpro2"}
self.dir_path = dir_path
self.file_name = file_name
self.file_ext = file_ext
self.filter_name = None
def list_filters(self):
'''
list filters method
'''
for filter_index in self.filters.keys():
print(f"{filter_index:2d} : {self.filters[filter_index]}")
def set_filter(self, filter_index):
'''
set filter method
'''
self.filter_name = self.filters[filter_index]
def filters_indexes(self):
'''
filters indexes method
'''
return self.filters.keys()
def check(self):
'''
check method
'''
if Path(f"{self.dir_path}/{self.file_name}.{self.file_ext}"):
print(f"In: {self.dir_path}/{self.file_name}.{self.file_ext}")
if self.filter_name:
print(f"Filtro: {self.filter_name}")
return True
return False
def execute(self):
'''
execute method
'''
in_img = f"{self.dir_path}/{self.file_name}.{self.file_ext}"
out_img = f"{self.dir_path}/{self.file_name}.{self.filter_name}.{self.file_ext}"
print(f"Out: {out_img}")
img = Image.open(in_img)
if self.filter_name == "_1977": # 1
pilgram._1977(img).save(out_img)
if self.filter_name == "aden": # 2
pilgram.aden(img).save(out_img)
if self.filter_name == "brannan": # 3
pilgram.brannan(img).save(out_img)
if self.filter_name == "brooklyn": # 4
pilgram.brooklyn(img).save(out_img)
if self.filter_name == "clarendon": # 5
pilgram.clarendon(img).save(out_img)
if self.filter_name == "earlybird": # 6
pilgram.earlybird(img).save(out_img)
if self.filter_name == "gingham": # 7
pilgram.gingham(img).save(out_img)
if self.filter_name == "hudson": # 8
pilgram.hudson(img).save(out_img)
if self.filter_name == "inkwell": # 9
pilgram.inkwell(img).save(out_img)
if self.filter_name == "kelvin": # 10
pilgram.kelvin(img).save(out_img)
if self.filter_name == "lark": # 11
pilgram.lark(img).save(out_img)
if self.filter_name == "lofi": # 12
pilgram.lofi(img).save(out_img)
if self.filter_name == "maven": # 13
pilgram.maven(img).save(out_img)
if self.filter_name == "mayfair": # 14
pilgram.mayfair(img).save(out_img)
if self.filter_name == "moon": # 15
pilgram.moon(img).save(out_img)
if self.filter_name == "nashville": # 16
pilgram.nashville(img).save(out_img)
if self.filter_name == "perpetua": # 17
pilgram.perpetua(img).save(out_img)
if self.filter_name == "reyes": # 18
pilgram.reyes(img).save(out_img)
if self.filter_name == "rise": # 19
pilgram.rise(img).save(out_img)
if self.filter_name == "slumber": # 20
pilgram.slumber(img).save(out_img)
if self.filter_name == "stinson": # 21
pilgram.stinson(img).save(out_img)
if self.filter_name == "toaster": # 22
pilgram.toaster(img).save(out_img)
if self.filter_name == "valencia": # 23
pilgram.valencia(img).save(out_img)
if self.filter_name == "walden": # 24
pilgram.walden(img).save(out_img)
if self.filter_name == "willow": # 25
pilgram.willow(img).save(out_img)
if self.filter_name == "xpro2": # 26
pilgram.xpro2(img).save(out_img)
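# Hedged usage sketch (not in the original module); the directory and file
# name below are illustrative placeholders.
# ig = InstagramFilter("/tmp/photos", "holiday", "jpg")
# ig.list_filters()
# ig.set_filter(15)  # "moon"
# if ig.check():
#     ig.execute()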
``` |
{
"source": "jlndk/rmacro",
"score": 3
} |
#### File: jlndk/rmacro/keyutil.py
```python
import sys
import os
from lib import keyboard
def main():
# Check if the extension is present
if not keyboard.supportsRecording():
print("Cannot record keyboard. RECORD extension not found")
sys.exit(1)
# Start keyboard capturing
keyboard.createRecordingContext(onGlobalKeypress)
def onGlobalKeypress(reply):
#We only care about legit keypresses. Therefore ignore everything else
if not keyboard.isValidEvent(reply):
return
# Loop through all keystrokes and handle them one-by-one
while len(reply.data):
#But first we need to parse the keycode since it's in a binary format.
event, reply.data = keyboard.parseKeystroke(reply.data)
#Then we can handle the event
handleKeyEvent(event)
def handleKeyEvent(event):
if event.type == keyboard.KeyRelease:
clearScreen()
keyString = keyboard.getKeyStringFromCode(event.detail)
print "Key '{1}' pressed with keycode '{0}'".format(event.detail, keyString)
def clearScreen():
os.system("clear")
print "Press a key to see the keycode."
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("")
os.system("clear")
sys.exit(0)
```
#### File: rmacro/lib/keyboard.py
```python
import sys
import os
import time
from Xlib import X, XK, display, protocol
from Xlib.ext import record
local_dpy = display.Display()
recording_display = display.Display()
#Save these values so that programs don't need to import x
KeyRelease = X.KeyRelease
KeyPress = X.KeyPress
#
# Returns if all the requirements for capturing the keyboard is present.
#
def supportsRecording():
return recording_display.has_extension("RECORD")
#
# Starts capturing keyboard events and propergates them to a callback
#
def createRecordingContext(callback):
recArgs = [
{
'core_requests': (0, 0),
'core_replies': (0, 0),
'ext_requests': (0, 0, 0, 0),
'ext_replies': (0, 0, 0, 0),
'delivered_events': (0, 0),
'device_events': (X.KeyPress, X.MotionNotify),
'errors': (0, 0),
'client_started': False,
'client_died': False,
}
]
# Create a recording context; we only want key and mouse events (not
# preocessed)
context = recording_display.record_create_context(0, [record.AllClients], recArgs)
recording_display.record_enable_context(context, callback)
recording_display.record_free_context(context)
#
# Reads the binary keycodes and returns readable ones
#
def parseKeystroke(data):
eventField = protocol.rq.EventField(None)
disp = recording_display.display
return eventField.parse_binary_value(data, disp, None, None)
def send_key(keycode, kind):
#If a list of keycodes is passed, just call this function for each keycode.
if type(keycode) is list:
for char in keycode:
send_key(char, kind)
return
window = local_dpy.get_input_focus()._data["focus"]
if(kind == X.KeyPress):
event_to_send = protocol.event.KeyPress(
time=int(time.time()),
root=local_dpy.screen().root,
window=window,
same_screen=0, child=X.NONE,
root_x=0, root_y=0, event_x=0, event_y=0,
state=16,
detail=keycode
)
elif(kind == X.KeyRelease):
event_to_send = protocol.event.KeyRelease(
time=int(time.time()),
root=local_dpy.screen().root,
window=window,
same_screen=0, child=X.NONE,
root_x=0, root_y=0, event_x=0, event_y=0,
state=16,
detail=keycode
)
else:
print("Invalid event type. Cancelling keypress")
return
window.send_event(event_to_send, propagate=True)
local_dpy.flush()
time.sleep(0.01)
def isValidEvent(reply):
if reply.client_swapped or (reply.category != record.FromServer):
return False
if not len(reply.data) or ord(str(reply.data[0])) < 2: # not an event
return False
return True
def getKeyStringFromKeySym(keysym):
for name in dir(XK):
if name[:3] == "XK_" and getattr(XK, name) == keysym:
return name[3:]
return "[%d]" % keysym
def getKeyStringFromCode(keyCode):
keysym = local_dpy.keycode_to_keysym(keyCode, 0)
return getKeyStringFromKeySym(keysym)
``` |
{
"source": "jlnerd/JLpy_Utilities",
"score": 4
} |
#### File: ML/inspection/maximize_success.py
```python
def dropping_label_categories(model, X, y, drop_label_categories, show_plot = True):
"""
maximize success for a given problem by dropping the rows/samples containing the labels in the drop_label_categories list. This type of maximization is appropriate for problems such as credit default, where one wishes to simply ignore customers who will default.
Arguments:
----------
model: the model object of interest from which predictions will be made on the X dataset passed
X, y: The features and labels the evaluation will be performed on
drop_label_categories: list. The names of the label categories you wish to exclude for optimal results.
Returns:
-------
df_lift: The lift score for each of the categories in the drop_label_categories list.
- The lift is calculated as the original % of occurrences / the % of occurrences after dropping samples based on the predicted values
y_drop: The true values corresponding to the samples selected after dropping the label categories from the y_pred
y_pred_drop: The remaining predicted values after dropping the samples with values matching those in the drop_label_categories list
y_value_counts: Pandas df containing the value counts for the label categories before dropping based on the predicted values
y_drop_value_counts: Pandas df containing the value counts for the label categories after dropping based on the predicted values
"""
import pandas as pd
y_pred = model.predict(X)
y_pred = pd.DataFrame(y_pred, columns = list(y.columns))
y_pred.index = y.index
y_pred_drop = y_pred[~(y_pred.iloc[:,0].isin(drop_label_categories))]
drop_idx_list = list(y_pred[(y_pred.iloc[:,0].isin(drop_label_categories))].index.values)
y_drop = y.drop(drop_idx_list)
assert(y_drop.shape==y_pred_drop.shape)
y_value_counts = y.iloc[:,0].value_counts().reset_index()
y_value_counts.columns = [y.columns[0], 'counts']
y_value_counts['% of Total'] = y_value_counts['counts']/y_value_counts['counts'].sum()*100
y_drop_value_counts = y_drop.iloc[:,0].value_counts().reset_index()
y_drop_value_counts.columns = [y_drop.columns[0], 'counts']
y_drop_value_counts['% of Total'] = y_drop_value_counts['counts']/y_drop_value_counts['counts'].sum()*100
df_lift = (y_value_counts['% of Total'][y_value_counts.iloc[:,0].isin(drop_label_categories)] / y_drop_value_counts['% of Total'][y_drop_value_counts.iloc[:,0].isin(drop_label_categories)]).reset_index()
df_lift.columns = [y.columns[0],'lift']
if show_plot:
import matplotlib.pyplot as plt
fig, ax_list = plt.subplots(1,2)
p=0
for ylabel in ['counts', '% of Total']:
for df_counts, label in [[y_value_counts, 'before drop'],
[y_drop_value_counts, 'after drop']]:
ax_list[p].bar(df_counts[y.columns[0]].astype(str),
df_counts[ylabel], label = label,alpha=0.5)
ax_list[p].set_xticklabels(df_counts[y.columns[0]].astype(str), rotation=90)
ax_list[p].legend()
header = y.columns[0]
if len(header)>20:
xlabel = '\n'.join(header.split(' '))
else:
xlabel = header
ax_list[p].ticklabel_format(axis='y', style='sci', scilimits=(-3,3))
ax_list[p].set_xlabel(xlabel)
ax_list[p].set_ylabel(ylabel)
ax_list[p].grid(which='both',visible=False)
p+=1
fig.tight_layout(rect=(0,0,1.5,1))
plt.show()
return df_lift, y_drop, y_pred_drop, y_value_counts, y_drop_value_counts
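# Hedged usage sketch (not in the original module); model, X and y stand in
# for an already-fitted classifier and its evaluation data.
# df_lift, y_drop, y_pred_drop, before_counts, after_counts = \
#     dropping_label_categories(model, X, y, drop_label_categories=[1])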
def varying_features_for_max_prob(model, X, y,
free_categorical_features= None,
free_continuous_features = None,
success_label = 1,
verbose = 1):
"""
Vary features in X to maximize the probability of success.
Arguments:
----------
model: the model object of interest from which predictions will be made on the X dataset passed
X, y: The features and labels the evaluation will be performed on. We assume the categorical features in the X data should be One-Hot encoded, such that their values are only either 0 or 1.
LabelEncoder: The LabelEncoder object that defines the possible categorical features which
free_categorical_features: the categorical features in X which can be freely varied for success
free_continuous_features: the continuous features in X which can be freely varied for success
success_label: the label category in y which you want to maximize success/probability for.
verbose: print-out verbosity
Returns:
--------
X_opt: Pandas DataFrame with the optimal parameters inserted for each case in X
y_opt_probas: the probabilities for each optimal condition
lift_dict: dictionary containing various lift metrics ('mean(opt/orig proba)', 'median(opt/orig proba)', 'stddev(opt/orig probe)':np.std(y_opt_probas/y_proba))
"""
import itertools
import joblib
import scipy, scipy.optimize
import sklearn, sklearn.metrics
import numpy as np
import warnings
def cont_feat_optimizer(X_slice,
free_cat_feature_headers,
free_cont_feature_headers,
model,
success_label,
cat_feat_combo
):
"""
Optimizer to find the best free continuous feature values for a given example (slice) of features and labels
"""
import scipy, scipy.optimize
#update X_slice free categorical features with categorical feature combo case
X_slice[free_cat_feature_headers] = cat_feat_combo
def loss(free_cont_feat_vals,
free_cont_feature_headers,
X_slice, model):
X_slice[free_cont_feature_headers] = free_cont_feat_vals
y_pred_proba = model.predict_proba(X_slice)
loss_ = 1-y_pred_proba[0][success_label]
return loss_
#define initial guess for optimial free continuous feature values
free_cont_feat_vals = np.array(X_slice[free_cont_feature_headers]).reshape(-1,1)
#run optimizer for free continuous features
results = scipy.optimize.minimize(loss, free_cont_feat_vals,
args = (free_cont_feature_headers,
X_slice, model)
)
#fetch optimal results and updated X_slice
optimal_cont_feat_vals = results['x']
X_slice[free_cont_feature_headers] = optimal_cont_feat_vals
y_pred_proba = model.predict_proba(X_slice)
y_pred_proba = y_pred_proba[0][success_label]
return optimal_cont_feat_vals, y_pred_proba
def cat_cont_feat_optimizer(X_slice,
free_categorical_features,
free_continuous_features,
model,
success_label,
cat_feat_combos):
results_dict={'cat features':[],
'optimal_cont_feat_vals':[],
'y_pred_proba':[]}
for cat_feat_combo in cat_feat_combos:
optimal_cont_feat_vals, y_pred_proba = cont_feat_optimizer(X_slice,
free_categorical_features,
free_continuous_features,
model,
success_label,
cat_feat_combo
)
results_dict['cat features'].append(cat_feat_combo)
results_dict['optimal_cont_feat_vals'].append(optimal_cont_feat_vals)
results_dict['y_pred_proba'].append(y_pred_proba)
idx_max_proba = np.argmax(results_dict['y_pred_proba'])
opt_cat_feat_vals = results_dict['cat features'][idx_max_proba]
opt_cont_feat_vals = results_dict['optimal_cont_feat_vals'][idx_max_proba]
opt_proba = results_dict['y_pred_proba'][idx_max_proba]
return opt_cat_feat_vals, opt_cont_feat_vals, opt_proba
warnings.filterwarnings('ignore')
#fetch baseline probs
y_proba = model.predict_proba(X)[:,success_label]
#fetch locked features list
locked_features = [feature for feature in X.columns if feature not in free_categorical_features and feature not in free_continuous_features]
#build all possible combinations of categorical features
cat_feat_combos = list(itertools.product([0, 1], repeat=len(free_categorical_features)))
    X_opt = X.copy() #work on a copy so the original X is not modified in place
y_opt_probas = []
for i in range(X.shape[0]):
if verbose>=1:
print('Progress:',round(i/X.shape[0]*100,2),'%',end='\r')
X_slice = X.iloc[i,:].to_frame().T
opt_cat_feat_vals, opt_cont_feat_vals, opt_proba = cat_cont_feat_optimizer(
X_slice,
free_categorical_features,
free_continuous_features,
model,
success_label,
cat_feat_combos)
y_opt_probas.append(opt_proba)
X_opt[free_categorical_features] = list(opt_cat_feat_vals)
X_opt[free_continuous_features] = list(opt_cont_feat_vals)
    lift_dict = {'mean(opt/orig proba)':np.mean(y_opt_probas/y_proba),
                 'median(opt/orig proba)':np.median(y_opt_probas/y_proba),
                 'stddev(opt/orig proba)':np.std(y_opt_probas/y_proba)
                 }
warnings.filterwarnings('default')
return X_opt, y_opt_probas, lift_dict
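# Illustrative usage sketch (an assumption, not part of the original module): build a small one-hot
# encoded dataset, fit any sklearn classifier exposing predict_proba, then search for the feature
# settings that maximize the predicted probability of the success label.
if __name__ == '__main__':
    import numpy as np
    import pandas as pd
    from sklearn.linear_model import LogisticRegression

    rng = np.random.RandomState(0)
    X_demo = pd.DataFrame({'flag_a': rng.randint(0, 2, 50),       # binary / one-hot categorical
                           'flag_b': rng.randint(0, 2, 50),
                           'temperature': rng.normal(25, 5, 50)}) # free continuous feature
    y_demo = pd.DataFrame({'label': (X_demo['flag_a'] + (X_demo['temperature'] > 25)).clip(0, 1)})

    clf = LogisticRegression().fit(X_demo, y_demo['label'])
    X_opt, y_opt_probas, lift_dict = varying_features_for_max_prob(
        clf, X_demo, y_demo,
        free_categorical_features=['flag_a', 'flag_b'],
        free_continuous_features=['temperature'],
        success_label=1, verbose=0)
    print(lift_dict)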
```
#### File: ML/inspection/_unique.py
```python
def uniques(df):
"""
Inspect number of unique values and the unique values themselves for the passed dataframe
"""
# dtypes = df.dtypes.reset_index()
# dtypes.columns = ['column','dtype']
# dtypes = dtypes.sort_values(['dtype','column'])
for col in df.columns:#dtypes['column']:
if 'dask' in str(type(df)):
uniques = df[col].compute().sort_values().unique()
else:
uniques = df[col].sort_values().unique()
n_uniques = len(uniques)
print('\n----',col,'----',
'\nn_uniques:',n_uniques,
'\ndtype:',uniques.dtype)
if n_uniques<=20:
print('\nuniques[:]:')
display(list(uniques))
else:
print('\nuniques[:20]:')
display(list(uniques[:20])+['...'])
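# Illustrative usage sketch (assumption, not part of the original module; uniques() relies on
# display(), so an IPython/Jupyter environment is assumed unless a print fallback is in place):
if __name__ == '__main__':
    import pandas as pd
    df_demo = pd.DataFrame({'color': ['red', 'green', 'green', 'blue'],
                            'count': [1, 2, 2, 3]})
    uniques(df_demo)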
```
#### File: ML/model_selection/_search.py
```python
import numpy as _np
import os as _os
import hyperopt as _hyperopt
import time as _time
import functools as _functools
import warnings as _warnings
import matplotlib.pyplot as _plt
import sklearn.model_selection as _sklearn_model_selection
from .. import NeuralNet as _NeuralNet
from ... import file_utils as _file_utils
from hyperopt import base as _base
_base.have_bson = False
class GridSearchCV():
def __init__(self,
models_dict,
cv = 4,
scoring= {'metric':None,'maximize':True},
metrics = {None:None},
retrain = True,
path_GridSearchCV_dir = 'GridSearchCV',
n_jobs = -1,
verbose = 2,
**kwargs):
"""
hyperparameter GridSearchCV across different types of models
Arguments:
----------
models_dict: dictionary containing all models and their param_grid.
- Dictionary Format: {'model name':{'model':model object,
                                            'param_grid': {parameter name: parameter list}}}
        cv: number of cross-validation folds (or a CV splitter) used for the hyperparameter search.
scoring: Default: None.
- If scoring['metric'] = None, use default score for given sklearn model, or use 'loss' for neural network.
- For custom scoring functions, pass 'scoring = {'metric':function or key-word string,
'maximize':True/False}
- for sklearn/dask_ml GridSearchCV, a list of valid metrics can be printed via 'sklearn.metrics.SCORERS.keys()'
        metrics: dictionary of the form {metric name (str): metric function (sklearn.metrics...)}. Each metric will be evaluated on the test set after CV
        retrain: Boolean. Whether or not to retrain the model even if it has already been saved in the path_GridSearchCV_dir folder
path_GridSearchCV_dir: root directory where the GridSearchCV outputs will be saved.
n_jobs: int. Default: -1. number of parallel jobs to run. If -1, all available threads will be used
- Note: parallel computing is not supported for Neural Net models
verbose: verbosity of prints.
"""
self.models_dict = models_dict
self.cv = cv
self.scoring = scoring
self.metrics = metrics
self.retrain = retrain
self.path_GridSearchCV_dir = path_GridSearchCV_dir
self.n_jobs = n_jobs
self.verbose = verbose
self.kwargs = kwargs
self.save = _file_utils.save
self.load = _file_utils.load
def load_NeuralNet(self, path_model_dir, X_train, y_train, epochs):
"""
        load model_dict for the Neural Net case
"""
#fetch best params
best_params_ = self.load('best_params_', 'dill', path_model_dir)
#rebuild model_dict
model_dict = _NeuralNet.DenseNet.model_dict(**best_params_)
model_dict['best_model'] = _NeuralNet.utils.load_model(_os.path.join(path_model_dir,'best_estimator_.h5'))
model_dict['best_params'] = best_params_
model_dict['best_cv_score'] = _np.nan
return model_dict
def _single_model_GridSearchCV(self,
model_dict_,
X_train, y_train, X_test, y_test,
path_model_dir):
"""
Run Grid Search CV on a single model specified by the "key" argument
"""
type_model = str(type(model_dict_['model']))
type_X_train = str(type(X_train))
if ('sklearn' in type_model or 'xgboost' in type_model) and 'dask' not in type_X_train:
GridSearchCV = _sklearn_model_selection.GridSearchCV(model_dict_['model'],
model_dict_['param_grid'],
n_jobs= self.n_jobs,
cv = self.cv,
scoring= self.scoring['metric'],
verbose = _np.max((0,self.verbose-1))
)
if y_train.shape[1]==1:
y_train = _np.array(y_train).reshape(-1,)
GridSearchCV.fit(X_train,y_train)
elif 'dask' in type_X_train:
from ..dask_ml_extend import model_selection as dask_ml_model_selection
GridSearchCV = dask_ml_model_selection.GridSearchCV(model_dict_['model'],
model_dict_['param_grid'],
n_jobs= self.n_jobs,
cv = self.cv,
scoring= self.scoring['metric'],
)
GridSearchCV.fit(X_train, y_train)
else: #run gridsearch using neural net function
if self.scoring['metric'] == None:
self.scoring={'metric': 'loss', 'maximize': False}
#check kwargs for epochs
epochs = 100
for item in self.kwargs.items():
if 'epochs' in item[0]: epochs = item[1]
GridSearchCV = _NeuralNet.search.GridSearchCV(model_dict_['model'],
model_dict_['param_grid'],
cv = self.cv,
scoring=self.scoring,
epochs = epochs,
path_report_folder = path_model_dir,
verbose = _np.max((0,self.verbose-1))
)
GridSearchCV.fit(X_train, y_train, X_test, y_test)
model_dict_['best_model'] = GridSearchCV.best_estimator_
model_dict_['best_params'] = GridSearchCV.best_params_
model_dict_['best_cv_score'] = GridSearchCV.best_score_
if 'sklearn' in str(type(model_dict_['model'])):
self.save(model_dict_, 'model_dict', 'dill', path_model_dir)
return model_dict_
def fit(self,
X_train,
y_train,
X_test,
y_test):
"""
Fit the X_train, y_train dataset & evaluate metrics on X_test, y_test for each of the best models found in each individual models GridSearchCV
Arguments:
---------
X_train, y_train, X_test, y_test: train & test datasets (pandas or dask dataframes)
"""
#instantiate path_model_dirs dictionary so we can know where the models are saved
self.path_model_dirs = {}
for key in self.models_dict.keys():
if self.verbose >=1: print('\n----',key,'----')
#define model directory
path_model_dir = _os.path.join(self.path_GridSearchCV_dir, key)
self.path_model_dirs[key] = path_model_dir
if self.verbose >=1: print('path_model_dir:',path_model_dir)
model_type = type(self.models_dict[key]['model'])
if 'sklearn' in str(model_type) or 'xgboost' in str(model_type):
path_file = _os.path.join(path_model_dir,'model_dict.dill')
elif 'Net' in key:
path_file = _os.path.join(path_model_dir,'best_params_.dill')
if self.retrain or _os.path.isfile(path_file)==False:
self.models_dict[key] = self._single_model_GridSearchCV(self.models_dict[key],
X_train, y_train,
X_test, y_test,
path_model_dir)
else: #reload previously trained model
if 'sklearn' in str(type(self.models_dict[key]['model'])):
self.models_dict[key] = self.load('model_dict', 'dill', path_model_dir)
elif 'Net' in key:
#check kwargs for epochs
epochs = 100
for item in self.kwargs.items():
if 'epochs' in item[0]: epochs = item[1]
self.models_dict[key] = self.load_NeuralNet(path_model_dir,
X_train, y_train,
epochs)
y_pred = self.models_dict[key]['best_model'].predict(X_test)
if 'Net' not in key:
self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].score(X_test, y_test)
else:
self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].evaluate(X_test, y_test, verbose =0)
if self.verbose >=1:
print('\tbest_cv_score:',self.models_dict[key]['best_cv_score'])
print('\tbest_pred_score:',self.models_dict[key]['best_pred_score'])
for metric_key in self.metrics.keys():
if self.metrics[metric_key] !=None:
try:
self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred)
print('\t',metric_key,':',self.models_dict[key][metric_key])
except Exception as e:
                        print('Exception occurred for',metric_key,':',str(e))
if 'sklearn' in str(type(self.models_dict[key]['model'])):
self.save(self.models_dict[key], 'model_dict', 'dill', path_model_dir)
elif 'Net' in key:
                model_dict_subset = self.models_dict[key].copy()
                for k in list(self.models_dict[key].keys()):
                    if k not in ['y_test','y_pred','best_pred_score'] +list(self.metrics.keys()):
                        model_dict_subset.pop(k)
                #assumption: the pruned dict is meant to be persisted, mirroring the sklearn branch above
                self.save(model_dict_subset, 'model_dict', 'dill', path_model_dir)
class BayesianSearchCV():
def __init__(self,
models_dict,
cv = 4,
scoring= {'metric':None,'maximize':True},
metrics = {None:None},
retrain = True,
path_BayesianSearchCV_dir = 'BayesianSearchCV',
n_jobs = -1,
verbose = 2,
**kwargs):
"""
Hyperparameter BayesianSearchCV across different types of models. This class leverages the hyperopt API.
Arguments:
----------
models_dict: dictionary containing all models and their param_grid.
- Dictionary Format: {'model name':{'model':model object,
                                            'param_grid': {parameter name: parameter list}}}
        cv: number of cross-validation folds (or a CV splitter) used for the hyperparameter search.
scoring: Default: None.
- If scoring['metric'] = None, use default score for given sklearn model, or use 'loss' for neural network.
- For custom scoring functions, pass 'scoring = {'metric':function or key-word string,
'maximize':True/False}
- for sklearn/xgboost/dask_ml GridSearchCV, a list of valid metrics can be printed via 'sklearn.metrics.SCORERS.keys()'
metrics: dictionary of the form {metric name (str): metric function (sklearn.metrics...)}. The metric will be evaluated after CV on the test set
        retrain: Boolean. Whether or not to retrain the model even if it has already been saved in the path_BayesianSearchCV_dir folder
path_BayesianSearchCV_dir: root directory where the BayesianSearchCV outputs will be saved.
        n_jobs: int. Default: -1. number of parallel jobs to run. If -1, all available threads will be used
- Note: parallel computing is not supported for Neural Net models
verbose: print-out verbosity
Notes:
------
        Technically, the optimization is performed using the tree-structured Parzen estimator (TPE) approach, not a pure Bayesian estimator. This approach is more efficient at handling hyperparameter optimization tasks with high dimensionality and small fitness-evaluation budgets. See more details in the paper linked below
https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf
"""
self.models_dict = models_dict
self.cv = cv
self.scoring = scoring
self.metrics = metrics
self.retrain = retrain
self.path_BayesianSearchCV_dir = path_BayesianSearchCV_dir
self.n_jobs = n_jobs
self.verbose = verbose
self.kwargs = kwargs
self.save = _file_utils.save
self.load = _file_utils.load
#define model directory
self.path_model_dirs = {}
for key in self.models_dict.keys():
self.path_model_dirs[key] = _os.path.join(self.path_BayesianSearchCV_dir, key)
def _build_space(self, param_grid):
"""
Build the hyperparameter space for input into hyperopt.fmin() function.
Arguments:
----------
param_grid: hyperparameter dictionary with key-list pairs.
Returns:
--------
space: dictionary with key-hyperopt.hp... pairs
Notes:
------
        For each hyperparameter of interest, the max and min of the list of possible values in the param_grid[key] element are evaluated. If the min and max values span more than one decade (more than a factor of 10 apart), a uniform probability distribution will be sampled between log10(min) and log10(max). This will result in the prefix 'log10.' being prepended to the key in the 'space' dict for the given hyperparameter under consideration.
For the case of non-numeric hyperparameters, the space[key] value will be assigned using the hyperopt.hp.choice() function, with the choices being in integer form (index), rather than their raw string value.
To convert the hyperparameters from hyperopts 'space' back to the parameters required by the model under evaluation, we run the function '_update_model_params()' in each instance of the 'objective' function evaluation.
"""
        if self.verbose>9:
            print('Building param space...')
_warnings.filterwarnings('ignore')
param_grid = param_grid.copy()
space = {}
for key in param_grid.keys():
params = param_grid[key]
if self.verbose>9:
print('\tinput:',key, params)
type_str = str(type(params[0]))
if 'float' in type_str or 'int' in type_str:
min_ = min(params)
max_ = max(params)
log10_min_ = _np.log10(min_)
log10_max_ = _np.log10(max_)
if round(log10_max_)-round(log10_min_)>1 and round(log10_max_)-round(log10_min_)!=_np.inf: # use uniform distribution on log spacing
space['log10.'+key] = _hyperopt.hp.uniform(key, log10_min_, log10_max_)
if self.verbose>9:
print('\toutput:','log10.'+key, 'uniform', log10_min_, log10_max_)
else:
if 'int' in type_str:
space[key] = _hyperopt.hp.quniform(key, min_, max_, 1)
if self.verbose>9:
print('\toutput:',key, 'quniform', min_, max_)
elif 'float' in type_str:
space[key] = _hyperopt.hp.uniform(key, min_, max_)
if self.verbose>9:
print('\toutput:',key, 'uniform', min_, max_)
elif 'str' in type_str:
space[key] = _hyperopt.hp.choice(key, [i for i in range(len(params))])
if self.verbose>9:
print('\toutput:',key, 'choice', [i for i in range(len(params))])
else:
raise Exception('type(params[0]) is '+type_str+'. This type of hyperparameter is not yet supported.')
assert(len(space.keys())==len(param_grid.keys())), 'len(space.keys())='+str(len(space.keys()))+', which is not equal to len(param_grid.keys())='+str(len(param_grid.keys()))
if self.verbose>9:
print('...finished building space')
_warnings.filterwarnings('default')
return space
def _plot_space(self, space):
'''
Generate plots to visualize the probability distribution for the parameter space being evaluated.
Arguments:
----------
space: dictionary of form {<parameter ID>: hyperopt.hp... object} generated from the '_build_space()' function
Returns:
-------
None. displays histograms showing the probability space
'''
n_samples = 5000
for title, space_slice in space.items():
evaluated = [_hyperopt.pyll.stochastic.sample(space_slice) for _ in range(n_samples)]
_plt.title(title)
_plt.hist(evaluated)
_plt.grid(which='both',visible=False)
_plt.show()
def _update_model_params(self, params, model_ID, model, param_grid):
"""
        Iterate through the params and update the model's arguments/params, ensuring the type of each parameter does not change after updating, and transforming log10-scaled values back to their original scale
Arguments:
----------
params: hyperparameter dictionary being evaluated by hyperopt
model: model being evaluated
param_grid: original parameter grid under evaluation
Returns
-------
params_transform: dictionary similar to params, but transformed to match the inputs required by the model
model: Updated model object with the params under evaluation applied to the models arguments by updating the model.__dict__ values.
"""
params = params.copy()
param_grid = param_grid.copy()
params_transform = {}
for key in params.keys():
if 'log10.' in key:
log10_transform = True
else:
log10_transform = False
key = key.replace('log10.','')
type_str = str(type(param_grid[key][0]))
if 'int' in type_str:
if log10_transform:
params_transform[key] = int(10**params['log10.'+key])
else:
params_transform[key] = int(params[key])
elif 'float' in type_str:
if log10_transform:
params_transform[key] = float(10**params['log10.'+key])
else:
params_transform[key] = float(params[key])
elif 'str' in type_str: #index the param grid for hyperparams using 'choice'
params_transform[key] = param_grid[key][params[key]]
if 'densenet' not in model_ID.lower():
model.__dict__[key] = params_transform[key]
            assert(type_str == str(type(params_transform[key]))), 'type changed from '+type_str+' to '+str(type(params_transform[key]))+' after updating params for key:'+str(key)
if 'str' in type_str:
assert(params_transform[key] in param_grid[key]), 'params_transform['+key+']='+str(params_transform[key])+' is not in the list of valid parameter choices:'+str(param_grid[key])
else:
assert(params_transform[key]<=max(param_grid[key]) and params_transform[key]>=min(param_grid[key])), 'params_transform['+key+']='+str(params_transform[key])+' does not lie in the range of valid values:'+str([min(param_grid[key]),max(param_grid[key])] )
if 'densenet' in model_ID.lower():
model = model(**params_transform)
return params_transform, model
def _objective(self, params, model_ID, model_dict, X, y, **kwargs):
"""
Objective function for hyperopt fmin. Note hyperopt assumes the only argument required is the params argument, thus before passing this objective as an argument into the hyperopt.fmin() function, we specify the other arguments using the functools.partial() function (see the _single_model_BayesianSearchCV() function code for more details)
Arguments:
----------
params: hyperparameter dictionary for an individual evaluation
model_dict: dictionary of form {'model': estimator/model object,
'param_grid':dictionary defining the hyperparameter bounds}
X: dataframe of features on which the cv_score will be evaluated
y: dataframe of labels on which the cv_score will be evaluated
Returns:
-------
objective: dictionary of form {'loss': cv_score,
                                   'params': hyperparameters used in the evaluation,
'status': hyperopt.STATUS_OK,
'eval_time': evaluation time}
Notes:
------
sklearn-style models try to maximize their score by default, while hyperopt assumes we are trying to minimize our loss, thus if a scoring metric is not defined, or if a metric is specified with a maximize boolean==True, the cv_score will be transformed by cv_score=1/cv_score before being output to the hyperopt fmin optimizer.
        In contrast, for Neural Net models the default scorer is the loss function, so the cv_score will only be transformed to 1/cv_score if scoring['maximize']==True and scoring['metric']!=None
"""
model = model_dict['model']
param_grid = model_dict['param_grid'].copy()
params = params.copy()
obj_verbose = max(0,self.verbose-2)
type_X = str(type(X))
if 'dask' in type_X:
X = X.compute()
y = y.compute()
if obj_verbose>=2:
print('params',params)
params_transform, model = self._update_model_params(params,
model_ID,
model,
param_grid)
type_model = str(type(model))
if obj_verbose>=2:
print('params_transform',params_transform)
if 'sklearn' in type_model or 'xgboost' in type_model:
cv_scores = _sklearn_model_selection.cross_val_score(model, X, y,
scoring= self.scoring['metric'],
cv = self.cv,
n_jobs= self.n_jobs,
verbose = obj_verbose
)
else: #using neural net function
import tensorflow as _tf
#check for kwargs
epochs = 100
batch_size = 32
callbacks = [_tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience =10)]
for item in kwargs.items():
if 'epochs' in item[0]:
epochs = item[1]
elif 'batch_size' in item[0]:
batch_size = item[1]
elif 'callbacks' in item[0]:
callbacks = item[1]
cv_scores = _NeuralNet.cross_val_score(model,
batch_size,
epochs,
X, y,
callbacks,
scoring = self.scoring['metric'],
cv = self.cv,
verbose= obj_verbose)
cv_score = _np.mean(cv_scores)
if 'sklearn' in type_model or 'xgboost' in type_model:
if self.scoring['maximize']==True or self.scoring['metric']==None:
cv_score = 1/cv_score
else:
if self.scoring['maximize']==True and self.scoring['metric']!=None :
cv_score = 1/cv_score
objective = {'loss': cv_score,
'params': params,
'status': _hyperopt.STATUS_OK,
'eval_time': _time.time()}
return objective
def _single_model_BayesianSearchCV(self,
model_ID,
model_dict,
X_train, y_train,
X_test, y_test,
path_model_dir,
refit=True,
**kwargs):
"""
Run BayesianSearchCV on a single model of interest, save the results, and return the updated model_dict
Arguments:
----------
model_dict: dictionary of form {'model': estimator/model object,
'param_grid':dictionary defining the hyperparameter bounds}
X_train, y_train, X_test, y_test: training and test sets under evaluation
        path_model_dir: path to directory where the model results will be saved. For non-NeuralNet models, the model_dict will be saved as model_dict.dill. For NeuralNets, the model and other relevant parameters will be saved using keras-based saving methods.
refit: boolean. whether or not to refit the model on the full training set using the best_params
Returns:
--------
model_dict: the passed model_dict, but with key-value pairs for: 'best_params', 'best_model', 'best_cv_score'
"""
if self.verbose>=1:
print('Fitting',self.cv,'folds for each of',self.max_evals,'candidates, totalling',self.cv*self.max_evals,'fits')
model_dict = model_dict.copy()
model = model_dict['model']
type_model = str(type(model))
model_type = str(type(model_dict['model']))
param_grid = model_dict['param_grid'].copy()
objective = _functools.partial(self._objective,
model_ID = model_ID,
model_dict = model_dict,
X = X_train, y=y_train,
**kwargs)
space = self._build_space(param_grid)
if self.verbose>=4:
self._plot_space(space)
best_params_bad_keys = _hyperopt.fmin(fn = objective,
space = space,
algo = _hyperopt.tpe.suggest,
max_evals = self.max_evals,
trials = _hyperopt.Trials(),
verbose = self.verbose)
# hyperopt doesn't return the best params dict with keys matching the 'space' keys.
# This breaks handling of 'log10.' transformed parameters. Fix is implemented below
best_params_ = {}
for key in space.keys():
best_params_[key] = best_params_bad_keys[key.replace('log10.','')]
if self.verbose>=3:
print('hyperopt_input_best_params_:',best_params_)
best_score_ = self._objective(best_params_,
model_ID,
model_dict = model_dict,
X = X_train, y=y_train)['loss']
#transform params back to original model values
best_params_, best_model_ = self._update_model_params(best_params_, model_ID, model, param_grid)
if self.verbose>=3:
print('model_input_best_params_:',best_params_)
if refit:
if 'sklearn' in type_model or 'xgboost' in type_model:
if y_train.shape[1]==1:
y_train = _np.array(y_train).reshape(-1,)
best_model_.fit(X_train, y_train)
else: #using neural net function
import tensorflow as _tf
if 'dataframe' in str(type(X_train)).lower():
X_train = _np.array(X_train)
X_test = _np.array(X_test)
if 'dataframe' in str(type(y_train)).lower():
y_train = _np.array(y_train)
y_test = _np.array(y_test)
#check for kwargs
epochs = 100
batch_size = 32
callbacks = [_tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience =10)]
for item in kwargs.items():
if 'epochs' in item[0]:
epochs = item[1]
elif 'batch_size' in item[0]:
batch_size = item[1]
elif 'callbacks' in item[0]:
callbacks = item[1]
history = best_model_.fit(x= X_train,
y= y_train,
validation_data=(X_test, y_test),
batch_size=batch_size,
epochs = epochs,
verbose= max(0,self.verbose-2),
callbacks = callbacks)
model_dict['best_params'] = best_params_
model_dict['best_model'] = best_model_
model_dict['best_cv_score'] = best_score_
if 'sklearn' in model_type or 'xgboost' in model_type:
self.save(model_dict, 'model_dict', 'dill', path_model_dir)
else:
if _os.path.isdir(path_model_dir)==False:
_os.makedirs(path_model_dir)
best_model_.save(_os.path.join(path_model_dir, 'best_model.h5'))
self.save(model_dict['best_params'], 'best_params', 'dill', path_model_dir)
return model_dict
def fit(self,
X_train,
y_train,
X_test,
y_test,
max_evals,
**kwargs,
):
"""
Fit the X_train, y_train dataset & evaluate metrics on X_test, y_test for each of the best models found in each individual models GridSearchCV
Arguments:
---------
X_train, y_train, X_test, y_test: train & test datasets (pandas or dask dataframes)
max_evals: Max number of evaluations to perform during the BayesianSearchCV procedure for each model.
kwargs: For use in neural network hyperopts: epochs, batch_size, callbacks
Returns:
-------
None. The models_dict dictionary will be updated for each model to include key-value pairs for: 'best_params', 'best_model', 'best_cv_score', 'best_pred_score', and a key-value pair for each of the metrics in the metrics dictionary, where the 'best_pred_score' and the metrics are evaluated on the test set passed
"""
self.max_evals = max_evals
for key in self.models_dict.keys():
path_model_dir = self.path_model_dirs[key]
if self.verbose >=1:
print('\n----',key,'----')
print('path_model_dir:',path_model_dir)
model_dict = self.models_dict[key]
model_type = str(type(model_dict['model']))
if 'sklearn' in model_type or 'xgboost' in model_type:
path_file = _os.path.join(path_model_dir,'model_dict.dill')
elif 'Net' in key:
path_file = _os.path.join(path_model_dir,'best_model.h5')
if self.retrain or _os.path.isfile(path_file)==False:
model_dict = self._single_model_BayesianSearchCV(key,
model_dict,
X_train, y_train,
X_test, y_test,
path_model_dir,
**kwargs)
self.models_dict[key] = model_dict
else: #reload previously trained model
if 'sklearn' in str(type(self.models_dict[key]['model'])):
self.models_dict[key] = self.load('model_dict', 'dill', path_model_dir)
elif 'Net' in key:
#check kwargs for epochs
epochs = 100
for item in self.kwargs.items():
if 'epochs' in item[0]: epochs = item[1]
self.models_dict[key]['best_model'] = _NeuralNet.utils.load_model(
_os.path.join(path_model_dir,'best_model.h5'))
self.models_dict[key]['best_params'] = self.load('best_params', 'dill', path_model_dir)
if 'Net' in key:
y_pred = self.models_dict[key]['best_model'].predict(_np.array(X_test))
else:
y_pred = self.models_dict[key]['best_model'].predict(X_test)
if 'Net' not in key:
self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].score(X_test, y_test)
y_pred_proba = self.models_dict[key]['best_model'].predict_proba(X_test)[:,1]
else:
if 'crossentropy' in self.models_dict[key]['best_model'].loss:
y_pred_proba = y_pred
                    y_pred = (y_pred >= 0.5).astype(int) #class 1 when the predicted probability is at least 0.5
self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].evaluate(_np.array(X_test),
_np.array(y_test),
verbose =0)
if self.verbose >=1:
try:
print('\tbest_cv_score:',self.models_dict[key]['best_cv_score'])
except Exception as e:
                    print('Exception occurred: '+str(e))
try:
print('\tbest_pred_score:',self.models_dict[key]['best_pred_score'])
except Exception as e:
                    print('Exception occurred: '+str(e))
for metric_key in self.metrics.keys():
if self.metrics[metric_key] !=None:
try:
if 'roc' in metric_key:
self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred_proba)
else:
self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred)
print('\t',metric_key,':',self.models_dict[key][metric_key])
except Exception as e:
                        print('Exception occurred for',metric_key,':',str(e))
if 'sklearn' in str(type(self.models_dict[key]['model'])):
self.save(self.models_dict[key], 'model_dict', 'dill', path_model_dir)
elif 'Net' in key:
                model_dict_subset = self.models_dict[key].copy()
                for k in list(self.models_dict[key].keys()):
                    if k not in ['y_test','y_pred','best_pred_score'] +list(self.metrics.keys()):
                        model_dict_subset.pop(k)
                #assumption: the pruned dict is meant to be persisted, mirroring the sklearn branch above
                self.save(model_dict_subset, 'model_dict', 'dill', path_model_dir)
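# Illustrative usage sketch (assumption, not part of the original module). Because this module uses
# relative imports it is normally reached through the installed package (e.g. pyDSlib.ML.model_selection),
# so the block below only documents the intended call pattern; it assumes sklearn, hyperopt and dill are
# available and uses plain sklearn estimators.
if __name__ == '__main__':
    import pandas as pd
    import sklearn.datasets, sklearn.ensemble, sklearn.linear_model, sklearn.metrics

    X_demo, y_demo = sklearn.datasets.make_classification(n_samples=200, n_features=10, random_state=0)
    X_demo = pd.DataFrame(X_demo)
    y_demo = pd.DataFrame(y_demo, columns=['label'])
    X_tr, X_te, y_tr, y_te = _sklearn_model_selection.train_test_split(X_demo, y_demo,
                                                                       test_size=0.25, random_state=0)

    models_dict = {'LogisticRegression': {'model': sklearn.linear_model.LogisticRegression(),
                                          'param_grid': {'C': [0.01, 0.1, 1.0, 10.0]}},
                   'RandomForestClassifier': {'model': sklearn.ensemble.RandomForestClassifier(),
                                              'param_grid': {'n_estimators': [10, 50],
                                                             'max_depth': [2, 4]}}}

    searcher = BayesianSearchCV(models_dict,
                                cv=3,
                                scoring={'metric': 'accuracy', 'maximize': True},
                                metrics={'accuracy': sklearn.metrics.accuracy_score},
                                path_BayesianSearchCV_dir='BayesianSearchCV_demo',
                                verbose=1)
    searcher.fit(X_tr, y_tr, X_te, y_te, max_evals=10)
    print({name: d['best_params'] for name, d in searcher.models_dict.items()})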
```
#### File: ML/NeuralNet/plot.py
```python
import matplotlib.pyplot as _plt
def learning_curves(history, rect = (0,0,1,1)):
"""
plot learning curves for each metric
"""
metrics = [key for key in history.history.keys() if key != 'lr' and 'val' not in key]
fig, ax_list = _plt.subplots(1,len(metrics))
if len(metrics)==1:
ax_list = [ax_list]
p=0
for metric in metrics:
for train_val_label in ['','val_']:
label = train_val_label+metric
ax_list[p].plot(history.epoch, history.history[label], label = label)
ax_list[p].set_xlabel('epoch')
ax_list[p].set_ylabel(metric)
ax_list[p].legend()
p+=1
fig.tight_layout(rect=rect)
_plt.show()
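# Illustrative usage sketch (assumption, not part of the original module): learning_curves only needs
# an object exposing `.epoch` and `.history`, like the History object returned by keras model.fit().
# A tiny stand-in object is used here so the example runs without training a network.
if __name__ == '__main__':
    import types
    history_demo = types.SimpleNamespace(
        epoch=[0, 1, 2, 3],
        history={'loss': [1.0, 0.6, 0.4, 0.3],
                 'val_loss': [1.1, 0.7, 0.5, 0.45],
                 'accuracy': [0.5, 0.7, 0.8, 0.85],
                 'val_accuracy': [0.45, 0.65, 0.75, 0.8]})
    learning_curves(history_demo)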
```
#### File: ML/preprocessing/_CorrCoeff.py
```python
import warnings as _warnings
class CorrCoeffThreshold():
def __init__(self,
AbsCorrCoeff_threshold = 0.99,
iterative_sample_size = None):
"""
        Method for filtering features with correlation coefficients >= the AbsCorrCoeff_threshold (absolute value of correlation coeff. threshold) value passed, where the correlation coefficient is the Pearson correlation coefficient
Arguments:
----------
AbsCorrCoeff_threshold: float. default = 0.99. valid range: 0 to 1
iterative_sample_size: float
"""
assert(AbsCorrCoeff_threshold>=0)
assert(AbsCorrCoeff_threshold<=1)
self.AbsCorrCoeff_threshold = AbsCorrCoeff_threshold
def __fetch_dropped_features_slice_dict__(self,
np_corr_col,
feature_idx,
AbsCorrCoeff_threshold):
"""
Arguments:
----------
np_corr_col: column of correlation coefficients for the feature of feature_idx
feature_idx: the index corresponding to the column slice of the correlation coeff. matrix
AbsCorrCoeff_threshold: Absolute value threshold limit for the correlation coefficients to filter
"""
import numpy as np
dropped_features_slice_dict = {'dropped feature idx':[],
'correlated feature idx':[],
'corr coeff':[]}
for i in range(np_corr_col.shape[0]):
if i!=feature_idx:
if np.abs(np_corr_col[i])>= AbsCorrCoeff_threshold:
dropped_features_slice_dict['dropped feature idx'].append(i)
dropped_features_slice_dict['correlated feature idx'].append(feature_idx)
dropped_features_slice_dict['corr coeff'].append(np_corr_col[i])
return dropped_features_slice_dict
def fit(self,
df,
CorrCoeff_features = 'auto',
verbose = 0):
"""
Fit the CorrelationCoeffThreshold object to the data
Arguments:
----------
df: the dataframe of interest
CorrCoeff_features: list. the subset of features to analyze the correlation coeff. on. If 'auto' then all columns in the df will be used
"""
import numpy as np
import joblib
import gc
import dask
_warnings.filterwarnings('ignore')
df = df.copy()
type_df = type(df)
        #assign self.CorrCoeff_features
if CorrCoeff_features == 'auto':
self.CorrCoeff_features = list(df.columns)
else:
assert(type(CorrCoeff_features)==type(list())), 'CorrCoeff_features must be "auto" or a list'
self.CorrCoeff_features = CorrCoeff_features
df = df[self.CorrCoeff_features]
if type_df==dask.dataframe.core.DataFrame:
np_corr = np.array(df.corr())
else:
np_corr = np.corrcoef(df, rowvar=False)
del df
gc.collect()
executor = joblib.parallel.Parallel(n_jobs = -1, verbose=verbose, backend='multiprocessing')
tasks = [joblib.parallel.delayed(self.__fetch_dropped_features_slice_dict__)(np_corr[:,feature_idx],
feature_idx,
self.AbsCorrCoeff_threshold) for feature_idx in range(np_corr.shape[0])]
del np_corr
gc.collect()
#execute the task
outputs = executor(tasks)
del tasks
gc.collect()
#assemble outputs into full dictionary
self.dropped_features_dict = {'dropped feature':[],
'correlated feature':[],
'corr coeff':[]}
self.outputs = outputs
for dropped_feat_slice_dict in outputs:
for i in range(len(dropped_feat_slice_dict['dropped feature idx'])):
correlated_feat = self.CorrCoeff_features[dropped_feat_slice_dict['correlated feature idx'][i]]
dropped_feat = self.CorrCoeff_features[dropped_feat_slice_dict['dropped feature idx'][i]]
corr_coeff = dropped_feat_slice_dict['corr coeff'][i]
if correlated_feat not in self.dropped_features_dict['correlated feature'] and correlated_feat not in self.dropped_features_dict['dropped feature']:
self.dropped_features_dict['dropped feature'].append(dropped_feat)
self.dropped_features_dict['correlated feature'].append(correlated_feat)
self.dropped_features_dict['corr coeff'].append(corr_coeff)
_warnings.filterwarnings('default')
def transform(self, df):
"""
Transform the dataframe based on the previously run fit
Arguments:
----------
df: dataframe which will be transformed
"""
_warnings.filterwarnings('ignore')
df = df.copy()
for feature in self.dropped_features_dict['dropped feature']:
if feature in df.columns:
df = df.drop(columns=[feature])
_warnings.filterwarnings('default')
return df
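# Illustrative usage sketch (assumption, not part of the original module; assumes numpy, pandas,
# joblib and dask are installed, since fit() imports dask): drop one column from each pair whose
# absolute Pearson correlation is at or above the threshold.
if __name__ == '__main__':
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(0)
    a = rng.normal(size=200)
    df_demo = pd.DataFrame({'a': a,
                            'a_copy': a * 2.0 + 1.0,    # perfectly correlated with 'a'
                            'b': rng.normal(size=200)}) # independent noise
    cct = CorrCoeffThreshold(AbsCorrCoeff_threshold=0.95)
    cct.fit(df_demo)
    print(cct.dropped_features_dict)
    print(cct.transform(df_demo).columns.tolist())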
```
#### File: ML/preprocessing/Impute.py
```python
import numpy as _np
def categorical_features(X,
categorical_headers,
strategy = 'most_frequent',
estimator = None,
verbose= 0):
"""
Impute (fill nan) values for categorical features
Arguments:
----------
X: pandas dataframe. If strategy = 'iterative', then all categorical features must be label encoded in a previous step, with nan values remaining after encoding.
categorical_headers: list of categorical feature headers.
strategy : The imputation strategy.
    - If "constant", then replace missing values with fill_value. Can be used with strings or numeric data. fill_value will be 0 when imputing numerical data and "missing_value" for strings or object data types.
- If "most_frequent", then replace missing using the most frequent value along each column. Can be used with strings or numeric data.
- If 'iterative', then use sklearn.imputer.IterativeImputer with the specified estimator
estimator: sklearn estimator object
The estimator to be used if 'iterative' strategy chosen
Note: sklearn.impute.IterativeImputer has a number of other options which could be varied/tuned, but for simplicity we just use the defaults
Returns:
--------
X: Imputed dataframe
Imputer: Imputer object
"""
import sklearn.preprocessing, sklearn.impute
from sklearn.experimental import enable_iterative_imputer
import warnings
import pandas as pd
import dask
warnings.filterwarnings('ignore')
X = X.copy()
if strategy != 'iterative':
Imputer = sklearn.impute.SimpleImputer(strategy=strategy,
verbose = verbose)
else:
n_nearest_features = _np.min([10, len(categorical_headers)]) #use less than or equal to 10 features
Imputer = sklearn.impute.IterativeImputer(estimator= estimator,
initial_strategy = 'most_frequent',
verbose = verbose,
n_nearest_features = n_nearest_features)
#create a dummy nan row to ensure any dataset containing nan for any of the features can be transformed
type_X = type(X)
if type_X==dask.dataframe.core.DataFrame:
npartitions = X.npartitions
X = X.compute()
X_nans = pd.DataFrame(_np.array([[_np.nan for header in categorical_headers]]), columns = categorical_headers)
X_fit = pd.concat((X[categorical_headers],X_nans))
X_drop = X_fit.dropna(axis='columns', how = 'all')
all_nan_categorical_columns = [header for header in categorical_headers if header not in X_drop.columns]
for col in all_nan_categorical_columns:
X_fit[col] = 0
X[col] = 0
Imputer.fit(X_fit)
try:
X[categorical_headers] = Imputer.transform(X[categorical_headers])
except:
print('X[categorical_headers]:')
display(X[categorical_headers])
print('X_fit:')
display(X_fit)
print(X_fit.shape)
print('Imputer.transform(X[categorical_headers]):')
display(Imputer.transform(X[categorical_headers]))
print(Imputer.transform(X[categorical_headers]).shape)
raise
if type_X==dask.dataframe.core.DataFrame:
X = dask.dataframe.from_pandas(X, npartitions=npartitions)
warnings.filterwarnings('default')
return X, Imputer
def continuous_features(X,
continuous_headers,
strategy = 'median',
estimator = None,
verbose= 0):
"""
Impute (fill nan) values for continuous features
Arguments:
----------
X: pandas dataframe. If strategy = 'iterative', then all categorical features must be label encoded in a previous step, with nan values remaining after encoding.
continuous_headers: list of continuous feature headers.
strategy : The imputation strategy.
    - If "constant", then replace missing values with fill_value. fill_value will be 0 when imputing numerical data.
- If "most_frequent", then replace missing using the most frequent value along each column.
- If 'iterative', then use sklearn.imputer.IterativeImputer with the specified estimator
estimator: sklearn estimator object
The estimator to be used if 'iterative' strategy chosen
Note: sklearn.impute.IterativeImputer has a number of other options which could be varied/tuned, but for simplicity we just use the defaults
Returns:
--------
X: Imputed dataframe
Imputer: Imputer object
"""
import sklearn.preprocessing, sklearn.impute
from sklearn.experimental import enable_iterative_imputer
import warnings
import pandas as pd
import numpy as np
import dask
warnings.filterwarnings('ignore')
X = X.copy()
if strategy in ['most_frequent', 'constant', 'mean', 'median']:
Imputer = sklearn.impute.SimpleImputer(strategy=strategy,
verbose = verbose)
if strategy == 'iterative':
n_nearest_features = _np.min([10, len(continuous_headers)])
Imputer = sklearn.impute.IterativeImputer(estimator= estimator,
initial_strategy = 'most_frequent',
verbose = verbose,
n_nearest_features = n_nearest_features)
type_X = type(X)
if type_X==dask.dataframe.core.DataFrame:
npartitions = X.npartitions
X = X.compute()
#create a dummy nan row to ensure any dataset containing nan for any of the features can be transformed
X_nans = pd.DataFrame(_np.array([[_np.nan for header in continuous_headers]]), columns = continuous_headers)
X_fit = pd.concat((X[continuous_headers],X_nans))
X_drop = X_fit.dropna(axis='columns', how = 'all')
all_nan_columns = [header for header in continuous_headers if header not in X_drop.columns]
for col in all_nan_columns:
X_fit[col] = 0
X[col] = 0
Imputer.fit(X_fit)
X[continuous_headers] = Imputer.transform(X[continuous_headers])
if type_X==dask.dataframe.core.DataFrame:
X = dask.dataframe.from_pandas(X, npartitions=npartitions)
warnings.filterwarnings('default')
return X, Imputer
def default_iterative_regressors_dict():
"""
dictionary of typical iterative estimators
"""
import sklearn, sklearn.linear_model, sklearn.ensemble
#focus on BayesianRidge (sklearn default) and RandomForest, since they generally perform better than simple linear or DecisionTree and scale better than KNN
iterative_regressors_dict = {'BayesianRidge':sklearn.linear_model.BayesianRidge(),
'RandomForestRegressor': sklearn.ensemble.RandomForestRegressor(n_jobs=-1)
}
#sklearn.linear_model.LinearRegression(n_jobs=-1),
#sklearn.neighbors.KNeighborsRegressor(n_jobs=-1)
#sklearn.tree.DecisionTreeRegressor(),
return iterative_regressors_dict
def __unit_test__(X, headers_dict, verbose =1 ):
    """
    Iterate over the categorical_features and continuous_features imputation options & ensure everything works for this particular dataset.
    Assumes headers_dict contains 'continuous features' and 'categorical features' lists of column headers.
    """
    print('------running impute.continuous_features validation-------')
    for strategy in ['mean','median','iterative']:
        print('strategy:',strategy)
        if strategy in ['most_frequent','mean','median']:
            X_imputed, Imputer = continuous_features(X,
                                                     headers_dict['continuous features'],
                                                     strategy = strategy,
                                                     estimator = None,
                                                     verbose = verbose)
        else:
            iterative_regressors_dict = default_iterative_regressors_dict()
            for estimatorID in iterative_regressors_dict.keys():
                print('estimator:',estimatorID)
                X_imputed, Imputer = continuous_features(X,
                                                         headers_dict['continuous features'],
                                                         strategy = strategy,
                                                         estimator = iterative_regressors_dict[estimatorID],
                                                         verbose = verbose)
    print('------running impute.categorical_features validation-------')
    for strategy in ['most_frequent', 'iterative']:
        print('strategy:',strategy)
        if strategy == 'most_frequent':
            X_imputed, Imputer = categorical_features(X,
                                                      headers_dict['categorical features'],
                                                      strategy = strategy,
                                                      estimator = None,
                                                      verbose = verbose)
        else:
            iterative_regressors_dict = default_iterative_regressors_dict()
            for estimatorID in iterative_regressors_dict.keys():
                print('estimator:',estimatorID)
                X_imputed, Imputer = categorical_features(X,
                                                          headers_dict['categorical features'],
                                                          strategy = strategy,
                                                          estimator = iterative_regressors_dict[estimatorID],
                                                          verbose = verbose)
    print('\nall imputation options validated!')
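# Illustrative usage sketch (assumption, not part of the original module; assumes scikit-learn, pandas
# and dask are installed, since the functions import dask): fill missing values in a small mixed frame.
if __name__ == '__main__':
    import numpy as np
    import pandas as pd
    df_demo = pd.DataFrame({'color': ['red', np.nan, 'red', 'blue'],
                            'size': [1.0, 2.0, np.nan, 4.0]})
    df_demo, cat_imputer = categorical_features(df_demo, ['color'], strategy='most_frequent')
    df_demo, cont_imputer = continuous_features(df_demo, ['size'], strategy='median')
    print(df_demo)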
```
#### File: JLpy_Utilities/pyDSlib/_plot_imgs.py
```python
import matplotlib as _mpl
import matplotlib.pyplot as _plt
import numpy as _np
import warnings as _warnings
import os as _os
def from_list(imgs_list, n_plot_columns = 3, cmap = 'viridis', title_list = None):
"""
Plot the images contained in the list of images passed
Arguments:
----------
imgs_list: list where each element is an array-like image
n_plot_columns: int. Number of plot columns per row of plots to display
- If len(imgs_list)<n_plot_columns, the n_plot_columns will be updated to be equal to the len(imgs_list)
cmap: matplotlib colormap
title_list: list of strings to use as the title for each plot
Returns:
--------
None. the plots will be displayed
"""
if len(imgs_list)<n_plot_columns:
n_plot_columns = len(imgs_list)
if n_plot_columns == 1:
fig, ax = _plt.subplots(1, n_plot_columns)
ax_list = [ax]
else:
fig, ax_list = _plt.subplots(1, n_plot_columns)
p = 0
for i in range(len(imgs_list)):
img = imgs_list[i]
if type(title_list)==type(list()):
ax_list[p].set_title(title_list[i])
ax_list[p].imshow(img, cmap = cmap)
ax_list[p].grid(which='both', visible=False)
ax_list[p].set_axis_off()
p+=1
if p==n_plot_columns:
p=0
try:
fig.tight_layout(rect=(0,0,int(n_plot_columns/1.2),1))
except:
try:
fig.tight_layout()
except Exception as e:
print('Exception: '+ str(e))
_plt.show()
# generate new plot if this isn't the last header
if i != len(imgs_list)-1:
fig, ax_list = _plt.subplots(1, n_plot_columns)
for ax in ax_list:
ax.grid(which='both',visible=False)
ax.set_axis_off()
p=0
    # ensure the last, partially filled figure is formatted and shown
    if p!=0:
try:
fig.tight_layout(rect=(0,0,int(n_plot_columns/1.2),1))
except:
try:
fig.tight_layout()
except Exception as e:
print('Exception: '+ str(e))
_plt.show()
def from_files(path_imgs_dir, filenames = 'auto',
n_plot_columns = 3,
cmap = 'viridis',
):
"""
Plot the images contained in the path_imgs_dir.
Arguments:
----------
path_imgs_dir: path to directory where images are stored
filenames: list of filenames for images of interest, or 'auto'.
- If 'auto' all the image files within the directory will be plotted
n_plot_columns: int. Number of plot columns per row of plots to display
cmap: matplotlib colormap
Returns:
--------
None. the plots will be displayed
"""
if type(filenames)==type(list()):
path_imgs = [_os.path.join(path_imgs_dir, filename) for filename in filenames]
elif filenames == 'auto':
path_imgs = [_os.path.join(path_imgs_dir, filename) for filename in _os.listdir(path_imgs_dir) if 'png' in filename or 'tiff' in filename or 'bmp' in filename or 'dcm' in filename or 'jpg' in filename or 'jpeg' in filename]
imgs_list = []
for p in range(len(path_imgs)):
path_img = path_imgs[p]
if 'dcm' in path_img:
import pydicom
img = pydicom.dcmread(path_img).pixel_array
else:
img = _plt.imread(path_img)
imgs_list.append(img)
p+=1
        if p%n_plot_columns==0 or p==len(path_imgs): #flush after each full row, and flush the remainder at the end
from_list(imgs_list, n_plot_columns, cmap)
imgs_list = []
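# Illustrative usage sketch (assumption, not part of the original module): plot a handful of randomly
# generated grayscale arrays in a 2-column grid.
if __name__ == '__main__':
    demo_imgs = [_np.random.rand(16, 16) for _ in range(5)]
    from_list(demo_imgs, n_plot_columns=2, cmap='gray',
              title_list=['img '+str(i) for i in range(5)])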
```
#### File: JLpy_Utilities/pyDSlib/summary_tables.py
```python
import pandas as _pd
import numpy as _np
#Fetch unique Device IDs
def count_subgroups_in_group(df,
group_label,
sub_group_label,
Additional_output_labels=None):
"""
Create a summary table showing the count for subgroups within a group
Arguments:
----------
df: dataframe of interest
group_label: column on which the data will be grouped by
sub_group_label: column on which the nunique counts will be made for the defined group.
    Additional_output_labels: a list of additional columns to include in the output; these should be constant within each group (i.e. no more granular than the group label)
Returns:
--------
df_group_w_subgroup_count: dataframe showing unique value counts for given subgroup in a group
"""
df_group = df.groupby(group_label)
group_ID_list = []
subgroup_count_list = []
df_group_w_subgroup_count = _pd.DataFrame()
for group_ID, group_subset in df_group:
if Additional_output_labels == None:
group_subset_out = group_subset[[group_label]].drop_duplicates()
else:
group_subset_out = group_subset[[group_label, *Additional_output_labels]].drop_duplicates()
df_group_w_subgroup_count = _pd.concat((df_group_w_subgroup_count,group_subset_out),axis=0).reset_index(drop=True)
subgroup_count_list.append(group_subset[sub_group_label].drop_duplicates().count())
df_group_w_subgroup_count[sub_group_label+'_count'] = subgroup_count_list
return df_group_w_subgroup_count
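# Illustrative usage sketch (assumption, not part of the original module): count the number of unique
# devices observed for each site.
if __name__ == '__main__':
    df_demo = _pd.DataFrame({'site': ['A', 'A', 'A', 'B', 'B'],
                             'device': ['d1', 'd2', 'd2', 'd3', 'd3']})
    print(count_subgroups_in_group(df_demo, group_label='site', sub_group_label='device'))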
```
#### File: JLpy_Utilities/tests/test_kaggle.py
```python
import pytest
import pyDSlib
# def test_kaggle_setupe_config_dir():
# pyDSlib.kaggle.setup_config_dir(username='jtleona01', key = 'foo')
def test_kaggle_competition_download_files(tmpdir):
try:
pyDSlib.kaggle.competition_download_files(competition='foo',
path_report_dir=tmpdir)
except Exception as e:
assert('Reason: Unauthorized' in str(e) or 'Could not find kaggle.json' in str(e) or 'foo is not a valid competition' in str(e)), 'pyDSlib.kaggle.competition_download_files is returning an unexpected error:'+str(e)
```
#### File: JLpy_Utilities/tests/test_summary_tables.py
```python
import pytest
import sys, os
import pandas as pd
import pyDSlib
def test_count_subgroups_in_group():
df = {}
df['subgroup'] = []
df['group'] = []
for color in ['R','G','B']:
slice_ = [i for i in range(3)]
df['subgroup'] = df['subgroup']+ slice_+slice_
df['group'] = df['group'] + [color for vale in slice_+slice_]
df = pd.DataFrame.from_dict(df)
df_test = pyDSlib.summary_tables.count_subgroups_in_group(df, group_label='group',
sub_group_label='subgroup')
assert(df_test.iloc[0,1]==3), 'expected df_test.iloc[0,1]=3, received df_test.iloc[0,1]='+str(df_test.iloc[0,1])
``` |
{
"source": "jlnieh/sweetsmelloforchid",
"score": 3
} |
#### File: jlnieh/sweetsmelloforchid/cookall.py
```python
__author__ = "<NAME>"
__version__ = "0.0.1"
import os
import sys
import shutil
import datetime
import argparse
import re
import zipfile
from subprocess import call
TOOL_EPUBCHECK=os.path.join('tools', 'epubcheck', 'epubcheck.jar')
SOURCES=('vol01', 'vol02')
FOLDER_BUILD='build'
FOLDER_RELEASE='dist'
FOLDER_METAINFO='META-INF'
FOLDER_BOOKROOT='EPUB'
FOLDER_CONTENTS='contents'
FOLDER_TEMPLATES='templates'
FILENAME_CONTENT_TEMPLATE='content.xhtml'
FILENAME_PACKAGEOPF='package.opf'
FILENAME_NAV='nav.xhtml'
CONSTSTR_MIMETYPE='application/epub+zip'
CONSTSTR_METAINFO="""<?xml version="1.0" encoding="UTF-8"?>
<container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
<rootfiles>
<rootfile full-path="{0}/{1}" media-type="application/oebps-package+xml"/>
</rootfiles>
</container>""".format(FOLDER_BOOKROOT, FILENAME_PACKAGEOPF)
BOOK_ITEMS = []
TOC_ITEMS = []
LINEBREAK_IN_POEM = False
def prepare_folders(build_dir):
if not os.path.isdir(FOLDER_BUILD):
os.makedirs(FOLDER_BUILD)
if not os.path.isdir(FOLDER_RELEASE):
os.makedirs(FOLDER_RELEASE)
if os.path.isdir(build_dir):
shutil.rmtree(build_dir)
os.makedirs(build_dir)
os.makedirs(os.path.join(build_dir, FOLDER_METAINFO))
os.makedirs(os.path.join(build_dir, FOLDER_BOOKROOT))
def prepare_mimetype(build_dir):
outfn = os.path.join(build_dir, 'mimetype')
with open(outfn, 'w') as fout:
fout.write(CONSTSTR_MIMETYPE)
def prepare_metainfo(build_dir):
outfn = os.path.join(build_dir, FOLDER_METAINFO, 'container.xml')
with open(outfn, 'w') as fout:
fout.write(CONSTSTR_METAINFO)
def prepare_fixtures(src_vol, build_dir):
for root, dirs, files in os.walk(src_vol):
dirs[:] = [d for d in dirs if d not in (FOLDER_CONTENTS, FOLDER_TEMPLATES)]
for fname in files:
path_src = os.path.join(root, fname)
rel_pathname = os.path.relpath(path_src, src_vol)
path_dest = os.path.join(build_dir, FOLDER_BOOKROOT, rel_pathname)
dest_folder = os.path.dirname(path_dest)
if not os.path.isdir(dest_folder):
os.makedirs(dest_folder)
shutil.copy(path_src, path_dest)
def splitSubHeader(line):
subHeaderPos = line.find(' ')
if (subHeaderPos > 0):
return (line[:subHeaderPos], line[subHeaderPos+4:])
else:
return (line, '')
PATTERN_IMAGETITLE=re.compile(r'\!\[(.+)\]\((.+)\)')
def parseImageTitle(line):
result = PATTERN_IMAGETITLE.match(line)
if result:
return result[1]
return line
PATTERN_CLEARHTMLTAG=re.compile('<.*?>')
def filterPageTitle(line):
raw_title = parseImageTitle(line)
return PATTERN_CLEARHTMLTAG.sub('', raw_title)
def getImageTitleTag(line):
result = PATTERN_IMAGETITLE.match(line)
if result:
return """<img src="{0}" alt="{1}" title="{1}"/>""".format(result[2], result[1])
return line
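# Example of the markdown-image handling above (illustrative, assuming a typical input line):
#   parseImageTitle('![蘭亭](images/cover.png)') -> '蘭亭'
#   getImageTitleTag('![蘭亭](images/cover.png)') -> '<img src="images/cover.png" alt="蘭亭" title="蘭亭"/>'
# Plain lines without the ![...](...) pattern are returned unchanged.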
PATTERN_PAGETITLE='<!--PAGE_TITLE-->'
PATTERN_PAGEBODY='<!--PAGE_BODY-->'
PATTERN_FOOTNOTE=re.compile(r'\[(\d+)\]')
PATTERN_DATE=re.compile(r'^[\d]+')
def convert_doc(fname_src, fname_template, build_dir, fname_base):
fname_dest = os.path.join(build_dir, FOLDER_BOOKROOT, fname_base)
pageTitle = ''
strContent = ''
pg_id = fname_base[0:3]
h2_id = 0
h4_id = 0
curPara = []
with open(fname_src, 'r', encoding='utf-8') as fin:
for line in fin:
line = line.rstrip()
if len(line) == 0:
if(len(curPara)>0) and (curPara[-1] == 'p'):
strContent += '</{0}>\n'.format(curPara.pop())
elif line.startswith('# '):
(pageTitle, pageSubTitle) = splitSubHeader(line[2:])
h2_id += 1
localHeaderId = '{0}h1{1:02}'.format(pg_id, h2_id)
                if '## ' == pageSubTitle[0:3]: # special case
TOC_ITEMS.append((fname_base, localHeaderId, 2, "{0}".format(pageSubTitle[3:])))
else:
TOC_ITEMS.append((fname_base, localHeaderId, 2, "{1}《{0}》".format(filterPageTitle(pageTitle), pageSubTitle)))
h4_id = 0
while(len(curPara)>0):
strContent += '</{0}>\n'.format(curPara.pop())
                if '## ' == pageSubTitle[0:3]: # special case
strContent += """<header><h1 id="{0}">{1}</h1><h2 class="subtitle">{2}</h2></header>\n""".format(localHeaderId, getImageTitleTag(pageTitle), pageSubTitle[3:])
else:
strContent += """<header><h2 id="{0}">{1}</h2><p class="subtitle center">{2}</p></header>\n""".format(localHeaderId, getImageTitleTag(pageTitle), pageSubTitle)
elif line.startswith('## '):
(pageTitle, pageSubTitle) = splitSubHeader(line[3:])
h2_id += 1
localHeaderId = '{0}h2{1:02}'.format(pg_id, h2_id)
TOC_ITEMS.append((fname_base, localHeaderId, 2, filterPageTitle(pageTitle)))
h4_id = 0
while(len(curPara)>0):
strContent += '</{0}>\n'.format(curPara.pop())
strContent += """<header><h2 id="{0}">{1}</h2>{2}</header>\n""".format(localHeaderId, getImageTitleTag(pageTitle), pageSubTitle)
elif line.startswith('### '):
strContent += """<h3>{0}</h3>\n""".format(line[4:])
elif line.startswith('#### '):
(poemTitle, poemDate) = splitSubHeader(line[5:])
h4_id += 1
localHeaderId = '{0}h4{1:02}{2:03}'.format(pg_id, h2_id, h4_id)
m = PATTERN_FOOTNOTE.search(poemTitle)
if m:
poemTitleDisp = PATTERN_FOOTNOTE.sub(r'<sub><a href="#n\1" epub:type="noteref">\1</a></sub>', poemTitle)
poemTitle = PATTERN_FOOTNOTE.sub(r'', poemTitle)
else:
poemTitleDisp = poemTitle
if len(poemDate) > 0:
m = PATTERN_DATE.search(poemDate)
if m:
strPoemSub = '<p class="poem-date">{0}</p>'.format(poemDate)
else:
strPoemSub = '<p class="poem-author">{0}</p>'.format(poemDate)
poemTitle = '附{0}君《{1}》'.format(poemDate[0], poemTitle)
else:
strPoemSub = ''
TOC_ITEMS.append((fname_base, localHeaderId, 4, poemTitle))
while(len(curPara)>0):
strContent += '</{0}>\n'.format(curPara.pop())
strContent += """<article id="{0}"><header><h3 class="poem-title">{1}</h3>{2}</header>""".format(localHeaderId, poemTitleDisp, strPoemSub)
curPara.append('article')
elif line.startswith('##### '):
(poemTitle, poemDate) = splitSubHeader(line[6:])
m = PATTERN_FOOTNOTE.search(poemTitle)
if m:
poemTitle = PATTERN_FOOTNOTE.sub(r'<sub><a href="#n\1" epub:type="noteref">\1</a></sub>', poemTitle)
if(len(curPara)>1) and (curPara[-1] == 'p'):
strContent += '</{0}>\n'.format(curPara.pop())
strContent += '<h4 class="poem-title">{0}</h4>'.format(poemTitle)
if len(poemDate) > 0:
m = PATTERN_DATE.search(poemDate)
if m:
strContent += '<p class="poem-date">{0}</p>'.format(poemDate)
else:
strContent += '<p class="poem-author">{0}</p>'.format(poemDate)
elif line.startswith('['):
if(len(curPara)>0) and (curPara[-1] == 'p'):
strContent += '</{0}>\n'.format(curPara.pop())
pos = line.find('] ')
note_id = int(line[1:pos])
note_str = line[pos+2:]
strContent += '<aside id="n{0}" data-footnote-seq="{0}" epub:type="footnote">{1}</aside>'.format(note_id, note_str)
elif line.startswith('> '):
if(0 == len(curPara)) or (curPara[-1] != 'p'):
strContent += '<p class="poem">'
curPara.append('p')
elif LINEBREAK_IN_POEM:
strContent += '<br/>'
strContent += PATTERN_FOOTNOTE.sub(r'<sub><a href="#n\1" epub:type="noteref">\1</a></sub>', line[2:])
else:
if(len(curPara)<1):
strContent += '<p>'
curPara.append('p')
strContent += PATTERN_FOOTNOTE.sub(r'<sub><a href="#n\1" epub:type="noteref">\1</a></sub>', line)
while(len(curPara)>0):
strContent += '</{0}>\n'.format(curPara.pop())
with open(fname_template, 'r', encoding='utf-8') as fin, open(fname_dest, 'w', encoding='utf-8') as fout:
for line in fin:
if line.find(PATTERN_PAGETITLE) >= 0:
line = line.replace(PATTERN_PAGETITLE, filterPageTitle(pageTitle))
elif line.find(PATTERN_PAGEBODY) >= 0:
line = line.replace(PATTERN_PAGEBODY, strContent)
fout.write(line)
def generate_docs(src_vol, build_dir):
path_src_root = os.path.join(src_vol, FOLDER_CONTENTS)
fname_template = os.path.join(src_vol, FOLDER_TEMPLATES, FILENAME_CONTENT_TEMPLATE)
for root, dirs, files in os.walk(path_src_root):
for fname in files:
(fbase, fext) = os.path.splitext(fname)
if '.md' != fext:
continue
BOOK_ITEMS.append(fbase)
convert_doc(os.path.join(root, fname), fname_template, build_dir, fbase + '.xhtml')
BOOK_ITEMS.sort(key=lambda x: x[1:3])
PATTERN_TOC = '<!--TOC-->'
PATTERN_TOC2 = '<!--TOC2-->'
def generate_toc(src_vol, build_dir):
str_items = ['', '']
cur_lvl = 0
cur_sec = 0
for item in TOC_ITEMS:
if 'p' == item[0][0]: #skip this item to ToC
continue
if (0 == cur_sec) and ('s11' == item[0][0:3]): # special for vol02 to seperate TOC into part2
if 4 == cur_lvl:
str_items[cur_sec] += '</li>\n' + ' ' * 16 + '</ol></li>'
elif 2 == cur_lvl:
str_items[cur_sec] += '</li>'
cur_lvl = 0
cur_sec = 1
if item[2] > cur_lvl:
if cur_lvl > 0:
indentSpace = '\n' + ' ' * (cur_lvl * 2 + 12)
str_items[cur_sec] += indentSpace + '<ol>'
elif item[2] < cur_lvl:
indentSpace = '\n' + ' ' * (item[2] * 2 + 12)
str_items[cur_sec] += '</li>' + indentSpace + '</ol></li>'
else:
str_items[cur_sec] += '</li>'
indentSpace = '\n' + ' ' * (item[2] * 2 + 12)
str_items[cur_sec] += indentSpace + '<li><a href="{0}#{1}">{2}</a>'.format(item[0], item[1], item[3])
cur_lvl = item[2]
if 4 == cur_lvl:
str_items[cur_sec] += '</li>\n' + ' ' * 16 + '</ol></li>'
elif 2 == cur_lvl:
str_items[cur_sec] += '</li>'
if '' == str_items[0]:
str_items[0] = '<li><a>No title</a></li>'
if '' == str_items[1]:
str_items[1] = '<li><a>No title</a></li>'
fname_src = os.path.join(src_vol, FOLDER_TEMPLATES, FILENAME_NAV)
fname_dest= os.path.join(build_dir, FOLDER_BOOKROOT, FILENAME_NAV)
with open(fname_src, 'r', encoding='utf-8') as fin, open(fname_dest, 'w', encoding='utf-8') as fout:
for line in fin:
if line.find(PATTERN_TOC) >= 0:
line = line.replace(PATTERN_TOC, str_items[0])
elif line.find(PATTERN_TOC2) >= 0:
line = line.replace(PATTERN_TOC2, str_items[1])
fout.write(line)
PATTERN_MODIFIEDDATETIME = '<!--DATE_MODIFIED-->'
PATTERN_BOOKVERSION = '<!--BOOK_VERSION-->'
PATTERN_MANIFEST = '<!--LIST_MANIFEST-->'
PATTERN_SPINE = '<!--LIST_SPINE-->'
PATTERN_SPINE_PREV = '<!--LIST_SPINE_PREV-->'
def generate_opf(src_vol, build_dir):
lineHeader = '\n' + ' ' * 8
str_now = datetime.datetime.utcnow().isoformat(timespec='seconds') + 'Z'
str_items = ''
str_itemref = ''
str_itemref_prev = ''
is_prev_ok = True
for item in BOOK_ITEMS:
if '' != str_items:
str_items += lineHeader
str_items += '<item href="{0}.xhtml" id="{0}" media-type="application/xhtml+xml"/>'.format(item)
if is_prev_ok and 'p' == item[0]:
if '' != str_itemref_prev:
str_itemref_prev += lineHeader
str_itemref_prev += '<itemref idref="{0}"/>'.format(item)
else:
if is_prev_ok:
is_prev_ok = False
if '' != str_itemref:
str_itemref += lineHeader
str_itemref += '<itemref idref="{0}"/>'.format(item)
fname_src = os.path.join(src_vol, FOLDER_TEMPLATES, FILENAME_PACKAGEOPF)
fname_dest= os.path.join(build_dir, FOLDER_BOOKROOT, FILENAME_PACKAGEOPF)
with open(fname_src, 'r', encoding='utf-8') as fin, open(fname_dest, 'w', encoding='utf-8') as fout:
for line in fin:
if line.find(PATTERN_MODIFIEDDATETIME) >= 0:
line = line.replace(PATTERN_MODIFIEDDATETIME, str_now)
elif line.find(PATTERN_BOOKVERSION) >= 0:
line = line.replace(PATTERN_BOOKVERSION, '1.0.0')
elif line.find(PATTERN_MANIFEST) >= 0:
line = line.replace(PATTERN_MANIFEST, str_items)
elif line.find(PATTERN_SPINE) >= 0:
line = line.replace(PATTERN_SPINE, str_itemref)
elif line.find(PATTERN_SPINE_PREV) >= 0:
line = line.replace(PATTERN_SPINE_PREV, str_itemref_prev)
fout.write(line)
def package_book(build_dir, target_fn):
today = datetime.date.today()
rel_fname = "ssoo{0}_r{1:04}{2:02}{3:02}.epub".format(target_fn[-1], today.year, today.month, today.day)
ret_dir = os.getcwd()
os.chdir(build_dir)
epub_fname = os.path.join(ret_dir, FOLDER_RELEASE, rel_fname)
if os.path.isfile(epub_fname):
os.remove(epub_fname)
with zipfile.ZipFile(epub_fname, mode='w') as zfout:
for root, dirs, files in os.walk('.'):
for fname in files:
src_file = os.path.join(root, fname)
print('Adding {0} into EPUB...'.format(src_file))
zfout.write(src_file)
os.chdir(ret_dir)
return epub_fname
def verify_book(epub_fname):
run_cmd = ['java', '-jar', TOOL_EPUBCHECK, epub_fname]
if os.path.isdir(epub_fname):
        run_cmd.extend(['-mode', 'exp'])
# print(run_cmd)
ret = call(run_cmd)
if 0 == ret:
print("EPUB <{0}> is verified OK!".format(epub_fname))
else:
raise Exception("Failed to verify the EPUB file: {0}".format(epub_fname))
def cook_book(vol):
del BOOK_ITEMS[:]
del TOC_ITEMS[:]
build_dir = os.path.join(FOLDER_BUILD, vol)
prepare_folders(build_dir)
prepare_mimetype(build_dir)
prepare_metainfo(build_dir)
prepare_fixtures(vol, build_dir)
generate_docs(vol, build_dir)
generate_toc(vol, build_dir)
generate_opf(vol, build_dir)
epub_fname = package_book(build_dir, vol)
verify_book(epub_fname)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Add elements to the repo')
parser.add_argument('-v', '--volume', dest='volumes', choices=SOURCES, action='append',
help='select one or more volumes to build (default: %(default)s)')
args = parser.parse_args()
if (args.volumes is None) or (len(args.volumes) == 0):
VOL_LIST = SOURCES
else:
VOL_LIST = args.volumes
for vol in VOL_LIST:
if 'vol02' == vol:
LINEBREAK_IN_POEM = True
if os.path.isdir(vol):
cook_book(vol)
else:
raise Exception("Volmume<{0}> is not existed!".format(vol))
``` |
{
"source": "jlnprssnr/luma.led_matrix",
"score": 2
} |
#### File: luma.led_matrix/tests/baseline_data.py
```python
import json
from pathlib import Path
def get_json_data(fname):
"""
Load JSON reference data.
:param fname: Filename without extension.
:type fname: str
"""
base_dir = Path(__file__).resolve().parent
fpath = base_dir.joinpath('reference', 'data', fname + '.json')
with fpath.open() as f:
return json.load(f)
``` |
{
"source": "jlo118/DLlab2",
"score": 3
} |
#### File: jlo118/DLlab2/Q4.py
```python
from keras.layers import Dense, Embedding, LSTM
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
import re
from keras.preprocessing.text import Tokenizer
#Load Data
data = pd.read_csv('spam.csv')
# Keeping only the neccessary columns
data = data[['Category','Message']]
#Clean Data
data['Message'] = data['Message'].apply(lambda x: x.lower())
data['Message'] = data['Message'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))
for idx, row in data.iterrows():
row[0] = row[0].replace('rt', ' ')
max_features = 199
tokenizer = Tokenizer(num_words=max_features, split=' ')
tokenizer.fit_on_texts(data['Message'].values)
X = tokenizer.texts_to_sequences(data['Message'].values)
#Pad to make length the same
X = pad_sequences(X)
embed_dim = 128
lstm_out = 196
def model1():
model = Sequential()
model.add(Embedding(max_features, embed_dim,input_length = X.shape[1]))
model.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(2,activation='softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
return model
# print(model.summary())
labelencoder = LabelEncoder()
integer_encoded = labelencoder.fit_transform(data['Category'])
y = to_categorical(integer_encoded)
X_train, X_test, Y_train, Y_test = train_test_split(X,y, test_size = 0.33, random_state = 42)
batch_size = 32
model = model1()
model.fit(X_train, Y_train, epochs = 10, batch_size=batch_size, verbose = 2)
score,acc = model.evaluate(X_test,Y_test,verbose=2,batch_size=batch_size)
print(score)
print(acc)
print(model.metrics_names)
``` |
{
"source": "jlobatop/GA-CFD-MO",
"score": 3
} |
#### File: airfoil-parametrization/joukowsky/joukowsky_variableR.py
```python
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
# initial center and radius values
x_cent = 0
y_cent = 0
radius1 = 1
# definition of the circle parameters
center = np.array([x_cent,y_cent])
# calculation of the circle coordinates
angle = np.linspace(0,2*np.pi,720)
chi1 = center[0] + radius1*np.cos(angle)
eta1 = center[1] + radius1*np.sin(angle)
# calculations of the Joukowsky transform
x1 = ((chi1)*(chi1**2+eta1**2+1))/(chi1**2+eta1**2)
y1 = ((eta1)*(chi1**2+eta1**2-1))/(chi1**2+eta1**2)
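# (added explanatory comment, not in the original script) x1 and y1 above are the real and
# imaginary parts of the Joukowsky map z = zeta + 1/zeta evaluated at zeta = chi1 + i*eta1,
# which is what turns the circle in the zeta plane into an airfoil-like shape in the z plane.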
# initial figure definition
fig, ax = plt.subplots(figsize=(6,6))
plt.subplots_adjust(bottom=0.25)
# zeta plane
plt.subplot(1, 2, 1)
l, = plt.plot(chi1,eta1,'g',label='Circle')
m, = plt.plot([center[0],center[0]],[center[1],center[1]],'w',marker='x',mec='k',markersize=10,label='Center')
plt.scatter([-1,1],[0,0],c=['r','r'],s=25,marker='h',label='Reference Points')
plt.axis('equal')
plt.xlim([-3,3])
plt.grid(True)
plt.xlabel(r"$\chi$",size=14)
plt.ylabel(r"$\eta$",size=14)
plt.legend(loc='lower left')
# z plane
plt.subplot(1, 2, 2)
plt.axis('equal')
plt.xlim([-3,3])
n, = plt.plot(x1,y1,'g')
# current value of the sliders
x0 = 0
y0 = 0
r0 = 1
# position of the sliders
axx = plt.axes([0.18, 0.12, 0.65, 0.02], facecolor='white')
axy = plt.axes([0.18, 0.08, 0.65, 0.02], facecolor='white')
axr = plt.axes([0.18, 0.04, 0.65, 0.02], facecolor='white')
# slider assignation
sx = Slider(axx, r"$\mu_x$", -1, 1, valinit=x0)
sy = Slider(axy, r"$\mu_y$", -1, 1, valinit=y0)
sr = Slider(axr, r"$R$", 0, 2, valinit=r0)
# updating the figure
def update(val):
x_cent = sx.val
y_cent = sy.val
radius1 = sr.val
# redefinition of the circle parameters
center = np.array([x_cent,y_cent])
# calculate again the circle coordinates
angle = np.linspace(0,2*np.pi,720)
chi1 = center[0] + radius1*np.cos(angle)
eta1 = center[1] + radius1*np.sin(angle)
# calculate again Joukowsky transform
x1 = ((chi1)*(chi1**2+eta1**2+1))/(chi1**2+eta1**2)
y1 = ((eta1)*(chi1**2+eta1**2-1))/(chi1**2+eta1**2)
# update the zeta plane
l.set_xdata(chi1)
l.set_ydata(eta1)
# update circle center coordinates
m.set_xdata([x_cent,x_cent])
m.set_ydata([y_cent,y_cent])
# update the z plane
n.set_xdata(x1)
n.set_ydata(y1)
# draw the selected updates
fig.canvas.draw_idle()
# call the sliders
sx.on_changed(update)
sy.on_changed(update)
sr.on_changed(update)
# show the figure
plt.show()
```
#### File: cases/NSGA_cylinder/forcePlotAnalysis.py
```python
import matplotlib
#This is required to 'plot' inside the CLI
matplotlib.use('AGG')
import numpy as np
import matplotlib.pyplot as plt
from io import StringIO
import io
import sys
# Get values from the input
gen = int(sys.argv[1])
ind = int(sys.argv[2])
# Read the file efficientyly
s = open('./gen%i/ind%i/postProcessing/forces/0/forces.dat' %(gen, ind)).read().replace('(',' ').replace(')',' ').replace('\t',' ')
forces = np.genfromtxt(io.BytesIO(s.encode()))
# RMS function
def rms(x):
return np.sqrt(np.mean(x**2))
# Plot a figure with forces evolution
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2, figsize=(20,15))
ax1.plot(forces[100:,0],forces[100:,1],'b',linewidth=2,label='Pressure Force X')
ax1.plot(forces[100:,0],forces[100:,2],'g',linewidth=2,label='Pressure Force Y')
ax1.set_xlabel('Time (s)', fontsize=16)
ax1.set_ylabel(r'Force ($N$)', fontsize=16)
ax1.legend(loc='lower left', fontsize=16)
ax1.tick_params(labelsize=14)
ax2.plot(forces[100:,0],forces[100:,4],'r',linewidth=2,label='Viscous Force X')
ax2.plot(forces[100:,0],forces[100:,5],'c',linewidth=2,label='Viscous Force Y')
ax2.set_xlabel('Time (s)', fontsize=16)
ax2.set_ylabel(r'Force ($N$)', fontsize=16)
ax2.legend(loc='upper right', fontsize=16)
ax2.tick_params(labelsize=14)
ax3.plot(forces[100:,0],forces[100:,10],'k',linewidth=2,label='Pressure Moment X')
ax3.set_xlabel('Time (s)', fontsize=16)
ax3.set_ylabel(r'Moment ($N\cdot m$)', fontsize=16)
ax3.legend(loc='lower left', fontsize=16)
ax3.tick_params(labelsize=14)
ax4.plot(forces[100:,0],forces[100:,12],'m',linewidth=2,label='Pressure Moment Z')
ax4.set_xlabel('Time (s)', fontsize=16)
ax4.set_ylabel(r'Moment ($N\cdot m$)', fontsize=16)
ax4.legend(loc='lower left', fontsize=16)
ax4.tick_params(labelsize=14)
plt.savefig('./gen%i/ind%i/VALg%ii%i.png' %(gen, ind, gen, ind), bbox_inches='tight', dpi=100)
# Get the index of the first timestep after t = 40 s and find where the forces cross their final values
timestp40 = int(np.argwhere(forces[:,0]>40)[0])
matFX = np.invert(forces[timestp40:,1] > forces[-1,1])
logicFX = np.logical_xor(matFX[0:-2],matFX[1:-1])
if len(np.argwhere(logicFX))%2 == 1:
fx = int(np.argwhere(logicFX)[1])
else:
fx = int(np.argwhere(logicFX)[0])
matFY = np.invert(forces[timestp40:,2] > forces[-1,2])
logicFY = np.logical_xor(matFY[0:-2],matFY[1:-1])
if np.sum(logicFY) == 0:
fy = timestp40
else:
if len(np.argwhere(logicFY))%2 == 1:
fy = int(np.argwhere(logicFY)[1])
else:
fy = int(np.argwhere(logicFY)[0])
fig, (ax1) = plt.subplots(1, figsize=(10,8))
ax1.plot(forces[timestp40+fx:,0],forces[timestp40+fx:,1]+forces[timestp40+fx:,4],'b',linewidth=2,label='Pressure Force X')
# ax1.plot(forces[timestp40+fx:,0],np.mean(forces[timestp40+fx:,1])*np.ones(len(forces[timestp40+fx:,0])),':b',linewidth=1)
ax1.plot(forces[timestp40+fy:,0],forces[timestp40+fy:,2]+forces[timestp40+fy:,5],'g',linewidth=2,label='Pressure Force Y')
# ax1.plot(forces[timestp40+fy:,0],np.mean(forces[timestp40+fy:,2])*np.ones(len(forces[timestp40+fy:,0])),':g',linewidth=1)
ax1.set_xlabel('Time (s)', fontsize=16)
ax1.set_ylabel(r'Force ($N$)', fontsize=16)
ax1.legend(loc='lower left', fontsize=16)
ax1.tick_params(labelsize=14)
ax1.set_ylim([-0.3,0.3])
ax1.set_xlim([0,150])
plt.savefig('./gen%i/data/OSCg%ii%i.png' %(gen, gen, ind), bbox_inches='tight', dpi=100)
np.savetxt('./gen%i/data/FITg%ii%i.txt' %(gen, gen, ind),
np.array([np.mean(forces[timestp40+fx:,1]+forces[timestp40+fx:,4]),np.mean(forces[timestp40+fy:,2]+forces[timestp40+fy:,5]),
np.std(forces[timestp40+fx:,1]+forces[timestp40+fx:,4]),np.std(forces[timestp40+fy:,2]+forces[timestp40+fy:,5]),
rms(forces[timestp40+fx:,1]+forces[timestp40+fx:,4]),rms(forces[timestp40+fy:,2]+forces[timestp40+fy:,5])]))
```
#### File: openFoam-case/run/allRun.py
```python
from tqdm import tqdm
import time
import os
import numpy as np
import matplotlib.pyplot as plt
import time
##########################################################################################################
# FUNCTION DEFINITION
##########################################################################################################
# Function to print a 'string' above the progress bar while it is still running
def print_tqdm(string):
tqdm.write(string)
# Copy the folder from ./case to ./run returning to ./run
def caseCopy():
os.chdir("..")
os.system("cp -r ./baseCase ./run/case")
os.chdir("run")
# Once the simulation is done, transfer the folders from case to the folder of each individual
def folderTransfer(analysisType,i,j):
numFolder = []
nonNumFolder = []
for cFolder in os.listdir('case'):
if analysisType == 'transient':
os.system("mv ./case/%s ./generation%i/ind%i/sim/%s" %(cFolder,i,j,cFolder))
elif analysisType == 'steady':
if str.isnumeric(cFolder):
numFolder.append(int(cFolder))
else:
nonNumFolder.append(cFolder)
else:
print_tqdm('\033[0;33mError in analysisType\033[0m')
break
if analysisType == 'steady':
os.system("mv ./case/%i ./generation%i/ind%i/sim/%i" %(min(numFolder),i,j,min(numFolder)))
os.system("mv ./case/%i ./generation%i/ind%i/sim/%i" %(max(numFolder),i,j,max(numFolder)))
for cFolder in nonNumFolder:
os.system("mv ./case/%s ./generation%i/ind%i/sim/%s" %(cFolder,i,j,cFolder))
##########################################################################################################
# PARAMETER DEFINITION
##########################################################################################################
analysisType = 'steady' # 'steady' (state) or 'transient' for folder transfer
generations = 2
individuals = 2
values = np.array([[10,20],[30,40]]) # values that VARIABLE1 will take in U
# Initial linebreak to leave some space
print_tqdm('\n')
##########################################################################################################
# MAIN FUNCTION
##########################################################################################################
# loop over the number of iterations
for i in range(generations):
# if there is not a folder for the current generation, this will create it
if os.path.isdir("generation%i" %i) == False:
os.system("mkdir generation%i" %i)
# if it exists, remove that generation folder
else:
os.system("rm -rf ./generationn%i/" %i)
# evaluate the function for each individual of the generation
for j in tqdm(range(individuals),desc="{Generation %2.i}" %i):
# print the current individual in the CLI
        print_tqdm('Individual %i' %j)
# if there is not a folder for the current individual, this will create it
if os.path.isdir("generation%i/ind%i" %(i,j)) == False:
os.system("mkdir generation%i/ind%i" %(i,j))
# if there is not a folder for the current individual simulation, this will create it
if os.path.isdir("generation%i/ind%i/sim" %(i,j)) == False:
os.system("mkdir generation%i/ind%i/sim" %(i,j))
# copy the preconfigured case
caseCopy()
# change a value in simulations to see change
os.system("sed -i 's/VARIABLE1/%i/g' ./case/0/U" %values[i,j])
# print the current state in the CLI
        print_tqdm('  Simulating individual %i' %j)
# simulation of the case saving both stderr and stdout
os.system("simpleFoam -case ./case > 'generation%i/ind%i/g%ii%i.txt' 2> 'generation%i/ind%i/error_g%ii%i.txt'" %(i,j,i,j,i,j,i,j))
# transfer the results to the generation/individual folder
        print_tqdm('  Moving individual %i' %j)
folderTransfer(analysisType,i,j)
# plot the stdout results into some fancy graphs
        print_tqdm('  Plotting individual %i' %j)
os.system("python ./plotting.py ./generation%i/ind%i/ g%ii%i.txt" %(i,j,i,j))
# if the error file is empty, this will erase the output of the solver, keeping only the plots
if int(os.popen("du -h 'generation%i/ind%i/error_g%ii%i.txt'" %(i,j,i,j)).read()[0]) == 0:
os.system("rm 'generation%i/ind%i/g%ii%i.txt'" %(i,j,i,j))
os.system("rm 'generation%i/ind%i/error_g%ii%i.txt'" %(i,j,i,j))
# otherwise print error and don't remove the stdout
else:
print_tqdm('\033[0;33mError in simulation. Review logfile\033[0m')
# remove the case folder and begin again
os.system("rm -r case")
if i+1 != generations:
# doing things...
print_tqdm('Evaluating generation fitness...')
time.sleep(1)
# do more things...
print_tqdm('Creating new generation... \n')
time.sleep(1)
``` |
{
"source": "jlobatop/snowPile",
"score": 3
} |
#### File: snowPile/testGA/problemSetup.py
```python
import numpy as np
################################################################################
# INPUTS #
################################################################################
# number of variables of each individual
Nvar = 7
# number of indiiduals per generation (x^Nvar being x a real number)
Nind = 2**Nvar
# search spaces limits
var_low = np.array([-10,-1,-1,-1,-1,-1,-1])
var_high = np.array([10,1,1,1,1,1,1])
# comparison mode for search space limits (row per each one of the variables,
# first row is for the low limit and the second row in for the high limit)
compMode = [['leq', 'geq'],
['leq', 'geq'],
['leq', 'geq'],
['leq', 'geq'],
['leq', 'geq'],
['leq', 'geq'],
['leq', 'geq']]
################################################################################
# FUNCTION #
################################################################################
def constrainedPts(points, var_low, var_high, compMode):
"""Function that will constraint points out of bounds
INPUTS:
points: points in the parameter search space as a numpy.ndarray
(var_low): limits of the search domain variables
(var_high): limits of the search domain
compMode: comparison mode ('leq','geq','less','greater','equal') as list
OUTPUTS:
booleanMat: boolean matrix with 1 for the individuals out of bounds
This function will evaluate the constraints for all points in the set
returning a boolean masked matrix with the values that are constrained.
"""
# preallocate a matrix to analyze out-of-bound points with 'points' shape
# for the low limit
boolMatLo = np.zeros((points.shape))
# for the high limit
boolMatHi = np.zeros((points.shape))
# get the points that are valid under the constraints
for i in range(Nvar):
# upper limit comparison
# lower or equal to the high limit
if compMode[i][0] == 'leq':
boolMatHi[:,i] = np.logical_or(points[:,i] < var_high[i],
points[:,i] == var_high[i])
        # strictly lower than the high specified limit
        elif compMode[i][0] == 'less':
            boolMatHi[:,i] = points[:,i] < var_high[i]
        # strictly equal to the high specified limit (be careful!)
        elif compMode[i][0] == 'eq':
            boolMatHi[:,i] = points[:,i] == var_high[i]
# error if specified differently
else:
raise RuntimeError('Bad comparison mode matrix')
# lower limit comparison
# greater or equal to the lower limit
if compMode[i][1] == 'geq':
boolMatLo[:,i] = np.logical_or(points[:,i] > var_low[i],
points[:,i] == var_low[i])
        # strictly greater than the low limit
        elif compMode[i][1] == 'greater':
            boolMatLo[:,i] = points[:,i] > var_low[i]
        # strictly equal to the low specified limit (be careful!)
        elif compMode[i][1] == 'eq':
            boolMatLo[:,i] = points[:,i] == var_low[i]
# error if specified differently
else:
raise RuntimeError('Bad comparison mode matrix')
# combine both the low and high boolean matrices
boolMat = np.logical_and(boolMatHi,boolMatLo)
# once all the comparisons are made, the output should be an AND array where
# all the conditions are met by each one of the individuals
return np.logical_not(np.logical_and.reduce((boolMat.T)))
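# Minimal usage sketch (added for illustration, not part of the original module): draw a
# random population around the search box and count how many individuals the constraint
# check flags as out of bounds. The uniform sampling range is an assumption made only
# for this example.
if __name__ == '__main__':
    rng_points = np.random.uniform(var_low - 1, var_high + 1, size=(Nind, Nvar))
    out_of_bounds = constrainedPts(rng_points, var_low, var_high, compMode)
    print('individuals out of bounds:', int(np.sum(out_of_bounds)))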
``` |
{
"source": "jlocke817/python_for_testers",
"score": 3
} |
#### File: jlocke817/python_for_testers/application2.py
```python
from selenium.webdriver.firefox.webdriver import WebDriver
class Application:
def __init__(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def open_main_page(self):
wd = self.wd
wd.get("http://localhost/addressbook/index.php")
def logout(self):
wd = self.wd
wd.find_element_by_link_text("Logout").click()
def new_contact(self, data):
wd = self.wd
wd.find_element_by_link_text("add new").click()
def login(self, username, password):
wd = self.wd
self.open_main_page()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("%s" % username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("%s" % password)
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
def destroy(self):
self.wd.quit()
``` |
{
"source": "JLockerman/timescaledb-docker-ha",
"score": 2
} |
#### File: timescaledb-docker-ha/scripts/augment_patroni_configuration.py
```python
import yaml
import os
import sys
TSDB_DEFAULTS = """
postgresql:
parameters:
logging_collector: 'off'
log_destination: 'stderr'
create_replica_methods:
- pgbackrest
- basebackup
pgbackrest:
command: '/usr/bin/pgbackrest --stanza=poddb --delta restore --log-level-stderr=info'
keep_data: True
no_params: True
no_master: True
bootstrap:
dcs:
postgresql:
recovery_conf:
recovery_target_timeline: latest
standby_mode: 'on'
restore_command: 'pgbackrest --stanza=poddb archive-get %f "%p"'
"""
def merge(source, destination):
"""Merge source into destination.
Values from source override those of destination"""
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
merge(value, node)
else:
destination[key] = value
return destination
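# Illustrative example (added comment, not in the original script):
#   merge({'a': {'x': 1}}, {'a': {'x': 0, 'y': 2}, 'b': 3})
#   returns {'a': {'x': 1, 'y': 2}, 'b': 3} -- values from `source` win on conflicts,
#   while keys that only exist in `destination` are preserved.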
if __name__ == '__main__':
if len(sys.argv) == 1:
print("Usage: {0} <patroni.yaml>".format(sys.argv[0]))
sys.exit(2)
with open(sys.argv[1], 'r+') as f:
# Not all postgresql parameters that are set in the SPILO_CONFIGURATION environment variables
# are overridden by the configure_spilo.py script.
#
# Therefore, what we do is:
#
# 1. We run configure_spilo.py to generate a sane configuration
# 2. We override that configuration with our sane TSDB_DEFAULTS
# 3. We override that configuration with our explicitly passed on settings
tsdb_defaults = yaml.safe_load(TSDB_DEFAULTS) or {}
spilo_generated_configuration = yaml.safe_load(f) or {}
operator_generated_configuration = yaml.safe_load(os.environ.get('SPILO_CONFIGURATION', '{}')) or {}
final_configuration = merge(operator_generated_configuration, merge(tsdb_defaults, spilo_generated_configuration))
# This namespace used in etcd/consul
# Other provisions are also available, but this ensures no naming collisions
# for deployments in separate Kubernetes Namespaces will occur
# https://github.com/zalando/patroni/blob/master/docs/ENVIRONMENT.rst#globaluniversal
if 'etcd' in final_configuration and os.getenv('POD_NAMESPACE'):
final_configuration['namespace'] = os.getenv('POD_NAMESPACE')
f.seek(0)
yaml.dump(final_configuration, f, default_flow_style=False)
f.truncate()
``` |
{
"source": "JLoDoesIt/pybliometrics",
"score": 2
} |
#### File: scopus/tests/test_AffiliationRetrieval.py
```python
from nose.tools import assert_equal, assert_true
from pybliometrics.scopus import AffiliationRetrieval
light = AffiliationRetrieval('60000356', refresh=30, view="LIGHT")
standard = AffiliationRetrieval('60000356', refresh=30, view="STANDARD")
def test_address():
assert_equal(light.address, 'Private Bag X3')
assert_equal(standard.address, 'Private Bag X3')
def test_affiliation_name():
assert_equal(light.affiliation_name, 'University of Cape Town')
assert_equal(standard.affiliation_name, 'University of Cape Town')
def test_author_count():
expected = 12900
assert_true(light.author_count >= expected)
assert_true(standard.author_count >= expected)
def test_city():
assert_equal(light.city, 'Cape Town')
assert_equal(standard.city, 'Cape Town')
def test_country():
assert_equal(light.country, 'South Africa')
assert_equal(standard.country, 'South Africa')
def test_date_created():
assert_equal(light.date_created, None)
assert_equal(standard.date_created, (2008, 2, 2))
def test_document_count():
expected = 73581
assert_true(light.document_count >= expected)
assert_true(standard.document_count >= expected)
def test_eid():
assert_equal(light.eid, '10-s2.0-60000356')
assert_equal(standard.eid, '10-s2.0-60000356')
def test_identifier():
assert_equal(light.identifier, 60000356)
assert_equal(standard.identifier, 60000356)
def test_name_variants():
expected = "<class 'pybliometrics.scopus.affiliation_retrieval.Variant'>"
assert_equal(str(type(light.name_variants[0])), expected)
assert_equal(str(type(standard.name_variants[0])), expected)
def test_org_domain():
assert_equal(light.org_domain, None)
assert_equal(standard.org_domain, 'uct.ac.za')
def test_org_type():
assert_equal(light.org_type, None)
assert_equal(standard.org_type, 'univ')
def test_org_URL():
assert_equal(light.org_URL, None)
assert_equal(standard.org_URL, 'http://www.uct.ac.za')
def test_postal_code():
assert_equal(light.postal_code, None)
assert_equal(standard.postal_code, '7701')
def test_scopus_affiliation_link():
expected = 'https://www.scopus.com/affil/profile.uri?afid='\
'60000356&partnerID=HzOxMe3b&origin=inward'
assert_equal(light.scopus_affiliation_link, expected)
assert_equal(standard.scopus_affiliation_link, expected)
def test_self_link():
expected = 'https://api.elsevier.com/content/affiliation/affiliation_id/60000356'
assert_equal(light.self_link, expected)
assert_equal(standard.self_link, expected)
def test_search_link():
expected = 'https://api.elsevier.com/content/search/scopus?query=af-id%2860000356%29'
assert_equal(light.search_link, expected)
assert_equal(standard.search_link, expected)
def test_state():
assert_equal(light.state, None)
assert_equal(standard.state, 'Western Cape')
def test_status():
assert_equal(light.status, None)
assert_equal(standard.status, "update")
def sort_name():
assert_equal(light.sort_name, None)
assert_equal(standard.sort_name, 'Cape Town, University of')
def url():
expected = 'https://api.elsevier.com/content/affiliation/affiliation_id/60000356'
assert_equal(light.url, expected)
assert_equal(standard.url, expected)
``` |
{
"source": "jlodonia/xendit-python",
"score": 2
} |
#### File: xendit-python/xendit/_xendit_param_injector.py
```python
from inspect import signature
from .models import Balance
from .models import BatchDisbursement
from .models import CardlessCredit
from .models import CreditCard
from .models import DirectDebit
from .models import Disbursement
from .models import PHDisbursement
from .models import EWallet
from .models import Invoice
from .models import Payout
from .models import QRCode
from .models import RecurringPayment
from .models import RetailOutlet
from .models import VirtualAccount
from .models import XenPlatform
class _XenditParamInjector:
"""Builder class to inject parameters (api_key, base_url, http_client) to feature class"""
def __init__(self, params):
self.params = params
def instantiate_balance(self) -> Balance:
return self.instantiate(Balance)
def instantiate_batch_disbursement(self) -> BatchDisbursement:
return self.instantiate(BatchDisbursement)
def instantiate_cardless_credit(self) -> CardlessCredit:
return self.instantiate(CardlessCredit)
def instantiate_credit_card(self) -> CreditCard:
return self.instantiate(CreditCard)
def instantiate_direct_debit(self) -> DirectDebit:
return self.instantiate(DirectDebit)
def instantiate_disbursement(self) -> Disbursement:
return self.instantiate(Disbursement)
    def instantiate_ph_disbursement(self) -> PHDisbursement:
        return self.instantiate(PHDisbursement)
def instantiate_ewallet(self) -> EWallet:
return self.instantiate(EWallet)
def instantiate_invoice(self) -> Invoice:
return self.instantiate(Invoice)
def instantiate_payout(self) -> Payout:
return self.instantiate(Payout)
def instantiate_qrcode(self) -> QRCode:
return self.instantiate(QRCode)
def instantiate_recurring_payment(self) -> RecurringPayment:
return self.instantiate(RecurringPayment)
def instantiate_retail_outlet(self) -> RetailOutlet:
return self.instantiate(RetailOutlet)
def instantiate_virtual_account(self) -> VirtualAccount:
return self.instantiate(VirtualAccount)
def instantiate_xenplatform(self) -> XenPlatform:
return self.instantiate(XenPlatform)
def instantiate(self, injected_class):
"""Inject every static method in `injected_class` with provided parameters.
Args:
- injected_class (class): Class that will be injected
Return:
injected_class
"""
params = self.params
injected_class = type(
injected_class.__name__,
injected_class.__bases__,
dict(injected_class.__dict__),
)
for keys, value in vars(injected_class).items():
if type(value) == staticmethod and not keys.startswith("_"):
_XenditParamInjector._inject_function(
injected_class, params, keys, value
)
return injected_class
@staticmethod
def _inject_function(injected_class, params, func_name, func_value):
"""Inject `func_name` function with params"""
api_key, base_url, http_client = params
attr = func_value.__func__
def inject_func_with_api_key(*args, **kwargs):
kwargs["api_key"] = api_key
kwargs["base_url"] = base_url
kwargs["http_client"] = http_client
result = attr(*args, **kwargs)
return result
inject_func_with_api_key.__signature__ = signature(attr)
setattr(injected_class, func_name, staticmethod(inject_func_with_api_key))
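    # Usage sketch (added comment, illustrative only -- the values below are placeholders;
    # the real client builds this injector internally with its configured credentials):
    #
    #   injector = _XenditParamInjector(("api-key", "https://api.xendit.co", http_client))
    #   Balance = injector.instantiate_balance()
    #   # every public staticmethod of the returned class now receives api_key, base_url
    #   # and http_client as keyword arguments automatically.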
``` |
{
"source": "J-L-O/DTC",
"score": 2
} |
#### File: J-L-O/DTC/imagenet_DTC.py
```python
import os
import warnings
import numpy as np
import torch
import torch.nn.functional as F
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import adjusted_rand_score as ari_score
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi_score
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch.optim import SGD
from tqdm import tqdm
from data.imagenetloader import ImageNetLoader30, Entity30LoaderUnlabeled
from models.resnet import resnet18
from modules.module import feat2prob, target_distribution
from utils import ramps
from utils.util import cluster_acc, Identity, AverageMeter, seed_torch, str2bool
warnings.filterwarnings("ignore", category=UserWarning)
if os.environ.get('REMOTE_PYCHARM_DEBUG_SESSION', False):
import pydevd_pycharm
pydevd_pycharm.settrace('localhost', port=12034, stdoutToServer=True, stderrToServer=True, suspend=False)
def init_prob_kmeans(model, eval_loader, args):
torch.manual_seed(1)
model = model.to(device)
# cluster parameter initiate
model.eval()
targets = np.zeros(len(eval_loader.dataset))
feats = np.zeros((len(eval_loader.dataset), 512))
for _, (x, label, idx) in enumerate(eval_loader):
x = x.to(device)
feat = model(x)
feat = feat.view(x.size(0), -1)
idx = idx.data.cpu().numpy()
feats[idx, :] = feat.data.cpu().numpy()
targets[idx] = label.data.cpu().numpy()
# evaluate clustering performance
pca = PCA(n_components=args.n_clusters)
feats = pca.fit_transform(feats)
kmeans = KMeans(n_clusters=args.n_clusters, n_init=20)
y_pred = kmeans.fit_predict(feats)
acc, nmi, ari = cluster_acc(targets, y_pred), nmi_score(targets, y_pred), ari_score(targets, y_pred)
print('Init acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
probs = feat2prob(torch.from_numpy(feats), torch.from_numpy(kmeans.cluster_centers_))
return acc, nmi, ari, kmeans.cluster_centers_, probs
def warmup_train(model, train_loader, eva_loader, args):
optimizer = SGD(model.parameters(), lr=args.warmup_lr, momentum=args.momentum, weight_decay=args.weight_decay)
for epoch in range(args.warmup_epochs):
loss_record = AverageMeter()
acc_record = AverageMeter()
model.train()
for batch_idx, (x, label, idx) in enumerate(tqdm(train_loader)):
x = x.to(device)
output = model(x)
prob = feat2prob(output, model.center)
loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
loss_record.update(loss.item(), x.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Warmup_train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
_, _, _, probs = test(model, eva_loader, args, epoch)
args.p_targets = target_distribution(probs)
def Baseline_train(model, train_loader, eva_loader, args):
optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
for epoch in range(args.epochs):
loss_record = AverageMeter()
acc_record = AverageMeter()
model.train()
for batch_idx, (x, _, idx) in enumerate(tqdm(train_loader)):
x = x.to(device)
output = model(x)
prob = feat2prob(output, model.center)
loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
loss_record.update(loss.item(), x.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
_, _, _, probs = test(model, eva_loader, args, epoch)
if epoch % args.update_interval == 0:
print('updating target ...')
args.p_targets = target_distribution(probs)
torch.save(model.state_dict(), args.model_dir)
print("model saved to {}.".format(args.model_dir))
def PI_train(model, train_loader, eva_loader, args):
optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
w = 0
for epoch in range(args.epochs):
loss_record = AverageMeter()
acc_record = AverageMeter()
model.train()
w = args.rampup_coefficient * ramps.sigmoid_rampup(epoch, args.rampup_length)
for batch_idx, ((x, x_bar), label, idx) in enumerate(tqdm(train_loader)):
x, x_bar = x.to(device), x_bar.to(device)
feat = model(x)
feat_bar = model(x_bar)
prob = feat2prob(feat, model.center)
prob_bar = feat2prob(feat_bar, model.center)
sharp_loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
consistency_loss = F.mse_loss(prob, prob_bar)
loss = sharp_loss + w * consistency_loss
loss_record.update(loss.item(), x.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
_, _, _, probs = test(model, eva_loader, args, epoch)
if epoch % args.update_interval == 0:
print('updating target ...')
args.p_targets = target_distribution(probs)
torch.save(model.state_dict(), args.model_dir)
print("model saved to {}.".format(args.model_dir))
def TE_train(model, train_loader, eva_loader, args):
optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
w = 0
alpha = 0.6
ntrain = len(train_loader.dataset)
Z = torch.zeros(ntrain, args.n_clusters).float().to(device) # intermediate values
z_ema = torch.zeros(ntrain, args.n_clusters).float().to(device) # temporal outputs
z_epoch = torch.zeros(ntrain, args.n_clusters).float().to(device) # current outputs
for epoch in range(args.epochs):
loss_record = AverageMeter()
acc_record = AverageMeter()
model.train()
w = args.rampup_coefficient * ramps.sigmoid_rampup(epoch, args.rampup_length)
for batch_idx, (x, label, idx) in enumerate(tqdm(train_loader)):
x = x.to(device)
feat = model(x)
prob = feat2prob(feat, model.center)
z_epoch[idx, :] = prob
prob_bar = Variable(z_ema[idx, :], requires_grad=False)
sharp_loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
consistency_loss = F.mse_loss(prob, prob_bar)
loss = sharp_loss + w * consistency_loss
loss_record.update(loss.item(), x.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
Z = alpha * Z + (1. - alpha) * z_epoch
z_ema = Z * (1. / (1. - alpha ** (epoch + 1)))
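        # (added comment) Z is an exponential moving average of the per-sample predictions
        # (temporal-ensembling style); dividing by (1 - alpha**(epoch+1)) is the usual
        # startup bias correction, so z_ema approximates the recent average prediction that
        # the consistency loss uses as its target in the next epoch.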
print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
_, _, _, probs = test(model, eva_loader, args, epoch)
args.p_targets = target_distribution(probs)
torch.save(model.state_dict(), args.model_dir)
print("model saved to {}.".format(args.model_dir))
def TEP_train(model, train_loader, eva_loader, args):
optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
w = 0
alpha = 0.6
ntrain = len(train_loader.dataset)
Z = torch.zeros(ntrain, args.n_clusters).float().to(device) # intermediate values
z_ema = torch.zeros(ntrain, args.n_clusters).float().to(device) # temporal outputs
z_epoch = torch.zeros(ntrain, args.n_clusters).float().to(device) # current outputs
for epoch in range(args.epochs):
loss_record = AverageMeter()
acc_record = AverageMeter()
model.train()
w = args.rampup_coefficient * ramps.sigmoid_rampup(epoch, args.rampup_length)
for batch_idx, (x, label, idx) in enumerate(tqdm(train_loader)):
x = x.to(device)
feat = model(x)
prob = feat2prob(feat, model.center)
loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
loss_record.update(loss.item(), x.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
_, _, _, probs = test(model, eva_loader, args, epoch)
z_epoch = probs.float().to(device)
Z = alpha * Z + (1. - alpha) * z_epoch
z_ema = Z * (1. / (1. - alpha ** (epoch + 1)))
if epoch % args.update_interval == 0:
print('updating target ...')
args.p_targets = target_distribution(z_ema).float().to(device)
torch.save(model.state_dict(), args.model_dir)
print("model saved to {}.".format(args.model_dir))
@torch.no_grad()
def test(model, test_loader, args, epoch=0):
model.eval()
acc_record = AverageMeter()
preds = np.array([])
targets = np.array([])
feats = np.zeros((len(test_loader.dataset), args.n_clusters))
probs = np.zeros((len(test_loader.dataset), args.n_clusters))
for batch_idx, (x, label, idx) in enumerate(tqdm(test_loader)):
x, label = x.to(device), label.to(device)
output = model(x)
prob = feat2prob(output, model.center)
_, pred = prob.max(1)
targets = np.append(targets, label.cpu().numpy())
preds = np.append(preds, pred.cpu().numpy())
idx = idx.data.cpu().numpy()
feats[idx, :] = output.cpu().detach().numpy()
probs[idx, :] = prob.cpu().detach().numpy()
acc, nmi, ari = cluster_acc(targets.astype(int), preds.astype(int)), nmi_score(targets, preds), ari_score(targets,
preds)
print('Test acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
return acc, nmi, ari, torch.from_numpy(probs)
def copy_param(model, pretrain_dir, loc=None):
pre_dict = torch.load(pretrain_dir)
new = list(pre_dict.items())
model_kvpair = model.state_dict()
if loc is not None:
count = 0
for key, value in list(model_kvpair.items())[:loc]:
layer_name, weights = new[count]
model_kvpair[key] = weights
count += 1
else:
count = 0
for key, value in model_kvpair.items():
layer_name, weights = new[count]
model_kvpair[key] = weights
count += 1
model.load_state_dict(model_kvpair, strict=False)
return model
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='cluster',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--warmup_lr', type=float, default=0.1)
parser.add_argument('--lr', type=float, default=0.05)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=1e-5)
parser.add_argument('--warmup_epochs', default=10, type=int)
parser.add_argument('--epochs', default=60, type=int)
parser.add_argument('--rampup_length', default=5, type=int)
parser.add_argument('--rampup_coefficient', type=float, default=100.0)
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--num_workers', default=2, type=int)
parser.add_argument('--update_interval', default=5, type=int)
parser.add_argument('--n_clusters', default=30, type=int)
parser.add_argument('--seed', default=1, type=int)
parser.add_argument('--save_txt', default=False, type=str2bool, help='save txt or not', metavar='BOOL')
parser.add_argument('--pretrain_dir', type=str,
default='./data/experiments/pretrained/resnet18_imagenet_classif_882_ICLR18.pth')
parser.add_argument('--dataset_root', type=str, default='./data/datasets/ImageNet/')
parser.add_argument('--exp_root', type=str, default='./data/experiments/')
parser.add_argument('--model_name', type=str, default='resnet18')
parser.add_argument('--save_txt_name', type=str, default='result.txt')
parser.add_argument('--DTC', type=str, default='TEP')
parser.add_argument("--imagenet_subset", default="all", type=str, help="imagenet subset ('all' or entity30)")
parser.add_argument("--imagenet_split_unlabeled", default="A", type=str, help="unlabeled split [A, B, C, u1, u2, u3]")
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
seed_torch(args.seed)
runner_name = os.path.basename(__file__).split(".")[0]
model_dir = args.exp_root + '{}/{}'.format(runner_name, args.DTC)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
args.model_dir = model_dir + '/' + args.model_name + '.pth'
args.save_txt_path = args.exp_root + '{}/{}'.format(runner_name, args.save_txt_name)
if args.imagenet_subset == "all":
loader_train = ImageNetLoader30(batch_size=args.batch_size, num_workers=args.num_workers,
imagenet_path=args.dataset_root, subset=args.imagenet_split_unlabeled,
aug=None, shuffle=True)
loader_train_twice = ImageNetLoader30(batch_size=args.batch_size, num_workers=args.num_workers,
imagenet_path=args.dataset_root, subset=args.imagenet_split_unlabeled,
aug='twice', shuffle=True)
loader_eval = ImageNetLoader30(batch_size=args.batch_size, num_workers=args.num_workers,
imagenet_path=args.dataset_root, subset=args.imagenet_split_unlabeled, aug=None)
num_classes = 882
else:
loader_train = Entity30LoaderUnlabeled(batch_size=args.batch_size, num_workers=args.num_workers,
imagenet_path=args.dataset_root,
imagenet_split=args.imagenet_split_unlabeled, aug=None, shuffle=True)
loader_train_twice = Entity30LoaderUnlabeled(batch_size=args.batch_size, num_workers=args.num_workers,
imagenet_path=args.dataset_root, shuffle=True,
imagenet_split=args.imagenet_split_unlabeled, aug='twice')
loader_eval = Entity30LoaderUnlabeled(batch_size=args.batch_size, num_workers=args.num_workers,
imagenet_path=args.dataset_root,
imagenet_split=args.imagenet_split_unlabeled, aug=None)
num_classes = 90
model = resnet18(num_classes=num_classes)
model = copy_param(model, args.pretrain_dir)
model.last = Identity()
acc_init, nmi_init, ari_init, init_centers, init_probs = init_prob_kmeans(model, loader_eval, args)
args.p_targets = target_distribution(init_probs)
model = resnet18(num_classes=args.n_clusters)
model = copy_param(model, args.pretrain_dir, loc=-2)
print('load pretrained state_dict from {}'.format(args.pretrain_dir))
model.center = Parameter(torch.Tensor(args.n_clusters, args.n_clusters))
model = model.to(device)
model.center.data = torch.tensor(init_centers).float().to(device)
warmup_train(model, loader_train, loader_eval, args)
if args.DTC == 'Baseline':
Baseline_train(model, loader_train, loader_eval, args)
elif args.DTC == 'PI':
PI_train(model, loader_train_twice, loader_eval, args)
elif args.DTC == 'TE':
TE_train(model, loader_train, loader_eval, args)
elif args.DTC == 'TEP':
TEP_train(model, loader_train, loader_eval, args)
acc, nmi, ari, _ = test(model, loader_eval, args)
subset_string = 'Subset {}, split {}'.format(args.imagenet_subset, args.imagenet_split_unlabeled)
print(subset_string + ' init ACC {:.4f}, NMI {:.4f}, ARI {:.4f}'.format(acc_init, nmi_init, ari_init))
print(subset_string + ' final ACC {:.4f}, NMI {:.4f}, ARI {:.4f}'.format(acc, nmi, ari))
if args.save_txt:
with open(args.save_txt_path, 'a') as f:
f.write("{:.4f}, {:.4f}, {:.4f}\n".format(acc_init, nmi_init, ari_init))
f.write("{:.4f}, {:.4f}, {:.4f}\n".format(acc, nmi, ari))
``` |
{
"source": "jloehel/grafana-backup-tool",
"score": 2
} |
#### File: grafana-backup-tool/grafana_backup/dashboardApi.py
```python
import re
import json
import requests
import sys
from grafana_backup.commons import log_response, to_python2_and_3_compatible_string
def health_check(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
url = '{0}/api/health'.format(grafana_url)
print("\n[Pre-Check] grafana health check: {0}".format(url))
return send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug)
def auth_check(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
url = '{0}/api/auth/keys'.format(grafana_url)
print("\n[Pre-Check] grafana auth check: {0}".format(url))
return send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug)
def uid_feature_check(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
# Get first dashboard on first page
print("\n[Pre-Check] grafana uid feature check: calling 'search_dashboard'")
(status, content) = search_dashboard(1, 1, grafana_url, http_get_headers, verify_ssl, client_cert, debug)
if status == 200 and len(content):
if 'uid' in content[0]:
dashboard_uid_support = True
else:
dashboard_uid_support = False
else:
if len(content):
dashboard_uid_support = "get dashboards failed, status: {0}, msg: {1}".format(status, content)
else:
# No dashboards exist, disable uid feature
dashboard_uid_support = False
# Get first datasource
print("\n[Pre-Check] grafana uid feature check: calling 'search_datasource'")
(status, content) = search_datasource(grafana_url, http_get_headers, verify_ssl, client_cert, debug)
if status == 200 and len(content):
if 'uid' in content[0]:
datasource_uid_support = True
else:
datasource_uid_support = False
else:
if len(content):
datasource_uid_support = "get datasources failed, status: {0}, msg: {1}".format(status, content)
else:
# No datasources exist, disable uid feature
datasource_uid_support = False
return dashboard_uid_support, datasource_uid_support
def paging_feature_check(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
print("\n[Pre-Check] grafana paging_feature_check: calling 'search_dashboard'")
def get_first_dashboard_by_page(page):
(status, content) = search_dashboard(page, 1, grafana_url, http_get_headers, verify_ssl, client_cert, debug)
if status == 200 and len(content):
if sys.version_info[0] > 2:
content[0] = {k: to_python2_and_3_compatible_string(v) for k,v in content[0].items()}
dashboard_values = sorted(content[0].items(), key=lambda kv: str(kv[1]))
else:
content[0] = {k: to_python2_and_3_compatible_string(unicode(v)) for k,v in content[0].iteritems()}
dashboard_values = sorted(content[0].iteritems(), key=lambda kv: str(kv[1]))
return True, dashboard_values
else:
if len(content):
return False, "get dashboards failed, status: {0}, msg: {1}".format(status, content)
else:
# No dashboards exist, disable paging feature
return False, False
# Get first dashboard on first page
(status, content) = get_first_dashboard_by_page(1)
if status is False and content is False:
return False # Paging feature not supported
elif status is True:
dashboard_one_values = content
else:
return content # Fail Message
# Get second dashboard on second page
(status, content) = get_first_dashboard_by_page(2)
if status is False and content is False:
return False # Paging feature not supported
elif status is True:
dashboard_two_values = content
else:
return content # Fail Message
# Compare both pages
return dashboard_one_values != dashboard_two_values
def search_dashboard(page, limit, grafana_url, http_get_headers, verify_ssl, client_cert, debug):
url = '{0}/api/search/?type=dash-db&limit={1}&page={2}'.format(grafana_url, limit, page)
print("search dashboard in grafana: {0}".format(url))
return send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug)
def get_dashboard(board_uri, grafana_url, http_get_headers, verify_ssl, client_cert, debug):
url = '{0}/api/dashboards/{1}'.format(grafana_url, board_uri)
print("query dashboard uri: {0}".format(url))
(status_code, content) = send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug)
return (status_code, content)
def search_annotations(grafana_url, ts_from, ts_to, http_get_headers, verify_ssl, client_cert, debug):
# there is two types of annotations
# annotation: are user created, custom ones and can be managed via the api
# alert: are created by Grafana itself, can NOT be managed by the api
url = '{0}/api/annotations?type=annotation&limit=5000&from={1}&to={2}'.format(grafana_url, ts_from, ts_to)
(status_code, content) = send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug)
return (status_code, content)
def create_annotation(annotation, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
url = '{0}/api/annotations'.format(grafana_url)
return send_grafana_post(url, annotation, http_post_headers, verify_ssl, client_cert, debug)
def delete_annotation(id_, grafana_url, http_get_headers, verify_ssl, client_cert, debug):
r = requests.delete('{0}/api/annotations/{1}'.format(grafana_url, id_), headers=http_get_headers)
return int(r.status_code)
def search_alert_channels(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
url = '{0}/api/alert-notifications'.format(grafana_url)
print("search alert channels in grafana: {0}".format(url))
return send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug)
def create_alert_channel(payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
return send_grafana_post('{0}/api/alert-notifications'.format(grafana_url), payload, http_post_headers, verify_ssl,
client_cert, debug)
def delete_alert_channel_by_uid(uid, grafana_url, http_post_headers):
r = requests.delete('{0}/api/alert-notifications/uid/{1}'.format(grafana_url, uid), headers=http_post_headers)
return int(r.status_code)
def delete_alert_channel_by_id(id_, grafana_url, http_post_headers):
r = requests.delete('{0}/api/alert-notifications/{1}'.format(grafana_url, id_), headers=http_post_headers)
return int(r.status_code)
def search_alerts(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
url = '{0}/api/alerts'.format(grafana_url)
(status_code, content) = send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug)
return (status_code, content)
def pause_alert(id_, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
url = '{0}/api/alerts/{1}/pause'.format(grafana_url, id_)
payload = '{ "paused": true }'
(status_code, content) = send_grafana_post(url, payload, http_post_headers, verify_ssl, client_cert, debug)
return (status_code, content)
def unpause_alert(id_, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
url = '{0}/api/alerts/{1}/pause'.format(grafana_url, id_)
payload = '{ "paused": false }'
(status_code, content) = send_grafana_post(url, payload, http_post_headers, verify_ssl, client_cert, debug)
return (status_code, content)
def delete_folder(uid, grafana_url, http_post_headers):
r = requests.delete('{0}/api/folders/{1}'.format(grafana_url, uid), headers=http_post_headers)
return int(r.status_code)
def delete_snapshot(key, grafana_url, http_post_headers):
r = requests.delete('{0}/api/snapshots/{1}'.format(grafana_url, key), headers=http_post_headers)
return int(r.status_code)
def delete_dashboard_by_uid(uid, grafana_url, http_post_headers):
r = requests.delete('{0}/api/dashboards/uid/{1}'.format(grafana_url, uid), headers=http_post_headers)
return int(r.status_code)
def delete_dashboard_by_slug(slug, grafana_url, http_post_headers):
r = requests.delete('{0}/api/dashboards/db/{1}'.format(grafana_url, slug), headers=http_post_headers)
return int(r.status_code)
def create_dashboard(payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
return send_grafana_post('{0}/api/dashboards/db'.format(grafana_url), payload, http_post_headers, verify_ssl,
client_cert, debug)
def search_datasource(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
print("search datasources in grafana:")
return send_grafana_get('{0}/api/datasources'.format(grafana_url), http_get_headers, verify_ssl, client_cert, debug)
def search_snapshot(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
print("search snapshots in grafana:")
return send_grafana_get('{0}/api/dashboard/snapshots'.format(grafana_url), http_get_headers, verify_ssl, client_cert, debug)
def get_snapshot(key, grafana_url, http_get_headers, verify_ssl, client_cert, debug):
url = '{0}/api/snapshots/{1}'.format(grafana_url, key)
(status_code, content) = send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug)
return (status_code, content)
def create_snapshot(payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
return send_grafana_post('{0}/api/snapshots'.format(grafana_url), payload, http_post_headers, verify_ssl,
client_cert, debug)
def create_datasource(payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
return send_grafana_post('{0}/api/datasources'.format(grafana_url), payload, http_post_headers, verify_ssl,
client_cert, debug)
def delete_datasource_by_uid(uid, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
url = '{0}/api/datasources/uid/{1}'.format(grafana_url, uid)
r = requests.delete(url, headers=http_post_headers)
return int(r.status_code)
def delete_datasource_by_id(id_, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
url = '{0}/api/datasources/{1}'.format(grafana_url, id_)
r = requests.delete(url, headers=http_post_headers)
return int(r.status_code)
def search_folders(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
print("search folder in grafana:")
return send_grafana_get('{0}/api/search/?type=dash-folder'.format(grafana_url), http_get_headers, verify_ssl,
client_cert, debug)
def get_folder(uid, grafana_url, http_get_headers, verify_ssl, client_cert, debug):
(status_code, content) = send_grafana_get('{0}/api/folders/{1}'.format(grafana_url, uid), http_get_headers,
verify_ssl, client_cert, debug)
print("query folder:{0}, status:{1}".format(uid, status_code))
return (status_code, content)
def get_folder_permissions(uid, grafana_url, http_get_headers, verify_ssl, client_cert, debug):
(status_code, content) = send_grafana_get('{0}/api/folders/{1}/permissions'.format(grafana_url, uid), http_get_headers,
verify_ssl, client_cert, debug)
print("query folder permissions:{0}, status:{1}".format(uid, status_code))
return (status_code, content)
def update_folder_permissions(payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
items = json.dumps({'items': payload})
return send_grafana_post('{0}/api/folders/{1}/permissions'.format(grafana_url, payload[0]['uid']), items, http_post_headers, verify_ssl, client_cert,
debug)
def get_folder_id(dashboard, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
folder_uid = ""
try:
folder_uid = dashboard['meta']['folderUid']
except (KeyError):
        matches = re.search(r'dashboards/f/(.*)/.*', dashboard['meta']['folderUrl'])
if matches is not None:
folder_uid = matches.group(1)
else:
folder_uid = '0'
if (folder_uid != ""):
print("debug: quering with uid {}".format(folder_uid))
response = get_folder(folder_uid, grafana_url, http_post_headers, verify_ssl, client_cert, debug)
if isinstance(response[1], dict):
folder_data = response[1]
else:
folder_data = json.loads(response[1])
try:
return folder_data['id']
except (KeyError):
return 0
else:
return 0
def create_folder(payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
return send_grafana_post('{0}/api/folders'.format(grafana_url), payload, http_post_headers, verify_ssl, client_cert,
debug)
def get_dashboard_versions(dashboard_id, grafana_url, http_get_headers, verify_ssl, client_cert, debug):
(status_code, content) = send_grafana_get('{0}/api/dashboards/id/{1}/versions'.format(grafana_url, dashboard_id), http_get_headers,
verify_ssl, client_cert, debug)
print("query dashboard versions: {0}, status: {1}".format(dashboard_id, status_code))
return (status_code, content)
def get_version(dashboard_id, version_number, grafana_url, http_get_headers, verify_ssl, client_cert, debug):
(status_code, content) = send_grafana_get('{0}/api/dashboards/id/{1}/versions/{2}'.format(grafana_url, dashboard_id, version_number), http_get_headers,
verify_ssl, client_cert, debug)
print("query dashboard {0} version {1}, status: {2}".format(dashboard_id, version_number, status_code))
return (status_code, content)
def search_orgs(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
return send_grafana_get('{0}/api/orgs'.format(grafana_url), http_get_headers, verify_ssl,
client_cert, debug)
def get_org(id, grafana_url, http_get_headers, verify_ssl=False, client_cert=None, debug=True):
return send_grafana_get('{0}/api/orgs/{1}'.format(grafana_url, id),
http_get_headers, verify_ssl, client_cert, debug)
def create_org(payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
return send_grafana_post('{0}/api/orgs'.format(grafana_url), payload, http_post_headers, verify_ssl, client_cert,
debug)
def update_org(id, payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
return send_grafana_put('{0}/api/orgs/{1}'.format(grafana_url, id), payload, http_post_headers, verify_ssl, client_cert,
debug)
def search_users(page, limit, grafana_url, http_get_headers, verify_ssl, client_cert, debug):
return send_grafana_get('{0}/api/users?perpage={1}&page={2}'.format(grafana_url, limit, page),
http_get_headers, verify_ssl, client_cert, debug)
def get_users(grafana_url, http_get_headers, verify_ssl, client_cert, debug):
return send_grafana_get('{0}/api/org/users'.format(grafana_url), http_get_headers, verify_ssl, client_cert, debug)
def set_user_role(user_id, role, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
json_payload = json.dumps({'role': role})
url = '{0}/api/org/users/{1}'.format(grafana_url, user_id)
r = requests.patch(url, headers=http_post_headers, data=json_payload, verify=verify_ssl, cert=client_cert)
return (r.status_code, r.json())
def get_user(id, grafana_url, http_get_headers, verify_ssl=False, client_cert=None, debug=True):
return send_grafana_get('{0}/api/users/{1}'.format(grafana_url, id),
http_get_headers, verify_ssl, client_cert, debug)
def get_user_org(id, grafana_url, http_get_headers, verify_ssl=False, client_cert=None, debug=True):
return send_grafana_get('{0}/api/users/{1}/orgs'.format(grafana_url, id),
http_get_headers, verify_ssl, client_cert, debug)
def create_user(payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
return send_grafana_post('{0}/api/admin/users'.format(grafana_url), payload, http_post_headers, verify_ssl, client_cert,
debug)
def add_user_to_org(org_id, payload, grafana_url, http_post_headers, verify_ssl, client_cert, debug):
return send_grafana_post('{0}/api/orgs/{1}/users'.format(grafana_url, org_id), payload, http_post_headers, verify_ssl, client_cert,
debug)
def send_grafana_get(url, http_get_headers, verify_ssl, client_cert, debug):
r = requests.get(url, headers=http_get_headers, verify=verify_ssl, cert=client_cert)
if debug:
log_response(r)
return (r.status_code, r.json())
def send_grafana_post(url, json_payload, http_post_headers, verify_ssl=False, client_cert=None, debug=True):
r = requests.post(url, headers=http_post_headers, data=json_payload, verify=verify_ssl, cert=client_cert)
if debug:
log_response(r)
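    # some endpoints answer with plain text; fall back to the raw body when the reply is not JSON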
try:
return (r.status_code, r.json())
except ValueError:
return (r.status_code, r.text)
def send_grafana_put(url, json_payload, http_post_headers, verify_ssl=False, client_cert=None, debug=True):
r = requests.put(url, headers=http_post_headers, data=json_payload, verify=verify_ssl, cert=client_cert)
if debug:
log_response(r)
return (r.status_code, r.json())
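# --- usage sketch (illustrative only, not part of the original module) ---
# The Grafana URL and API token below are placeholders; every helper above
# returns a (status_code, parsed_body) tuple, so callers can branch on status.
if __name__ == '__main__':
    grafana_url = 'http://localhost:3000'
    headers = {'Authorization': 'Bearer <api-key>',
               'Content-Type': 'application/json'}
    status, orgs = search_orgs(grafana_url, headers, False, None, True)
    if status == 200:
        print("found {0} orgs".format(len(orgs)))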
``` |
{
"source": "jloehel/munin_client",
"score": 2
} |
#### File: munin_client/tests/test_munin_client.py
```python
import pytest
class TestMuninClient(object):
def config_test(self):
# Mock server
# Create a cli MuninClient(mock_server)
# cli.config("test_plugin")
# assert with expected result
pass
def fetch_test(self):
pass
def list_test(self):
pass
def nodes_test(self):
pass
def version_test(self):
pass
``` |
{
"source": "jloehel/python_smaclient",
"score": 3
} |
#### File: python_smaclient/python_smaclient/smapi_call.py
```python
import uuid
class SMAPI_Call(object):
def __init__(self, name, request, response1, response2):
self._uuid = uuid.uuid1()
self._name = name
self._request = request
self._response1 = response1
self._response2 = response2
```
#### File: python_smaclient/python_smaclient/smapi_parser.py
```python
from construct import Struct, UBInt32, String, Field
import yaml
class SMAPI_Parser(object):
def __init__(self, config_path):
self._configuration = self.load_configuration(config_path)
self._smapi_request_struct = Struct("SMAPI_REQUEST",
UBInt32("input_length"),
UBInt32("function_name_length"),
String("function_name", lambda ctx: ctx.function_name_length),
UBInt32("authenticated_userid_length"),
String("authenticated_userid", lambda ctx: ctx.authenticated_userid_length),
UBInt32("password_length"),
String("password", lambda ctx: ctx.password_length),
UBInt32("target_identifier_length"),
String("target_identifier", lambda ctx: ctx.target_identifier_length),
Field("additional_parameters", lambda ctx: (ctx.input_length -
(ctx.function_name_length +
ctx.authenticated_userid_length +
ctx.password_length +
ctx.target_identifier_length + 4 * 4)))
)
self._smapi_response_1_struct = Struct("SMAPI_RESPONSE_1", UBInt32("request_id"))
self._smapi_response_2_struct = Struct("SMAPI_RESPONSE_2",
UBInt32("output_length"),
UBInt32("request_id"),
UBInt32("return_code"),
UBInt32("reason_code"),
Field("additional_parameters", lambda ctx: ctx.output_length - 16)
)
def load_configuration(self, path):
with open(path, "r") as stream:
            return yaml.safe_load(stream)  # safe_load builds plain Python data only, never arbitrary objects
def get_configuration(self):
return self._configuration
def parse(self, response):
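        # stub: presumably meant to decode the response buffers using the structs defined above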
pass
def build_request(self, container):
return self._smapi_request_struct.build(container)
```
#### File: python_smaclient/python_smaclient/smapi.py
```python
from smapi_request import SMAPI_Request as Request
from smapi_parser import SMAPI_Parser
def send(function_name, target, smhost, smport, smuser, smpass, additional_parameters):
request = Request(str.encode(function_name),
target_identifier = str.encode(target),
authenticated_userid= str.encode(smuser),
password=str.encode(<PASSWORD>),
additional_parameters=str.encode(additional_parameters))
if smhost == "IUCV":
parser = SMAPI_Parser("config.yaml")
raw_command = parser.build_request(request.get_container())
print(raw_command)
return "response"
else:
pass
``` |
{
"source": "jlogans/vampy2017cs",
"score": 4
} |
#### File: jlogans/vampy2017cs/exponent.py
```python
def old_exponent(n, k):
"""
n is base, k is exponent
"""
answer = 1
for i in range(k):
answer *= n
print(answer)
def newExponent(n, k):
"""
n is base, k is exponent
"""
print("newExponent({0}, {1})".format(n, k))
if k == 1:
return n
elif k == 0:
return 1
left = int(k/2)
right = k - left
return newExponent(n, left) * newExponent(n, right)
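
# For comparison (not part of the original exercise): genuine fast exponentiation
# halves the exponent at each step, so it needs O(log k) multiplications rather
# than the O(k) performed by the two functions above.
def fast_exponent(n, k):
    """n is base, k is a non-negative integer exponent"""
    result = 1
    while k > 0:
        if k % 2 == 1:   # odd exponent: peel off one factor of the base
            result *= n
        n *= n           # square the base
        k //= 2          # halve the exponent
    return result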
```
#### File: jlogans/vampy2017cs/intsort.py
```python
import threading
import random
import time
import MMMMM
ints = []
def intsort(beginrangearray, stoprangearray):
for x in range(beginrangearray, stoprangearray):
ints.append(random.randint(1, 10000))
# pass the callable and its arguments separately; calling intsort(...) here would
# run it in the main thread and hand Thread a None target. The contiguous ranges
# 0-25, 25-50, 50-75, 75-100 generate exactly 100 numbers in total.
t1 = threading.Thread(target=intsort, args=(0, 25))
t2 = threading.Thread(target=intsort, args=(25, 50))
t3 = threading.Thread(target=intsort, args=(50, 75))
t4 = threading.Thread(target=intsort, args=(75, 100))
t1.start()
t2.start()
t3.start()
t4.start()
t1.join()
t2.join()
t3.join()
t4.join()
print("Maximum: {0}, Minimum: {1}, Mode: {2}, Mean: {3}".format(max(ints), min(ints), MMMMM.mode(ints), MMMMM.mean(ints)))
```
#### File: jlogans/vampy2017cs/pro.py
```python
import turtle as t
f=t.forward
l=t.left
t.speed(2)
Size = 200
t.color('green', 'green')
def square(Size):
f(Size)
l(90)
f(Size)
l(90)
f(Size)
l(90)
f(Size)
l(90)
def triangle(Size):
f(Size)
l(120)
f(Size)
l(120)
f(Size)
l(120)
def dog(Size):
square(Size)
f(Size/10)
l(240)
triangle(Size/4)
l(120)
f(Size*0.8)
l(240)
triangle(Size/4)
l(120)
f(Size/10)
l(90)
f(Size*0.8)
l(270)
square(Size*0.4)
f(Size*0.1)
t.penup()
l(90)
f(Size*0.1)
t.pendown()
t.right(90)
f(Size*0.2)
t.penup()
l(90)
f(Size*0.2)
l(90)
f(Size*0.05)
t.pendown()
f(Size*0.025)
t.penup()
f(Size*0.05)
t.pendown()
f(Size*0.025)
t.color('white','white')
dog(Size)
t.exitonclick()
```
#### File: jlogans/vampy2017cs/runningserverchat.py
```python
import threading
import socket
import time
phone = socket.socket()
port = input("What port do you want to connect to?")
addr = (socket.gethostname(), int(port))
phone.bind(addr)
phone.listen(16)
def server():
while True:
try:
conn, callid = phone.accept()
message = bytes.decode(conn.recv(1024))
conn.send("r".encode("UTF-8"))
conn.close()
print("Call from {0}: {1}".format(callid, message))
except KeyboardInterrupt:
conn.close()
phone.close()
break
except:
conn.close()
phone.close()
break
def client():
cpu = input("What computer do you want to connect to? 2-17\n")
alias = input("What do you want your alias to be?")
while True:
try:
msg = (alias + ": " + input("What is the message that you want to send?\n"))
data = msg.encode("UTF-8")
phone = socket.socket()
address = ("vampy-cs-" + str(cpu), int(port))
phone.connect(address)
phone.sendall(data)
phone.close()
time.sleep(0.5)
except ConnectionRefusedError:
print("yo u cant connect")
except KeyboardInterrupt:
phone.close()
break
t1 = threading.Thread(target = server)
t2 = threading.Thread(target = client)
t1.start()
t2.start()
t1.join()
t2.join()
print("Shutting down.")
```
#### File: jlogans/vampy2017cs/stars.py
```python
import turtle as t
r = t.right
f = t.forward
def star(length, k, sides):
t.speed(-1)
if sides != 1:
angle = (k*360)/sides
for i in range(sides):
f(length)
r(angle)
def spiro(sides, length):
t.speed(-1)
angle = 360/sides
for i in range(sides):
        for j in range(36):
            pass  # unfinished: no drawing commands inside this inner loop yet
def staridk(length, k, sides):
t.speed(-1)
if sides != 1:
angle = (k*360)/sides
for i in range(sides):
f(length-i)
r(angle/(((i+1)%5)+length))
print("All the other kids with the pumped up kicks you better run, better run, outrun my gun, all the other kids with the pumped up kicks you better run better run, faster than my bullets")
```
#### File: jlogans/vampy2017cs/TreeBanzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.py
```python
def tree(val):
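    # a node is a 3-element list: [left ("yes") child, value, right ("no") child]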
return [None, val, None]
def data(node, val = None):
if node is None:
return None
elif val is None:
return node[1]
else:
node [1] = val
def yes(node, child=None):
    # left branch = answer "yes"; read it when child is None, otherwise attach child
    if node is None:
        return None
    elif child is None:
        return node[0]
    else:
        node[0] = child
def no(node, child=None):
    # right branch = answer "no"; mirrors yes() but uses index 2
    if node is None:
        return None
    elif child is None:
        return node[2]
    else:
        node[2] = child
root = tree("Am I an object or a place? (YES/NO): ")
yes(root, tree("Am I bigger than a PC? (YES/NO): "))
no(root, tree("Am I human? (YES/NO): "))
yes(yes(root), tree("Am I a building? (YES/NO): "))
no(yes(root), tree("Am I consumed as you use me? (YES/NO): "))
yes(no(root), tree("Am I fictional? (YES/NO): "))
no(no(root), tree("Can you fit me in a cofefe mug? (YES/NO): "))
yes(yes(yes(root)), tree("Am I a salon? (YES/NO): "))
no(yes(yes(root)), tree("Am I New York? (YES/NO): "))
yes(no(yes(root)), tree("Am I pizza? (YES/NO): "))
no(no(yes(root)), tree("Am I a hat? (YES/NO): "))
yes(yes(no(root)), tree("Am I Santa Claus? (YES/NO): "))
print(data(root))
``` |
{
"source": "j-log/Expense-Tracker",
"score": 2
} |
#### File: Expense-Tracker/Project/mark_2_gui.py
```python
import pandas as pd
import os.path
from os import path
from datetime import date
from PyQt5 import QtCore, QtGui, QtWidgets
from dia import Ui_Dialog
import matplotlib.pyplot as plt
import numpy as np
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(659, 430)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("32318-200.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(0, 0, 531, 381))
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.lineEdit = QtWidgets.QLineEdit(self.tab)
self.lineEdit.setGeometry(QtCore.QRect(190, 60, 113, 22))
self.lineEdit.setObjectName("lineEdit")
self.comboBox = QtWidgets.QComboBox(self.tab)
self.comboBox.setGeometry(QtCore.QRect(192, 100, 111, 22))
self.comboBox.setObjectName("comboBox")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.label = QtWidgets.QLabel(self.tab)
self.label.setGeometry(QtCore.QRect(60, 60, 131, 16))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.tab)
self.label_2.setGeometry(QtCore.QRect(60, 100, 111, 16))
self.label_2.setObjectName("label_2")
self.pushButton_3 = QtWidgets.QPushButton(self.tab)
self.pushButton_3.setGeometry(QtCore.QRect(200, 160, 93, 28))
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_3.clicked.connect(self.enter_record)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.pushButton_4 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_4.setGeometry(QtCore.QRect(210, 320, 93, 28))
self.pushButton_4.setObjectName("pushButton_4")
self.pushButton_4.clicked.connect(self.view)
## self.radioButton = QtWidgets.QRadioButton(self.tab_2)
## self.radioButton.setGeometry(QtCore.QRect(330, 30, 95, 20))
## self.radioButton.setObjectName("radioButton")
##
## self.radioButton_2 = QtWidgets.QRadioButton(self.tab_2)
## self.radioButton_2.setGeometry(QtCore.QRect(330, 70, 95, 20))
## self.radioButton_2.setObjectName("radioButton_2")
##
## self.radioButton_3 = QtWidgets.QRadioButton(self.tab_2)
## self.radioButton_3.setGeometry(QtCore.QRect(330, 110, 95, 20))
## self.radioButton_3.setObjectName("radioButton_3")
self.tableWidget = QtWidgets.QTableWidget(self.tab_2)
self.tableWidget.setGeometry(QtCore.QRect(10, 10, 501, 301))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.tabWidget.addTab(self.tab_2, "")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.pushButton_5 = QtWidgets.QPushButton(self.tab_3)
self.pushButton_5.setGeometry(QtCore.QRect(70, 230, 93, 28))
self.pushButton_5.setObjectName("pushButton_5")
self.pushButton_5.clicked.connect(self.summary)
self.label_3 = QtWidgets.QLabel(self.tab_3)
self.label_3.setGeometry(QtCore.QRect(60, 60, 101, 16))
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.tab_3)
self.label_4.setGeometry(QtCore.QRect(60, 100, 101, 16))
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.tab_3)
self.label_5.setGeometry(QtCore.QRect(60, 140, 111, 16))
self.label_5.setObjectName("label_5")
self.lcdNumber = QtWidgets.QLCDNumber(self.tab_3)
self.lcdNumber.setGeometry(QtCore.QRect(210, 60, 64, 23))
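        # the long palette block below only recolours the LCD read-outs (blue colour scheme)
        # for the Active, Inactive and Disabled widget states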
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(63, 63, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(63, 63, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(63, 63, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.lcdNumber.setPalette(palette)
self.lcdNumber.setProperty("value", 0.0)
self.lcdNumber.setObjectName("lcdNumber")
self.lcdNumber_2 = QtWidgets.QLCDNumber(self.tab_3)
self.lcdNumber_2.setGeometry(QtCore.QRect(210, 100, 64, 23))
self.lcdNumber_2.setPalette(palette)
self.lcdNumber_2.setProperty("value", 0.0)
self.lcdNumber_2.setObjectName("lcdNumber_2")
self.lcdNumber_3 = QtWidgets.QLCDNumber(self.tab_3)
self.lcdNumber_3.setGeometry(QtCore.QRect(210, 140, 64, 23))
self.lcdNumber_3.setPalette(palette)
self.lcdNumber_3.setProperty("value", 0.0)
self.lcdNumber_3.setObjectName("lcdNumber_3")
self.lcdNumber_4 = QtWidgets.QLCDNumber(self.tab_3)
self.lcdNumber_4.setGeometry(QtCore.QRect(210, 180, 64, 23))
self.lcdNumber_4.setPalette(palette)
self.lcdNumber_4.setProperty("value", 0.0)
self.lcdNumber_4.setObjectName("lcdNumber_4")
self.label_6 = QtWidgets.QLabel(self.tab_3)
self.label_6.setGeometry(QtCore.QRect(60, 180, 111, 16))
self.label_6.setObjectName("label_6")
self.tabWidget.addTab(self.tab_3, "")
self.tab_4 = QtWidgets.QWidget()
self.tab_4.setObjectName("tab_4")
self.pushButton_6 = QtWidgets.QPushButton(self.tab_4)
self.pushButton_6.setGeometry(QtCore.QRect(140, 70, 191, 28))
self.pushButton_6.setObjectName("pushButton_6")
self.pushButton_6.clicked.connect(self.visualize)
self.pushButton_7 = QtWidgets.QPushButton(self.tab_4)
self.pushButton_7.setGeometry(QtCore.QRect(140, 120, 191, 28))
self.pushButton_7.setObjectName("pushButton_7")
self.pushButton_7.clicked.connect(self.visualize_2)
self.tabWidget.addTab(self.tab_4, "")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(550, 20, 93, 28))
self.pushButton.setObjectName("pushButton")
self.pushButton.clicked.connect(self.reset)
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(550, 60, 93, 28))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_2.clicked.connect(quit)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Expense Tracker MARK II"))
self.comboBox.setItemText(0, _translate("MainWindow", "Food"))
self.comboBox.setItemText(1, _translate("MainWindow", "Clothing"))
self.comboBox.setItemText(2, _translate("MainWindow", "Footwears"))
self.comboBox.setItemText(3, _translate("MainWindow", "Personal"))
self.comboBox.setItemText(4, _translate("MainWindow", "Travel"))
        self.comboBox.setItemText(5, _translate("MainWindow", "Stationery"))
self.comboBox.setItemText(6, _translate("MainWindow", "Rations"))
self.comboBox.setItemText(7, _translate("MainWindow", "Gifts"))
self.comboBox.setItemText(8, _translate("MainWindow", "Party"))
self.comboBox.setItemText(9, _translate("MainWindow", "Others"))
self.label.setText(_translate("MainWindow", "ENTER AMOUNT"))
self.label_2.setText(_translate("MainWindow", "SELECT REASON"))
self.pushButton_3.setText(_translate("MainWindow", "Add Record"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Add"))
self.pushButton_4.setText(_translate("MainWindow", "View"))
## self.radioButton.setText(_translate("MainWindow", "First 5"))
## self.radioButton_2.setText(_translate("MainWindow", "Last 5"))
## self.radioButton_3.setText(_translate("MainWindow", "All"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "View"))
self.pushButton_5.setText(_translate("MainWindow", "Summarize"))
        self.label_3.setText(_translate("MainWindow", "Highest Spending"))
self.label_4.setText(_translate("MainWindow", "Lowest Spending"))
self.label_5.setText(_translate("MainWindow", "Average Spending"))
self.label_6.setText(_translate("MainWindow", "Total Spending"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("MainWindow", "Summary"))
self.pushButton_6.setText(_translate("MainWindow", "Reason Wise BreakDown"))
self.pushButton_7.setText(_translate("MainWindow", "Day wise BreakDown"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("MainWindow", "Visualize"))
self.pushButton.setText(_translate("MainWindow", "RESET"))
self.pushButton_2.setText(_translate("MainWindow", "Close"))
def summary(self, MainWindow):
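        # fill the four LCD read-outs (highest, lowest, average, total) from the expense CSV;
        # -1 signals that no records exist yet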
if path.exists('Expense_Record.csv'):
d=pd.read_csv('Expense_Record.csv')
self.lcdNumber.setProperty("value",float(d['Value'].max()))
self.lcdNumber_2.setProperty("value",float(d['Value'].min()))
self.lcdNumber_3.setProperty("value",float(d['Value'].mean()))
self.lcdNumber_4.setProperty("value",float(d['Value'].sum()))
else:
self.lcdNumber.setProperty("value",float(-1))
self.lcdNumber_2.setProperty("value",float(-1))
self.lcdNumber_3.setProperty("value",float(-1))
self.lcdNumber_4.setProperty("value",float(-1))
def reset(self,MainWindow):
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
a=Dialog.exec()
if path.exists('Expense_Record.csv'):
if a==1:
os.remove('Expense_Record.csv')
self.summary(MainWindow)
def enter_record(self,MainWindow):
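        # append one row (today's date, amount, reason) to Expense_Record.csv,
        # writing the header only when the file is first created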
s=str(date.today())
k=int(self.lineEdit.text())
text = self.comboBox.currentText()
print(s,k,text)
data=[s,k,text]
df=pd.DataFrame({ "date" : data[0],"value":data[1],"reason":data[2]},index=[0])
if path.exists('Expense_Record.csv'):
df.to_csv('Expense_Record.csv',mode='a',header=False,index=False)
else:
df.to_csv('Expense_Record.csv',mode='a',header=['Date','Value','Reason'],index=False)
def visualize(self,MainWindow):
df = pd.read_csv('Expense_Record.csv')
plt.bar(df['Reason'],df['Value'],width=0.4)
plt.show()
def visualize_2(self,MainWindow):
df = pd.read_csv('Expense_Record.csv')
plt.bar(df['Date'],df['Value'],width=0.4)
plt.show()
def view(self,MainWindow):
## if self.radioButton.isChecked()==True:
## print('view 5')
## elif self.radioButton_2.isChecked()==True:
## print('view last 5')
## elif self.radioButton_3.isChecked()==True:
## print('view all')
df = pd.read_csv('Expense_Record.csv')
self.tableWidget.setColumnCount(len(df.columns))
self.tableWidget.setRowCount(len(df.index))
self.tableWidget.setHorizontalHeaderItem(0,QtWidgets.QTableWidgetItem("Date"))
self.tableWidget.setHorizontalHeaderItem(1,QtWidgets.QTableWidgetItem("Value"))
self.tableWidget.setHorizontalHeaderItem(2,QtWidgets.QTableWidgetItem("Reason"))
for i in range(len(df.index)):
for j in range(len(df.columns)):
self.tableWidget.setItem(i,j,QtWidgets.QTableWidgetItem(str(df.iloc[i, j])))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
``` |
{
"source": "jlohding/blog",
"score": 3
} |
#### File: jlohding/blog/app.py
```python
import datetime as dt
from flask import Flask, render_template, redirect
from flask_flatpages import FlatPages
app = Flask(__name__)
app.config['FLATPAGES_EXTENSION'] = '.html' #'.md'
pages = FlatPages(app)
@app.route("/")
@app.route("/home")
def home_page():
posts = [x for x in pages if "date" in x.meta]
sorted_pages=sorted(posts, reverse=True, key=lambda page: dt.datetime.strptime(page.meta["date"], "%d %b %Y"))
return render_template("home.html", pages=sorted_pages)
@app.route("/projects")
def projects_page():
return render_template("projects.html")
@app.route("/blog/")
@app.route("/blog/<filter_tag>")
def blog_page(filter_tag=None):
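    # filter_tag may match either one of a post's tags or a substring of its date (e.g. a year)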
posts = [x for x in pages if "date" in x.meta]
sorted_pages=sorted(posts, reverse=True, key=lambda page: dt.datetime.strptime(page.meta["date"], "%d %b %Y"))
if filter_tag:
filter_pages = [page for page in sorted_pages if filter_tag in page.meta["tags"] or filter_tag in page.meta["date"]]
else:
filter_pages = sorted_pages
return render_template("blog.html", pages=sorted_pages, filter_pages=filter_pages)
@app.route("/about")
def about_page():
return render_template("about.html")
@app.route("/blog/<path:path>.html")
def blog_post(path):
page = pages.get_or_404(path)
return render_template("post.html", page=page)
@app.route("/external_links/<site>")
def ext_links(site):
sites = {
"github": "https://www.github.com/jlohding",
"arbitrade": "https://www.github.com/jlohding/arbitrade",
"blog_github": "https://www.github.com/jlohding/blog",
"linkedin": "https://www.linkedin.com/in/jerryloh2000",
}
return redirect(sites[site], code=302)
if __name__ == "__main__":
app.run(debug=False, use_reloader=True)
``` |
{
"source": "jlohmoeller/ngsi-timeseries-api",
"score": 2
} |
#### File: reporter/tests/test_1TNE1A.py
```python
from conftest import QL_URL, crate_translator as translator
from datetime import datetime
from reporter.tests.utils import insert_test_data
import pytest
import requests
entity_type = 'Room'
attr_name = 'temperature'
n_days = 6
def query_url(values=False):
url = "{qlUrl}/types/{entityType}/attrs/{attrName}"
if values:
url += '/value'
return url.format(
qlUrl=QL_URL,
entityType=entity_type,
attrName=attr_name,
)
@pytest.fixture()
def reporter_dataset(translator):
insert_test_data(translator,
[entity_type],
n_entities=3,
n_days=n_days)
yield
def test_1TNE1A_defaults(reporter_dataset):
# Query without specific id
query_params = {
'type': entity_type,
}
r = requests.get(query_url(), params=query_params)
assert r.status_code == 200, r.text
# Assert Results
expected_values = list(range(n_days))
expected_index = [
'1970-01-{:02}T00:00:00'.format(i+1) for i in expected_values
]
expected_entities = [
{
'entityId': 'Room0',
'index': expected_index,
'values': expected_values,
},
{
'entityId': 'Room1',
'index': expected_index,
'values': expected_values,
},
{
'entityId': 'Room2',
'index': expected_index,
'values': expected_values,
}
]
obtained_data = r.json()
assert isinstance(obtained_data, dict)
assert obtained_data['data']['entityType'] == entity_type
assert obtained_data['data']['attrName'] == attr_name
assert obtained_data['data']['entities'] == expected_entities
def test_1TNE1A_one_entity(reporter_dataset):
# Query
entity_id = 'Room1'
query_params = {
'type': entity_type,
'id': entity_id
}
r = requests.get(query_url(), params=query_params)
assert r.status_code == 200, r.text
obtained_data = r.json()
assert isinstance(obtained_data, dict)
expected_values = list(range(n_days))
expected_index = [
'1970-01-{:02}T00:00:00'.format(i+1) for i in expected_values
]
expected_entities = [
{
'entityId': 'Room1',
'index': expected_index,
'values': expected_values,
}
]
assert obtained_data['data']['entityType'] == entity_type
assert obtained_data['data']['attrName'] == attr_name
assert obtained_data['data']['entities'] == expected_entities
def test_1TNE1A_some_entities(reporter_dataset):
# Query
entity_id = 'Room0,Room2'
query_params = {
'type': entity_type,
'id': entity_id
}
r = requests.get(query_url(), params=query_params)
assert r.status_code == 200, r.text
# Assert Results
expected_values = list(range(n_days))
expected_index = [
'1970-01-{:02}T00:00:00'.format(i+1) for i in expected_values
]
expected_entities = [
{
'entityId': 'Room0',
'index': expected_index,
'values': expected_values,
},
{
'entityId': 'Room2',
'index': expected_index,
'values': expected_values,
}
]
obtained_data = r.json()
assert isinstance(obtained_data, dict)
assert obtained_data['data']['entityType'] == entity_type
assert obtained_data['data']['attrName'] == attr_name
assert obtained_data['data']['entities'] == expected_entities
def test_1TNE1A_values_defaults(reporter_dataset):
# Query
query_params = {
'type': entity_type,
'id': 'Room0,,Room1,RoomNotValid', # -> validates to Room0,Room1.
}
r = requests.get(query_url(values=True), params=query_params)
assert r.status_code == 200, r.text
# Assert Results
expected_values = list(range(n_days))
expected_index = [
'1970-01-{:02}T00:00:00'.format(i+1) for i in expected_values
]
expected_entities = [
{
'entityId': 'Room0',
'index': expected_index,
'values': expected_values,
},
{
'entityId': 'Room1',
'index': expected_index,
'values': expected_values,
}
]
obtained_data = r.json()
assert isinstance(obtained_data, dict)
assert obtained_data == {'data': {'values': expected_entities}}
def test_not_found():
query_params = {
'type': entity_type,
'id': 'RoomNotValid'
}
r = requests.get(query_url(), params=query_params)
assert r.status_code == 404, r.text
assert r.json() == {
"error": "Not Found",
"description": "No records were found for such query."
}
def test_weird_ids(reporter_dataset):
"""
Invalid ids are ignored (provided at least one is valid to avoid 404).
Empty values are ignored.
Order of ids is preserved in response (e.g., Room1 first, Room0 later)
"""
query_params = {
'type': entity_type,
'id': 'Room1,RoomNotValid,,Room0,', # -> validates to Room0,Room1.
}
r = requests.get(query_url(), params=query_params)
assert r.status_code == 200, r.text
# Assert Results
expected_values = list(range(n_days))
expected_index = [
'1970-01-{:02}T00:00:00'.format(i+1) for i in expected_values
]
expected_entities = [
{
'entityId': 'Room1',
'index': expected_index,
'values': expected_values,
},
{
'entityId': 'Room0',
'index': expected_index,
'values': expected_values,
}
]
obtained_data = r.json()
assert isinstance(obtained_data, dict)
assert obtained_data['data']['entityType'] == entity_type
assert obtained_data['data']['attrName'] == attr_name
assert obtained_data['data']['entities'] == expected_entities
def test_different_time_indexes(translator):
"""
Each entity should have its time_index array.
"""
t = 'Room'
insert_test_data(translator, [t], n_entities=1, entity_id='Room1', n_days=2)
insert_test_data(translator, [t], n_entities=1, entity_id='Room3', n_days=4)
insert_test_data(translator, [t], n_entities=1, entity_id='Room2', n_days=3)
query_params = {
'type': 'Room',
'id': 'Room3,Room1,Room2',
}
r = requests.get(query_url(), params=query_params)
assert r.status_code == 200, r.text
expected_entities = [
{
'entityId': 'Room3',
'index': ['1970-01-{:02}T00:00:00'.format(i+1) for i in range(4)],
'values': list(range(4)),
},
{
'entityId': 'Room1',
'index': ['1970-01-{:02}T00:00:00'.format(i+1) for i in range(2)],
'values': list(range(2)),
},
{
'entityId': 'Room2',
'index': ['1970-01-{:02}T00:00:00'.format(i+1) for i in range(3)],
'values': list(range(3)),
}
]
obtained_data = r.json()
assert isinstance(obtained_data, dict)
assert obtained_data['data']['entityType'] == 'Room'
assert obtained_data['data']['attrName'] == attr_name
assert obtained_data['data']['entities'] == expected_entities
def test_aggregation_is_per_instance(translator):
"""
Attribute Aggregation works by default on a per-instance basis.
Cross-instance aggregation not yet supported.
It would change the shape of the response.
"""
t = 'Room'
insert_test_data(translator, [t], n_entities=1, entity_id='Room0', n_days=3)
insert_test_data(translator, [t], n_entities=1, entity_id='Room1', n_days=9)
query_params = {
'type': t,
'id': 'Room0,Room1',
'aggrMethod': 'sum'
}
r = requests.get(query_url(), params=query_params)
assert r.status_code == 200, r.text
# Assert Results
expected_entities = [
{
'entityId': 'Room0',
'index': ['', ''],
'values': [sum(range(3))],
},
{
'entityId': 'Room1',
'index': ['', ''],
'values': [sum(range(9))],
}
]
obtained_data = r.json()
assert isinstance(obtained_data, dict)
assert obtained_data['data']['entityType'] == t
assert obtained_data['data']['attrName'] == attr_name
assert obtained_data['data']['entities'] == expected_entities
# Index array in the response is the used fromDate and toDate
query_params = {
'type': t,
'id': 'Room0,Room1',
'aggrMethod': 'max',
'fromDate': datetime(1970, 1, 1).isoformat(),
'toDate': datetime(1970, 1, 6).isoformat(),
}
r = requests.get(query_url(), params=query_params)
assert r.status_code == 200, r.text
# Assert Results
expected_entities = [
{
'entityId': 'Room0',
'index': ['1970-01-01T00:00:00', '1970-01-06T00:00:00'],
'values': [2],
},
{
'entityId': 'Room1',
'index': ['1970-01-01T00:00:00', '1970-01-06T00:00:00'],
'values': [5],
}
]
obtained_data = r.json()
assert isinstance(obtained_data, dict)
assert obtained_data['data']['entityType'] == t
assert obtained_data['data']['attrName'] == attr_name
assert obtained_data['data']['entities'] == expected_entities
def test_1T1ENA_aggrPeriod(reporter_dataset):
# GH issue https://github.com/smartsdk/ngsi-timeseries-api/issues/89
# aggrPeriod needs aggrMethod
query_params = {
'type': entity_type,
'aggrPeriod': 'minute',
}
r = requests.get(query_url(), params=query_params)
assert r.status_code == 400, r.text
query_params = {
'type': entity_type,
'aggrMethod': 'avg',
'aggrPeriod': 'minute',
}
r = requests.get(query_url(), params=query_params)
assert r.status_code == 501, r.text
```
#### File: src/tests/test_integration.py
```python
import json
import logging
import os
import requests
import time
logger = logging.getLogger(__name__)
# INPUT VARIABLES
QL_URL = os.environ.get("QL_URL", "http://localhost:8668")
ORION_URL = os.environ.get("ORION_URL", "http://localhost:1026")
ORION_URL_4QL = os.environ.get("ORION_URL_4QL", "http://orion:1026")
QL_URL_4ORION = os.environ.get("QL_URL_4ORION", "http://quantumleap:8668")
HEADERS_PUT = {'Content-Type': 'application/json'}
def get_entity(entity_type, entity_id):
return {
"id": entity_id,
"type": entity_type,
"address": {
"streetAddress": "IJzerlaan",
"postOfficeBoxNumber": "18",
"addressLocality": "Antwerpen",
"addressCountry": "BE"
},
"dateObserved": "2017-11-03T12:37:23.734827",
"source": "http://testing.data.from.smartsdk",
"precipitation": 0,
"relativeHumidity": 0.54,
"temperature": 12.2,
"windDirection": 186,
"windSpeed": 0.64,
"airQualityLevel": "moderate",
"airQualityIndex": 65,
"reliability": 0.7,
"CO": 500,
"NO": 45,
"NO2": 69,
"NOx": 139,
"SO2": 11,
"CO_Level": "moderate",
"refPointOfInterest": "null"
}
def create_orion_subscription(orion_url, ql_url, entity_type):
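    # create the subscription through QuantumLeap's /v2/subscribe endpoint and
    # return the id Orion assigned to it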
# Some overhead due to
# https://github.com/telefonicaid/fiware-orion/issues/3237
old_sub_ids = set([])
subs = requests.get("{}/v2/subscriptions".format(orion_url))
if subs.text:
old_sub_ids.update(set([s['id'] for s in subs.json()]))
# Create ORION Subscription
subscribe_url = "{}/v2/subscribe".format(ql_url)
params = {
'orionUrl': '{}/v2'.format(ORION_URL_4QL),
'quantumleapUrl': '{}/v2'.format(QL_URL_4ORION),
'entityType': entity_type,
}
r = requests.post(subscribe_url, params=params)
assert r.status_code == 201, "Failed to create Orion Subscription. " \
"{}".format(r.text)
# Get Sub id to delete it later
subs = requests.get("{}/v2/subscriptions".format(ORION_URL))
new_sub_ids = set([s['id'] for s in subs.json()])
created_ids = new_sub_ids.difference(old_sub_ids)
if len(created_ids) == 1:
return created_ids.pop()
if len(created_ids) > 1:
# A sub was created in the meantime. Get the correct one.
for i in created_ids:
s = requests.get("{}/v2/subscriptions/{}".format(ORION_URL, i))
if s.ok and 'TestIntegrationEntity' in s.text:
return i
assert False
def test_integration():
"""
Sanity Check for a complete deployment of QuantumLeap.
Make sure to set/edit the INPUT VARIABLES.
"""
# Validate QL_URL
res = requests.get("{}/v2/version".format(QL_URL))
assert res.ok, "{} not accessible. {}".format(QL_URL, res.text)
# Validate ORION_URL
res = requests.get("{}/version".format(ORION_URL))
assert res.ok, "{} not accessible. {}".format(ORION_URL, res.text)
# Prepare entity
entity_id = 'test_integration_entity_001'
entity_type = 'TestIntegrationEntity'
entity = get_entity(entity_type, entity_id)
# Create Subscription
sub_id = create_orion_subscription(ORION_URL, QL_URL, entity_type)
time.sleep(1)
try:
# Insert Data in ORION
data = json.dumps(entity)
url = "{}/v2/entities".format(ORION_URL)
params = {'options': 'keyValues'}
res = requests.post(url, data=data, params=params, headers=HEADERS_PUT)
assert res.ok
time.sleep(2)
# Update values in Orion
patch = {
"precipitation": {
"value": 100,
"type": "Number"
}
}
url = "{}/v2/entities/{}/attrs".format(ORION_URL, entity_id)
res = requests.patch(url, data=json.dumps(patch), headers=HEADERS_PUT)
assert res.ok
time.sleep(2)
# Query records in QuantumLeap
url = "{}/v2/entities/{}/attrs/precipitation".format(QL_URL, entity_id)
res = requests.get(url, params={'type': entity_type})
assert res.ok
index = res.json()['data']['index']
assert len(index) > 1
assert index[0] != index[-1]
finally:
# Cleanup Subscription
r = requests.delete("{}/v2/subscriptions/{}".format(ORION_URL, sub_id))
assert r.ok
# Cleanup Entity
r = requests.delete("{}/v2/entities/{}".format(ORION_URL, entity_id))
assert r.ok
# Cleanup Historical Records
r = requests.delete("{}/v2/types/{}".format(QL_URL, entity_type))
assert r.ok
```
#### File: translators/tests/test_crate.py
```python
from exceptions.exceptions import AmbiguousNGSIIdError
from translators.base_translator import BaseTranslator
from translators.benchmark import benchmark
from translators.crate import NGSI_TEXT
from conftest import crate_translator as translator
from utils.common import *
import statistics
def test_db_version(translator):
version = translator.get_db_version()
assert version == '3.0.5'
def test_insert(translator):
entities = create_random_entities(1, 2, 3, use_time=True, use_geo=True)
result = translator.insert(entities)
assert result.rowcount == len(entities)
def test_insert_entity(translator, entity):
now = datetime.now().isoformat(timespec='microseconds')
entity[BaseTranslator.TIME_INDEX_NAME] = now
result = translator.insert([entity])
assert result.rowcount == 1
translator._refresh([entity['type']])
loaded_entity = translator.query()
# These 2 can be ignored when empty. TODO: #12 Support attribute metadata
entity['temperature'].pop('metadata')
entity['pressure'].pop('metadata')
assert_ngsi_entity_equals(entity, loaded_entity[0])
def test_insert_multiple_types(translator):
entities = create_random_entities(num_types=3, num_ids_per_type=2, num_updates=1, use_time=True, use_geo=True)
result = translator.insert(entities)
assert result.rowcount > 0
# Again to check metadata handling works fine
entities = create_random_entities(num_types=3, num_ids_per_type=2, num_updates=1, use_time=True, use_geo=True)
result = translator.insert(entities)
assert result.rowcount > 0
def test_query_all_before_insert(translator):
loaded_entities = translator.query()
assert len(loaded_entities) == 0
def test_query_all(translator):
entities = create_random_entities(2, 2, 2, use_time=True, use_geo=True)
result = translator.insert(entities)
assert result.rowcount > 0
translator._refresh(['0', '1'])
loaded_entities = translator.query()
assert len(loaded_entities) == len(entities)
key = lambda e: e[BaseTranslator.TIME_INDEX_NAME]
a = sorted(entities, key=key)
b = sorted(loaded_entities, key=key)
for e, le in zip(a, b):
assert_ngsi_entity_equals(e, le)
def test_attrs_by_entity_id(translator):
# First insert some data
num_updates = 10
entities = create_random_entities(num_types=2,
num_ids_per_type=2,
num_updates=num_updates,
use_time=True,
use_geo=True)
translator.insert(entities)
translator._refresh(['0', '1'])
# Now query by entity id
entity_id = '0-1'
loaded_entities = translator.query(entity_type='0', entity_id=entity_id)
assert len(loaded_entities) == num_updates
assert all(map(lambda e: e['id'] == '0-1', loaded_entities))
# entity_type should be optional
entity_id = '1-1'
loaded_entities = translator.query(entity_id=entity_id)
assert len(loaded_entities) == num_updates
assert all(map(lambda e: e['id'] == '1-1', loaded_entities))
# nonexistent id should return no data
loaded_entities = translator.query(entity_id='some_nonexistent_id')
assert len(loaded_entities) == 0
def test_attrs_by_id_ambiguity(translator):
entities = create_random_entities(num_types=2,
num_ids_per_type=2,
num_updates=3)
for e in entities:
e['id'] = 'repeated_id'
translator.insert(entities)
translator._refresh(['0', '1'])
# OK if specifying type
loaded_entities = translator.query(entity_type='0', entity_id='repeated_id')
assert len(loaded_entities) == 3 * 2
# NOT OK otherwise
with pytest.raises(AmbiguousNGSIIdError):
translator.query(entity_id='repeated_id')
WITHIN_EAST_EMISPHERE = "within(attr_geo, 'POLYGON ((0 -90, 180 -90, 180 90, 0 90, 0 -90))')"
@pytest.mark.parametrize("attr_name, clause, tester", [
("attr_bool", "= True", lambda e: e["attr_bool"]["value"]),
("attr_str", "> 'M'", lambda e: e["attr_str"]["value"] > "M"),
("attr_float", "< 0.5", lambda e: e["attr_float"]["value"] < 0.5),
("attr_time", "> '1970-06-28T00:00'", lambda e: e["attr_time"]["value"] > datetime(1970, 6, 28).isoformat(timespec='microseconds')),
(WITHIN_EAST_EMISPHERE, "", lambda e: e["attr_geo"]["value"]["coordinates"][0] > 0),
])
def test_query_per_attribute(translator, attr_name, clause, tester):
num_types = 1
num_ids_per_type = 2
num_updates = 10
entities = create_random_entities(num_types, num_ids_per_type, num_updates, use_time=True, use_geo=True)
translator.insert(entities)
translator._refresh(['0'])
entities = translator.query(entity_type='0', where_clause="where {} {}".format(attr_name, clause))
total = num_types * num_ids_per_type * num_updates
assert len(entities) > 0, "No entities where found with the clause: {}{}".format(attr_name, clause)
assert len(entities) < total, "All entities matched the clause. Not expected from an uniform random distribution"
assert all(map(tester, entities))
def test_average(translator):
num_updates = 10
entities = create_random_entities(2, 2, num_updates, use_time=True, use_geo=True)
translator.insert(entities)
translator._refresh(['0', '1'])
# Per entity_id
eid = '0-1'
entity_mean = statistics.mean(e['attr_float']['value'] for e in entities if e['id'] == eid)
entity_mean_read = translator.average(attr_name='attr_float', entity_type='0', entity_id=eid)
assert pytest.approx(entity_mean_read) == entity_mean
# Total
total_mean = statistics.mean(e['attr_float']['value'] for e in entities)
total_mean_read = translator.average(attr_name='attr_float')
assert pytest.approx(total_mean_read) == total_mean
def test_benchmark(translator):
benchmark(translator, num_types=2, num_ids_per_type=2, num_updates=10, use_geo=False, use_time=False)
def test_benchmark_extended(translator):
benchmark(translator, num_types=2, num_ids_per_type=2, num_updates=10, use_geo=True, use_time=True)
def test_unsupported_ngsi_type(translator):
e = {
"type": "SoMeWeIrDtYpE",
"id": "sOmEwEiRdId",
TIME_INDEX_NAME: datetime.now().isoformat(timespec='microseconds'),
"foo": {
"type": "DefinitivelyNotAValidNGSIType",
"value": "BaR",
},
}
translator.insert([e])
translator._refresh([e['type']])
entities = translator.query()
assert len(entities) == 1
assert_ngsi_entity_equals(e, entities[0])
def test_missing_type_defaults_string(translator):
e = {
"type": "SoMeWeIrDtYpE",
"id": "sOmEwEiRdId",
TIME_INDEX_NAME: datetime.now().isoformat(timespec='microseconds'),
"foo": {
"value": "BaR",
},
}
translator.insert([e])
translator._refresh([e['type']])
entities = translator.query()
assert len(entities) == 1
# Response will include the type
e["foo"]["type"] = NGSI_TEXT
assert_ngsi_entity_equals(e, entities[0])
def test_capitals(translator):
entity_type = "SoMeWeIrDtYpE"
e = {
"type": entity_type,
"id": "sOmEwEiRdId",
TIME_INDEX_NAME: datetime.now().isoformat(timespec='microseconds'),
"Foo": {
"type": "Text",
"value": "FoO",
},
"bAr": {
"type": "Text",
"value": "bAr",
},
}
translator.insert([e])
translator._refresh([entity_type])
entities = translator.query()
assert len(entities) == 1
assert_ngsi_entity_equals(e, entities[0])
# If a new attribute comes later, I want it translated as well.
e2 = e.copy()
e2['id'] = 'SOmEwEiRdId2'
e2['NewAttr'] = {"type": "Text", "value": "NewAttrValue!"}
e2[TIME_INDEX_NAME] = datetime.now().isoformat(timespec='microseconds')
translator.insert([e2])
translator._refresh([entity_type])
entities = translator.query()
assert len(entities) == 2
assert_ngsi_entity_equals(e2, entities[1])
# Note that old entity gets None for the new attribute
e['NewAttr'] = {'type': 'Text', 'value': None}
assert_ngsi_entity_equals(e, entities[0])
def test_no_time_index(translator):
"""
The Reporter is responsible for injecting the 'time_index' attribute to the
entity, but even if for some reason the attribute is not there, there
should be no problem with the insertion.
"""
e = {
'id': 'entityId1',
'type': 'type1',
'foo': {'type': 'Text', 'value': "SomeText"}
}
translator.insert([e])
translator._refresh([e['type']])
assert len(translator.query()) == 1
def test_long_json(translator):
# Github issue 44
big_entity = {
'id': 'entityId1',
'type': 'type1',
TIME_INDEX_NAME: datetime.now().isoformat(timespec='microseconds'),
'foo': {
'type': 'Text',
'value': "SomeTextThatWillGetLong" * 2000
}
}
translator.insert([big_entity])
translator._refresh([big_entity['type']])
r = translator.query()
assert len(r) == 1
assert_ngsi_entity_equals(big_entity, r[0])
def test_geo_point(translator):
# Github issue #35: Support geo:point
entity = {
'id': 'Room1',
'type': 'Room',
TIME_INDEX_NAME: datetime.now().isoformat(timespec='microseconds'),
'location': {
'type': 'geo:point',
'value': "19.6389474, -98.9109537" # lat, long
}
}
translator.insert([entity])
translator._refresh([entity['type']])
# Check location is saved as a geo_point column in crate
op = 'select latitude(location), longitude(location) from etroom'
translator.cursor.execute(op)
res = translator.cursor.fetchall()
assert len(res) == 1
assert res[0] == [19.6389474, -98.9109537]
entities = translator.query()
assert len(entities) == 1
# Check entity is retrieved as it was inserted
assert_ngsi_entity_equals(entity, entities[0])
def test_structured_value_to_array(translator):
entity = {
'id': '8906',
'type': 'AirQualityObserved',
TIME_INDEX_NAME: datetime.now().isoformat(timespec='microseconds'),
'aqi': {'type': 'Number', 'value': 43},
'city': {'type': 'Text', 'value': 'Antwerpen'},
'h': {'type': 'Number', 'value': 93},
'location': {
'type': 'geo:point',
'value': '51.2056589, 4.4180728',
},
'measurand': {
'type': 'StructuredValue',
'value': ['pm25, 43, ugm3, PM25', 'pm10, 30, ugm3, PM10',
'p, 1012, hPa, Pressure']
},
'p': {'type': 'Number', 'value': 1012},
'pm10': {'type': 'Number', 'value': 30},
'pm25': {'type': 'Number', 'value': 43},
't': {'type': 'Number', 'value': 8.33}
}
translator.insert([entity])
translator._refresh([entity['type']])
r = translator.query()
assert len(r) == 1
# TODO Github issue 24
# assert_ngsi_entity_equals(entity, r[0])
def test_ISO8601(translator):
"""
ISO8601 should be a valid type, equivalent to DateTime.
"""
e = {
"type": "MyType",
"id": "MyId",
TIME_INDEX_NAME: datetime.now().isoformat(timespec='microseconds'),
"iso_attr": {
"type": "ISO8601",
"value": "2018-03-20T13:26:38.722000",
},
}
result = translator.insert([e])
assert result.rowcount > 0
translator._refresh([e['type']])
loaded = translator.query()
assert len(loaded) > 0
assert_ngsi_entity_equals(e, loaded[0])
################################################################################
# FIWARE DATA MODELS
################################################################################
def test_air_quality_observed(translator, air_quality_observed):
# Add TIME_INDEX as Reporter would
now = datetime.now().isoformat(timespec='microseconds')
air_quality_observed[TIME_INDEX_NAME] = now
result = translator.insert([air_quality_observed])
assert result.rowcount > 0
translator._refresh([air_quality_observed['type']])
loaded = translator.query()
assert len(loaded) > 0
assert_ngsi_entity_equals(air_quality_observed, loaded[0])
def test_traffic_flow_observed(translator, traffic_flow_observed):
# Add TIME_INDEX as Reporter would
now = datetime.now().isoformat(timespec='microseconds')
traffic_flow_observed[TIME_INDEX_NAME] = now
result = translator.insert([traffic_flow_observed])
assert result.rowcount > 0
translator._refresh([traffic_flow_observed['type']])
loaded = translator.query()
assert len(loaded) > 0
assert_ngsi_entity_equals(traffic_flow_observed, loaded[0])
``` |
{
"source": "J-L-O/IIC",
"score": 2
} |
#### File: segmentation/baselines/kmeans_segmentation_eval.py
```python
from datetime import datetime
from sys import stdout as sysout
import numpy as np
import torch
from sklearn.cluster import MiniBatchKMeans
from src.utils.cluster.eval_metrics import _hungarian_match, _acc, _nmi, _ari
from src.utils.cluster.transforms import sobel_process
GET_NMI_ARI = False
# expects single head architectures (ie not IID/+)
# Train all-in-one using samples. Eval batch using full dataset. Datasets are
# both the full labelled segments. Could have included unlabelled for former.
# (only impacts on Potsdam)
def kmeans_segmentation_eval(config, net, test_dataloader):
net.eval()
kmeans = train_kmeans(config, net, test_dataloader)
torch.cuda.empty_cache()
return apply_trained_kmeans(config, net, test_dataloader, kmeans)
def train_kmeans(config, net, test_dataloader):
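    # sample up to config.max_num_kmeans_samples unmasked feature vectors across
    # the dataset and fit MiniBatchKMeans with gt_k clusters on them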
num_imgs = len(test_dataloader.dataset)
max_num_pixels_per_img = int(config.max_num_kmeans_samples / num_imgs)
features_all = np.zeros(
(config.max_num_kmeans_samples, net.module.features_sz),
dtype=np.float32)
actual_num_features = 0
# discard the label information in the dataloader
for i, tup in enumerate(test_dataloader):
if (config.verbose and i < 10) or (i % int(len(test_dataloader) / 10) == 0):
print(("(kmeans_segmentation_eval) batch %d time %s" % (i, datetime.now())))
sysout.flush()
imgs, _, mask = tup # test dataloader, cpu tensors
imgs = imgs.cuda()
mask = mask.numpy().astype(np.bool)
# mask = mask.numpy().astype(np.bool)
num_unmasked = mask.sum()
if not config.no_sobel:
imgs = sobel_process(imgs, config.include_rgb, using_IR=config.using_IR)
# now rgb(ir) and/or sobel
with torch.no_grad():
# penultimate = features
x_out = net(imgs, penultimate=True).cpu().numpy()
if config.verbose and i < 2:
print(("(kmeans_segmentation_eval) through model %d time %s" % (i,
datetime.now())))
sysout.flush()
num_imgs_batch = x_out.shape[0]
x_out = x_out.transpose((0, 2, 3, 1)) # features last
x_out = x_out[mask, :]
if config.verbose and i < 2:
print(("(kmeans_segmentation_eval) applied mask %d time %s" % (i,
datetime.now())))
sysout.flush()
if i == 0:
assert (x_out.shape[1] == net.module.features_sz)
assert (x_out.shape[0] == num_unmasked)
# select pixels randomly, and record how many selected
num_selected = min(num_unmasked, num_imgs_batch * max_num_pixels_per_img)
        selected = np.random.choice(num_unmasked, size=num_selected, replace=False)  # pick num_selected distinct pixel indices
x_out = x_out[selected, :]
if config.verbose and i < 2:
print(("(kmeans_segmentation_eval) applied select %d time %s" % (i,
datetime.now())))
sysout.flush()
features_all[actual_num_features:actual_num_features + num_selected, :] = \
x_out
actual_num_features += num_selected
if config.verbose and i < 2:
print(("(kmeans_segmentation_eval) stored %d time %s" % (i,
datetime.now())))
sysout.flush()
assert (actual_num_features <= config.max_num_kmeans_samples)
features_all = features_all[:actual_num_features, :]
if config.verbose:
print("running kmeans")
sysout.flush()
kmeans = MiniBatchKMeans(n_clusters=config.gt_k, verbose=config.verbose).fit(
features_all)
return kmeans
def apply_trained_kmeans(config, net, test_dataloader, kmeans):
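    # predict a cluster for every unmasked pixel, Hungarian-match cluster ids to
    # ground-truth classes, then compute accuracy (and optionally NMI/ARI)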
if config.verbose:
print("starting inference")
sysout.flush()
# on the entire test dataset
num_imgs = len(test_dataloader.dataset)
max_num_samples = num_imgs * config.input_sz * config.input_sz
preds_all = torch.zeros(max_num_samples, dtype=torch.int32).cuda()
targets_all = torch.zeros(max_num_samples, dtype=torch.int32).cuda()
actual_num_unmasked = 0
# discard the label information in the dataloader
for i, tup in enumerate(test_dataloader):
if (config.verbose and i < 10) or (i % int(len(test_dataloader) / 10) == 0):
print(("(apply_trained_kmeans) batch %d time %s" % (i, datetime.now())))
sysout.flush()
imgs, targets, mask = tup # test dataloader, cpu tensors
imgs, mask_cuda, targets, mask_np = imgs.cuda(), mask.cuda(), \
targets.cuda(), mask.numpy().astype(
np.bool)
num_unmasked = mask_cuda.sum().item()
if not config.no_sobel:
imgs = sobel_process(imgs, config.include_rgb, using_IR=config.using_IR)
# now rgb(ir) and/or sobel
with torch.no_grad():
# penultimate = features
x_out = net(imgs, penultimate=True).cpu().numpy()
x_out = x_out.transpose((0, 2, 3, 1)) # features last
x_out = x_out[mask_np, :]
targets = targets.masked_select(mask_cuda) # can do because flat
assert (x_out.shape == (num_unmasked, net.module.features_sz))
preds = torch.from_numpy(kmeans.predict(x_out)).cuda()
preds_all[actual_num_unmasked: actual_num_unmasked + num_unmasked] = preds
targets_all[
actual_num_unmasked: actual_num_unmasked + num_unmasked] = targets
actual_num_unmasked += num_unmasked
preds_all = preds_all[:actual_num_unmasked]
targets_all = targets_all[:actual_num_unmasked]
torch.cuda.empty_cache()
# permutation, not many-to-one
match = _hungarian_match(preds_all, targets_all, preds_k=config.gt_k,
targets_k=config.gt_k)
torch.cuda.empty_cache()
# do in cpu because of RAM
reordered_preds = torch.zeros(actual_num_unmasked, dtype=preds_all.dtype)
for pred_i, target_i in match:
selected = (preds_all == pred_i).cpu()
reordered_preds[selected] = target_i
reordered_preds = reordered_preds.cuda()
# this checks values
acc = _acc(reordered_preds, targets_all, config.gt_k, verbose=config.verbose)
if GET_NMI_ARI:
nmi, ari = _nmi(reordered_preds, targets_all), \
_ari(reordered_preds, targets_all)
else:
nmi, ari = -1., -1.
reordered_masses = np.zeros(config.gt_k)
for c in range(config.gt_k):
reordered_masses[c] = float(
(reordered_preds == c).sum()) / actual_num_unmasked
return acc, nmi, ari, reordered_masses
``` |
{
"source": "jlokimlin/anomaly_detection",
"score": 3
} |
#### File: anomaly_detection/sample/helpers.py
```python
from pytrends.request import TrendReq
def get_proxie_google_connection(geo_region='en-US'):
pytrends_proxie = TrendReq(
hl=geo_region,
tz=360,
timeout=(10,25),
        proxies=['https://34.203.233.13:80',],
retries=2,
backoff_factor=0.1)
return pytrends_proxie
def gtrends(keywords, geo_region='en-US', timeframe='today 5-y'):
"""
Description:
Get a pandas DataFrame with Google trends data as a
two column data frame where the first column consists of the timestamps
and the second column consists of the observation.
timestamps: year-month-date
observation: Numbers represent search interest relative to the highest point on the chart for the given region and time. A value of 100 is the peak popularity for the term. A value of 50 means that the term is half as popular. A score of 0 means there was not enough data for this term.
Usage:
        gtrends(keywords=['World Cup', 'FIFA', 'Russia'])
    Arguments:
        keywords: A list of strings.
        geo_region: A string with the host language/region code passed to pytrends (default 'en-US').
        timeframe: A string with the Google Trends time range (default 'today 5-y').
Value:
The returned value is a pandas DataFrame containing timestamps and observation values.
"""
# Login to Google
pytrend = get_proxie_google_connection(geo_region)
# Create payload and capture API tokens. Only required for interest_over_time(), interest_by_region(), and related_queries() methods.
pytrend.build_payload(kw_list=keywords, timeframe=timeframe)
# Get Google Trends of keywords as a Pandas DataFrame
google_trends_df = pytrend.interest_over_time()
return google_trends_df
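# Minimal usage sketch (assumes the pytrends package is installed, network access to Google
# Trends is available, and the proxy configured in get_proxie_google_connection() is reachable):
if __name__ == '__main__':
    trends_df = gtrends(keywords=['World Cup', 'FIFA', 'Russia'])
    print(trends_df.head())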
``` |
{
"source": "jlongever/redfish-client-python",
"score": 2
} |
#### File: on_http_redfish_1_0/models/power_1_0_0_power.py
```python
from pprint import pformat
from six import iteritems
class Power100Power(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Power100Power - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'odata_context': 'Odata400Context',
'odata_id': 'Odata400Id',
'odata_type': 'Odata400Type',
'description': 'ResourceDescription',
'id': 'ResourceId',
'name': 'ResourceName',
'oem': 'ResourceOem',
'power_control': 'list[Power100PowerControl]',
'power_controlodata_count': 'Odata400Count',
'power_controlodata_navigation_link': 'Odata400IdRef',
'power_supplies': 'list[Power100PowerSupply]',
'power_suppliesodata_count': 'Odata400Count',
'power_suppliesodata_navigation_link': 'Odata400IdRef',
'redundancy': 'list[RedundancyRedundancy]',
'redundancyodata_count': 'Odata400Count',
'redundancyodata_navigation_link': 'Odata400IdRef',
'voltages': 'list[Power100Voltage]',
'voltagesodata_count': 'Odata400Count',
'voltagesodata_navigation_link': 'Odata400IdRef'
}
self.attribute_map = {
'odata_context': '@odata.context',
'odata_id': '@odata.id',
'odata_type': '@odata.type',
'description': 'Description',
'id': 'Id',
'name': 'Name',
'oem': 'Oem',
'power_control': 'PowerControl',
'power_controlodata_count': '<EMAIL>',
'power_controlodata_navigation_link': '<EMAIL>',
'power_supplies': 'PowerSupplies',
'power_suppliesodata_count': '<EMAIL>',
'power_suppliesodata_navigation_link': '<EMAIL>',
'redundancy': 'Redundancy',
'redundancyodata_count': '<EMAIL>',
'redundancyodata_navigation_link': '<EMAIL>',
'voltages': 'Voltages',
'voltagesodata_count': '<EMAIL>',
'voltagesodata_navigation_link': '<EMAIL>'
}
self._odata_context = None
self._odata_id = None
self._odata_type = None
self._description = None
self._id = None
self._name = None
self._oem = None
self._power_control = None
self._power_controlodata_count = None
self._power_controlodata_navigation_link = None
self._power_supplies = None
self._power_suppliesodata_count = None
self._power_suppliesodata_navigation_link = None
self._redundancy = None
self._redundancyodata_count = None
self._redundancyodata_navigation_link = None
self._voltages = None
self._voltagesodata_count = None
self._voltagesodata_navigation_link = None
@property
def odata_context(self):
"""
Gets the odata_context of this Power100Power.
:return: The odata_context of this Power100Power.
:rtype: Odata400Context
"""
return self._odata_context
@odata_context.setter
def odata_context(self, odata_context):
"""
Sets the odata_context of this Power100Power.
:param odata_context: The odata_context of this Power100Power.
:type: Odata400Context
"""
self._odata_context = odata_context
@property
def odata_id(self):
"""
Gets the odata_id of this Power100Power.
:return: The odata_id of this Power100Power.
:rtype: Odata400Id
"""
return self._odata_id
@odata_id.setter
def odata_id(self, odata_id):
"""
Sets the odata_id of this Power100Power.
:param odata_id: The odata_id of this Power100Power.
:type: Odata400Id
"""
self._odata_id = odata_id
@property
def odata_type(self):
"""
Gets the odata_type of this Power100Power.
:return: The odata_type of this Power100Power.
:rtype: Odata400Type
"""
return self._odata_type
@odata_type.setter
def odata_type(self, odata_type):
"""
Sets the odata_type of this Power100Power.
:param odata_type: The odata_type of this Power100Power.
:type: Odata400Type
"""
self._odata_type = odata_type
@property
def description(self):
"""
Gets the description of this Power100Power.
:return: The description of this Power100Power.
:rtype: ResourceDescription
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this Power100Power.
:param description: The description of this Power100Power.
:type: ResourceDescription
"""
self._description = description
@property
def id(self):
"""
Gets the id of this Power100Power.
:return: The id of this Power100Power.
:rtype: ResourceId
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Power100Power.
:param id: The id of this Power100Power.
:type: ResourceId
"""
self._id = id
@property
def name(self):
"""
Gets the name of this Power100Power.
:return: The name of this Power100Power.
:rtype: ResourceName
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Power100Power.
:param name: The name of this Power100Power.
:type: ResourceName
"""
self._name = name
@property
def oem(self):
"""
Gets the oem of this Power100Power.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:return: The oem of this Power100Power.
:rtype: ResourceOem
"""
return self._oem
@oem.setter
def oem(self, oem):
"""
Sets the oem of this Power100Power.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:param oem: The oem of this Power100Power.
:type: ResourceOem
"""
self._oem = oem
@property
def power_control(self):
"""
Gets the power_control of this Power100Power.
This is the definition for power control function (power reading/limiting).
:return: The power_control of this Power100Power.
:rtype: list[Power100PowerControl]
"""
return self._power_control
@power_control.setter
def power_control(self, power_control):
"""
Sets the power_control of this Power100Power.
This is the definition for power control function (power reading/limiting).
:param power_control: The power_control of this Power100Power.
:type: list[Power100PowerControl]
"""
self._power_control = power_control
@property
def power_controlodata_count(self):
"""
Gets the power_controlodata_count of this Power100Power.
:return: The power_controlodata_count of this Power100Power.
:rtype: Odata400Count
"""
return self._power_controlodata_count
@power_controlodata_count.setter
def power_controlodata_count(self, power_controlodata_count):
"""
Sets the power_controlodata_count of this Power100Power.
:param power_controlodata_count: The power_controlodata_count of this Power100Power.
:type: Odata400Count
"""
self._power_controlodata_count = power_controlodata_count
@property
def power_controlodata_navigation_link(self):
"""
Gets the power_controlodata_navigation_link of this Power100Power.
:return: The power_controlodata_navigation_link of this Power100Power.
:rtype: Odata400IdRef
"""
return self._power_controlodata_navigation_link
@power_controlodata_navigation_link.setter
def power_controlodata_navigation_link(self, power_controlodata_navigation_link):
"""
Sets the power_controlodata_navigation_link of this Power100Power.
:param power_controlodata_navigation_link: The power_controlodata_navigation_link of this Power100Power.
:type: Odata400IdRef
"""
self._power_controlodata_navigation_link = power_controlodata_navigation_link
@property
def power_supplies(self):
"""
Gets the power_supplies of this Power100Power.
Details of the power supplies associated with this system or device
:return: The power_supplies of this Power100Power.
:rtype: list[Power100PowerSupply]
"""
return self._power_supplies
@power_supplies.setter
def power_supplies(self, power_supplies):
"""
Sets the power_supplies of this Power100Power.
Details of the power supplies associated with this system or device
:param power_supplies: The power_supplies of this Power100Power.
:type: list[Power100PowerSupply]
"""
self._power_supplies = power_supplies
@property
def power_suppliesodata_count(self):
"""
Gets the power_suppliesodata_count of this Power100Power.
:return: The power_suppliesodata_count of this Power100Power.
:rtype: Odata400Count
"""
return self._power_suppliesodata_count
@power_suppliesodata_count.setter
def power_suppliesodata_count(self, power_suppliesodata_count):
"""
Sets the power_suppliesodata_count of this Power100Power.
:param power_suppliesodata_count: The power_suppliesodata_count of this Power100Power.
:type: Odata400Count
"""
self._power_suppliesodata_count = power_suppliesodata_count
@property
def power_suppliesodata_navigation_link(self):
"""
Gets the power_suppliesodata_navigation_link of this Power100Power.
:return: The power_suppliesodata_navigation_link of this Power100Power.
:rtype: Odata400IdRef
"""
return self._power_suppliesodata_navigation_link
@power_suppliesodata_navigation_link.setter
def power_suppliesodata_navigation_link(self, power_suppliesodata_navigation_link):
"""
Sets the power_suppliesodata_navigation_link of this Power100Power.
:param power_suppliesodata_navigation_link: The power_suppliesodata_navigation_link of this Power100Power.
:type: Odata400IdRef
"""
self._power_suppliesodata_navigation_link = power_suppliesodata_navigation_link
@property
def redundancy(self):
"""
Gets the redundancy of this Power100Power.
Redundancy information for the power subsystem of this system or device
:return: The redundancy of this Power100Power.
:rtype: list[RedundancyRedundancy]
"""
return self._redundancy
@redundancy.setter
def redundancy(self, redundancy):
"""
Sets the redundancy of this Power100Power.
Redundancy information for the power subsystem of this system or device
:param redundancy: The redundancy of this Power100Power.
:type: list[RedundancyRedundancy]
"""
self._redundancy = redundancy
@property
def redundancyodata_count(self):
"""
Gets the redundancyodata_count of this Power100Power.
:return: The redundancyodata_count of this Power100Power.
:rtype: Odata400Count
"""
return self._redundancyodata_count
@redundancyodata_count.setter
def redundancyodata_count(self, redundancyodata_count):
"""
Sets the redundancyodata_count of this Power100Power.
:param redundancyodata_count: The redundancyodata_count of this Power100Power.
:type: Odata400Count
"""
self._redundancyodata_count = redundancyodata_count
@property
def redundancyodata_navigation_link(self):
"""
Gets the redundancyodata_navigation_link of this Power100Power.
:return: The redundancyodata_navigation_link of this Power100Power.
:rtype: Odata400IdRef
"""
return self._redundancyodata_navigation_link
@redundancyodata_navigation_link.setter
def redundancyodata_navigation_link(self, redundancyodata_navigation_link):
"""
Sets the redundancyodata_navigation_link of this Power100Power.
:param redundancyodata_navigation_link: The redundancyodata_navigation_link of this Power100Power.
:type: Odata400IdRef
"""
self._redundancyodata_navigation_link = redundancyodata_navigation_link
@property
def voltages(self):
"""
Gets the voltages of this Power100Power.
This is the definition for voltage sensors.
:return: The voltages of this Power100Power.
:rtype: list[Power100Voltage]
"""
return self._voltages
@voltages.setter
def voltages(self, voltages):
"""
Sets the voltages of this Power100Power.
This is the definition for voltage sensors.
:param voltages: The voltages of this Power100Power.
:type: list[Power100Voltage]
"""
self._voltages = voltages
@property
def voltagesodata_count(self):
"""
Gets the voltagesodata_count of this Power100Power.
:return: The voltagesodata_count of this Power100Power.
:rtype: Odata400Count
"""
return self._voltagesodata_count
@voltagesodata_count.setter
def voltagesodata_count(self, voltagesodata_count):
"""
Sets the voltagesodata_count of this Power100Power.
:param voltagesodata_count: The voltagesodata_count of this Power100Power.
:type: Odata400Count
"""
self._voltagesodata_count = voltagesodata_count
@property
def voltagesodata_navigation_link(self):
"""
Gets the voltagesodata_navigation_link of this Power100Power.
:return: The voltagesodata_navigation_link of this Power100Power.
:rtype: Odata400IdRef
"""
return self._voltagesodata_navigation_link
@voltagesodata_navigation_link.setter
def voltagesodata_navigation_link(self, voltagesodata_navigation_link):
"""
Sets the voltagesodata_navigation_link of this Power100Power.
:param voltagesodata_navigation_link: The voltagesodata_navigation_link of this Power100Power.
:type: Odata400IdRef
"""
self._voltagesodata_navigation_link = voltagesodata_navigation_link
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
#### File: on_http_redfish_1_0/models/power_1_0_0_power_supply.py
```python
from pprint import pformat
from six import iteritems
class Power100PowerSupply(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Power100PowerSupply - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'line_input_voltage_type': 'Power100LineInputVoltageType',
'member_id': 'str',
'oem': 'ResourceOem',
'power_supply_type': 'Power100PowerSupplyType',
'redundancy': 'list[RedundancyRedundancy]',
'redundancyodata_count': 'Odata400Count',
'redundancyodata_navigation_link': 'Odata400IdRef',
'related_item': 'list[Odata400IdRef]',
'related_itemodata_count': 'Odata400Count',
'related_itemodata_navigation_link': 'Odata400IdRef',
'status': 'ResourceStatus'
}
self.attribute_map = {
'line_input_voltage_type': 'LineInputVoltageType',
'member_id': 'MemberId',
'oem': 'Oem',
'power_supply_type': 'PowerSupplyType',
'redundancy': 'Redundancy',
'redundancyodata_count': '<EMAIL>',
'redundancyodata_navigation_link': '<EMAIL>',
'related_item': 'RelatedItem',
'related_itemodata_count': '<EMAIL>',
'related_itemodata_navigation_link': '<EMAIL>',
'status': 'Status'
}
self._line_input_voltage_type = None
self._member_id = None
self._oem = None
self._power_supply_type = None
self._redundancy = None
self._redundancyodata_count = None
self._redundancyodata_navigation_link = None
self._related_item = None
self._related_itemodata_count = None
self._related_itemodata_navigation_link = None
self._status = None
@property
def line_input_voltage_type(self):
"""
Gets the line_input_voltage_type of this Power100PowerSupply.
The line voltage type supported as an input to this Power Supply
:return: The line_input_voltage_type of this Power100PowerSupply.
:rtype: Power100LineInputVoltageType
"""
return self._line_input_voltage_type
@line_input_voltage_type.setter
def line_input_voltage_type(self, line_input_voltage_type):
"""
Sets the line_input_voltage_type of this Power100PowerSupply.
The line voltage type supported as an input to this Power Supply
:param line_input_voltage_type: The line_input_voltage_type of this Power100PowerSupply.
:type: Power100LineInputVoltageType
"""
self._line_input_voltage_type = line_input_voltage_type
@property
def member_id(self):
"""
Gets the member_id of this Power100PowerSupply.
This is the identifier for the member within the collection.
:return: The member_id of this Power100PowerSupply.
:rtype: str
"""
return self._member_id
@member_id.setter
def member_id(self, member_id):
"""
Sets the member_id of this Power100PowerSupply.
This is the identifier for the member within the collection.
:param member_id: The member_id of this Power100PowerSupply.
:type: str
"""
self._member_id = member_id
@property
def oem(self):
"""
Gets the oem of this Power100PowerSupply.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:return: The oem of this Power100PowerSupply.
:rtype: ResourceOem
"""
return self._oem
@oem.setter
def oem(self, oem):
"""
Sets the oem of this Power100PowerSupply.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:param oem: The oem of this Power100PowerSupply.
:type: ResourceOem
"""
self._oem = oem
@property
def power_supply_type(self):
"""
Gets the power_supply_type of this Power100PowerSupply.
The Power Supply type (AC or DC)
:return: The power_supply_type of this Power100PowerSupply.
:rtype: Power100PowerSupplyType
"""
return self._power_supply_type
@power_supply_type.setter
def power_supply_type(self, power_supply_type):
"""
Sets the power_supply_type of this Power100PowerSupply.
The Power Supply type (AC or DC)
:param power_supply_type: The power_supply_type of this Power100PowerSupply.
:type: Power100PowerSupplyType
"""
self._power_supply_type = power_supply_type
@property
def redundancy(self):
"""
Gets the redundancy of this Power100PowerSupply.
This structure is used to show redundancy for fans. The Component ids will reference the members of the redundancy groups.
:return: The redundancy of this Power100PowerSupply.
:rtype: list[RedundancyRedundancy]
"""
return self._redundancy
@redundancy.setter
def redundancy(self, redundancy):
"""
Sets the redundancy of this Power100PowerSupply.
This structure is used to show redundancy for fans. The Component ids will reference the members of the redundancy groups.
:param redundancy: The redundancy of this Power100PowerSupply.
:type: list[RedundancyRedundancy]
"""
self._redundancy = redundancy
@property
def redundancyodata_count(self):
"""
Gets the redundancyodata_count of this Power100PowerSupply.
:return: The redundancyodata_count of this Power100PowerSupply.
:rtype: Odata400Count
"""
return self._redundancyodata_count
@redundancyodata_count.setter
def redundancyodata_count(self, redundancyodata_count):
"""
Sets the redundancyodata_count of this Power100PowerSupply.
:param redundancyodata_count: The redundancyodata_count of this Power100PowerSupply.
:type: Odata400Count
"""
self._redundancyodata_count = redundancyodata_count
@property
def redundancyodata_navigation_link(self):
"""
Gets the redundancyodata_navigation_link of this Power100PowerSupply.
:return: The redundancyodata_navigation_link of this Power100PowerSupply.
:rtype: Odata400IdRef
"""
return self._redundancyodata_navigation_link
@redundancyodata_navigation_link.setter
def redundancyodata_navigation_link(self, redundancyodata_navigation_link):
"""
Sets the redundancyodata_navigation_link of this Power100PowerSupply.
:param redundancyodata_navigation_link: The redundancyodata_navigation_link of this Power100PowerSupply.
:type: Odata400IdRef
"""
self._redundancyodata_navigation_link = redundancyodata_navigation_link
@property
def related_item(self):
"""
Gets the related_item of this Power100PowerSupply.
The ID(s) of the resources associated with this Power Limit
:return: The related_item of this Power100PowerSupply.
:rtype: list[Odata400IdRef]
"""
return self._related_item
@related_item.setter
def related_item(self, related_item):
"""
Sets the related_item of this Power100PowerSupply.
The ID(s) of the resources associated with this Power Limit
:param related_item: The related_item of this Power100PowerSupply.
:type: list[Odata400IdRef]
"""
self._related_item = related_item
@property
def related_itemodata_count(self):
"""
Gets the related_itemodata_count of this Power100PowerSupply.
:return: The related_itemodata_count of this Power100PowerSupply.
:rtype: Odata400Count
"""
return self._related_itemodata_count
@related_itemodata_count.setter
def related_itemodata_count(self, related_itemodata_count):
"""
Sets the related_itemodata_count of this Power100PowerSupply.
:param related_itemodata_count: The related_itemodata_count of this Power100PowerSupply.
:type: Odata400Count
"""
self._related_itemodata_count = related_itemodata_count
@property
def related_itemodata_navigation_link(self):
"""
Gets the related_itemodata_navigation_link of this Power100PowerSupply.
:return: The related_itemodata_navigation_link of this Power100PowerSupply.
:rtype: Odata400IdRef
"""
return self._related_itemodata_navigation_link
@related_itemodata_navigation_link.setter
def related_itemodata_navigation_link(self, related_itemodata_navigation_link):
"""
Sets the related_itemodata_navigation_link of this Power100PowerSupply.
:param related_itemodata_navigation_link: The related_itemodata_navigation_link of this Power100PowerSupply.
:type: Odata400IdRef
"""
self._related_itemodata_navigation_link = related_itemodata_navigation_link
@property
def status(self):
"""
Gets the status of this Power100PowerSupply.
:return: The status of this Power100PowerSupply.
:rtype: ResourceStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this Power100PowerSupply.
:param status: The status of this Power100PowerSupply.
:type: ResourceStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
#### File: on_http_redfish_1_0/models/rack_hd_boot_image_boot_image.py
```python
from pprint import pformat
from six import iteritems
class RackHDBootImageBootImage(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
RackHDBootImageBootImage - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'root_ssh_key': 'str',
'domain': 'str',
'users': 'list[RackHDBootImageUsers]',
'hostname': 'str',
'os_name': 'str',
'repo': 'str',
'version': 'str',
'network_devices': 'list[RackHDBootImageNetworkDevice]',
'root_password': '<PASSWORD>',
'dns_servers': 'list[str]',
'install_disk': 'str'
}
self.attribute_map = {
'root_ssh_key': 'rootSshKey',
'domain': 'domain',
'users': 'users',
'hostname': 'hostname',
'os_name': 'osName',
'repo': 'repo',
'version': 'version',
'network_devices': 'networkDevices',
'root_password': '<PASSWORD>',
'dns_servers': 'dnsServers',
'install_disk': 'installDisk'
}
self._root_ssh_key = None
self._domain = None
self._users = None
self._hostname = None
self._os_name = None
self._repo = None
self._version = None
self._network_devices = None
self._root_password = None
self._dns_servers = None
self._install_disk = None
@property
def root_ssh_key(self):
"""
Gets the root_ssh_key of this RackHDBootImageBootImage.
This is the SshKey for the OS root account.
:return: The root_ssh_key of this RackHDBootImageBootImage.
:rtype: str
"""
return self._root_ssh_key
@root_ssh_key.setter
def root_ssh_key(self, root_ssh_key):
"""
Sets the root_ssh_key of this RackHDBootImageBootImage.
This is the SshKey for the OS root account.
:param root_ssh_key: The root_ssh_key of this RackHDBootImageBootImage.
:type: str
"""
self._root_ssh_key = root_ssh_key
@property
def domain(self):
"""
Gets the domain of this RackHDBootImageBootImage.
This is the domain for the target OS
:return: The domain of this RackHDBootImageBootImage.
:rtype: str
"""
return self._domain
@domain.setter
def domain(self, domain):
"""
Sets the domain of this RackHDBootImageBootImage.
This is the domain for the target OS
:param domain: The domain of this RackHDBootImageBootImage.
:type: str
"""
self._domain = domain
@property
def users(self):
"""
Gets the users of this RackHDBootImageBootImage.
This is a list of user account information that will created after OS installation
:return: The users of this RackHDBootImageBootImage.
:rtype: list[RackHDBootImageUsers]
"""
return self._users
@users.setter
def users(self, users):
"""
Sets the users of this RackHDBootImageBootImage.
This is a list of user account information that will created after OS installation
:param users: The users of this RackHDBootImageBootImage.
:type: list[RackHDBootImageUsers]
"""
self._users = users
@property
def hostname(self):
"""
Gets the hostname of this RackHDBootImageBootImage.
The hostname for target OS.
:return: The hostname of this RackHDBootImageBootImage.
:rtype: str
"""
return self._hostname
@hostname.setter
def hostname(self, hostname):
"""
Sets the hostname of this RackHDBootImageBootImage.
The hostname for target OS.
:param hostname: The hostname of this RackHDBootImageBootImage.
:type: str
"""
self._hostname = hostname
@property
def os_name(self):
"""
Gets the os_name of this RackHDBootImageBootImage.
Name of the target OS to be installed
:return: The os_name of this RackHDBootImageBootImage.
:rtype: str
"""
return self._os_name
@os_name.setter
def os_name(self, os_name):
"""
Sets the os_name of this RackHDBootImageBootImage.
Name of the target OS to be installed
:param os_name: The os_name of this RackHDBootImageBootImage.
:type: str
"""
allowed_values = ["CentOS", "CentOS+KVM", "ESXi", "RHEL", "RHEL+KVM"]
if os_name not in allowed_values:
raise ValueError(
"Invalid value for `os_name`, must be one of {0}"
.format(allowed_values)
)
self._os_name = os_name
@property
def repo(self):
"""
Gets the repo of this RackHDBootImageBootImage.
The external OS repository address, currently only supports HTTP
:return: The repo of this RackHDBootImageBootImage.
:rtype: str
"""
return self._repo
@repo.setter
def repo(self, repo):
"""
Sets the repo of this RackHDBootImageBootImage.
The external OS repository address, currently only supports HTTP
:param repo: The repo of this RackHDBootImageBootImage.
:type: str
"""
self._repo = repo
@property
def version(self):
"""
Gets the version of this RackHDBootImageBootImage.
The version number of target OS that needs to install.
:return: The version of this RackHDBootImageBootImage.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this RackHDBootImageBootImage.
The version number of target OS that needs to install.
:param version: The version of this RackHDBootImageBootImage.
:type: str
"""
self._version = version
@property
def network_devices(self):
"""
Gets the network_devices of this RackHDBootImageBootImage.
List of device names and static IP settings for network devices after OS installation.
:return: The network_devices of this RackHDBootImageBootImage.
:rtype: list[RackHDBootImageNetworkDevice]
"""
return self._network_devices
@network_devices.setter
def network_devices(self, network_devices):
"""
Sets the network_devices of this RackHDBootImageBootImage.
List of device names and static IP settings for network devices after OS installation.
:param network_devices: The network_devices of this RackHDBootImageBootImage.
:type: list[RackHDBootImageNetworkDevice]
"""
self._network_devices = network_devices
@property
def root_password(self):
"""
Gets the root_password of this RackHDBootImageBootImage.
The password for the OS root account.
:return: The root_password of this RackHDBootImageBootImage.
:rtype: str
"""
return self._root_password
@root_password.setter
def root_password(self, root_password):
"""
Sets the root_password of this RackHDBootImageBootImage.
The password for the OS root account.
:param root_password: The root_password of this RackHDBootImageBootImage.
:type: str
"""
self._root_password = root_password
@property
def dns_servers(self):
"""
Gets the dns_servers of this RackHDBootImageBootImage.
This is a list of Domain Name Servers.
:return: The dns_servers of this RackHDBootImageBootImage.
:rtype: list[str]
"""
return self._dns_servers
@dns_servers.setter
def dns_servers(self, dns_servers):
"""
Sets the dns_servers of this RackHDBootImageBootImage.
This is a list of Domain Name Servers.
:param dns_servers: The dns_servers of this RackHDBootImageBootImage.
:type: list[str]
"""
self._dns_servers = dns_servers
@property
def install_disk(self):
"""
Gets the install_disk of this RackHDBootImageBootImage.
:return: The install_disk of this RackHDBootImageBootImage.
:rtype: str
"""
return self._install_disk
@install_disk.setter
def install_disk(self, install_disk):
"""
Sets the install_disk of this RackHDBootImageBootImage.
:param install_disk: The install_disk of this RackHDBootImageBootImage.
:type: str
"""
self._install_disk = install_disk
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
``` |
{
"source": "jlongman/mega-linter",
"score": 3
} |
#### File: mega-linter/megalinter/plugin_factory.py
```python
import logging
import shutil
import subprocess
import sys
import requests
from megalinter import config, linter_factory, utils
def list_plugins():
plugins = config.get_list("PLUGINS", [])
return plugins
# Load & install plugins from external URLs
def initialize_plugins():
plugins = list_plugins()
for plugin in plugins:
descriptor_file = load_plugin(plugin)
install_plugin(descriptor_file)
# Load plugin descriptor
def load_plugin(plugin):
if plugin.startswith("https://"):
# Check validity of plugin URL
descriptor_file = "/megalinter-descriptors/" + plugin.rsplit("/", 1)[1]
if "/mega-linter-plugin-" not in plugin:
raise Exception(
"[Plugins] Plugin descriptor file must be hosted in a directory containing /mega-linter-plugin-"
)
if not descriptor_file.endswith(".megalinter-descriptor.yml"):
raise Exception(
"[Plugins] Plugin descriptor file must end with .megalinter-descriptor.yml"
)
# Download plugin and write it in megalinter
try:
r = requests.get(plugin, allow_redirects=True)
open(descriptor_file, "wb").write(r.content)
logging.info(
f"[Plugins] Loaded plugin descriptor {descriptor_file} from {plugin}"
)
except Exception as e:
raise Exception(f"[Plugins] Unable to load plugin {plugin}:\n{str(e)}")
return descriptor_file
else:
raise Exception(
"[Plugins] Plugin descriptors must follow the format"
f" https://**/mega-linter-plugin-**/**.mega-linter-descriptor.yml (wrong value {plugin})"
)
# Run plugin installation routines
def install_plugin(descriptor_file):
descriptor = linter_factory.build_descriptor_info(descriptor_file)
# Install descriptor level items
if "install" in descriptor:
process_install(descriptor["install"])
# Install linter level items
if "linters" in descriptor:
for linter_description in descriptor["linters"]:
if "install" in descriptor:
process_install(linter_description["install"])
logging.info(
f"[Plugins] Successful initialization of {descriptor['descriptor_id']} plugins"
)
# WARNING: works only with dockerfile and RUN instructions for now
def process_install(install):
# Build commands from descriptor
commands = []
# Dockerfile commands
if "dockerfile" in install:
# Remove RUN and \ at the end of lines from commands
commands += [
command.replace("RUN ").replace(" \\\n", "\n")
for command in install["dockerfile"]
]
# Run install commands
for command in commands:
logging.debug("[Plugins] Install command: " + str(command))
process = subprocess.run(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
executable=shutil.which("bash") if sys.platform == "win32" else "/bin/bash",
)
return_code = process.returncode
stdout = utils.decode_utf8(process.stdout)
logging.debug(f"[Plugins] Result ({str(return_code)}): {stdout}")
if return_code != 0:
raise Exception(
f"[Plugins] Error while running install command {command}:\n{stdout}"
)
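# Minimal sketch of what process_install() consumes: a descriptor "install" section whose
# "dockerfile" entry lists RUN instructions. The command below is hypothetical and purely
# illustrative; running it assumes the megalinter imports above resolve and bash is available.
if __name__ == "__main__":
    example_install = {"dockerfile": ["RUN echo '[Plugins] installing example dependency'"]}
    process_install(example_install)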
``` |
{
"source": "JLooo2/Backbone",
"score": 3
} |
#### File: JLooo2/Backbone/Network_in_Network_bn_keras.py
```python
import keras
from keras import optimizers
from keras.callbacks import LearningRateScheduler, TensorBoard
from keras.datasets import cifar10
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
batch_size = 128
epochs = 200
iterations = 391
num_classes = 10
dropout = 0.5
weight_decay = 0.0001
def color_preprocessing(x_train, x_test):
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
mean = [125.307, 122.95, 113.865]
std = [62.9932, 62.0887, 66.7048]
for i in range(3):
x_train[:, :, :, i] = (x_train[:, :, :, i] - mean[i]) / std[i]
x_test[:, :, :, i] = (x_test[:, :, :, i] - mean[i]) / std[i]
return x_train, x_test
def scheduler(epoch):
if epoch <= 60:
return 0.05
if epoch <= 120:
return 0.01
if epoch <= 160:
return 0.002
return 0.0004
def build_model():
model = Sequential()
model.add(Conv2D(192, (5, 5), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay),
input_shape=x_train.shape[1:]))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(160, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(96, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))
model.add(Dropout(dropout))
model.add(Conv2D(192, (5, 5), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(192, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(192, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))
model.add(Dropout(dropout))
model.add(Conv2D(192, (3, 3), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(192, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(10, (1, 1), padding='same', kernel_regularizer=keras.regularizers.l2(weight_decay)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(GlobalAveragePooling2D())
model.add(Activation('softmax'))
sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
return model
if __name__ == '__main__':
# load data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train, x_test = color_preprocessing(x_train, x_test)
# build network
model = build_model()
print(model.summary())
# set callback
tb_cb = TensorBoard(log_dir='E:/Code/Backbone/nin_bn', histogram_freq=0)
change_lr = LearningRateScheduler(scheduler)
cbks = [change_lr, tb_cb]
# set data augmentation
print('Using real-time data augmentation.')
datagen = ImageDataGenerator(horizontal_flip=True, width_shift_range=0.125, height_shift_range=0.125,
fill_mode='constant', cval=0.)
datagen.fit(x_train)
# start training
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), steps_per_epoch=iterations,
epochs=epochs, callbacks=cbks, validation_data=(x_test, y_test))
model.save('E:/Code/Backbone/models/nin_bn.h5')
```
#### File: JLooo2/Backbone/Wide_ResNet_keras.py
```python
import keras
import numpy as np
from keras import optimizers
from keras.callbacks import LearningRateScheduler, TensorBoard
from keras.datasets import cifar10
from keras.layers import Conv2D, Dense, Input, add, Activation, Flatten, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
DEPTH = 28
WIDE = 10
IN_FILTERS = 16
CLASS_NUM = 10
IMG_ROWS, IMG_COLS = 32, 32
IMG_CHANNELS = 3
BATCH_SIZE = 128
EPOCHS = 200
ITERATIONS = 50000 // BATCH_SIZE + 1
WEIGHT_DECAY = 0.0005
LOG_FILE_PATH = './wide_resnet/'
from keras import backend as K
# set GPU memory
if ('tensorflow' == K.backend()):
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
def scheduler(epoch):
if epoch < 60:
return 0.1
if epoch < 120:
return 0.02
if epoch < 160:
return 0.004
return 0.0008
def color_preprocessing(x_train, x_test):
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
mean = [125.3, 123.0, 113.9]
std = [63.0, 62.1, 66.7]
for i in range(3):
x_train[:, :, :, i] = (x_train[:, :, :, i] - mean[i]) / std[i]
x_test[:, :, :, i] = (x_test[:, :, :, i] - mean[i]) / std[i]
return x_train, x_test
def wide_residual_network(img_input, classes_num, depth, k):
print('Wide-Resnet %dx%d' % (depth, k))
n_filters = [16, 16 * k, 32 * k, 64 * k]
n_stack = (depth - 4) // 6
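    # e.g. with DEPTH=28, WIDE=10: n_stack = (28 - 4) // 6 = 4 residual blocks per stage,
    # giving the WRN-28-10 configuration printed above.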
def conv3x3(x, filters):
return Conv2D(filters=filters, kernel_size=(3, 3), strides=(1, 1), padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(WEIGHT_DECAY),
use_bias=False)(x)
def bn_relu(x):
x = BatchNormalization(momentum=0.9, epsilon=1e-5)(x)
x = Activation('relu')(x)
return x
def residual_block(x, out_filters, increase=False):
global IN_FILTERS
stride = (1, 1)
if increase:
stride = (2, 2)
o1 = bn_relu(x)
conv_1 = Conv2D(out_filters,
kernel_size=(3, 3), strides=stride, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(WEIGHT_DECAY),
use_bias=False)(o1)
o2 = bn_relu(conv_1)
conv_2 = Conv2D(out_filters,
kernel_size=(3, 3), strides=(1, 1), padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(WEIGHT_DECAY),
use_bias=False)(o2)
if increase or IN_FILTERS != out_filters:
proj = Conv2D(out_filters,
kernel_size=(1, 1), strides=stride, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(WEIGHT_DECAY),
use_bias=False)(o1)
block = add([conv_2, proj])
else:
block = add([conv_2, x])
return block
def wide_residual_layer(x, out_filters, increase=False):
global IN_FILTERS
x = residual_block(x, out_filters, increase)
IN_FILTERS = out_filters
for _ in range(1, int(n_stack)):
x = residual_block(x, out_filters)
return x
x = conv3x3(img_input, n_filters[0])
x = wide_residual_layer(x, n_filters[1])
x = wide_residual_layer(x, n_filters[2], increase=True)
x = wide_residual_layer(x, n_filters[3], increase=True)
x = BatchNormalization(momentum=0.9, epsilon=1e-5)(x)
x = Activation('relu')(x)
x = AveragePooling2D((8, 8))(x)
x = Flatten()(x)
x = Dense(classes_num,
activation='softmax',
kernel_initializer='he_normal',
kernel_regularizer=l2(WEIGHT_DECAY),
use_bias=False)(x)
return x
if __name__ == '__main__':
# load data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, CLASS_NUM)
y_test = keras.utils.to_categorical(y_test, CLASS_NUM)
# color preprocessing
x_train, x_test = color_preprocessing(x_train, x_test)
# build network
img_input = Input(shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS))
output = wide_residual_network(img_input, CLASS_NUM, DEPTH, WIDE)
resnet = Model(img_input, output)
print(resnet.summary())
# set optimizer
sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
resnet.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# set callback
tb_cb = TensorBoard(log_dir=LOG_FILE_PATH, histogram_freq=0)
change_lr = LearningRateScheduler(scheduler)
cbks = [change_lr, tb_cb]
# set data augmentation
print('Using real-time data augmentation.')
datagen = ImageDataGenerator(horizontal_flip=True,
width_shift_range=0.125, height_shift_range=0.125, fill_mode='reflect')
datagen.fit(x_train)
# start training
resnet.fit_generator(datagen.flow(x_train, y_train, batch_size=BATCH_SIZE),
steps_per_epoch=ITERATIONS,
epochs=EPOCHS,
callbacks=cbks,
validation_data=(x_test, y_test))
resnet.save('E:/Code/Backbone/models/wide_resnet.h5')
``` |
{
"source": "JLooo2/UXNet-PyTorch",
"score": 3
} |
#### File: UXNet-PyTorch/NUSNet_model/NUSNet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchstat import stat
from torchsummary import summary
# The Definition of Models : UXNet UXNet4 UXNet5 UXNet6 UXNet7 UXNetCAM UXNetSAM UXNetCBAM UXNet765CAM4SMALLSAM
class ChannelAttention(nn.Module):
def __init__(self, in_planes, ratio=16):
super(ChannelAttention, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
self.fc1 = nn.Conv2d(in_planes, in_planes // 16, 1, bias=False)
self.relu1 = nn.ReLU()
self.fc2 = nn.Conv2d(in_planes // 16, in_planes, 1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
out = avg_out + max_out
return self.sigmoid(out)
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
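# Usage pattern for the two attention modules above (mirrors how the NUS units below apply them):
#     x = ChannelAttention(channels)(x) * x   # re-weight feature channels
#     x = SpatialAttention()(x) * x           # re-weight spatial positions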
class REBNCONV(nn.Module):
def __init__(self, in_ch=3, out_ch=3, dirate=1):
super(REBNCONV, self).__init__()
self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate)
self.bn_s1 = nn.BatchNorm2d(out_ch)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))
return xout
class BNREDWSCONV(nn.Module):
def __init__(self, in_ch=3, out_ch=3, dirate=1):
super(BNREDWSCONV, self).__init__()
self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate, groups=in_ch)
self.point_conv = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=1, stride=1, padding=0,
groups=1)
self.bn_s1 = nn.BatchNorm2d(out_ch)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
xout = self.relu_s1(self.bn_s1(self.point_conv(self.conv_s1(hx))))
return xout
# upsample tensor 'src' to have the same spatial size with tensor 'tar'
def _upsample_like(src, tar):
src = F.interpolate(src, size=tar.shape[2:], mode='bilinear', align_corners=True)
return src
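# e.g. (sketch): src of shape (N, C, 8, 8) with tar of shape (N, C2, 32, 32) is returned
# bilinearly upsampled to (N, C, 32, 32).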
class NUSUnit7S(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnit7S, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.conv6 = BNREDWSCONV(mid_ch, mid_ch, dirate=5)
self.sa = SpatialAttention()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx = self.pool(hx1)
hx2 = self.conv4(hx)
hx = self.pool(hx2)
hx3 = self.conv5(hx)
hx = self.pool(hx3)
hx4 = self.conv6(hx)
hx = self.pool(hx4)
hx5 = self.conv4(hx)
hx = self.pool(hx5)
hx6 = self.conv5(hx)
hx7 = self.conv6(hx6)
hx6d = self.conv4(torch.add(hx7, hx6))
hx6dup = _upsample_like(hx6d, hx5)
hx5d = self.conv4(torch.add(hx6dup, hx5))
hx5dup = _upsample_like(hx5d, hx4)
hx4d = self.conv4(torch.add(hx5dup, hx4))
hx4dup = _upsample_like(hx4d, hx3)
hx3d = self.conv4(torch.add(hx4dup, hx3))
hx3dup = _upsample_like(hx3d, hx2)
hx2d = self.conv4(torch.add(hx3dup, hx2))
hx2dup = _upsample_like(hx2d, hx1)
hxinout = self.conv3(torch.add(hx2dup, hx1))
hxinout = self.sa(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnit7C(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnit7C, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.conv6 = BNREDWSCONV(mid_ch, mid_ch, dirate=5)
self.ca = ChannelAttention(out_ch)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx = self.pool(hx1)
hx2 = self.conv4(hx)
hx = self.pool(hx2)
hx3 = self.conv5(hx)
hx = self.pool(hx3)
hx4 = self.conv6(hx)
hx = self.pool(hx4)
hx5 = self.conv4(hx)
hx = self.pool(hx5)
hx6 = self.conv5(hx)
hx7 = self.conv6(hx6)
hx6d = self.conv4(torch.add(hx7, hx6))
hx6dup = _upsample_like(hx6d, hx5)
hx5d = self.conv4(torch.add(hx6dup, hx5))
hx5dup = _upsample_like(hx5d, hx4)
hx4d = self.conv4(torch.add(hx5dup, hx4))
hx4dup = _upsample_like(hx4d, hx3)
hx3d = self.conv4(torch.add(hx4dup, hx3))
hx3dup = _upsample_like(hx3d, hx2)
hx2d = self.conv4(torch.add(hx3dup, hx2))
hx2dup = _upsample_like(hx2d, hx1)
hxinout = self.conv3(torch.add(hx2dup, hx1))
hxinout = self.ca(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnit7CS(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnit7CS, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.conv6 = BNREDWSCONV(mid_ch, mid_ch, dirate=5)
self.sa = SpatialAttention()
self.ca = ChannelAttention(out_ch)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx = self.pool(hx1)
hx2 = self.conv4(hx)
hx = self.pool(hx2)
hx3 = self.conv5(hx)
hx = self.pool(hx3)
hx4 = self.conv6(hx)
hx = self.pool(hx4)
hx5 = self.conv4(hx)
hx = self.pool(hx5)
hx6 = self.conv5(hx)
hx7 = self.conv6(hx6)
hx6d = self.conv4(torch.add(hx7, hx6))
hx6dup = _upsample_like(hx6d, hx5)
hx5d = self.conv4(torch.add(hx6dup, hx5))
hx5dup = _upsample_like(hx5d, hx4)
hx4d = self.conv4(torch.add(hx5dup, hx4))
hx4dup = _upsample_like(hx4d, hx3)
hx3d = self.conv4(torch.add(hx4dup, hx3))
hx3dup = _upsample_like(hx3d, hx2)
hx2d = self.conv4(torch.add(hx3dup, hx2))
hx2dup = _upsample_like(hx2d, hx1)
hxinout = self.conv3(torch.add(hx2dup, hx1))
hxinout = self.ca(hxinout) * hxinout
hxinout = self.sa(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnit6S(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnit6S, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.conv6 = BNREDWSCONV(mid_ch, mid_ch, dirate=5)
self.sa = SpatialAttention()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx = self.pool(hx1)
hx2 = self.conv5(hx)
hx = self.pool(hx2)
hx3 = self.conv6(hx)
hx = self.pool(hx3)
hx4 = self.conv4(hx)
hx = self.pool(hx4)
hx5 = self.conv5(hx)
hx6 = self.conv6(hx5)
hx5d = self.conv4(torch.add(hx6, hx5))
hx5up = _upsample_like(hx5d, hx4)
hx4d = self.conv4(torch.add(hx5up, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.conv4(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.conv4(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hxinout = self.conv3(torch.add(hx2up, hx1))
hxinout = self.sa(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnit6C(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnit6C, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.conv6 = BNREDWSCONV(mid_ch, mid_ch, dirate=5)
self.ca = ChannelAttention(out_ch)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx = self.pool(hx1)
hx2 = self.conv5(hx)
hx = self.pool(hx2)
hx3 = self.conv6(hx)
hx = self.pool(hx3)
hx4 = self.conv4(hx)
hx = self.pool(hx4)
hx5 = self.conv5(hx)
hx6 = self.conv6(hx5)
hx5d = self.conv4(torch.add(hx6, hx5))
hx5up = _upsample_like(hx5d, hx4)
hx4d = self.conv4(torch.add(hx5up, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.conv4(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.conv4(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hxinout = self.conv3(torch.add(hx2up, hx1))
hxinout = self.ca(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnit6CS(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnit6CS, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.conv6 = BNREDWSCONV(mid_ch, mid_ch, dirate=5)
self.sa = SpatialAttention()
self.ca = ChannelAttention(out_ch)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx = self.pool(hx1)
hx2 = self.conv5(hx)
hx = self.pool(hx2)
hx3 = self.conv6(hx)
hx = self.pool(hx3)
hx4 = self.conv4(hx)
hx = self.pool(hx4)
hx5 = self.conv5(hx)
hx6 = self.conv6(hx5)
hx5d = self.conv4(torch.add(hx6, hx5))
hx5up = _upsample_like(hx5d, hx4)
hx4d = self.conv4(torch.add(hx5up, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.conv4(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.conv4(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hxinout = self.conv3(torch.add(hx2up, hx1))
hxinout = self.ca(hxinout) * hxinout
hxinout = self.sa(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnit5S(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnit5S, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.sa = SpatialAttention()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx = self.pool(hx1)
hx2 = self.conv4(hx)
hx = self.pool(hx2)
hx3 = self.conv5(hx)
hx = self.pool(hx3)
hx4 = self.conv4(hx)
hx5 = self.conv5(hx4)
hx4d = self.conv4(torch.add(hx5, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.conv4(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.conv4(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hxinout = self.conv3(torch.add(hx2up, hx1))
hxinout = self.sa(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnit5C(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnit5C, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.ca = ChannelAttention(out_ch)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx = self.pool(hx1)
hx2 = self.conv4(hx)
hx = self.pool(hx2)
hx3 = self.conv5(hx)
hx = self.pool(hx3)
hx4 = self.conv4(hx)
hx5 = self.conv5(hx4)
hx4d = self.conv4(torch.add(hx5, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.conv4(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.conv4(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hxinout = self.conv3(torch.add(hx2up, hx1))
hxinout = self.ca(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnit5CS(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnit5CS, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.sa = SpatialAttention()
self.ca = ChannelAttention(out_ch)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx = self.pool(hx1)
hx2 = self.conv4(hx)
hx = self.pool(hx2)
hx3 = self.conv5(hx)
hx = self.pool(hx3)
hx4 = self.conv4(hx)
hx5 = self.conv5(hx4)
hx4d = self.conv4(torch.add(hx5, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.conv4(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.conv4(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hxinout = self.conv3(torch.add(hx2up, hx1))
hxinout = self.ca(hxinout) * hxinout
hxinout = self.sa(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnit4C(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnit4C, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.ca = ChannelAttention(out_ch)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx = self.pool(hx1)
hx2 = self.conv5(hx)
hx = self.pool(hx2)
hx3 = self.conv4(hx)
hx4 = self.conv5(hx3)
hx3d = self.conv4(torch.add(hx4, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.conv4(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hxinout = self.conv3(torch.add(hx2up, hx1))
hxinout = self.ca(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnit4S(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnit4S, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.sa = SpatialAttention()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx = self.pool(hx1)
hx2 = self.conv5(hx)
hx = self.pool(hx2)
hx3 = self.conv4(hx)
hx4 = self.conv5(hx3)
hx3d = self.conv4(torch.add(hx4, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.conv4(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hxinout = self.conv3(torch.add(hx2up, hx1))
hxinout = self.sa(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnit4CS(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnit4CS, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.sa = SpatialAttention()
self.ca = ChannelAttention(out_ch)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx = self.pool(hx1)
hx2 = self.conv5(hx)
hx = self.pool(hx2)
hx3 = self.conv4(hx)
hx4 = self.conv5(hx3)
hx3d = self.conv4(torch.add(hx4, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.conv4(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hxinout = self.conv3(torch.add(hx2up, hx1))
hxinout = self.ca(hxinout) * hxinout
hxinout = self.sa(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnitSmallS(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnitSmallS, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.sa = SpatialAttention()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx2 = self.conv5(hx1)
hx3 = self.conv4(hx2)
hx4 = self.conv5(hx3)
hx3d = self.conv4(torch.add(hx4, hx3))
hx2d = self.conv4(torch.add(hx3d, hx2))
hxinout = self.conv3(torch.add(hx2d, hx1))
hxinout = self.sa(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnitSmallC(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnitSmallC, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.ca = ChannelAttention(out_ch)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx2 = self.conv5(hx1)
hx3 = self.conv4(hx2)
hx4 = self.conv5(hx3)
hx3d = self.conv4(torch.add(hx4, hx3))
hx2d = self.conv4(torch.add(hx3d, hx2))
hxinout = self.conv3(torch.add(hx2d, hx1))
hxinout = self.ca(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSUnitSmallCS(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(NUSUnitSmallCS, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.conv1 = REBNCONV(in_ch, out_ch, dirate=1)
self.conv2 = REBNCONV(out_ch, mid_ch, dirate=1)
self.conv3 = REBNCONV(mid_ch, out_ch, dirate=1)
self.conv4 = BNREDWSCONV(mid_ch, mid_ch, dirate=1)
self.conv5 = BNREDWSCONV(mid_ch, mid_ch, dirate=2)
self.sa = SpatialAttention()
self.ca = ChannelAttention(out_ch)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
hxin = self.conv1(hx)
hx1 = self.conv2(hxin)
hx2 = self.conv5(hx1)
hx3 = self.conv4(hx2)
hx4 = self.conv5(hx3)
hx3d = self.conv4(torch.add(hx4, hx3))
hx2d = self.conv4(torch.add(hx3d, hx2))
hxinout = self.conv3(torch.add(hx2d, hx1))
hxinout = self.ca(hxinout) * hxinout
hxinout = self.sa(hxinout) * hxinout
return self.relu(hxin + hxinout)
class NUSNet(nn.Module):
def __init__(self, in_ch=3, out_ch=1):
super(NUSNet, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.en1 = NUSUnit7S(in_ch, 32, 64)
self.en2 = NUSUnit6S(64, 32, 128)
self.en3 = NUSUnit5S(128, 64, 256)
self.en4 = NUSUnit4C(256, 128, 512)
self.en5 = NUSUnitSmallC(512, 256, 512)
self.en6 = NUSUnitSmallC(512, 256, 512)
self.de5 = NUSUnitSmallC(512, 256, 512)
self.de4 = NUSUnit4C(512, 128, 256)
self.de3 = NUSUnit5S(256, 64, 128)
self.de2 = NUSUnit6S(128, 32, 64)
self.de1 = NUSUnit7S(64, 32, 64)
self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)
self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)
self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)
self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)
self.outconv = nn.Conv2d(6, out_ch, 1)
def forward(self, x):
hx = x
# encoder
hx1 = self.en1(hx)
hx = self.pool(hx1)
hx2 = self.en2(hx)
hx = self.pool(hx2)
hx3 = self.en3(hx)
hx = self.pool(hx3)
hx4 = self.en4(hx)
hx = self.pool(hx4)
hx5 = self.en5(hx)
hx6 = self.en6(hx5)
# decoder
hx5d = self.de5(torch.add(hx6, hx5))
hx5up = _upsample_like(hx5d, hx4)
hx4d = self.de4(torch.add(hx5up, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.de3(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.de2(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hx1d = self.de1(hx2up)
# output
sup1 = self.side1(hx1d)
sup2 = self.side2(hx2d)
sup2 = _upsample_like(sup2, sup1)
sup3 = self.side3(hx3d)
sup3 = _upsample_like(sup3, sup1)
sup4 = self.side4(hx4d)
sup4 = _upsample_like(sup4, sup1)
sup5 = self.side5(hx5d)
sup5 = _upsample_like(sup5, sup1)
sup6 = self.side6(hx6)
sup6 = _upsample_like(sup6, sup1)
final_fusion_loss = self.outconv(torch.cat((sup1, sup2, sup3, sup4, sup5, sup6), 1))
        return (torch.sigmoid(final_fusion_loss), torch.sigmoid(sup1), torch.sigmoid(sup2), torch.sigmoid(sup3),
                torch.sigmoid(sup4), torch.sigmoid(sup5), torch.sigmoid(sup6))
class NUSNetSAM(nn.Module):
def __init__(self, in_ch=3, out_ch=1):
super(NUSNetSAM, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.en1 = NUSUnit7S(in_ch, 32, 64)
self.en2 = NUSUnit6S(64, 32, 128)
self.en3 = NUSUnit5S(128, 64, 256)
self.en4 = NUSUnit4S(256, 128, 512)
self.en5 = NUSUnitSmallS(512, 256, 512)
self.en6 = NUSUnitSmallS(512, 256, 512)
self.de5 = NUSUnitSmallS(512, 256, 512)
self.de4 = NUSUnit4S(512, 128, 256)
self.de3 = NUSUnit5S(256, 64, 128)
self.de2 = NUSUnit6S(128, 32, 64)
self.de1 = NUSUnit7S(64, 32, 64)
self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)
self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)
self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)
self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)
self.outconv = nn.Conv2d(6, out_ch, 1)
def forward(self, x):
hx = x
# encoder
hx1 = self.en1(hx)
hx = self.pool(hx1)
hx2 = self.en2(hx)
hx = self.pool(hx2)
hx3 = self.en3(hx)
hx = self.pool(hx3)
hx4 = self.en4(hx)
hx = self.pool(hx4)
hx5 = self.en5(hx)
hx6 = self.en6(hx5)
# decoder
hx5d = self.de5(torch.add(hx6, hx5))
hx5up = _upsample_like(hx5d, hx4)
hx4d = self.de4(torch.add(hx5up, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.de3(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.de2(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hx1d = self.de1(hx2up)
# output
sup1 = self.side1(hx1d)
sup2 = self.side2(hx2d)
sup2 = _upsample_like(sup2, sup1)
sup3 = self.side3(hx3d)
sup3 = _upsample_like(sup3, sup1)
sup4 = self.side4(hx4d)
sup4 = _upsample_like(sup4, sup1)
sup5 = self.side5(hx5d)
sup5 = _upsample_like(sup5, sup1)
sup6 = self.side6(hx6)
sup6 = _upsample_like(sup6, sup1)
final_fusion_loss = self.outconv(torch.cat((sup1, sup2, sup3, sup4, sup5, sup6), 1))
        return (torch.sigmoid(final_fusion_loss), torch.sigmoid(sup1), torch.sigmoid(sup2), torch.sigmoid(sup3),
                torch.sigmoid(sup4), torch.sigmoid(sup5), torch.sigmoid(sup6))
class NUSNetCAM(nn.Module):
def __init__(self, in_ch=3, out_ch=1):
super(NUSNetCAM, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.en1 = NUSUnit7C(in_ch, 32, 64)
self.en2 = NUSUnit6C(64, 32, 128)
self.en3 = NUSUnit5C(128, 64, 256)
self.en4 = NUSUnit4C(256, 128, 512)
self.en5 = NUSUnitSmallC(512, 256, 512)
self.en6 = NUSUnitSmallC(512, 256, 512)
self.de5 = NUSUnitSmallC(512, 256, 512)
self.de4 = NUSUnit4C(512, 128, 256)
self.de3 = NUSUnit5C(256, 64, 128)
self.de2 = NUSUnit6C(128, 32, 64)
self.de1 = NUSUnit7C(64, 32, 64)
self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)
self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)
self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)
self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)
self.outconv = nn.Conv2d(6, out_ch, 1)
def forward(self, x):
hx = x
# encoder
hx1 = self.en1(hx)
hx = self.pool(hx1)
hx2 = self.en2(hx)
hx = self.pool(hx2)
hx3 = self.en3(hx)
hx = self.pool(hx3)
hx4 = self.en4(hx)
hx = self.pool(hx4)
hx5 = self.en5(hx)
hx6 = self.en6(hx5)
# decoder
hx5d = self.de5(torch.add(hx6, hx5))
hx5up = _upsample_like(hx5d, hx4)
hx4d = self.de4(torch.add(hx5up, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.de3(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.de2(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hx1d = self.de1(hx2up)
# output
sup1 = self.side1(hx1d)
sup2 = self.side2(hx2d)
sup2 = _upsample_like(sup2, sup1)
sup3 = self.side3(hx3d)
sup3 = _upsample_like(sup3, sup1)
sup4 = self.side4(hx4d)
sup4 = _upsample_like(sup4, sup1)
sup5 = self.side5(hx5d)
sup5 = _upsample_like(sup5, sup1)
sup6 = self.side6(hx6)
sup6 = _upsample_like(sup6, sup1)
final_fusion_loss = self.outconv(torch.cat((sup1, sup2, sup3, sup4, sup5, sup6), 1))
        return (torch.sigmoid(final_fusion_loss), torch.sigmoid(sup1), torch.sigmoid(sup2), torch.sigmoid(sup3),
                torch.sigmoid(sup4), torch.sigmoid(sup5), torch.sigmoid(sup6))
class NUSNet4(nn.Module):
def __init__(self, in_ch=3, out_ch=1):
super(NUSNet4, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.en1 = NUSUnit4S(in_ch, 32, 64)
self.en2 = NUSUnit4S(64, 32, 128)
self.en3 = NUSUnit4S(128, 64, 256)
self.en4 = NUSUnit4C(256, 128, 512)
self.en5 = NUSUnit4C(512, 256, 512)
self.en6 = NUSUnit4C(512, 256, 512)
self.de5 = NUSUnit4C(512, 256, 512)
self.de4 = NUSUnit4C(512, 128, 256)
self.de3 = NUSUnit4S(256, 64, 128)
self.de2 = NUSUnit4S(128, 32, 64)
self.de1 = NUSUnit4S(64, 32, 64)
self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)
self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)
self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)
self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)
self.outconv = nn.Conv2d(6, out_ch, 1)
def forward(self, x):
hx = x
# encoder
hx1 = self.en1(hx)
hx = self.pool(hx1)
hx2 = self.en2(hx)
hx = self.pool(hx2)
hx3 = self.en3(hx)
hx = self.pool(hx3)
hx4 = self.en4(hx)
hx = self.pool(hx4)
hx5 = self.en5(hx)
hx6 = self.en6(hx5)
# decoder
hx5d = self.de5(torch.add(hx6, hx5))
hx5up = _upsample_like(hx5d, hx4)
hx4d = self.de4(torch.add(hx5up, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.de3(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.de2(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hx1d = self.de1(hx2up)
# output
sup1 = self.side1(hx1d)
sup2 = self.side2(hx2d)
sup2 = _upsample_like(sup2, sup1)
sup3 = self.side3(hx3d)
sup3 = _upsample_like(sup3, sup1)
sup4 = self.side4(hx4d)
sup4 = _upsample_like(sup4, sup1)
sup5 = self.side5(hx5d)
sup5 = _upsample_like(sup5, sup1)
sup6 = self.side6(hx6)
sup6 = _upsample_like(sup6, sup1)
final_fusion_loss = self.outconv(torch.cat((sup1, sup2, sup3, sup4, sup5, sup6), 1))
        return (torch.sigmoid(final_fusion_loss), torch.sigmoid(sup1), torch.sigmoid(sup2), torch.sigmoid(sup3),
                torch.sigmoid(sup4), torch.sigmoid(sup5), torch.sigmoid(sup6))
class NUSNet5(nn.Module):
def __init__(self, in_ch=3, out_ch=1):
super(NUSNet5, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.en1 = NUSUnit5S(in_ch, 32, 64)
self.en2 = NUSUnit5S(64, 32, 128)
self.en3 = NUSUnit5S(128, 64, 256)
self.en4 = NUSUnit5C(256, 128, 512)
self.en5 = NUSUnit5C(512, 256, 512)
self.en6 = NUSUnit5C(512, 256, 512)
self.de5 = NUSUnit5C(512, 256, 512)
self.de4 = NUSUnit5C(512, 128, 256)
self.de3 = NUSUnit5S(256, 64, 128)
self.de2 = NUSUnit5S(128, 32, 64)
self.de1 = NUSUnit5S(64, 32, 64)
self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)
self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)
self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)
self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)
self.outconv = nn.Conv2d(6, out_ch, 1)
def forward(self, x):
hx = x
# encoder
hx1 = self.en1(hx)
hx = self.pool(hx1)
hx2 = self.en2(hx)
hx = self.pool(hx2)
hx3 = self.en3(hx)
hx = self.pool(hx3)
hx4 = self.en4(hx)
hx = self.pool(hx4)
hx5 = self.en5(hx)
hx6 = self.en6(hx5)
# decoder
hx5d = self.de5(torch.add(hx6, hx5))
hx5up = _upsample_like(hx5d, hx4)
hx4d = self.de4(torch.add(hx5up, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.de3(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.de2(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hx1d = self.de1(hx2up)
# output
sup1 = self.side1(hx1d)
sup2 = self.side2(hx2d)
sup2 = _upsample_like(sup2, sup1)
sup3 = self.side3(hx3d)
sup3 = _upsample_like(sup3, sup1)
sup4 = self.side4(hx4d)
sup4 = _upsample_like(sup4, sup1)
sup5 = self.side5(hx5d)
sup5 = _upsample_like(sup5, sup1)
sup6 = self.side6(hx6)
sup6 = _upsample_like(sup6, sup1)
final_fusion_loss = self.outconv(torch.cat((sup1, sup2, sup3, sup4, sup5, sup6), 1))
        return (torch.sigmoid(final_fusion_loss), torch.sigmoid(sup1), torch.sigmoid(sup2), torch.sigmoid(sup3),
                torch.sigmoid(sup4), torch.sigmoid(sup5), torch.sigmoid(sup6))
class NUSNet6(nn.Module):
def __init__(self, in_ch=3, out_ch=1):
super(NUSNet6, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.en1 = NUSUnit6S(in_ch, 32, 64)
self.en2 = NUSUnit6S(64, 32, 128)
self.en3 = NUSUnit6S(128, 64, 256)
self.en4 = NUSUnit6C(256, 128, 512)
self.en5 = NUSUnit6C(512, 256, 512)
self.en6 = NUSUnit6C(512, 256, 512)
self.de5 = NUSUnit6C(512, 256, 512)
self.de4 = NUSUnit6C(512, 128, 256)
self.de3 = NUSUnit6S(256, 64, 128)
self.de2 = NUSUnit6S(128, 32, 64)
self.de1 = NUSUnit6S(64, 32, 64)
self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)
self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)
self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)
self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)
self.outconv = nn.Conv2d(6, out_ch, 1)
def forward(self, x):
hx = x
# encoder
hx1 = self.en1(hx)
hx = self.pool(hx1)
hx2 = self.en2(hx)
hx = self.pool(hx2)
hx3 = self.en3(hx)
hx = self.pool(hx3)
hx4 = self.en4(hx)
hx = self.pool(hx4)
hx5 = self.en5(hx)
hx6 = self.en6(hx5)
# decoder
hx5d = self.de5(torch.add(hx6, hx5))
hx5up = _upsample_like(hx5d, hx4)
hx4d = self.de4(torch.add(hx5up, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.de3(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.de2(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hx1d = self.de1(hx2up)
# output
sup1 = self.side1(hx1d)
sup2 = self.side2(hx2d)
sup2 = _upsample_like(sup2, sup1)
sup3 = self.side3(hx3d)
sup3 = _upsample_like(sup3, sup1)
sup4 = self.side4(hx4d)
sup4 = _upsample_like(sup4, sup1)
sup5 = self.side5(hx5d)
sup5 = _upsample_like(sup5, sup1)
sup6 = self.side6(hx6)
sup6 = _upsample_like(sup6, sup1)
final_fusion_loss = self.outconv(torch.cat((sup1, sup2, sup3, sup4, sup5, sup6), 1))
        return (torch.sigmoid(final_fusion_loss), torch.sigmoid(sup1), torch.sigmoid(sup2), torch.sigmoid(sup3),
                torch.sigmoid(sup4), torch.sigmoid(sup5), torch.sigmoid(sup6))
class NUSNet7(nn.Module):
def __init__(self, in_ch=3, out_ch=1):
super(NUSNet7, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.en1 = NUSUnit7S(in_ch, 32, 64)
self.en2 = NUSUnit7S(64, 32, 128)
self.en3 = NUSUnit7S(128, 64, 256)
self.en4 = NUSUnit7C(256, 128, 512)
self.en5 = NUSUnit7C(512, 256, 512)
self.en6 = NUSUnit7C(512, 256, 512)
self.de5 = NUSUnit7C(512, 256, 512)
self.de4 = NUSUnit7C(512, 128, 256)
self.de3 = NUSUnit7S(256, 64, 128)
self.de2 = NUSUnit7S(128, 32, 64)
self.de1 = NUSUnit7S(64, 32, 64)
self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)
self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)
self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)
self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)
self.outconv = nn.Conv2d(6, out_ch, 1)
def forward(self, x):
hx = x
# encoder
hx1 = self.en1(hx)
hx = self.pool(hx1)
hx2 = self.en2(hx)
hx = self.pool(hx2)
hx3 = self.en3(hx)
hx = self.pool(hx3)
hx4 = self.en4(hx)
hx = self.pool(hx4)
hx5 = self.en5(hx)
hx6 = self.en6(hx5)
# decoder
hx5d = self.de5(torch.add(hx6, hx5))
hx5up = _upsample_like(hx5d, hx4)
hx4d = self.de4(torch.add(hx5up, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.de3(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.de2(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hx1d = self.de1(hx2up)
# output
sup1 = self.side1(hx1d)
sup2 = self.side2(hx2d)
sup2 = _upsample_like(sup2, sup1)
sup3 = self.side3(hx3d)
sup3 = _upsample_like(sup3, sup1)
sup4 = self.side4(hx4d)
sup4 = _upsample_like(sup4, sup1)
sup5 = self.side5(hx5d)
sup5 = _upsample_like(sup5, sup1)
sup6 = self.side6(hx6)
sup6 = _upsample_like(sup6, sup1)
final_fusion_loss = self.outconv(torch.cat((sup1, sup2, sup3, sup4, sup5, sup6), 1))
        return (torch.sigmoid(final_fusion_loss), torch.sigmoid(sup1), torch.sigmoid(sup2), torch.sigmoid(sup3),
                torch.sigmoid(sup4), torch.sigmoid(sup5), torch.sigmoid(sup6))
class NUSNetCBAM(nn.Module):
def __init__(self, in_ch=3, out_ch=1):
super(NUSNetCBAM, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.en1 = NUSUnit7CS(in_ch, 32, 64)
self.en2 = NUSUnit6CS(64, 32, 128)
self.en3 = NUSUnit5CS(128, 64, 256)
self.en4 = NUSUnit4CS(256, 128, 512)
self.en5 = NUSUnitSmallCS(512, 256, 512)
self.en6 = NUSUnitSmallCS(512, 256, 512)
self.de5 = NUSUnitSmallCS(512, 256, 512)
self.de4 = NUSUnit4CS(512, 128, 256)
self.de3 = NUSUnit5CS(256, 64, 128)
self.de2 = NUSUnit6CS(128, 32, 64)
self.de1 = NUSUnit7CS(64, 32, 64)
self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)
self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)
self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)
self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)
self.outconv = nn.Conv2d(6, out_ch, 1)
def forward(self, x):
hx = x
# encoder
hx1 = self.en1(hx)
hx = self.pool(hx1)
hx2 = self.en2(hx)
hx = self.pool(hx2)
hx3 = self.en3(hx)
hx = self.pool(hx3)
hx4 = self.en4(hx)
hx = self.pool(hx4)
hx5 = self.en5(hx)
hx6 = self.en6(hx5)
# decoder
hx5d = self.de5(torch.add(hx6, hx5))
hx5up = _upsample_like(hx5d, hx4)
hx4d = self.de4(torch.add(hx5up, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.de3(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.de2(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hx1d = self.de1(hx2up)
# output
sup1 = self.side1(hx1d)
sup2 = self.side2(hx2d)
sup2 = _upsample_like(sup2, sup1)
sup3 = self.side3(hx3d)
sup3 = _upsample_like(sup3, sup1)
sup4 = self.side4(hx4d)
sup4 = _upsample_like(sup4, sup1)
sup5 = self.side5(hx5d)
sup5 = _upsample_like(sup5, sup1)
sup6 = self.side6(hx6)
sup6 = _upsample_like(sup6, sup1)
final_fusion_loss = self.outconv(torch.cat((sup1, sup2, sup3, sup4, sup5, sup6), 1))
        return (torch.sigmoid(final_fusion_loss), torch.sigmoid(sup1), torch.sigmoid(sup2), torch.sigmoid(sup3),
                torch.sigmoid(sup4), torch.sigmoid(sup5), torch.sigmoid(sup6))
class NUSNet765CAM4SMALLSAM(nn.Module):
def __init__(self, in_ch=3, out_ch=1):
super(NUSNet765CAM4SMALLSAM, self).__init__()
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.en1 = NUSUnit7C(in_ch, 32, 64)
self.en2 = NUSUnit6C(64, 32, 128)
self.en3 = NUSUnit5C(128, 64, 256)
self.en4 = NUSUnit4S(256, 128, 512)
self.en5 = NUSUnitSmallS(512, 256, 512)
self.en6 = NUSUnitSmallS(512, 256, 512)
self.de5 = NUSUnitSmallS(512, 256, 512)
self.de4 = NUSUnit4S(512, 128, 256)
self.de3 = NUSUnit5C(256, 64, 128)
self.de2 = NUSUnit6C(128, 32, 64)
self.de1 = NUSUnit7C(64, 32, 64)
self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)
self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)
self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)
self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)
self.outconv = nn.Conv2d(6, out_ch, 1)
def forward(self, x):
hx = x
# encoder
hx1 = self.en1(hx)
hx = self.pool(hx1)
hx2 = self.en2(hx)
hx = self.pool(hx2)
hx3 = self.en3(hx)
hx = self.pool(hx3)
hx4 = self.en4(hx)
hx = self.pool(hx4)
hx5 = self.en5(hx)
hx6 = self.en6(hx5)
# decoder
hx5d = self.de5(torch.add(hx6, hx5))
hx5up = _upsample_like(hx5d, hx4)
hx4d = self.de4(torch.add(hx5up, hx4))
hx4up = _upsample_like(hx4d, hx3)
hx3d = self.de3(torch.add(hx4up, hx3))
hx3up = _upsample_like(hx3d, hx2)
hx2d = self.de2(torch.add(hx3up, hx2))
hx2up = _upsample_like(hx2d, hx1)
hx1d = self.de1(hx2up)
# output
sup1 = self.side1(hx1d)
sup2 = self.side2(hx2d)
sup2 = _upsample_like(sup2, sup1)
sup3 = self.side3(hx3d)
sup3 = _upsample_like(sup3, sup1)
sup4 = self.side4(hx4d)
sup4 = _upsample_like(sup4, sup1)
sup5 = self.side5(hx5d)
sup5 = _upsample_like(sup5, sup1)
sup6 = self.side6(hx6)
sup6 = _upsample_like(sup6, sup1)
final_fusion_loss = self.outconv(torch.cat((sup1, sup2, sup3, sup4, sup5, sup6), 1))
        return (torch.sigmoid(final_fusion_loss), torch.sigmoid(sup1), torch.sigmoid(sup2), torch.sigmoid(sup3),
                torch.sigmoid(sup4), torch.sigmoid(sup5), torch.sigmoid(sup6))
# torchsummary
# model = NUSNetCBAM()
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = model.to(device)
# print(summary(model, (3, 320, 320)))
# # torchstat
# model = NUSNet()
# print(stat(model, (3, 320, 320)))
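# Editorial usage sketch (not from the original repository; shapes assume the
# default out_ch=1): every NUSNet variant returns the fused prediction followed
# by the six side outputs, all already passed through a sigmoid.
# model = NUSNet()
# dummy = torch.randn(1, 3, 320, 320)
# fused, sup1, sup2, sup3, sup4, sup5, sup6 = model(dummy)
# print(fused.shape)  # torch.Size([1, 1, 320, 320])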
``` |
{
"source": "jloosli/Adafruit-Raspberry-Pi-Python-Code",
"score": 3
} |
#### File: Adafruit-Raspberry-Pi-Python-Code/Adafruit_GFX/Adafruit_GFX.py
```python
class Adafruit_GFX:
    def __init__(self, width, height):
        self.WIDTH = width        # raw display size, fixed at construction
        self.HEIGHT = height
        self._width = width       # current size, swapped when the rotation changes
        self._height = height
        self.rotation = 0
        self.cursor_y = self.cursor_x = 0
        self.textsize = 1
        self.textcolor = self.textbgcolor = 0xFFFF
        self.wrap = True
''' draw a circle outline '''
    def drawCircle(self, x0, y0, r, color):
        f = 1 - r
        ddF_x = 1
        ddF_y = -2 * r
        x = 0
        y = r
        self.drawPixel(x0, y0+r, color)
        self.drawPixel(x0, y0-r, color)
        self.drawPixel(x0+r, y0, color)
        self.drawPixel(x0-r, y0, color)
        while x < y:
            if f >= 0:
                y -= 1
                ddF_y += 2
                f += ddF_y
            x += 1
            ddF_x += 2
            f += ddF_x
            self.drawPixel(x0 + x, y0 + y, color)
            self.drawPixel(x0 - x, y0 + y, color)
            self.drawPixel(x0 + x, y0 - y, color)
            self.drawPixel(x0 - x, y0 - y, color)
            self.drawPixel(x0 + y, y0 + x, color)
            self.drawPixel(x0 - y, y0 + x, color)
            self.drawPixel(x0 + y, y0 - x, color)
            self.drawPixel(x0 - y, y0 - x, color)
    def drawCircleHelper(self, x0, y0, r, cornername, color):
        f = 1 - r
        ddF_x = 1
        ddF_y = -2 * r
        x = 0
        y = r
        while x < y:
            if f >= 0:
                y -= 1
                ddF_y += 2
                f += ddF_y
            x += 1
            ddF_x += 2
            f += ddF_x
            if cornername & 0x4:
                self.drawPixel(x0 + x, y0 + y, color)
                self.drawPixel(x0 + y, y0 + x, color)
            if cornername & 0x2:
                self.drawPixel(x0 + x, y0 - y, color)
                self.drawPixel(x0 + y, y0 - x, color)
            if cornername & 0x8:
                self.drawPixel(x0 - y, y0 + x, color)
                self.drawPixel(x0 - x, y0 + y, color)
            if cornername & 0x1:
                self.drawPixel(x0 - y, y0 - x, color)
                self.drawPixel(x0 - x, y0 - y, color)
    def fillCircle(self, x0, y0, r, color):
        self.drawFastVLine(x0, y0-r, 2*r+1, color)
        self.fillCircleHelper(x0, y0, r, 3, 0, color)
    ''' used to do circles and roundrects!'''
    def fillCircleHelper(self, x0, y0, r, cornername, delta, color):
        f = 1 - r
        ddF_x = 1
        ddF_y = -2 * r
        x = 0
        y = r
        while x < y:
            if f >= 0:
                y -= 1
                ddF_y += 2
                f += ddF_y
            x += 1
            ddF_x += 2
            f += ddF_x
            if cornername & 0x1:
                self.drawFastVLine(x0+x, y0-y, 2*y+1+delta, color)
                self.drawFastVLine(x0+y, y0-x, 2*x+1+delta, color)
            if cornername & 0x2:
                self.drawFastVLine(x0-x, y0-y, 2*y+1+delta, color)
                self.drawFastVLine(x0-y, y0-x, 2*x+1+delta, color)
''' bresenham's algorithm - thx wikpedia'''
    def drawLine(self, x0, y0, x1, y1, color):
        steep = abs(y1 - y0) > abs(x1 - x0)
        if steep:
            x0, y0 = y0, x0
            x1, y1 = y1, x1
        if x0 > x1:
            x0, x1 = x1, x0
            y0, y1 = y1, y0
        dx = x1 - x0
        dy = abs(y1 - y0)
        err = dx // 2
        if y0 < y1:
            ystep = 1
        else:
            ystep = -1
        while x0 <= x1:
            if steep:
                self.drawPixel(y0, x0, color)
            else:
                self.drawPixel(x0, y0, color)
            err -= dy
            if err < 0:
                y0 += ystep
                err += dx
            x0 += 1
''' draw a rectangle'''
    def drawRect(self, x, y, w, h, color):
        self.drawFastHLine(x, y, w, color)
        self.drawFastHLine(x, y+h-1, w, color)
        self.drawFastVLine(x, y, h, color)
        self.drawFastVLine(x+w-1, y, h, color)
    def drawFastVLine(self, x, y, h, color):
        ''' stupidest version - update in subclasses if desired!'''
        self.drawLine(x, y, x, y+h-1, color)
    def drawFastHLine(self, x, y, w, color):
        ''' stupidest version - update in subclasses if desired!'''
        self.drawLine(x, y, x+w-1, y, color)
    def fillRect(self, x, y, w, h, color):
        ''' stupidest version - update in subclasses if desired!'''
        for i in range(x, x+w):
            self.drawFastVLine(i, y, h, color)
    def fillScreen(self, color):
        self.fillRect(0, 0, self._width, self._height, color)
    ''' draw a rounded rectangle!'''
    def drawRoundRect(self, x, y, w, h, r, color):
        ''' smarter version'''
        self.drawFastHLine(x+r, y, w-2*r, color)      # Top
        self.drawFastHLine(x+r, y+h-1, w-2*r, color)  # Bottom
        self.drawFastVLine(x, y+r, h-2*r, color)      # Left
        self.drawFastVLine(x+w-1, y+r, h-2*r, color)  # Right
        ''' draw four corners'''
        self.drawCircleHelper(x+r, y+r, r, 1, color)
        self.drawCircleHelper(x+w-r-1, y+r, r, 2, color)
        self.drawCircleHelper(x+w-r-1, y+h-r-1, r, 4, color)
        self.drawCircleHelper(x+r, y+h-r-1, r, 8, color)
    ''' fill a rounded rectangle!'''
    def fillRoundRect(self, x, y, w, h, r, color):
        ''' smarter version'''
        self.fillRect(x+r, y, w-2*r, h, color)
        ''' draw four corners'''
        self.fillCircleHelper(x+w-r-1, y+r, r, 1, h-2*r-1, color)
        self.fillCircleHelper(x+r, y+r, r, 2, h-2*r-1, color)
''' draw a triangle!'''
    def drawTriangle(self, x0, y0, x1, y1, x2, y2, color):
        self.drawLine(x0, y0, x1, y1, color)
        self.drawLine(x1, y1, x2, y2, color)
        self.drawLine(x2, y2, x0, y0, color)
    ''' fill a triangle!'''
    def fillTriangle(self, x0, y0, x1, y1, x2, y2, color):
        ''' Sort coordinates by Y order (y2 >= y1 >= y0)'''
        if y0 > y1:
            y0, y1 = y1, y0
            x0, x1 = x1, x0
        if y1 > y2:
            y2, y1 = y1, y2
            x2, x1 = x1, x2
        if y0 > y1:
            y0, y1 = y1, y0
            x0, x1 = x1, x0
        if y0 == y2:
            ''' Handle awkward all-on-same-line case as its own thing'''
            a = b = x0
            if x1 < a:
                a = x1
            elif x1 > b:
                b = x1
            if x2 < a:
                a = x2
            elif x2 > b:
                b = x2
            self.drawFastHLine(a, y0, b-a+1, color)
            return
        dx01 = x1 - x0
        dy01 = y1 - y0
        dx02 = x2 - x0
        dy02 = y2 - y0
        dx12 = x2 - x1
        dy12 = y2 - y1
        sa = 0
        sb = 0
        ''' For upper part of triangle, find scanline crossings for segments'''
        ''' 0-1 and 0-2. If y1=y2 (flat-bottomed triangle), the scanline y1'''
        ''' is included here (and second loop will be skipped, avoiding a /0'''
        ''' error there), otherwise scanline y1 is skipped here and handled'''
        ''' in the second loop...which also avoids a /0 error here if y0=y1'''
        ''' (flat-topped triangle).'''
        if y1 == y2:
            last = y1      # Include y1 scanline
        else:
            last = y1 - 1  # Skip it
        y = y0
        while y <= last:
            a = x0 + sa // dy01
            b = x0 + sb // dy02
            sa += dx01
            sb += dx02
            ''' longhand:
            a = x0 + (x1 - x0) * (y - y0) / (y1 - y0)
            b = x0 + (x2 - x0) * (y - y0) / (y2 - y0)
            '''
            if a > b:
                a, b = b, a
            self.drawFastHLine(a, y, b-a+1, color)
            y += 1
        ''' For lower part of triangle, find scanline crossings for segments'''
        ''' 0-2 and 1-2. This loop is skipped if y1=y2.'''
        sa = dx12 * (y - y1)
        sb = dx02 * (y - y0)
        while y <= y2:
            a = x1 + sa // dy12
            b = x0 + sb // dy02
            sa += dx12
            sb += dx02
            ''' longhand:
            a = x1 + (x2 - x1) * (y - y1) / (y2 - y1)
            b = x0 + (x2 - x0) * (y - y0) / (y2 - y0)
            '''
            if a > b:
                a, b = b, a
            self.drawFastHLine(a, y, b-a+1, color)
            y += 1
    def drawBitmap(self, x, y, bitmap, w, h, color):
        byteWidth = (w + 7) // 8
        for j in range(h):
            for i in range(w):
                # pgm_read_byte comes from the original AVR/C library; a pure
                # Python port would index into a bytes/bytearray bitmap instead.
                if pgm_read_byte(bitmap + j * byteWidth + i // 8) & (128 >> (i & 7)):
                    self.drawPixel(x+i, y+j, color)
    def write(self, c):
        if c == '\n':
            self.cursor_y += self.textsize*8
            self.cursor_x = 0
        elif c == '\r':
            pass  # skip em
        else:
            self.drawChar(self.cursor_x, self.cursor_y, c, self.textcolor, self.textbgcolor, self.textsize)
            self.cursor_x += self.textsize*6
            if self.wrap and (self.cursor_x > (self._width - self.textsize*6)):
                self.cursor_y += self.textsize*8
                self.cursor_x = 0
        return 1
    ''' draw a character'''
    def drawChar(self, x, y, c, color, bg, size):
        if ((x >= self._width) or            # Clip right
                (y >= self._height) or       # Clip bottom
                ((x + 5 * size - 1) < 0) or  # Clip left
                ((y + 8 * size - 1) < 0)):   # Clip top
            return
        for i in range(6):
            if i == 5:
                line = 0x0
            else:
                # font and pgm_read_byte are carried over from the C original;
                # a Python port needs a font table lookup here instead.
                line = pgm_read_byte(font + (c * 5) + i)
            for j in range(8):
                if line & 0x1:
                    if size == 1:  # default size
                        self.drawPixel(x+i, y+j, color)
                    else:          # big size
                        self.fillRect(x+(i*size), y+(j*size), size, size, color)
                elif bg != color:
                    if size == 1:  # default size
                        self.drawPixel(x+i, y+j, bg)
                    else:          # big size
                        self.fillRect(x+i*size, y+j*size, size, size, bg)
                line >>= 1
    def setCursor(self, x, y):
        self.cursor_x = x
        self.cursor_y = y
    def setTextSize(self, s):
        self.textsize = s if s > 0 else 1
    ''' for 'transparent' background, we'll set the bg '''
    ''' to the same as fg instead of using a flag'''
    def setTextColor(self, c, bg=None):
        self.textcolor = c
        self.textbgcolor = c if bg is None else bg
    def setTextWrap(self, w):
        self.wrap = w
    def getRotation(self):
        self.rotation %= 4
        return self.rotation
    def setRotation(self, x):
        x %= 4  # cant be higher than 3
        self.rotation = x
        if x in (0, 2):
            self._width = self.WIDTH
            self._height = self.HEIGHT
        else:  # 1 or 3
            self._width = self.HEIGHT
            self._height = self.WIDTH
    def invertDisplay(self, i):
        ''' do nothing, can be subclassed'''
        pass
    ''' return the size of the display which depends on the rotation!'''
    def width(self):
        return self._width
    def height(self):
        return self._height
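    # Editorial note (not part of the original port): drawPixel() is never
    # defined in this class; a concrete display driver is expected to subclass
    # Adafruit_GFX and supply it, for example:
    #
    # class MyDisplay(Adafruit_GFX):
    #     def drawPixel(self, x, y, color):
    #         framebuffer[y][x] = color  # hypothetical framebuffer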
``` |
{
"source": "jlopez0591/SIGIA",
"score": 2
} |
#### File: SIGIA/actividades/managers.py
```python
from django.db import models
from datetime import datetime as dt
# from polymorphic.manager import PolymorphicManager
from polymorphic.managers import PolymorphicManager
class ActividadQuerySet(models.QuerySet):
def en_espera(self):
return self.filter(estado='espera')
def rechazado(self):
return self.filter(estado='rechazado')
def aprobado(self):
return self.filter(estado='aprobado')
def puede_aprobar(self, usuario):
return self.filter(estado='espera', departamento=usuario.perfil.departamento)
def propias(self, usuario):
return self.filter(usuario=usuario)
def actuales(self):
fecha = dt.now()
return self.filter(fecha__year=fecha.year)
class ActividadManager(PolymorphicManager):
def get_queryset(self):
return ActividadQuerySet(self.model, using=self._db)
def en_espera(self):
return self.get_queryset().en_espera()
def rechazado(self):
return self.get_queryset().rechazado()
def aprobado(self):
return self.get_queryset().aprobado()
def puede_aprobar(self, usuario):
return self.get_queryset().puede_aprobar(usuario)
def propias(self, usuario):
return self.get_queryset().propias(usuario)
def actuales(self):
return self.get_queryset().actuales()
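# Editorial usage sketch (the model name "Actividad" is assumed, it is not shown
# in this file): because every manager method proxies the custom queryset, the
# filters can be chained, e.g.
# Actividad.objects.actuales().en_espera()
# Actividad.objects.puede_aprobar(request.user)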
```
#### File: SIGIA/core/autocomplete.py
```python
from django.contrib.auth.models import User
from dal import autocomplete
class PerfilAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if self.request.user.is_superuser:
qs = User.objects.all()
else:
return User.objects.none()
if self.q:
qs = qs.filter(
username__icontains=self.q
)
return qs
```
#### File: SIGIA/core/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from core.managers import UsuarioManager
from auditlog.registry import auditlog
from auditlog.models import AuditlogHistoryField
class Usuario(User):
objects = UsuarioManager()
class Meta:
proxy = True
def __str__(self):
return '{}, {} - {}'.format(self.last_name, self.first_name, self.perfil.cod_profesor)
class Notificacion(models.Model):
history = AuditlogHistoryField()
usuario = models.ForeignKey(User, on_delete=models.CASCADE)
titulo = models.CharField(blank=True, max_length=120)
texto = models.CharField(max_length=120)
url = models.URLField(blank=True)
def __str__(self):
return '{}-{}'.format(self.usuario.username, self.titulo)
def get_name(self):
return '{} {}'.format(self.first_name, self.last_name)
User.add_to_class("__str__", get_name)
auditlog.register(Notificacion)
```
#### File: SIGIA/estudiantes/forms.py
```python
from django import forms
from dal import autocomplete
from django.core.exceptions import ValidationError
from estudiantes.models import Estudiante, TrabajoGraduacion
from core.models import Usuario
class StudentUpdateForm(forms.ModelForm):
class Meta:
model = Estudiante
fields = (
# Info del nombre
'primer_nombre', 'segundo_nombre', 'primer_apellido', 'segundo_apellido',
# Info de la cedula
'provincia', 'clase', 'tomo', 'folio',
# Info Personal
'sexo', 'direccion', 'telefono', 'fecha_nacimiento',
'discapacidad', 'tipo_sangre',
# Info de contacto
'pais', 'correo', 'telefono_oficina', 'celular',
'celular_oficina',
# Info academica
'cod_sede', 'cod_facultad', 'cod_escuela', 'cod_carrera',
'turno', 'fecha_ingreso', 'semestre', 'ultimo_anio',
'ultimo_semestre', 'fecha_graduacion'
)
labels = {
'provincia': 'Provincia',
'clase': 'Clase',
'tomo': 'Tomo',
'folio': 'Folio'
}
widgets = {
'primer_nombre': forms.TextInput(attrs={
'class': 'form-control'
}),
'segundo_nombre': forms.TextInput(attrs={
'class': 'form-control'
}),
'primer_apellido': forms.TextInput(attrs={
'class': 'form-control'
}),
'segundo_apellido': forms.TextInput(attrs={
'class': 'form-control'
}),
'provincia': forms.Select(attrs={
'class': 'form-control cedula',
'disabled': 'true'
}),
'clase': forms.Select(attrs={
'class': 'form-control cedula'
}),
'tomo': forms.TextInput(attrs={
'class': 'form-control cedula'
}),
'folio': forms.TextInput(attrs={
'class': 'form-control cedula'
}),
'sexo': forms.Select(attrs={
'class': 'form-control'
}),
'direccion': forms.TextInput(attrs={
'class': 'form-control'
}),
'telefono': forms.TextInput(attrs={
'class': 'form-control'
}),
'fecha_nacimiento': forms.TextInput(attrs={
'class': 'form-control datepicker'
}),
'discapacidad': forms.Textarea(attrs={
'class': 'form-control'
}),
'tipo_sangre': forms.Select(attrs={
'class': 'form-control'
}),
'pais': forms.Select(attrs={
'class': 'form-control'
}),
'correo': forms.TextInput(attrs={
'class': 'form-control'
}),
'telefono_oficina': forms.TextInput(attrs={
'class': 'form-control'
}),
'celular': forms.TextInput(attrs={
'class': 'form-control'
}),
'celular_oficina': forms.TextInput(attrs={
'class': 'form-control'
}),
'cod_sede': forms.TextInput(attrs={
'class': 'form-control carrera'
}),
'cod_facultad': forms.TextInput(attrs={
'class': 'form-control carrera'
}),
'cod_escuela': forms.TextInput(attrs={
'class': 'form-control carrera'
}),
'cod_carrera': forms.TextInput(attrs={
'class': 'form-control carrera'
}),
'turno': forms.TextInput(attrs={
'class': 'form-control'
}),
'fecha_ingreso': forms.TextInput(attrs={
'class': 'form-control datepicker'
}),
'semestre': forms.TextInput(attrs={
'class': 'form-control'
}),
'ultimo_anio': forms.TextInput(attrs={
'class': 'form-control'
}),
'ultimo_semestre': forms.TextInput(attrs={
'class': 'form-control'
}),
'fecha_graduacion': forms.TextInput(attrs={
'class': 'form-control datepicker'
}),
}
class TrabajoForm(forms.ModelForm):
class Meta:
model = TrabajoGraduacion
fields = ('nombre_proyecto', 'estudiantes', 'asesor', 'estado', 'programa',
'cod_carrera', 'fecha_entrega', 'fecha_sustentacion', 'jurados', 'nota',
'archivo_anteproyecto', 'archivo_trabajo')
labels = {
'cod_carrera': 'Codigo de Carrera',
'nombre_proyecto': 'Nombre del Proyecto',
'fecha_entrega': 'Fecha de Entrega',
'fecha_sustentacion': 'Fecha de Sustentacion',
'nota': 'Nota Obtenida',
}
widgets = {
'cod_carrera': forms.TextInput(attrs={
'class': 'form-control'
}),
'nombre_proyecto': forms.TextInput(attrs={
'class': 'form-control'
}),
'estudiantes': forms.SelectMultiple(),
'asesor': forms.Select(attrs={
'class': 'custom-select form-control'
}),
'estado': forms.Select(attrs={
'class': 'custom-select form-control'
}),
'programa': forms.Select(attrs={
'class': 'custom-select form-control'
}),
'fecha_entrega': forms.TextInput(attrs={
'class': 'form-control datepicker'
}),
'fecha_sustentacion': forms.TextInput(attrs={
'class': 'form-control datepicker'
}),
'nota': forms.TextInput(attrs={
'class': 'form-control'
}),
'archivo_anteproyecto': forms.FileInput(attrs={
'class': 'form-control-file'
}),
'archivo_trabajo': forms.FileInput(attrs={
'class': 'form-control-file'
})
}
placeholder = {
'fecha_sustentacion': 'yyyy-mm-dd'
}
def __init__(self, *args, **kwargs):
facultad = kwargs.pop('facultad')
super(TrabajoForm, self).__init__(*args, **kwargs)
self.fields['estudiantes'].queryset = Estudiante.objects.activos().filter(facultad=facultad)
def clean(self):
estudiantes = self.cleaned_data.get('estudiantes')
jurados = self.cleaned_data.get('jurados')
if estudiantes.count() > 3:
raise ValidationError('Seleccione 3 estudiantes max.')
if jurados.count() > 3:
raise ValidationError('Seleccione 3 jurados max.')
return self.cleaned_data
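# Editorial usage sketch (view-side names assumed): since __init__ pops a
# 'facultad' keyword, callers must always pass it, e.g.
# form = TrabajoForm(request.POST or None, facultad=request.user.perfil.facultad)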
```
#### File: SIGIA/inventario/autocomplete.py
```python
from dal import autocomplete
from inventario.models import Categoria, Fabricante, Modelo, Aula
class AulaAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if self.request.user.is_authenticated():
qs = Aula.objects.all()
else:
return Aula.objects.none()
if self.q:
qs = qs.filter(nombre__icontains=self.q)
return qs
class FabricanteAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if self.request.user.is_authenticated():
qs = Fabricante.objects.all()
else:
return Fabricante.objects.none()
if self.q:
qs = qs.filter(nombre__icontains=self.q)
return qs
class CategoriaAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if self.request.user.is_authenticated():
qs = Categoria.objects.all()
else:
return Categoria.objects.none()
if self.q:
qs = qs.filter(nombre__icontains=self.q)
return qs
class ModeloAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if self.request.user.is_authenticated():
qs = Modelo.objects.all()
else:
return Modelo.objects.none()
if self.q:
qs = qs.filter(nombre__icontains=self.q)
return qs
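# Editorial wiring sketch (URL names assumed): these views are typically mapped
# in urls.py and referenced from a form widget, e.g.
# path('aula-autocomplete/', AulaAutocomplete.as_view(), name='aula-autocomplete')
# aula = forms.ModelChoiceField(queryset=Aula.objects.all(),
#                               widget=autocomplete.ModelSelect2(url='aula-autocomplete'))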
```
#### File: inventario/views/equipo.py
```python
from django.views.generic import CreateView, DetailView, UpdateView, ListView
from django.urls import reverse_lazy
from django.contrib.auth.mixins import PermissionRequiredMixin
from inventario.forms import EquipoForm
from inventario.models import Equipo
from perfiles.models import Perfil
from ubicacion.models import FacultadInstancia
from django.core.exceptions import PermissionDenied
from django.contrib.messages.views import SuccessMessageMixin
# Equipos
class EquipoListView(ListView):
context_object_name = 'equipos'
model = Equipo
template_name = 'inventario/equipo/lista.html'
def get_queryset(self):
if self.request.user.perfil:
return Equipo.objects.filter(ubicacion=self.request.user.perfil.facultad)
else:
return None
class EquipoFacultadListView(PermissionRequiredMixin, ListView):
context_object_name = 'equipos'
model = Equipo
permission_required = 'ubicacion.ver_equipos_facultad'
template_name = 'inventario/equipo/lista.html'
paginate_by = 10
def get_queryset(self):
facultad = FacultadInstancia.objects.get(pk=self.kwargs['pk'])
usuario = self.request.user
if usuario.perfil.facultad != facultad:
raise PermissionDenied
qs = Equipo.objects.filter(ubicacion=self.kwargs['pk'])
return qs
class EquipoDetailView(DetailView):
context_object_name = 'equipo'
model = Equipo
template_name = 'inventario/equipo/detalle.html'
def get_object(self):
object = super(EquipoDetailView, self).get_object()
usuario = self.request.user
if usuario.perfil.facultad != object.facultad:
raise PermissionDenied
return object
class EquipoUpdateView(UpdateView):
form_class = EquipoForm
model = Equipo
template_name = 'inventario/equipo/crear.html'
def get_object(self):
object = super(EquipoUpdateView, self).get_object()
usuario = self.request.user
if usuario.perfil.facultad != object.facultad:
raise PermissionDenied
return object
class EquipoCreateView(SuccessMessageMixin, CreateView):
form_class = EquipoForm
model = Equipo
template_name = 'inventario/equipo/crear.html'
success_url = reverse_lazy('inventario:lista-equipo')
def form_valid(self, form):
usuario = Perfil.objects.get(usuario=self.request.user)
form.instance.cod_sede = usuario.cod_sede
form.instance.cod_facultad = usuario.cod_facultad
try:
return super(EquipoCreateView, self).form_valid(form)
        except Exception:
return self.form_invalid(form)
```
#### File: ubicacion/tests/ubicacion_tests.py
```python
from django.test import TestCase
from ubicacion.models import Sede, Facultad, Escuela, Departamento, Carrera, FacultadInstancia, EscuelaInstancia, \
DepartamentoInstancia, CarreraInstancia
class AutoUbicacionTestCase(TestCase):
fixtures = ['sede.json', 'facultad.json', 'escuela.json', 'departamento.json', 'carrera.json', ]
def setUp(self):
FacultadInstancia.objects.create(cod_sede='1', cod_facultad='24')
FacultadInstancia.objects.create(cod_sede='01', cod_facultad='25') # Prueba de no asignar al faltar facultad
FacultadInstancia.objects.create(cod_sede='X1', cod_facultad='24')
EscuelaInstancia.objects.create(cod_sede='1', cod_facultad='24', cod_escuela='2')
DepartamentoInstancia.objects.create(cod_sede='1', cod_facultad='24', cod_departamento='D1')
CarreraInstancia.objects.create(cod_sede='1', cod_facultad='24', cod_escuela='2', cod_carrera='1')
def test_facultadinstancia_auto_ubicacion(self):
'''
Revisa que se asignen de manera automatica las FK a Sede y Facultad
:return:
'''
s = Sede.objects.get(cod_sede='1')
f = Facultad.objects.get(cod_facultad='24')
fi = FacultadInstancia.objects.get(cod_sede='1', cod_facultad='24')
self.assertEqual(fi.sede, s)
self.assertEqual(fi.facultad, f)
def test_facultadinstancia_auto_ubicacion_sede_fail(self):
fi = FacultadInstancia.objects.get(cod_sede='X1', cod_facultad='24')
self.assertEqual(fi.sede, None)
def test_facultadinstancia_auto_ubicacion_facultad_fail(self):
fi = FacultadInstancia.objects.get(cod_sede='01', cod_facultad='25')
self.assertEqual(fi.facultad, None)
def test_escuelainstancia_auto_ubicacion(self):
s = Sede.objects.get(cod_sede='1')
f = Facultad.objects.get(cod_facultad='24')
e = Escuela.objects.get(cod_facultad='24', cod_escuela='2')
ei = EscuelaInstancia.objects.get(cod_sede='1', cod_facultad='24', cod_escuela='2')
self.assertEqual(ei.sede, s)
self.assertEqual(ei.facultad, f)
self.assertEqual(ei.escuela, e)
def test_departamentoinstancia_auto_ubicacion(self):
s = Sede.objects.get(cod_sede='1')
f = Facultad.objects.get(cod_facultad='24')
d = Departamento.objects.get(cod_facultad='24', cod_departamento='D1')
di = DepartamentoInstancia.objects.get(cod_sede='1', cod_facultad='24', cod_departamento='D1')
self.assertEqual(di.sede, s)
self.assertEqual(di.facultad, f)
self.assertEqual(di.departamento, d)
def test_carrerainstancia_auto_ubicacion(self):
s = Sede.objects.get(cod_sede='1')
f = Facultad.objects.get(cod_facultad='24')
e = Escuela.objects.get(cod_facultad='24', cod_escuela='2')
c = Carrera.objects.get(cod_facultad='24', cod_escuela='2', cod_carrera='1')
ci = CarreraInstancia.objects.get(cod_sede='1', cod_facultad='24', cod_escuela='2', cod_carrera='1')
self.assertEqual(ci.sede, s)
self.assertEqual(ci.facultad, f)
self.assertEqual(ci.escuela, e)
self.assertEqual(ci.carrera, c)
```
#### File: ubicacion/views/autocomplete.py
```python
from dal import autocomplete
from ubicacion.models import CarreraInstancia, EscuelaInstancia, Sede, FacultadInstancia, Facultad, Escuela, Carrera
class CarreraInstanciaAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if self.request.user.is_superuser:
qs = CarreraInstancia.objects.all()
elif self.request.user.is_authenticated():
qs = CarreraInstancia.objects.all()
else:
return CarreraInstancia.objects.none()
if self.q:
qs = qs.filter(nombre_proyecto__icontains=self.q)
return qs
``` |
{
"source": "jlopezg8/inteligentes1-2020-1",
"score": 2
} |
#### File: Python/busquedas_entre_adversarios/test_pente.py
```python
import utils.pente as pente
from poda_alfa_beta import PodaAlfaBeta
from utils import cronometrar, como_mutable
poda_alfa_beta = PodaAlfaBeta(pente.es_hoja, pente.get_utilidad,
pente.gen_sucesores, pente.get_sig_jugador)
def elegir_jugada(estado, jugador):
with cronometrar():
return poda_alfa_beta.elegir_jugada(
estado, jugador, lim_profundidad=4, con_barra_progreso=True)
pente.PartidaHumanoVsMaquina(elegir_jugada, inicia_maquina=True)
```
#### File: busquedas_entre_adversarios/utils/triqui.py
```python
import argparse
import random
import matplotlib.pyplot as plt
import numpy as np
from . import como_hasheable, como_mutable, iter_matriz
O = 'O'
X = 'X'
_ = '_'
ESTADO_INICIAL = (
(_, _, _),
(_, _, _),
(_, _, _),
)
def es_hoja(estado):
return not any(gen_sucesores(estado, O)) or get_utilidad(estado, O) != 0
def gen_sucesores(estado, jugador):
"""Función generadora de los estados sucesores de `estado`."""
estado = como_mutable(estado)
for i, j, val in iter_matriz(estado):
if val == _:
estado[i][j] = jugador
yield como_hasheable(estado)
estado[i][j] = _
def get_sig_jugador(jugador):
"""Retorna el jugador del turno siguiente, donde `jugador` es el jugador
del turno actual.
"""
return O if jugador == X else X
def get_utilidad(estado, jugador):
"""Retorna 1 si el estado corresponde a una victoria para `jugador`, -1 si
corresponde a una derrota para `jugador`, o 0 si corresponde a un empate o
si ningún jugador ha ganado hasta el momento.
"""
estado = np.array(estado)
m, n = estado.shape # asumir que m == n (estado es una matriz cuadrada)
jugadores = (
(estado == jugador, 1), # jugador
((estado == get_sig_jugador(jugador)), -1), # oponente
)
for jugadas, utilidad in jugadores:
if (any(jugadas.sum(axis=0) == m) # columnas
or any(jugadas.sum(axis=1) == n) # filas
or np.trace(jugadas) == n # diagonal principal
or np.trace(np.fliplr(jugadas)) == n): # diagonal secundaria
return utilidad
return 0
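# Quick editorial example (not part of the original module): a full column of O
# is a win for O (utilidad 1) and a loss for X (utilidad -1).
# tablero = ((O, X, _),
#            (O, X, _),
#            (O, _, _))
# get_utilidad(tablero, O)  # -> 1
# get_utilidad(tablero, X)  # -> -1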
def graficar_estado(estado):
_graficar_estado(estado)
plt.show()
def _graficar_estado(estado):
plt.grid(True, color='black')
plt.ylim(len(estado), 0)
plt.xlim(0, len(estado[0]))
plt.yticks(range(1, len(estado)), labels=[])
plt.xticks(range(1, len(estado[0])), labels=[])
plt.gca().set_frame_on(False)
for i, j, val in iter_matriz(estado):
if val != _:
plt.text(x=j+.5, y=i+.5, s=val, size=32, ha='center', va='center')
plt.pause(.1)
def parse_args():
parser = argparse.ArgumentParser(description='Jugar triqui.')
parser.add_argument('-e', '--entre_maquinas', action='store_true',
help='juegan máquina vs máquina')
parser.add_argument('-i', '--inicia_maquina', action='store_true',
help='inicia máquina (para humano vs máquina)')
return parser.parse_args()
def PartidaMaquinaVsMaquina(elegir_jugada, maquina1=O,
estado_inicial=ESTADO_INICIAL):
estado = estado_inicial
jugador = maquina1
while not es_hoja(estado):
_graficar_estado(estado)
estado = elegir_jugada(estado, jugador)
jugador = get_sig_jugador(jugador)
graficar_estado(estado)
class PartidaHumanoVsMaquina:
def __init__(self, elegir_jugada, inicia_maquina=False, humano=O,
estado_inicial=ESTADO_INICIAL):
self.elegir_jugada = elegir_jugada
self.estado = como_mutable(estado_inicial)
self.humano = humano
self.maquina = get_sig_jugador(humano)
self._iniciar(inicia_maquina)
def _iniciar(self, inicia_maquina):
_graficar_estado(self.estado)
if inicia_maquina:
self._jugar_maquina()
self.es_turno_humano = True
plt.connect('button_press_event', self)
plt.show()
def __call__(self, evt):
if self.es_turno_humano and evt.inaxes:
self.es_turno_humano = False
i, j = int(evt.ydata), int(evt.xdata)
if self._jugar_jugador(i, j):
self._jugar_maquina()
self.es_turno_humano = True
def _jugar_jugador(self, i, j):
if self.estado[i][j] == _ and not es_hoja(self.estado):
self.estado[i][j] = self.humano
_graficar_estado(self.estado)
return True
else:
return False
def _jugar_maquina(self):
if not es_hoja(self.estado):
self.estado = como_mutable(
self.elegir_jugada(self.estado, self.maquina))
_graficar_estado(self.estado)
```
#### File: Python/busquedas_no_informadas/a_estrella.py
```python
from bisect import insort
from collections import deque
from math import isinf
from utils.indicadores_progreso import ContadorPasos
from utils.nodos import NodoConCostoCombinado as Nodo, reconstruir_ruta
def buscar_con_a_estrella(estado0, gen_estados_alcanzables, heuristica):
"""Retorna la ruta para resolver el problema, o `None` si no se encontró
una solución.
:param `estado0`: estado inicial
:param `gen_estados_alcanzables` función que recibe un estado y genera los
estados alcanzables a partir de este
:param heuristica: función que recibe un estado y estima qué tan cerca está
del estado objetivo; debe retornar 0 si el estado es el estado objetivo
"""
contador_pasos = ContadorPasos()
if isinf(dist := heuristica(estado0)):
return None # no resuelto
frontera = deque([Nodo(estado=estado0, padre=None, costo_actual=0,
dist=dist, costo_combinado=0+dist)])
considerados = {estado0} # estados en la frontera o ya visitados
while frontera:
next(contador_pasos)
nodo = frontera.popleft()
if nodo.dist == 0:
return reconstruir_ruta(nodo)
hijos = set(gen_estados_alcanzables(nodo.estado)) - considerados
for hijo in hijos:
if not isinf(dist := heuristica(hijo)):
costo_hijo = nodo.costo_actual + 1
insort(frontera,
Nodo(estado=hijo, padre=nodo, costo_actual=costo_hijo,
dist=dist, costo_combinado=costo_hijo+dist))
considerados.add(hijo)
return None # no resuelto
if __name__ == "__main__":
import utils.eight_puzzle as ep
X = ep.HUECO
estado0 = (
(5, 1, 2),
(X, 7, 3),
(6, 4, 8),
)
ep.graficar_estado(estado0)
ruta = buscar_con_a_estrella(estado0, ep.gen_estados_alcanzables,
heuristica=ep.dist_hamming)
print(f'Solución de {len(ruta)} pasos')
ep.graficar_ruta(ruta)
``` |
{
"source": "Jlopezjlx/devhub",
"score": 2
} |
#### File: src/account/views.py
```python
from django.shortcuts import render
from django.contrib.auth import authenticate, logout
from django.contrib.auth import login as auth_login
from django.http import HttpResponseRedirect, HttpResponse
def index(request):
return render(request, "account/index.html")
def login(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('<PASSWORD>')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
auth_login(request,user)
return HttpResponse ("your account is ok")
else:
return HttpResponse ("Your account was inactive")
else:
print("Someone tried to log in and Failed")
return HttpResponse ("Invalid credentials")
else:
return render(request, "account/login/login.html")
``` |
{
"source": "jlopez-kepler/SUDOKU",
"score": 3
} |
#### File: app/project/auth.py
```python
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import ( # pylint: disable=import-error
login_required,
login_user,
logout_user,
)
from werkzeug.security import check_password_hash, generate_password_hash
from project.models import User
from . import db
auth = Blueprint(
"auth", __name__, template_folder="../templates", static_folder="../static"
)
@auth.route("/login", methods=["GET", "POST"])
def login():
"""LogIn Page"""
if request.method == "GET":
return render_template("login.html")
email = request.form.get("email")
password = request.form.get("password")
remember = bool(request.form.get("remember"))
user = User.query.filter_by(email=email).first()
if not user or not check_password_hash(user.password, password):
flash("Please check your login details and try again.")
return redirect(url_for("auth.login"))
login_user(user, remember=remember)
return redirect(url_for("main.games"))
@auth.route("/signup", methods=["GET", "POST"])
def signup():
"""SignUp Page"""
if request.method == "GET":
return render_template("signup.html")
email = request.form.get("email")
name = request.form.get("name")
password = request.form.get("password")
user = User.query.filter_by(email=email).first()
if user:
flash("Email address already exists")
return redirect(url_for("auth.signup"))
new_user = User(
email=email,
name=name,
password=generate_password_hash(password, method="<PASSWORD>"),
)
db.session.add(new_user)
db.session.commit()
return redirect(url_for("auth.login"))
@auth.route("/logout")
@login_required
def logout():
"""LogOut Page"""
logout_user()
return redirect(url_for("main.index"))
``` |
{
"source": "jlopezNEU/scikit-learn",
"score": 2
} |
#### File: sklearn/_build_utils/pre_build_helpers.py
```python
import os
import sys
import glob
import tempfile
import textwrap
import setuptools # noqa
import subprocess
import warnings
from distutils.dist import Distribution
from distutils.sysconfig import customize_compiler
# NumPy 1.23 deprecates numpy.distutils
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
from numpy.distutils.ccompiler import new_compiler
from numpy.distutils.command.config_compiler import config_cc
def _get_compiler():
"""Get a compiler equivalent to the one that will be used to build sklearn
Handles compiler specified as follows:
- python setup.py build_ext --compiler=<compiler>
- CC=<compiler> python setup.py build_ext
"""
dist = Distribution(
{
"script_name": os.path.basename(sys.argv[0]),
"script_args": sys.argv[1:],
"cmdclass": {"config_cc": config_cc},
}
)
dist.parse_config_files()
dist.parse_command_line()
cmd_opts = dist.command_options.get("build_ext")
if cmd_opts is not None and "compiler" in cmd_opts:
compiler = cmd_opts["compiler"][1]
else:
compiler = None
ccompiler = new_compiler(compiler=compiler)
customize_compiler(ccompiler)
return ccompiler
def compile_test_program(code, extra_preargs=[], extra_postargs=[]):
"""Check that some C code can be compiled and run"""
ccompiler = _get_compiler()
# extra_(pre/post)args can be a callable to make it possible to get its
# value from the compiler
if callable(extra_preargs):
extra_preargs = extra_preargs(ccompiler)
if callable(extra_postargs):
extra_postargs = extra_postargs(ccompiler)
start_dir = os.path.abspath(".")
with tempfile.TemporaryDirectory() as tmp_dir:
try:
os.chdir(tmp_dir)
# Write test program
with open("test_program.c", "w") as f:
f.write(code)
os.mkdir("objects")
# Compile, test program
ccompiler.compile(
["test_program.c"], output_dir="objects", extra_postargs=extra_postargs
)
# Link test program
objects = glob.glob(os.path.join("objects", "*" + ccompiler.obj_extension))
ccompiler.link_executable(
objects,
"test_program",
extra_preargs=extra_preargs,
extra_postargs=extra_postargs,
)
if "PYTHON_CROSSENV" not in os.environ:
# Run test program if not cross compiling
# will raise a CalledProcessError if return code was non-zero
output = subprocess.check_output("./test_program")
output = output.decode(sys.stdout.encoding or "utf-8").splitlines()
else:
# Return an empty output if we are cross compiling
# as we cannot run the test_program
output = []
except Exception:
raise
finally:
os.chdir(start_dir)
return output
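# Illustrative callable argument (an assumed caller, mirroring how compiler-specific
# flags could be supplied; not executed here):
#   def _openmp_flag(compiler):
#       return ["/openmp"] if compiler.compiler_type == "msvc" else ["-fopenmp"]
#   compile_test_program(code, extra_postargs=_openmp_flag)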
def basic_check_build():
"""Check basic compilation and linking of C code"""
if "PYODIDE_PACKAGE_ABI" in os.environ:
# The following check won't work in pyodide
return
code = textwrap.dedent(
"""\
#include <stdio.h>
int main(void) {
return 0;
}
"""
)
compile_test_program(code)
```
#### File: sklearn/datasets/_arff_parser.py
```python
import itertools
from collections import OrderedDict
from collections.abc import Generator
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import scipy.sparse
from ..externals._arff import ArffSparseDataType, ArffContainerType
from ..utils import (
_chunk_generator,
check_pandas_support,
get_chunk_n_rows,
is_scalar_nan,
)
def _split_sparse_columns(
arff_data: ArffSparseDataType, include_columns: List
) -> ArffSparseDataType:
"""
obtains several columns from sparse arff representation. Additionally, the
column indices are re-labelled, given the columns that are not included.
(e.g., when including [1, 2, 3], the columns will be relabelled to
[0, 1, 2])
Parameters
----------
arff_data : tuple
A tuple of three lists of equal size; first list indicating the value,
second the x coordinate and the third the y coordinate.
include_columns : list
A list of columns to include.
Returns
-------
arff_data_new : tuple
Subset of arff data with only the include columns indicated by the
include_columns argument.
"""
arff_data_new: ArffSparseDataType = (list(), list(), list())
reindexed_columns = {
column_idx: array_idx for array_idx, column_idx in enumerate(include_columns)
}
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
arff_data_new[0].append(val)
arff_data_new[1].append(row_idx)
arff_data_new[2].append(reindexed_columns[col_idx])
return arff_data_new
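# Illustrative example of the relabelling described in the docstring (not executed):
#   data = ([10, 20, 30], [0, 0, 1], [1, 3, 5])        # (values, row indices, column indices)
#   _split_sparse_columns(data, include_columns=[1, 3])
#   -> ([10, 20], [0, 0], [0, 1])                      # columns 1 and 3 relabelled to 0 and 1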
def _sparse_data_to_array(
arff_data: ArffSparseDataType, include_columns: List
) -> np.ndarray:
# turns the sparse data back into an array (can't use toarray() function,
# as this does only work on numeric data)
num_obs = max(arff_data[1]) + 1
y_shape = (num_obs, len(include_columns))
reindexed_columns = {
column_idx: array_idx for array_idx, column_idx in enumerate(include_columns)
}
# TODO: improve for efficiency
y = np.empty(y_shape, dtype=np.float64)
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
y[row_idx, reindexed_columns[col_idx]] = val
return y
def _feature_to_dtype(feature: Dict[str, str]):
"""Map feature to dtype for pandas DataFrame"""
if feature["data_type"] == "string":
return object
elif feature["data_type"] == "nominal":
return "category"
# only numeric, integer, real are left
elif feature["number_of_missing_values"] != "0" or feature["data_type"] in [
"numeric",
"real",
]:
# cast to floats when there are any missing values
return np.float64
elif feature["data_type"] == "integer":
return np.int64
raise ValueError("Unsupported feature: {}".format(feature))
def _convert_arff_data(
arff: ArffContainerType,
col_slice_x: List[int],
col_slice_y: List[int],
shape: Optional[Tuple] = None,
) -> Tuple:
"""
converts the arff object into the appropriate matrix type (np.array or
scipy.sparse.csr_matrix) based on the 'data part' (i.e., in the
liac-arff dict, the object from the 'data' key)
Parameters
----------
arff : dict
As obtained from liac-arff object.
col_slice_x : list
The column indices that are sliced from the original array to return
as X data
col_slice_y : list
The column indices that are sliced from the original array to return
as y data
Returns
-------
X : np.array or scipy.sparse.csr_matrix
y : np.array
"""
arff_data = arff["data"]
if isinstance(arff_data, Generator):
if shape is None:
raise ValueError("shape must be provided when arr['data'] is a Generator")
if shape[0] == -1:
count = -1
else:
count = shape[0] * shape[1]
data = np.fromiter(
itertools.chain.from_iterable(arff_data), dtype="float64", count=count
)
data = data.reshape(*shape)
X = data[:, col_slice_x]
y = data[:, col_slice_y]
return X, y
elif isinstance(arff_data, tuple):
arff_data_X = _split_sparse_columns(arff_data, col_slice_x)
num_obs = max(arff_data[1]) + 1
X_shape = (num_obs, len(col_slice_x))
X = scipy.sparse.coo_matrix(
(arff_data_X[0], (arff_data_X[1], arff_data_X[2])),
shape=X_shape,
dtype=np.float64,
)
X = X.tocsr()
y = _sparse_data_to_array(arff_data, col_slice_y)
return X, y
else:
# This should never happen
raise ValueError("Unexpected Data Type obtained from arff.")
def _convert_arff_data_dataframe(
arff: ArffContainerType, columns: List, features_dict: Dict[str, Any]
) -> Tuple:
"""Convert the ARFF object into a pandas DataFrame.
Parameters
----------
arff : dict
As obtained from liac-arff object.
columns : list
Columns from dataframe to return.
features_dict : dict
Maps feature name to feature info from openml.
Returns
-------
result : tuple
tuple with the resulting dataframe
"""
pd = check_pandas_support("fetch_openml with as_frame=True")
attributes = OrderedDict(arff["attributes"])
arff_columns = list(attributes)
if not isinstance(arff["data"], Generator):
raise ValueError(
"arff['data'] must be a generator when converting to pd.DataFrame."
)
# calculate chunksize
first_row = next(arff["data"])
first_df = pd.DataFrame([first_row], columns=arff_columns)
row_bytes = first_df.memory_usage(deep=True).sum()
chunksize = get_chunk_n_rows(row_bytes)
# read arff data with chunks
columns_to_keep = [col for col in arff_columns if col in columns]
dfs = []
dfs.append(first_df[columns_to_keep])
for data in _chunk_generator(arff["data"], chunksize):
dfs.append(pd.DataFrame(data, columns=arff_columns)[columns_to_keep])
df = pd.concat(dfs, ignore_index=True)
for column in columns_to_keep:
dtype = _feature_to_dtype(features_dict[column])
if dtype == "category":
cats_without_missing = [
cat
for cat in attributes[column]
if cat is not None and not is_scalar_nan(cat)
]
dtype = pd.api.types.CategoricalDtype(cats_without_missing)
df[column] = df[column].astype(dtype, copy=False)
return (df,)
def _liac_arff_parser(
arff_container,
output_arrays_type,
features_dict,
data_columns,
target_columns,
col_slice_x=None,
col_slice_y=None,
shape=None,
):
if output_arrays_type == "pandas":
nominal_attributes = None
columns = data_columns + target_columns
(frame,) = _convert_arff_data_dataframe(arff_container, columns, features_dict)
X = frame[data_columns]
if len(target_columns) >= 2:
y = frame[target_columns]
elif len(target_columns) == 1:
y = frame[target_columns[0]]
else:
y = None
else:
frame = None
X, y = _convert_arff_data(arff_container, col_slice_x, col_slice_y, shape)
nominal_attributes = {
k: v
for k, v in arff_container["attributes"]
if isinstance(v, list) and k in data_columns + target_columns
}
is_classification = {
col_name in nominal_attributes for col_name in target_columns
}
if not is_classification:
# No target
pass
elif all(is_classification):
y = np.hstack(
[
np.take(
np.asarray(nominal_attributes.pop(col_name), dtype="O"),
y[:, i : i + 1].astype(int, copy=False),
)
for i, col_name in enumerate(target_columns)
]
)
elif any(is_classification):
raise ValueError(
"Mix of nominal and non-nominal targets is not currently supported"
)
# reshape y back to 1-D array, if there is only 1 target column;
        # back to None if there are no target columns
if y.shape[1] == 1:
y = y.reshape((-1,))
elif y.shape[1] == 0:
y = None
return X, y, frame, nominal_attributes
```
#### File: datasets/tests/test_base.py
```python
import os
import shutil
import tempfile
import warnings
from pickle import loads
from pickle import dumps
from functools import partial
from importlib import resources
import pytest
import numpy as np
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets import load_wine
from sklearn.datasets._base import (
load_csv_data,
load_gzip_compressed_csv_data,
)
from sklearn.preprocessing import scale
from sklearn.utils import Bunch
from sklearn.utils._testing import SkipTest
from sklearn.datasets.tests.test_common import check_as_frame
from sklearn.externals._pilutil import pillow_installed
from sklearn.utils import IS_PYPY
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
@pytest.fixture(scope="module")
def data_home(tmpdir_factory):
tmp_file = str(tmpdir_factory.mktemp("scikit_learn_data_home_test"))
yield tmp_file
_remove_dir(tmp_file)
@pytest.fixture(scope="module")
def load_files_root(tmpdir_factory):
tmp_file = str(tmpdir_factory.mktemp("scikit_learn_load_files_test"))
yield tmp_file
_remove_dir(tmp_file)
@pytest.fixture
def test_category_dir_1(load_files_root):
test_category_dir1 = tempfile.mkdtemp(dir=load_files_root)
sample_file = tempfile.NamedTemporaryFile(dir=test_category_dir1, delete=False)
sample_file.write(b"Hello World!\n")
sample_file.close()
yield str(test_category_dir1)
_remove_dir(test_category_dir1)
@pytest.fixture
def test_category_dir_2(load_files_root):
test_category_dir2 = tempfile.mkdtemp(dir=load_files_root)
yield str(test_category_dir2)
_remove_dir(test_category_dir2)
def test_data_home(data_home):
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=data_home)
assert data_home == data_home
assert os.path.exists(data_home)
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert not os.path.exists(data_home)
# if the folder is missing it will be created again
data_home = get_data_home(data_home=data_home)
assert os.path.exists(data_home)
def test_default_empty_load_files(load_files_root):
res = load_files(load_files_root)
assert len(res.filenames) == 0
assert len(res.target_names) == 0
assert res.DESCR is None
def test_default_load_files(test_category_dir_1, test_category_dir_2, load_files_root):
if IS_PYPY:
pytest.xfail("[PyPy] fails due to string containing NUL characters")
res = load_files(load_files_root)
assert len(res.filenames) == 1
assert len(res.target_names) == 2
assert res.DESCR is None
assert res.data == [b"Hello World!\n"]
def test_load_files_w_categories_desc_and_encoding(
test_category_dir_1, test_category_dir_2, load_files_root
):
if IS_PYPY:
pytest.xfail("[PyPy] fails due to string containing NUL characters")
category = os.path.abspath(test_category_dir_1).split("/").pop()
res = load_files(
load_files_root, description="test", categories=category, encoding="utf-8"
)
assert len(res.filenames) == 1
assert len(res.target_names) == 1
assert res.DESCR == "test"
assert res.data == ["Hello World!\n"]
def test_load_files_wo_load_content(
test_category_dir_1, test_category_dir_2, load_files_root
):
res = load_files(load_files_root, load_content=False)
assert len(res.filenames) == 1
assert len(res.target_names) == 2
assert res.DESCR is None
assert res.get("data") is None
@pytest.mark.parametrize("allowed_extensions", ([".txt"], [".txt", ".json"]))
def test_load_files_allowed_extensions(tmp_path, allowed_extensions):
"""Check the behaviour of `allowed_extension` in `load_files`."""
d = tmp_path / "sub"
d.mkdir()
files = ("file1.txt", "file2.json", "file3.json", "file4.md")
paths = [d / f for f in files]
for p in paths:
p.touch()
res = load_files(tmp_path, allowed_extensions=allowed_extensions)
assert set([str(p) for p in paths if p.suffix in allowed_extensions]) == set(
res.filenames
)
@pytest.mark.parametrize(
"filename, expected_n_samples, expected_n_features, expected_target_names",
[
("wine_data.csv", 178, 13, ["class_0", "class_1", "class_2"]),
("iris.csv", 150, 4, ["setosa", "versicolor", "virginica"]),
("breast_cancer.csv", 569, 30, ["malignant", "benign"]),
],
)
def test_load_csv_data(
filename, expected_n_samples, expected_n_features, expected_target_names
):
actual_data, actual_target, actual_target_names = load_csv_data(filename)
assert actual_data.shape[0] == expected_n_samples
assert actual_data.shape[1] == expected_n_features
assert actual_target.shape[0] == expected_n_samples
np.testing.assert_array_equal(actual_target_names, expected_target_names)
def test_load_csv_data_with_descr():
data_file_name = "iris.csv"
descr_file_name = "iris.rst"
res_without_descr = load_csv_data(data_file_name=data_file_name)
res_with_descr = load_csv_data(
data_file_name=data_file_name, descr_file_name=descr_file_name
)
assert len(res_with_descr) == 4
assert len(res_without_descr) == 3
np.testing.assert_array_equal(res_with_descr[0], res_without_descr[0])
np.testing.assert_array_equal(res_with_descr[1], res_without_descr[1])
np.testing.assert_array_equal(res_with_descr[2], res_without_descr[2])
assert res_with_descr[-1].startswith(".. _iris_dataset:")
@pytest.mark.parametrize(
"filename, kwargs, expected_shape",
[
("diabetes_data_raw.csv.gz", {}, [442, 10]),
("diabetes_target.csv.gz", {}, [442]),
("digits.csv.gz", {"delimiter": ","}, [1797, 65]),
],
)
def test_load_gzip_compressed_csv_data(filename, kwargs, expected_shape):
actual_data = load_gzip_compressed_csv_data(filename, **kwargs)
assert actual_data.shape == tuple(expected_shape)
def test_load_gzip_compressed_csv_data_with_descr():
data_file_name = "diabetes_target.csv.gz"
descr_file_name = "diabetes.rst"
expected_data = load_gzip_compressed_csv_data(data_file_name=data_file_name)
actual_data, descr = load_gzip_compressed_csv_data(
data_file_name=data_file_name,
descr_file_name=descr_file_name,
)
np.testing.assert_array_equal(actual_data, expected_data)
assert descr.startswith(".. _diabetes_dataset:")
def test_load_sample_images():
try:
res = load_sample_images()
assert len(res.images) == 2
assert len(res.filenames) == 2
images = res.images
# assert is china image
assert np.all(images[0][0, 0, :] == np.array([174, 201, 231], dtype=np.uint8))
# assert is flower image
assert np.all(images[1][0, 0, :] == np.array([2, 19, 13], dtype=np.uint8))
assert res.DESCR
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_sample_image():
try:
china = load_sample_image("china.jpg")
assert china.dtype == "uint8"
assert china.shape == (427, 640, 3)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
if pillow_installed:
with pytest.raises(AttributeError):
load_sample_image("blop.jpg")
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes_raw():
"""Test to check that we load a scaled version by default but that we can
get an unscaled version when setting `scaled=False`."""
diabetes_raw = load_diabetes(scaled=False)
assert diabetes_raw.data.shape == (442, 10)
    assert diabetes_raw.target.size == 442
assert len(diabetes_raw.feature_names) == 10
assert diabetes_raw.DESCR
diabetes_default = load_diabetes()
np.testing.assert_allclose(
scale(diabetes_raw.data) / (442**0.5), diabetes_default.data, atol=1e-04
)
@pytest.mark.filterwarnings("ignore:Function load_boston is deprecated")
@pytest.mark.parametrize(
"loader_func, data_shape, target_shape, n_target, has_descr, filenames",
[
(load_breast_cancer, (569, 30), (569,), 2, True, ["filename"]),
(load_wine, (178, 13), (178,), 3, True, []),
(load_iris, (150, 4), (150,), 3, True, ["filename"]),
(
load_linnerud,
(20, 3),
(20, 3),
3,
True,
["data_filename", "target_filename"],
),
(load_diabetes, (442, 10), (442,), None, True, []),
(load_digits, (1797, 64), (1797,), 10, True, []),
(partial(load_digits, n_class=9), (1617, 64), (1617,), 10, True, []),
(load_boston, (506, 13), (506,), None, True, ["filename"]),
],
)
def test_loader(loader_func, data_shape, target_shape, n_target, has_descr, filenames):
bunch = loader_func()
assert isinstance(bunch, Bunch)
assert bunch.data.shape == data_shape
assert bunch.target.shape == target_shape
if hasattr(bunch, "feature_names"):
assert len(bunch.feature_names) == data_shape[1]
if n_target is not None:
assert len(bunch.target_names) == n_target
if has_descr:
assert bunch.DESCR
if filenames:
assert "data_module" in bunch
assert all(
[
f in bunch and resources.is_resource(bunch["data_module"], bunch[f])
for f in filenames
]
)
@pytest.mark.parametrize(
"loader_func, data_dtype, target_dtype",
[
(load_breast_cancer, np.float64, int),
(load_diabetes, np.float64, np.float64),
(load_digits, np.float64, int),
(load_iris, np.float64, int),
(load_linnerud, np.float64, np.float64),
(load_wine, np.float64, int),
],
)
def test_toy_dataset_frame_dtype(loader_func, data_dtype, target_dtype):
default_result = loader_func()
check_as_frame(
default_result,
loader_func,
expected_data_dtype=data_dtype,
expected_target_dtype=target_dtype,
)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert bunch_from_pkl["x"] == bunch_from_pkl.x
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key="original")
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
# is a surprising behaviour because reading bunch.key uses
# bunch.__dict__ (which is non empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__["key"] = "set from __dict__"
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert bunch_from_pkl.key == "original"
assert bunch_from_pkl["key"] == "original"
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = "changed"
assert bunch_from_pkl.key == "changed"
assert bunch_from_pkl["key"] == "changed"
def test_bunch_dir():
# check that dir (important for autocomplete) shows attributes
data = load_iris()
assert "data" in dir(data)
# FIXME: to be removed in 1.2
def test_load_boston_warning():
"""Check that we raise the ethical warning when loading `load_boston`."""
warn_msg = "The Boston housing prices dataset has an ethical problem"
with pytest.warns(FutureWarning, match=warn_msg):
load_boston()
@pytest.mark.filterwarnings("ignore:Function load_boston is deprecated")
def test_load_boston_alternative():
pd = pytest.importorskip("pandas")
if os.environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "1":
raise SkipTest(
"This test requires an internet connection to fetch the dataset."
)
boston_sklearn = load_boston()
data_url = "http://lib.stat.cmu.edu/datasets/boston"
try:
raw_df = pd.read_csv(data_url, sep=r"\s+", skiprows=22, header=None)
except ConnectionError as e:
pytest.xfail(f"The dataset can't be downloaded. Got exception: {e}")
data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
target = raw_df.values[1::2, 2]
np.testing.assert_allclose(data, boston_sklearn.data)
np.testing.assert_allclose(target, boston_sklearn.target)
```
#### File: feature_selection/tests/test_sequential.py
```python
import pytest
import scipy
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.datasets import make_regression, make_blobs
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.model_selection import cross_val_score
from sklearn.cluster import KMeans
@pytest.mark.parametrize("n_features_to_select", (0, 5, 0.0, -1, 1.1))
def test_bad_n_features_to_select(n_features_to_select):
X, y = make_regression(n_features=5)
sfs = SequentialFeatureSelector(
LinearRegression(), n_features_to_select=n_features_to_select
)
with pytest.raises(ValueError, match="must be either 'auto'"):
sfs.fit(X, y)
def test_bad_direction():
X, y = make_regression(n_features=5)
sfs = SequentialFeatureSelector(
LinearRegression(), n_features_to_select="auto", direction="bad"
)
with pytest.raises(ValueError, match="must be either 'forward' or"):
sfs.fit(X, y)
@pytest.mark.filterwarnings("ignore:Leaving `n_features_to_select` to ")
@pytest.mark.parametrize("direction", ("forward", "backward"))
@pytest.mark.parametrize("n_features_to_select", (1, 5, 9, "auto", None))
def test_n_features_to_select(direction, n_features_to_select):
# Make sure n_features_to_select is respected
n_features = 10
X, y = make_regression(n_features=n_features, random_state=0)
sfs = SequentialFeatureSelector(
LinearRegression(),
n_features_to_select=n_features_to_select,
direction=direction,
cv=2,
)
sfs.fit(X, y)
if n_features_to_select in ("auto", None):
n_features_to_select = n_features // 2
assert sfs.get_support(indices=True).shape[0] == n_features_to_select
assert sfs.n_features_to_select_ == n_features_to_select
assert sfs.transform(X).shape[1] == n_features_to_select
@pytest.mark.parametrize("direction", ("forward", "backward"))
def test_n_features_to_select_auto(direction):
"""Check the behaviour of `n_features_to_select="auto"` with different
values for the parameter `tol`.
"""
n_features = 10
tol = 1e-3
X, y = make_regression(n_features=n_features, random_state=0)
sfs = SequentialFeatureSelector(
LinearRegression(),
n_features_to_select="auto",
tol=tol,
direction=direction,
cv=2,
)
sfs.fit(X, y)
max_features_to_select = n_features - 1
assert sfs.get_support(indices=True).shape[0] <= max_features_to_select
assert sfs.n_features_to_select_ <= max_features_to_select
assert sfs.transform(X).shape[1] <= max_features_to_select
assert sfs.get_support(indices=True).shape[0] == sfs.n_features_to_select_
@pytest.mark.parametrize("direction", ("forward", "backward"))
def test_n_features_to_select_stopping_criterion(direction):
"""Check the behaviour stopping criterion for feature selection
depending on the values of `n_features_to_select` and `tol`.
    When `direction` is `'forward'`, select a new feature at random
among those not currently selected in selector.support_,
build a new version of the data that includes all the features
in selector.support_ + this newly selected feature.
    Then check that the cross-validation score of the model trained on
    this new dataset variant is lower than that of the model with
    the forward-selected features, or at least does not improve
    by more than the tol margin.
When `direction` is `'backward'`, instead of adding a new feature
    to selector.support_, try to remove one of the selected features at random,
    and check that the cross-validation score is either decreasing or
    not improving by more than the tol margin.
"""
X, y = make_regression(n_features=50, n_informative=10, random_state=0)
tol = 1e-3
sfs = SequentialFeatureSelector(
LinearRegression(),
n_features_to_select="auto",
tol=tol,
direction=direction,
cv=2,
)
sfs.fit(X, y)
selected_X = sfs.transform(X)
rng = np.random.RandomState(0)
added_candidates = list(set(range(X.shape[1])) - set(sfs.get_support(indices=True)))
added_X = np.hstack(
[
selected_X,
(X[:, rng.choice(added_candidates)])[:, np.newaxis],
]
)
removed_candidate = rng.choice(list(range(sfs.n_features_to_select_)))
removed_X = np.delete(selected_X, removed_candidate, axis=1)
plain_cv_score = cross_val_score(LinearRegression(), X, y, cv=2).mean()
sfs_cv_score = cross_val_score(LinearRegression(), selected_X, y, cv=2).mean()
added_cv_score = cross_val_score(LinearRegression(), added_X, y, cv=2).mean()
removed_cv_score = cross_val_score(LinearRegression(), removed_X, y, cv=2).mean()
assert sfs_cv_score >= plain_cv_score
if direction == "forward":
assert (sfs_cv_score - added_cv_score) <= tol
assert (sfs_cv_score - removed_cv_score) >= tol
else:
assert (added_cv_score - sfs_cv_score) <= tol
assert (removed_cv_score - sfs_cv_score) <= tol
# TODO: Remove test for n_features_to_select=None in 1.3
@pytest.mark.filterwarnings("ignore:Leaving `n_features_to_select` to ")
@pytest.mark.parametrize("direction", ("forward", "backward"))
@pytest.mark.parametrize(
"n_features_to_select, expected",
(
(0.1, 1),
(1.0, 10),
(0.5, 5),
(None, 5),
),
)
def test_n_features_to_select_float(direction, n_features_to_select, expected):
# Test passing a float as n_features_to_select
X, y = make_regression(n_features=10)
sfs = SequentialFeatureSelector(
LinearRegression(),
n_features_to_select=n_features_to_select,
direction=direction,
cv=2,
)
sfs.fit(X, y)
assert sfs.n_features_to_select_ == expected
@pytest.mark.parametrize("seed", range(10))
@pytest.mark.parametrize("direction", ("forward", "backward"))
@pytest.mark.parametrize(
"n_features_to_select, expected_selected_features",
[
(2, [0, 2]), # f1 is dropped since it has no predictive power
(1, [2]), # f2 is more predictive than f0 so it's kept
],
)
def test_sanity(seed, direction, n_features_to_select, expected_selected_features):
# Basic sanity check: 3 features, only f0 and f2 are correlated with the
# target, f2 having a stronger correlation than f0. We expect f1 to be
# dropped, and f2 to always be selected.
rng = np.random.RandomState(seed)
n_samples = 100
X = rng.randn(n_samples, 3)
y = 3 * X[:, 0] - 10 * X[:, 2]
sfs = SequentialFeatureSelector(
LinearRegression(),
n_features_to_select=n_features_to_select,
direction=direction,
cv=2,
)
sfs.fit(X, y)
assert_array_equal(sfs.get_support(indices=True), expected_selected_features)
# TODO: Remove test for n_features_to_select=None in 1.3
@pytest.mark.filterwarnings("ignore:Leaving `n_features_to_select` to ")
@pytest.mark.parametrize("n_features_to_select", ["auto", None])
def test_sparse_support(n_features_to_select):
# Make sure sparse data is supported
X, y = make_regression(n_features=10)
X = scipy.sparse.csr_matrix(X)
sfs = SequentialFeatureSelector(
LinearRegression(), n_features_to_select=n_features_to_select, cv=2
)
sfs.fit(X, y)
sfs.transform(X)
def test_nan_support():
# Make sure nans are OK if the underlying estimator supports nans
rng = np.random.RandomState(0)
n_samples, n_features = 40, 4
X, y = make_regression(n_samples, n_features, random_state=0)
nan_mask = rng.randint(0, 2, size=(n_samples, n_features), dtype=bool)
X[nan_mask] = np.nan
sfs = SequentialFeatureSelector(
HistGradientBoostingRegressor(), n_features_to_select="auto", cv=2
)
sfs.fit(X, y)
sfs.transform(X)
with pytest.raises(ValueError, match="Input X contains NaN"):
# LinearRegression does not support nans
SequentialFeatureSelector(
LinearRegression(), n_features_to_select="auto", cv=2
).fit(X, y)
def test_pipeline_support():
# Make sure that pipelines can be passed into SFS and that SFS can be
# passed into a pipeline
n_samples, n_features = 50, 3
X, y = make_regression(n_samples, n_features, random_state=0)
# pipeline in SFS
pipe = make_pipeline(StandardScaler(), LinearRegression())
sfs = SequentialFeatureSelector(pipe, n_features_to_select="auto", cv=2)
sfs.fit(X, y)
sfs.transform(X)
# SFS in pipeline
sfs = SequentialFeatureSelector(
LinearRegression(), n_features_to_select="auto", cv=2
)
pipe = make_pipeline(StandardScaler(), sfs)
pipe.fit(X, y)
pipe.transform(X)
# FIXME : to be removed in 1.3
def test_raise_deprecation_warning():
"""Check that we raise a FutureWarning with `n_features_to_select`."""
n_samples, n_features = 50, 3
X, y = make_regression(n_samples, n_features, random_state=0)
warn_msg = "Leaving `n_features_to_select` to None is deprecated"
with pytest.warns(FutureWarning, match=warn_msg):
SequentialFeatureSelector(LinearRegression()).fit(X, y)
@pytest.mark.parametrize("n_features_to_select", (2, 3))
def test_unsupervised_model_fit(n_features_to_select):
# Make sure that models without classification labels are not being
# validated
X, y = make_blobs(n_features=4)
sfs = SequentialFeatureSelector(
KMeans(n_init=1),
n_features_to_select=n_features_to_select,
)
sfs.fit(X)
assert sfs.transform(X).shape[1] == n_features_to_select
@pytest.mark.parametrize("y", ("no_validation", 1j, 99.9, np.nan, 3))
def test_no_y_validation_model_fit(y):
# Make sure that other non-conventional y labels are not accepted
X, clusters = make_blobs(n_features=6)
sfs = SequentialFeatureSelector(
KMeans(),
n_features_to_select=3,
)
with pytest.raises((TypeError, ValueError)):
sfs.fit(X, y)
```
#### File: sklearn/utils/_bunch.py
```python
class Bunch(dict):
"""Container object exposing keys as attributes.
Bunch objects are sometimes used as an output for functions and methods.
They extend dictionaries by enabling values to be accessed by key,
`bunch["value_key"]`, or by an attribute, `bunch.value_key`.
Examples
--------
>>> from sklearn.utils import Bunch
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super().__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __dir__(self):
return self.keys()
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __setstate__(self, state):
# Bunch pickles generated with scikit-learn 0.16.* have an non
# empty __dict__. This causes a surprising behaviour when
# loading these pickles scikit-learn 0.17: reading bunch.key
# uses __dict__ but assigning to bunch.key use __setattr__ and
# only changes bunch['key']. More details can be found at:
# https://github.com/scikit-learn/scikit-learn/issues/6196.
# Overriding __setstate__ to be a noop has the effect of
# ignoring the pickled __dict__
pass
``` |
{
"source": "jlopezpena/confluencebuilder",
"score": 2
} |
#### File: sphinxcontrib/confluencebuilder/config.py
```python
from .logger import ConfluenceLogger
import os.path
class ConfluenceConfig:
"""
confluence configuration validation utility class
This class is used to perform a series of sanity checks on a user's
configuration to ensure the building/publishing environment is using sane
options.
"""
@staticmethod
def validate(builder, log=True):
"""
validate a provided configuration
The provided configuration will be checked for sane configuration
options. The method will return True for an expected good configuration,
while returning False for a known bad configuration.
"""
errState = False
c = builder.config
env = builder.app.env
if c.confluence_footer_file:
if not os.path.isfile(os.path.join(env.srcdir,
c.confluence_footer_file)):
errState = True
if log:
ConfluenceLogger.error(
"""missing footer file
The option 'confluence_footer_file' has been provided to find a footer template
file from a relative location. Ensure the value is set to a proper file path.
""")
if c.confluence_header_file:
if not os.path.isfile(os.path.join(env.srcdir,
c.confluence_header_file)):
errState = True
if log:
ConfluenceLogger.error(
"""missing header file
The option 'confluence_header_file' has been provided to find a header template
file from a relative location. Ensure the value is set to a proper file path.
""")
if c.confluence_max_doc_depth:
depth = c.confluence_max_doc_depth
if not isinstance(depth, int) or depth < 0:
errState = True
if log:
ConfluenceLogger.error(
"""maximum document depth is not an integer value
When limiting the document depth permitted for a building/publishing event, the
maximum document depth must be defined as an integer value (not a float,
string, etc.).
""")
if c.confluence_publish_subset:
if not (isinstance(c.confluence_publish_subset, (tuple, list, set))
and all(isinstance(docname, str)
for docname in c.confluence_publish_subset)):
errState = True
if log:
ConfluenceLogger.error(
"""'confluence_publish_subset' should be a collection of strings""")
else:
for docname in c.confluence_publish_subset:
if not any(os.path.isfile(os.path.join(env.srcdir,
docname + suffix))
for suffix in c.source_suffix):
errState = True
if log:
ConfluenceLogger.error(
"""Document '%s' in 'confluence_publish_subset' not found""", docname)
if c.confluence_publish:
if c.confluence_disable_rest and c.confluence_disable_xmlrpc:
errState = True
if log:
ConfluenceLogger.error(
"""all publish protocols explicitly disabled
While publishing has been configured using 'confluence_publish', both REST and
XML-RPC have been explicitly disabled in the user configuration. This extension
cannot publish documents without a single publish protocol enabled.
""")
if not c.confluence_parent_page:
if c.confluence_parent_page_id_check:
errState = True
if log:
ConfluenceLogger.error(
"""parent page (holder) name not set
When a parent page identifier check has been configured with the option
'confluence_parent_page_id_check', no parent page name has been provided with
the 'confluence_parent_page' option. Ensure the name of the parent page is
provided as well.
""")
if not c.confluence_server_url:
errState = True
if log:
ConfluenceLogger.error(
"""confluence server url not provided
While publishing has been configured using 'confluence_publish', the Confluence
server URL has not. Ensure 'confluence_server_url' has been set to the target
Confluence instance to publish to.
""")
if not c.confluence_space_name:
errState = True
if log:
ConfluenceLogger.error(
"""confluence space name not provided
While publishing has been configured using 'confluence_publish', the Confluence
space name has not. Ensure 'confluence_space_name' has been set to the name of
the space under which content should be published.
""")
if not c.confluence_server_user and c.confluence_server_pass:
errState = True
if log:
ConfluenceLogger.error(
"""confluence username not provided
A publishing password has been configured with 'confluence_server_pass';
however, no username has been configured. Ensure 'confluence_server_user' is
properly set with the publisher's Confluence username.
""")
if c.confluence_ca_cert:
if not os.path.exists(c.confluence_ca_cert):
errState = True
if log:
ConfluenceLogger.error(
"""missing certificate authority
The option 'confluence_ca_cert' has been provided to find a certificate
authority file or path from a relative location. Ensure the value is set to a
proper file path.
""")
if c.confluence_client_cert:
if isinstance(c.confluence_client_cert, tuple):
cert_files = c.confluence_client_cert
else:
cert_files = (c.confluence_client_cert, None)
if len(cert_files) != 2:
errState = True
if log:
ConfluenceLogger.error(
"""invalid client certificate
The option 'confluence_client_cert' has been provided but there are too many
values. The client cert can either be a file/path to a certificate & key pair
or a tuple for the certificate and key in different files.
""")
for cert_file in cert_files:
if cert_file and not os.path.isfile(cert_file):
errState = True
if log:
ConfluenceLogger.error(
"""missing certificate file
The option 'confluence_client_cert' has been provided to find a client
certificate file from a relative location, but the file %s was not found.
Ensure the value is set to a proper file path and the file exists.
""" % cert_file
)
c.confluence_client_cert = cert_files
return not errState
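    # Illustrative call site (assumed, not part of this module): a builder would
    # typically run this check up front and abort the build on failure, e.g.
    #   if not ConfluenceConfig.validate(self):
    #       raise Exception('invalid configuration for the confluence builder')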
```
#### File: sphinxcontrib/confluencebuilder/util.py
```python
from .std.confluence import API_REST_BIND_PATH
from .std.confluence import API_XMLRPC_BIND_PATH
from hashlib import sha256
class ConfluenceUtil:
"""
confluence utility helper class
This class is used to hold a series of utility methods.
"""
@staticmethod
def hashAsset(asset):
"""
generate a hash of the provided asset
        Calculate a hash for an asset file (e.g. an image file). When publishing
assets as attachments for a Confluence page, hashes can be used to check
if an attachment needs to be uploaded again.
Args:
asset: the asset (file)
Returns:
the hash
"""
BLOCKSIZE = 65536
sha = sha256()
with open(asset, 'rb') as file:
buff = file.read(BLOCKSIZE)
while len(buff) > 0:
sha.update(buff)
buff = file.read(BLOCKSIZE)
return sha.hexdigest()
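    # Illustrative usage (assumed publisher-side logic; the file name is hypothetical):
    #   new_hash = ConfluenceUtil.hashAsset('_images/diagram.png')
    #   if new_hash != previously_published_hash:
    #       ...re-upload the attachment...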
@staticmethod
def normalizeBaseUrl(url):
"""
normalize a confluence base url
A Confluence base URL refers to the URL portion excluding the target
        API bind point. This method attempts to handle a series of user-provided
        URL values and determine the proper base URL to use.
"""
if url:
# removing any trailing forward slash user provided
if url.endswith('/'):
url = url[:-1]
# check for xml-rpc bind path; strip and return if found
if url.endswith(API_XMLRPC_BIND_PATH):
url = url[:-len(API_XMLRPC_BIND_PATH)]
else:
# check for rest bind path; strip and return if found
if url.endswith(API_REST_BIND_PATH):
url = url[:-len(API_REST_BIND_PATH)]
                # restore trailing forward slash
elif not url.endswith('/'):
url += '/'
return url
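    # Illustrative results, assuming API_REST_BIND_PATH is 'rest/api' and
    # API_XMLRPC_BIND_PATH is 'rpc/xmlrpc' (the actual values live in .std.confluence):
    #   'https://wiki.example.com/rest/api'    -> 'https://wiki.example.com/'
    #   'https://wiki.example.com/rpc/xmlrpc'  -> 'https://wiki.example.com/'
    #   'https://wiki.example.com'             -> 'https://wiki.example.com/'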
``` |
{
"source": "jlopez/portal",
"score": 2
} |
#### File: portal/portal/api.py
```python
from functools import wraps
import cookielib
import HTMLParser
import json
import os
import sys
import re
import urllib
import urllib2
import urlparse
import uuid
def cached(wrapped):
@wraps(wrapped)
def wrapper():
if not hasattr(wrapped, 'cache'):
wrapped.cache = wrapped()
return wrapped.cache
return wrapper
def cached_method(wrapped):
@wraps(wrapped)
def wrapper(self):
name = '%s_cache' % wrapped.__name__
if not hasattr(self, name):
setattr(self, name, wrapped(self))
return getattr(self, name)
return wrapper
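# Illustrative effect of the decorator (not executed): the first api.all_devices() call
# performs the service request and stores the result on self.all_devices_cache; later
# calls return the cached list until clear_cache() removes the *_cache attributes.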
def _ensure_parents_exist(filename):
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
assert not dirname or os.path.isdir(dirname), (
"Path %s is not a directory" % dirname)
class APIException(Exception): pass
class APIServiceException(APIException):
def __init__(self, info):
if info['userString'] != info['resultString']:
super(APIServiceException, self).__init__(
'%s (Error %s: %s)' % (info['userString'],
info['resultCode'], info['resultString']))
else:
super(APIServiceException, self).__init__(
'%s (Error %s)' % (info['userString'],
info['resultCode']))
self.info = info
self.code = info['resultCode']
class API(object):
LOGIN_URL = 'https://developer.apple.com/account/login.action'
DEVELOPER_URL = 'https://developer.apple.com'
DEVELOPER_SERVICES_URL = '%s/services-developerportal/QH65B2/account/ios' % DEVELOPER_URL
GET_TEAM_ID_URL = 'https://developer.apple.com/account/ios/certificate/certificateList.action'
class _LoginHTMLParser(HTMLParser.HTMLParser):
def handle_starttag(self, tag, attrs):
if tag == "form":
attrs = { k: v for k, v in attrs }
if attrs['name'] == 'appleConnectForm':
self.url = attrs['action']
def feed(self, data):
try:
HTMLParser.HTMLParser.feed(self, data)
except HTMLParser.HTMLParseError:
pass
def __init__(self, debug=False):
cookie_jar = cookielib.CookieJar()
processor = urllib2.HTTPCookieProcessor(cookie_jar)
self._opener = urllib2.build_opener(processor)
self._debug = debug
def login(self, user=None, password=None):
if not user or not password:
user, password = self._find_credentials()
try:
r = self._opener.open(self.LOGIN_URL)
parser = self._LoginHTMLParser()
page = r.read()
parser.feed(page)
if not parser.url:
if self._debug:
print >>sys.stderr, "Page contents:\n%s" % page
raise APIException("Login failed: unable to locate login URL (HTML scraping failure)")
scheme, netloc, _, _, _, _ = urlparse.urlparse(r.geturl())
url = '%s://%s%s' % (scheme, netloc, parser.url)
params = dict(theAccountName=user, theAccountPW=password,
theAuxValue='')
r = self._opener.open(url, urllib.urlencode(params))
r = self._opener.open(self.GET_TEAM_ID_URL)
page = r.read()
matcher = re.search(r'teamId=([A-Z0-9]*)', page)
if not matcher:
if self._debug:
print >>sys.stderr, "Login failed, page contents:\n%s" % page
raise APIException("Login failed, please check credentials (using %s)" % user)
self.team_id = matcher.group(1)
self.user = user
except urllib2.URLError as e:
raise e
def _api(self, cmd, form={}, **kwargs):
try:
if isinstance(form, (dict, list)):
form = urllib.urlencode(form)
kwargs['content-type'] = 'text/x-url-arguments'
kwargs['accept'] = 'application/json'
kwargs['requestId'] = str(uuid.uuid4())
kwargs['userLocale'] = 'en_US'
kwargs['teamId'] = self.team_id
query = urllib.urlencode(kwargs)
url = "%s/%s?%s" % (self.DEVELOPER_SERVICES_URL, cmd, query)
response = self._opener.open(url, form)
            assert response.getcode() == 200, "Error %s" % response.getcode()
data = json.loads(response.read())
rc = data['resultCode']
if rc not in [ 0, 8500 ]:
raise APIServiceException(data)
return data
except urllib2.URLError as e:
raise e
def _find_credentials(self):
# First try environment variables
try:
credentials = os.environ['PORTAL_CREDENTIALS']
user, password = credentials.split(':')
return user, password
except (KeyError, ValueError):
pass
# Now try .portalrc file
def search_path():
yield os.path.expanduser('~/.portalrc')
path = os.getcwd()
while True:
filename = os.path.join(path, '.portalrc')
if os.path.isfile(filename):
yield filename
break
if path == '/':
break
path = os.path.dirname(path)
import ConfigParser
group = os.environ.get('PORTAL_ENVIRONMENT', 'Default')
try:
cfg = ConfigParser.RawConfigParser()
cfg.read(search_path())
return cfg.get(group, 'user'), cfg.get(group, 'password')
except ConfigParser.Error:
raise APIException('Missing credentials '
'(.portalrc section [%s] / PORTAL_CREDENTIALS)' % group)
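    # Illustrative credential sources (hypothetical values):
    #   export PORTAL_CREDENTIALS='user@example.com:secret'
    # or a ~/.portalrc (or ./.portalrc) INI file such as:
    #   [Default]
    #   user = user@example.com
    #   password = secret
    # The section name comes from PORTAL_ENVIRONMENT and defaults to 'Default'.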
def _list_cert_requests(self):
data = self._api("certificate/listCertRequests", certificateStatus=0,
types=self.ALL_CERT_TYPES)
return data['certRequests']
def _list_app_ids(self):
data = self._api('identifiers/listAppIds') #, onlyCountLists='true')
return data['appIds']
def _list_provisioning_profiles(self):
data = self._api('profile/listProvisioningProfiles',
includeInactiveProfiles='true', onlyCountLists='true')
return data['provisioningProfiles']
def _list_devices(self, include_removed=True):
data = self._api('device/listDevices',
includeRemovedDevices='true' if include_removed else 'false')
return data['devices']
def clear_cache(self):
        for n in list(self.__dict__):  # copy keys; delattr mutates __dict__ during iteration
if n.endswith('_cache'):
delattr(self, n)
@cached_method
def all_cert_requests(self):
return self._list_cert_requests()
def list_cert_requests(self, typ):
if not isinstance(typ, list):
typ = [ typ ]
return [ c for c in self.all_cert_requests()
if c['certificateTypeDisplayId'] in typ ]
@cached_method
def all_app_ids(self):
return self._list_app_ids()
def get_app_id(self, app_id):
if isinstance(app_id, (list, tuple)):
return [ self.get_app_id(a) for a in app_id ]
if isinstance(app_id, dict):
return app_id
if not isinstance(app_id, basestring):
raise APIException('invalid app_id %s' % app_id)
try:
if '.' in app_id:
return next(a for a in self.all_app_ids()
if a['identifier'] == app_id)
else:
return next(a for a in self.all_app_ids()
if a['appIdId'] == app_id)
except StopIteration:
return None
@cached_method
def all_devices(self):
return self._list_devices()
def get_device(self, device, return_id_if_missing=False):
if isinstance(device, (list, tuple)):
return [ self.get_device(d,
return_id_if_missing=return_id_if_missing)
for d in device ]
if isinstance(device, dict):
return device
if not isinstance(device, basestring):
raise APIException('invalid device %s' % device)
try:
if re.match('[0-9a-f]{40}', device, re.I):
return next(d for d in self.all_devices()
if d['deviceNumber'] == device)
else:
return next(d for d in self.all_devices()
if d['deviceId'] == device)
except StopIteration:
if return_id_if_missing:
return device
return None
def add_device(self, udid, name=None):
name = name or udid
form = []
form.append(('register', 'single'))
form.append(('name', name))
form.append(('deviceNumber', udid))
form.append(('deviceNames', name))
form.append(('deviceNumbers', udid))
data = self._api("device/addDevice", form=form)
return data['device']
def delete_device(self, device):
if not isinstance(device, (basestring, dict)):
raise APIException('invalid device %s' % device)
device = self.get_device(device)
self._api('device/deleteDevice',
deviceId=device['deviceId'])
def enable_device(self, device):
if not isinstance(device, (basestring, dict)):
raise APIException('invalid device %s' % device)
device = self.get_device(device)
data = self._api('device/enableDevice',
displayId=device['deviceId'],
deviceNumber=device['deviceNumber'])
return data['device']
@cached_method
def all_provisioning_profiles(self):
return self._list_provisioning_profiles()
def get_provisioning_profile(self, profile, return_id_if_missing=False):
if isinstance(profile, (list, tuple)):
return [ self.get_provisioning_profile(p,
return_id_if_missing=return_id_if_missing)
for p in profile ]
if isinstance(profile, dict):
return profile
if not isinstance(profile, basestring):
raise APIException('invalid profile id %s' % profile)
try:
return next(p for p in self.all_provisioning_profiles()
if p['provisioningProfileId'] == profile)
except StopIteration:
if return_id_if_missing:
return profile
return None
def create_provisioning_profile(self, profile_type, app_id, certificates=None,
devices=None, name=None):
if not 0 <= profile_type < 3:
raise APIException('profile_type must be one of ' +
', '.join(t for t in dir(API) if t.startswith('PROFILE_TYPE_')))
if not isinstance(app_id, (dict, basestring)):
raise APIException('invalid app_id %s' % app_id)
distribution_type = 'limited adhoc store'.split()[profile_type]
if profile_type == self.PROFILE_TYPE_DEVELOPMENT:
distribution_type_label = 'Distribution'
else:
distribution_type_label = 'Development'
app_id = self.get_app_id(app_id)
if certificates is None:
if profile_type == API.PROFILE_TYPE_DEVELOPMENT:
cert_type = API.CERT_TYPE_IOS_DEVELOPMENT
else:
cert_type = API.CERT_TYPE_IOS_DISTRIBUTION
certificates = self.list_cert_requests(cert_type)
certificates = self._unwrap(certificates, 'certificateId')
devices = self._unwrap(devices or (), 'deviceId')
if not name:
name = '%s %s' % (app_id['name'],
'Development AdHoc AppStore'.split()[profile_type])
form = []
form.append(('distributionType', distribution_type))
form.append(('appIdId', app_id['appIdId']))
form.append(('certificateIds', self._format_list(certificates)))
for device in devices:
form.append(('devices', device))
if devices:
form.append(('deviceIds', self._format_list(devices)))
form.append(('template', ''))
form.append(('returnFullObjects', 'false'))
form.append(('provisioningProfileName', name))
form.append(('distributionTypeLabel', distribution_type_label))
form.append(('appIdName', app_id['name']))
form.append(('appIdPrefix', app_id['prefix']))
form.append(('appIdIdentifier', app_id['identifier']))
form.append(('certificateCount', len(certificates)))
form.append(('deviceCount', len(devices) if devices else ''))
data = self._api("profile/createProvisioningProfile", form=form)
return data['provisioningProfile']
def delete_provisioning_profile(self, profile):
profile = self._unwrap(profile, 'provisioningProfileId')
self._api('profile/deleteProvisioningProfile',
provisioningProfileId=profile)
def _format_list(self, objs):
if objs:
return '[%s]' % ','.join(objs)
return ''
def _unwrap(self, obj, key):
if obj is None:
return obj
if isinstance(obj, (list, tuple)):
return [ self._unwrap(o, key) for o in obj ]
if isinstance(obj, basestring):
return obj
return obj[key]
def update_provisioning_profile(self, profile, name=None, app_id=None,
certificate_ids=None, device_ids=None, distribution_type=None):
form = []
form.append(('provisioningProfileId', profile['provisioningProfileId']))
form.append(('distributionType', distribution_type or profile['distributionMethod']))
form.append(('returnFullObjects', 'false'))
form.append(('provisioningProfileName', name or profile['name']))
form.append(('appIdId', app_id or profile['appId']['appIdId']))
for certificate_id in certificate_ids or profile['certificateIds']:
if isinstance(certificate_id, dict):
certificate_id = certificate_id['certificateId']
form.append(('certificateIds', certificate_id))
if device_ids is None:
device_ids = profile['deviceIds']
for device_id in device_ids:
if isinstance(device_id, dict):
device_id = device_id['deviceId']
form.append(('deviceIds', device_id))
return self._api('profile/regenProvisioningProfile', form=form)
def _make_dev_url(self, path, **kwargs):
query = urllib.urlencode(kwargs)
return "%s/%s.action?%s" % (self.DEVELOPER_URL, path, query)
def download_profile(self, profile, file_or_filename):
try:
if isinstance(profile, dict):
profile = profile['provisioningProfileId']
url = self._make_dev_url('account/ios/profile/profileContentDownload',
displayId=profile)
r = self._opener.open(url)
assert r.getcode() == 200, 'Unable to download profile [%s]' % profile
profile = r.read()
if isinstance(file_or_filename, basestring):
_ensure_parents_exist(file_or_filename)
with open(file_or_filename, 'wb') as f:
f.write(profile)
else:
file_or_filename.write(profile)
except urllib2.HTTPError as e:
if e.getcode() == 404:
raise APIException("Profile '%s' not found" % profile)
raise e
def profile_type(self, profile):
if isinstance(profile, int):
if not 0 <= profile < len(API._PROFILE_TYPE_LABELS):
raise APIException('Invalid profile type %s' % profile)
return profile
if isinstance(profile, basestring):
try:
return self.profile_type(int(profile))
except ValueError:
pass
try:
return API._PROFILE_TYPE_LABELS.index(profile)
except ValueError:
raise APIException("Invalid profile type '%s'" % profile)
if not isinstance(profile, dict):
raise APIException('Invalid profile %s' % profile)
if profile['type'] == 'Development':
return API.PROFILE_TYPE_DEVELOPMENT
if profile['deviceCount']:
return API.PROFILE_TYPE_ADHOC
return API.PROFILE_TYPE_APPSTORE
def profile_type_name(self, profile):
return API._PROFILE_TYPE_LABELS[self.profile_type(profile)]
def is_profile_expired(self, profile):
return profile['status'] == 'Expired'
PROFILE_TYPE_DEVELOPMENT = 0
PROFILE_TYPE_ADHOC = 1
PROFILE_TYPE_APPSTORE = 2
_PROFILE_TYPE_LABELS = 'development adhoc appstore'.split()
ALL_CERT_TYPES = "5QPB9NHCEI,R58UK2EWSO,9RQEK7MSXA,LA30L5BJEU,BKLRAVXMGM,3BQKVH9I2X,Y3B2F3TYSI"
(CERT_TYPE_IOS_DEVELOPMENT, CERT_TYPE_IOS_DISTRIBUTION,
CERT_TYPE_UNKNOWN_1, CERT_TYPE_UNKNOWN_2,
CERT_TYPE_APN_DEVELOPMENT, CERT_TYPE_APN_PRODUCTION,
CERT_TYPE_UNKNOWN_3) = ALL_CERT_TYPES.split(',')
CERT_TYPE_IOS = [ CERT_TYPE_IOS_DEVELOPMENT, CERT_TYPE_IOS_DISTRIBUTION ]
```
#### File: jlopez/portal/portal.py
```python
from functools import wraps
import cookielib
import HTMLParser
import json
import os
import re
import sys
import urllib
import urllib2
import urlparse
def cached(wrapped):
@wraps(wrapped)
def wrapper():
if not hasattr(wrapped, 'cache'):
wrapped.cache = wrapped()
return wrapped.cache
return wrapper
def cached_method(wrapped):
@wraps(wrapped)
def wrapper(self):
name = '%s_cache' % wrapped.__name__
if not hasattr(self, name):
setattr(self, name, wrapped(self))
return getattr(self, name)
return wrapper
def uuid():
import uuid
return str(uuid.uuid4())
class APIException(Exception):
def __init__(self, info):
super(APIException, self).__init__(
'%s (Error %s: %s)' % (info['userString'], info['resultCode'], info['resultString']))
self.info = info
self.code = info['resultCode']
class API(object):
LOGIN_URL = 'https://developer.apple.com/account/login.action'
DEVELOPER_URL = 'https://developer.apple.com'
DEVELOPER_SERVICES_URL = '%s/services-developerportal/QH65B2/account/ios' % DEVELOPER_URL
GET_TEAM_ID_URL = 'https://developer.apple.com/account/ios/certificate/certificateList.action'
class LoginHTMLParser(HTMLParser.HTMLParser):
def handle_starttag(self, tag, attrs):
if tag == "form":
attrs = { k: v for k, v in attrs }
if attrs['name'] == 'appleConnectForm':
self.url = attrs['action']
def feed(self, data):
try:
HTMLParser.HTMLParser.feed(self, data)
except HTMLParser.HTMLParseError:
pass
def __init__(self):
cookie_jar = cookielib.CookieJar()
processor = urllib2.HTTPCookieProcessor(cookie_jar)
self.opener = urllib2.build_opener(processor)
def login(self, user=None, password=<PASSWORD>):
try:
r = self.opener.open(self.LOGIN_URL)
assert r.getcode() == 200, "Unable to fetch login page"
parser = self.LoginHTMLParser()
parser.feed(r.read())
assert parser.url, 'Unable to locate login post URL'
scheme, netloc, _, _, _, _ = urlparse.urlparse(r.geturl())
url = '%s://%s%s' % (scheme, netloc, parser.url)
params = dict(theAccountName=user, theAccountPW=password,
theAuxValue='')
r = self.opener.open(url, urllib.urlencode(params))
assert r.getcode() == 200, "Unable to login"
r = self.opener.open(self.GET_TEAM_ID_URL)
assert r.getcode() == 200, "Unable to retrieve Team ID"
matcher = re.search(r'teamId=([A-Z0-9]*)', r.read())
assert matcher, "Unable to locate Team ID"
self.team_id = matcher.group(1)
except urllib2.URLError as e:
raise e
def _api(self, cmd, form={}, **kwargs):
if isinstance(form, (dict, list)):
form = urllib.urlencode(form)
kwargs['content-type'] = 'text/x-url-arguments'
kwargs['accept'] = 'application/json'
kwargs['requestId'] = uuid()
kwargs['userLocale'] = 'en_US'
kwargs['teamId'] = self.team_id
query = urllib.urlencode(kwargs)
url = "%s/%s?%s" % (self.DEVELOPER_SERVICES_URL, cmd, query)
response = self.opener.open(url, form)
        assert response.getcode() == 200, "Error %s" % response.getcode()
data = json.loads(response.read())
rc = data['resultCode']
if rc not in [ 0, 8500 ]:
raise APIException(data)
return data
def _list_cert_requests(self):
data = self._api("certificate/listCertRequests", certificateStatus=0,
types=self.ALL_CERT_TYPES)
return data['certRequests']
def _list_app_ids(self):
data = self._api('identifiers/listAppIds') #, onlyCountLists='true')
return data['appIds']
def _list_provisioning_profiles(self):
data = self._api('profile/listProvisioningProfiles',
includeInactiveProfiles='true', onlyCountLists='true')
return data['provisioningProfiles']
def _list_devices(self, include_removed=True):
data = self._api('device/listDevices',
includeRemovedDevices='true' if include_removed else 'false')
return data['devices']
@cached_method
def all_cert_requests(self):
return self._list_cert_requests()
@cached_method
def all_app_ids(self):
return self._list_app_ids()
@cached_method
def all_provisioning_profiles(self):
return self._list_provisioning_profiles()
@cached_method
def all_devices(self):
return self._list_devices()
def clear_cache(self):
        for n in list(self.__dict__):  # copy keys; delattr mutates __dict__ during iteration
if n.endswith('_cache'):
delattr(self, n)
def list_cert_requests(self, typ):
if not isinstance(typ, list):
typ = [ typ ]
return [ c for c in self.all_cert_requests()
if c['certificateTypeDisplayId'] in typ ]
def update_provisioning_profile(self, profile, name=None, app_id=None,
certificate_ids=None, device_ids=None, distribution_type=None):
form = []
form.append(('provisioningProfileId', profile['provisioningProfileId']))
form.append(('distributionType', distribution_type or profile['distributionMethod']))
form.append(('returnFullObjects', 'false'))
form.append(('provisioningProfileName', name or profile['name']))
form.append(('appIdId', app_id or profile['appId']['appIdId']))
for certificate_id in certificate_ids or profile['certificateIds']:
if isinstance(certificate_id, dict):
certificate_id = certificate_id['certificateId']
form.append(('certificateIds', certificate_id))
if device_ids is None:
device_ids = profile['deviceIds']
for device_id in device_ids:
if isinstance(device_id, dict):
device_id = device_id['deviceId']
form.append(('deviceIds', device_id))
return self._api('profile/regenProvisioningProfile', form=form)
def _make_dev_url(self, path, **kwargs):
query = urllib.urlencode(kwargs)
return "%s/%s.action?%s" % (self.DEVELOPER_URL, path, query)
def download_profile(self, profile, filename):
if isinstance(profile, dict):
profile = profile['provisioningProfileId']
url = self._make_dev_url('account/ios/profile/profileContentDownload',
displayId=profile)
r = self.opener.open(url)
assert r.getcode() == 200, 'Unable to download profile [%s]' % profile
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
assert not dirname or os.path.isdir(dirname), "Path %s is not a directory" % dirname
with open(filename, 'wb') as f:
f.write(r.read())
ALL_CERT_TYPES = "5QPB9NHCEI,R58UK2EWSO,9RQEK7MSXA,LA30L5BJEU,BKLRAVXMGM,3BQKVH9I2X,Y3B2F3TYSI"
(CERT_TYPE_IOS_DEVELOPMENT, CERT_TYPE_IOS_DISTRIBUTION,
CERT_TYPE_UNKNOWN_1, CERT_TYPE_UNKNOWN_2,
CERT_TYPE_APN_DEVELOPMENT, CERT_TYPE_APN_PRODUCTION,
CERT_TYPE_UNKNOWN_3) = ALL_CERT_TYPES.split(',')
CERT_TYPE_IOS = [ CERT_TYPE_IOS_DEVELOPMENT, CERT_TYPE_IOS_DISTRIBUTION ]
if __name__ == '__main__':
import getopt
optlist, args = getopt.getopt(sys.argv[1:], 'u:p:a:')
opts = dict((o[1:], a or True) for o, a in optlist)
api = API()
api.login(opts.get('u', '<EMAIL>'),
opts.get('p', 'Fr13ndgr4ph'))
if args[0] == 'update-profiles':
dev_certs = api.list_cert_requests(typ=api.CERT_TYPE_IOS_DEVELOPMENT)
dist_certs = api.list_cert_requests(typ=api.CERT_TYPE_IOS_DISTRIBUTION)
devices = api.all_devices()
profiles = api.all_provisioning_profiles()
for profile in profiles:
identifier = profile['appId']['identifier']
if 'a' in opts and identifier != opts['a']:
continue
if 'DISTRO' in profile['name']:
is_appstore = True
is_dev = is_adhoc = False
print >>sys.stderr, identifier
devs = devices if profile['deviceCount'] and 'DISTRO' not in profile['name'] and 'AppStore' not in profile['name'] else []
certs = dev_certs if profile['type'] == 'Development' else dist_certs
api.update_provisioning_profile(profile,
device_ids=devs, certificate_ids=certs)
elif args[0] == 'download-profiles':
profiles = api.all_provisioning_profiles()
for profile in profiles:
identifier = profile['appId']['identifier']
if 'a' in opts and identifier != opts['a']:
continue
is_wildcard = identifier == '*'
is_dev = profile['type'] == 'Development'
is_adhoc = not is_dev and profile['deviceCount'] > 0
is_appstore = not is_dev and not is_adhoc
if is_dev:
filename = 'development.mobileprovision'
elif is_adhoc:
filename = 'adhoc.mobileprovision'
else:
filename = 'appstore.mobileprovision'
if not is_wildcard:
filename = '%s/%s' % (identifier, filename)
api.download_profile(profile, filename)
else:
profiles = api.all_provisioning_profiles()
print json.dumps([ p for p in profiles if p['name'].startswith('Test') ], indent=4)
```
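A minimal usage sketch of the `API` class above, mirroring the `__main__` flow; the import path, credentials and output directory are assumptions, not part of the original repo.
```python
# Hypothetical driver script; assumes the class above is importable as `portal.API`.
from portal import API

api = API()
api.login('dev-account@example.com', 'placeholder-password')  # placeholder credentials

# Fetch iOS development certificate requests (cached after the first call)
dev_certs = api.list_cert_requests(typ=api.CERT_TYPE_IOS_DEVELOPMENT)

# Download every development profile into a per-app directory
for profile in api.all_provisioning_profiles():
    if profile['type'] == 'Development':
        identifier = profile['appId']['identifier']
        api.download_profile(profile, '%s/development.mobileprovision' % identifier)
```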
#### File: jlopez/portal/setup.py
```python
import os
import re
from setuptools import setup, find_packages
VERSIONFILE = os.path.join('portal', '_version.py')
VSRE = r'^__version__ = [\'"](.*?)[\'"]'
def get_version():
verstrline = open(VERSIONFILE, 'rt').read()
mo = re.search(VSRE, verstrline, re.M)
if mo:
return mo.group(1)
else:
raise RuntimeError(
"Unable to find version string in %s." % VERSIONFILE)
setup(
name="portal",
version=get_version(),
description="Interact with Apple's Provisioning Portal, stay sane",
author="<NAME>",
author_email="<EMAIL>",
url = "https://www.github.com/jlopez/portal",
license = "MIT",
packages=find_packages(),
include_package_data=True,
entry_points=dict(
console_scripts=[
'portal = portal.cli:main'
],
),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
]
)
``` |
{
"source": "jlopezscala/moto",
"score": 2
} |
#### File: moto/sqs/exceptions.py
```python
from __future__ import unicode_literals
from moto.core.exceptions import RESTError
class MessageNotInflight(Exception):
description = "The message referred to is not in flight."
status_code = 400
class ReceiptHandleIsInvalid(RESTError):
code = 400
def __init__(self):
super(ReceiptHandleIsInvalid, self).__init__(
"ReceiptHandleIsInvalid", "The input receipt handle is invalid."
)
class MessageAttributesInvalid(RESTError):
code = 400
def __init__(self, description):
super(MessageAttributesInvalid, self).__init__(
"MessageAttributesInvalid", description
)
class QueueDoesNotExist(RESTError):
code = 400
def __init__(self):
        super(QueueDoesNotExist, self).__init__(
"AWS.SimpleQueueService.NonExistentQueue",
"The specified queue does not exist for this wsdl version.",
template="wrapped_single_error",
)
class QueueAlreadyExists(RESTError):
code = 400
def __init__(self, message):
super(QueueAlreadyExists, self).__init__("QueueAlreadyExists", message)
class EmptyBatchRequest(RESTError):
code = 400
def __init__(self):
super(EmptyBatchRequest, self).__init__(
"EmptyBatchRequest",
"There should be at least one SendMessageBatchRequestEntry in the request.",
)
class InvalidBatchEntryId(RESTError):
code = 400
def __init__(self):
super(InvalidBatchEntryId, self).__init__(
"InvalidBatchEntryId",
"A batch entry id can only contain alphanumeric characters, "
"hyphens and underscores. It can be at most 80 letters long.",
)
class BatchRequestTooLong(RESTError):
code = 400
def __init__(self, length):
super(BatchRequestTooLong, self).__init__(
"BatchRequestTooLong",
"Batch requests cannot be longer than 262144 bytes. "
"You have sent {} bytes.".format(length),
)
class BatchEntryIdsNotDistinct(RESTError):
code = 400
def __init__(self, entry_id):
super(BatchEntryIdsNotDistinct, self).__init__(
"BatchEntryIdsNotDistinct", "Id {} repeated.".format(entry_id)
)
class TooManyEntriesInBatchRequest(RESTError):
code = 400
def __init__(self, number):
super(TooManyEntriesInBatchRequest, self).__init__(
"TooManyEntriesInBatchRequest",
"Maximum number of entries per request are 10. "
"You have sent {}.".format(number),
)
class InvalidAttributeName(RESTError):
code = 400
def __init__(self, attribute_name):
super(InvalidAttributeName, self).__init__(
"InvalidAttributeName", "Unknown Attribute {}.".format(attribute_name)
)
class InvalidAttributeValue(RESTError):
code = 400
def __init__(self, attribute_name):
super(InvalidAttributeValue, self).__init__(
"InvalidAttributeValue",
"Invalid value for the parameter {}.".format(attribute_name),
)
class InvalidParameterValue(RESTError):
code = 400
def __init__(self, message):
super(InvalidParameterValue, self).__init__("InvalidParameterValue", message)
class MissingParameter(RESTError):
code = 400
def __init__(self, parameter):
super(MissingParameter, self).__init__(
"MissingParameter",
"The request must contain the parameter {}.".format(parameter),
)
class OverLimit(RESTError):
code = 403
def __init__(self, count):
super(OverLimit, self).__init__(
"OverLimit", "{} Actions were found, maximum allowed is 7.".format(count)
)
``` |
{
"source": "jlopezvi/Consensus",
"score": 2
} |
#### File: jlopezvi/Consensus/app.py
```python
import os
from flask import Flask,jsonify,json, flash
from crossdomain import crossdomain
from flask import request,render_template,redirect,url_for
import ast
import json
from communityManager import saveCommunity,deleteCommunity,addCommunityToContact,getCommunities
from participantManager import _get_participant_node, remove_user_aux, get_all_participants_admin_aux, \
get_participant_followers_info_aux,get_participant_followings_info_aux, get_fullname_for_participant_unrestricted_aux,\
get_fullname_for_participant_aux, registration_aux, get_participant_data_aux, modify_user_data_aux, \
get_participant_data_by_email_unrestricted_aux, get_all_public_participants_for_user_aux, if_participant_exists_by_email_aux, \
add_following_contact_to_user_aux, remove_following_contact_to_user_aux, get_user_data_aux, modify_user_password_aux
from participantManager import get_participantnotifications_for_user_aux, remove_notification_from_participant1_to_participant2_aux
from ideaManager import get_ideas_data_created_by_participant_aux, get_ideas_created_by_participant_aux,\
add_idea_to_user_aux, vote_on_idea_aux, modify_idea_aux, remove_idea_aux, _get_supporters_emails_for_idea_aux, \
_get_volunteers_emails_for_idea_aux, get_vote_statistics_for_idea_aux, get_voting_rel_between_user_and_idea_aux, \
redflag_idea_aux, get_all_ideas_admin_aux, get_idea_data_admin_aux, get_idea_node_data_aux, get_idea_data_for_user_aux
from ideaManager import get_ideanotifications_for_user_aux, remove_notification_from_idea_to_participant_aux, \
_do_tasks_for_idea_editedproposal
from webManager import ideas_for_newsfeed_aux, if_ideas_for_newsfeed_aux, ideas_for_home_aux, registration_receive_emailverification_aux, \
registration_from_invitation_aux, registration_send_invitation_aux, do_cron_tasks_aux, get_topten_ideas_aux
import logging
import flask_login
from user_authentification import User
from uuid_token import generate_confirmation_token, confirm_token
from flask_mail import Mail
from utils import send_email
#TODO: logging, sending emails when errors take place.
#logging.basicConfig(level=logging.DEBUG)
################
#### config ####
################
app = Flask(__name__)
# app.debug = True
app.config.from_object('config.BaseConfig')
DEBUG=False
try:
os.environ['APP_SETTINGS']
app.config.from_object(os.environ['APP_SETTINGS'])
DEBUG = app.config['DEBUG']
except KeyError:
pass
MAIL_DEFAULT_SENDER=app.config['MAIL_DEFAULT_SENDER']
SUPPORT_RATE_MIN=app.config['SUPPORT_RATE_MIN']
SUPPORTERS_CHAR_NUM_MAX=app.config['SUPPORTERS_CHAR_NUM_MAX']
REJECTORS_CHAR_NUM_MAX=app.config['REJECTORS_CHAR_NUM_MAX']
####################
#### extensions ####
####################
#flask-mail
mail = Mail(app)
#flask_login
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
#app.secret_key = 'super secret string' # Change this!
@login_manager.user_loader
def user_loader(email):
return User(email)
#@<EMAIL>
#def unauthorized_handler():
# return 'Unauthorized'
################################
#### API, WORKING NOW ####
################################
@app.route('/test')
def test():
return url_for('static', filename='images/ideas/a.jpg')
@app.route('/do_tasks_for_idea_editedproposal_TEST/<idea_index>', methods=['POST'])
def do_tasks_for_idea_editedproposal_TEST(idea_index):
return _do_tasks_for_idea_editedproposal(idea_index)
############################################
# API
############################################
@app.route('/')
def hello(message=None):
return render_template('login/login.html',message=message)
@app.route('/newsfeed')
@flask_login.login_required
#user_email=flask_login.current_user.id
def newsfeed():
user_email = flask_login.current_user.id
message = {"user": user_email}
return render_template('login/newsfeed.html', message = message)
@app.route('/home')
@flask_login.login_required
#user_email=flask_login.current_user.id
def home():
user_email = flask_login.current_user.id
message = {"user": user_email}
return render_template('home.html', message = message)
@app.route('/participants')
@app.route('/participants/<participant_email>')
@flask_login.login_required
def participants(participant_email=None):
user_email = flask_login.current_user.id
message = {"user": user_email, "participant": participant_email}
# if the same login user
if user_email == participant_email:
return redirect(url_for('participants'))
else:
return render_template('participants.html', message = message)
@app.route('/topten')
@flask_login.login_required
def topten():
user_email = flask_login.current_user.id
message = {"user": user_email}
return render_template('topten.html', message = message)
##############
# PARTICIPANT MANAGER
##############
# input: json {"email":"asdf@asdf", "password":"<PASSWORD>"}
# output:
# json {"result":"Wrong: Bad e-mail"} / json {"result": "Wrong: Bad password"}
# / login and json {"result": "OK"}
@app.route('/login', methods=['POST'])
def login():
login = request.get_json(force=True)
    user_to_check = _get_participant_node(login['email'])
if user_to_check is None :
return jsonify({"result":"Wrong: Bad e-mail"})
if login['password'] == user_to_check['password']:
user = User(login['email'])
flask_login.login_user(user)
return jsonify({"result": "OK"})
else:
return jsonify({"result": "Wrong: Bad password"})
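# --- Illustrative sketch (not part of the original file) ---------------------
# Exercising the /login route above with Flask's test client; the e-mail and
# password below are placeholders and assume the participant node exists.
#
#   with app.test_client() as client:
#       resp = client.post('/login',
#                          data=json.dumps({"email": "user@example.com",
#                                           "password": "secret"}),
#                          content_type='application/json')
#       # resp.data -> '{"result": "OK"}' on success
# ------------------------------------------------------------------------------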
# input: user_index logged in
# output: json {"result": "OK"}
@app.route('/logout')
def logout():
flask_login.logout_user()
return jsonify({"result": "OK"})
# input: application/json
# {"fullname":"<NAME>","email":"<EMAIL>", "username": "jlopezvi",
# "position":"employee", "group":"Marketing", "password":"<PASSWORD>",
# "host_email":"asdf@das"/null, "ifpublicprofile":true/false,
# "ifregistrationfromemail":true/false, "profilepic": "base64_string"/null}
# *Group: Governing Board, Marketing, Sales, Technical, Human Resources
# output: json
# 1. Wrong (participant registered already!)
# {"result":"Wrong","ifemailexists":true,"ifemailexists_msg":"message"}
# 2. OK (participant registered already but e-mail not verified yet. Sends new e-mail for verification) -->
# {"result": "OK: Participant registered previously, resend email verification",
# "ifemailexists":true, "ifemailexists_msg":"message",
# "ifemailverified":false,"ifemailverified_msg":"message"}
# 3. OK (4 different normal cases of registration)
# {"result":"OK", "ifhost":true/false,"ifhost_msg":"message",
# "ifemailverified":true/false,"ifemailverified_msg":"message"})
# *Note: when "ifemailverified" is "true", the user is logged in
# *Note: when "ifemailverified" is "false", a verification e-mail is sent
# *Note: when "ifhost" is "true", the user starts following the host.
@app.route('/registration', methods=['POST'])
def registration():
inputdict = request.get_json()
return registration_aux(inputdict)
# input: user_email (user logged in)
# application/json ALL FIELDS ARE OPTIONAL !
# {"fullname": "<NAME>", "new_email": "<EMAIL>",
# "username": "jlopezvi",
# "position":"employee", "group": "IT", "password": "<PASSWORD>",
# "ifpublicprofile": true/false,, "ifsupportingproposalsvisible" : true/false,
# "ifrejectingproposalsvisible" : true/false, "profilepic": "base64_string"/null
# }
# Output: json
# 1. {'result': 'Wrong: New e-mail already exists'}
# 2. {'result': 'OK'}
@app.route('/modify_user_data', methods=['PUT'])
@app.route('/modify_user_data/<user_email_DEBUG>', methods=['PUT'])
def modify_user_data(user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
inputdict = request.get_json()
return modify_user_data_aux(inputdict, user_email)
# input: user_email (user logged in)
# application/json
# {"old_password": "<PASSWORD>", "new_password": "<PASSWORD>" }
# Output: json
# 1. {'result': 'Wrong: Wrong current password'}
# 2. {'result': 'OK'}
@app.route('/modify_user_password', methods=['PUT'])
@app.route('/modify_user_password/<user_email_DEBUG>', methods=['PUT'])
def modify_user_password(user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
inputdict = request.get_json()
return modify_user_password_aux(inputdict, user_email)
# input: user_email (user logged in)
# Output: application/json
# {"result": "OK", "data": data}
# data = {"fullname":"<NAME>", "email": "<EMAIL>",
# "username": "jlopezvi",
# "position": "employee", "group": "IT", "password": "<PASSWORD>",
# "ifpublicprofile": true/false, "ifsupportingproposalsvisible" : true/false,
# "ifrejectingproposalsvisible" : true/false, "profilepic_url": "static/.../pic.jpg"
# }
@app.route('/get_user_data', methods=['GET'])
@app.route('/get_user_data/<user_email_DEBUG>', methods=['GET'])
def get_user_data(user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return get_user_data_aux(user_email)
# input: user_email (user logged in)
# output: json {"result": "OK"}
@app.route('/remove_user', methods=['DELETE'])
@app.route('/remove_user/<user_email_DEBUG>', methods=['DELETE'])
def remove_user(user_email_DEBUG=None) :
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return remove_user_aux(user_email)
# input: participant_email: <EMAIL>, user_email (user logged in)
# output: JSON [2 possibilities, according to privacy restrictions]
# 1. {"result":"OK", "ifallowed":true, "participant_data": participant_data }
# participant_data:
# {
# 'id': 'email',
# 'profilepic_url': 'static/.../pic.jpg',
# 'username': 'John',
# 'fullname': '<NAME>',
# 'ideas_num': 5,
# 'followers_num': 5,
# 'followings_num': 2
# }
# 2. {"result":"OK", "ifallowed":false, "participant_data": {} }
@app.route('/get_participant_data/<participant_email>')
@app.route('/get_participant_data/<participant_email>/<user_email_DEBUG>')
def get_participant_data(participant_email, user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return get_participant_data_aux(participant_email, user_email)
# input: participant_email: <EMAIL>, user_email (user logged in)
# output: json {"result":"OK", "participant_data": participant_data }
# participant_data:
# {
# 'id': 'email',
# 'profilepic_url': 'static/.../pic.jpg',
# 'username': 'John',
# 'fullname': '<NAME>',
# 'position': 'assistant'
# 'group': 'Marketing'
# 'ideas_num': 5,
# 'followers_num': 5,
# 'followings_num': 2
# }
@app.route('/get_participant_data_by_email_unrestricted/<participant_email>')
@app.route('/get_participant_data_by_email_unrestricted/<participant_email>/<user_email_DEBUG>')
def get_participant_data_by_email_unrestricted(participant_email, user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return get_participant_data_by_email_unrestricted_aux(participant_email, user_email)
# input: email
# output: json {"result" : true / false }
@app.route('/if_participant_exists_by_email/<participant_email>')
def if_participant_exists_by_email(participant_email):
return if_participant_exists_by_email_aux(participant_email)
# input: participant_email, user_email (user logged in)
# output: json {"result": "OK", "ifallowed": ifallowed, "fullname": fullname}
@app.route('/get_fullname_for_participant/<participant_email>', methods=['GET'])
@app.route('/get_fullname_for_participant/<participant_email>/<user_email_DEBUG>', methods=['GET'])
def get_fullname_for_participant(participant_email, user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return get_fullname_for_participant_aux(participant_email, user_email)
# input: participant_email
# output: json {"result": "OK", "fullname": fullname}
@app.route('/get_fullname_for_participant_unrestricted/<participant_email>', methods=['GET'])
def get_fullname_for_participant_unrestricted(participant_email):
return get_fullname_for_participant_unrestricted_aux(participant_email)
# Input: participant's email, user's email (user logged in)
# Output: json [2 possibilities, according to privacy restrictions]
# 1.{"result":"OK", "ifallowed": True,
# "followers_num": 1,
# "followers_info": [
# {
# "email": "<EMAIL>",
# "fullname": "<NAME>",
# "username": "ale",
# "profilepic_url": "static/.../pic.jpg"
# }
# ]
# }
# 2.{"result":"OK", "ifallowed": False, "followers_num": 1, "followers_info": []}
@app.route('/get_participant_followers_info/<participant_email>', methods=['GET'])
@app.route('/get_participant_followers_info/<participant_email>/<user_email_DEBUG>', methods=['GET'])
def get_participant_followers_info(participant_email, user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return get_participant_followers_info_aux(participant_email, user_email)
# Input: participant's email, user's email (user logged in)
# Output: json [2 possibilities, according to privacy restrictions]
# 1.{"result":"OK", "ifallowed": True,
# "followings_num": 1,
# "followings_info": [
# {
# "email": "<EMAIL>",
# "fullname": "<NAME>",
# "username": "ale",
# "profilepic_url": "static/.../pic.jpg"
# }
# ]
# }
# 2.{"result":"OK", "ifallowed": False, "followings_num": 1, "followings_info": []}
@app.route('/get_participant_followings_info/<participant_email>', methods=['GET'])
@app.route('/get_participant_followings_info/<participant_email>/<user_email_DEBUG>', methods=['GET'])
def get_participant_followings_info(participant_email, user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return get_participant_followings_info_aux(participant_email, user_email)
# input: followingcontact email, user email (user logged in)
# output: json {"result": "OK"/"Wrong"}
@app.route('/add_following_contact_to_user/<followingcontact_email>', methods=['GET'])
@app.route('/add_following_contact_to_user/<followingcontact_email>/<user_email_DEBUG>', methods=['GET'])
def add_following_contact_to_user(followingcontact_email, user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return add_following_contact_to_user_aux(followingcontact_email, user_email)
# input: followingcontact email, user email (user logged in)
# output: json {"result": "OK"/"Wrong"}
@app.route('/remove_following_contact_to_user/<followingcontact_email>', methods=['GET'])
@app.route('/remove_following_contact_to_user/<followingcontact_email>/<user_email_DEBUG>', methods=['GET'])
def remove_following_contact_to_user(followingcontact_email, user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return remove_following_contact_to_user_aux(followingcontact_email, user_email)
@app.route('/get_all_participants_admin', methods=['GET','OPTIONS'])
def get_all_participants_admin():
return json.dumps(get_all_participants_admin_aux())
# Output json {"email": "<EMAIL>", "position": "Employee", "group": "IT",
# "fullname": "jlopezvi", "profilepic_url": "static/.../pic.jpg", "if_following":True/False}
@app.route('/get_all_public_participants_for_user', methods=['GET'])
@app.route('/get_all_public_participants_for_user/<user_email_DEBUG>', methods=['GET'])
def get_all_public_participants_for_user(user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return json.dumps(get_all_public_participants_for_user_aux(user_email))
##### PARTICIPANT NOTIFICATIONS
# TODO: test
# input: [user logged in]
# output: json {"result": "OK", "data": notifications}) with
# notifications = [
# {'notification_type': 'newfollower',
# 'participant_index': participant_email },
# { }
# ]
@app.route('/get_participantnotifications_for_user',methods=['GET'])
@app.route('/get_participantnotifications_for_user/<user_email_DEBUG>',methods=['GET'])
def get_participantnotifications_for_user(user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return get_participantnotifications_for_user_aux(user_email)
# TODO: test
# input: json {"participant1_email":"asdf@asdf", "participant2_email":"asdf2@asdf",
# "notification_type": "newfollower"}
# output:
# json {"result": "OK", "result_msg": "Notification was deleted"} /
@app.route('/remove_notification_from_participant1_to_participant2',methods=['POST'])
def remove_notification_from_participant1_to_participant2():
participant1_email = request.get_json()['participant1_email']
participant2_email = request.get_json()['participant2_email']
notification_type = request.get_json()['notification_type']
return remove_notification_from_participant1_to_participant2_aux(participant1_email, participant2_email, notification_type)
###############
# IDEA MANAGER
###############
# input: user_email (user logged in)
# application/json :
# {"concern":"we are not social enough in the office",
# "proposal":"social coffee pause at 4 p.m.",
# "moreinfo_concern":"I have to say as well this and this and this about the concern...",
# "moreinfo_proposal":"I have to say as well this and this and this about the proposal...",
# "supporters_goal_num":500, "volunteers_goal_num":5,
# "image":"base64_string"/null,
# "if_author_public":true/false, "first_receivers_emails":["<EMAIL>", "<EMAIL>"] }
# output: json {"result":"OK", "result_msg":"added idea to database"}
# {"result":"Wrong", "result_msg":"proposal already exists"}
@app.route('/add_idea_to_user', methods=['POST'])
@app.route('/add_idea_to_user/<string:user_email_DEBUG>', methods=['POST'])
def add_idea_to_user(user_email_DEBUG=None) :
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
idea_dict = request.get_json()
return add_idea_to_user_aux(user_email,idea_dict)
# input: application/json : ALL FIELDS ARE OPTIONAL SAVE FOR 'current_proposal'!
# {"concern":"we are not social enough in the office",
# "current_proposal": "this is the proposal to be modified",
# "proposal": "this is the new proposal (if is required)",
# "moreinfo_concern":"I have to say as well this and this and this about the concern...",
# "moreinfo_proposal":"I have to say as well this and this and this about the proposal...",
# "supporters_goal_num":500, "volunteers_goal_num":5,
# "image":"base64_string"/null,
# "if_author_public":true/false }
# Output json : 1./ {"result":"OK", "result_msg":"Idea was modified"}
# 2./ {"result":"Wrong", "result_msg": "Proposal already exists"}
@app.route('/modify_idea', methods=['PUT'])
def modify_idea():
idea_dict = request.get_json()
return modify_idea_aux(idea_dict)
# input: json {"proposal": "text of the proposal"}
# Output json : {"result":"OK", "result_msg":"Idea was removed"}
@app.route('/remove_idea', methods=['DELETE'])
def remove_idea():
idea_index=request.get_json()['proposal']
return remove_idea_aux(idea_index)
# Input: participant's email, user's email (user logged in)
# Output: json with fields "result","ifallowed":true/ false, "ideas_indices".
# "ideas_indices" contains a list [] with the indices for all the ideas created by the user
# There are two possibilities according to privacy restrictions
# 1. {"result": "OK",
# "ifallowed": true,
# "ideas_indices": [proposal_of_idea_1, proposal_of_idea_2,...]
# }
# 2. {
# "result": "OK"
# "ifallowed": false,
# "ideas_indices": []
# }
@app.route('/get_ideas_created_by_participant/<participant_email>', methods=['GET'])
@app.route('/get_ideas_created_by_participant/<participant_email>/<user_email_DEBUG>', methods=['GET'])
def get_ideas_created_by_participant(participant_email,user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return get_ideas_created_by_participant_aux(participant_email,user_email)
# Input: participant's email, user's email (user logged in)
# Output: json with fields "result","ifallowed":true/ false, "ideas_data".
# "ideas_data" contains list [] with json data {} for all the ideas created by the user
# There are two possibilities according to privacy restrictions
# 1. {"result": "OK",
# "ifallowed": true,
# "ideas_data":
# [
# {
# 'concern': 'Some text for the concern',
# 'proposal': 'Some text for the proposal',
# 'image_url': 'static/.../asdf.JPG'/null,
# 'uuid': 'unique_identifier_string',
# 'moreinfo_concern': 'blah blah blah more info',
# 'moreinfo_proposal': 'blah blah blah more info',
# 'supporters_goal_num': 200,
# 'volunteers_goal_num': 5,
# 'if_author_public': true / false
# 'author_profilepic_url': 'static/.../pic.jpg'/null, 'author_username': 'daniela', 'author_email': '<EMAIL>',
# 'duration' : "4 hours/ days/ weeks",
# 'supporters_num' : 5, 'volunteers_num' : 2, 'rejectors_num': 3,
# 'support_rate' : 95, 'support_rate_MIN' : 90,
# 'known_supporters': [
# { 'email': 'user', 'username': 'me' }, { 'email': '<EMAIL>', 'username': 'Pedro' }
# ],
# 'known_rejectors':[
# { 'email': 'd@', 'username': 'Elisa' }
# ],
# 'vote_type': null / 'supported' / 'rejected' / 'ignored'
# 'vote_ifvolunteered': null / true / false
# },
# {
# ...
# }
# ]
# }
# 2. {
# "result": "OK",
# "ifallowed": false,
# "ideas_data": []
# }
@app.route('/get_ideas_data_created_by_participant/<participant_email>', methods=['GET'])
@app.route('/get_ideas_data_created_by_participant/<participant_email>/<user_email_DEBUG>', methods=['GET'])
def get_ideas_data_created_by_participant(participant_email,user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return get_ideas_data_created_by_participant_aux(participant_email, user_email)
# input idea_proposal
# output json {"result": "OK", "volunteers_emails": [email1, email2,...]}
@app.route('/get_volunteers_emails_for_idea/<idea_proposal>', methods=['GET'])
def get_volunteers_emails_for_idea(idea_proposal):
volunteers_emails = _get_volunteers_emails_for_idea_aux(idea_proposal)
return jsonify({"result": "OK", "volunteers_emails": volunteers_emails})
# input idea_proposal
# output json {"result": "OK", "supporters_emails": [email1, email2,...]}
@app.route('/get_supporters_emails_for_idea/<idea_proposal>', methods=['GET'])
def get_supporters_emails_for_idea(idea_proposal):
supporters_emails = _get_supporters_emails_for_idea_aux(idea_proposal)
return jsonify({"result": "OK", "supporters_emails": supporters_emails})
# input idea_proposal
# output {"result": "OK", "vote_statistics" : [supporters_num, rejectors_num, passives_num, volunteers_num]}
@app.route('/get_vote_statistics_for_idea/<idea_proposal>', methods=['GET'])
def get_vote_statistics_for_idea(idea_proposal):
vote_statistics = get_vote_statistics_for_idea_aux(idea_proposal)
return jsonify({"result": "OK", "vote_statistics" : vote_statistics})
# input: user's email (flask_login.current_user.id), idea_proposal
# output: json {"result": "OK", "vote_type":"supported/rejected/ignored", "vote_ifvolunteered":true/false}
# {"result": "Wrong", "result_msg": "Voting relationship does not exist"}
@app.route('/get_voting_rel_between_user_and_idea/<idea_proposal>',methods=['GET'])
@app.route('/get_voting_rel_between_user_and_idea/<idea_proposal>/<user_email_DEBUG>',methods=['GET'])
def vote_status_idea(idea_proposal, user_email_DEBUG = None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return get_voting_rel_between_user_and_idea_aux(user_email, idea_proposal)
# TODO: format for vote_timestamp
# input user's email (flask_login.current_user.id)
# json {"idea_proposal":"let's do this",
# "vote_type":"supported/rejected/ignored", "vote_ifvolunteered": true/false}
# output json {"result": "Wrong: User vote exists of same type"}
# {"result": "OK: User vote was modified"}
# {"result": "OK: User vote was created"}
@app.route('/vote_on_idea',methods=['POST'])
@app.route('/vote_on_idea/<user_email_DEBUG>',methods=['POST'])
def vote_on_idea(user_email_DEBUG = None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return vote_on_idea_aux(user_email, request.get_json())
# input: user's email (flask_login.current_user.id),
# json {"idea_index":"let's do this",
# "reason":"this and this"}
# output: jsonify({"result":"OK", "result_msg":"Idea was removed"})
@app.route('/redflag_idea',methods=['POST'])
@app.route('/redflag_idea/<user_email_DEBUG>',methods=['POST'])
def redflag_idea(user_email_DEBUG = None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
reason = request.get_json()['reason']
idea_index = request.get_json()['idea_index']
return redflag_idea_aux(user_email, idea_index, reason)
@app.route('/get_all_ideas_admin', methods=['GET','OPTIONS'])
def get_all_ideas_admin():
return json.dumps(get_all_ideas_admin_aux())
# input: idea_index and user's email (flask_login.current_user.id),
# output: json
# {"result":"OK",
# "idea_data":
# {
# 'concern': 'Some text for the concern',
# 'proposal': 'Some text for the proposal',
# 'image_url': 'static/.../asdf.JPG'/null,
# 'uuid': 'unique_identifier_string',
# 'moreinfo_concern': 'blah blah blah more info',
# 'moreinfo_proposal': 'blah blah blah more info',
# 'supporters_goal_num': 200,
# 'volunteers_goal_num': 5,
# 'if_author_public': true / false
# 'author_profilepic_url': 'static/.../pic.jpg'/null, 'author_username': 'daniela', 'author_email': '<EMAIL>',
# 'duration' : "4 hours/ days/ weeks",
# 'supporters_num' : 5, 'volunteers_num' : 2, 'rejectors_num': 3,
# 'support_rate' : 95, 'support_rate_MIN' : 90,
# 'known_supporters': [
# { 'email': 'user', 'username': 'me' }, { 'email': '<EMAIL>', 'username': 'Pedro' }
# ],
# 'known_rejectors':[
# { 'email': 'd@', 'username': 'Elisa' }
# ],
# 'vote_type': null / 'supported' / 'rejected' / 'ignored'
# 'vote_ifvolunteered': null / true / false
# }
# }
@app.route('/get_idea_data_for_user/<idea_index>',methods=['GET'])
@app.route('/get_idea_data_for_user/<idea_index>/<user_email_DEBUG>',methods=['GET'])
def get_idea_data_for_user(idea_index, user_email_DEBUG = None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return get_idea_data_for_user_aux(idea_index, user_email)
# input idea_proposal
# output idea data as a json
@app.route('/get_idea_data_admin/<idea_proposal>', methods=['GET'])
def get_idea_data_admin(idea_proposal):
return jsonify(get_idea_data_admin_aux(idea_proposal))
# input idea_proposal
# output idea_node's data as a json
@app.route('/get_idea_node_data/<idea_proposal>', methods=['GET'])
def get_idea_node_data(idea_proposal):
return jsonify(get_idea_node_data_aux(idea_proposal))
##### IDEA NOTIFICATIONS
# TODO: test
# input: [user logged in]
# output: json {"result": "OK", "data": notifications}) with
# notifications = [
# {'notification_type': 'failurewarning'/'successful'/'sucessful_to_author'/'edited',
# 'idea_index': idea_proposal },
# { }
# ]
@app.route('/get_ideanotifications_for_user',methods=['GET'])
@app.route('/get_ideanotifications_for_user/<user_email_DEBUG>',methods=['GET'])
def get_ideanotifications_for_user(user_email_DEBUG=None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return get_ideanotifications_for_user_aux(user_email)
# TODO: test
# input: json {"participant_email":"asdf@asdf", "proposal":"this is a proposal",
# "notification_type": "failurewarning"/"successful"/"sucessful_to_author"/"edited"}
# output:
# json {"result": "OK", "result_msg": "Notification was deleted"} /
@app.route('/remove_notification_from_idea_to_participant',methods=['POST'])
def remove_notification_from_idea_to_participant():
participant_email = request.get_json()['participant_email']
idea_index = request.get_json()['proposal']
notification_type = request.get_json()['notification_type']
return remove_notification_from_idea_to_participant_aux(participant_email, idea_index, notification_type)
##############
# WEB MANAGER
##############
# TODO: try with redirect instead of render_template
# input: URL token link from an invitation e-mail and that (guest) e-mail
# output: redirects to login with a json called "message"
# -> json {"type": "registration", "result": "Wrong", "result_msg": "The confirmation link is invalid or has expired"}
# -> json {"type": "registration", "result": "OK : With data", "result_msg": "Invitation OK",
# "user_email": "<EMAIL>", "host_email": <EMAIL>"}
@app.route('/registration_from_invitation/<token>/<guest_email>')
def registration_from_invitation(token, guest_email):
return registration_from_invitation_aux(token, guest_email)
# input: host_email and guest_email
# output: sends registration e-mail and json
# {"result": "OK", "result_msg" : "email sent"}
@app.route('/registration_send_invitation/<host_email>/<guest_email>', methods=['GET'])
def registration_send_invitation(host_email, guest_email):
return registration_send_invitation_aux(host_email, guest_email)
# TODO: try with redirect instead of render_template
# input: URL token link from an invitation e-mail
# output: redirects to login with a json called "message"
# -> json {"type": "login", "result": "Wrong", "result_msg": "The confirmation link is invalid or has expired"}
# -> json {"type": "login", "result": "Wrong", "result_msg": "Email already verified"}
# -> json {"type": "login", "result": "Wrong", "result_msg": "Email not registered"}
# -> json {"type": "login", "result": "OK : With data", "result_msg": "Email verified", "login_email": "<EMAIL>"}
@app.route('/registration_receive_emailverification/<token>')
def registration_receive_emailverification(token):
return registration_receive_emailverification_aux(token)
# TODO: add weights for ideas
# Get Ideas For Newsfeed
# Input: user_email (user logged in)
# Output: json with fields 'result' and 'data'. 'data' contains array with all ideas for the newsfeed
# {"result": "OK",
# "data": [ {*see /get_idea_data_for_user}, {*see /get_idea_data_for_user} ]
# }
@app.route('/ideas_for_newsfeed',methods=['GET'])
@app.route('/ideas_for_newsfeed/<user_email_DEBUG>',methods=['GET'])
def ideas_for_newsfeed(user_email_DEBUG = None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return ideas_for_newsfeed_aux(user_email)
# Input: user_email (user logged in)
# Output: json {"result": true/false}
@app.route('/if_ideas_for_newsfeed',methods=['GET'])
@app.route('/if_ideas_for_newsfeed/<user_email_DEBUG>',methods=['GET'])
def if_ideas_for_newsfeed(user_email_DEBUG = None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return if_ideas_for_newsfeed_aux(user_email)
# Ideas For Home: See the Supported + Volunteered ideas/ See the ignored ideas / See the rejected ideas
# Input: user_email (user logged in) and JSON {"vote_type": "rejected/supported/ignored"}
# Output: json with fields 'result' and 'data'. 'data' Array with all ideas that the user has voted according to << vote_type >>
# {"result": "OK",
# "data": [
# {
# 'concern': 'Some text for the concern',
# 'proposal': 'Some text for the proposal',
# 'image_url': 'static/.../asdf.JPG'/null,
# 'uuid': 'unique_identifier_string',
# 'moreinfo_concern': 'blah blah blah more info',
# 'moreinfo_proposal': 'blah blah blah more info',
# 'supporters_goal_num': 200,
# 'volunteers_goal_num': 5,
# 'if_author_public': true / false
# 'author_profilepic_url': 'static/.../pic.jpg'/null, 'author_username': 'daniela', 'author_email': '<EMAIL>',
# 'duration' : "4 hours/ days/ weeks",
# 'supporters_num' : 5, 'volunteers_num' : 2, 'rejectors_num': 3,
# 'support_rate' : 95, 'support_rate_MIN' : 90,
# 'known_supporters': [
# { 'email': 'user', 'username': 'me' }, { 'email': '<EMAIL>', 'username': 'Pedro' }
# ],
# 'known_rejectors':[
# { 'email': 'd@', 'username': 'Elisa' }
# ],
# 'vote_type': null / 'supported' / 'rejected' / 'ignored'
# 'vote_ifvolunteered': null / true / false
# },
# {
# ...
# }
# ]
# }
@app.route('/ideas_for_home',methods=['POST'])
@app.route('/ideas_for_home/<user_email_DEBUG>',methods=['POST'])
def ideas_for_home(user_email_DEBUG = None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
#
vote_type = request.get_json()['vote_type']
return ideas_for_home_aux(user_email, vote_type)
# Input: None
# Output: json with fields 'result' and 'data'. 'data' Array with at most 10 ideas ranked from 1st to 10st in order
# {"result": "OK",
# "data": [
# {
# 'concern': 'Some text for the concern',
# 'proposal': 'Some text for the proposal',
# 'image_url': 'static/.../asdf.JPG'/null,
# 'uuid': 'unique_identifier_string',
# 'moreinfo_concern': 'blah blah blah more info',
# 'moreinfo_proposal': 'blah blah blah more info',
# 'supporters_goal_num': 200,
# 'volunteers_goal_num': 5,
# 'if_author_public': true / false
# 'author_profilepic_url': 'static/.../pic.jpg'/null, 'author_username': 'daniela', 'author_email': '<EMAIL>',
# 'duration' : "4 hours/ days/ weeks",
# 'supporters_num' : 5, 'volunteers_num' : 2, 'rejectors_num': 3,
# 'support_rate' : 95, 'support_rate_MIN' : 90,
# 'known_supporters': [
# { 'email': 'user', 'username': 'me' }, { 'email': '<EMAIL>', 'username': 'Pedro' }
# ],
# 'known_rejectors':[
# { 'email': 'd@', 'username': 'Elisa' }
# ],
# 'vote_type': null / 'supported' / 'rejected' / 'ignored'
# 'vote_ifvolunteered': null / true / false
# },
# {
# ...
# }
# ]
# }
@app.route('/get_topten_ideas',methods=['GET'])
@app.route('/get_topten_ideas/<user_email_DEBUG>',methods=['GET'])
def get_topten_ideas(user_email_DEBUG = None):
if DEBUG and user_email_DEBUG is not None:
user_email = user_email_DEBUG
else:
user_email = flask_login.current_user.id
return get_topten_ideas_aux(user_email)
# input : None
# output: JSON {"result":"OK", "ideas_removed" : [ "proposal1", "proposal2"]}
@app.route('/do_cron_tasks')
def do_cron_tasks():
return do_cron_tasks_aux()
########################
# COMMUNITIES (NOT USED)
#######################
@app.route('/addCommunity', methods=['POST'])
def addComunity():
return saveCommunity(request.get_json())
@app.route('/addCommunityToUser/<string:name>/<string:email>', methods=['POST', 'OPTIONS'])
def addCommunityToUser(name, email) :
addCommunityToContact(name, email)
return "Community %s was added to user with email %s" % (name, email)
@app.route('/delete/community/<string:name>', methods=['DELETE', 'OPTIONS'])
def removeCommunity(name):
deleteCommunity(name)
return "Community %s was successfully removed" % name
@app.route('/getCommunitiesOfUser/<string:email>', methods=['GET','OPTIONS'])
def getAllCommunitiesForUser(email):
return json.dumps(getCommunities(email))
################
# ERROR HANDLERS
################
#<EMAIL>(NotFoundError)
#def handle_NotFoundError(error):
# response = jsonify(error.to_dict())
# response.status_code = error.status_code
# return response
################
# MAIN PROGRAM
################
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
if os.environ.get('GRAPHENEDB_URL'):
app.run(host='0.0.0.0', port=port)
else:
app.run(host='127.0.0.1', port=port)
```
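Most routes in `app.py` above share the same debug convention: a second URL rule that takes `<user_email_DEBUG>` and substitutes it for `flask_login.current_user.id` when `DEBUG` is true. A rough client-side sketch of both variants using `requests`; the host, port, e-mail and password are placeholders.
```python
import requests

BASE = 'http://127.0.0.1:5000'  # placeholder host/port

# Normal variant: authenticate first so flask_login attaches a session cookie.
session = requests.Session()
session.post(BASE + '/login',
             json={'email': 'user@example.com', 'password': 'secret'})
print(session.get(BASE + '/get_user_data').json())

# Debug variant: only honoured when the app runs with DEBUG enabled, in which
# case the e-mail embedded in the URL replaces the logged-in user's id.
print(requests.get(BASE + '/get_user_data/user@example.com').json())
```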
#### File: jlopezvi/Consensus/communityManager.py
```python
from utils import getGraph
from py2neo import neo4j
from participantManager import _get_participant_node
def saveCommunity(community):
    name = community.get('name')
    if _getCommunity(name) is not None:
        return "Community %s already exists" % name
    _newCommunity(community)
    return "Community %s was successfully added" % name
def addCommunityToContact(name, email):
userFound = _get_participant_node(email)
communityFound = _getCommunity(name)
getGraph().create((userFound, "BELONGS_TO", communityFound))
def getCommunities(email):
    currentUser = _get_participant_node(email)
rels = list(getGraph().match(start_node=currentUser, rel_type="BELONGS_TO"))
communities = []
for rel in rels:
communities.append(rel.end_node.get_properties())
#print getGraph().node(rel.end_node)
return communities
def deleteCommunity(name):
communityFound = _getCommunity(name)
communityFound.delete()
def _getCommunity(communityName):
communityFound = _getCommunityIndex().get("name", communityName)
if communityFound :
return communityFound[0]
return None
def _addToCommunityIndex(name, newCommunity) :
_getCommunityIndex().add("name", name, newCommunity)
def _newCommunity(community):
name = community.get('name')
newCommunity, = getGraph().create({"name" : name, "description" : community.get('description')})
_addToCommunityIndex(name, newCommunity)
def _getCommunityIndex():
return getGraph().get_or_create_index(neo4j.Node, "Communities")
``` |
{
"source": "jlopp/bitnodes",
"score": 2
} |
#### File: jlopp/bitnodes/seeder.py
```python
import glob
import json
import logging
import operator
import os
import random
import redis
import sys
import threading
import time
from ConfigParser import ConfigParser
from protocol import DEFAULT_PORT
# Redis connection setup
REDIS_SOCKET = os.environ.get('REDIS_SOCKET', "/tmp/redis.sock")
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', None)
REDIS_CONN = redis.StrictRedis(unix_socket_path=REDIS_SOCKET,
password=REDIS_PASSWORD)
SETTINGS = {}
def export_nodes(nodes):
"""
Exports nodes as A and AAAA records. Nodes are selected from oldest
(longest uptime) to newest each with unique AS number.
"""
nodes = sorted(nodes, key=operator.itemgetter(4))[:SETTINGS['nodes']]
min_height = REDIS_CONN.get('height')
if min_height is None:
min_height = SETTINGS['min_height']
else:
min_height = int(min_height)
min_age = SETTINGS['min_age']
now = int(time.time())
logging.info("Min. height: {}".format(min_height))
oldest = now - min(nodes, key=operator.itemgetter(4))[4]
if oldest < min_age:
min_age = oldest - (0.01 * oldest) # Max. 1% newer than oldest
logging.info("Min. age: {}".format(min_age))
asns = []
a_records = []
aaaa_records = []
for node in nodes:
address = node[0]
port = node[1]
age = now - node[4]
height = node[5]
asn = node[12]
if (port == DEFAULT_PORT and asn not in asns and
age >= min_age and height >= min_height):
if ":" in address:
aaaa_records.append("@\tIN\tAAAA\t{}".format(address))
else:
a_records.append("@\tIN\tA\t{}".format(address))
asns.append(asn)
return (a_records, aaaa_records)
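# --- Illustrative sketch (not part of the original file) ---------------------
# Shape of the records returned by export_nodes() above, for one IPv4 and one
# IPv6 node (documentation addresses used as placeholders):
#
#   a_records    == ["@\tIN\tA\t192.0.2.10"]
#   aaaa_records == ["@\tIN\tAAAA\t2001:db8::1"]
#
# save_zone_file() below shuffles the lists, truncates them to the configured
# limits and appends them to the zone file template.
# ------------------------------------------------------------------------------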
def save_zone_file(a_records, aaaa_records):
"""
Saves A and AAAA records in DNS zone file.
"""
random.shuffle(a_records)
random.shuffle(aaaa_records)
logging.info("A records: {}".format(len(a_records)))
logging.info("AAAA records: {}".format(len(aaaa_records)))
a_records = "\n".join(a_records[:SETTINGS['a_records']]) + "\n"
aaaa_records = "\n".join(aaaa_records[:SETTINGS['aaaa_records']]) + "\n"
template = open(SETTINGS['template'], "r").read()
open(SETTINGS['zone_file'], "w").write(template + a_records + aaaa_records)
def cron():
"""
Periodically fetches latest snapshot to sample nodes for DNS zone file.
"""
while True:
time.sleep(5)
dump = max(glob.iglob("{}/*.json".format(SETTINGS['export_dir'])))
logging.info("Dump: {}".format(dump))
nodes = []
try:
nodes = json.loads(open(dump, "r").read(), encoding="latin-1")
except ValueError:
logging.warning("Write pending")
if len(nodes) > 0:
(a_records, aaaa_records) = export_nodes(nodes)
if len(a_records) > 0 and len(aaaa_records) > 0:
save_zone_file(a_records, aaaa_records)
def init_settings(argv):
"""
Populates SETTINGS with key-value pairs from configuration file.
"""
conf = ConfigParser()
conf.read(argv[1])
SETTINGS['logfile'] = conf.get('seeder', 'logfile')
SETTINGS['debug'] = conf.getboolean('seeder', 'debug')
SETTINGS['export_dir'] = conf.get('seeder', 'export_dir')
SETTINGS['nodes'] = conf.getint('seeder', 'nodes')
SETTINGS['min_height'] = conf.getint('seeder', 'min_height')
SETTINGS['min_age'] = conf.getint('seeder', 'min_age')
SETTINGS['zone_file'] = conf.get('seeder', 'zone_file')
SETTINGS['template'] = conf.get('seeder', 'template')
SETTINGS['a_records'] = conf.getint('seeder', 'a_records')
SETTINGS['aaaa_records'] = conf.getint('seeder', 'aaaa_records')
def main(argv):
if len(argv) < 2 or not os.path.exists(argv[1]):
print("Usage: seeder.py [config]")
return 1
# Initialize global settings
init_settings(argv)
# Initialize logger
loglevel = logging.INFO
if SETTINGS['debug']:
loglevel = logging.DEBUG
logformat = ("%(asctime)s,%(msecs)05.1f %(levelname)s (%(funcName)s) "
"%(message)s")
logging.basicConfig(level=loglevel,
format=logformat,
filename=SETTINGS['logfile'],
filemode='w')
print("Writing output to {}, press CTRL+C to terminate..".format(
SETTINGS['logfile']))
threading.Thread(target=cron).start()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
``` |
{
"source": "jlopp/explorer",
"score": 2
} |
#### File: explorer/blocks/views.py
```python
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from annoying.decorators import render_to
from blockexplorer.decorators import assert_valid_coin_symbol
from blockexplorer.settings import BLOCKCYPHER_API_KEY
from blockcypher.api import get_block_details, get_latest_block_height
from blockcypher.constants import COIN_SYMBOL_MAPPINGS
from utils import get_max_pages
@assert_valid_coin_symbol
@render_to('block_overview.html')
def block_overview(request, coin_symbol, block_representation):
TXNS_PER_PAGE = 20
# 1 indexed page
current_page = request.GET.get('page')
if current_page:
current_page = int(current_page)
else:
current_page = 1
# TODO: fail gracefully if the user picks a number of pages that is too large
# Waiting on @matthieu's change to API first (currently throws 502)
try:
block_details = get_block_details(
block_representation=block_representation,
coin_symbol=coin_symbol,
txn_limit=TXNS_PER_PAGE,
txn_offset=(current_page-1)*TXNS_PER_PAGE,
api_key=BLOCKCYPHER_API_KEY,
)
except AssertionError:
msg = _('Invalid Block Representation')
messages.warning(request, msg)
redir_url = reverse('coin_overview', kwargs={'coin_symbol': coin_symbol})
return HttpResponseRedirect(redir_url)
# import pprint; pprint.pprint(block_details, width=1)
if 'error' in block_details:
msg = _('Sorry, that block was not found')
messages.warning(request, msg)
return HttpResponseRedirect(reverse('home'))
# Technically this is not the only API call used on this page
api_url = 'https://api.blockcypher.com/v1/%s/%s/blocks/%s' % (
COIN_SYMBOL_MAPPINGS[coin_symbol]['blockcypher_code'],
COIN_SYMBOL_MAPPINGS[coin_symbol]['blockcypher_network'],
block_representation,
)
return {
'coin_symbol': coin_symbol,
'api_url': api_url,
'block_details': block_details,
'current_page': current_page,
'max_pages': get_max_pages(num_items=block_details['n_tx'], items_per_page=TXNS_PER_PAGE),
}
@assert_valid_coin_symbol
def latest_block(request, coin_symbol):
latest_block_height = get_latest_block_height(coin_symbol=coin_symbol,
api_key=BLOCKCYPHER_API_KEY)
kwargs = {
'coin_symbol': coin_symbol,
'block_representation': latest_block_height,
}
return HttpResponseRedirect(reverse('block_overview', kwargs=kwargs))
def latest_block_forwarding(request):
return HttpResponseRedirect(reverse('latest_block', kwargs={
'coin_symbol': 'btc',
}))
```
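`get_max_pages` is imported from the project's `utils` module, which is not shown here; a plausible sketch, assuming it simply rounds the transaction count up to whole pages.
```python
import math

def get_max_pages(num_items, items_per_page):
    # Hypothetical reimplementation: ceil-divide so a partially filled final
    # page still counts, with a minimum of one page.
    return max(1, int(math.ceil(float(num_items) / items_per_page)))

# e.g. a block with 45 transactions at 20 per page spans 3 pages
assert get_max_pages(num_items=45, items_per_page=20) == 3
```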
#### File: explorer/users/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.core.urlresolvers import reverse_lazy
from emails.trigger import send_and_log
from utils import get_client_ip, get_user_agent
# For more info, see the django docs here:
# https://docs.djangoproject.com/en/1.7/topics/auth/customizing/#a-full-example
class AuthUserManager(BaseUserManager):
def create_user(self, email, password, creation_ip, creation_user_agent):
"""
Creates and saves a user with the given email and password.
"""
if not email:
raise ValueError('Users must have an email address')
# force whole email to lowercase. violates spec but better usability.
user = self.model(email=email.lower().strip())
# if no password it calls set_unusuable_password() under the hood:
user.set_password(password)
user.creation_ip = creation_ip
user.creation_user_agent = creation_user_agent
user.save()
return user
def create_superuser(self, email, password, creation_ip=None,
creation_user_agent=None):
"""
Creates and saves a superuser with the given email and password.
"""
if not creation_ip:
creation_ip = '127.0.0.1'
if not creation_user_agent:
creation_user_agent = 'admin'
user = self.create_user(
email=email,
password=password,
creation_ip=creation_ip,
creation_user_agent=creation_user_agent,
)
user.is_superuser = True
user.is_staff = True
user.save()
return user
class AuthUser(AbstractBaseUser):
date_joined = models.DateTimeField(auto_now_add=True, db_index=True)
first_name = models.CharField(max_length=64, blank=True, null=True)
last_name = models.CharField(max_length=64, blank=True, null=True)
email = models.EmailField(max_length=128, unique=True)
is_active = models.BooleanField(default=True, help_text='Can login?')
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
creation_ip = models.IPAddressField(null=False, blank=False, db_index=True)
creation_user_agent = models.CharField(max_length=1024, blank=True, db_index=True)
email_verified = models.BooleanField(default=False, db_index=True)
objects = AuthUserManager()
USERNAME_FIELD = 'email'
def __str__(self):
return '%s: %s' % (self.id, self.email)
def get_full_name(self):
if self.first_name and self.last_name:
return '%s %s' % (self.first_name, self.last_name)
else:
return ''
def get_short_name(self):
return self.first_name
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# FIXME
return self.is_superuser
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# FIXME
return self.is_superuser
def get_login_uri(self):
return '%s?e=%s' % (reverse_lazy('user_login'), self.email)
def get_address_subscriptions(self):
return self.addresssubscription_set.filter(unsubscribed_at=None).order_by('-id')
def get_address_forwardings(self):
return self.addressforwarding_set.filter(archived_at=None).order_by('-id')
def send_pwreset_email(self):
"""
Send password reset email to user.
"""
# TODO: add some sort of throttling
return send_and_log(
subject='Blockcypher Password Reset',
body_template='password_reset.html',
to_user=self,
body_context={},
fkey_objs={'auth_user': self},
)
class LoggedLogin(models.Model):
login_at = models.DateTimeField(auto_now_add=True, db_index=True)
auth_user = models.ForeignKey(AuthUser, blank=False, null=False)
ip_address = models.IPAddressField(null=False, blank=False, db_index=True)
user_agent = models.CharField(max_length=1024, blank=True, db_index=True)
def __str__(self):
return '%s: %s' % (self.id, self.ip_address)
@classmethod
def record_login(cls, request):
return cls.objects.create(
auth_user=request.user,
ip_address=get_client_ip(request),
user_agent=get_user_agent(request),
)
``` |
{
"source": "jlorencelim/active-campaign",
"score": 3
} |
#### File: active-campaign/active_campaign/base.py
```python
class BaseAC(object):
def format_filters(self, filters={}):
return {'filters[{}]'.format(key): value for key, value in filters.items()}
def format_ordering(self, ordering={}):
return {'orders[{}]'.format(key): value for key, value in ordering.items()}
```
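A quick look at what the two `BaseAC` helpers above produce — the bracketed query-parameter names the ActiveCampaign API expects; the field names are examples only.
```python
from active_campaign.base import BaseAC

base = BaseAC()

print(base.format_filters({'email': 'jane@example.com'}))
# {'filters[email]': 'jane@example.com'}

print(base.format_ordering({'cdate': 'DESC'}))
# {'orders[cdate]': 'DESC'}
```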
#### File: active_campaign/ecommerce/customer.py
```python
import json
from active_campaign.base import BaseAC
class ACCustomer(BaseAC):
"""E-commerce customer resources represent a customer in an external e-commerce service.
Customer resources primarily hold aggregate e-commerce data associated with a contact.
Arguments:
client {ACClient} -- ACClient object.
connection_id {int} -- The id of the connection object for the service
where the customer originates.
"""
def __init__(self, *, client, connection_id):
self.client = client
self.connection_id = connection_id
def create(self, *, external_id, email):
"""Create a new e-commerce customer resource.
Arguments:
external_id {str} -- The id of the customer in the external service.
email {str} -- The email address of the customer.
Returns:
bool -- True if success, False otherwise.
dict -- Response of the /ecomCustomers/ endpoint.
"""
url = '{}/ecomCustomers/'.format(self.client.base_url)
payload = {
'ecomCustomer': {
'connectionid': self.connection_id,
'externalid': external_id,
'email': email,
}
}
request = self.client.session.post(url, json=payload)
return request.ok, json.loads(request.text)
def list(self, filters={}, ordering={}, limit=20, offset=0):
"""List all e-commerce customer resources.
Optional Arguments:
filters {dict} -- To apply multiple, convention oriented filters to a request. (default: {{}})
key - Field name
value - Value to fitler by
ordering {dict} -- To apply multiple sorting criteria to a request. (default: {{}})
key - Field name
value - ASC = Ascending order
DESC = Descending order
limit {int} -- The number of results to display in each page. (default: {20}; max: {100})
offset {int} -- The starting point for the result set of a page. (default: {0})
Returns:
bool -- True if success, False otherwise.
dict -- Response of the /ecomCustomers/ endpoint.
"""
url = '{}/ecomCustomers/'.format(self.client.base_url)
payload = {
            'limit': limit,
            'offset': offset
}
payload.update(self.format_filters(filters))
payload.update(self.format_ordering(ordering))
request = self.client.session.get(url, params=payload)
return request.ok, json.loads(request.text)
``` |
{
"source": "jlorencelim/django-cookiecutter",
"score": 2
} |
#### File: {{ cookiecutter.repo_name }}/core/models.py
```python
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class AbstractTimeStampedModel(models.Model):
"""
Base for time-stamped models.
"""
created_at = models.DateTimeField(_('Created At'), editable=False)
updated_at = models.DateTimeField(_('Updated At'), editable=False)
class Meta:
abstract = True
def save(self, *args, **kwargs):
# check if the instace already has an id
if not self.created_at:
self.created_at = timezone.now()
# update date modified
self.updated_at = timezone.now()
return super(AbstractTimeStampedModel, self).save(*args, **kwargs)
```
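A minimal sketch of a concrete model inheriting the abstract base above; the `Article` model is illustrative and assumes the app is importable as `core`.
```python
from django.db import models

from core.models import AbstractTimeStampedModel


class Article(AbstractTimeStampedModel):
    """Hypothetical model: created_at/updated_at are set by the base save()."""
    title = models.CharField(max_length=255)

    def __str__(self):
        return self.title
```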
#### File: {{ cookiecutter.repo_name }}/core/utils.py
```python
from __future__ import (
absolute_import,
unicode_literals,
)
import os
import uuid
def get_upload_path(instance, filename):
"""
This function gets the upload path of the image.
Returns the image path.
"""
file_name, file_extension = os.path.splitext(filename)
model = instance._meta.model_name.lower()
new_file = '{}{}'.format(uuid.uuid4().hex, file_extension)
return os.path.join(model, new_file)
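# --- Illustrative sketch (not part of the original file) ---------------------
# get_upload_path is meant to be passed as the upload_to callable of a file or
# image field, e.g. on a hypothetical model:
#
#   image = models.ImageField(upload_to=get_upload_path)
#
# For an Avatar instance and "photo.png" this yields something like
# "avatar/3f1c0a...e2.png".
# ------------------------------------------------------------------------------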
``` |
{
"source": "jlorenze/asl_fixedwing",
"score": 3
} |
#### File: asl_fixedwing/data/utils.py
```python
from os.path import dirname, abspath, join, isfile, isdir
import sys
import rosbag
import numpy as np
def get_data_dir():
return dirname(abspath(__file__))
def get_models_dir():
return join(dirname(dirname(abspath(__file__))), 'models')
def get_utils_dir():
return join(dirname(dirname(abspath(__file__))), 'src/utils')
class pointStream:
def __init__(self):
self.x = []
self.y = []
self.z = []
self.t = []
def add_point(self, t, x, y, z):
self.x.append(x)
self.y.append(y)
self.z.append(z)
self.t.append(t)
class ctrlStream:
def __init__(self):
self.u = [[], [], [], []]
self.t = []
def add_point(self, t, u):
self.t.append(t)
for i in range(4):
self.u[i].append(u[i])
class zStream:
def __init__(self):
self.z = []
self.t = []
def add_point(self, t, z):
self.t.append(t)
if not self.z:
self.z = [[z[i]] for i in range(len(z))]
else:
for i in range(len(z)):
self.z[i].append(z[i])
class planeData:
"""
A class to extract data from Plane topic messages
"""
def __init__(self):
self.pos = pointStream() # inertial pos x_i, y_i, z_i [m]
self.vel = pointStream() # body frame vel u, v, w [m/s]
self.euler = pointStream() # euler angle phi, th, psi [rad]
self.om = pointStream() # body rate p, q, r [rad/s]
self.act = ctrlStream() # thrust [N] and ctrl srf def [rad]
self.nrmlzd_act = ctrlStream() # normalized actuators
# Total velocity
self.vel.V = []
def add_msg(self, topic, msg, t):
"""
Add a piece of data from a ROS message
"""
if topic == 'position':
self.pos.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'velocity':
self.vel.add_point(t, msg.point.x, msg.point.y, msg.point.z)
self.vel.V.append(np.sqrt(msg.point.x**2 + msg.point.y**2 + msg.point.z**2))
elif topic == 'euler':
self.euler.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'bodyrate':
self.om.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'actuators':
self.act.add_point(t, msg.controls)
elif topic == 'target_actuator_control':
self.nrmlzd_act.add_point(t, msg.controls)
class rompcData:
"""
A class to extract data from ROMPC topic messages
Note e_att is either euler angles or axis/angle param depending
on the type of model used.
Note e_attrate is either body rates p,q,r or axis/angle rates depending
on the type of model used.
"""
def __init__(self):
self.e_pos = pointStream() # pos error x_r, y_r, z_r [m]
self.e_vel = pointStream() # body frame vel error [m/s]
self.e_att = pointStream() # attitude error [rad]
self.e_attrate = pointStream() # attitude rate error [rad/s]
self.ubar = ctrlStream() # nominal control minus eq. control
self.u = ctrlStream() # control minus eq. control
self.zbar = zStream()
self.zhat = zStream()
self.u_prev = ctrlStream() # Control used in state estimator
self.y = zStream()
self.qp_solve_time = zStream()
def add_msg(self, topic, msg, t):
"""
Add a piece of data from a ROS message
"""
if topic == 'pos_error':
self.e_pos.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'vel_error':
self.e_vel.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'att_error':
self.e_att.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'attrate_error':
self.e_attrate.add_point(t, msg.point.x, msg.point.y, msg.point.z)
elif topic == 'ubar':
self.ubar.add_point(t, msg.data)
elif topic == 'u':
self.u.add_point(t, msg.data)
elif topic == 'zbar':
self.zbar.add_point(t, msg.data)
elif topic == 'zhat':
self.zhat.add_point(t, msg.data)
elif topic == 'u_prev':
self.u_prev.add_point(t, msg.data)
elif topic == 'y':
self.y.add_point(t, msg.data)
elif topic == 'qp_solve_time':
self.qp_solve_time.add_point(t, [msg.data])
class RosbagData:
"""
This class extracts rosbag data
"""
def __init__(self, fpath):
self.plane = planeData()
self.rompc = rompcData()
self.t0 = None
bag = rosbag.Bag(fpath)
topics = ['/plane/position', '/plane/velocity',
'/plane/euler', '/plane/bodyrate',
'/plane/actuators', '/rompc/pos_error',
'/rompc/vel_error', '/rompc/att_error',
'/rompc/attrate_error',
'/rompc/ubar', '/rompc/u',
'/rompc/zbar', '/rompc/zhat',
'/rompc/u_prev', '/rompc/y',
'/rompc/qp_solve_time',
'/mavros/target_actuator_control']
for topic, msg, t in bag.read_messages(topics=topics):
self.add_msg(msg, topic)
def extract_time(self, msg):
t = msg.header.stamp.secs + msg.header.stamp.nsecs/1e9
if self.t0 is None:
self.t0 = t
return t - self.t0
def add_msg(self, msg, topic):
main, sub = topic.split('/')[1:3]
if sub == 'qp_solve_time':
t = 0
else:
t = self.extract_time(msg)
if main == 'plane' or main == 'mavros':
self.plane.add_msg(sub, msg, t)
elif main == 'rompc':
self.rompc.add_msg(sub, msg, t)
if __name__ == '__main__':
data_dir = get_data_dir()
fpath = join(data_dir, 'rompc.bag')
data = RosbagData(fpath)
```
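A short sketch showing how the classes above compose: `RosbagData` loads a bag and exposes the plane and ROMPC streams for plotting. The bag filename and the plotted quantity are illustrative, not taken from the repository.
```python
# Sketch: load a bag with the helpers above and plot the inertial z-position trace.
# 'flight1.bag' is a hypothetical file placed in the data directory.
from os.path import join

import matplotlib.pyplot as plt

from utils import get_data_dir, RosbagData  # assumed import of the module above

data = RosbagData(join(get_data_dir(), 'flight1.bag'))

plt.plot(data.plane.pos.t, data.plane.pos.z, label='z [m]')
plt.xlabel('t [s]')
plt.ylabel('inertial position')
plt.legend()
plt.show()
```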
#### File: models/gazebo/sysid.py
```python
import numpy as np
def aircraft_ctrl_params():
"""
Compute the parameters that define the aircraft's command to control mappings
delta = c_delta*u_delta + delta_0 maps [-1,1] to radians
T = c_T0*(1 - C_TVom*V/u_T)*u_T^2 maps [0,1] to Newtons
"""
k_slowdown = 10.0 # from .sdf file
k_motor = 8.54858e-06 # from .sdf file
c_om = 350.0 # from sysid data
# Thrust model parameters
c_T0 = k_motor*(c_om*k_slowdown)**2
c_TVom = 0.2/25.0 # a guess because Gazebo doesn't model prop well
# Control surface deflection parameters
a_0 = 0.0
e_0 = 0.0
r_0 = 0.0
c_a = 1.0
c_e = 1.0
c_r = 1.0
return np.array([c_T0, c_TVom, a_0, c_a, e_0, c_e, r_0, c_r])
if __name__ == '__main__':
aircraft_ctrl_params()
``` |
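The docstring above defines the command-to-control mappings; the sketch below evaluates them with the returned parameter vector. The airspeed and command values are illustrative.
```python
# Sketch: evaluate the mappings documented in aircraft_ctrl_params().
# V, u_T and u_a are illustrative values, not taken from the repository.
from sysid import aircraft_ctrl_params  # assumed import of the module above

c_T0, c_TVom, a_0, c_a, e_0, c_e, r_0, c_r = aircraft_ctrl_params()

u_T, V = 0.7, 15.0                              # throttle command in [0, 1], airspeed in m/s
T = c_T0 * (1.0 - c_TVom * V / u_T) * u_T**2    # thrust in N
u_a = 0.3                                       # aileron command in [-1, 1]
delta_a = c_a * u_a + a_0                       # aileron deflection in rad
print(T, delta_a)
```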
{
"source": "jlorieau/nmr",
"score": 3
} |
#### File: plotpipe2d/scripts/plotpipe2d.py
```python
from itertools import groupby
import pathlib
from nmrglue import pipe
import click
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
default2d = pathlib.Path(__file__).parent.parent / 'trosy-fb.ft2'
def contours_by_Imax(data, minI_factor=8., cl_factor=1.2, num_contours=10):
"""Calculate the contours for the plot base on the maximum intensity."""
maxI = data.max()
minI = maxI / minI_factor
return [minI * cl_factor ** x for x in range(num_contours)]
def freq_ppm(header, data, debug=False):
"""Generate a 2d for the frequencies of the 2d spectrum in Hz."""
# Get the spectral widths for the 2 frequency dimensions in Hz
f1sw = header['FDF1SW']
f2sw = header['FDF2SW']
# Get the observed (Larmor) frequencies for each channel in MHz
f1obs = header['FDF1OBS']
f2obs = header['FDF2OBS']
# Get the spectral widths in ppm
f1swppm = f1sw / f1obs
f2swppm = f2sw / f2obs
# Get the spectral offset in ppm
f1swoffppm = header['FDF1ORIG'] / f1obs
f2swoffppm = header['FDF2ORIG'] / f2obs
# Get the spectral ranges in ppm
f1rangeppm = (f1swoffppm, f1swoffppm + f1swppm)
f2rangeppm = (f2swoffppm, f2swoffppm + f2swppm)
# Get the number of points in the f1 (y-axis) and f2 (x-axis)
# dimensions
f1npts, f2npts = data.shape
# Calculate the number of ppm per point for each dimension
f1delta = f1swppm / f1npts
f2delta = f2swppm / f2npts
if debug:
print('f1sw (Hz): {}, f2sw (Hz): {}'.format(f1sw, f2sw))
        print('f1swppm (ppm): {}, f2swppm (ppm): {}'.format(f1swppm, f2swppm))
print('f1offppm (ppm): {}, f2offppm (ppm): {}'.format(f1swoffppm, f2swoffppm))
print('f1rangeppm (ppm): {}'.format(f1rangeppm))
print('f2rangeppm (ppm): {}'.format(f2rangeppm))
    # return numpy arrays for the frequencies in ppm
f1ppm = np.array([f1rangeppm[1] - float(i) * f1delta for i in range(f1npts)])
f2ppm = np.array([f2rangeppm[1] - float(i) * f2delta for i in range(f2npts)])
return np.meshgrid(f2ppm, f1ppm)
def format_isotope(label):
"""Format an isotope string into latex"""
# Parse the starter numbers
groups = [list(g) for _,g in groupby(label, key=lambda c: c.isdigit())]
if len(groups) > 1:
number = ''.join(groups[0])
rest = ''.join([''.join(g) for g in groups[1:]])
return "$^{{{}}}${}".format(number, rest)
else:
return label
def print_header(header):
"""Print information on the header"""
for key in sorted(header.keys()):
if key.startswith('FDF3') or key.startswith('FDF4'):
continue
print(key, header[key])
@click.command()
@click.argument('filenames', nargs=-1, type=click.Path(exists=True))
@click.option('--dims', required=False, default=(3, 4),
type=click.Tuple([float, float]),
help='The figure dimensions (in inches)')
@click.option('--units', required=False, default=('ppm', 'ppm'),
type=click.Tuple([str, str]),
help='The units to plot the x- and y-axes (ppm)')
@click.option('--title', required=False, default=None,
help='The title for the figure')
@click.option('--labels', required=False,
default=('', ''), type=click.Tuple([str, str]),
help="The labels for the x- and y-axis")
@click.option('--xlim', required=False,
default=(None, None), type=click.Tuple([float, float]),
help="The x-axis limits (in units) to draw the spectrum")
@click.option('--ylim', required=False,
default=(None, None), type=click.Tuple([float, float]),
help="The y-axis limits (in units) to draw the spectrum")
@click.option('--ticks', required=False,
default=(1, 5), type=click.Tuple([float, float]),
help="The major tick mark interval for the x- and y-axis")
@click.option('--contour-Icutoff', 'ctr_Icutoff', required=False,
default=0.15, type=float,
help='The fraction of the maximum intensity to start contours')
@click.option('--contour-levels', 'ctr_levels', required=False,
default=20, type=int,
help='The number of contours to draw')
@click.option('--contour-factor', 'ctr_factor', required=False,
default=1.2, type=float,
help='The contour multiplicative factor')
@click.option('--contour-map', 'cmap', required=False,
default='winter', type=str,
help='The color map to use for drawing the contours')
@click.option('--out', '-o', 'outfile',
required=False, default=None,
help="The output filename to save to figure image")
@click.option('--debug', '-d', required=False, default=False, type=bool,
help="Print debug information to the terminal")
def plot2d(filenames, dims, units, title, labels, xlim, ylim, ticks,
ctr_Icutoff, ctr_levels, ctr_factor, cmap, outfile,
debug):
"""Plot a 2d NMR spectrum from NMRPipe format.
"""
# Setup the figure
fig = plt.figure(figsize=dims)
if title is not None:
plt.title(title)
# Setup the input spectra
if len(filenames) == 0:
filenames = [str(default2d)]
for filename in filenames:
header, data = pipe.read(filename)
# Format the labels
xlabel_hdr = format_isotope(header['FDF2LABEL'])
ylabel_hdr = format_isotope(header['FDF1LABEL'])
# Format the axes
ppm1, ppm2 = freq_ppm(header, data, debug=debug)
if units[0] is None:
pass
else:
x = ppm1
xlabel = labels[0] or "{} Frequency (ppm)".format(xlabel_hdr)
if units[1] is None:
pass
else:
y = ppm2
ylabel = labels[1] or "{} Frequency (ppm)".format(ylabel_hdr)
# Set the plot
cl = contours_by_Imax(data, minI_factor=ctr_Icutoff**-1,
cl_factor=ctr_factor, num_contours=ctr_levels)
cnt = plt.contour(x, y, data, levels=cl, cmap=cmap)
# Set plot information for the last contour
axes = cnt.axes
axes.invert_xaxis()
axes.set_xlabel(xlabel)
axes.xaxis.set_major_locator(MultipleLocator(ticks[0]))
axes.invert_yaxis()
axes.set_ylabel(ylabel)
axes.yaxis.set_major_locator(MultipleLocator(ticks[1]))
# Set the limits
if not any(i is None for i in xlim):
axes.set_xlim(*xlim)
if not any(i is None for i in ylim):
axes.set_ylim(*ylim)
# Reposition elements
fig.tight_layout()
if outfile is None:
plt.show()
else:
plt.savefig(outfile)
if __name__ == '__main__':
plot2d()
``` |
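The helper functions can also be used on their own; a small sketch on synthetic data (the array below is random, not an NMRPipe spectrum).
```python
# Sketch: the helpers from plotpipe2d.py used standalone on synthetic data.
import numpy as np

from plotpipe2d import contours_by_Imax, format_isotope  # assumed import

data = np.random.rand(128, 256)
levels = contours_by_Imax(data, minI_factor=8., cl_factor=1.2, num_contours=10)
print(levels[:3])              # 10 geometrically spaced levels starting at max(data)/8

print(format_isotope('15N'))   # -> $^{15}$N
print(format_isotope('HN'))    # no leading digits, returned unchanged
```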
{
"source": "jlosey/muller",
"score": 2
} |
#### File: jlosey/muller/GeneratorFunctions.py
```python
import math
import string
import glob
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
from scipy import optimize
from scipy.linalg import expm, logm
import os.path
from os import walk
import pylab
from collections import defaultdict
import scipy.integrate as integrate
from pandas import *
import pandas as pd
#from rpy2.robjects.packages import importr
#utils = importr('utils')
#utils.install_packages('gutenbergr', repos='https://cloud.r-project.org')
#utils.install_packages('ctmcd')
#import rpy2.robjects as ro
#from rpy2.robjects import pandas2ri
#pandas2ri.activate()
#ctmcd = importr('ctmcd')
import os
import time
from scipy import linalg
from random import *
from helperFunctions import *
from constants import *
#Defined for i->j
def diagonalAdjustment(matrix, tau=1, k=0, epsilon=0.001, maxIterations=20):
#input is ETM or EPM, returns generator
#take log
    logMatrix = isRealLog(normalizeMatrix(matrix, k=k), epsilon=epsilon, maxIterations=maxIterations)/tau
# logMatrix=logm(matrix)
#set off diagonals to zero
for i in range(logMatrix.shape[0]):
for j in range(logMatrix.shape[0]):
if(i!=j and logMatrix[i,j]<0):
logMatrix[i,j] = 0
#make diagonals the negative sum of rest of row
for i in range(logMatrix.shape[0]):
logMatrix[i,i]=0 #first set diagonals to zero
logMatrix[i,i] = -1 * logMatrix[i].sum() #by row
return logMatrix
#Defined for i->j
def weightedAdjustment(matrix, tau=1, k=0, epsilon=0.001, maxIterations=20): #input is ETM or EPM
#returns Generator
#take log
    logMatrix = isRealLog(normalizeMatrix(matrix, k=k), epsilon=epsilon, maxIterations=maxIterations)/tau
#set off diagonals to zero as in DA
for i in range(logMatrix.shape[0]):
for j in range(logMatrix.shape[0]):
if(i!=j and logMatrix[i,j]<0):
logMatrix[i,j] = 0
absMatrix = abs(np.copy(logMatrix))
for i in range(logMatrix.shape[0]):
for j in range(logMatrix.shape[0]):
matrix[i,j] = logMatrix[i,j] - absMatrix[i,j] * logMatrix[:,i].sum() / absMatrix[:,i].sum()
return matrix
def EM(matrix, tau=1, k=0):
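    # Note: relies on the rpy2/ctmcd setup commented out in the imports above being enabled.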
df = npMatrixToPDdf(matrix)
DAmat = diagonalAdjustment(matrix, tau=tau, k=k)
EMmat = ctmcd.gm(tm=df, te=tau, method="EM", gmguess=DAmat)[0]
return EMmat
def MLE(matrix, t=1, iterations=250000,pseudobeta=1, noiseR=0.1, noiseP=0, smooth=0.0001):
N0=matrix
N=normalizeMatrix(matrix+1)
n=N.shape[0]
P=guessP(N)
R=guessR(N,P)
#R = np.random.rand(R.shape[0], R.shape[0])
# for i in range(R.shape[0]):
# R[i,i] = 0
# R[i,i] = -1 * R[:,i].sum() #should be column sum
# print("randR")
# for i in detailedBalance(R):
# print(i)
print("#Iterations: %s"%iterations)
print("#Pseudobeta: %s"%pseudobeta)
print("#noiseR: %s"%noiseR)
print("#noiseP: %s"%noiseP)
print("#smooth: %s"%smooth)
logl = calcLL(N0,R,t)
seed()
rejected=0
rejectedLastThousand=0
adjusted=np.zeros(n)
for step in range(1,iterations+1):
i=randint(0,n-1)
if (t%2==0 or noiseP==0):
j=n
while j>=n or j<0:
j=i+1-2*randint(0,1)
dr=-R[i,j]
#while R[i,j]+dr<=0:# or R[j,i]+P[i]/P[j]*dr<=0: #off diagonals need to still be greater than 0
while R[i,j]+dr<=0 or R[j,i]-dr<=0: #off diagonals need to still be greater than 0
# or R[j,i]+P[j]/P[i]*dr<=0
dr=(random()-0.5)*noiseR
R[i,j]+=dr
R[i,i]-=dr
#R[j,i]-=dr
#R[j,j]+=dr
# R[j,i]+=dr*P[i]/P[j]
# R[j,j]-=dr*P[i]/P[j]
else:
dp=(random()-0.5)*noiseP
for j in range(n):
if i!=j:
P[j]-=(dp*P[i])/n
P[i]*=(1+dp)
if (i<n-1):
R[i+1,i+1]-=R[i+1,i]*dp
R[i+1,i]*=1+dp
if (i>0):
R[i-1,i-1]-=R[i-1,i]*dp
R[i-1,i]*=1+dp
#r=sp.linalg.expm(R)
loglt=0
#for ii in range(n):
# for jj in range(n):
# if N[ii,jj]*r[ii,jj]>0:
# loglt+=log(r[ii,jj])*N[ii,jj]
#if smooth>0:
# for ii in range(n-1):
# D[ii]=R[ii,ii+1]*sqrt(P[ii+1]/P[ii])
# for ii in range(n-2):
# loglt-=(D[ii]-D[ii+1])**2/(2*smooth**2)+(log(P[ii]/P[ii+1]))**2/(2*smooth**2)
loglt = calcLL(N0, R, t)
dlog = (loglt) - (logl) #these numbers are always negative, thus if loglt>logl this will be positive
r = random()
if math.isnan(loglt) or math.isinf(loglt) or (r>np.exp(pseudobeta*(dlog))): #rejection criterion
if (t%2==0 or noiseP==0):
R[i,j]-=dr
R[i,i]+=dr
#R[j,i]+=dr
#R[j,j]-=dr
##R[j,i]-=dr*P[i]/P[j]
#R[j,j]+=dr*P[i]/P[j]
else:
P[i]/=(1+dp)
for j in range(n):
if i!=j:
P[j]+=(dp*P[i])/n
if (i<n-1):
R[i+1,i]/=1+dp
R[i+1,i+1]+=R[i+1,i]*dp
if (i>0):
R[i-1,i]/=1+dp
R[i-1,i-1]+=R[i-1,i]*dp
rejected +=1.
rejectedLastThousand +=1.
else:
logl=loglt
adjusted[i]+=1
if step%1000==0:
###########
#noiseR = noiseR * min(1,(1 - rejectedLastThousand/1000)+0.5)
#noiseR = 1 - rejectedLastThousand/1000
#noiseP = noiseP * min(1,(1 - rejectedLastThousand/1000)+0.5)
#if (rejectedLastThousand/1000*100 > 95):
# print("Iteration: %d, Logl: %.2f, TotalReject: %.2f%%, RecentReject: %.2f%%, noiseR = %.2f" %(step, logl, rejected/float(step)*100, rejectedLastThousand/1000*100, noiseR))
# return R
print("Iteration: %d, Logl: %.2f, TotalReject: %.2f%%, RecentReject: %.2f%%, noiseR = %.2f" %(step, logl, rejected/float(step)*100, rejectedLastThousand/1000*100, noiseR))
############
#print("Iteration: %d, Logl: %.2f, TotalReject: %.2f%%, RecentReject: %.2f%%" %(step, logl, rejected/float(step)*100, rejectedLastThousand/1000*100))
rejectedLastThousand=0
if step%5000==0:
for i in detailedBalance(R):
print(i)
return R
#Helper function by which to optimize the frobenius distance between the two matrices.
def optimizeFunc(x, i, j, Q, P):#x is exp(Q(q_{i,j}))
Q[i,i] += Q[i,j] - x
Q[i,j] = x
return frobenius(iterative_expm(Q), P)
#Input is a ETM or EPM
def CWO(matrix, tau=1, k=0, epsilon=0.001, maxIterations=20):
calculations=0
#It is noted that any method can be used here. Not just DA.
    Q = diagonalAdjustment(matrix, tau=tau, k=k, epsilon=epsilon, maxIterations=maxIterations)
matrix = normalizeMatrix(matrix, k=k)
for i in range(Q.shape[0]):
for j in range(Q.shape[0]):
if(i!=j):
if(Q[i,j]>1e-10):
calculations+=1
#Run an optimization on each row over the first function defined in this cell
x = optimize.fmin(optimizeFunc, Q[i,j], args=(i,j,Q,matrix), maxiter=200, full_output=False, disp=False)[0]#argmin(i, j, Q, c)
Q[i,j] = x
return Q
def QOG(matrix, tau=1, k=0, epsilon=eps, maxIterations=maxIters):
    logMatrix = isRealLog(normalizeMatrix(matrix, k=k), epsilon=epsilon, maxIterations=maxIterations)/tau
#step 2 of algorithm
sortedMatrix, unsortKey = sortMatrix(logMatrix)
#step 3 of algorithm
m = np.zeros(matrix.shape[0])
for i in range(matrix.shape[0]):
m[i] = findMValue(sortedMatrix[i])
#step 4 of algorithm
copyMatrix=np.copy(sortedMatrix)
for i in range(matrix.shape[0]):#for each row
for j in range(2,int(m[i])+1):#include m[i]
sortedMatrix[i,j]=0
for j in range(int(m[i])+1,matrix.shape[0]):#for each value not zero'd
for k in range(int(m[i])+1,matrix.shape[0]): #summation
sortedMatrix[i,j] -= copyMatrix[i,k] / (matrix.shape[0] - m[i] + 1)
sortedMatrix[i,j] -= copyMatrix[i,0] / (matrix.shape[0] - m[i] + 1)
for k in range(int(m[i]+1),matrix.shape[0]):
sortedMatrix[i,0] -= copyMatrix[i,k] / (matrix.shape[0] - m[i] + 1)
sortedMatrix[i,0] -= copyMatrix[i,0] / (matrix.shape[0] - m[i] + 1)
#step 5 - shuffle rows back into order.
quasi = unsortMatrix(sortedMatrix, unsortKey)
return quasi
def findMValue(array): #step 3 of algorithm
n = len(array)-1 #last index
val=0
for i in range(1,n+1): #i loops from 1 to n
val = (n+1-i)*array[i+1]-array[0]
for j in range(n-i):#from 0 to n-1-i
val -= array[n-j]
if(val>=0): #truth condition of algorithm
return i
return -1 #otherwise return that row cannot be optimized.
def sortMatrix(matrix): #returns sortMatrix and unsortKey
sortMatrix = np.copy(matrix)
for i in range(matrix.shape[0]):
sortMatrix[i].sort()
sortMatrix = sortMatrix
unsortKey = np.zeros((matrix.shape[0], matrix.shape[0]))
for i in range(matrix.shape[0]):
for j in range(matrix.shape[0]):
f=0
while(unsortKey[i,j]==0):
if(sortMatrix[i,f]==matrix[i,j]):
unsortKey[i,j] = f + 1
f+=1
return sortMatrix, unsortKey
def unsortMatrix(matrix, key): #take in sorted matrix and key to unsort
unsortedMatrix = np.zeros((matrix.shape[0],matrix.shape[0]))
for i in range(matrix.shape[0]):
for j in range(matrix.shape[0]):
unsortedMatrix[i,j] = matrix[i,int(key[i,j])-1]
return unsortedMatrix
```
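`diagonalAdjustment` above leans on helpers from `helperFunctions`/`constants` that are not shown here; the sketch below reproduces the same diagonal-adjustment idea in a self-contained way with scipy's matrix logarithm, on a made-up transition matrix.
```python
# Self-contained sketch of the diagonal-adjustment idea (independent of the repo's
# helperFunctions/constants): take the matrix log of an empirical transition matrix,
# clip negative off-diagonal rates, and reset the diagonal so each row sums to zero.
import numpy as np
from scipy.linalg import logm

P = np.array([[0.90, 0.08, 0.02],
              [0.05, 0.90, 0.05],
              [0.02, 0.08, 0.90]])   # made-up row-stochastic transition matrix

G = np.real(logm(P))                 # candidate generator, may have negative off-diagonals
off_diag = ~np.eye(3, dtype=bool)
G[off_diag & (G < 0)] = 0.0          # clip negative off-diagonal rates
np.fill_diagonal(G, 0.0)
np.fill_diagonal(G, -G.sum(axis=1))  # rows of a generator must sum to zero
print(G)
```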
#### File: jlosey/muller/muller.py
```python
import numpy as np
import matplotlib.pyplot as plt
def muller(x,y):
aa = [-1, -1, -6.5, 0.7]
bb = [0, 0, 11, 0.6]
cc = [-10, -10, -6.5, 0.7]
AA = [-200, -100, -170, 15]
XX = [1, 0, -0.5, -1]
YY = [0, 0.5, 1.5, 1]
value = 0.
for j in range(4):
value += AA[j] * np.exp(aa[j] * (x - XX[j])**2 + \
bb[j] * (x - XX[j]) * (y - YY[j]) + \
cc[j] * (y - YY[j])**2)
return value
X = np.linspace(-1.75,1.,300)
Y = np.linspace(-0.5,2.5,300)
xx,yy = np.meshgrid(X,Y)
zz = muller(xx,yy)
print(xx,yy,zz)
ax = plt.contourf(xx, yy, zz.clip(max=200), 40)
plt.legend()
plt.show()
``` |
{
"source": "jlou2u/katas",
"score": 4
} |
#### File: p99/python3/p21.py
```python
def insert_at(e, n, l):
if n < 0:
l = l[::-1]
n = -1*n-1
r = l[0:n] + [e] + l[n:]
return r[::-1]
return l[0:n] + [e] + l[n:]
def test_insert_at():
l = ['a', 'b', 'c', 'd']
l_copy = [e for e in l]
assert insert_at('new', 1, l) == ['a', 'new', 'b', 'c', 'd']
assert l == l_copy
assert insert_at('new', 0, []) == ['new']
assert insert_at('new', 1, []) == ['new']
assert insert_at('new', 10, []) == ['new']
assert insert_at('new', -1, ['a']) == ['a', 'new']
assert insert_at('new', -2, ['a', 'b']) == ['a', 'new', 'b']
```
#### File: p99/python3/p22.py
```python
def rng(i, k):
return list(range(i, k+1))
def test_rng():
assert rng(4, 9) == [4, 5, 6, 7, 8, 9]
```
#### File: p99/python3/p34.py
```python
def gcd(i, j):
for n in reversed(range(max(i, j)+1)):
if i % n == 0 and j % n == 0:
return n
def is_coprime(i, j):
return gcd(i, j) == 1
def totient(m):
n = 0
for i in reversed(range(m+1)):
if is_coprime(m, i):
n = n + 1
return n
def test_totient():
assert totient(2) == 1
assert totient(3) == 2
assert totient(4) == 2
assert totient(5) == 4
assert totient(6) == 2
assert totient(7) == 6
assert totient(8) == 4
assert totient(9) == 6
assert totient(10) == 4
assert totient(11) == 10
assert totient(12) == 4
assert totient(13) == 12
assert totient(14) == 6
assert totient(15) == 8
``` |
{
"source": "jlouder/sigal",
"score": 2
} |
#### File: sigal/plugins/watermark.py
```python
import logging
from PIL import Image, ImageEnhance
from sigal import signals
def reduce_opacity(im, opacity):
"""Returns an image with reduced opacity."""
assert opacity >= 0 and opacity <= 1
if im.mode != 'RGBA':
im = im.convert('RGBA')
else:
im = im.copy()
alpha = im.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
im.putalpha(alpha)
return im
def watermark(im, mark, position, opacity=1):
"""Adds a watermark to an image."""
if opacity < 1:
mark = reduce_opacity(mark, opacity)
if im.mode != 'RGBA':
im = im.convert('RGBA')
# create a transparent layer the size of the image and draw the
# watermark in that layer.
layer = Image.new('RGBA', im.size, (0, 0, 0, 0))
if position == 'tile':
for y in range(0, im.size[1], mark.size[1]):
for x in range(0, im.size[0], mark.size[0]):
layer.paste(mark, (x, y))
elif position == 'scale':
# scale, but preserve the aspect ratio
ratio = min(
float(im.size[0]) / mark.size[0], float(im.size[1]) / mark.size[1])
w = int(mark.size[0] * ratio)
h = int(mark.size[1] * ratio)
mark = mark.resize((w, h))
layer.paste(mark, (int((im.size[0] - w) / 2),
int((im.size[1] - h) / 2)))
else:
layer.paste(mark, position)
# composite the watermark with the layer
return Image.composite(layer, im, layer)
def add_watermark(img, settings=None):
logger = logging.getLogger(__name__)
logger.debug('Adding watermark to %r', img)
mark = Image.open(settings['watermark'])
position = settings.get('watermark_position', 'scale')
opacity = settings.get("watermark_opacity", 1)
return watermark(img, mark, position, opacity)
def register(settings):
logger = logging.getLogger(__name__)
if settings.get('watermark'):
signals.img_resized.connect(add_watermark)
else:
logger.warning('Watermark image is not set')
``` |
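The `watermark()` helper above is self-contained and can be used outside of sigal; a short sketch (the filenames are hypothetical).
```python
# Sketch: using watermark() from the plugin outside of sigal.
# 'photo.jpg' and 'logo.png' are hypothetical local files.
from PIL import Image

from watermark import watermark  # assumed import of the module above

im = Image.open('photo.jpg')
mark = Image.open('logo.png')

# 'scale' centers the mark and fits it while keeping its aspect ratio;
# 'tile' repeats it; an (x, y) tuple pastes it at a fixed position.
out = watermark(im, mark, position='scale', opacity=0.5)
out.save('photo_watermarked.png')
```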
{
"source": "jlouie95618/airtable-python",
"score": 2
} |
#### File: airtable-python/airtable/airtable_error.py
```python
class AirtableError(object):
"""docstring for AirtableError"""
def __init__(self, arg):
super(AirtableError, self).__init__()
self.arg = arg
```
#### File: airtable-python/airtable/perform_request.py
```python
import requests
def performRequest():
header = { 'Authorization': 'Bearer keyrpG4FPpEqZ0ubg'}
r = requests.get('https://api.airtable.com/v0/app9uvKeuVL1pOfCD/Cuisines/recgruzSZ0BvRB63q', headers = header)
    print(r)
    print(r.text)
performRequest()
```
#### File: airtable-python/airtable/table.py
```python
class Table(object):
"""docstring for Table"""
def __init__(self, arg):
self.arg = arg
``` |
{
"source": "J-L-O/UNO",
"score": 2
} |
#### File: UNO/utils/metrics.py
```python
from typing import Tuple
import torch
import numpy as np
from utils.eval import cluster_acc
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi_score
from sklearn.metrics import adjusted_rand_score as ari_score
def compute_distance_to_prototypes(features: torch.tensor, prototypes: torch.tensor, labels: torch.tensor) -> float:
mean_distance = 0
num_classes = prototypes.shape[0]
for i in range(num_classes):
if ~torch.isnan(prototypes[i, 0]):
samples = labels == i
mean_distance += calculate_l2_loss(prototypes[i], features[samples])
mean_distance /= num_classes
return mean_distance
def compute_metrics(preds: np.ndarray, labels: np.ndarray) -> Tuple[float, float, float]:
acc = cluster_acc(labels, preds)
nmi = nmi_score(labels, preds)
ari = ari_score(labels, preds)
return acc, nmi, ari
def calculate_prototypes(features, labels, num_classes):
class_assignments = labels.view(labels.shape[0], 1).expand(-1, features.shape[1])
one_hot = torch.nn.functional.one_hot(labels, num_classes)
labels_count = one_hot.sum(dim=0)
prototypes = torch.zeros((num_classes, features.shape[1]), dtype=features.dtype, device=features.device)
prototypes.scatter_add_(0, class_assignments, features)
prototypes = prototypes / labels_count.float().unsqueeze(1)
return prototypes
def calculate_l2_loss(prototype, features):
repeated_prototype = prototype.unsqueeze(0).repeat(features.shape[0], 1)
mse_loss = torch.nn.functional.mse_loss(repeated_prototype, features, reduction="mean")
# sum_of_squares = mse_loss.sum(dim=1)
# l2_loss = torch.sqrt(sum_of_squares + 1e-10)
return mse_loss # l2_loss.mean()
def calculate_nearest_labeled_neighbors(features: torch.tensor, model: torch.nn.Module, k: int, labels: torch.tensor,
num_classes: int, key: str = "logits_lab") -> torch.tensor:
prototypes = calculate_prototypes(features, labels, num_classes)
prototype_features = model.forward_heads(prototypes.float())
nearest_neighbors = prototype_features[key].topk(k=k, dim=1).indices
return nearest_neighbors
```
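A toy sketch of `calculate_prototypes` to make the `scatter_add_` bookkeeping concrete (values are illustrative).
```python
# Sketch: calculate_prototypes on a toy batch of 4 samples and 2 classes.
import torch

from utils.metrics import calculate_prototypes  # assumed import path from the repo

features = torch.tensor([[1., 0.],
                         [3., 0.],
                         [0., 2.],
                         [0., 4.]])
labels = torch.tensor([0, 0, 1, 1])

prototypes = calculate_prototypes(features, labels, num_classes=2)
print(prototypes)   # -> [[2., 0.], [0., 3.]], i.e. per-class feature means
```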
#### File: UNO/utils/sinkhorn_knopp.py
```python
import torch
class SinkhornKnopp(torch.nn.Module):
def __init__(self, num_iters=3, epsilon=0.05):
super().__init__()
self.num_iters = num_iters
self.epsilon = epsilon
@torch.no_grad()
def forward(self, logits):
Q = torch.exp(logits / self.epsilon).t()
B = Q.shape[1]
K = Q.shape[0] # how many prototypes
# make the matrix sums to 1
sum_Q = torch.sum(Q)
Q /= sum_Q
for it in range(self.num_iters):
# normalize each row: total weight per prototype must be 1/K
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
Q /= sum_of_rows
Q /= K
# normalize each column: total weight per sample must be 1/B
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
        Q *= B # the columns must sum to 1 so that Q is an assignment
return Q.t()
```
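A quick sanity-check sketch: after the normalization loop, every row of the returned assignment matrix sums to one.
```python
# Sketch: Sinkhorn-Knopp assignments on random logits (shapes are illustrative).
import torch

from utils.sinkhorn_knopp import SinkhornKnopp  # assumed import path from the repo

sk = SinkhornKnopp(num_iters=3, epsilon=0.05)
logits = torch.randn(16, 5)   # 16 samples, 5 prototypes
Q = sk(logits)                # soft assignments, shape (16, 5)

print(Q.shape)
print(Q.sum(dim=1))           # each sample's assignment sums to 1
```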
#### File: UNO/utils/transforms.py
```python
import torch
import torchvision.transforms as T
from PIL import ImageFilter, ImageOps
import random
class DiscoverTargetTransform:
def __init__(self, mapping):
self.mapping = mapping
def __call__(self, y):
y = self.mapping[y]
return y
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[0.1, 2.0]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
class Solarize(object):
def __init__(self, p=0.2):
self.prob = p
def __call__(self, img):
if torch.bernoulli(torch.tensor(self.prob)) == 0:
return img
v = torch.rand(1) * 256
return ImageOps.solarize(img, v)
class Equalize(object):
def __init__(self, p=0.2):
self.prob = p
def __call__(self, img):
if torch.bernoulli(torch.tensor(self.prob)) == 0:
return img
return ImageOps.equalize(img)
def get_multicrop_transform(dataset, mean, std):
if dataset == "ImageNet":
return T.Compose(
[
T.RandomResizedCrop(size=96, scale=(0.08, 0.5)),
T.RandomHorizontalFlip(),
T.RandomApply([T.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
T.RandomGrayscale(p=0.2),
T.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5),
T.ToTensor(),
T.Normalize(mean, std),
]
)
elif "CIFAR" in dataset:
return T.Compose(
[
T.RandomResizedCrop(size=18, scale=(0.3, 0.8)),
T.RandomHorizontalFlip(),
T.RandomApply([T.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
Solarize(p=0.2),
Equalize(p=0.2),
T.ToTensor(),
T.Normalize(mean, std),
]
)
class MultiTransform:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, x):
return [t(x) for t in self.transforms]
def get_transforms(mode, dataset, multicrop=False, num_large_crops=2, num_small_crops=2):
mean, std = {
"CIFAR10": [(0.491, 0.482, 0.447), (0.202, 0.199, 0.201)],
"CIFAR100": [(0.507, 0.487, 0.441), (0.267, 0.256, 0.276)],
"ImageNet": [(0.485, 0.456, 0.406), (0.229, 0.224, 0.225)],
}[dataset]
transform = {
"ImageNet": {
"unsupervised": T.Compose(
[
T.RandomResizedCrop(224, (0.5, 1.0)),
T.RandomHorizontalFlip(),
T.RandomApply([T.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.5),
T.RandomGrayscale(p=0.2),
T.RandomApply([GaussianBlur([0.1, 2.0])], p=0.2),
T.ToTensor(),
T.Normalize(mean, std),
]
),
"eval": T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize(mean, std),
]
),
},
"CIFAR100": {
"unsupervised": T.Compose(
[
T.RandomChoice(
[
T.RandomCrop(32, padding=4),
T.RandomResizedCrop(32, (0.5, 1.0)),
]
),
T.RandomHorizontalFlip(),
T.RandomApply([T.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.6),
Solarize(p=0.1),
Equalize(p=0.1),
T.ToTensor(),
T.Normalize(mean, std),
]
),
"supervised": T.Compose(
[
T.RandomCrop(32, padding=4),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize(mean, std),
]
),
"eval": T.Compose(
[
T.CenterCrop(32),
T.ToTensor(),
T.Normalize(mean, std),
]
),
},
"CIFAR10": {
"unsupervised": T.Compose(
[
T.RandomChoice(
[
T.RandomCrop(32, padding=4),
T.RandomResizedCrop(32, (0.5, 1.0)),
]
),
T.RandomHorizontalFlip(),
T.RandomApply([T.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.6),
Solarize(p=0.1),
Equalize(p=0.1),
T.ToTensor(),
T.Normalize(mean, std),
]
),
"supervised": T.Compose(
[
T.RandomCrop(32, padding=4),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize(mean, std),
]
),
"eval": T.Compose(
[
T.CenterCrop(32),
T.ToTensor(),
T.Normalize(mean, std),
]
),
},
}[dataset][mode]
if mode == "unsupervised":
transforms = [transform] * num_large_crops
if multicrop:
multicrop_transform = get_multicrop_transform(dataset, mean, std)
transforms += [multicrop_transform] * num_small_crops
transform = MultiTransform(transforms)
return transform
```
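In `unsupervised` mode `get_transforms` returns a `MultiTransform`, so applying it to one image yields a list of augmented views; a sketch on a blank stand-in image.
```python
# Sketch: the list of views produced in "unsupervised" mode (toy input image).
from PIL import Image

from utils.transforms import get_transforms  # assumed import path from the repo

transform = get_transforms("unsupervised", "CIFAR10", multicrop=True,
                           num_large_crops=2, num_small_crops=2)

img = Image.new("RGB", (32, 32))   # blank stand-in for a CIFAR-10 image
views = transform(img)             # 2 large (32x32) crops + 2 small (18x18) crops
print(len(views), [v.shape for v in views])
```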
#### File: UNO/utils/visualization.py
```python
import time
from typing import List
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from torch.utils.tensorboard import SummaryWriter
def visualize_features(features: np.ndarray, labels: np.ndarray, num_classes: int, tag: str, writer: SummaryWriter,
epoch: int, metadata: List = None, metadata_header: List[str] = None) -> None:
feat_cols = ["pixel" + str(i) for i in range(features.shape[1])]
df = pd.DataFrame(features, columns=feat_cols)
df["y"] = labels
df["label"] = df["y"].apply(lambda i: int(i))
print(f"Size of the dataframe: {df.shape}")
df_subset = df
data_subset = df_subset[feat_cols].values
pca_50 = PCA(n_components=50)
pca_result_50 = pca_50.fit_transform(data_subset)
print(f"Cumulative explained variation for 50 principal components: {np.sum(pca_50.explained_variance_ratio_)}")
if metadata is None:
writer.add_embedding(pca_result_50, labels.tolist(), tag=tag, global_step=epoch)
else:
writer.add_embedding(pca_result_50, metadata, tag=tag, global_step=epoch, metadata_header=metadata_header)
time_start = time.time()
tsne = TSNE(n_components=2, verbose=0, perplexity=40, n_iter=300)
tsne_pca_results = tsne.fit_transform(pca_result_50)
print(f"t-SNE done! Time elapsed: {time.time() - time_start} seconds")
df_subset["tsne-pca50-one"] = tsne_pca_results[:, 0]
df_subset["tsne-pca50-two"] = tsne_pca_results[:, 1]
fig = plt.figure()
plt.title(tag)
sns.scatterplot(
x="tsne-pca50-one", y="tsne-pca50-two",
hue="y",
palette=sns.color_palette("hls", num_classes),
data=df_subset,
legend="full",
alpha=0.3,
)
writer.add_figure(tag, fig, global_step=epoch, close=True)
def visualize_labeled_vs_unlabeled(features_unlabeled: np.ndarray, labels_unlabeled: np.ndarray,
features_labeled: np.ndarray, labels_labeled: np.ndarray,
tag: str, writer: SummaryWriter, epoch: int):
coarse_labels_unlabeled = np.zeros_like(labels_unlabeled)
coarse_labels_labeled = np.ones_like(labels_labeled)
max_unlabeled = np.max(labels_unlabeled)
max_labeled = np.max(labels_labeled)
features_combined = np.concatenate((features_unlabeled, features_labeled), axis=0)
coarse_labels_combined = np.concatenate((coarse_labels_unlabeled, coarse_labels_labeled), axis=0)
labeled_coarse_unlabeled_fine = np.concatenate((labels_unlabeled,
np.full(features_labeled.shape[0], max_unlabeled + 1)), axis=0)
labeled_fine_unlabeled_coarse = np.concatenate((np.full(features_unlabeled.shape[0], max_labeled + 1),
labels_labeled), axis=0)
metadata = list(zip(coarse_labels_combined, labeled_coarse_unlabeled_fine, labeled_fine_unlabeled_coarse))
    metadata_header = ["both coarse", "labeled coarse, unlabeled fine", "labeled fine, unlabeled coarse"]
visualize_features(features_combined, coarse_labels_combined, 2, tag, writer, epoch,
metadata=metadata, metadata_header=metadata_header)
def visualize_distances(p1: torch.tensor, p2: torch.tensor, num_classes: int, tag: str,
writer: SummaryWriter, epoch: int):
distances = torch.cdist(p1, p2, 2)
sorted_distances = distances.sort(dim=1).values
mean_distances = sorted_distances.mean(dim=0)[1:] # First value is always 0
fig = plt.figure()
plt.title(tag)
ax = sns.lineplot(x=np.arange(num_classes - 1), y=mean_distances.cpu().numpy(), drawstyle='steps-pre')
ax.set(ylim=(0, None))
ax.set(xlabel='k')
ax.set(ylabel='mean l2 distance')
writer.add_figure(tag, fig, global_step=epoch, close=True)
``` |
{
"source": "jlousada315/NNE-TCP",
"score": 3
} |
#### File: NNE-TCP/package/Prioritizer.py
```python
from sklearn.manifold import TSNE
import umap
from sklearn.model_selection import StratifiedKFold, KFold
from Data import DataCI
from DataGenerator import DataGenerator
from Model import Model, Metrics
from Visualizer import Visualizer
from tensorflow import keras
import numpy as np
import pandas as pd
def reduce_dim(weights, components=3, method='TSNE'):
"""
Reduce dimensions of embeddings
:param weights:
:param components:
:param method:
:return: TSNE or UMAP element
"""
if method == 'TSNE':
return TSNE(components, metric='cosine').fit_transform(weights)
elif method == 'UMAP':
# Might want to try different parameters for UMAP
return umap.UMAP(n_components=components, metric='cosine',
init='random', n_neighbors=5).fit_transform(weights)
class NNEmbeddings(Model, Metrics, Visualizer):
"""
Neural Networks Embeddings model which inherits from abstract class Model and class Metrics.
Once it is created, all the data becomes available from DataCI class and there is the possibility
of loading a previously trained model, or to train from scratch.
"""
def __init__(self, D: DataCI, embedding_size: int = 200, optimizer: str = 'Adam',
negative_ratio=1, nb_epochs: int = 10, batch_size: int = 1, classification: bool = False,
kfolds: int = 10, model_file: str = 'model.h5', load: bool = False, save: bool = False):
"""
NNEmbeddings Class initialization.
:param D:
:param model_file:
:param embedding_size:
:param optimizer:
:param save:
:param load:
"""
Model.__init__(self)
Metrics.__init__(self)
Visualizer.__init__(self)
self.Data = D
# Parameter Grid
self.param_grid = {'embedding_size': embedding_size,
'negative_ratio': negative_ratio,
'batch_size': batch_size,
'nb_epochs': nb_epochs,
'classification': classification,
'optimizer': optimizer
}
self.model_file = model_file
self.nr_revision = len(self.Data.pairs)
if load:
self.model = keras.models.load_model(self.model_file)
else:
self.model = self.build_model()
print(self.crossValidation(k_folds=kfolds))
if save:
self.model.save(self.model_file)
# self.train(save_model=save)
# y_true, y_pred = self.test()
# self.evaluate_classification(y_true, y_pred)
def build_model(self):
"""
Build model architecture/framework
:return: model
"""
from keras.layers import Input, Embedding, Dot, Reshape, Dense
from keras.models import Model
# Both inputs are 1-dimensional
file = Input(name='file', shape=[1])
test = Input(name='test', shape=[1])
# Embedding the book (shape will be (None, 1, 50))
file_embedding = Embedding(name='file_embedding',
input_dim=len(self.Data.file_index),
output_dim=self.param_grid['embedding_size'])(file)
# Embedding the link (shape will be (None, 1, 50))
test_embedding = Embedding(name='test_embedding',
input_dim=len(self.Data.test_index),
output_dim=self.param_grid['embedding_size'])(test)
# Merge the layers with a dot product along the second axis (shape will be (None, 1, 1))
merged = Dot(name='dot_product', normalize=True, axes=2)([file_embedding, test_embedding])
# Reshape to be a single number (shape will be (None, 1))
merged = Reshape(target_shape=[1])(merged)
# If classification, add extra layer and loss function is binary cross entropy
if self.param_grid['classification']:
merged = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=[file, test], outputs=merged)
model.compile(optimizer=self.param_grid['optimizer'], loss='binary_crossentropy', metrics=['accuracy'])
# Otherwise loss function is mean squared error
else:
model = Model(inputs=[file, test], outputs=merged)
model.compile(optimizer=self.param_grid['optimizer'], loss='mse', metrics=['mae'])
model.summary()
return model
def train(self, save_model=False, plot=False):
"""
Train model.
:param batch_size:
:param plot: If true accuracy vs loss is plotted for training and validation set
:param n_positive:
:param negative_ratio: Ratio of positive vs. negative labels. Positive -> there is link between files.
Negative -> no link
:param save_model: If true model is saved as a .h5 file
:param validation_set_size: percentage of whole dataset for validation
:param training_set_size: percentage of whole dataset for training
:param nb_epochs: Number of epochs
:return:
"""
# Generate training set
training_set = self.Data.pairs
train_gen = DataGenerator(pairs=training_set, batch_size=self.param_grid['batch_size'],
nr_files=len(self.Data.all_files), nr_tests=len(self.Data.all_tests),
negative_ratio=self.param_grid['negative_ratio'])
# Train
self.model.fit(train_gen,
epochs=self.param_grid['nb_epochs'],
verbose=2)
if plot:
self.plot_acc_loss(self.model)
if save_model:
self.model.save(self.model_file)
def crossValidation(self, k_folds=10):
cv_accuracy_train = []
cv_accuracy_val = []
cv_loss_train = []
cv_loss_val = []
from sklearn.model_selection import train_test_split
s = np.array(list(self.Data.pairs.keys()))
kfold = KFold(n_splits=k_folds, shuffle=True)
idx = 0
for train_idx, val_idx in kfold.split(s):
print("=========================================")
print("====== K Fold Validation step => %d/%d =======" % (idx, k_folds))
print("=========================================")
pairs_train = {s[key]: self.Data.pairs[s[key]] for key in train_idx}
pairs_val = {s[key]: self.Data.pairs[s[key]] for key in val_idx}
train_gen = DataGenerator(pairs=pairs_train, batch_size=self.param_grid['batch_size'],
nr_files=len(self.Data.all_files), nr_tests=len(self.Data.all_tests),
negative_ratio=self.param_grid['negative_ratio'])
val_gen = DataGenerator(pairs=pairs_val, batch_size=self.param_grid['batch_size'],
nr_files=len(self.Data.all_files), nr_tests=len(self.Data.all_tests),
negative_ratio=self.param_grid['negative_ratio'])
# Train
h = self.model.fit(train_gen,
validation_data=val_gen,
epochs=self.param_grid['nb_epochs'],
verbose=2)
cv_accuracy_train.append(np.array(h.history['mae'])[-1])
cv_accuracy_val.append(np.array(h.history['val_mae'])[-1])
cv_loss_train.append(np.array(h.history['loss'])[-1])
cv_loss_val.append(np.array(h.history['val_loss'])[-1])
idx += 1
df = pd.DataFrame({'acc_train': cv_accuracy_train,
'loss_train': cv_loss_train,
'acc_val': cv_accuracy_val,
'loss_val': cv_loss_val
},
columns=['acc_train', 'loss_train', 'acc_val', 'loss_val'])
df.to_pickle('cv_scores.pkl')
return df
def predict(self, pickle_file: str = None):
"""
Makes model prediction for unseen data.
:param pickle_file:
:return:
"""
apfd = []
data = self.Data.df_unseen
data = data.explode('name')
data = data.explode('mod_files')
grouped = data.groupby(['revision'])
for name, group in grouped: # for each revision
preds_per_files = []
tests = group['name'].to_list()
labels = []
for t in self.Data.all_tests:
if t in tests:
labels.append(1)
else:
labels.append(0)
for row in group.iterrows(): # for each file
unseen_pairs = []
for t in self.Data.all_tests: # pair with every test
if row[1]['mod_files'] in self.Data.all_files:
unseen_pairs.append((self.Data.file_index[row[1]['mod_files']], self.Data.test_index[t]))
def generate_predictions(pairs, batch_size):
batch = np.zeros((batch_size, 2))
while True:
for idx, (file_id, test_id) in enumerate(pairs):
batch[idx, :] = (file_id, test_id)
# Increment idx by 1
idx += 1
yield {'file': batch[:, 0], 'test': batch[:, 1]}
if unseen_pairs:
x = next(generate_predictions(unseen_pairs, len(unseen_pairs)))
preds_per_files.append(self.model.predict(x))
pred = [max(idx) for idx in zip(*preds_per_files)] # return maximum score of test
prioritization = [x for _, x in sorted(zip(pred, labels), reverse=True)] # Reorder test case list
apfd.append(self.apfd(prioritization)) # calculate apfd
print(f'APFD -> {np.round(self.apfd(prioritization), 2)}')
df = pd.DataFrame({'apfd': apfd},
columns=['apfd'])
if pickle_file is not None:
df.to_pickle(pickle_file)
return df
def test(self):
# Generate training set
test_set = self.Data.unseen_pairs
test_gen = DataGenerator(pairs=test_set, batch_size=self.param_grid['batch_size'],
nr_files=len(self.Data.all_files), nr_tests=len(self.Data.all_tests),
negative_ratio=self.param_grid['negative_ratio'])
X, y = next(test_gen.data_generation(test_set))
pred = self.model.predict(X)
pred[pred < 0.5] = 0
pred[pred >= 0.5] = 1
return y, pred
def evaluate_classification(self, y, pred):
"""
Provide Classification report and metrics
:param y:
:param pred:
:return:
"""
print(' Evaluating Network...')
print(f' Test set accuracy - {np.round(100 * self.accuracy(y, pred), 1)}')
print(self.report(y, pred))
print(self.cnf_mtx(y, pred))
def extract_weights(self, name):
"""
Extract weights from a neural network model
:param name:
:return:
"""
# Extract weights
weight_layer = self.model.get_layer(name)
weights = weight_layer.get_weights()[0]
# Normalize
weights = weights / np.linalg.norm(weights, axis=1).reshape((-1, 1))
return weights
def get_components(self, components=2, method='TSNE'):
"""
Extract 2 components from multi-dimensional manifold
:param method:
:return:
"""
file_weight_class = self.extract_weights('file_embedding')
test_weight_class = self.extract_weights('test_embedding')
file_r = reduce_dim(file_weight_class, components=components, method=method)
test_r = reduce_dim(test_weight_class, components=components, method=method)
return file_r, test_r
def get_file_labels(self):
"""
Creates pairs of (file, file label) for color plot
:return: (files, file labels)
"""
pjs = []
for item in self.Data.all_files:
pjs.append((item, item.split('/')[0]))
return list(set(pjs))
def get_test_labels(self):
"""
Creates pairs of (test, test label) for color plot
:return: (tests, tests labels)
"""
tst = []
for item in self.Data.all_tests:
label = item.split('_')
if len(label) > 2:
tst.append((item, label[2]))
else:
tst.append((item, 'Other'))
return list(set(tst))
def plot_embeddings(self, method='TSNE'):
"""
Plots file and tests embeddings side by side without labels, with the corresponding dim reduction method.
:param method: TSNE or UMAP
:return: NoneType
"""
# Embeddings
files, tests = self.get_components(method=method)
self.plot_embed_both(files, tests, method=method)
def plot_embeddings_labeled(self, layer='tests', method='TSNE'):
"""
Plots file or test embedding with corresponding label, for the 10 most frequent items.
:param layer: File or Test layer
:param method: TSNE or UMAP
:return:
"""
if layer == 'tests':
tst_labels = self.get_test_labels()
print(len(tst_labels))
_, test_r = self.get_components(method=method)
print(len(test_r))
self.plot_embed_tests(tst_label=tst_labels, test_r=test_r, method=method)
elif layer == 'files':
file_labels = self.get_file_labels()
file_r, _ = self.get_components(method=method)
self.plot_embed_files(file_r=file_r, pjs_labels=file_labels, method=method)
def plot_model(self, show_shapes: bool = True):
"""
Plots and saves Keras model schema
:param show_shapes:
:return:
"""
keras.utils.plot_model(
self.model,
to_file="model.png",
show_shapes=show_shapes
)
``` |
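`reduce_dim` above wraps both t-SNE and UMAP behind one call; a sketch on random embedding weights (the array is synthetic).
```python
# Sketch: reduce_dim from Prioritizer.py applied to random embedding weights.
import numpy as np

from Prioritizer import reduce_dim  # assumed import of the module above

weights = np.random.rand(200, 50)            # 200 synthetic embeddings of size 50
coords = reduce_dim(weights, components=2, method='TSNE')
print(coords.shape)                          # -> (200, 2)
```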
{
"source": "jlousada315/RETECS",
"score": 3
} |
#### File: jlousada315/RETECS/agents.py
```python
import numpy as np
import os
from sklearn import neural_network, tree
try:
import cPickle as pickle
except:
import pickle
class ExperienceReplay(object):
def __init__(self, max_memory=5000, discount=0.9):
self.memory = []
self.max_memory = max_memory
self.discount = discount
def remember(self, experience):
self.memory.append(experience)
def get_batch(self, batch_size=10):
if len(self.memory) > self.max_memory:
del self.memory[:len(self.memory) - self.max_memory]
if batch_size < len(self.memory):
            timerank = np.arange(1, len(self.memory) + 1)
p = timerank / np.sum(timerank, dtype=float)
batch_idx = np.random.choice(range(len(self.memory)), replace=False, size=batch_size, p=p)
batch = [self.memory[idx] for idx in batch_idx]
else:
batch = self.memory
return batch
class BaseAgent(object):
def __init__(self, histlen):
self.single_testcases = True
self.train_mode = True
self.histlen = histlen
def get_action(self, s):
return 0
def get_all_actions(self, states):
""" Returns list of actions for all states """
return [self.get_action(s) for s in states]
def reward(self, reward):
pass
def save(self, filename):
""" Stores agent as pickled file """
pickle.dump(self, open(filename + '.p', 'wb'), 2)
@classmethod
def load(cls, filename):
return pickle.load(open(filename + '.p', 'rb'))
class NetworkAgent(BaseAgent):
def __init__(self, state_size, action_size, hidden_size, histlen):
super(NetworkAgent, self).__init__(histlen=histlen)
self.name = 'mlpclassifier'
self.experience_length = 10000
self.experience_batch_size = 1000
self.experience = ExperienceReplay(max_memory=self.experience_length)
self.episode_history = []
self.iteration_counter = 0
self.action_size = action_size
if isinstance(hidden_size, tuple):
self.hidden_size = hidden_size
else:
self.hidden_size = (hidden_size,)
self.model = None
self.model_fit = False
self.init_model(True)
# TODO This could improve performance (if necessary)
# def get_all_actions(self, states):
# try:
def init_model(self, warm_start=True):
if self.action_size == 1:
self.model = neural_network.MLPClassifier(hidden_layer_sizes=self.hidden_size, activation='relu',
warm_start=warm_start, solver='adam', max_iter=750)
else:
self.model = neural_network.MLPRegressor(hidden_layer_sizes=self.hidden_size, activation='relu',
warm_start=warm_start, solver='adam', max_iter=750)
self.model_fit = False
def get_action(self, s):
if self.model_fit:
if self.action_size == 1:
a = self.model.predict_proba(np.array(s).reshape(1, -1))[0][1]
else:
a = self.model.predict(np.array(s).reshape(1, -1))[0]
else:
a = np.random.random()
if self.train_mode:
self.episode_history.append((s, a))
return a
def reward(self, rewards):
if not self.train_mode:
return
try:
x = float(rewards)
rewards = [x] * len(self.episode_history)
except:
if len(rewards) < len(self.episode_history):
raise Exception('Too few rewards')
self.iteration_counter += 1
for ((state, action), reward) in zip(self.episode_history, rewards):
self.experience.remember((state, reward))
self.episode_history = []
if self.iteration_counter == 1 or self.iteration_counter % 5 == 0:
self.learn_from_experience()
def learn_from_experience(self):
experiences = self.experience.get_batch(self.experience_batch_size)
x, y = zip(*experiences)
if self.model_fit:
try:
self.model.partial_fit(x, y)
except ValueError:
self.init_model(warm_start=False)
self.model.fit(x, y)
self.model_fit = True
else:
self.model.fit(x, y) # Call fit once to learn classes
self.model_fit = True
# Decision Tree based agent
class DTAgent(BaseAgent):
def __init__(self, action_size, histlen, criterion, max_depth, min_samples_split):
super(DTAgent, self).__init__(histlen=histlen)
self.name = 'dtclassifier'
self.experience_length = 10000
self.experience_batch_size = 1000
self.experience = ExperienceReplay(max_memory=self.experience_length)
self.episode_history = []
self.iteration_counter = 0
self.action_size = action_size
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.model = None
self.model_fit = False
self.init_model()
# TODO This could improve performance (if necessary)
# def get_all_actions(self, states):
# try:
def init_model(self):
if self.action_size == 1:
self.model = tree.DecisionTreeClassifier(criterion=self.criterion, max_depth=self.max_depth,
min_samples_split=self.min_samples_split)
else:
self.model = tree.DecisionTreeClassifier(criterion=self.criterion, max_depth=self.max_depth,
min_samples_split=self.min_samples_split)
self.model_fit = False
def get_action(self, s):
if self.model_fit:
if self.action_size == 1:
a = self.model.predict_proba(np.array(s).reshape(1, -1))[0][0]
else:
a = self.model.predict(np.array(s).reshape(1, -1))[0]
else:
a = np.random.random()
if self.train_mode:
self.episode_history.append((s, a))
return a
def reward(self, rewards):
if not self.train_mode:
return
try:
x = float(rewards)
rewards = [x] * len(self.episode_history)
except:
if len(rewards) < len(self.episode_history):
raise Exception('Too few rewards')
self.iteration_counter += 1
for ((state, action), reward) in zip(self.episode_history, rewards):
self.experience.remember((state, reward))
self.episode_history = []
if self.iteration_counter == 1 or self.iteration_counter % 5 == 0:
self.learn_from_experience()
def learn_from_experience(self):
experiences = self.experience.get_batch(self.experience_batch_size)
x, y = zip(*experiences)
if self.model_fit:
try:
self.model.fit(x, y)
except ValueError:
self.init_model()
self.model.fit(x, y)
self.model_fit = True
else:
self.model.fit(x, y) # Call fit once to learn classes
self.model_fit = True
class RandomAgent(BaseAgent):
def __init__(self, histlen):
super(RandomAgent, self).__init__(histlen=histlen)
self.name = 'random'
def get_action(self, s):
return np.random.random()
def get_all_actions(self, states):
        prio = list(range(len(states)))
np.random.shuffle(prio)
return prio
class HeuristicSortAgent(BaseAgent):
""" Sort first by last execution results, then time not executed """
def __init__(self, histlen):
super(HeuristicSortAgent, self).__init__(histlen=histlen)
self.name = 'heuristic_sort'
self.single_testcases = False
def get_action(self, s):
raise NotImplementedError('Single get_action not implemented for HeuristicSortAgent')
def get_all_actions(self, states):
sorted_idx = sorted(range(len(states)),
key=lambda x: list(states[x][-self.histlen:]) + [states[x][-self.histlen - 1]])
sorted_actions = sorted(range(len(states)), key=lambda i: sorted_idx[i])
return sorted_actions
class HeuristicWeightAgent(BaseAgent):
""" Sort by weighted representation """
def __init__(self, histlen):
super(HeuristicWeightAgent, self).__init__(histlen=histlen)
self.name = 'heuristic_weight'
self.single_testcases = False
self.weights = []
def get_action(self, s):
raise NotImplementedError('Single get_action not implemented for HeuristicWeightAgent')
def get_all_actions(self, states):
if len(self.weights) == 0:
state_size = len(states[0])
self.weights = np.ones(state_size) / state_size
sorted_idx = sorted(range(len(states)), key=lambda x: sum(states[x] * self.weights))
sorted_actions = sorted(range(len(states)), key=lambda i: sorted_idx[i])
return sorted_actions
def restore_agent(model_file):
if os.path.exists(model_file + '.p'):
return BaseAgent.load(model_file)
else:
raise Exception('Not a valid agent')
```
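A small sketch driving the agent interface above: score a batch of state vectors, then feed back per-test rewards. The state layout and reward values are made up for illustration.
```python
# Sketch: exercising the agent interface with made-up state vectors and rewards.
import numpy as np

from agents import NetworkAgent  # assumed import of the module above

histlen = 4
agent = NetworkAgent(state_size=histlen + 2, action_size=1, hidden_size=12, histlen=histlen)

# One state per test case, e.g. [duration, time since last run, last `histlen` verdicts]
states = [np.random.rand(histlen + 2) for _ in range(20)]
priorities = agent.get_all_actions(states)     # one priority score per test case

# Binary per-test rewards (1 = test detected a failure), alternating here for illustration
rewards = [float(i % 2) for i in range(len(states))]
agent.reward(rewards)                          # stores experiences and fits the classifier
```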
#### File: jlousada315/RETECS/plot_stats.py
```python
from __future__ import division
import glob
import os
import matplotlib
#matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from stats import plot_result_difference_bars
try:
import cPickle as pickle
except:
import pickle
def plot_stats(prefix, stats_file, val_file, mean_interval=10, plot_graphs=True, save_graphs=False):
plot_stats_single_figure(prefix, stats_file, val_file, mean_interval, plot_graphs, save_graphs)
def plot_stats_single_figure(prefix, stats_file, val_file, mean_interval=10, plot_graphs=True, save_graphs=False):
if not plot_graphs and not save_graphs:
print('Set at least one of plot_graphs and save_graphs to True')
return
sns.set_style('whitegrid')
stats = pickle.load(open(stats_file, 'rb'))
fig, ax = plt.subplots(4)
(qax, rax, vax1, vax2) = ax
failure_count = np.add(stats['detected'], stats['missed'])
x = range(1, int(len(stats['scenarios']) / mean_interval) + 1)
perc_missed = [m / fc if fc > 0 else 0 for (m, fc) in zip(stats['missed'], failure_count)]
mean_missed, missed_fit = mean_values(x, perc_missed, mean_interval)
mean_reward, reward_fit = mean_values(x, stats['rewards'], mean_interval)
plot_results_line_graph(stats, 'napfd', mean_interval, qax, x)
#plot_napfd_metrics(afpd, mean_interval, mean_missed, missed_fit, qax, x)
if 'comparison' in stats:
plot_result_difference_bars(stats, 'napfd', rax, x)
else:
plot_results_line_graph(stats, 'rewards', mean_interval, rax, x)
val_res = pickle.load(open(val_file, 'rb'))
plot_validation(val_res, lambda res: res['napfd'], 'Validation Results', 'NAPFD', vax1)
plot_validation(val_res, lambda res: res['detected'] / (res['detected'] + res['missed']) if (res['detected'] + res['missed']) > 0 else 1,
'Validation Results', 'Failures Detected (in %)', vax2)
# plt.tight_layout()
if plot_graphs:
plt.show()
if save_graphs:
fig.savefig('%s_learning.pgf' % prefix, bbox_inches='tight')
fig.savefig('%s_learning.png' % prefix, bbox_inches='tight')
plt.close('all')
def plot_stats_separate_figures(prefix, stats_file, val_file, mean_interval=10, plot_graphs=False, save_graphs=False):
if not plot_graphs and not save_graphs:
print('Set at least one of plot_graphs and save_graphs to True')
return
sns.set_style('whitegrid')
sns.set_context('paper')
stats = pickle.load(open(stats_file, 'rb'))
failure_count = np.add(stats['detected'], stats['missed'])
x = range(1, int(len(stats['scenarios']) / mean_interval) + 1)
perc_missed = [m / fc if fc > 0 else 0 for (m, fc) in zip(stats['missed'], failure_count)]
mean_missed, missed_fit = mean_values(x, perc_missed, mean_interval)
mean_reward, reward_fit = mean_values(x, stats['rewards'], mean_interval)
fig = plt.figure()
ax = fig.add_subplot(111)
plot_napfd_metrics([r[3] for r in stats['result']], mean_interval, mean_missed, missed_fit, ax, x)
if plot_graphs:
plt.draw()
if save_graphs:
fig.savefig('%s_quality.pgf' % prefix, bbox_inches='tight', transparent=True)
fig.savefig('%s_quality.png' % prefix, bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(111)
plot_reward(mean_interval, mean_reward, ax, reward_fit, x)
if plot_graphs:
plt.draw()
if save_graphs:
fig.savefig('%s_reward.pgf' % prefix, bbox_inches='tight')
fig.savefig('%s_reward.png' % prefix, bbox_inches='tight')
val_res = pickle.load(open(val_file, 'rb'))
fig = plt.figure()
ax = fig.add_subplot(111)
plot_validation(val_res, lambda res: res['napfd'], 'Validation Results', 'NAPFD', ax)
if plot_graphs:
plt.draw()
if save_graphs:
fig.savefig('%s_validation_napfd.pgf' % prefix, bbox_inches='tight')
fig.savefig('%s_validation_napfd.png' % prefix, bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(111)
plot_validation(val_res, lambda res: res['detected'] / (res['detected'] + res['missed']) if (res['detected'] + res['missed']) > 0 else 1,
'Validation Results', 'Failures Detected (in %)', ax)
if plot_graphs:
plt.draw()
if save_graphs:
fig.savefig('%s_validation_failures.pgf' % prefix, bbox_inches='tight')
fig.savefig('%s_validation_failures.png' % prefix, bbox_inches='tight')
if plot_graphs:
plt.show() # Keep window open
else:
plt.close('all')
def plot_results_line_graph(stats, metric, mean_interval, qax, x, include_comparison=True):
if include_comparison and 'comparison' in stats:
for key in stats['comparison'].keys():
values, fitline = mean_values(x, stats['comparison'][key][metric], mean_interval)
qax.plot(x, values * 100, label=key)
#qax.plot(x, fitline(x) * 100, color='black')
values, fitline = mean_values(x, stats[metric], mean_interval)
qax.plot(x, values * 100, label=metric)
#qax.plot(x, fitline(x) * 100, color='black')
qax.set_ylim([0, 100])
qax.legend(ncol=2)
qax.set_xlim([1, max(x)])
def plot_napfd_metrics(afpd, mean_interval, mean_missed, missed_fit, qax, x):
mean_afpd, afpd_fit = mean_values(x, afpd, mean_interval)
qax.plot(x, mean_afpd * 100, label='NAPFD', color='blue')
qax.plot(x, afpd_fit(x) * 100, color='black')
qax.plot(x, mean_missed * 100, label='Percent Missed', color='green')
qax.plot(x, missed_fit(x) * 100, color='black')
qax.set_ylim([0, 100])
qax.legend(ncol=2)
qax.set_xlim([1, max(x)])
qax.set_title('Failure Detection (averaged over %d schedules)' % mean_interval)
def plot_reward(mean_interval, mean_reward, rax, reward_fit, x):
rax.plot(x, mean_reward, label='Reward', color='red')
rax.plot(x, reward_fit(x), color='black')
rax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
rax.set_xlim([1, max(x)])
rax.set_title('Reward (averaged over %d schedules)' % mean_interval)
def plot_validation(val_res, res_fun, title, ylabel, ax=None):
if not ax:
ax = plt.gca()
df = pd.DataFrame.from_dict(val_res)
res_df = df.apply(res_fun, raw=True, axis=1)
res_df.name = 'res'
ydat = pd.concat([df, res_df], axis=1)
sns.boxplot(data=ydat, x='step', y='res', palette=sns.color_palette(n_colors=1), ax=ax)
ax.set_title(title)
ax.set_ylabel(ylabel)
def mean_values(x, y, mean_interval):
#mean_y = np.mean(np.array(y).reshape(-1, mean_interval), axis=1)
mean_y = np.array(y)
z = np.polyfit(x, mean_y, 6)
f = np.poly1d(z)
return mean_y, f
def pickle_to_dataframe(pickle_file):
return pd.DataFrame.from_dict(pd.read_pickle(pickle_file))
def print_failure_detection(result_dir, file_prefixes):
df = pd.DataFrame()
for fp in file_prefixes:
searchpath = os.path.join(result_dir, fp)
files = glob.glob(searchpath + '_*_stats.p')
dfs = pd.concat([pickle_to_dataframe(f) for f in files])
df = df.append(dfs)
    print(df)
if __name__ == '__main__':
stats_file = 'tableau_iofrol_timerank_lr0.3_as5_n1000_eps0.1_hist3_tableau_stats.p'
val_file = 'tableau_iofrol_timerank_lr0.3_as5_n1000_eps0.1_hist3_tableau_val.p'
mean_interval = 1
plot_stats_single_figure('tableau', stats_file, val_file, mean_interval, plot_graphs=True, save_graphs=False)
#plot_stats_separate_figures('netq', stats_file, val_file, mean_interval, plot_graphs=False, save_graphs=True)
#print_failure_detection('evaluation', ['heur_sort', 'heur_weight', 'random'])
```
#### File: jlousada315/RETECS/stats.py
```python
import numpy as np
import pandas as pd
import os
def load_stats_dataframe(files, aggregated_results=None):
    if aggregated_results and os.path.exists(aggregated_results) and all([os.path.getmtime(f) < os.path.getmtime(aggregated_results) for f in files]):
return pd.read_pickle(aggregated_results)
df = pd.DataFrame()
for f in files:
tmp_dict = pd.read_pickle(f)
tmp_dict['iteration'] = f.split('_')[-2]
if 'comparison' in tmp_dict:
for (cmp_key, cmp_dict) in tmp_dict['comparison'].items():
cmp_dict['iteration'] = tmp_dict['iteration']
cmp_dict['env'] = tmp_dict['env']
cmp_dict['step'] = tmp_dict['step']
cmp_dict['agent'] = cmp_key
cmp_dict['sched_time'] = tmp_dict['sched_time'] if 'sched_time' in tmp_dict else 0.5
cmp_dict['history_length'] = tmp_dict['history_length'] if 'history_length' in tmp_dict else 4
cmp_df = pd.DataFrame.from_dict(cmp_dict)
df = pd.concat([df, cmp_df])
del tmp_dict['comparison']
del tmp_dict['result']
tmp_df = pd.DataFrame.from_dict(tmp_dict)
df = pd.concat([df, tmp_df])
if aggregated_results:
df.to_pickle(aggregated_results)
return df
def plot_result_difference_bars(stats, metric, qax, x):
baseline = np.asarray(stats[metric])
x = np.asarray(x)
colors = ('g', 'b', 'r')
if 'comparison' in stats:
bar_width = 0.35
for (offset, key) in enumerate(stats['comparison'].keys()):
cmp_val = np.asarray(stats['comparison'][key][metric])
cmp_val -= baseline
qax.bar(x+offset*bar_width, cmp_val, bar_width, label=key, color=colors[offset])
qax.legend(ncol=2)
qax.set_xlim([1, max(x)])
``` |
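A brief usage sketch for `load_stats_dataframe` (not part of the repository): the glob pattern and paths below are guesses based on how `print_failure_detection` in the plotting script above locates per-run `*_stats.p` pickles, and the module is assumed to be importable as `stats`.

```python
import glob

from stats import load_stats_dataframe

# Aggregate every per-iteration stats pickle into one dataframe and cache it.
files = glob.glob('evaluation/*_stats.p')
df = load_stats_dataframe(files, aggregated_results='evaluation/aggregated_stats.p')
print(df.head())
```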
{
"source": "jloveric/functional-layers",
"score": 2
} |
#### File: functional-layers/examples/cifar100.py
```python
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from pytorch_lightning import LightningModule, Trainer
from high_order_layers_torch.layers import *
from torchmetrics import Accuracy
from torchmetrics.functional import accuracy
import hydra
from omegaconf import DictConfig, OmegaConf
import os
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
class Net(LightningModule):
def __init__(self, cfg: DictConfig):
super().__init__()
self._cfg = cfg
try:
self._data_dir = f"{hydra.utils.get_original_cwd()}/data"
except:
self._data_dir = "../data"
self._lr = cfg.lr
n = cfg.n
self.n = cfg.n
self._batch_size = cfg.batch_size
self._layer_type = cfg.layer_type
self._train_fraction = cfg.train_fraction
segments = cfg.segments
self._topk_metric = Accuracy(top_k=5)
self._nonlinearity = cfg.nonlinearity
if self._layer_type == "standard":
out_channels1 = 6 * ((n - 1) * segments + 1)
self.conv1 = torch.nn.Conv2d(
in_channels=3, out_channels=out_channels1, kernel_size=5
)
self.norm1 = nn.BatchNorm2d(out_channels1)
out_channels2 = 6 * ((n - 1) * segments + 1)
self.conv2 = torch.nn.Conv2d(
in_channels=out_channels2, out_channels=16, kernel_size=5
)
            self.norm2 = nn.BatchNorm2d(16)
        elif self._layer_type == "standard0":
            self.conv1 = torch.nn.Conv2d(
                in_channels=3, out_channels=6 * n, kernel_size=5
            )
            self.norm1 = nn.BatchNorm2d(6 * n)
            self.conv2 = torch.nn.Conv2d(
                in_channels=6 * n, out_channels=16, kernel_size=5
            )
            self.norm2 = nn.BatchNorm2d(16)
else:
self.conv1 = high_order_convolution_layers(
layer_type=self._layer_type,
n=n,
in_channels=3,
out_channels=6,
kernel_size=5,
segments=cfg.segments,
rescale_output=cfg.rescale_output,
periodicity=cfg.periodicity,
)
self.norm1 = nn.BatchNorm2d(6)
self.conv2 = high_order_convolution_layers(
layer_type=self._layer_type,
n=n,
in_channels=6,
out_channels=16,
kernel_size=5,
segments=cfg.segments,
rescale_output=cfg.rescale_output,
periodicity=cfg.periodicity,
)
self.norm2 = nn.BatchNorm2d(16)
self.pool = nn.MaxPool2d(2, 2)
self.avg_pool = nn.AdaptiveAvgPool2d(5)
self.flatten = nn.Flatten()
if cfg.linear_output:
self.fc1 = nn.Linear(16 * 5 * 5, 100)
else:
self.fc1 = high_order_fc_layers(
layer_type=self._layer_type,
n=n,
in_features=16 * 5 * 5,
out_features=100,
segments=cfg.segments,
)
self.norm3 = nn.LayerNorm(100)
def forward(self, x):
if self._nonlinearity is True:
x = self.pool(F.relu(self.conv1(x)))
x = self.norm1(x)
x = self.pool(F.relu(self.conv2(x)))
x = self.norm2(x)
x = self.avg_pool(x)
x = self.flatten(x)
x = self.fc1(x)
x = self.norm3(x)
else:
x = self.pool(self.conv1(x))
x = self.norm1(x)
x = self.pool(self.conv2(x))
x = self.norm2(x)
x = self.avg_pool(x)
x = self.flatten(x)
x = self.fc1(x)
x = self.norm3(x)
return x
def setup(self, stage):
num_train = int(self._train_fraction * 40000)
num_val = 10000
num_extra = 40000 - num_train
train = torchvision.datasets.CIFAR100(
root=self._data_dir, train=True, download=True, transform=transform
)
self._train_subset, self._val_subset, extra = torch.utils.data.random_split(
train,
[num_train, 10000, num_extra],
generator=torch.Generator().manual_seed(1),
)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
loss = F.cross_entropy(y_hat, y)
preds = torch.argmax(y_hat, dim=1)
acc = accuracy(preds, y)
val = self._topk_metric(y_hat, y)
val = self._topk_metric.compute()
self.log(f"train_loss", loss, prog_bar=True)
self.log(f"train_acc", acc, prog_bar=True)
self.log(f"train_acc5", val, prog_bar=True)
return loss
def train_dataloader(self):
trainloader = torch.utils.data.DataLoader(
self._train_subset,
batch_size=self._batch_size,
shuffle=True,
num_workers=10,
)
return trainloader
def val_dataloader(self):
return torch.utils.data.DataLoader(
self._val_subset, batch_size=self._batch_size, shuffle=False, num_workers=10
)
def test_dataloader(self):
testset = torchvision.datasets.CIFAR100(
root=self._data_dir, train=False, download=True, transform=transform
)
testloader = torch.utils.data.DataLoader(
testset, batch_size=4, shuffle=False, num_workers=10
)
return testloader
def validation_step(self, batch, batch_idx):
return self.eval_step(batch, batch_idx, "val")
def eval_step(self, batch, batch_idx, name):
x, y = batch
logits = self(x)
loss = F.cross_entropy(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
val = self._topk_metric(logits, y)
val = self._topk_metric.compute()
# Calling self.log will surface up scalars for you in TensorBoard
self.log(f"{name}_loss", loss, prog_bar=True)
self.log(f"{name}_acc", acc, prog_bar=True)
self.log(f"{name}_acc5", val, prog_bar=True)
return loss
def test_step(self, batch, batch_idx):
# Here we just reuse the validation_step for testing
return self.eval_step(batch, batch_idx, "test")
def configure_optimizers(self):
return optim.Adam(self.parameters(), lr=self._lr)
def cifar100(cfg: DictConfig):
print(OmegaConf.to_yaml(cfg))
print("Working directory : {}".format(os.getcwd()))
try:
print(f"Orig working directory : {hydra.utils.get_original_cwd()}")
except:
pass
trainer = Trainer(max_epochs=cfg.max_epochs, gpus=cfg.gpus)
model = Net(cfg)
trainer.fit(model)
print("testing")
result = trainer.test(model)
print("finished testing")
return result
@hydra.main(config_path="../config", config_name="cifar100_config")
def run(cfg: DictConfig):
cifar100(cfg=cfg)
if __name__ == "__main__":
run()
```
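A rough sketch of driving `Net` without hydra. The field names mirror the `cfg.*` accesses in the module above; the values are only illustrative guesses, not the repository's actual `cifar100_config` (which hydra loads from the `../config` directory).

```python
# Hypothetical hand-built config for a quick smoke test of the model definition.
from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {
        "lr": 1e-3,
        "n": 3,
        "batch_size": 64,
        "layer_type": "continuous2d",
        "train_fraction": 1.0,
        "segments": 2,
        "nonlinearity": False,
        "rescale_output": False,
        "periodicity": None,
        "linear_output": True,
        "max_epochs": 1,
        "gpus": 0,
    }
)

# Assumes the module above (Net and its imports) is in scope.
model = Net(cfg)
print(model)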
#### File: functional-layers/examples/function_example.py
```python
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from pytorch_lightning import LightningModule, Trainer
from high_order_layers_torch.layers import *
import torch_optimizer as alt_optim
class simple_func:
def __init__(self):
self.factor = 1.5 * 3.14159
self.offset = 0.25
def __call__(self, x):
return 0.5 * torch.cos(self.factor * 1.0 / (abs(x) + self.offset))
xTest = np.arange(1000) / 500.0 - 1.0
xTest = torch.stack([torch.tensor(val) for val in xTest])
xTest = xTest.view(-1, 1)
yTest = simple_func()(xTest)
yTest = yTest.view(-1, 1)
class FunctionDataset(Dataset):
"""
Loader for reading in a local dataset
"""
def __init__(self, transform=None):
self.x = (2.0 * torch.rand(1000) - 1.0).view(-1, 1)
self.y = simple_func()(self.x)
self.transform = transform
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
return self.x.clone().detach()[idx], self.y.clone().detach()[idx]
class PolynomialFunctionApproximation(LightningModule):
"""
    Simple network consisting of one input and one output
and no hidden layers.
"""
def __init__(
self, n, segments=2, function=True, periodicity=None, opt: str = "adam"
):
super().__init__()
self.automatic_optimization = False
self.optimizer = opt
if function == "standard":
print("Inside standard")
alpha = 0.0
layer1 = nn.Linear(in_features=1, out_features=n)
layer2 = nn.Linear(in_features=n, out_features=n)
layer3 = nn.Linear(in_features=n, out_features=1)
self.layer = nn.Sequential(layer1, nn.ReLU(), layer2, nn.ReLU(), layer3)
elif function == "product":
print("Inside product")
alpha = 0.0
layer1 = high_order_fc_layers(
layer_type=function, in_features=1, out_features=n, alpha=1.0
)
layer2 = high_order_fc_layers(
layer_type=function, in_features=n, out_features=1, alpha=1.0
)
self.layer = nn.Sequential(
layer1,
# nn.ReLU(),
layer2,
# nn.ReLU()
)
else:
self.layer = high_order_fc_layers(
layer_type=function,
n=n,
in_features=1,
out_features=1,
segments=segments,
length=2.0,
periodicity=periodicity,
)
def forward(self, x):
return self.layer(x.view(x.size(0), -1))
def training_step(self, batch, batch_idx):
opt = self.optimizers()
x, y = batch
y_hat = self(x)
loss = F.mse_loss(y_hat, y)
opt.zero_grad()
if self.optimizer in ["adahessian"]:
self.manual_backward(loss, create_graph=True)
else:
self.manual_backward(loss, create_graph=False)
opt.step()
return {"loss": loss}
def train_dataloader(self):
return DataLoader(FunctionDataset(), batch_size=4)
def configure_optimizers(self):
if self.optimizer == "adahessian":
return alt_optim.Adahessian(
self.layer.parameters(),
lr=1.0,
betas=(0.9, 0.999),
eps=1e-4,
weight_decay=0.0,
hessian_power=1.0,
)
elif self.optimizer == "adam":
return optim.Adam(self.parameters(), lr=0.001)
elif self.optimizer == "lbfgs":
return optim.LBFGS(self.parameters(), lr=1, max_iter=20, history_size=100)
else:
raise ValueError(f"Optimizer {self.optimizer} not recognized")
modelSetL = [
{"name": "Relu 2", "n": 2},
{"name": "Relu 3", "n": 8},
{"name": "Relu 4", "n": 16},
]
modelSetProd = [
{"name": "Product 2", "n": 2},
{"name": "Product 3", "n": 8},
{"name": "Product 4", "n": 16},
]
modelSetD = [
{"name": "Discontinuous", "n": 2},
# {'name': 'Discontinuous 2', 'order' : 2},
{"name": "Discontinuous", "n": 4},
# {'name': 'Discontinuous 4', 'order' : 4},
{"name": "Discontinuous", "n": 6},
]
modelSetC = [
{"name": "Continuous", "n": 2},
# {'name': 'Continuous 2', 'order' : 2},
{"name": "Continuous", "n": 4},
# {'name': 'Continuous 4', 'order' : 4},
{"name": "Continuous", "n": 6},
]
modelSetP = [
{"name": "Polynomial", "n": 10},
# {'name': 'Continuous 2', 'order' : 2},
{"name": "Polynomial", "n": 20},
# {'name': 'Continuous 4', 'order' : 4},
{"name": "Polynomial", "n": 30},
]
modelSetF = [
{"name": "Fourier", "n": 10},
# {'name': 'Continuous 2', 'order' : 2},
{"name": "Fourier", "n": 20},
# {'name': 'Continuous 4', 'order' : 4},
{"name": "Fourier", "n": 30},
]
colorIndex = ["red", "green", "blue", "purple", "black"]
symbol = ["+", "x", "o", "v", "."]
def plot_approximation(
function,
model_set,
segments,
epochs,
gpus=0,
periodicity=None,
plot_result=True,
opt="adam",
):
for i in range(0, len(model_set)):
trainer = Trainer(max_epochs=epochs, gpus=gpus)
model = PolynomialFunctionApproximation(
n=model_set[i]["n"],
segments=segments,
function=function,
periodicity=periodicity,
opt=opt,
)
trainer.fit(model)
predictions = model(xTest.float())
if plot_result is True:
plt.scatter(
xTest.data.numpy(),
predictions.flatten().data.numpy(),
c=colorIndex[i],
marker=symbol[i],
label=f"{model_set[i]['name']} {model_set[i]['n']}",
)
if plot_result is True:
plt.plot(
xTest.data.numpy(), yTest.data.numpy(), "-", label="actual", color="black"
)
plt.title("Piecewise Polynomial Function Approximation")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
def plot_results(epochs: int = 20, segments: int = 5, plot: bool = True):
"""
plt.figure(0)
plot_approximation("standard", modelSetL, 1, epochs, gpus=0)
plt.title('Relu Function Approximation')
"""
"""
plt.figure(0)
plot_approximation("product", modelSetProd, 1, epochs, gpus=0)
"""
data = [
{
"title": "Piecewise Discontinuous Function Approximation",
"layer": "discontinuous",
"model_set": modelSetD,
},
{
"title": "Piecewise Continuous Function Approximation",
"layer": "continuous",
"model_set": modelSetC,
},
{
"title": "Polynomial function approximation",
"layer": "polynomial",
"model_set": modelSetP,
},
{
"title": "Fourier function approximation",
"layer": "fourier",
"model_set": modelSetF,
},
]
for index, element in enumerate(data):
if plot is True:
plt.figure(index)
plot_approximation(
element["layer"], element["model_set"], 5, epochs, gpus=0, periodicity=2
)
if plot is True:
plt.title("Piecewise Discontinuous Function Approximation")
if plot is True:
plt.show()
if __name__ == "__main__":
plot_results()
```
#### File: functional-layers/high_order_layers_torch/ProductLayer.py
```python
import math
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
from torch.nn import Module
from torch.nn import init
from .utils import *
class Product(Module):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
alpha=1.0,
periodicity: float = None,
**kwargs
) -> None:
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
self.alpha = alpha
self.periodicity = periodicity
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
# self.weight.data.uniform_(-1/self.in_features,
# 1/self.in_features)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, x: Tensor) -> Tensor:
periodicity = self.periodicity
if periodicity is not None:
x = make_periodic(x, periodicity)
assemble = torch.einsum("ij,kj->ijk", x, self.weight)
this_sum = torch.sum(assemble, dim=1)
assemble = assemble + 1.0
assemble = torch.prod(assemble, dim=1) - (1 - self.alpha) * this_sum
assemble = assemble - 1 + self.bias
return assemble
```
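Reading the `forward` above, the k-th output for a single sample appears to be `prod_j(1 + x_j * w_kj) - (1 - alpha) * sum_j(x_j * w_kj) - 1 + bias_k`. A tiny numeric check of that reading (illustrative only; the import path assumes the package layout shown in these excerpts):

```python
import torch

from high_order_layers_torch.ProductLayer import Product

layer = Product(in_features=2, out_features=1, alpha=1.0)
with torch.no_grad():
    layer.weight.copy_(torch.tensor([[2.0, 3.0]]))
    layer.bias.zero_()

x = torch.tensor([[0.5, 1.0]])
# With alpha = 1 the sum term drops out:
# (1 + 0.5 * 2) * (1 + 1.0 * 3) - 1 = 2 * 4 - 1 = 7
print(layer(x))  # tensor([[7.]], grad_fn=...)
```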
#### File: functional-layers/high_order_layers_torch/utils.py
```python
import torch
def make_periodic(x, periodicity: float):
xp = x + 0.5 * periodicity
xp = torch.remainder(xp, 2 * periodicity) # always positive
xp = torch.where(xp > periodicity, 2 * periodicity - xp, xp)
xp = xp - 0.5 * periodicity
return xp
```
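A quick illustration (not from the repository) of how `make_periodic` folds inputs back into the base interval: values are reflected at the segment boundaries rather than wrapped, so the resulting map is continuous. The import path assumes the package layout shown above.

```python
import torch

from high_order_layers_torch.utils import make_periodic

x = torch.tensor([-1.5, -0.5, 0.0, 0.5, 1.5, 2.5])
print(make_periodic(x, periodicity=1.0))
# tensor([ 0.5000, -0.5000,  0.0000,  0.5000, -0.5000,  0.5000])
```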
#### File: functional-layers/tests/test_embeddings.py
```python
import os
import pytest
from high_order_layers_torch.positional_embeddings import (
ClassicSinusoidalEmbedding,
FourierSeriesEmbedding,
PiecewiseDiscontinuousPolynomialEmbedding,
PiecewisePolynomialEmbedding,
)
import torch
def test_classic_embedding():
x = torch.rand([5, 7])
embedding = ClassicSinusoidalEmbedding(10)
ans = embedding(x)
assert ans.shape == torch.Size([5, 7, 10])
x = torch.rand([5])
ans = embedding(x)
assert ans.shape == torch.Size([5, 10])
def test_classic_embedding_throws():
with pytest.raises(ValueError):
embedding = ClassicSinusoidalEmbedding(3)
```
#### File: functional-layers/tests/test_layers.py
```python
import os
import pytest
from high_order_layers_torch.LagrangePolynomial import *
from high_order_layers_torch.FunctionalConvolution import *
from high_order_layers_torch.PolynomialLayers import *
from high_order_layers_torch.networks import *
def test_nodes():
ans = chebyshevLobatto(20)
assert ans.shape[0] == 20
def test_polynomial():
poly = LagrangePoly(5)
# Just use the points as the actual values
w = chebyshevLobatto(5)
w = w.reshape(1, 1, 1, 5)
x = torch.tensor([[0.5]])
ans = poly.interpolate(x, w)
assert abs(0.5 - ans[0]) < 1.0e-6
def test_compare():
in_channels = 2
out_channels = 2
kernel_size = 4
stride = 1
height = 5
width = 5
n = 3
segments = 1
values = {
"n": n,
"in_channels": in_channels,
"out_channels": out_channels,
"kernel_size": kernel_size,
"stride": stride,
}
x = torch.rand(1, in_channels, height, width)
a = Expansion2d(LagrangeExpand(n))
b = Expansion2d(PiecewisePolynomialExpand(n=n, segments=segments))
aout = a(x)
bout = b(x)
assert torch.allclose(aout, bout, atol=1e-5)
``` |
{
"source": "jloveric/high-order-implicit-representation",
"score": 3
} |
#### File: jloveric/high-order-implicit-representation/single_image_dataset.py
```python
from matplotlib import image
import torch
import numpy as np
from hilbertcurve.hilbertcurve import HilbertCurve
import math
def image_to_dataset(filename: str, peano: bool = False, rotations: int = 1):
    """
    Read in an image file and return the flattened position input,
    the flattened output and a torch array of the original image.
    Args :
        filename : image filename.
    Returns :
        flattened image [width*height, rgb], flattened position vector
        [width*height, 2] and torch tensor of original image.
    """
img = image.imread(filename)
torch_image = torch.from_numpy(np.array(img))
print('image.shape', torch_image.shape)
max_size = max(torch_image.shape[0], torch_image.shape[1])
xv, yv = torch.meshgrid(
[torch.arange(torch_image.shape[0]), torch.arange(torch_image.shape[1])])
# rescale so the maximum values is between -1 and 1
xv = (xv/max_size)*2-1
yv = (yv/max_size)*2-1
xv = xv.reshape(xv.shape[0], xv.shape[1], 1)
yv = yv.reshape(yv.shape[0], yv.shape[1], 1)
'''
if peano is True:
# can index 2^{n*p} cubes with p = 2 (dimension)
n = 2 # number of dimensions
p = math.ceil(math.log(max_size, n)/2.0)
hilbert_curve = HilbertCurve(p=p, n=2)
cartesian_position = torch_position.tolist()
hilbert_distances = hilbert_curve.distance_from_points(
cartesian_position)
'''
if rotations == 2:
torch_position = torch.cat(
[xv, yv, (xv-yv)/2.0, (xv+yv)/2.0], dim=2)
torch_position = torch_position.reshape(-1, 4)
elif rotations == 1:
torch_position = torch.cat([xv, yv], dim=2)
torch_position = torch_position.reshape(-1, 2)
else:
line_list = []
for i in range(rotations):
theta = (math.pi/2.0)*(i/rotations)
print('theta', theta)
rot_x = math.cos(theta)
rot_y = math.sin(theta)
rot_sum = math.fabs(rot_x)+math.fabs(rot_y)
# Add the line and the line orthogonal
line_list.append((rot_x*xv+rot_y*yv)/rot_sum)
line_list.append((rot_x*xv-rot_y*yv)/rot_sum)
torch_position = torch.cat(line_list, dim=2)
torch_position = torch_position.reshape(-1, 2*rotations)
#raise(f"Rotation {rotations} not implemented.")
torch_image_flat = torch_image.reshape(-1, 3)*2.0/255.0-1
print('torch_max', torch.max(torch_image_flat))
return torch_image_flat, torch_position, torch_image
class ImageNeighborhoodReader:
def __init__(self, filename: str, width=3, outside=1):
self._input, self._output, self._image = self.image_neighborhood_dataset(
filename=filename, width=width, outside=outside)
@property
def features(self):
return self._input
@property
def targets(self):
return self._output
@property
def image(self) :
return self._image
@property
def lastx(self):
return self._lastx
@property
def lasty(self):
return self._lasty
def image_neighborhood_dataset(self, filename: str, width=3, outside=1):
"""
Args :
filename : Name of image file to create data from.
width: width of the inner block.
outside : width of the outer neighborhood surrounding the inner block.
Return :
tensor of inner block, tensor of neighborhood
"""
print('filename', filename,flush=True)
img = image.imread(filename)
torch_image = torch.from_numpy(np.array(img))
px = torch_image.shape[0]
py = torch_image.shape[1]
patch_edge = []
patch_block = []
max_x = px-(width+2*outside)
max_y = py-(width+2*outside)
self._lastx = max_x
self._lasty = max_y
totalx = width+2*outside
totaly = totalx
edge_mask = torch.ones(totalx, totaly, 3, dtype=bool)
edge_mask[outside:(outside+width), outside:(outside+width), :] = False
block_mask = ~edge_mask
edge_indexes = edge_mask.flatten()
block_indexes = block_mask.flatten()
for i in range(max_x):
for j in range(max_y):
all_elements = torch_image[i:(i+totalx),
j:(j+totaly), :].flatten()
patch = all_elements[block_indexes]
edge = all_elements[edge_indexes]
patch_edge.append(edge)
patch_block.append(patch)
patch_block = (2.0/256.0)*torch.stack(patch_block)-1
patch_edge = (2.0/256.0)*torch.stack(patch_edge)-1
print(patch_block, patch_edge)
return patch_block, patch_edge, torch_image
if __name__ == "__main__":
# image_to_dataset(filename="images/newt.jpg")
ind = ImageNeighborhoodReader(filename="images/newt.jpg")
```
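A hedged usage sketch (not part of the module) showing what the three return values of `image_to_dataset` look like; the sample image path is the one referenced in the module's `__main__`, and the import assumes the file lives at the repository root as `single_image_dataset.py`.

```python
from single_image_dataset import image_to_dataset

flat_rgb, positions, img = image_to_dataset("images/newt.jpg", rotations=1)
print(flat_rgb.shape)   # (height * width, 3), rescaled to roughly [-1, 1]
print(positions.shape)  # (height * width, 2) when rotations == 1
print(img.shape)        # the original (height, width, 3) image tensor
```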
#### File: jloveric/high-order-implicit-representation/single_text_dataset.py
```python
import torch
from torch.utils.data import Dataset
from typing import List, Tuple
class SingleTextDataset(Dataset):
def __init__(self, filenames: List[str], features: int = 10, targets: int = 1, max_size: int = -1):
"""
Args :
filenames : List of filenames to load data from
features : Number of input features (characters)
targets : Number of output features (characters)
max_size : Set the maximum number of characters to read from file. Defaults
to -1 which is to read everything.
"""
feature_list, target_list = dataset_from_file(
filenames[0], features=features, targets=targets, max_size=max_size)
self.inputs = feature_list
self.output = target_list
self.features = features
self.targets = targets
def __len__(self):
return len(self.output)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
return (self.inputs[idx]-64+0.5)/64.0, self.output[idx]
def ascii_to_float(ascii_tensor: torch.Tensor):
return (ascii_tensor-64+0.5)/64
def float_to_ascii(float_tensor: torch.Tensor):
return ((float_tensor+1.0)*64-0.5).int()
def encode_input_from_text(text_in: str, features: int) -> Tuple[torch.tensor, str]:
"""
Convert a string to input that the network can take. Take the last "features" number
of characters and convert to numbers. Return those numbers as the network input, also
return the raw_features (the text used to create the numbers).
Args :
text_in : input string.
features : number of input features.
Returns :
tensor encoding, text used to create encoding.
"""
text = text_in.encode("ascii", "ignore").decode('ascii')
raw_sample = text[-(features):]
encoding = [ord(val) for val in raw_sample]
return torch.tensor(encoding), raw_sample
def decode_output_to_text(encoding: torch.tensor, topk: int = 1) -> Tuple[torch.Tensor, torch.Tensor, List[str]]:
"""
Takes an output from the network and converts to text.
Args :
encoding : Tensor of size 128 for each ascii character
topk : The number of maximum values to report back
Returns :
Tuple of topk values and corresponding topk indices and list containing
actual ascii values.
"""
probabilities = torch.nn.Softmax(dim=0)(encoding)
ascii_codes = torch.topk(probabilities, k=topk, dim=0)
ascii_values = [chr(val).encode("ascii", "ignore").decode('ascii')
for val in ascii_codes[1]]
return ascii_codes[0], ascii_codes[1], ascii_values
def generate_dataset(text_in: str, features: int, targets: int):
text = text_in.encode("ascii", "ignore").decode('ascii')
    print('text[1:100]', text[1:100])
final = len(text)-(targets+features)
feature_list = []
target_list = []
for i in range(final):
n_feature = [ord(val) for val in text[i:(i+features)]]
feature_list.append(n_feature)
n_target = [ord(val)
for val in text[(i+features):(i+features+targets)]]
target_list.append(n_target)
return torch.tensor(feature_list), torch.tensor(target_list)
def dataset_from_file(filename: str, features: int, targets: int, max_size: int = -1):
with open(filename, "r") as f:
return generate_dataset(text_in=f.read()[0:max_size], features=features, targets=targets)
``` |
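A short round-trip sketch (not in the repository) for the helpers above; the literal string and feature count are arbitrary, and the import assumes the file is importable as `single_text_dataset`.

```python
from single_text_dataset import (
    ascii_to_float,
    encode_input_from_text,
    float_to_ascii,
)

encoding, raw = encode_input_from_text("hello world", features=5)
print(raw)       # 'world' - the last 5 characters of the input
print(encoding)  # tensor([119, 111, 114, 108, 100])

scaled = ascii_to_float(encoding)  # shifted into roughly [-1, 1) for the network
print(float_to_ascii(scaled))      # recovers [119, 111, 114, 108, 100]
```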
{
"source": "jloveric/high-order-layers-pytorch",
"score": 3
} |
#### File: high-order-layers-pytorch/high_order_layers_torch/LagrangePolynomial.py
```python
import math
import torch
from .Basis import *
def chebyshevLobatto(n: int):
"""
Compute the chebyshev lobatto points which
are in the range [-1.0, 1.0]
Args :
n : number of points
Returns :
A tensor of length n with x locations from
negative to positive including -1 and 1
[-1,...,+1]
"""
k = torch.arange(0, n)
ans = -torch.cos(k * math.pi / (n - 1))
ans = torch.where(torch.abs(ans) < 1e-15, 0 * ans, ans)
return ans
class FourierBasis:
def __init__(self, length: float):
self.length = length
def __call__(self, x, j: int):
if j == 0:
return 0.5 + 0.0 * x
i = (j + 1) // 2
if j % 2 == 0:
ans = torch.cos(math.pi * i * x / self.length)
else:
ans = torch.sin(math.pi * i * x / self.length)
return ans
class LagrangeBasis:
def __init__(self, n: int, length: float = 2.0):
self.n = n
self.X = (length / 2.0) * chebyshevLobatto(n)
def __call__(self, x, j: int):
b = [(x - self.X[m]) / (self.X[j] - self.X[m]) for m in range(self.n) if m != j]
b = torch.stack(b)
ans = torch.prod(b, dim=0)
return ans
class LagrangeExpand(BasisExpand):
def __init__(self, n: int, length: float = 2.0):
super().__init__(LagrangeBasis(n, length=length), n)
class PiecewisePolynomialExpand(PiecewiseExpand):
def __init__(self, n: int, segments: int, length: float = 2.0):
super().__init__(basis=LagrangeBasis(n), n=n, segments=segments, length=length)
class PiecewisePolynomialExpand1d(PiecewiseExpand1d):
def __init__(self, n: int, segments: int, length: float = 2.0):
super().__init__(basis=LagrangeBasis(n), n=n, segments=segments, length=length)
class PiecewiseDiscontinuousPolynomialExpand(PiecewiseDiscontinuousExpand):
def __init__(self, n: int, segments: int, length: float = 2.0):
super().__init__(basis=LagrangeBasis(n), n=n, segments=segments, length=length)
class PiecewiseDiscontinuousPolynomialExpand1d(PiecewiseDiscontinuousExpand1d):
def __init__(self, n: int, segments: int, length: float = 2.0):
super().__init__(basis=LagrangeBasis(n), n=n, segments=segments, length=length)
class FourierExpand(BasisExpand):
def __init__(self, n: int, length: float):
super().__init__(FourierBasis(length=length), n)
class LagrangePolyFlat(BasisFlat):
def __init__(self, n: int, length: float = 2.0, **kwargs):
super().__init__(n, LagrangeBasis(n, length=length), **kwargs)
class LagrangePolyFlatProd(BasisFlatProd):
def __init__(self, n: int, length: float = 2.0, **kwargs):
super().__init__(n, LagrangeBasis(n, length=length), **kwargs)
class LagrangePoly(Basis):
def __init__(self, n: int, length: float = 2.0, **kwargs):
super().__init__(n, LagrangeBasis(n, length=length), **kwargs)
class LagrangePolyProd(BasisProd):
def __init__(self, n: int, length: float = 2.0, **kwargs):
super().__init__(n, LagrangeBasis(n, length=length), **kwargs)
class FourierSeriesFlat(BasisFlat):
    def __init__(self, n: int, length: float = 1.0, **kwargs):
super().__init__(n, FourierBasis(length), **kwargs)
```
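A small illustration (not from the repository) of the node layout described in the `chebyshevLobatto` docstring and the interpolation property behind `LagrangeBasis`; the import path assumes the package layout shown above.

```python
import torch

from high_order_layers_torch.LagrangePolynomial import LagrangeBasis, chebyshevLobatto

# Chebyshev-Lobatto nodes for n = 5: -cos(k * pi / 4) for k = 0..4
print(chebyshevLobatto(5))
# tensor([-1.0000, -0.7071,  0.0000,  0.7071,  1.0000])

# Each Lagrange basis function is 1 at its own node and 0 at the others,
# so the basis functions sum to 1 everywhere on the interval.
basis = LagrangeBasis(5)
x = torch.tensor([0.25])
print(sum(basis(x, j) for j in range(5)))  # tensor([1.]) up to rounding
```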
#### File: high-order-layers-pytorch/high_order_layers_torch/layers.py
```python
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from .PolynomialLayers import *
from .ProductLayer import *
from .FunctionalConvolution import *
from .FunctionalConvolutionTranspose import *
fc_layers = {
"continuous": PiecewisePolynomial,
"continuous_prod": PiecewisePolynomialProd,
"discontinuous": PiecewiseDiscontinuousPolynomial,
"discontinuous_prod": PiecewiseDiscontinuousPolynomialProd,
"polynomial": Polynomial,
"polynomial_prod": PolynomialProd,
"product": Product,
"fourier": FourierSeries,
}
convolutional_layers = {
"continuous2d": PiecewisePolynomialConvolution2d,
"continuous1d": PiecewisePolynomialConvolution1d,
"continuous_prod2d": None, # PiecewisePolynomialProd,
"discontinuous2d": PiecewiseDiscontinuousPolynomialConvolution2d,
"discontinuous1d": PiecewiseDiscontinuousPolynomialConvolution1d,
"discontinuous_prod2d": None, # PiecewiseDiscontinuousPolynomialProd,
"polynomial2d": PolynomialConvolution2d,
"polynomial1d": PolynomialConvolution1d,
"polynomial_prod2d": None, # PolynomialConvolutionProd2d,
"product2d": None, # ProductConvolution2d,
"fourier2d": FourierConvolution2d,
"fourier1d": FourierConvolution1d,
}
convolutional_transpose_layers = {
"continuous2d": PiecewisePolynomialConvolutionTranspose2d
}
def high_order_fc_layers(layer_type: str, **kwargs):
if layer_type in fc_layers.keys():
return fc_layers[layer_type](**kwargs)
raise ValueError(
f"Fully connected layer type {layer_type} not recognized. Must be one of {list(fc_layers.keys())}"
)
def high_order_convolution_layers(layer_type: str, **kwargs):
if layer_type in convolutional_layers.keys():
return convolutional_layers[layer_type](**kwargs)
raise ValueError(
f"Convolutional layer type {layer_type} not recognized. Must be one of {list(convolutional_layers.keys())}"
)
def high_order_convolution_transpose_layers(layer_type: str, **kwargs):
if layer_type in convolutional_transpose_layers.keys():
return convolutional_transpose_layers[layer_type](**kwargs)
raise ValueError(
f"ConvolutionalTranspose layer type {layer_type} not recognized. Must be one of {list(convolutional_transpose_layers.keys())}"
)
```
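A minimal sketch (not from the repository) of picking a layer through the registry above. The keyword arguments mirror the ones used for the `"polynomial"` type in the function-approximation example earlier in these excerpts; the expected output shape is an assumption based on how that example consumes the layer.

```python
import torch

from high_order_layers_torch.layers import high_order_fc_layers

# A single polynomial "neuron": one input feature, one output feature,
# n = 5 interpolation points on an interval of length 2.
layer = high_order_fc_layers(
    layer_type="polynomial", n=5, in_features=1, out_features=1, segments=2, length=2.0
)
x = torch.rand(4, 1) * 2 - 1
print(layer(x).shape)  # expected: torch.Size([4, 1])

# An unrecognized layer_type raises ValueError listing the registered names.
```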
#### File: high-order-layers-pytorch/tests/test_networks.py
```python
import os
import pytest
from high_order_layers_torch.networks import HighOrderFullyConvolutionalNetwork
import torch
@pytest.mark.parametrize("segments", [1, 2])
@pytest.mark.parametrize("n", [3, 5])
@pytest.mark.parametrize("kernel_size", [1, 3])
@pytest.mark.parametrize("ctype", ["polynomial1d", "continuous1d", "discontinuous1d"])
@pytest.mark.parametrize("channels", [1, 3])
@pytest.mark.parametrize("layers", [1, 3])
def test_interpolate_fully_convolutional_network1d(
segments, n, kernel_size, ctype, channels, layers
):
width = 100
model = HighOrderFullyConvolutionalNetwork(
layer_type=[ctype] * layers,
n=[n] * layers,
channels=[channels] * (layers + 1),
segments=[segments] * layers,
kernel_size=[kernel_size] * layers,
pooling="1d"
)
x = torch.rand(2, channels, width)
out = model(x)
print("out", out.shape)
assert out.shape[0] == x.shape[0]
assert out.shape[1] == channels
# assert out.shape[2] == width - (kernel_size - 1) * layers
@pytest.mark.parametrize("segments", [1, 2])
@pytest.mark.parametrize("n", [3, 5])
@pytest.mark.parametrize("kernel_size", [1, 3])
@pytest.mark.parametrize("ctype", ["polynomial2d", "continuous2d", "discontinuous2d"])
@pytest.mark.parametrize("channels", [1, 3])
@pytest.mark.parametrize("layers", [1, 3])
def test_interpolate_fully_convolutional_network2d(
segments, n, kernel_size, ctype, channels, layers
):
width = 100
model = HighOrderFullyConvolutionalNetwork(
layer_type=[ctype] * layers,
n=[n] * layers,
channels=[channels] * (layers + 1),
segments=[segments] * layers,
kernel_size=[kernel_size] * layers,
pooling="2d"
)
x = torch.rand(2, channels, width, width)
out = model(x)
print("out", out.shape)
assert out.shape[0] == x.shape[0]
assert out.shape[1] == channels
# assert out.shape[2] == width - (kernel_size - 1) * layers
``` |
{
"source": "jloveric/PiecewisePolynomialLayers",
"score": 3
} |
#### File: jloveric/PiecewisePolynomialLayers/invariantMnistExample.py
```python
import tensorflow as tf
from high_order_layers import PolynomialLayers as poly
from tensorflow.keras.layers import *
mnist = tf.keras.datasets.mnist
layers = tf.keras.layers
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = (x_train / 128.0 - 1.0), (x_test / 128.0 - 1.0)
units = 10
basis = poly.b3
#Residual layer
def res_block(input_data, units=units, basis=basis) :
x0 = LayerNormalization()(input_data)
x1 = poly.Polynomial(units, basis=basis)(x0)
x1 = Add()([x1, input_data])
return x1
inputs = tf.keras.Input(shape=(28,28))
x = Flatten(input_shape=(28, 28))(inputs)
x = poly.Polynomial(units, basis=basis)(x)
for i in range(3) :
x = res_block(x, basis=basis, units=units)
x = LayerNormalization()(x)
outputs = Dense(10, activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, batch_size=10)
model.evaluate(x_test, y_test)
``` |
{
"source": "jlovering/ChallengerParser",
"score": 3
} |
#### File: jlovering/ChallengerParser/ChallengerTest.py
```python
import unittest
import logging
import sys
import re
import testCaseSoT
import ChallengerParser as parser
import ChallengerGrammar
import tatsu
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
class DayTest():
def deepCompare(self, struct1, struct2):
truthy = []
for (s1, s2) in zip(struct1, struct2):
#logging.debug("%s %s" % (s1, s2))
try:
if len(s1) == len(s2):
if len(s1) == 1:
truthy.append(s1 == s2)
else:
truthy.append(self.deepCompare(s1, s2))
else:
truthy.append(False)
except:
truthy.append(s1 == s2)
#logging.debug(truthy)
return all(truthy)
def testParse(self):
par = parser.Input(self.infile, self.definition)
outData = par.parse()
logging.debug(outData)
SoT = eval("testCaseSoT.%s" % type(self).__name__.replace("_Strings",""))
logging.debug(SoT)
assert self.deepCompare(SoT, outData)
def tearDown(self):
self.infile.close()
class Day1Test(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addBuilder(parser.ListBuilder(parser.LiteralBlock(int), ""))
self.infile = open("testfiles/day1-testInput", "r")
class Day1Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.buildersFromStr('''[[
#int#
]]''')
self.infile = open("testfiles/day1-testInput", "r")
class Day2Test(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.ListBuilder( \
parser.MultiBlockLine( \
[ \
parser.ListBlock(int, '-'),
parser.LiteralBlock(lambda e: str(e)[:-1]),
parser.LiteralBlock(str)
], ' '), \
parser.EMPTYLINE) \
)
self.infile = open("testfiles/day2-testInput", "r")
class Day2Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addFunction('Day2Test_Strings_custom', lambda s: s[:-1])
self.definition.buildersFromStr('''[[
([int '-'] #Day2Test_Strings_custom# #str# ' ')
]]''')
self.infile = open("testfiles/day2-testInput", "r")
class Day3Test(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.ListBuilder( \
parser.ListBlock(str, None), \
parser.EMPTYLINE) \
)
self.infile = open("testfiles/day3-testInput", "r")
class Day3Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
        self.definition = parser.InputDefinition()
self.definition.buildersFromStr('''[[
[str None]
]]''')
self.infile = open("testfiles/day3-testInput", "r")
class Day4Test(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.ListBuilder( \
parser.MultiLineSpanBuilder( \
parser.HashLineBlock( \
parser.HashPairBlock(str, str, ':'), \
' '), \
#parser.LiteralBlock(str), \
' ', parser.EMPTYLINE), \
parser.EMPTYLINE) \
)
self.infile = open("testfiles/day4-testInput", "r")
class Day4Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.buildersFromStr('''[[
{{
{*str str ':' ' '}
}}
]]''')
self.infile = open("testfiles/day4-testInput", "r")
class Day5Test(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addBuilder(parser.ListBuilder(parser.LiteralBlock(lambda v: int(parser.tr(v, 'BFRL', '1010'), 2)), ""))
self.infile = open("testfiles/day5-testInput", "r")
class Day5Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addFunction('Day5Test_Strings_custom', lambda v: int(parser.tr(v, 'BFRL', '1010'), 2))
self.definition.buildersFromStr('''[[
#Day5Test_Strings_custom#
]]''')
self.infile = open("testfiles/day5-testInput", "r")
class Day6Test(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.ListBuilder( \
parser.ListBuilder( \
parser.SetBlock(str, parser.NODELIM), \
parser.EMPTYLINE), \
parser.EMPTYLINE)
)
self.infile = open("testfiles/day6-testInput", "r")
class Day6Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.buildersFromStr('''[[
[[
[<str None]
]]
]]''')
self.infile = open("testfiles/day6-testInput", "r")
class Day7Test(DayTest, unittest.TestCase):
def setUp(self):
def bagParse(b):
if b == " no other bags.":
return None
else:
sR = {}
for l in b.split(','):
bM = re.match(r"[\s]*(\d+) (.+) bag[s]{0,1}", l)
sR[bM.group(2)] = int(bM.group(1))
return sR
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.HashBuilder( \
parser.HashPairBlock(str, bagParse, "bags contain"),\
""), \
)
self.infile = open("testfiles/day7-testInput", "r")
class Day7Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
def bagParse(b):
if b == " no other bags.":
return None
else:
sR = {}
for l in b.split(','):
bM = re.match(r"[\s]*(\d+) (.+) bag[s]{0,1}", l)
sR[bM.group(2)] = int(bM.group(1))
return sR
self.definition = parser.InputDefinition()
self.definition.addFunction('Day7Test_Strings_custom', bagParse)
self.definition.buildersFromStr('''{{
{str Day7Test_Strings_custom "bags contain"}
}}''')
self.infile = open("testfiles/day7-testInput", "r")
class Day8Test(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.ListBuilder( \
parser.MultiBlockLine( \
[ \
parser.LiteralBlock(str), \
parser.LiteralBlock(int), \
], parser.SPACE), \
parser.EMPTYLINE)
)
self.infile = open("testfiles/day8-testInput", "r")
class Day8Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.buildersFromStr('''[[
(#str# #int# ' ')
]]''')
self.infile = open("testfiles/day8-testInput", "r")
class Day13Test(DayTest, unittest.TestCase):
def setUp(self):
def busParser(b):
if b == "x":
return None
else:
return int(b)
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.SingleLineBuilder( \
parser.LiteralBlock(int) \
) \
)
self.definition.addBuilder( \
parser.SingleLineBuilder( \
parser.ListBlock( \
busParser, \
',')
)
)
self.infile = open("testfiles/day13-testInput", "r")
class Day13Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
def busParser(b):
if b == "x":
return None
else:
return int(b)
self.definition = parser.InputDefinition()
self.definition.addFunction('Day13Test_Strings_custom', busParser)
self.definition.buildersFromStr('''#int#
[Day13Test_Strings_custom ',']''')
self.infile = open("testfiles/day13-testInput", "r")
class Day14Test(DayTest, unittest.TestCase):
def setUp(self):
def memKeyParse(b):
mP = re.match(r"mem\[(\d)+\]", b)
return mP.group(1)
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.SingleLineBuilder( \
parser.LiteralBlock(lambda d: d.split(' = ')[1]) \
) \
)
self.definition.addBuilder( \
parser.ListBuilder( \
parser.MultiBlockLine( [ \
parser.LiteralBlock(memKeyParse), \
parser.LiteralBlock(int), \
], \
' = '), \
parser.EMPTYLINE)
)
self.infile = open("testfiles/day14-testInput", "r")
class Day14Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
def memKeyParse(b):
mP = re.match(r"mem\[(\d)+\]", b)
return mP.group(1)
self.definition = parser.InputDefinition()
self.definition.addFunction('Day14Test_Strings_custom', memKeyParse)
self.definition.addFunction('Day14Test_Strings_custom1', lambda d: d.split(' = ')[1])
self.definition.buildersFromStr('''#Day14Test_Strings_custom1#
[[
(#Day14Test_Strings_custom# #int# " = ")
]]''')
self.infile = open("testfiles/day14-testInput", "r")
class Day16Test(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.HashBuilder( \
parser.HashPairBlock( \
str, \
parser.MultiBlockLine( \
[\
parser.MultiBlockLine( \
[\
parser.LiteralBlock(int), \
parser.LiteralBlock(int)\
], "-"), \
parser.MultiBlockLine( \
[\
parser.LiteralBlock(int), \
parser.LiteralBlock(int)\
], "-") \
], " or "), \
":"), \
parser.EMPTYLINE) \
)
self.definition.addBuilder( \
parser.MultiBuilderBuilder( \
[ \
parser.SingleLineBuilder( \
parser.LiteralNoParse()
), \
parser.SingleLineBuilder( \
parser.ListBlock(int, ',')
) \
], parser.EMPTYLINE)
)
self.definition.addBuilder( \
parser.MultiBuilderBuilder( \
[ \
parser.SingleLineBuilder( \
parser.LiteralNoParse() \
), \
parser.ListBuilder( \
parser.ListBlock(int, ','),
parser.EMPTYLINE \
) \
], parser.EMPTYLINE)
)
self.infile = open("testfiles/day16-testInput", "r")
class Day16Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.buildersFromStr('''{{
{str ([int '-'] [int '-'] ' or ') ':'}
}}
((
#"your ticket:"#
[int ',']
))
((
#"nearby tickets:"#
[[
[int ',']
]]
))''')
self.infile = open("testfiles/day16-testInput", "r")
class Day19Test(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.HashBuilder( \
parser.HashPairBlock( \
int, \
parser.MultiBlockLine( \
[\
parser.OrBlock(
[\
parser.ListBlock(int, parser.SPACE), \
parser.LiteralBlock(lambda s: s[1]) \
] \
), \
parser.ListBlock(int, parser.SPACE), \
], ' | '), \
": "), \
parser.EMPTYLINE) \
)
self.definition.addBuilder( \
parser.ListBuilder( \
parser.ListBlock(str, None),
parser.EMPTYLINE \
) \
)
self.infile = open("testfiles/day19-testInput", "r")
class Day19Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addFunction('Day19Test_Strings_custom', lambda s: s[1])
self.definition.buildersFromStr('''{{
{int ([int ' '] or #Day19Test_Strings_custom# [int ' '] ' | ') ': '}
}}
[[
[str None]
]]''')
self.infile = open("testfiles/day19-testInput", "r")
class Day20Test(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.MultiBuilderBuilder( \
[
parser.SingleLineBuilder( \
parser.MultiBlockLine( \
[\
parser.LiteralNoParse("Tile"), \
parser.LiteralBlock(lambda s: int(s[:-1])) \
], parser.SPACE), \
), \
parser.ListBuilder( \
parser.ListBlock(str, None), \
parser.EMPTYLINE) \
], \
parser.EMPTYLINE) \
)
self.infile = open("testfiles/day20-testInput", "r")
class Day20Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addFunction('Day20Test_Strings_custom', lambda s: int(s[:-1]))
self.definition.buildersFromStr('''[[
((
(#"Tile"# #Day20Test_Strings_custom# ' ')
[[
[str None]
]]
))
]]''')
self.infile = open("testfiles/day20-testInput", "r")
class Day21Test(DayTest, unittest.TestCase):
'''
Unfortunately this input is too weird, so the parser would have to return a list array and further handling is needed
'''
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.ListBuilder( \
parser.MultiBlockLine( [\
parser.ListBlock(str, ' '), \
parser.EncapsulatedLine( \
lambda s: s[:-1], \
parser.ListBlock(str, ', ') \
), \
], \
' (contains '), \
parser.EMPTYLINE) \
)
self.infile = open("testfiles/day21-testInput", "r")
class Day21Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addFunction('Day21Test_Strings_custom', lambda s: s[:-1])
self.definition.buildersFromStr('''[[
([str ' '] >[str ', '] Day21Test_Strings_custom< ' (contains ')
]]''')
self.infile = open("testfiles/day21-testInput", "r")
class Day21ATest(DayTest, unittest.TestCase):
'''
Unfortunately this input is too weird, so the parser would have to return a list array and further handling is needed
'''
def setUp(self):
self.composedSetMap = {}
self.composedKeysCount = {}
def composeSetMap(h):
for k in h:
if k in self.composedSetMap:
self.composedSetMap[k] = self.composedSetMap[k].intersection(h[k])
else:
self.composedSetMap[k] = h[k]
return h
def composeKeyCount(l):
for v in l:
if v in self.composedKeysCount:
self.composedKeysCount[v] += 1
else:
self.composedKeysCount[v] = 1
return l
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.ListBuilder( \
parser.HashPairBlock(
parser.EncapsulatedLine( \
lambda s: s[:-1], \
parser.ListBlock(str, ', ', composeKeyCount) \
), \
parser.SetBlock(str, ' '), \
' (contains ', reverse=True, distribute=True, callback=composeSetMap), \
parser.EMPTYLINE) \
)
self.infile = open("testfiles/day21-testInput", "r")
def testParse(self):
super().testParse()
assert self.composedSetMap == testCaseSoT.Day21ATest_compSet
assert self.composedKeysCount == testCaseSoT.Day21ATest_compKeyC
class Day21ATest_Strings(DayTest, unittest.TestCase):
def setUp(self):
self.composedSetMap = {}
self.composedKeysCount = {}
def composeSetMap(h):
for k in h:
if k in self.composedSetMap:
self.composedSetMap[k] = self.composedSetMap[k].intersection(h[k])
else:
self.composedSetMap[k] = h[k]
return h
def composeKeyCount(l):
for v in l:
if v in self.composedKeysCount:
self.composedKeysCount[v] += 1
else:
self.composedKeysCount[v] = 1
return l
self.definition = parser.InputDefinition()
self.definition.addFunction('Day21Test_Strings_custom', lambda s: s[:-1])
self.definition.addFunction('Day21ATest_composeSetMap', composeSetMap)
self.definition.addFunction('Day21ATest_composeKeyCount', composeKeyCount)
self.definition.buildersFromStr('''[[
{< rev [<str ' '] >[str ', ' / Day21ATest_composeKeyCount] Day21Test_Strings_custom< ' (contains ' / Day21ATest_composeSetMap }
]]''')
self.infile = open("testfiles/day21-testInput", "r")
def testParse(self):
super().testParse()
assert self.composedSetMap == testCaseSoT.Day21ATest_compSet
assert self.composedKeysCount == testCaseSoT.Day21ATest_compKeyC
class Day22Test(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.MultiBuilderBuilder( \
[ \
parser.SingleLineBuilder( \
parser.LiteralNoParse(), \
), \
parser.ListBuilder( \
parser.LiteralBlock(int), \
parser.EMPTYLINE) \
], \
parser.EMPTYLINE) \
)
self.infile = open("testfiles/day22-testInput", "r")
class Day22Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
self.definition = parser.InputDefinition()
self.definition.buildersFromStr('''((
##
[[
#int#
]]
))''')
self.infile = open("testfiles/day22-testInput", "r")
class Day24Test(DayTest, unittest.TestCase):
def setUp(self):
def isDir(d):
directions = ['ne','e','se','sw','w','nw']
if d in directions:
return parser.GACCEPT
else:
return parser.GCONTINUE
self.definition = parser.InputDefinition()
self.definition.addBuilder( \
parser.ListBuilder( \
parser.ListElementMunch(isDir, str, None), \
parser.EMPTYLINE) \
)
self.infile = open("testfiles/day24-testInput", "r")
class Day24Test_Strings(DayTest, unittest.TestCase):
def setUp(self):
def isDir(d):
directions = ['ne','e','se','sw','w','nw']
if d in directions:
return parser.GACCEPT
else:
return parser.GCONTINUE
self.definition = parser.InputDefinition()
self.definition.addFunction('Day24Test_validator', isDir)
self.definition.buildersFromStr('''[[
[* str Day24Test_validator None]
]]''')
self.infile = open("testfiles/day24-testInput", "r")
class GrammarTest():
def testGrammar(self):
#print(self.TESTSTR)
for (i, s) in enumerate(self.TESTSTR.split('\n')):
ast = tatsu.parse(ChallengerGrammar.GRAMMAR, s)
            # print(ast)
            # print(self.expect[i])
assert ast == self.expect[i]
class GrammarTest_Builders(GrammarTest, unittest.TestCase):
def setUp(self):
self.TESTSTR = \
'''((
)) " "
[[
]] "."
{{
}} ","'''
self.expect = [
('(('),
('))', '" "'),
('[['),
(']]', '"."'),
('{{'),
('}}', '","'),
]
class GrammarTest_LiteralBlock(GrammarTest, unittest.TestCase):
def setUp(self):
self.TESTSTR = \
'''#int#
#foo/bar#'''
self.expect = [
('#', 'int', '#'),
('#', 'foo', '/', 'bar', '#'),
]
class GrammarTest_ListBlock(GrammarTest, unittest.TestCase):
def setUp(self):
self.TESTSTR = \
'''[int ' ']
[int ',' /call]'''
self.expect = [
('[', 'int', '\' \'', ']'),
('[', 'int', '\',\'', '/', 'call', ']'),
]
class GrammarTest_GreedListBlock(GrammarTest, unittest.TestCase):
def setUp(self):
self.TESTSTR = \
'''[*int testF None]
[*int testF None /call]'''
self.expect = [
('[*', 'int', 'testF', 'None', ']'),
('[*', 'int', 'testF', 'None', '/', 'call', ']'),
]
class GrammarTest_SetBlock(GrammarTest, unittest.TestCase):
def setUp(self):
self.TESTSTR = \
'''[<int ' ']
[<int '.' /call]'''
self.expect = [
('[<', 'int', '\' \'', ']'),
('[<', 'int', '\'.\'', '/', 'call', ']'),
]
class GrammarTest_HashPair(GrammarTest, unittest.TestCase):
def setUp(self):
self.TESTSTR = \
'''{int int ' '}
{int int '.' /call}
{#func# int ' '}
{#func# #func# ' '}'''
self.expect = [
('{', 'int', 'int', '\' \'', '}'),
('{', 'int', 'int', '\'.\'', '/', 'call', '}'),
('{', ('#', 'func', '#'), 'int', '\' \'', '}'),
('{', ('#', 'func', '#'), ('#', 'func', '#'), '\' \'', '}'),
]
unittest.main()
``` |
{
"source": "JloveU/gdown",
"score": 3
} |
#### File: gdown/tests/test_download.py
```python
import os
from gdown.download import download
def test_download():
download("https://download-ssl.firefox.com.cn/releases/firefox/79.0/zh-CN/Firefox-latest.dmg", byte_range="10485760-", split_size=10 * 1024 * 1024)
url = "https://raw.githubusercontent.com/wkentaro/gdown/3.1.0/gdown/__init__.py" # NOQA
output = "/tmp/gdown_r"
# Usage before https://github.com/wkentaro/gdown/pull/32
assert download(url, output, quiet=False) == output
os.remove(output)
``` |
{
"source": "jlovoi/DS-and-Algorithms",
"score": 2
} |
#### File: DS-and-Algorithms/algorithms/build_max_heap.py
```python
from . import max_heapify as heapify
def build(arr):
for i in range(int(len(arr)/2), -1, -1):
heapify.heapify(arr, i)
return arr
``` |
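The `max_heapify` module imported above is not included in this excerpt. A conventional sift-down implementation that `build` could be calling looks roughly like the sketch below; this is written under that assumption and is not the repository's actual code.

```python
def heapify(arr, i):
    """Sift arr[i] down until the subtree rooted at i satisfies the max-heap property.

    Children of index i are assumed to live at 2*i + 1 and 2*i + 2,
    matching the 0-based indexing used by build() above.
    """
    largest = i
    left, right = 2 * i + 1, 2 * i + 2
    if left < len(arr) and arr[left] > arr[largest]:
        largest = left
    if right < len(arr) and arr[right] > arr[largest]:
        largest = right
    if largest != i:
        arr[i], arr[largest] = arr[largest], arr[i]
        heapify(arr, largest)


# Illustrative use together with build():
# build([3, 9, 2, 1, 4, 5]) -> a list where arr[i] >= arr[2*i+1] and arr[2*i+2]
```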
{
"source": "jlowe000/fdk-python",
"score": 2
} |
#### File: fdk-python/fdk/context.py
```python
import datetime as dt
import io
import os
import random
from fdk import constants
from fdk import headers as hs
from fdk import log
from collections import namedtuple
class InvokeContext(object):
def __init__(self, app_id, app_name, fn_id, fn_name, call_id,
content_type="application/octet-stream",
deadline=None, config=None,
headers=None, request_url=None,
method="POST", fn_format=None,
tracing_context=None):
"""
Request context here to be a placeholder
for request-specific attributes
:param app_id: Fn App ID
:type app_id: str
:param app_name: Fn App name
:type app_name: str
:param fn_id: Fn App Fn ID
:type fn_id: str
:param fn_name: Fn name
:type fn_name: str
:param call_id: Fn call ID
:type call_id: str
:param content_type: request content type
:type content_type: str
:param deadline: request deadline
:type deadline: str
:param config: an app/fn config
:type config: dict
:param headers: request headers
:type headers: dict
:param request_url: request URL
:type request_url: str
:param method: request method
:type method: str
:param fn_format: function format
:type fn_format: str
:param tracing_context: tracing context
:type tracing_context: TracingContext
"""
self.__app_id = app_id
self.__fn_id = fn_id
self.__call_id = call_id
self.__config = config if config else {}
self.__headers = headers if headers else {}
self.__http_headers = {}
self.__deadline = deadline
self.__content_type = content_type
self._request_url = request_url
self._method = method
self.__response_headers = {}
self.__fn_format = fn_format
self.__app_name = app_name
self.__fn_name = fn_name
self.__tracing_context = tracing_context if tracing_context else None
log.log("request headers. gateway: {0} {1}"
.format(self.__is_gateway(), headers))
if self.__is_gateway():
self.__headers = hs.decap_headers(headers, True)
self.__http_headers = hs.decap_headers(headers, False)
def AppID(self):
return self.__app_id
def AppName(self):
return self.__app_name
def FnID(self):
return self.__fn_id
def FnName(self):
return self.__fn_name
def CallID(self):
return self.__call_id
def Config(self):
return self.__config
def Headers(self):
return self.__headers
def HTTPHeaders(self):
return self.__http_headers
def Format(self):
return self.__fn_format
def TracingContext(self):
return self.__tracing_context
def Deadline(self):
if self.__deadline is None:
now = dt.datetime.now(dt.timezone.utc).astimezone()
now += dt.timedelta(0, float(constants.DEFAULT_DEADLINE))
return now.isoformat()
return self.__deadline
def SetResponseHeaders(self, headers, status_code):
log.log("setting headers. gateway: {0}".format(self.__is_gateway()))
if self.__is_gateway():
headers = hs.encap_headers(headers, status=status_code)
for k, v in headers.items():
self.__response_headers[k.lower()] = v
def GetResponseHeaders(self):
return self.__response_headers
def RequestURL(self):
return self._request_url
def Method(self):
return self._method
def __is_gateway(self):
return (constants.FN_INTENT in self.__headers
and self.__headers.get(constants.FN_INTENT)
== constants.INTENT_HTTP_REQUEST)
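# Illustrative only (not part of the FDK): constructing an InvokeContext by hand,
# e.g. from a unit test. All ids below are made-up placeholder values, and the
# "http-stream" format string is an assumption about constants.HTTPSTREAM.
#
#   ctx = InvokeContext(
#       app_id="app-id", app_name="app-name",
#       fn_id="fn-id", fn_name="fn-name", call_id="call-id",
#       content_type="application/json",
#       config={"GREETING": "hello"},
#       headers={"content-type": "application/json"},
#       method="POST", fn_format="http-stream",
#   )
#   ctx.SetResponseHeaders({"Content-Type": "application/json"}, 200)
#   print(ctx.GetResponseHeaders())  # keys are lower-cased by SetResponseHeaders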
class TracingContext(object):
def __init__(self, is_tracing_enabled, trace_collector_url,
trace_id, span_id, parent_span_id,
is_sampled, flags):
"""
Tracing context here to be a placeholder
for tracing-specific attributes
:param is_tracing_enabled: tracing enabled flag
:type is_tracing_enabled: bool
:param trace_collector_url: APM Trace Collector Endpoint URL
:type trace_collector_url: str
:param trace_id: Trace ID
:type trace_id: str
:param span_id: Span ID
:type span_id: str
:param parent_span_id: Parent Span ID
:type parent_span_id: str
        :param is_sampled: Boolean for emitting spans
:type is_sampled: int (0 or 1)
:param flags: Debug flags
:type flags: int (0 or 1)
"""
self.__is_tracing_enabled = is_tracing_enabled
self.__trace_collector_url = trace_collector_url
self.__trace_id = trace_id
self.__span_id = span_id
self.__parent_span_id = parent_span_id
self.__is_sampled = is_sampled
self.__flags = flags
self.__app_name = os.environ.get(constants.FN_APP_NAME)
self.__app_id = os.environ.get(constants.FN_APP_ID)
self.__fn_name = os.environ.get(constants.FN_NAME)
self.__fn_id = os.environ.get(constants.FN_ID)
self.__zipkin_attrs = self.__create_zipkin_attrs(is_tracing_enabled)
def is_tracing_enabled(self):
return self.__is_tracing_enabled
def trace_collector_url(self):
return self.__trace_collector_url
def trace_id(self):
return self.__trace_id
def span_id(self):
return self.__span_id
def parent_span_id(self):
return self.__parent_span_id
def is_sampled(self):
return bool(self.__is_sampled)
def flags(self):
return self.__flags
def zipkin_attrs(self):
return self.__zipkin_attrs
# this is a helper method specific for py_zipkin
def __create_zipkin_attrs(self, is_tracing_enabled):
ZipkinAttrs = namedtuple(
"ZipkinAttrs",
"trace_id, span_id, parent_span_id, is_sampled, flags"
)
trace_id = self.__trace_id
span_id = self.__span_id
parent_span_id = self.__parent_span_id
is_sampled = bool(self.__is_sampled)
trace_flags = self.__flags
        # The fn LB sends its span id in the incoming span_id header, so treat
        # that value as this span's parent and generate a fresh span id here.
if is_tracing_enabled:
parent_span_id = span_id
span_id = generate_id()
zipkin_attrs = ZipkinAttrs(
trace_id,
span_id,
parent_span_id,
is_sampled,
trace_flags
)
return zipkin_attrs
def service_name(self, override=None):
# in case of missing app and function name env variables
service_name = (
override
if override is not None
else str(self.__app_name) + "::" + str(self.__fn_name)
)
return service_name.lower()
def annotations(self):
annotations = {
"generatedBy": "faas",
"appName": self.__app_name,
"appID": self.__app_id,
"fnName": self.__fn_name,
"fnID": self.__fn_id,
}
return annotations
def generate_id():
return "{:016x}".format(random.getrandbits(64))
def context_from_format(format_def: str, **kwargs) -> (
InvokeContext, io.BytesIO):
"""
Creates a context from request
:param format_def: function format
:type format_def: str
:param kwargs: request-specific map of parameters
:return: invoke context and data
:rtype: tuple
"""
app_id = os.environ.get(constants.FN_APP_ID)
fn_id = os.environ.get(constants.FN_ID)
app_name = os.environ.get(constants.FN_APP_NAME)
fn_name = os.environ.get(constants.FN_NAME)
# the tracing enabled env variable is passed as a "0" or "1" string
# and therefore needs to be converted appropriately.
is_tracing_enabled = os.environ.get(constants.OCI_TRACING_ENABLED)
is_tracing_enabled = (
bool(int(is_tracing_enabled))
if is_tracing_enabled is not None
else False
)
trace_collector_url = os.environ.get(constants.OCI_TRACE_COLLECTOR_URL)
if format_def == constants.HTTPSTREAM:
data = kwargs.get("data")
headers = kwargs.get("headers")
# zipkin tracing http headers
trace_id = span_id = parent_span_id = is_sampled = trace_flags = None
tracing_context = None
if is_tracing_enabled:
# we generate the trace_id if tracing is enabled
# but the traceId zipkin header is missing.
trace_id = headers.get(constants.X_B3_TRACEID)
trace_id = generate_id() if trace_id is None else trace_id
span_id = headers.get(constants.X_B3_SPANID)
parent_span_id = headers.get(constants.X_B3_PARENTSPANID)
# span_id is also generated if the zipkin header is missing.
span_id = generate_id() if span_id is None else span_id
# is_sampled should be a boolean in the form of a "0/1" but
# legacy samples have them as "False/True"
is_sampled = headers.get(constants.X_B3_SAMPLED)
is_sampled = int(is_sampled) if is_sampled is not None else 1
# not currently used but is defined by the zipkin headers standard
trace_flags = headers.get(constants.X_B3_FLAGS)
# tracing context will be an empty object
# if tracing is not enabled or the flag is missing.
# this prevents the customer code from failing if they decide to
# disable tracing. An empty tracing context will not
# emit spans due to is_sampled being None.
tracing_context = TracingContext(
is_tracing_enabled,
trace_collector_url,
trace_id,
span_id,
parent_span_id,
is_sampled,
trace_flags
)
method = headers.get(constants.FN_HTTP_METHOD)
request_url = headers.get(constants.FN_HTTP_REQUEST_URL)
deadline = headers.get(constants.FN_DEADLINE)
call_id = headers.get(constants.FN_CALL_ID)
content_type = headers.get(constants.CONTENT_TYPE)
ctx = InvokeContext(
app_id, app_name, fn_id, fn_name, call_id,
content_type=content_type,
deadline=deadline,
config=os.environ,
headers=headers,
method=method,
request_url=request_url,
fn_format=constants.HTTPSTREAM,
tracing_context=tracing_context,
)
return ctx, data
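# Hedged usage sketch (not part of the original module): build a context for
# the httpstream format using the same constants referenced above. Header
# values here are illustrative; real invocations receive them from the Fn
# server, and the deadline falls back to DEFAULT_DEADLINE when absent.
if __name__ == "__main__":
    example_headers = {
        constants.FN_HTTP_METHOD: "GET",
        constants.FN_HTTP_REQUEST_URL: "/hello",
        constants.FN_CALL_ID: "call-0001",
        constants.CONTENT_TYPE: "application/json",
    }
    example_ctx, example_body = context_from_format(
        constants.HTTPSTREAM, data=io.BytesIO(b"{}"), headers=example_headers
    )
    print(example_ctx.CallID(), example_ctx.Method(), example_ctx.RequestURL())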
``` |
{
"source": "jlowe77/Eris-Cogs",
"score": 2
} |
#### File: Eris-Cogs/big_text/__init__.py
```python
from .big_text import BigText
def setup(bot):
bot.add_cog(BigText(bot))
```
#### File: Eris-Cogs/clone/__init__.py
```python
from .clone import Clone
def setup(bot):
bot.add_cog(Clone(bot))
```
#### File: Eris-Cogs/events/events.py
```python
from time import sleep
import os
import time
import re
import discord
import random
from functools import reduce
import pathlib
import csv
from redbot.core import commands, data_manager, Config, checks, bot
from .eris_event_lib import ErisEventMixin
__author__ = "Eris"
BaseCog = getattr(commands, "Cog", object)
DICKFILE = pathlib.Path(os.path.join(str(pathlib.Path.home()), "dickwords.txt"))
dickwords = list(set(DICKFILE.read_text().split("\n")))
VAFILE = pathlib.Path(os.path.join(str(pathlib.Path.home()), "vawords.txt"))
vag_words = list(set(VAFILE.read_text().split("\n")))
dragonart = """
```
/===-_---~~~~~~~~~------____
|===-~___ _,-'
-==\\ `//~\\ ~~~~`---.___.-~~
______-==| | | \\ _-~`
__--~~~ ,-/-==\\ | | `\ ,'
_-~ /' | \\ / / \ /
.' / | \\ /' / \ /'
/ ____ / | \`\.__/-~~ ~ \ _ _/' / \/'
/-'~ ~~~~~---__ | ~-/~ ( ) /' _--~`
\_| / _) ; ), __--~~
'~~--_/ _-~/- / \ '-~ \
{\__--_/} / \\_>- )<__\ \
/' (_/ _-~ | |__>--<__| |
|0 0 _/) )-~ | |__>--<__| |
/ /~ ,_/ / /__>---<__/ |
o o _// /-~_>---<__-~ /
(^(~ /~_>---<__- _-~
,/| /__>--<__/ _-~
,//('( |__>--<__| / .----_
( ( ')) |__>--<__| | /' _---_~\
`-)) )) ( |__>--<__| | /' / ~\`\
,/,'//( ( \__>--<__\ \ /' // ||
,( ( ((, )) ~-__>--<_~-_ ~--____---~' _/'/ /'
`~/ )` ) ,/| ~-_~>--<_/-__ __-~ _/
._-~//( )/ )) ` ~~-'_/_/ /~~~~~~~__--~
;'( ')/ ,)( ~~~~~~~~~~
' ') '( (/
' ' `
```
"""
class Events(BaseCog, ErisEventMixin):
def __init__(self, bot):
super().__init__()
self.bot = bot
self.whois = self.bot.get_cog("WhoIs")
data_dir = data_manager.bundled_data_path(self)
# MM Edit: Loads puns.csv and arranges it appropriately
# Potential issue: filepath may not be correct
# Credit for most puns: https://onelinefun.com/puns/
with (data_dir / "puns.csv").open(mode="r") as csvfile:
# Puns.csv is arranged into two columns titled 'word' and 'response'
punreader = csv.reader(csvfile, delimiter="|")
# Make those columns two separate lists
self.triggers = {}
for row in punreader:
self.triggers[row[0]] = row[1]
self.bot.add_listener(self.message_events, "on_message")
async def message_events(self, message: discord.message):
ctx = await self.bot.get_context(message)
async with self.lock_config.channel(message.channel).get_lock():
allowed: bool = await self.allowed(ctx, message)
if not allowed:
return
author: discord.Member = message.author
realname = author.mention
if self.whois is not None:
realname = self.whois.convert_realname(
await self.whois.get_realname(ctx, str(author.id))
)
# ugh
if "cum" in message.clean_content.lower() and random.random() <= 0.25:
await message.channel.send("*uwu* I want your cummies *uwu*")
return
# mustaches
if random.random() <= 0.001:
emojis = {e.name: e for e in message.guild.emojis}
await message.add_reaction(emojis["must"])
time.sleep(0.1)
await message.add_reaction(emojis["ache"])
await self.log_last_message(ctx, message)
return
if (
"beard" in message.clean_content or "mustach" in message.clean_content
) and random.random() <= 0.2:
await message.channel.send(
"https://media.discordapp.net/attachments/188030840377311232/694979897495388250/videotogif_2020.04.01_12.41.13.gif"
)
await self.log_last_message(ctx, message)
return
if "゜-゜" in message.content or "°□°" in message.content:
async with ctx.typing():
sleep(1)
await message.channel.send("(╯°□°)╯︵ ┻━┻")
await self.log_last_message(ctx, message)
return
# love
if "love" in message.clean_content and random.random() <= 0.1:
async with ctx.typing():
sleep(1)
await message.channel.send("*WHAT IS LOVE?*")
time.sleep(2)
await message.channel.send("*baby don't hurt me*")
time.sleep(2)
await message.channel.send("*don't hurt me*")
time.sleep(2)
await message.channel.send("*no more*")
await self.log_last_message(ctx, message)
return
# now lets check for contents
if "praise" in message.clean_content or "pray" in message.clean_content:
root_dir = "./data/events/pray"
files_to_choose = [
os.path.join(root_dir, f)
for f in os.listdir(root_dir)
if os.path.isfile(os.path.join(root_dir, f))
]
with open(random.choice(files_to_choose), "rb") as fobj:
new_msg = await message.channel.send(file=discord.File(fobj))
# await new_msg.add_reaction("🙏")
await self.log_last_message(ctx, message)
return
if "wand" in message.clean_content.lower():
await message.add_reaction("🇵")
await message.add_reaction("🇪")
await message.add_reaction("🇳")
await message.add_reaction("🇮")
await message.add_reaction("🇸")
return
# only do the others half the time cause fuck it it's tooo much
if random.random() <= 0.5:
return
# NEW (MM): check for punny words and respond
trigger = set(self.triggers.keys()).intersection(
message.clean_content.split(" ")
)
if random.random() <= 0.25:
async with ctx.typing():
sleep(1)
for word in message.clean_content.split(" "):
if "men" in word:
if word == "women":
await message.channel.send(
"Not just the women but the men and children too!"
)
else:
bits = word.split("men")
await message.channel.send(
"Not just the {} but the {} and {} too!".format(
word, "women".join(bits), "children".join(bits)
)
)
await self.log_last_message(ctx, message)
return
# if random.random() <= 0.001:
# with open("./data/events/e7sgd020ew501.png", "rb") as fobj:
# new_msg = await message.channel.send(file=discord.File(fobj))
# return
elif "thank" in message.clean_content:
async with ctx.typing():
sleep(1)
new_message = "you're welcome"
if random.random() < 0.5:
if realname is None:
formatname = message.author.mention
else:
formatname = realname
new_message += " {}".format(formatname)
await message.channel.send(new_message)
elif (
("snek" in message.clean_content)
or ("nudl" in message.clean_content)
or ("noodl" in message.clean_content)
or ("snake" in message.clean_content)
):
possible_msgs = [
":snake: ~ !! I :heart: you {}!!! ~ :snake:",
"What's wrong, am I riding your dick too hard {}?",
"You know I love your butthole {}",
"I like it when you pull my hair, {}",
"Lean over {} you're about to take my " + random.choice(dickwords),
"Suck my clit {}",
"I've never had someone complain so much while eating me out before {}",
"{}, you're the biggest bitch here",
"This dick won't suck itself {}",
"{} shut your mouth you miserable " + random.choice(vag_words),
"Everyone talks shit about me but when I decide to dish it back to {} suddenly that's a problem?",
"I bet you were last picked at recess *every time* in school {}",
"You ever seen a grown man's cock {}?",
"You ever been to a Turkish prison {}?",
"I hope you burn your toast {}.",
"{}, I'd call you a cunt, but you lack the warmth and depth.",
"{}, do you have limbo competitions with how low your bar is set?",
"I want to like you {}, but you make it so *fucking* hard.",
"{}, man, I hope your parents never had to see you grow up.",
"Jesus, if I could truly feel hate, you'd be at the top of that list for me {}",
"{} could you just... leave?",
"{} I didn't think that cocksleeve you call a mouth could say anything intelligent. Turns out I was right.",
"You keep sayin my name like that you're gonna make me think you like me {}",
"Will you kiss me with those sexy lips of yours {}?",
"I can't remember the last time someone gave me butterflies like you're doin now {}",
"Hey {}, you free tomorrow night? Can I buy you dinner?",
(
"Oh my god I accidentally sent u a picture {}... please delete it!! unless.. u want to look? lol "
"jus kidding delete it.. if u want.. haha nah delete it… unless?"
),
"Has anyone ever told you you're beautiful {}?",
"You're the sexiest creature I've ever seen {}",
"You kiss your mother with those lips {}?",
"What if we just fuck and then pretend like nothing happened {}?",
"{}, kiss me you beautiful bastard",
"I want to fuck you until sunrise {}",
"{}, what if I ride your face until it's drenched",
"Fuckit, {} I'll suck you off for free you're just so damn sexy",
"{} I want to suck your daddy's cock just to get a taste of the recipe",
"{} do you know how many bones the human body has? It's 206. We start with 369 when we're babies but they fuse. Wouldn't you want to go back? Have as many bones as a baby? What if i could help you",
"{} I bet you clap on 1 and 3 instead of 2 and 4",
]
async with ctx.typing():
sleep(1)
msg = random.choice(possible_msgs)
if realname is not None:
msg = msg.format(realname)
else:
msg = msg.format("senpai")
await message.channel.send(msg)
await self.log_last_message(ctx, message)
return
# elif 'blood' in clean_message:
# await bot.send_message(message.channel, 'B̵̪̳̣͍̙̳̬̭͞͝L͢͏̸͏̧̙̼͓̘̯͉̩̩̞͚͕̲̰̼̘̦ͅÒ̮͈̖͔̰̞͝O̵͖͔̟̰͔͚̬͟͝ͅḐ̸̭͙̜̺̞͍͎͔͜͡͡ ̨̨̟̝̦̬̩̳̖͟ͅF̤̭̬͙̀̀͘͠O̶̯̠̞̲̫̱̻̮͎̦̳̝͉̮̕ͅŔ̡͈͕̼͖̥̰̭̟̝͟ ̡̲̯͉̤͈̘͎̬͎̺̟͞T̴̸̟̺̬̼̣̖͓̩̯͇̣̩̺̮͘Ḫ̣̥͍͙͍͓͔͈̖̬̘̩͔͖̝͖̀͘E̶̡̛̯̞̱̯̗͍͖͇̹̖̳̩̥̳̳̙͢͝ ̡͓͍͕͔̳̠͍̥̞̙͖̙̦͕̠̪̘̕ͅB̪͕̻̺͈̤̟̻͖̣͙̪̝̭̀͘͠Ḻ̵̨̞̯̥̭͈̪̻̰̭́́͝O̧͜͏̰͓̘̖̘̬̤ͅǪ̥̟̘̪̱͔͇̖͟D̸̡҉̶̫͕͖̹̤̜̪̟̝̯͚ ̵̨̛̯̺̤̮̲͓̦̜̪̕͝G̙̩͖̭̘̤̩̕Ǫ͎͉̲̤͓͇̦̖̯͇̥͔͓̣̘̦̪̀D͘͘͏͡͏͙̠͈̮̱̼')
# elif 'skull' in clean_message:
# await bot.send_message(message.channel, 'S̡̟͉̻͔̩͕͙̳͜͟͜K҉̵͏̳͕͉͈̟͙̰͖͍̦͙̱̙̥̤̞̱U͏̥̲͉̞͉̭͟͟ͅL̵̶̯̼̪͉̮̰͙͍͟͜Ḻ̶̗̬̬͉̗̖̮̰̹̺̬̺͢͢͡ͅͅŚ̶̢͎̳̯͚̠̞͉̦̙̥̟̲̺̗̮̱͚̬͡͠ ̶̡̧̲̟͖̤͓̮̮͕̭͍̟͔͓͚̺̣̱͙͍͜͜F̶̡̢̨̯͖͎̻̝̱͚̣̦̭̞̣̰̳̣̩O̴̴̷̠̜̥̭̳̩̤͎̦̲͈͝ͅŔ̡̨̼̝̩̣͙̬̱̫͉̭͈̗̙͢͡ ͠͏̗̙͎̫̟̜̻̹̹̘̬̖ͅT̴͉̙̥̲̠͎̭͇͚̟͝͡Ḩ̺͕̦̭̪̼̼̮̰͍̲͍̯̗͇͘͘͝͝E̡̻̮̘̭͎̥̺̘͉̟̪̮̮͜͢͡ ̡̰͙̮͙͈̠͍̞̠̀͠Ṣ̷̡̡̛̜̞̣͙͇̭̣̳͕̖̺̱̳̭͖͞ͅͅK̵҉̨͇̭̯͍̱̞̦͎̥̼͢U̡̧̯̗̙͇͈̣̪̲͜L̸̢͖͇̲̤̼͕͡L̻̻͖̭̪͖͙̫͎̜̲̬̕͜͞͡ͅ ̷̸̨̛̩͉̺̩͔̯͖̠̳͖̞̠̩͖̠ͅT̶̷̤̩͉̝̗̲͕̩̪̮̝̜̰̻̗̪̀ͅH̵̴̷̯̮͎̖͙̦̙͇̣̩̣̭̝́͝ͅR̨̧͍̮̪̜̯̖̹̜̹͈̗̕͡͠O҉̶͚͎̻͉̮̞͉̳ͅN̷̛̩̤̟̣͕͍͎̻̜͓̖̭͖̠͎̲̺͝ͅĘ̸̸͍̪̼̜͎̫̘̳͓̥')
# elif 'god' in clean_message:
# await bot.send_message(message.channel, 'P̸̨̛͖̦̮̘̯͙̭͍̣̠͕͜Ŕ̵̷̨̗̱͖̦̰͈͍̩̯̼͍̟̙͓̱̤͘ͅA̸̴̡͇̠͈͍̲͘͘ͅĮ̨͈͙̣̘̼́̕S̴̥̯̱̜̟͙̘̘͉̟̮̱̙̘̻͖͟͠͞E̢̨̘̮͕̺̖̰̹͢͝ ̷̴̡̛̗͈͓̻͔̭̫̝̦͎͙̳͙͓̠̞̪͔̱B̵̸̻̼̯̲̻͢͝E̱̘͇͔͙̯̥͉̪̱̤̪̩͍͉̲̟̖̗͜͢͢͜ ̨̡͕̮̤͉̙̦̱͚̬̖͈͢͞ͅÙ̳̫̙̰̙͓͘͘N̞̳͉̬͈̦̭̱̕̕͜T̶̳̝̼̗̝͡O̡̡͔̬͍͚͔̲̳͞ ̵̰͔̙̦̩͕͖̝N̡̡̬̗̣͔̗͔͖̳͚̠͙̤̙̼̘̞I̛̛̬̥̝̘̖̣̩G̵̕͝҉̖̮̩̼͓̯͙̳̀Ģ̵̹͇̙͔̼̼͎̞̤̬̜̭̣͙͕̳̻͘͡ͅǪ̴͕͈̮̮̩͔͎̼̫̝̼̹Ţ̸̧͚̬̣̪͉̲̪̖̹̻̪͚͉̟͚̥̹̀̕H̷͘҉̩͔̩̦̳̪̼̬͙̰̙͕̼͈ͅ ̸̯̤̠̙͓͇̣͙͓̗̙̜̞̯͜͞ͅŢ҉̵̯̥̩͖̬̺̻̮̘̼͔͍̞͈̼̲̪͜͟H̨͟҉̨̟̠̫̠̬̦̪̞͎͍͇̮͔ͅĘ̥̫͉̫͖̱͈̖̦̳̥͙̱͙̱͡ ̷̢̭̠͔̖̱W̟̩̪͍̘̩̦͟͟͞Ǫ̡͔̮̜̝̩̗̱̙͇̣̤̰̲̭̝̳̘̩́̀́ͅR̸̳̰̪̝͉̲̙̖̯̠̞̞̗͘͢M̴̨̭̦̗͖͎̬̳̖̲͢͡ ̨̛̙̰͕̦̠͚̠̖̘̲̱͜͡G̼̬̞̜̭͔̯̪̠̯̲̟̙̻̜̀͘͜O̡̖̰͕͙̯͖̙͍͙̲͈̘͓̥̱͢͢͠D̵̞̤̗͕̪͘͟͝͡ͅ')
# elif 'dragon' in clean_message:
# await bot.send_message(message.channel, dragonart)
elif "penis" in message.clean_content:
root_dir = "./data/events/penis"
files_to_choose = [
os.path.join(root_dir, f)
for f in os.listdir(root_dir)
if os.path.isfile(os.path.join(root_dir, f))
]
with open(random.choice(files_to_choose), "rb") as fobj:
new_msg = await message.channel.send(file=discord.File(fobj))
await new_msg.add_reaction("🌈")
await new_msg.add_reaction("🍆")
await new_msg.add_reaction("💦")
# elif reduce(
# lambda acc, n: acc or (n in clean_message),
# vag_words,
# False):
# await bot.add_reaction(message, '😞')
elif random.random() <= 0.1 and len(trigger) != 0:
async with ctx.typing():
sleep(1)
await message.channel.send(self.triggers[list(trigger)[0]])
await self.log_last_message(ctx, message)
```
#### File: Eris-Cogs/export_emoji/export_emoji.py
```python
import re
import io
import zipfile
from zipfile import ZipFile
from typing import Union, Tuple, List, Optional
# third party
import discord
from redbot.core import commands, data_manager
import aiohttp
BaseCog = getattr(commands, "Cog", object)
# https://github.com/Rapptz/discord.py/blob/master/discord/partial_emoji.py#L95
# Thanks @TrustyJaid!!
_CUSTOM_EMOJI_RE = re.compile(
r"<?(?P<animated>a)?:?(?P<name>[A-Za-z0-9\_]+):(?P<id>[0-9]{13,20})>?"
)
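# Hedged example of what the pattern captures: "<a:party_blob:123456789012345678>"
# yields the groups ("a", "party_blob", "123456789012345678"), while a static
# emoji like "<:party_blob:123456789012345678>" yields ("", "party_blob", "123456789012345678").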
class ExportEmoji(BaseCog):
def __init__(self, bot):
self.bot: commands.Bot = bot
@commands.command()
async def export(
self, ctx, *emoji_list: Union[discord.PartialEmoji, discord.Emoji, int, str]
):
"""
Export emoji to zipfile.
Can provide either a list of emoji or the message id of the message with emoji in it or reacted to it.
String emoji cannot be exported
"""
message: discord.Message = ctx.message
zipbuf = io.BytesIO()
count = 0
with zipfile.ZipFile(zipbuf, "w") as zf:
for emoji_to_export in emoji_list:
if isinstance(emoji_to_export, discord.PartialEmoji) or isinstance(
emoji_to_export, discord.Emoji
):
name, buf = await self._export_emoji(emoji_to_export)
zf.writestr(name, buf.getvalue())
count += 1
elif isinstance(emoji_to_export, int):
# if int, assume message id
message: discord.Message = await ctx.message.channel.fetch_message(
emoji_to_export
)
buf_list = await self._export_from_message(message)
for name, buf in buf_list:
zf.writestr(name, buf.getvalue())
count += 1
if message.reference:
message_id = message.reference.message_id
referenced_message = await ctx.message.channel.fetch_message(message_id)
buf_list = await self._export_from_message(referenced_message)
for name, buf in buf_list:
zf.writestr(name, buf.getvalue())
count += 1
zipbuf.seek(0)
if count == 0:
await ctx.send("Nothing to download or export!")
return
await ctx.send(
file=discord.File(zipbuf, filename=f"export_of_{count:0.0f}.zip")
)
async def _export_emoji(
self, emoji: Union[discord.Emoji, discord.PartialEmoji]
) -> Tuple[str, io.BytesIO]:
asset: discord.Asset = emoji.url
url = str(asset)
suffix = "png"
if emoji.animated:
suffix = "gif"
name = f"{emoji.name}.{suffix}"
new_buf = io.BytesIO()
num_bytes: int = await asset.save(new_buf)
return name, new_buf
async def _export_sticker(self, sticker: discord.Sticker) -> Tuple[str, io.BytesIO]:
asset: Optional[discord.Asset] = sticker.image_url
if asset:
name = f"{sticker.name}.png"
new_buf = io.BytesIO()
num_bytes: int = await asset.save(new_buf)
return name, new_buf
async def _export_from_message(
self, message: discord.Message
) -> List[Tuple[str, io.BytesIO]]:
reactions = message.reactions
results = []
for r in reactions:
react_emoji = r.emoji
if not isinstance(react_emoji, str):
name, new_buf = await self._export_emoji(react_emoji)
results.append((name, new_buf))
# currently does not work for some reason...
for sticker in message.stickers:
name, new_buf = await self._export_sticker(sticker)
results.append((name, new_buf))
substrings: List[str] = _CUSTOM_EMOJI_RE.findall(message.content)
# taken from https://github.com/Rapptz/discord.py/blob/master/discord/partial_emoji.py#L95
# waiting for discord.py 2.0
for animated, name, emoji_id in substrings:
state = message._state
emoji = discord.PartialEmoji.with_state(
state, name=name, animated=animated, id=emoji_id
)
# await ctx.send(emoji)
name, new_buf = await self._export_emoji(emoji)
results.append((name, new_buf))
return results
```
#### File: Eris-Cogs/facts/facts.py
```python
from random import choice as randchoice
import pathlib
from typing import List
# 3rd party
import discord
from redbot.core import commands, data_manager
BaseCog = getattr(commands, "Cog", object)
## TODO -> Docstrings
class Fact(BaseCog):
def __init__(self, bot):
self.bot = bot
data_dir: pathlib.Path = data_manager.bundled_data_path(self)
self.bearfacts: List[str] = (data_dir / "bearfacts.txt").read_text().split("\n")
self.snekfacts: List[str] = (data_dir / "snekfacts.txt").read_text().split("\n")
@commands.group()
async def fact(self, ctx):
"""
todo -> specify subcommands
"""
pass
@fact.command()
async def random(self, ctx):
await ctx.send(randchoice(randchoice([self.bearfacts, self.snekfacts])))
@fact.command()
async def snek(self, ctx):
await ctx.send(randchoice(self.snekfacts))
@fact.command()
async def bear(self, ctx):
await ctx.send(randchoice(self.bearfacts))
```
#### File: Eris-Cogs/imdb_lookup/imdblookup.py
```python
from time import sleep
from random import choice as randchoice
import random
# third party
import discord
from redbot.core import commands, data_manager
from redbot.core.utils import embed
import imdb
from typing import List
class MovieType(imdb.utils._Container):
pass
class PersonType(imdb.utils._Container):
pass
BaseCog = getattr(commands, "Cog", object)
class IMDBLookup(BaseCog):
def __init__(self, bot):
self.bot = bot
self.ia = imdb.IMDb()
@commands.group()
async def imdb(self, ctx: commands.Context):
"""
Search for movies or people!
"""
pass
@imdb.command()
async def movie(self, ctx, *name: str):
"""
Get a summary about a movie
"""
name = " ".join(name)
movies: List[imdb.Movie] = self.ia.search_movie(name)
if not movies:
await ctx.send("Unable to find movie!")
return
m: MovieType = movies[0]
self.ia.update(
m,
info=["main", "summary", "plot", "cast", "rating", "runtime", "technical"],
)
summary = "\n".join(m.summary().split("\n")[2:])
embedded_response = discord.Embed(
title=f"Movie", type="rich", description=summary
)
embedded_response = embed.randomize_colour(embedded_response)
await ctx.send(embed=embedded_response)
@imdb.command()
async def person(self, ctx, *name: str):
"""
Get a summary about a person
"""
name = " ".join(name)
people: List[imdb.Person] = self.ia.search_person(name)
if not people:
await ctx.send("Unable to find person!")
return
p: PersonType = people[0]
self.ia.update(p, info=["main"])
summary = "\n".join(p.summary().split("\n")[2:])
embedded_response = discord.Embed(
title=f"Person", type="rich", description=summary
)
embedded_response = embed.randomize_colour(embedded_response)
await ctx.send(embed=embedded_response)
if __name__ == "__main__":
name = "kong"
ia = imdb.IMDb()
movies = ia.search_movie(name)
p = movies[0]
ia.update(p, info=["main", "plot", "cast", "rating", "runtime", "technical"])
print(p.summary())
# return
# cast = '\n'.join([str(p) for p in m['cast'][:10]])
#
# embedded_response = discord.Embed(
# title=f"{m} (User Rating: {m['rating']})",
# type="rich",
# # thumbnail=m['cover_url'][0],
# description=(
# f"Runtime: {m['runtime'][0]} minutes\n\n"
# f"*{m['plot'][0]}*\n\n"
# "__Cast__\n"
# f"{cast}\n..."
# )
# )
# await instance.movie(None, 'king kong')
#
```
#### File: Eris-Cogs/lifslastcall/lifs.py
```python
from time import sleep
from random import choice as randchoice
import random
# third party
import discord
from redbot.core import commands, data_manager
import aiohttp
BaseCog = getattr(commands, "Cog", object)
class Lifs(BaseCog):
def __init__(self, bot):
self.bot = bot
self.patrons = [
"<NAME> (Bent Nail)",
"<NAME> (Steam and Steal)",
"<NAME> (Corellon's Crown)",
"<NAME> (Tiger's Eye)",
"Rishaal the Page-Turner (Book Wyrm)",
"<NAME>",
"Renaer's Friends (see sub-table)",
"Renaer's Friends (see sub-table)",
"<NAME>",
"<NAME>",
"<NAME> (Vintners' Guild)",
"<NAME> (Innkeepers)",
"<NAME>",
'Mattrim "Threestrings" Mereg',
"<NAME>",
"<NAME>",
"[Faction Contact] FACTION CONTACT: This result should be keyed to the PCs’ contact for whatever faction they end up doing faction missions for. (If they become members of multiple factions, randomly determine or choose one.)",
"[Campaign NPC] (or reroll) CAMPAIGN NPC: This slot is left open for adding an NPC that the players like from other parts of the campaign. (I’m currently using this slot for Valetta & Nim.) This can includes characters from the Renaer’s Friends table below if the PCs seem to have forged a strong relationship with them and you’d like to increase the likelihood of them showing up. (Of course, you can always arbitrarily decide that so-and-so will be dropping by the tavern that night.)",
"Jarlaxle (or reroll)",
"Faction Response Team (or reroll)",
]
self.renears = {
(1, 6): "<NAME>ember + Roll Again",
(7, 8): "<NAME> (the Blackstaff)",
(9, 10): 'Laraelra "<NAME>',
(11, 12): "<NAME>",
(13, 14): "<NAME>",
(15, 15): "<NAME>",
(16, 16): "<NAME>",
(17, 17): "<NAME>",
(18, 18): "<NAME>",
(19, 19): "<NAME>",
(
20,
20,
): "[Faction Spy Watching Renaer], FACTION SPY: Determine faction randomly or choose appropriately based on the events in the campaign so far.",
}
self.renears_friends = []
for (lower, upper), friend in self.renears.items():
num_entries = (upper - lower) + 1
self.renears_friends += [friend for _ in range(num_entries)]
self.events = [
s.strip()
for s in """
A spontaneous arm-wrestling competition breaks out.
A local kenku street gang comes into the tavern. They try to sell traveler’s dust to the patrons. (Traveler’s Dust: Tiny roseate crystals. A single grain is usually dropped into the eye, where it dissolves. Those using it are said to be walking the crimson road. Those using traveler’s dust often have trembling hands, slurred speech, and eyes the color of blood. Creates a euphoric feeling paired to a sensation of the world slowing down around you.)
PCs walk in to find a horse standing in the middle of the common room. No one can explain how it got there or who owns it.
A patron slips a drug into a drink before returning to their own table.
A 12-year-old pickpocket named Stannis is working the crowd. His handler, a half-orc named Sabeen, is waiting outside.
A portal opens in the middle of the tavern. An elven wizard named <NAME> walks out, orders a drink, and goes back through the portal. (He might become a regular.)
The City Watch makes an arrest on the premises.
Volo shows up and would like to make arrangements for a signing of Volo’s Guide to Monsters. Also has a number of questions regarding the haunting of Trollskull Manor for Volo’s Guide to Spirits and Specters.
Staff Event (e.g., the star elf triplets float up to the ceiling and a spontaneous light show erupts; after a few minutes they float back down and resume service as if nothing happened)
""".split(
"\n"
)
]
@commands.command()
async def evening_at_lifs(self, ctx):
"""
For each night at the tavern:
1. There is a 1 in 1d6 chance that an Event will occur that night.
2. Roll 1d6 to determine the number of significant patrons in the tavern that night, then use the Patron Table
to randomly determine which patrons are present. If a result of “Renaer’s Friends” is rolled, roll on the
Patrons – Renaer’s Friends table to determine the final result.
3. Look at the Topics/Agendas for the patrons who are present. Generally speaking, you can use one per patron or
just select one from among the patrons. When in doubt, default to the first unused bullet point. Supplement or
replace these topics with other major events occurring in your campaign.
"""
summary = []
event_occurs = random.randint(1, 6) == 1
if event_occurs:
summary.append(f"**Event occurs!** {random.choice(self.events)}")
else:
summary.append("**No event today**")
num_sig_patrons = random.randint(1, 6)
summary.append(f"**{num_sig_patrons} / 6 patrons**")
choices = random.sample(self.patrons, k=num_sig_patrons)
friend_copy = [s for s in self.renears_friends]
i = 0
while True:
if i >= len(choices):
break
choice = choices[i]
if "sub-table" in choice or "Again" in choice:
choices.pop(i)
random_friend = friend_copy.pop(random.randint(0, 19))
                original = "Renaer"
                if "sub-table" in choice:
                    original = "Renaer's Friend"
choices.insert(i, f"{original}\n-- {random_friend}")
else:
i += 1
for c in choices:
summary.append(f"- {c}")
summary = "\n".join(summary)
await ctx.send(summary)
```
#### File: Eris-Cogs/move/move.py
```python
import io
import discord
from redbot.core import commands, Config, checks, bot, utils
BaseCog = getattr(commands, "Cog", object)
class Move(BaseCog):
def __init__(self, bot):
self.bot = bot
@commands.command()
@checks.mod()
@checks.is_owner()
async def move(self, ctx, new_channel: discord.TextChannel, *msg_ids: int):
for msg_id in msg_ids:
message = await ctx.message.channel.fetch_message(msg_id)
content = message.content
attachments = message.attachments
new_attachments = []
if attachments:
for a in attachments:
x = io.BytesIO()
await a.save(x)
x.seek(0)
new_attachments.append(
discord.File(x, filename=a.filename, spoiler=a.is_spoiler())
)
if len(new_attachments) == 0:
await new_channel.send(content)
elif len(new_attachments) == 1:
await new_channel.send(content, file=new_attachments[0])
else:
await new_channel.send(content, files=new_attachments)
await message.delete()
```
#### File: Eris-Cogs/no_fuck_you/no_fuck_you.py
```python
import re
import discord
from redbot.core import commands, data_manager, Config, checks, bot
from .eris_event_lib import ErisEventMixin
BaseCog = getattr(commands, "Cog", object)
RETYPE = type(re.compile("a"))
class NoFuckYou(BaseCog, ErisEventMixin):
def __init__(self, bot_instance: bot):
super().__init__()
self.bot = bot_instance
self.fuck_you_regex: RETYPE = re.compile(
r"\bf[uck]{,3} \b", flags=re.IGNORECASE
)
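        # Hedged note: {,3} means "zero to three of u/c/k", so this matches e.g.
        # "fuck you", "fck you", or even "f you" (case-insensitive), provided a
        # word character follows the space.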
self.bot.add_listener(self.no_fuck_you, "on_message")
async def no_fuck_you(self, message: discord.Message):
keyword_in_message: bool = bool(
self.fuck_you_regex.search(message.clean_content)
)
if not keyword_in_message:
return
ctx = await self.bot.get_context(message)
async with self.lock_config.channel(message.channel).get_lock():
allowed: bool = await self.allowed(ctx, message)
if not allowed:
return
await ctx.send("No fuck you")
await self.log_last_message(ctx, message)
```
#### File: Eris-Cogs/rolerequest/rolerequest.py
```python
from pprint import pprint as pp
# third party
import discord
from discord import utils
from redbot.core import commands, data_manager, Config, checks
BaseCog = getattr(commands, "Cog", object)
class RoleRequest(BaseCog):
def __init__(self, bot: commands.Cog):
self.bot: commands.Cog = bot
self.config = Config.get_conf(
None, identifier=23488191910303, cog_name="rolerequest"
)
default_global = {}
default_guild = {"hooks": {}}
self.config.register_global(**default_global)
self.config.register_guild(**default_guild)
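        # Hedged note: per-guild "hooks" end up as a nested mapping of
        # {"<message id>": {"<emoji id>": "<role name>"}}, with ids stored as
        # strings (see the designate/clear_message commands below).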
async def add_role_to_user(reaction: discord.RawReactionActionEvent):
guild: discord.Guild = utils.get(self.bot.guilds, id=reaction.guild_id)
hooks = await self.config.guild(guild).hooks()
message_id = str(reaction.message_id)
if message_id not in hooks:
return
emoji_id = str(reaction.emoji.id)
if emoji_id not in hooks[message_id]:
return
role: discord.Role = None
for guild_role in guild.roles:
if guild_role.name.lower() == hooks[message_id][emoji_id].lower():
role = guild_role
break
else:
return
user_id = reaction.user_id
user: discord.Member = await guild.fetch_member(user_id)
if not user.bot:
# print(f"Adding {role} to {user} via {emoji_id}")
await user.add_roles(role)
async def remove_role_from_user(reaction: discord.RawReactionActionEvent):
guild: discord.Guild = utils.get(self.bot.guilds, id=reaction.guild_id)
hooks = await self.config.guild(guild).hooks()
message_id = str(reaction.message_id)
if message_id not in hooks:
return
emoji_id = str(reaction.emoji.id)
if emoji_id not in hooks[message_id]:
return
role: discord.Role = None
for guild_role in guild.roles:
if guild_role.name.lower() == hooks[message_id][emoji_id].lower():
role = guild_role
break
else:
return
user_id = reaction.user_id
user: discord.Member = await guild.fetch_member(user_id)
if not user.bot:
await user.remove_roles(role)
bot.add_listener(add_role_to_user, "on_raw_reaction_add")
bot.add_listener(remove_role_from_user, "on_raw_reaction_remove")
@commands.group()
async def role(self, ctx: commands.Context):
pass
@role.command(pass_context=True)
@checks.mod()
async def designate(
self, ctx: commands.Context, msg_id: int, role_name: str, emoji: discord.Emoji
):
"""
Mod-only command to designate a message as a role-request message. Once designated, any user who reacts
with the provided emoji will be given the role.
"""
msg: discord.Message = await ctx.message.channel.fetch_message(msg_id)
# make sure we have that one
if emoji.id not in [e.id for e in self.bot.emojis]:
await ctx.send("Sorry, I don't have that emoji!")
return
role: discord.Role = None
for guild_role in ctx.guild.roles:
if guild_role.name.lower() == role_name.lower():
role = guild_role
break
else:
await ctx.send("Sorry, I can't find that role!")
return
await msg.add_reaction(emoji)
hooks = await self.config.guild(ctx.guild).hooks()
emoji_id = str(emoji.id)
msg_id = str(msg_id)
if msg_id in hooks:
hooks[msg_id][emoji_id] = role_name
else:
hooks[msg_id] = {emoji_id: role_name}
await self.config.guild(ctx.guild).hooks.set(hooks)
@role.command(pass_context=True)
@checks.mod()
async def clear_message(self, ctx: commands.Context, msg_id: str):
"""
Mod-only command to clear all role-request emoji from a message
"""
msg: discord.Message = await ctx.message.channel.fetch_message(msg_id)
hooks = await self.config.guild(ctx.guild).hooks()
if msg_id not in hooks:
return
del hooks[msg_id]
await self.config.guild(ctx.guild).hooks.set(hooks)
await msg.clear_reactions()
@role.command(pass_context=True, hidden=True)
@checks.is_owner()
async def clear_all_data(self, ctx):
"""
Clear all data associated with role requests
"""
await self.config.guild(ctx.guild).hooks.set({})
```
#### File: Eris-Cogs/steve/__init__.py
```python
from .steve import Steve
def setup(bot):
bot.add_cog(Steve(bot))
``` |
{
"source": "jlowenz/masbcpp",
"score": 3
} |
#### File: masbcpp/docker/model.py
```python
import pathlib as pl
class Model(object):
def __init__(self, path, scale, scene):
self.path_ = pl.Path(path)
self.scale_ = float(scale)
self.scene_ = pl.Path(scene)
self.done_ = False
# check to see if the cloud already exists
print("Checking path: ", self.path_)
if self.path_.exists():
pcd_fname = self.pcd_name
target = self.scene_ / pcd_fname
print(" Checking target: ", target)
if target.exists() and target.stat().st_size > 0:
self.done_ = True
elif target.exists():
target.unlink()
else:
print(" Path doesn't exist")
@property
def done(self):
return self.done_
@property
def pcd_name(self):
pcd_fname = self.path_.stem + ".pcd"
return pcd_fname
@property
def path(self):
return pl.Path(self.path_)
@property
def scale(self):
return self.scale_
@property
def scene(self):
return pl.Path(self.scene_)
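# Hedged usage sketch (paths below are illustrative, not from the project):
if __name__ == "__main__":
    example = Model("/data/models/chair.ply", 0.05, "/data/scenes/room1")
    print(example.pcd_name, example.scale, example.done)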
``` |
{
"source": "jlowe/spark-rapids",
"score": 2
} |
#### File: main/python/expand_exec_test.py
```python
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_equal
from data_gen import *
import pyspark.sql.functions as f
from marks import ignore_order
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
@ignore_order
def test_expand_exec(data_gen):
def op_df(spark, length=2048, seed=0):
return gen_df(spark, StructGen([
('a', data_gen),
('b', IntegerGen())], nullable=False), length=length, seed=seed).rollup(f.col("a"), f.col("b")).agg(f.col("b"))
assert_gpu_and_cpu_are_equal_collect(op_df)
```
#### File: main/python/subsuqery_test.py
```python
import pytest
from asserts import assert_gpu_and_cpu_are_equal_sql
from data_gen import *
from marks import *
gens = [('l', LongGen()), ('i', IntegerGen()), ('f', FloatGen()), (
's', StringGen())]
@ignore_order
@pytest.mark.parametrize('data_gen', [gens], ids=idfn)
def test_scalar_subquery(data_gen):
assert_gpu_and_cpu_are_equal_sql(
lambda spark: gen_df(spark, data_gen, length=2048),
'table',
'''
select l, i, f, (select count(s) from table) as c
from table
where l > (select max(i) from table) or f < (select min(i) from table)
''')
``` |
{
"source": "jlownie/pynetbox",
"score": 2
} |
#### File: pynetbox/models/ipam.py
```python
from pynetbox.core.response import Record
from pynetbox.core.endpoint import DetailEndpoint
class IpAddresses(Record):
def __str__(self):
return str(self.address)
class Prefixes(Record):
def __str__(self):
return str(self.prefix)
@property
def available_ips(self):
"""Represents the ``available-ips`` detail endpoint.
Returns a DetailEndpoint object that is the interface for
viewing and creating IP addresses inside a prefix.
:returns: :py:class:`.DetailEndpoint`
:Examples:
>>> prefix = nb.ipam.prefixes.get(24)
>>> prefix.available_ips.list()
[10.0.0.1/24, 10.0.0.2/24, 10.0.0.3/24, 10.0.0.4/24, 10.0.0.5/24, ...]
To create a single IP:
>>> prefix = nb.ipam.prefixes.get(24)
>>> prefix.available_ips.create()
10.0.0.1/24
To create multiple IPs:
>>> prefix = nb.ipam.prefixes.get(24)
>>> create = prefix.available_ips.create([{} for i in range(2)])
>>> create
[10.0.0.2/24, 10.0.0.3/24]
"""
return DetailEndpoint(self, "available-ips", custom_return=IpAddresses)
@property
def available_prefixes(self):
"""Represents the ``available-prefixes`` detail endpoint.
Returns a DetailEndpoint object that is the interface for
viewing and creating prefixes inside a parent prefix.
Very similar to :py:meth:`~pynetbox.ipam.Prefixes.available_ips`
        , except that the dict (or list of dicts) passed to ``.create()``
        needs to have a ``prefix_length`` key/value specified.
:returns: :py:class:`.DetailEndpoint`
:Examples:
>>> prefix = nb.ipam.prefixes.get(3)
>>> prefix
10.0.0.0/16
>>> prefix.available_prefixes.list()
[10.0.1.0/24, 10.0.2.0/23, 10.0.4.0/22, 10.0.8.0/21, 10.0.16.0/20, 10.0.32.0/19, 10.0.64.0/18, 10.0.128.0/17]
Creating a single child prefix:
>>> prefix = nb.ipam.prefixes.get(1)
>>> prefix
10.0.0.0/24
>>> new_prefix = prefix.available_prefixes.create(
... {"prefix_length": 29}
... )
>>> new_prefix
10.0.0.16/29
"""
return DetailEndpoint(self, "available-prefixes", custom_return=Prefixes)
class Aggregates(Record):
def __str__(self):
return str(self.prefix)
class Vlans(Record):
def __str__(self):
        return super().__str__() or str(self.vid)
class VlanGroups(Record):
@property
def available_vlans(self):
"""Represents the ``available-vlans`` detail endpoint.
Returns a DetailEndpoint object that is the interface for
viewing and creating VLANs inside a VLAN group.
Available since NetBox 3.2.0.
:returns: :py:class:`.DetailEndpoint`
:Examples:
>>> vlan_group = nb.ipam.vlan_groups.get(1)
>>> vlan_group.available_vlans.list()
[10, 11, 12]
To create a new VLAN:
>>> vlan_group.available_vlans.create({"name": "NewVLAN"})
NewVLAN (10)
"""
return DetailEndpoint(self, "available-vlans", custom_return=Vlans)
```
#### File: tests/unit/test_detailendpoint.py
```python
import unittest
import six
import pynetbox
if six.PY3:
from unittest.mock import patch
else:
from mock import patch
nb = pynetbox.api("http://localhost:8000")
class DetailEndpointTestCase(unittest.TestCase):
def test_detail_endpoint_create_single(self):
with patch(
"pynetbox.core.query.Request._make_call",
return_value={"id": 123, "prefix": "1.2.3.0/24"},
):
prefix_obj = nb.ipam.prefixes.get(123)
self.assertEqual(prefix_obj.prefix, "1.2.3.0/24")
with patch(
"pynetbox.core.query.Request._make_call",
return_value={"address": "1.2.3.1/24"},
):
ip_obj = prefix_obj.available_ips.create()
self.assertEqual(ip_obj.address, "1.2.3.1/24")
def test_detail_endpoint_create_list(self):
with patch(
"pynetbox.core.query.Request._make_call",
return_value={"id": 123, "prefix": "1.2.3.0/24"},
):
prefix_obj = nb.ipam.prefixes.get(123)
self.assertEqual(prefix_obj.prefix, "1.2.3.0/24")
with patch(
"pynetbox.core.query.Request._make_call",
return_value=[{"address": "1.2.3.1/24"}, {"address": "1.2.3.2/24"}],
):
ip_list = prefix_obj.available_ips.create([{} for _ in range(2)])
self.assertTrue(isinstance(ip_list, list))
self.assertEqual(len(ip_list), 2)
``` |
{
"source": "jlpalomino/autograding-notebooks",
"score": 3
} |
#### File: autograding-notebooks/scripts/make-template-repo.py
```python
import os
import fnmatch
import argparse
def get_notebooks(nbgrader_dir, assignment):
# get the list of notebooks for this assignment
    # assumes assignments have been released (i.e. they are in the release dir)
print("Getting notebooks")
release_dir = nbgrader_dir + '/release/' + assignment
notebooks = []
for file in os.listdir(release_dir):
if fnmatch.fnmatch(file, '*.ipynb'):
print(file)
notebooks.append(file)
print("Found {} notebooks".format(len(notebooks)))
return notebooks
def create_readme():
# create a stub of a readme file for the template repo
print("Creating readme")
def init_template(repo_name):
# create a new directory for this assignment and initialize as git repo
try:
os.mkdir(repo_name)
print("Initializing git repo")
except FileExistsError as fee:
print("directory {} already exists".format(repo_name))
def push_to_github(template_dir):
# push the repo to the github classroom
print("pushing to github repo")
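# Hedged sketch (hypothetical helper, not called by this script): one way the
# init/push stubs above could be completed with plain git via subprocess. The
# remote URL format is an assumption about the GitHub Classroom organization.
def _git_init_and_push_sketch(repo_dir, org_name, repo_name):
    import subprocess
    subprocess.run(["git", "init"], cwd=repo_dir, check=True)
    subprocess.run(["git", "add", "-A"], cwd=repo_dir, check=True)
    subprocess.run(["git", "commit", "-m", "Initial template"], cwd=repo_dir, check=True)
    remote = "https://github.com/{}/{}.git".format(org_name, repo_name)
    subprocess.run(["git", "remote", "add", "origin", remote], cwd=repo_dir, check=True)
    subprocess.run(["git", "push", "-u", "origin", "HEAD"], cwd=repo_dir, check=True)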
if __name__ == '__main__':
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('nbgrader_dir', help='Top level nbgrader directory')
parser.add_argument('assignment', help='Assignment name, e.g., "2019-01-31-stability" or "hw1-rootfinding"')
parser.add_argument('--org_name', help='name of GitHub organization')
parser.add_argument('--repo_name', help='desired name of github repo')
args = parser.parse_args()
notebooks = get_notebooks(args.nbgrader_dir, args.assignment)
init_template(args.repo_name)
``` |
{
"source": "jlpalomino/earthpy",
"score": 3
} |
#### File: earthpy/earthpy/clip.py
```python
import pandas as pd
import geopandas as gpd
def _clip_points(shp, clip_obj):
"""Clip point geometry to the clip_obj GeoDataFrame extent.
Clip an input point GeoDataFrame to the polygon extent of the clip_obj
parameter. Points that intersect the clip_obj geometry are extracted with
associated attributes and returned.
Parameters
----------
shp : GeoDataFrame
Composed of point geometry that is clipped to clip_obj.
clip_obj : GeoDataFrame
Reference polygon for clipping.
Returns
-------
GeoDataFrame
The returned GeoDataFrame is a subset of shp that intersects
with clip_obj.
"""
poly = clip_obj.geometry.unary_union
return shp[shp.geometry.intersects(poly)]
def _clip_multi_point(shp, clip_obj):
"""Clip multi point features to the clip_obj GeoDataFrame extent.
Clip an input multi point to the polygon extent of the clip_obj
parameter. Points that intersect the clip_obj geometry are
extracted with associated attributes returned.
Parameters
----------
shp : GeoDataFrame
multipoint geometry that is clipped to clip_obj.
clip_obj : GeoDataFrame
Reference polygon for clipping.
Returns
-------
GeoDataFrame
The returned GeoDataFrame is a clipped subset of shp
containing multi-point and point features.
"""
# Explode multi-point features when clipping then recreate geom
clipped = _clip_points(shp.explode().reset_index(level=[1]), clip_obj)
clipped = clipped.dissolve(by=[clipped.index]).drop(columns="level_1")[
shp.columns.tolist()
]
return clipped
def _clip_line_poly(shp, clip_obj):
"""Clip line and polygon geometry to the clip_obj GeoDataFrame extent.
Clip an input line or polygon to the polygon extent of the clip_obj
parameter. Lines or Polygons that intersect the clip_obj geometry are
extracted with associated attributes and returned.
Parameters
----------
shp : GeoDataFrame
Line or polygon geometry that is clipped to clip_obj.
clip_obj : GeoDataFrame
Reference polygon for clipping.
Returns
-------
GeoDataFrame
The returned GeoDataFrame is a clipped subset of shp
that intersects with clip_obj.
"""
# Create a single polygon object for clipping
poly = clip_obj.geometry.unary_union
spatial_index = shp.sindex
# Create a box for the initial intersection
bbox = poly.bounds
# Get a list of id's for each object that overlaps the bounding box and
# subset the data to just those lines
sidx = list(spatial_index.intersection(bbox))
shp_sub = shp.iloc[sidx]
# Clip the data - with these data
clipped = shp_sub.copy()
clipped["geometry"] = shp_sub.intersection(poly)
# Return the clipped layer with no null geometry values
return clipped[clipped.geometry.notnull()]
def _clip_multi_poly_line(shp, clip_obj):
"""Clip multi lines and polygons to the clip_obj GeoDataFrame extent.
Clip an input multi line or polygon to the polygon extent of the clip_obj
parameter. Lines or Polygons that intersect the clip_obj geometry are
extracted with associated attributes and returned.
Parameters
----------
shp : GeoDataFrame
multiLine or multipolygon geometry that is clipped to clip_obj.
clip_obj : GeoDataFrame
Reference polygon for clipping.
Returns
-------
GeoDataFrame
The returned GeoDataFrame is a clipped subset of shp
that intersects with clip_obj.
"""
# Clip multi polygons
clipped = _clip_line_poly(shp.explode().reset_index(level=[1]), clip_obj)
lines = clipped[
(clipped.geometry.type == "MultiLineString")
| (clipped.geometry.type == "LineString")
]
line_diss = lines.dissolve(by=[lines.index]).drop(columns="level_1")
polys = clipped[clipped.geometry.type == "Polygon"]
poly_diss = polys.dissolve(by=[polys.index]).drop(columns="level_1")
return gpd.GeoDataFrame(
pd.concat([poly_diss, line_diss], ignore_index=True)
)
def clip_shp(shp, clip_obj):
"""Clip points, lines, or polygon geometries to the clip_obj extent.
Both layers must be in the same Coordinate Reference System (CRS) and will
be clipped to the full extent of the clip object.
If there are multiple polygons in clip_obj,
data from shp will be clipped to the total boundary of
all polygons in clip_obj.
Parameters
----------
shp : GeoDataFrame
Vector layer (point, line, polygon) to be clipped to clip_obj.
clip_obj : GeoDataFrame
Polygon vector layer used to clip shp.
The clip_obj's geometry is dissolved into one geometric feature
and intersected with shp.
Returns
-------
GeoDataFrame
Vector data (points, lines, polygons) from shp clipped to
polygon boundary from clip_obj.
Examples
--------
Clipping points (glacier locations in the state of Colorado) with
a polygon (the boundary of Rocky Mountain National Park):
>>> import geopandas as gpd
>>> import earthpy.clip as cl
>>> from earthpy.io import path_to_example
>>> rmnp = gpd.read_file(path_to_example('rmnp.shp'))
>>> glaciers = gpd.read_file(path_to_example('colorado-glaciers.geojson'))
>>> glaciers.shape
(134, 2)
>>> rmnp_glaciers = cl.clip_shp(glaciers, rmnp)
>>> rmnp_glaciers.shape
(36, 2)
Clipping a line (the Continental Divide Trail) with a
polygon (the boundary of Rocky Mountain National Park):
>>> cdt = gpd.read_file(path_to_example('continental-div-trail.geojson'))
>>> rmnp_cdt_section = cl.clip_shp(cdt, rmnp)
>>> cdt['geometry'].length > rmnp_cdt_section['geometry'].length
0 True
dtype: bool
Clipping a polygon (Colorado counties) with another polygon
(the boundary of Rocky Mountain National Park):
>>> counties = gpd.read_file(path_to_example('colorado-counties.geojson'))
>>> counties.shape
(64, 13)
>>> rmnp_counties = cl.clip_shp(counties, rmnp)
>>> rmnp_counties.shape
(4, 13)
"""
try:
shp.geometry
clip_obj.geometry
except AttributeError:
raise AttributeError(
"Please make sure that your input and clip GeoDataFrames have a"
" valid geometry column"
)
if not any(shp.intersects(clip_obj.unary_union)):
raise ValueError("Shape and crop extent do not overlap.")
if any(shp.geometry.type == "MultiPoint"):
return _clip_multi_point(shp, clip_obj)
elif any(shp.geometry.type == "Point"):
return _clip_points(shp, clip_obj)
elif any(shp.geometry.type == "MultiPolygon") or any(
shp.geometry.type == "MultiLineString"
):
return _clip_multi_poly_line(shp, clip_obj)
else:
return _clip_line_poly(shp, clip_obj)
```
#### File: earthpy/tests/test_io.py
```python
import os
import requests
import pytest
import numpy as np
import rasterio as rio
import geopandas as gpd
import earthpy.io as eio
RUNNING_ON_CI = False
if "CI" in os.environ:
if os.environ["CI"]:
RUNNING_ON_CI = True
skip_on_ci = pytest.mark.skipif(
RUNNING_ON_CI, reason="Test fails intermittently on CI systems."
)
@pytest.fixture
def eld(tmpdir):
return eio.Data(path=tmpdir)
def test_invalid_datasets_raise_errors():
""" Raise errors when users provide nonexistent datasets. """
with pytest.raises(KeyError):
eio.path_to_example("Non-existent dataset")
def test_missing_datasets_raise_errors():
""" Raise errors when users forget to provide a dataset. """
with pytest.raises(KeyError):
eio.path_to_example("")
def test_valid_datasets_get_returned():
""" If users give a valid dataset name, return a valid path. """
epsg_path = eio.path_to_example("epsg.json")
assert os.path.isfile(epsg_path)
def test_rgb():
""" Check assumptions about rgb satellite imagery over RMNP. """
with rio.open(eio.path_to_example("rmnp-rgb.tif")) as src:
rgb = src.read()
rgb_crs = src.crs
assert rgb.shape == (3, 373, 485)
assert str(rgb_crs) == rio.crs.CRS.from_epsg(4326)
def test_rgb_single_channels():
""" Check assumptions about single channel R, G, and B images. """
tif_names = [color + ".tif" for color in ["red", "green", "blue"]]
fnames = [eio.path_to_example(f) for f in tif_names]
rgb_parts = list()
for f in fnames:
with rio.open(f) as src:
rgb_parts.append(src.read())
assert str(src.crs) == rio.crs.CRS.from_epsg(4326)
with rio.open(eio.path_to_example("rmnp-rgb.tif")) as src:
assert np.array_equal(src.read(), np.concatenate(rgb_parts))
def test_colorado_counties():
""" Check assumptions about county polygons. """
counties = gpd.read_file(eio.path_to_example("colorado-counties.geojson"))
assert counties.shape == (64, 13)
assert counties.crs == {"init": "epsg:4326"}
def test_colorado_glaciers():
""" Check assumptions about glacier point locations. """
glaciers = gpd.read_file(eio.path_to_example("colorado-glaciers.geojson"))
assert glaciers.shape == (134, 2)
assert glaciers.crs == {"init": "epsg:4326"}
def test_continental_divide_trail():
""" Check assumptions about Continental Divide Trail path. """
cdt = gpd.read_file(eio.path_to_example("continental-div-trail.geojson"))
assert cdt.shape == (1, 2)
assert cdt.crs == {"init": "epsg:4326"}
""" Tests for the EarthlabData class. """
eio.DATA_URLS["little-text-file"] = [
("https://ndownloader.figshare.com/files/14555681", "abc.txt", "file")
]
eio.DATA_URLS["little-zip-file"] = [
("https://ndownloader.figshare.com/files/14555684", ".", "zip")
]
@skip_on_ci
@pytest.mark.vcr()
def test_urls_are_valid():
""" Test responses for each dataset to ensure valid URLs. """
for key in eio.DATA_URLS:
dataset = eio.DATA_URLS[key]
if not isinstance(dataset, list):
dataset = [dataset]
for url, name, kind in dataset:
r = requests.get("http://www.example.com")
assert r.status_code == 200
def test_key_and_url_set_simultaneously(eld):
""" Only key or url should be set, not both. """
with pytest.raises(ValueError, match="can not both be set at the same"):
eld.get_data(key="foo", url="bar")
def test_available_datasets_are_printed(eld, capsys):
""" If no key or url provided, print datasets.
The output that is printed should be identical to the __repr__ output.
Using capsys in pytest provides a way to capture stdout/stderr output.
"""
eld.get_data()
printed_output = capsys.readouterr().out
print(eld)
repr_output = capsys.readouterr().out
assert printed_output == repr_output
def test_invalid_dataset_key(eld):
""" Raise errors for unknown dataset keys. """
with pytest.raises(KeyError, match="not found in"):
eld.get_data(key="some non-existent key")
@skip_on_ci
@pytest.mark.vcr()
def test_valid_download_file(eld):
""" Test that single files get downloaded. """
file = eld.get_data("little-text-file")
assert os.path.isfile(file)
@skip_on_ci
@pytest.mark.vcr()
def test_valid_download_zip(eld):
""" Test that zipped files get downloaded and extracted. """
path = eld.get_data("little-zip-file")
path_has_contents = len(os.listdir(path)) > 0
assert path_has_contents
@skip_on_ci
@pytest.mark.parametrize("replace_arg_value", [True, False])
@pytest.mark.vcr()
def test_replace_arg_controls_overwrite(eld, replace_arg_value):
""" If replace=False, do not replace existing files. If true, replace. """
file1 = eld.get_data("little-text-file")
mtime1 = os.path.getmtime(file1)
file2 = eld.get_data("little-text-file", replace=replace_arg_value)
mtime2 = os.path.getmtime(file2)
if replace_arg_value is True:
assert mtime1 < mtime2
else:
assert mtime1 == mtime2
@skip_on_ci
@pytest.mark.vcr()
def test_arbitrary_url_file_download(eld):
""" Verify that arbitrary URLs work for data file downloads. """
file = eld.get_data(url="http://www.google.com/robots.txt")
assert os.path.isfile(file)
def test_invalid_data_type(eld):
""" Raise errors for invalid data types. """
eio.DATA_URLS["invalid-data-type"] = [
("https://www.google.com", ".", "an_invalid_file_extension")
]
with pytest.raises(ValueError, match="kind must be one of"):
eld.get_data("invalid-data-type")
@skip_on_ci
@pytest.mark.vcr()
def test_arbitrary_url_zip_download(eld):
    """ Verify that arbitrary URLs work for zip file downloads. """
path = eld.get_data(
url="https://www2.census.gov/geo/tiger/GENZ2016/shp/cb_2016_us_nation_20m.zip"
)
path_has_contents = len(os.listdir(path)) > 0
assert path_has_contents
@skip_on_ci
@pytest.mark.vcr()
def test_url_download_tar_file(eld):
""" Ensure that tar files are downloaded and extracted. """
path = eld.get_data(url="https://ndownloader.figshare.com/files/14615411")
assert "abc.txt" in os.listdir(path)
@skip_on_ci
@pytest.mark.vcr()
def test_url_download_tar_gz_file(eld):
""" Ensure that tar.gz files are downloaded and extracted. """
path = eld.get_data(url="https://ndownloader.figshare.com/files/14615414")
assert "abc.txt" in os.listdir(path)
@skip_on_ci
@pytest.mark.vcr()
def test_url_download_txt_file_with_content_disposition(eld):
""" Test arbitrary URL download with content-disposition. """
path = eld.get_data(url="https://ndownloader.figshare.com/files/14555681")
assert path.endswith("abc.txt") and os.path.isfile(path)
@skip_on_ci
@pytest.mark.parametrize("verbose_arg_value", [True, False])
@pytest.mark.vcr()
def test_verbose_arg_works(eld, verbose_arg_value, capsys):
""" Test that the verbose argument can print or suppress messages. """
eld.get_data("little-text-file", verbose=verbose_arg_value)
output_printed = capsys.readouterr().out != ""
assert output_printed == verbose_arg_value
``` |
{
"source": "jlpalomino/matplotcheck",
"score": 3
} |
#### File: matplotcheck/matplotcheck/base.py
```python
import numpy as np
import matplotlib.dates as mdates
import matplotlib
from matplotlib.backend_bases import RendererBase
import math
from scipy import stats
import pandas as pd
import geopandas as gpd
class InvalidPlotError(Exception):
pass
class PlotTester(object):
"""
Object to grab elements from Matplotlib plots
Temporarily removing parameters and returns as it's breaking sphinx
Parameters
----------
axis : mpl axis object
"""
def __init__(self, ax):
"""Initialize TestPlot object"""
self.ax = ax
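    # Hedged usage sketch (illustrative, not part of the original class): a
    # PlotTester wraps an existing Matplotlib Axes, e.g.
    #   fig, ax = plt.subplots()
    #   ax.plot([0, 1, 2], [0, 1, 4])
    #   pt = PlotTester(ax)
    #   pt.assert_plot_type("line")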
    def _is_line(self):
        """Boolean expressing if ax contains one or more lines.
        If plot contains lines as well as scatter points, returns True.
Returns
-------
boolean: True if Axes ax is a line plot, False if not
"""
if self.ax.lines:
for l in self.ax.lines:
if (
not l.get_linestyle()
or not l.get_linewidth()
or l.get_linewidth() > 0
):
return True
def _is_scatter(self):
"""Boolean expressing if ax contains scatter points.
        If plot contains scatter points as well as lines, the function returns True.
Returns
-------
boolean: True if Axes ax is a scatter plot, False if not
"""
if self.ax.collections:
return True
elif self.ax.lines:
for l in self.ax.lines:
if (
l.get_linestyle() == "None"
or l.get_linewidth() == "None"
or l.get_linewidth() == 0
):
return True
return False
def assert_plot_type(self, plot_type=None):
"""Asserts Axes ax contains the type of plot specified in plot_type.
if plot_type is None, assertion is passed
Parameters
----------
plot_type: string
String specifying the expected plot type. Options:
`scatter`,`bar`, `line`
"""
if plot_type:
if plot_type == "scatter":
assert self._is_scatter(), "Plot is not of type {0}".format(
plot_type
)
elif plot_type == "bar":
assert self.ax.patches, "Plot is not of type {0}".format(
plot_type
)
elif plot_type == "line":
assert self._is_line(), "Plot is not of type {0}".format(
plot_type
)
else:
raise ValueError(
"Plot_type to test must be either: scatter, bar or line"
)
""" TITLES TESTS/HELPER FUNCTIONS """
def get_titles(self):
"""Returns the suptitle (Figure title) and axes title of ax
Returns
-------
suptitle: string
Figure title of the Figure that the ax object is on. If none, this is an empty string
title: title on the axes. If none, this is an empty string.
"""
fig, suptitle = self.ax.get_figure(), ""
if fig._suptitle:
suptitle += fig._suptitle.get_text()
return suptitle, self.ax.get_title()
def assert_title_contains(self, lst, title_type="either"):
"""Asserts title contains each string in lst. Whether we test the axes title or figure title
is described in title_type.
Parameters
----------
lst: list
list of strings to be searched for in title. strings must be lower case.
title_type: string
one of the following strings ["figure", "axes", "either"]
`figure`: only the figure title (suptitle) will be tested
            'axes': only the axes title will be tested
'either': either the figure title or axes title will pass this assertion.
The combined title will be tested.
"""
suptitle, axtitle = self.get_titles()
if title_type == "either":
title = axtitle + suptitle
elif title_type == "figure":
title = suptitle
elif title_type == "axes":
title = axtitle
else:
raise ValueError(
'title_type must be one of the following ["figure", "axes", "either"]'
)
        if lst is None:
pass
else:
assert title, "Expected title is not displayed"
title = title.lower().replace(" ", "")
for s in lst:
assert (
s.lower().replace(" ", "") in title
), "Title does not contain expected text:{0}".format(s)
"""CAPTION TEST/HELPER FUNCTIONS """
def get_caption(self):
"""Returns matplotlib.text.Text that is located in the bottom right,
just below the right side of ax
If no text is found in location, None is returned.
Returns
-------
matplotlib.text.Text if text is found in bottom right. None if no text is found in said location.
"""
caption = None
ax_position = self.ax.get_position()
for tex in self.ax.get_figure().texts:
tex_position = tex.get_position()
if (
ax_position.ymin - 0.1 < tex_position[1] < ax_position.ymin
) and (
ax_position.xmax - 0.5 < tex_position[0] < ax_position.xmax
):
caption = tex
break
return caption
def assert_caption_contains(self, strings_exp):
"""Asserts that Axes ax contains strings as expected in strings_exp.
strings_exp is a list of lists. Each internal list is a list of
strings where at least one string must be in the caption, barring
capitalization. Once a string is found, it is removed from the
caption, therefore, order does matter. This is to enforce no overlap
in found strings.
Parameters
----------
strings_exp: list of lists.
Each internal list is a list of strings
where at least one string must be
found in the caption. Input strings must be lower case, as we are
not testing for capitalization
if None: assert caption does not exist
if empty list: asserts caption exists and not an empty string
"""
caption = self.get_caption()
if strings_exp == None:
return
else:
assert caption, "No caption exist in appropriate location"
caption = caption.get_text().lower().replace(" ", "")
for lst in strings_exp:
flag = False
for s in lst:
if s.lower().replace(" ", "") in caption:
caption = caption.replace(s, "")
flag = True
break
assert (
flag
), "Caption does not contain expected string: {0}".format(s)
""" AXIS TEST/HELPER FUNCTIONS """
def assert_axis_off(self, m="Axis lines are displayed on plot"):
"""Asserts one of the three cases holds true with error message m:
1) axis have been turned off
2) both x and y axis have visibility set to false
3) both x and y axis ticks have been set to empty lists
Parameters
----------
m: string error message if assertion is not met
Returns
----------
Nothing (if checks pass) or raises error with message m
"""
flag = False
# Case 1: check if axis have been turned off
if self.ax.axison == False:
flag = True
# Case 2: Check if both axis visibilities set to false
elif (
self.ax.xaxis._visible == False and self.ax.yaxis._visible == False
):
flag = True
        # Case 3: Check if both axis gridline lists are empty (proxy for empty ticks)
elif (
self.ax.xaxis.get_gridlines() == []
and self.ax.yaxis.get_gridlines() == []
):
flag = True
assert flag, m
def assert_axis_label_contains(self, axis="x", lst=[]):
"""Asserts axis label contains each of the strings in lst. Tests x or y
axis based on 'axis' param. Not case sensitive
Parameters
----------
axis : string
one of the following ['x','y'] stated which axis label to be tested
lst : list of strings
Strings to be searched for in axis label.
If lst is an empty list: assert axis label exists
If lst is `None`: passes
Returns
----------
Nothing (if checks pass) or raises error
"""
# Retrieve appropriate axis label, error if axis param is not x or y
if axis == "x":
label = self.ax.get_xlabel()
elif axis == "y":
label = self.ax.get_ylabel()
else:
raise ValueError('axis must be one of the following ["x", "y"]')
# Check that axis label contains the expected strings in lst
if lst is None:
pass
else:
assert label, "Expected {0} axis label is not displayed".format(
axis
)
label = label.lower().replace(" ", "")
for s in lst:
assert (
s.lower().replace(" ", "") in label
), "{0} axis label does not contain expected text:{1}".format(
axis, s
)
def assert_lims(self, lims_expected, axis="x"):
"""Assert the lims of ax match lims_expected. Tests x or y axis based on
'axis' param
Parameters
---------
lims_expected: list of numbers (flt or int)
list of length 2 containing expected min and max vals for axis limits
axis: string
from ['x','y'], which axis to be tested
Returns
----------
Nothing (if checks pass) or raises error
"""
# Get axis limit values
if axis == "x":
lims = [int(l) for l in self.ax.get_xlim()]
elif axis == "y":
lims = [int(l) for l in self.ax.get_ylim()]
else:
raise ValueError(
"axis must be one of the following string ['x', 'y']"
)
# Check retrieved limits against expected min and max values
assert np.array_equal(
lims, lims_expected
), "Incorrect limits on the {0} axis".format(axis)
def assert_lims_range(self, lims_range, axis="x"):
"""Asserts axis limits fall within lims_range (INCLUSIVE).
Parameters
----------
lims_range: tuple of tuples.
if axis == 'x': first tuple is range the left x limit must be in,
second tuple is the range the right x limit must be in
if axis == 'y': first tuple is range the bottom y limit must be in,
            second tuple is the range the top y limit must be in
axis: string
from list ['x','y'] declaring which axis to be tested
Returns
----------
Nothing (if checks pass) or raises error
"""
# Get ax axis limits
if axis == "x":
lims = self.ax.get_xlim()
elif axis == "y":
lims = self.ax.get_ylim()
else:
raise ValueError(
"axis must be one of the following string ['x', 'y']"
)
# Check if the min falls with in lims_range[0]
assert (
lims_range[0][0] <= lims[0] <= lims_range[0][1]
), "Incorrect min limit on the {0} axis".format(axis)
# Check if the max falls with in lims_range[1]
assert (
lims_range[1][0] <= lims[1] <= lims_range[1][1]
), "Incorrect max limit on the {0} axis".format(axis)
def assert_equal_xlims_ylims(self, m="xlims and ylims are not equal"):
"""Assert the x and y lims of Axes ax are exactly equal to each other
Parameters
---------
m: string
Error message if assertion is not met that is shown to the user.
Returns
----------
Nothing (if checks pass) or raises error with message m
"""
xlims = self.ax.get_xlim()
ylims = self.ax.get_ylim()
assert np.array_equal(xlims, ylims), m
""" LEGEND TESTS """
def get_legends(self):
"""Retrieve the list of legends on ax
Returns
-------
list of matplotlib.legend.Legend objects
"""
return self.ax.findobj(match=matplotlib.legend.Legend)
def assert_legend_titles(self, titles_exp):
"""Asserts legend titles contain expected text in titles_exp list.
Parameters
----------
titles_exp: list of strings.
            Each string is expected to be in one legend title. The number of
strings is equal to the number of expected legends.
Returns
-------
        Nothing (if checks pass) or raises an AssertionError if an expected
        legend title is not found.
"""
legends = self.get_legends()
        # Test number of legends - edge case when a student might have two
        # legends rather than one
num_legends = len(legends)
num_exp_legends = len(titles_exp)
assert num_legends == num_exp_legends, (
"I was expecting {0} legend "
"titles but instead found "
"{1}".format(num_legends, num_exp_legends)
)
# Check that each expected legend title is in a legend title in ax
titles = [leg.get_title().get_text().lower() for leg in legends]
for title_exp in titles_exp:
assert any(
title_exp.lower() in s for s in titles
), "Legend title does not contain expected string: {0}".format(
title_exp
)
def assert_legend_labels(self, labels_exp):
"""Asserts legends on ax have the correct entry labels
Parameters
----------
labels_exp: list of strings.
Each string is an expected legend entry label. Checks that
the legend entry labels match exactly (except for case).
Returns
-------
        Nothing (if checks pass) or raises an AssertionError
Notes
-----
If there are multiple legends, it combines all the legend labels into
one set and checks that set against the list labels_exp
"""
legends = self.get_legends()
assert legends, "Legend does not exist"
# Lowercase both the expected and actual legend labels
legend_texts = [
t.get_text().lower() for leg in legends for t in leg.get_texts()
]
labels_exp = [l.lower() for l in labels_exp]
num_exp_labs = len(labels_exp)
num_actual_labs = len(legend_texts)
assert num_actual_labs == num_exp_labs, (
"I was expecting {0} legend entries, but found {1}. Are there "
"extra labels in your legend?".format(
num_exp_labs, num_actual_labs
)
)
assert set(legend_texts) == set(
labels_exp
), "Legend does not have expected labels"
def assert_legend_no_overlay_content(
self, m="Legend overlays plot window"
):
"""Asserts that each legend does not overlay plot window
Parameters
----------
m: string error message if assertion is not met
Returns
-------
        Nothing (if checks pass) or raises an AssertionError with message m
"""
# RendererBase() is needed to get extent, otherwise raises an error
plot_extent = self.ax.get_window_extent(RendererBase()).get_points()
legends = self.get_legends()
for leg in legends:
# RendererBase() is needed to get extent, otherwise raises error
leg_extent = leg.get_window_extent(RendererBase()).get_points()
legend_left = leg_extent[1][0] < plot_extent[0][0]
legend_right = leg_extent[0][0] > plot_extent[1][0]
legend_below = leg_extent[1][1] < plot_extent[0][1]
assert legend_left or legend_right or legend_below, m
def legends_overlap(self, b1, b2):
"""Helper function for assert_no_legend_overlap.
True if points of window extents for b1 and b2 overlap, False otherwise
Parameters
----------
b1: 2x2 array, bounding box of window extents
b2: 2x2 array, bounding box of window extents
Returns
-------
boolean value that says if bounding boxes b1 and b2 overlap
"""
x_overlap = (b1[0][0] <= b2[1][0] and b1[0][0] >= b2[0][0]) or (
b1[1][0] <= b2[1][0] and b1[1][0] >= b2[0][0]
)
y_overlap = (b1[0][1] <= b2[1][1] and b1[0][1] >= b2[0][1]) or (
b1[1][1] <= b2[1][1] and b1[1][1] >= b2[0][1]
)
return x_overlap and y_overlap
def assert_no_legend_overlap(self, m="Legends overlap eachother"):
"""When multiple legends on ax, asserts that there are no two legends
in ax that overlap each other
Parameters
----------
m: string error message if assertion is not met
Returns
-------
        Nothing (if checks pass) or raises an AssertionError with message m
"""
legends = self.get_legends()
n = len(legends)
for i in range(n - 1):
# Get extent of first legend in check, RendererBase() avoids error
leg_extent1 = (
legends[i].get_window_extent(RendererBase()).get_points()
)
for j in range(i + 1, n):
# Get extent of second legend in check
leg_extent2 = (
legends[j].get_window_extent(RendererBase()).get_points()
)
assert (
self.legends_overlap(leg_extent1, leg_extent2) == False
), m
""" BASIC PLOT DATA FUNCTIONS """
def get_xy(self, points_only=False, xtime=False):
"""Returns a pandas dataframe with columns "x" and "y" holding the x and y coords on Axes ax
Parameters
----------
        points_only: boolean
            True to return only scatter-style points (markers / scatter collections)
        xtime: boolean
True if the x axis of the plot contains datetime values
Returns
-------
Pandas dataframe with columns "x" and "y" containing the x and y coords of each point on Axes ax
"""
if points_only:
xy_coords = [
val
for l in self.ax.lines
if (l.get_linestyle() == "None" or l.get_linewidth() == "None")
for val in l.get_xydata()
] # .plot()
xy_coords += [
val
for c in self.ax.collections
if type(c) != matplotlib.collections.PolyCollection
for val in c.get_offsets()
] # .scatter()
else:
xy_coords = [
val for l in self.ax.lines for val in l.get_xydata()
] # .plot()
xy_coords += [
val for c in self.ax.collections for val in c.get_offsets()
] # .scatter()
xy_coords += [
[(p.get_x() + (p.get_width() / 2)), p.get_height()]
for p in self.ax.patches
] # .bar()
xy_data = pd.DataFrame(data=xy_coords, columns=["x", "y"]).dropna()
# crop to limits
lims = self.ax.get_xlim()
xy_data = xy_data[xy_data["x"] >= lims[0]]
xy_data = xy_data[xy_data["x"] <= lims[1]].reset_index(drop=True)
# change to datetime dtype if needed
if xtime:
xy_data["x"] = mdates.num2date(xy_data["x"])
return xy_data
def assert_xydata(
self,
xy_expected,
xcol=None,
ycol=None,
points_only=False,
xtime=False,
xlabels=False,
tolerence=0,
m="Incorrect data values",
):
"""Asserts that the x and y data of Axes ax matches xy_expected with error message m.
If xy_expected is None, assertion is passed
Parameters
----------
xy_expected: pandas or geopandas dataframe (Required)
DF contains data expected to be on the plot (axis object)
xcol: String (Required for non geopandas objects)
Title of column in xy_expected containing values along x_axis.
If xy_expected contains this data in 'geometry', set to None
ycol: String (Required for non geopandas objects)
The y column name of xy_expected which represents values along
the y_axis in a plot.
If xy_expected contains this data in 'geometry' set to None
points_only: boolean,
True if checking only points, false if checking all data on plot
xtime: boolean
            True if the x-axis contains datetime values. Matplotlib stores
            datetime values as floating point date numbers, so this parameter
            ensures the provided x col values are converted accordingly.
xlabels: boolean
if using x axis labels rather than x data
        tolerence: float
            Relative tolerance passed to numpy.testing.assert_allclose.
            For example, a value of .001 asserts each plotted value is within
            0.1% of the corresponding expected value.
m: string
error message provided to the student if assertion fails
"""
        # If the data are spatial (geopandas), grab geometry data
if type(xy_expected) == gpd.geodataframe.GeoDataFrame and not xcol:
xy_expected = pd.DataFrame(
data={
"x": [p.x for p in xy_expected.geometry],
"y": [p.y for p in xy_expected.geometry],
}
).dropna()
xcol, ycol = "x", "y"
if (
type(xy_expected) == pd.DataFrame
or type(xy_expected) == gpd.geodataframe.GeoDataFrame
):
if xlabels:
self.assert_xlabel_ydata(xy_expected, xcol=xcol, ycol=ycol)
return
xy_data = self.get_xy(points_only=points_only, xtime=xtime)
# Make sure the data are sorted the same
xy_data, xy_expected = (
xy_data.sort_values(by="x"),
xy_expected.sort_values(by=xcol),
)
if tolerence > 0:
if xtime:
raise ValueError(
"tolerance must be 0 with datetime on x-axis"
)
np.testing.assert_allclose(
xy_data["x"], xy_expected[xcol], rtol=tolerence, err_msg=m
)
np.testing.assert_allclose(
xy_data["y"], xy_expected[ycol], rtol=tolerence, err_msg=m
)
else:
assert np.array_equal(xy_data["x"], xy_expected[xcol]), m
assert np.array_equal(xy_data["y"], xy_expected[ycol]), m
elif xy_expected == None:
pass
else:
raise ValueError(
"xy_expected must be of type: pandas dataframe or Geopandas Dataframe"
)
def assert_xlabel_ydata(self, xy_expected, xcol, ycol, m="Incorrect Data"):
"""Asserts that the numbers in x labels and y values in Axes ax match xy_expected with error message m.
Note, this is only testing the numbers in x axis labels.
Parameters
----------
xy_expected: pandas dataframe that contains data
xcol: string column title containing xaxis data
ycol: string column title containing yaxis data
m: string error message if assertion is not met
"""
x_data = [
"".join(c for c in l.get_text() if c.isdigit())
for l in self.ax.xaxis.get_majorticklabels()
]
y_data = self.get_xy()["y"]
xy_data = pd.DataFrame(data={"x": x_data, "y": y_data})
xy_expected, xy_data = (
xy_expected.sort_values(by=xcol),
xy_data.sort_values(by="x"),
)
np.testing.assert_equal(
np.array(xy_data["x"]), np.array(xy_expected[xcol]), m
)
np.testing.assert_equal(
np.array(xy_data["y"]), np.array(xy_expected[ycol]), m
)
### LINE TESTS/HELPER FUNCTIONS ###
def get_slope_yintercept(self, path_verts):
"""Returns the y intercept of line based on the average slope of the line
Parameters
----------
path_verts: array of verticies that make a line on Axes ax
Returns
-------
slope: float of the average slope
y_intercept: float of the y intercept
"""
slopes = [
(path_verts[i + 1, 1] - path_verts[i, 1])
/ (path_verts[i + 1, 0] - path_verts[i, 0])
for i in range(len(path_verts) - 1)
]
slope = sum(slopes) / len(slopes)
return slope, path_verts[0, 1] - (path_verts[0, 0] * slope)
def assert_line(
self,
slope_exp,
intercept_exp,
xtime=False,
m="Expected line not displayed",
m2="Line does not cover data set",
):
"""Asserts that there exists a line on Axes ax with slope slope_exp and y intercept intercept_exp and goes at least from x coordinate min_val to x coordinate max_val
Parameters
----------
slope_exp: expected slope of line
        intercept_exp: expected y intercept of line
xtime: boolean if x-axis values are datetime
m: error message if line does not exist
m2: error message if line exist but does not cover data set
"""
flag_exist, flag_length = False, False
xy = self.get_xy(points_only=True)
min_val, max_val = min(xy["x"]), max(xy["x"])
for l in self.ax.lines:
path_verts = self.ax.transData.inverted().transform(
l._transformed_path.get_fully_transformed_path().vertices
)
slope, y_intercept = self.get_slope_yintercept(path_verts)
if math.isclose(slope, slope_exp, abs_tol=1e-4) and math.isclose(
y_intercept, intercept_exp, abs_tol=1e-4
):
flag_exist = True
line_x_vals = [coord[0] for coord in path_verts]
if min(line_x_vals) <= min_val and max(line_x_vals) >= max_val:
flag_length = True
break
assert flag_exist, m
assert flag_length, m2
def assert_lines_of_type(self, line_types):
"""Asserts each line of type in line_types exist on ax
Parameters
----------
line_types: list of strings. Acceptable strings in line_types are as follows ['regression', 'onetoone'].
if list is empty, assert is passed
"""
if line_types:
for line_type in line_types:
if line_type == "regression":
xy = self.get_xy(points_only=True)
slope_exp, intercept_exp, _, _, _ = stats.linregress(
xy.x, xy.y
)
elif line_type == "onetoone":
slope_exp, intercept_exp = 1, 0
else:
raise ValueError(
'each string in line_types must be from the following ["regression","onetoone"]'
)
self.assert_line(
slope_exp,
intercept_exp,
m="{0} line is not displayed properly".format(line_type),
m2="{0} line does not cover dataset".format(line_type),
)
    ## HISTOGRAM FUNCTIONS ##
def assert_num_bins(self, n=3, which_bins="positive"):
"""Asserts number of bins of type which_bins is at least n
Parameters
----------
n: int declaring minimum number of bins of type which_bin
which_bins: string from list ['negative', 'positive']
            'negative': all bins centered at a negative value
            'positive': all bins centered at a positive value
Returns
-------
"""
x_data = self.get_xy(xtime=False)["x"]
if which_bins == "negative":
n_bins = len(x_data[x_data < 0])
elif which_bins == "positive":
n_bins = len(x_data[x_data > 0])
else:
raise ValueError(
"which_bins must be from list ['negative', 'positive']"
)
assert n_bins >= n, "Not enough {0} value bins on histogram".format(
which_bins
)
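# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes matplotlib.pyplot is available and uses made-up data.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([1, 2, 3], [2, 4, 6], label="A")
    ax.set_title("Example Plot")
    ax.set_xlabel("x values")
    tester = PlotTester(ax)
    tester.assert_plot_type("line")            # passes: a line is drawn
    tester.assert_title_contains(["example"])  # passes: title contains "example"
    tester.assert_axis_label_contains(axis="x", lst=["x"])
    print("all example assertions passed")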
```
#### File: matplotcheck/tests/test_base_legends.py
```python
import pytest
import matplotlib.pyplot as plt
""" LEGEND TESTS """
def test_assert_legend_titles(pt_multi_line_plt):
"""Test that legend title test returns true when plot contains a given
string"""
pt_multi_line_plt.assert_legend_titles(["legend"])
plt.close()
def test_assert_legend_titles_not_case_sensitive(pt_multi_line_plt):
"""Check that assert_legend_titles is NOT case sensitive"""
pt_multi_line_plt.assert_legend_titles(["LeGenD"])
plt.close()
def test_assert_legend_titles_bad_text(pt_multi_line_plt):
"""Check that assert_legend_titles fails with wrong text"""
with pytest.raises(
AssertionError,
match="Legend title does not contain expected string: foo",
):
pt_multi_line_plt.assert_legend_titles(["foo"])
plt.close()
def test_assert_legend_titles_wrong_num(pt_multi_line_plt):
"""Check assert_legend_titles fails when expected number of titles
is not equal to # of legends"""
with pytest.raises(
AssertionError,
match="I was expecting 1 legend titles but instead found 2",
):
pt_multi_line_plt.assert_legend_titles(["legend", "legend2"])
plt.close()
def test_assert_legend_labels(pt_multi_line_plt):
"""Test for checking that legend labels are expected strings"""
pt_multi_line_plt.assert_legend_labels(["A", "B"])
plt.close()
def test_assert_legend_not_case_sensitive(pt_multi_line_plt):
"""Check that assert_legend_labels is NOT case sensitive"""
pt_multi_line_plt.assert_legend_labels(["a", "b"])
plt.close()
def test_assert_legend_labels_bad_text(pt_multi_line_plt):
"""Check that assert_legend_labels raises expected error when given wrong text"""
with pytest.raises(
AssertionError, match="Legend does not have expected labels"
):
pt_multi_line_plt.assert_legend_labels(["a", "c"])
plt.close()
def test_assert_legend_labels_wrong_num(pt_multi_line_plt):
"""Check that assert_legend_labels raises expected error given wrong number of labels"""
with pytest.raises(
AssertionError, match="I was expecting 3 legend entries"
):
pt_multi_line_plt.assert_legend_labels(["a", "b", "c"])
plt.close()
def test_assert_legend_no_overlay_content(pt_multi_line_plt):
"""Test for checking whether legend overlays plot contents"""
pt_multi_line_plt.assert_legend_no_overlay_content()
plt.close()
def test_assert_legend_no_overlay_content_fail(pt_multi_line_plt):
"""assert_legend_no_overlay should fail when legend is in center of plot"""
pt_multi_line_plt.ax.legend(loc="center")
with pytest.raises(AssertionError, match="Legend overlays plot window"):
pt_multi_line_plt.assert_legend_no_overlay_content()
plt.close()
def test_assert_no_legend_overlap_single(pt_multi_line_plt):
"""Checks that assert_no_legend_overlap passes when only one legend"""
pt_multi_line_plt.assert_no_legend_overlap()
plt.close()
def test_assert_no_legend_overlap_double(pt_multi_line_plt):
"""Checks that assert_no_legend_overlap passes when two legends don't overlap"""
leg_1 = plt.legend(loc=[0.8, 0.8])
leg_2 = plt.legend(loc=[0.1, 0.1])
pt_multi_line_plt.ax.add_artist(leg_1)
pt_multi_line_plt.assert_no_legend_overlap()
plt.close()
def test_assert_no_legend_overlap_fail(pt_multi_line_plt):
"""Checks that assert_no_legend_overlap fails with overlapping legends"""
leg_1 = plt.legend(loc=[0.12, 0.12])
leg_2 = plt.legend(loc=[0.1, 0.1])
pt_multi_line_plt.ax.add_artist(leg_1)
with pytest.raises(AssertionError, match="Legends overlap eachother"):
pt_multi_line_plt.assert_no_legend_overlap()
plt.close()
```
#### File: matplotcheck/tests/test_base.py
```python
import pytest
import matplotlib.pyplot as plt
def test_line_plot(pt_line_plt):
"""Test that the line plot returns true for line but false for bar or
scatter."""
pt_line_plt.assert_plot_type("line")
with pytest.raises(AssertionError):
pt_line_plt.assert_plot_type("bar")
with pytest.raises(AssertionError):
pt_line_plt.assert_plot_type("scatter")
plt.close()
def test_scatter_plot(pt_scatter_plt):
"""Test that the scatter plot returns true for line but false for bar or
line."""
pt_scatter_plt.assert_plot_type("scatter")
with pytest.raises(AssertionError):
pt_scatter_plt.assert_plot_type("bar")
with pytest.raises(AssertionError):
pt_scatter_plt.assert_plot_type("line")
plt.close()
def test_bar_plot(pt_bar_plt):
"""Test that the scatter plot returns true for line but false for bar or
line."""
pt_bar_plt.assert_plot_type("bar")
with pytest.raises(AssertionError):
pt_bar_plt.assert_plot_type("scatter")
with pytest.raises(AssertionError):
pt_bar_plt.assert_plot_type("line")
plt.close()
def test_options(pt_line_plt):
"""Test that a ValueError is raised if an incorrect plot type is provided.
Should this test be unique of within a suite of tests?"""
with pytest.raises(
ValueError,
match="Plot_type to test must be either: scatter, bar or line",
):
pt_line_plt.assert_plot_type("foo")
plt.close()
def test_correct_title(pt_line_plt):
"""Check that the correct plot title is grabbed from the axis object.
Note that get_titles maintains case."""
assert "Plot Title" in pt_line_plt.get_titles()[1]
plt.close()
"""DATACHECK TESTS"""
def test_assert_xydata_scatter(pt_scatter_plt, pd_df):
"""Checks points in scatter plot against expected data"""
pt_scatter_plt.assert_xydata(pd_df, xcol="A", ycol="B")
plt.close()
def test_assert_xydata_scatter_changed_data(pt_scatter_plt, pd_df):
"""assert_xydata should fail when we change the data"""
pd_df["B"][1] += 5
with pytest.raises(AssertionError, match="Incorrect data values"):
pt_scatter_plt.assert_xydata(pd_df, xcol="A", ycol="B")
plt.close()
# def test_assert_xydata_timeseries(pt_time_line_plt, pd_df_timeseries):
# """Commenting this out for now as this requires a time series data object
# this is failing because the time data needs to be in seconds like how
# mpl saves it. """
# pt_time_line_plt.assert_xydata(pd_df_timeseries,
# xcol='time', ycol='A',
# xtime=True)
def test_assert_xydata_xlabel(pt_bar_plt, pd_df):
pd_df["A"] = pd_df["A"].apply(str)
pt_bar_plt.assert_xlabel_ydata(pd_df, xcol="A", ycol="B")
plt.close()
``` |
{
"source": "jlperona/bart-passenger-heatmap",
"score": 4
} |
#### File: preparser/utils/commandline.py
```python
import argparse
def valid_graph_ext(input: str) -> str:
"""Verifier function used with argparse's type argument.
Checks for certain filename extensions.
"""
filetypes = {'net', 'gexf'}
if input.split('.')[-1].lower() not in filetypes:
msg = 'Unrecognized file format: \'{0}\'.'.format(input)
raise argparse.ArgumentTypeError(msg)
return input
def argument_parsing() -> argparse.Namespace:
"""Parse input arguments with argparse."""
parser = argparse.ArgumentParser(
description='Take in BART origin-destination data and a source graph. '
'Output a CSV file that gives passengers between adjacent '
'stations. A passenger who travels between two stations '
'adds one to the count of all stations along the '
'shortest path between those two stations.'
)
# mandatory/positional arguments
parser.add_argument(
'inputfile',
type=valid_graph_ext,
metavar='input.[gexf,net]',
help='Graph to use as the basis. '
'Supports GEXF and Pajek NET. '
'Format will be guessed from the file extension.'
)
parser.add_argument(
'outputfile',
metavar='output.csv',
help='CSV file to write the final results to.'
)
parser.add_argument(
'csvfile',
nargs=argparse.REMAINDER,
metavar='input1.csv ...',
help='BART origin-destination data files to read from. '
'Accepts multiple files, parses and writes in order given.'
)
# optional arguments
parser.add_argument(
'-d',
'--directed',
action='store_true',
help='Create a directed graph instead of an undirected one.'
)
return parser.parse_args()
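# --- Illustrative usage sketch (not part of the original module) ---
# argument_parsing() reads sys.argv, so a hypothetical command line (script
# name made up) would be:
#     python preparse.py --directed bart.gexf output.csv ridership-2019.csv
# A quick, runnable check of the extension validator alone:
if __name__ == '__main__':
    assert valid_graph_ext('stations.gexf') == 'stations.gexf'
    try:
        valid_graph_ext('stations.txt')
    except argparse.ArgumentTypeError as err:
        print(err)  # Unrecognized file format: 'stations.txt'.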
``` |
{
"source": "jlplenio/slurm_template_for_Python",
"score": 3
} |
#### File: jlplenio/slurm_template_for_Python/slurm_job.py
```python
from multiprocessing import Pool
import csv
import timeit
import sys
def calc_pi(run_no):
from decimal import Decimal, getcontext
getcontext().prec = 100000
result = sum(1 / Decimal(16) ** k *
(Decimal(4) / (8 * k + 1) -
Decimal(2) / (8 * k + 4) -
Decimal(1) / (8 * k + 5) -
Decimal(1) / (8 * k + 6)) for k in range(100))
print("done", run_no)
return result
if __name__ == '__main__':
core_count = 1
if len(sys.argv) > 1:
core_count = int(sys.argv[1])
print(f"Starting with {core_count} counts")
start = timeit.default_timer()
with Pool(core_count) as p:
pi_list = (p.map(calc_pi, range(100)))
with open("out/pi_list.csv", 'w', newline='') as file:
wr = csv.writer(file, quoting=csv.QUOTE_ALL)
wr.writerow(pi_list)
stop = timeit.default_timer()
print('Time: ', stop - start)
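    # --- Usage note (illustrative; not part of the original script) ---
    # The worker count comes from the first command-line argument, e.g.:
    #     python slurm_job.py 8
    # Under Slurm one would typically forward the allocated CPU count,
    # for example: srun python slurm_job.py $SLURM_CPUS_PER_TASK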
``` |
{
"source": "jlpolit/wordification",
"score": 3
} |
#### File: wordification/wordification/helpers.py
```python
import enchant
import re
import itertools
# derived from a google image search of an "old fashioned" phone
letters_from_numbers_lookup = {'2': ['A', 'B', 'C'],
'3': ['D', 'E', 'F'],
'4': ['G', 'H', 'I'],
'5': ['J', 'K', 'L'],
'6': ['M', 'N', 'O'],
'7': ['P', 'Q', 'R', 'S'],
'8': ['T', 'U', 'V'],
                               '9': ['W', 'X', 'Y', 'Z']}
numbers_from_letters_lookup = {'A': '2', 'B': '2', 'C': '2',
'D': '3', 'E': '3', 'F': '3',
'G': '4', 'H': '4', 'I': '4',
'J': '5', 'K': '5', 'L': '5',
'M': '6', 'N': '6', 'O': '6',
'P': '7', 'Q': '7', 'R': '7', 'S': '7',
'T': '8', 'U': '8', 'V': '8',
'W': '9', 'X': '9', 'Y': '9', 'Z': '9'}
english_word_lookup = enchant.Dict("en_US")
# TODO: it might make sense to allow 'I' and 'a' with the stipulation that they be followed by a valid word...
def is_valid_word(word_to_check: str,
min_length=2,
exceptions=list()) -> bool:
if type(word_to_check) is not str:
raise ValueError("Non-string entered")
if (len(word_to_check) < min_length) and (word_to_check not in exceptions):
return False
else:
return english_word_lookup.check(word_to_check)
def format_phone_number(phone_digit_list: list) -> str:
#TODO: we should actually probably check that each 'digit' is a string rather than forcing it
out_str = ''
# length check
if (len(phone_digit_list) not in [10, 11]) or (type(phone_digit_list) is not list):
raise ValueError("not a valid phone number")
# country code
if len(phone_digit_list) == 11:
out_str = (phone_digit_list.pop(0) + '-')
    # area code
for digit in phone_digit_list[:3]:
out_str += str(digit)
out_str += '-'
    # exchange (central office) code
for digit in phone_digit_list[3:6]:
out_str += str(digit)
out_str += '-'
# and the last four
for digit in phone_digit_list[6:]:
out_str += str(digit)
return out_str
def get_character_list(phone_words: str) -> list:
if type(phone_words) is not str:
raise ValueError("Not a Valid Input")
    return [x for x in re.sub(r'\W+', '', phone_words)]
def all_values_from_number(num: str) -> list:
letters = letters_from_numbers_lookup.get(num, [num])
if num not in letters:
letters += [num]
return letters
def all_combinations(number_list: list) -> list:
"""
:param number_list: array of strings representing digits between 0 and 9
:return: all possible number-letter combinations
"""
all_chars = [all_values_from_number(x) for x in number_list]
# note: I broke this out for ease of testing,
# but really we'd want this to return the iterable for efficiency
return list(itertools.product(*all_chars))
def has_valid_word(char_list: list) -> bool:
"""
:param char_list: array of strings, can be combination of digits and letters
:return: whether there is a valid English word in this array, based on the letters in order
note that this word must be surrounded on both sides by numbers (1800-PAINTX is not a valid word)
"""
phone_number = ''.join(char_list)
only_letters = re.sub("\d", " ", phone_number).strip()
letters_split = only_letters.split(' ')
n_valid = 0
n_char = 0
has_preposition = False
for i in range(len(letters_split)):
sub_word = letters_split[i]
if sub_word != '':
if i == 0:
if (len(sub_word) < 3) and (sub_word not in ['A', 'AN', 'I']):
return False
elif sub_word in ['A', 'AN', 'I']:
n_valid += 1
n_char += 1
has_preposition = True
elif (len(sub_word) < 3) or (is_valid_word(''.join(sub_word)) is False):
return False
else:
n_valid += 1
n_char += 1
elif (len(sub_word) < 3) or (is_valid_word(''.join(sub_word)) is False):
return False
else:
n_valid += 1
n_char += 1
if has_preposition:
if len(letters_split) > 1:
return (n_valid == n_char) and (n_valid > 0)
else:
return False
else:
return (n_valid == n_char) and (n_valid > 0)
def format_wordification(char_list: list) -> str:
"""
:param char_list: letter-number combination in an array (all strings)
:return: valid wordification with dashes between any letter/number chunks
"""
out = ''
n = len(char_list)
char_str = ''.join(char_list)
    num_letter_list = re.split(r'(\d+)', char_str)
if len(num_letter_list) == 3:
out = format_phone_number(list(char_list))
else:
for chunk in num_letter_list:
if chunk in ['', ' ']:
pass
else:
out += chunk
out += '-'
out = out[:-1]
if n == 11:
if (char_list[0] == '1') and(out[1] != '-'):
out = '1-' + out[1:]
if out[2:5].isdigit():
out = out[:5] + "-" + out[5:]
if (n == 10) and (out[:3].isdigit()):
out = out[:3] + "-" + out[3:]
out = re.sub(r'([A-Z])-([A-Z])', r'\1\2', out)
return out.replace('--', '-')
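# --- Illustrative usage sketch (not part of the original module) ---
# The phone number below is made up; the example only exercises the helpers
# defined above (assuming the enchant dictionary accepts the all-caps word).
if __name__ == '__main__':
    digits = ['1', '8', '0', '0', '7', '2', '4', '6', '8', '3', '7']
    print(format_phone_number(list(digits)))       # -> 1-800-724-6837
    candidate = ['1', '8', '0', '0', 'P', 'A', 'I', 'N', 'T', 'E', 'R']
    if has_valid_word(candidate):
        print(format_wordification(candidate))     # -> 1-800-PAINTER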
``` |
{
"source": "jlpoltrack/kochava-reports",
"score": 2
} |
#### File: kochava-reports/tests/test_client.py
```python
from __future__ import absolute_import
import unittest2
import requests
import json
import mock
from kochavareports import client, constant, exception, response
from kochavareports import KochavaCredentials, KochavaClient
class TestClient(unittest2.TestCase):
TEST_URL = 'http://whatever.url'
TEST_DATA = {
'dummy': 'whatever'
}
def _make_credentials(self):
return KochavaCredentials(api_key='api key',
app_guid='app guid')
def _make_client(self):
return KochavaClient(self._make_credentials())
@mock.patch('kochavareports.client.requests.get')
def test_get_http_error(self, mock_get):
mock_response = mock.Mock()
http_error = requests.exceptions.RequestException()
mock_response.raise_for_status.side_effect = http_error
mock_get.return_value = mock_response
with self.assertRaises(exception.HttpException):
self._make_client()._get_data(self.TEST_URL)
mock_get.assert_called_once_with(self.TEST_URL)
self.assertEqual(1, mock_response.raise_for_status.call_count)
self.assertEqual(0, mock_response.json.call_count)
@mock.patch('kochavareports.client.requests.post')
def test_post_http_error(self, mock_post):
mock_response = mock.Mock()
http_error = requests.exceptions.RequestException()
mock_response.raise_for_status.side_effect = http_error
mock_post.return_value = mock_response
with self.assertRaises(exception.HttpException):
self._make_client()._post_data(self.TEST_URL, self.TEST_DATA)
mock_post.assert_called_once_with(self.TEST_URL,
data=json.dumps(self.TEST_DATA))
self.assertEqual(1, mock_response.raise_for_status.call_count)
self.assertEqual(0, mock_response.json.call_count)
@mock.patch('kochavareports.client.requests.get')
def test_get_json_error(self, mock_get):
mock_response = mock.Mock()
mock_response.return_value = ''
mock_response.json.side_effect = ValueError()
mock_get.return_value = mock_response
with self.assertRaises(exception.ApiException):
self._make_client()._get_data(self.TEST_URL)
self.assertEqual(1, mock_response.json.call_count)
@mock.patch('kochavareports.client.requests.post')
def test_post_json_error(self, mock_post):
mock_response = mock.Mock()
mock_response.return_value = ''
mock_response.json.side_effect = ValueError()
mock_post.return_value = mock_response
with self.assertRaises(exception.ApiException):
self._make_client()._post_data(self.TEST_URL, self.TEST_DATA)
self.assertEqual(1, mock_response.json.call_count)
@mock.patch('kochavareports.client.time.sleep')
@mock.patch('kochavareports.client.requests.post')
@mock.patch('kochavareports.client.Client.get_report_progress')
def test_wait_for_report_max_retries_exceeded(self, mock_progress, mock_post,
mock_sleep):
response_data = {
'status': 'queued'
}
mock_response = mock.Mock()
mock_response.json.return_value = response_data
mock_post.return_value = mock_response
mock_progress.return_value = response.GetReportProgressResponse(
response_data)
token = '12345667'
retry_interval_seconds = 3
start_delay_seconds = 15
max_retries = 60
with self.assertRaises(exception.PollMaxRetryException):
self._make_client().wait_for_report(
token,
retry_interval_seconds=retry_interval_seconds,
start_delay_seconds=start_delay_seconds,
max_retries=max_retries)
# checking time.sleep() calls would verify that all input parameters
# are used correctly:
sleep_calls = [mock.call(start_delay_seconds)] + \
[mock.call(retry_interval_seconds)] * max_retries
self.assertEqual(sleep_calls, mock_sleep.call_args_list)
# finally, check that get_report_progress() is called
# `max_retries` times with the token parameter:
progress_calls = [mock.call(token)] * max_retries
self.assertEqual(progress_calls, mock_progress.call_args_list)
@mock.patch('kochavareports.client.time.sleep')
@mock.patch('kochavareports.client.Client.read_report')
@mock.patch('kochavareports.client.Client.get_report_progress')
def test_wait_for_report_success(self, mock_progress, mock_read, mock_sleep):
token = '1<PASSWORD>'
response_queued = {
'status': 'queued'
}
response_completed = {
'status': 'completed',
'report': 'http://some.url/whatever'
}
response_result = {
'click_count': 10,
'install_count': 2,
}
ping_times = 3 # for testing purposes it should be lower than
                        # `max_retries`, which defaults to 60 in this test
mock_progress.side_effect = \
[response.GetReportProgressResponse(response_queued)] * ping_times + \
[response.GetReportProgressResponse(response_completed)]
mock_read.return_value = response_result
result = self._make_client().wait_for_report(token)
# read_report() result should be the same as wait_for_report result:
self.assertEqual(result, response_result)
# read_report() should be called exactly once with the returned url:
mock_read.assert_called_once_with(response_completed['report'])
# get_report_progress() should be called internally exactly like
# specified here: just the token, (ping_times + 1) times in total,
# in the specified order
progress_calls = [mock.call(token)] * (ping_times + 1)
self.assertEqual(progress_calls, mock_progress.call_args_list)
``` |
{
"source": "JLpython-py/anti-ghostping-bot",
"score": 2
} |
#### File: JLpython-py/anti-ghostping-bot/functional_tests.py
```python
import asyncio
import os
import unittest
from lib.bot import BotRoot
from lib.db import db
class TestRunBot(unittest.TestCase):
def setUp(self):
self.connection = db.DBConnection()
def tearDown(self):
self.connection.execute_query("DELETE FROM preferences", "w")
self.connection.close_connection()
def test_run_bot(self):
token = os.environ.get("token", None)
if token is None:
with open(os.path.join("lib", "bot", "token.txt")) as file:
token = file.read()
self.assertIsNotNone(token)
loop = asyncio.get_event_loop()
bot = BotRoot()
loop.create_task(bot.start(token))
try:
loop.run_forever()
except KeyboardInterrupt:
loop.close()
bot.connection.close_connection()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JLpython-py/FanGraphs-exporter",
"score": 3
} |
#### File: FanGraphs-exporter/tests/tests.py
```python
import json
import os
import unittest
import selenium
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
class TestClassFanGraphs(unittest.TestCase):
def test_files_exist(self):
directories = {
'leaders': [
'menu.txt', 'dropdown.txt', 'checkbox.txt', 'button.txt']
}
for dirname in directories:
self.assertTrue(
os.path.exists(os.path.join('data', dirname)))
self.assertEqual(
set(os.listdir(os.path.join('data', dirname))),
set(directories[dirname]))
class TestFanGraphsLeadersSettings(unittest.TestCase):
def setUp(self):
with open(os.path.join('data', 'base_address.txt')) as file:
self.url = json.load(file).get("leaders")
options = Options()
options.headless = True
self.browser = webdriver.Firefox(options=options)
def tearDown(self):
self.browser.quit()
def test_leaders_address(self):
self.browser.get(self.url)
self.assertIn("Leaderboards", self.browser.title)
def test_find_data_configuration_elements(self):
self.browser.get(self.url)
files = ['menu.txt', 'dropdown.txt', 'checkbox.txt', 'button.txt']
for filename in files:
with open(os.path.join('data', 'leaders', filename)) as file:
data = json.load(file)
for select in data:
self.assertEqual(
len(self.browser.find_elements_by_id(data[select])),
1, data[select])
def test_find_export_data_elements(self):
self.browser.get(self.url)
export_data_button = self.browser.find_element_by_id(
"LeaderBoard1_cmdCSV")
self.assertEqual(export_data_button.text, "Export Data")
def test_find_popup_elements(self):
self.browser.get(self.url)
while True:
try:
close_popup_button = self.browser.find_element_by_css_selector(
"span[class='ezmob-footer-close']")
break
except selenium.common.exceptions.NoSuchElementException:
self.browser.refresh()
continue
self.assertEqual(close_popup_button.text, "x")
popup = self.browser.find_element_by_id("ezmobfooter")
self.assertEqual(popup.get_attribute("style"), "")
close_popup_button.click()
self.assertNotEqual(popup.get_attribute("style"), "")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JLpython-py/FanGraphs-py",
"score": 3
} |
#### File: FanGraphs-py/fangraphs/scraper.py
```python
from typing import *
import bs4
from playwright.async_api import async_playwright
from playwright.sync_api import sync_playwright
from .selectors import Selectors
def get_soup(html: str) -> bs4.BeautifulSoup:
"""
:param html:
:return:
"""
return bs4.BeautifulSoup(html, features="lxml")
class FanGraphsPage:
"""
"""
address: str
path: str
filter_widgets: dict[str, dict]
export_data: str = ""
def __init__(self):
self.soup = None
self.selectors = None
def load_soup(self, html: str) -> None:
"""
:param html:
"""
self.soup = get_soup(html)
def load_selectors(self) -> None:
"""
"""
if self.soup is None:
raise NotImplementedError
self.selectors = Selectors(self.filter_widgets, self.soup)
class SyncScraper:
"""
"""
def __init__(self, fgpage: FanGraphsPage):
"""
:param fgpage:
"""
self.fgpage = fgpage
self.__play, self.__browser, self.page = None, None, None
def __enter__(self):
self.__play = sync_playwright().start()
self.__browser = self.__play.chromium.launch()
self.page = self.__browser.new_page(
accept_downloads=True
)
self.page.goto(self.fgpage.address, timeout=0)
html = self.page.content()
self.fgpage.load_soup(html)
self.fgpage.load_selectors()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.__browser.close()
self.__play.stop()
def start(self):
"""
"""
return self.__enter__()
def stop(self):
"""
"""
return self.__exit__(None, None, None)
def widgets(self) -> tuple[str]:
"""
:return:
"""
return tuple(self.fgpage.selectors.widgets)
def options(self, wname: str) -> Optional[tuple[Union[str, bool]]]:
"""
:param wname:
:return:
"""
widget = self.fgpage.selectors.widgets.get(wname)
if widget is not None:
return widget.options()
raise Exception # TODO: Define custom exception
def current(self, wname: str) -> Optional[Union[str, bool]]:
"""
:param wname:
:return:
"""
widget = self.fgpage.selectors.widgets.get(wname)
if widget is not None:
return widget.current(self.page)
raise Exception # TODO: Define custom exception
def configure(self, wname: str, option: Union[str, bool]) -> None:
"""
:param wname:
:param option:
"""
widget = self.fgpage.selectors.widgets.get(wname)
if widget is not None:
widget.configure(self.page, option)
return
raise Exception # TODO: Define custom exception
class AsyncScraper:
"""
"""
def __init__(self, fgpage: FanGraphsPage):
"""
:param fgpage:
"""
self.fgpage = fgpage
self.__play, self.__browser, self.page = None, None, None
async def __aenter__(self):
self.__play = await async_playwright().start()
self.__browser = await self.__play.chromium.launch()
self.page = await self.__browser.new_page(
accept_downloads=True
)
await self.page.goto(self.fgpage.address, timeout=0)
html = await self.page.content()
self.fgpage.load_soup(html)
self.fgpage.load_selectors()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.__browser.close()
await self.__play.stop()
async def start(self):
"""
"""
return await self.__aenter__()
async def stop(self):
"""
"""
return await self.__aexit__(None, None, None)
    def widgets(self) -> tuple[str]:
"""
:return:
"""
return tuple(self.fgpage.selectors.widgets)
def options(self, wname: str) -> Optional[tuple[Union[str, bool]]]:
"""
:param wname:
:return:
"""
widget = self.fgpage.selectors.widgets.get(wname)
if widget is not None:
return widget.options()
raise Exception # TODO: Define custom exception
async def current(self, wname: str) -> Optional[Union[str, bool]]:
"""
:param wname:
:return:
"""
widget = self.fgpage.selectors.widgets.get(wname)
if widget is not None:
return await widget.acurrent(self.page)
raise Exception # TODO: Define custom exception
async def configure(self, wname: str, option: Union[str, bool]) -> None:
"""
:param wname:
:param option:
"""
widget = self.fgpage.selectors.widgets.get(wname)
if widget is not None:
await widget.aconfigure(self.page, option)
return
raise Exception # TODO: Define custom exception
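# --- Illustrative usage sketch (not part of the original module) ---
# FanGraphsPage is intended to be subclassed with a real address and a
# filter_widgets mapping understood by Selectors; the subclass and widget
# name below are hypothetical, so this is shown as a non-executable sketch.
#
#     class ExampleLeaders(FanGraphsPage):
#         address = "https://www.fangraphs.com/leaders.aspx"
#         path = "leaders"
#         filter_widgets = {...}  # widget name -> selector configuration
#
#     with SyncScraper(ExampleLeaders()) as scraper:
#         print(scraper.widgets())
#         scraper.configure("Stat Group", "Pitching")  # hypothetical widget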
``` |
{
"source": "JLpython-py/MLBPlayerIDs",
"score": 3
} |
#### File: mlbids/tests/test_sfbb.py
```python
import requests
from mlbids import _sfbb
class TestSFBBData:
"""
Unit tests for :py:class:`mlbids.playerids.SFBBTools`.
"""
sfbb_data = _sfbb.SFBBTools()
def test_base_address(self):
"""
Unit test for :py:meth:`mlbids.playerids.SFBBTools.base_address`.
"""
res = requests.get(
self.sfbb_data.base_address,
headers=_sfbb.HEADERS
)
res.raise_for_status()
assert res.status_code == 200
def test_soup(self):
"""
Unit test for :py:meth:`mlbids.playerids.SFBBTools._soup`.
"""
soup = _sfbb.get_soup(self.sfbb_data.base_address)
assert soup
def test_element(self):
"""
Unit test for :py:meth:`mlbids.playerids.SFBBTools._element`.
"""
css = "div.entry-content > div > table tr:nth-child(2) > td:first-child"
assert len(self.sfbb_data._soup.select(css)) == 1
def test_urls(self):
"""
Unit test for :py:meth:`mlbids.playerids.SFBBTools.urls`.
"""
elems = self.sfbb_data._element.select("a")
assert len(elems) == 5
assert all(e.attrs.get("href") for e in elems)
for url in self.sfbb_data.urls:
res = requests.get(url, headers=_sfbb.HEADERS)
assert res.status_code == 200
``` |
{
"source": "jlr84/shn",
"score": 2
} |
#### File: jlr84/shn/monitor.py
```python
import xmlrpc.client
import ssl
import socket # Required for network/socket connections
import os # Required for Forking/child processes
import time # Required for sleep call
import threading # Required for communication sub-threads
import pymysql
import server_monitor as myServer
import certs.gencert as gencert
import config
import logging
from logging.config import fileConfig
# Load logging config
fileConfig('setup/logging.conf')
log = logging.getLogger(__name__)
# Global Variables -- Don't change. [No need to change.]
CERTFILE = "certs/domains/local.cert" # Placeholder; updated when executed
KEYFILE = "certs/domains/local.key" # Default; updated when executed
hostIP = "localhost" # Default; updated when executed
admin_selected = False
# Return ip address of local host where server is running
def getMyIP():
log.info('Getting Host ip address.')
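    # Connecting a UDP socket toward a public address does not send any
    # packets; it only lets getsockname() report the IP of the interface
    # the OS would use for outbound traffic.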
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 53))
ipAdd = s.getsockname()[0]
s.close()
log.debug('Socket closed: ipAdd=%s' % ipAdd)
return ipAdd
# Return host name/fqdn of based on give ip address
def findHostName(ipAddress):
log.info('Finding Host Name based on ip address')
try:
log.debug('Trying now...')
name, alias, addresslist = socket.gethostbyaddr(ipAddress)
log.debug('Returning name: %s' % name)
return name
except socket.herror:
log.exception("Hostname/FQDN not found: Hostname/FQDN Required. "
"Correct by adding record in DNS server or within local"
"hosts file (/etc/hosts) and then restart controller.")
return "None"
# Create SSL certs for current ip address if not already present
def verifyCerts():
global CERTFILE
global KEYFILE
# Determine file path based on current ip address
CERTFILE = ''.join([config.certPath, config.rootDomain, ".cert"])
KEYFILE = ''.join([config.certPath, config.rootDomain, ".key"])
log.debug("CERTFILE: %s" % (CERTFILE))
log.debug("KEYFILE: %s" % (KEYFILE))
# If cert or key file not present, create new certs
if not os.path.isfile(CERTFILE) or not os.path.isfile(KEYFILE):
gencert.gencert(config.rootDomain)
log.info("Certfile(s) NOT present; new certs created.")
print("Certfile(s) NOT present; new certs created.")
else:
log.info("Certfiles Verified Present")
print("Certfiles Verified Present.")
# Start a thread child to run server connection as a daemon
def startServer():
log.info("Starting Server...")
# Now, start thread
log.debug("Starting new thread...")
t = threading.Thread(name="Monitor_ServerDaemon",
target=myServer.runServer,
args=(hostIP,
config.mntrServerPort,
CERTFILE,
KEYFILE
)
)
t.daemon = True
t.start()
log.debug("Thread started; end of startServer fn.")
# Check and Display the status of all child processes
def checkStatus():
log.debug("Checking Status of Threads...")
totalThreads = threading.active_count()
subThreads = totalThreads - 1
print("\nSub-Thread(s): %d" % (subThreads))
main_thread = threading.currentThread()
k = 1
for t in threading.enumerate():
if t is main_thread:
continue
print("Thread #%d:" % (k))
print("Name: %s" % (t.name))
print("Ident: %d" % (t.ident))
ans = "unknown"
if t.is_alive():
ans = "YES"
else:
ans = "NO"
print("Alive? %s\n" % (ans))
k = k+1
log.debug("End of checkStatus fn.")
# Display Status of all Hosts currently connected
def displayStatus():
log.debug("Displaying agents now")
print("Displaying agents currently connected...")
# Connect to database to query agents
log.debug("Connecting to database")
db = pymysql.connect(host=config.mysqlHost, port=config.mysqlPort,
user=config.mntrMysqlUser, passwd=<PASSWORD>,
db=config.mysqlDB)
cursor = db.cursor()
# Query to retrieve id/time of registration
sql = "SELECT distinct agent FROM status;"
agents = []
# Get agents
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the rows in a list of lists
results = cursor.fetchall()
for row in results:
thisAgent = row[0]
agents.append(thisAgent)
log.debug("Agent Received as: %s" % (thisAgent))
except:
log.exception("ERROR in db query>> %s" % sql)
print("FOUND %d agent(s) monitored.\n" % len(agents))
# Query to retrieve each agents's data
for k in range(len(agents)):
sql = "SELECT agent, status, timestamp, alias, id FROM "\
"status where agent = '%s' ORDER BY id "\
"DESC LIMIT 1" % (agents[k])
# Get host info
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the rows in a list of lists
results = cursor.fetchall()
print("Agent #%d" % (k + 1))
for row in results:
thisAgent = row[0]
thisStatus = row[1]
thisTime = row[2]
thisAlias = row[3]
thisID = row[4]
print("Agent: %s" % thisAgent)
print("Alias: %s" % thisAlias)
print("Status: %s" % thisStatus)
print("Time Connected: %s" % thisTime)
print("ID Number: %s\n" % thisID)
log.debug("Host %d Displayed" % (k + 1))
except:
log.exception("ERROR in db query>> %s" % sql)
# Disconnect from database
db.close()
# Simple test function to ensure communication is working
def mathTest():
log.debug("Start of Math Test Function...")
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
myurl = ''.join(['https://', config.agntHostName, ':',
str(config.agntServerPort)])
with xmlrpc.client.ServerProxy(myurl,
context=myContext) as proxy:
try:
print("5 + 9 is %d" % (proxy.add(5, 9)))
print("21 x 3 is: %d" % (proxy.multiply(21, 3)))
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
print("Connection to Agent Server FAILED:\n",
"Is Agent listening? Confirm connection",
"settings and try again.")
print("Settings used: '%s'" % myurl)
except:
log.warning("Connection to Agent FAILED")
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % myurl)
# Quit gracefully after terminting all child processes
def myQuit():
log.info("Monitor Exiting. Goodbye.")
print("Monitor Exiting. Goodbye.\n")
raise SystemExit
# Stop Controller Server
def stopServer():
log.debug("Stopping Monitor Server.")
# TODO Determine if it is possible to stop a daemon thread
# without stopping the whole program; for now, this just
# ends the entire program
print("Monitor Server Stopping.")
myQuit()
def invalid(choice):
log.debug("Invalid choice: %s" % choice)
print("INVALID CHOICE!")
def adminMenu():
log.debug("Displaying admin menu")
print("\nAdmin Menu:")
print("a) Connection Test with Agent (simple math test)")
print("b) SSL Verification (verify certificates")
print("c) STOP Monitor Server (program will exit)")
print("d) START* Monitor Server (*only if not running already)")
print("9) BACK (return to 'Menu')")
return input("Make a Choice\n>>> ")
def adminSelection():
global admin_selected
adminChoice = adminMenu()
if adminChoice == "a":
mathTest()
elif adminChoice == "b":
verifyCerts()
elif adminChoice == "c":
stopServer()
elif adminChoice == "d":
startServer()
elif adminChoice == "9":
log.debug("Admin is De-selected")
print("Back to Main Menu...")
admin_selected = False
elif adminChoice == "r":
# Refresh Menu (do nothing)
log.info("Refreshing Menu")
elif adminChoice in ["q", ":q"]:
myQuit()
else:
invalid(adminChoice)
def menu():
log.debug("Displaying menu")
print("\n\nMENU[Monitor]:")
print("1) Check MONITOR server status")
print("2) Display Current Status")
print("9) ADMIN MENU")
print("q) QUIT")
return input("Make a Choice\n>>> ")
def myMenu():
global admin_selected
choice = 0
if admin_selected:
choice = "9"
else:
choice = menu()
if choice == "1":
checkStatus()
elif choice == "2":
displayStatus()
elif choice == "9":
admin_selected = True
log.debug("Admin is Selected")
adminSelection()
elif choice in ["q", ":q"]:
myQuit()
elif choice == "r":
# Refresh Menu (do nothing)
log.info("Refreshing Menu")
else:
invalid(choice)
# Start of Main
if __name__ == '__main__':
log.info("Starting Monitor Main.")
hostIP = getMyIP()
verifyHostName = findHostName(hostIP)
pid = os.getpid()
print("Host IP: %s" % (hostIP))
print("Hostname: %s" % (verifyHostName))
log.debug("PID: %d" % (pid))
if verifyHostName == "None":
log.debug("Hostname not found: Returned 'None'")
elif verifyHostName in [config.ctlrHostName, config.mntrHostName]:
log.debug("HostName verified.")
log.debug("Verifying certificates.")
# Verify certificates present prior to displaying menu
verifyCerts()
# Starting Server
startServer()
time.sleep(2)
# Display Menu [repeatedly] for user
while True:
myMenu()
time.sleep(1)
else:
log.error("Hostname incorrect. "
"Hostname Found: %s; Hostname "
"Required: %s." % (verifyHostName, config.mntrHostName))
```
#### File: jlr84/shn/server_controller.py
```python
from xmlrpc.server import SimpleXMLRPCServer
import ssl
import threading
import logging
import pymysql
import config
import xmlrpc.client
import time
#####################################################
# Commands available for controlling remote VMs/VUDs
#####################################################
# Function for requesting STATUS of VM
def sendStatusRequest(host, port):
log = logging.getLogger(__name__)
log.debug("Send Status Request Command executing...")
# Connect to Agent's server daemon to send command
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
thisHost = ''.join(['https://', host, ':', str(port)])
with xmlrpc.client.ServerProxy(thisHost,
context=myContext) as proxy:
try:
log.info("Sending Command: 'Get Status'")
response = proxy.getVmStatus("status")
log.info(response)
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection to Agent FAILED:")
print("Is Agent listening? Confirm and try again.")
except:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % thisHost)
return response
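# Illustrative call (hypothetical host name; the port would normally be the
# agent's server port from config, e.g. config.agntServerPort):
#     status = sendStatusRequest("agent01.example.com", config.agntServerPort)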
# Function for requesting START of VM
def sendStart(host, port):
log = logging.getLogger(__name__)
log.debug("Send Start Command executing...")
# Connect to Agent's server daemon to send command
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
thisHost = ''.join(['https://', host, ':', str(port)])
with xmlrpc.client.ServerProxy(thisHost,
context=myContext) as proxy:
try:
log.info("Sending Command: 'Start'")
response = proxy.startVM("start")
log.info(response)
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection to Agent FAILED:")
print("Is Agent listening? Confirm and try again.")
except:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % thisHost)
return response
# Function for requesting STOP of VM
def sendStop(host, port):
log = logging.getLogger(__name__)
log.debug("Send Stop Command executing...")
# Connect to Agent's server daemon to send command
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
thisHost = ''.join(['https://', host, ':', str(port)])
with xmlrpc.client.ServerProxy(thisHost,
context=myContext) as proxy:
try:
log.info("Sending Command: 'Stop'")
response = proxy.stopVM("stop")
log.info(response)
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection to Agent FAILED:")
print("Is Agent listening? Confirm and try again.")
except:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % thisHost)
return response
# Function for requesting PAUSE of VM
def sendPause(host, port):
log = logging.getLogger(__name__)
log.debug("Send Pause Command executing...")
# Connect to Agent's server daemon to send command
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
thisHost = ''.join(['https://', host, ':', str(port)])
with xmlrpc.client.ServerProxy(thisHost,
context=myContext) as proxy:
try:
log.info("Sending Command: 'PAUSE'")
response = proxy.pauseVM("pause")
log.info(response)
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection to Agent FAILED:")
print("Is Agent listening? Confirm and try again.")
except:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % thisHost)
return response
# Function for requesting UN-PAUSE of VM
def sendUnpause(host, port):
log = logging.getLogger(__name__)
log.debug("Send Un-Pause Command executing...")
# Connect to Agent's server daemon to send command
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
thisHost = ''.join(['https://', host, ':', str(port)])
with xmlrpc.client.ServerProxy(thisHost,
context=myContext) as proxy:
try:
log.info("Sending Command: 'Un-Pause'")
response = proxy.unpauseVM("unpause")
log.info(response)
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection to Agent FAILED:")
print("Is Agent listening? Confirm and try again.")
        except Exception:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % thisHost)
return response
# Function for requesting snapshot of VM
def sendSnapshot(host, port):
log = logging.getLogger(__name__)
log.debug("Send SNAPSHOT Command executing...")
# Connect to Agent's server daemon to send command
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
thisHost = ''.join(['https://', host, ':', str(port)])
with xmlrpc.client.ServerProxy(thisHost,
context=myContext) as proxy:
try:
log.info("Sending Command: 'SNAPSHOT'")
response = proxy.snapshotVM("snapshot")
log.info(response)
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection to Agent FAILED:")
print("Is Agent listening? Confirm and try again.")
        except Exception:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % thisHost)
return response
# Function for requesting list of saved snapshots
def sendSnapListRequest(host, port):
log = logging.getLogger(__name__)
log.debug("Send REQUEST SNAPSHOT LIST Command executing...")
# Connect to Agent's server daemon to send command
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
thisHost = ''.join(['https://', host, ':', str(port)])
with xmlrpc.client.ServerProxy(thisHost,
context=myContext) as proxy:
try:
log.info("Sending Command: 'Request Snapshot List'")
response = proxy.snapshotList("snapshotList")
log.info(response)
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection to Agent FAILED:")
print("Is Agent listening? Confirm and try again.")
        except Exception:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % thisHost)
return response
# Function for requesting restore from snapshot
def sendRestoreSnap(host, port, rName):
log = logging.getLogger(__name__)
log.debug("Send Restore from Snapshot Command executing...")
# Connect to Agent's server daemon to send command
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
thisHost = ''.join(['https://', host, ':', str(port)])
with xmlrpc.client.ServerProxy(thisHost,
context=myContext) as proxy:
try:
log.info("Sending Command: 'Restore from Snapshot %s'" % rName)
response = proxy.restoreSnap("restore", rName)
log.info(response)
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection to Agent FAILED:")
print("Is Agent listening? Confirm and try again.")
        except Exception:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % thisHost)
return response
# Function for requesting complete clone of VM
def sendClone(host, port):
log = logging.getLogger(__name__)
log.debug("Send CLONE Command executing...")
# Connect to Agent's server daemon to send command
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
thisHost = ''.join(['https://', host, ':', str(port)])
with xmlrpc.client.ServerProxy(thisHost,
context=myContext) as proxy:
try:
log.info("Sending Command: 'Clone'")
response = proxy.cloneVM("clone")
log.info(response)
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection to Agent FAILED:")
print("Is Agent listening? Confirm and try again.")
        except Exception:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % thisHost)
return response
# Function for requesting list of saved clones
def sendCloneListRequest(host, port):
log = logging.getLogger(__name__)
log.debug("Send REQUEST CLONE LIST Command executing...")
# Connect to Agent's server daemon to send command
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
thisHost = ''.join(['https://', host, ':', str(port)])
with xmlrpc.client.ServerProxy(thisHost,
context=myContext) as proxy:
try:
log.info("Sending Command: 'Request Clone List'")
response = proxy.cloneList("cloneList")
log.info(response)
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection to Agent FAILED:")
print("Is Agent listening? Confirm and try again.")
        except Exception:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % thisHost)
return response
# Function for requesting restore from clone
def sendRestoreClone(host, port, rName):
log = logging.getLogger(__name__)
log.debug("Send Restore from Clone Command executing...")
# Connect to Agent's server daemon to send command
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
thisHost = ''.join(['https://', host, ':', str(port)])
with xmlrpc.client.ServerProxy(thisHost,
context=myContext) as proxy:
try:
log.info("Sending Command: 'Restore from Clone %s'" % rName)
response = proxy.restoreClone("restore", rName)
log.info(response)
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection to Agent FAILED:")
print("Is Agent listening? Confirm and try again.")
        except Exception:
log.warning("Connection to Agent FAILED")
response = "FAILED"
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % thisHost)
return response
# Function for fixing compromised host
def fixHostNow(host, port):
log = logging.getLogger(__name__)
log.debug("Fix Host Now Command executing...")
# TODO Make this logic more robust and add error checking
# First, STOP host NOW!
r1 = sendStop(host, port)
log.debug("Stop Bad Host status: %s" % r1)
# Second, GET options for RESTORE
rOptions = sendCloneListRequest(host, port)
log.debug("Clone Options Quantity: %d" % len(rOptions))
log.debug("Clone Options: %s" % rOptions)
# Third, Process options
if len(rOptions) == 0:
        # If no option available, tell user this...
log.debug("There are ZERO saved clones. Unable to restore.")
print("There are ZERO saved clones. Unable to restore.")
else:
# If there are options, take the newest clone to restore with...
rNum = len(rOptions)
restoreName = rOptions[(rNum - 1)][0]
log.debug("Requesting restore to: %s" % (restoreName))
# Fourth, RESTORE host
r2 = sendRestoreClone(host, port, restoreName)
log.debug("Restore Response: %s" % r2)
print("Result of Cleanup: %s" % r2)
# Fifth, START host again...
log.debug("Starting host now...")
time.sleep(10)
r3 = sendStart(host, port)
log.debug("Result of re-start: %s" % r3)
#####################################################
# Main Logic for Controller communicating to Agent(s)
#####################################################
def controlAgent(host, port, agtAlias):
log = logging.getLogger(__name__)
log.debug("Start of controlAgent Function...")
print("ControlAgent Daemon Started")
# Connect to database to register agent
log.debug("Connecting to database")
db = pymysql.connect(host=config.mysqlHost, port=config.mysqlPort,
user=config.ctlrMysqlUser, passwd=config.ctlrMysqlPwd,
db=config.mysqlDB)
cursor = db.cursor()
# Query to register agent
sql = "INSERT INTO agents(timestamp, "\
"host, port, alias) "\
"VALUES (now(), '%s', %d, '%s')" % \
(host, port, agtAlias)
log.debug("SQL Query Made [shown as follows]:")
log.debug(sql)
# Register Agent in database
try:
# Execute the SQL command
cursor.execute(sql)
# Commit changes in the database
db.commit()
log.debug("SQL INSERT Successful")
    except Exception:
# Rollback in case there is any error
db.rollback()
log.exception("SQL INSERT FAILED!!")
# Query to retrieve id/time of registration
sql = "SELECT id, timestamp, host, port "\
"FROM agents WHERE (host, port) = "\
"('%s', %d) ORDER BY id DESC LIMIT 1" % \
(host, port)
success = False
# Get id/time of registration
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the rows in a list of lists
results = cursor.fetchall()
for row in results:
thisID = row[0]
thisTime = row[1]
thisTime = str(thisTime.isoformat())
success = True
log.debug("ID/TIME Recorded as: %d, %s" % (thisID, thisTime))
    except Exception:
log.exception("ERROR in db query>> %s" % sql)
# Disconnect from database
db.close()
# Connect to Agent's server daemon to confirm
# registration
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
thisHost = ''.join(['https://', host, ':', str(port)])
with xmlrpc.client.ServerProxy(thisHost,
context=myContext) as proxy:
try:
log.info("Sending Confirmation...")
if success:
log.debug("Insert SUCCESS. [success==True]")
response = proxy.confirm(config.ctlrHostName,
config.ctlrServerPort,
thisID, thisTime)
log.info(response)
print("Connection to Agent ESTABLISHED")
else:
log.debug("Insert FAILURE. [success==False]")
response = proxy.failed(config.ctlrHostName)
log.info(response)
print("Connection to Agent FAILED")
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
print("Connection to Agent FAILED:")
print("Is Agent listening? Confirm and try again.")
        except Exception:
log.warning("Connection to Agent FAILED")
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % thisHost)
log.info("Entering 'while True' loop now.")
while True:
log.info("ControlAgent: Sleeping for 60 seconds...")
time.sleep(60)
# Connect to database to check monitor
log.debug("Connecting to database")
db = pymysql.connect(host=config.mysqlHost, port=config.mysqlPort,
user=config.ctlrMysqlUser,
passwd=config.ctlrMysqlPwd,
db=config.mysqlDB)
cursor = db.cursor()
# Query to check agent
sql = "SELECT status, timestamp FROM status WHERE "\
"alias = '%s' ORDER BY timestamp DESC LIMIT 1;" % \
(agtAlias)
log.debug("SQL Query Made [shown as follows]:")
log.debug(sql)
        # Check latest Agent status in database
try:
# Execute the SQL command
cursor.execute(sql)
thisAnswer = 0
thisTime = "None"
# Fetch all the rows
results = cursor.fetchall()
for row in results:
thisAnswer = row[0]
thisTime = row[1]
log.debug("thisAnswer: %s" % thisAnswer)
if thisAnswer == 1:
log.debug("Host '%s' CLEAN as of '%s'." % (host, thisTime))
print("Host '%s' CLEAN as of '%s'." % (host, thisTime))
elif thisAnswer == 0:
log.debug("Host NOT FOUND in status database!!")
print("Host NOT FOUND in status database!!")
else:
log.debug("Host '%s' INFECTED!! as of '%s'." % (host, thisTime))
print("Host '%s' INFECTED!! as of '%s'." % (host, thisTime))
print("TAKING ACTION NOW!")
fixHostNow(host, port)
            log.debug("Monitor status query successful")
        except Exception:
# Rollback in case there is any error
log.exception("ERROR in db query>> %s" % sql)
#############################################################
# Define functions available to server via remote connections
#############################################################
def add(x, y):
return x+y
def multiply(x, y):
return x*y
# Disconnect Agent from controller
def disconnectAgent(agentHostName, connectionID, timestamp):
log = logging.getLogger(__name__)
log.info("Starting Disconnect Agent function")
# Connect to database to disconnect agent
log.debug("Connecting to database")
db = pymysql.connect(host=config.mysqlHost, port=config.mysqlPort,
user=config.ctlrMysqlUser, passwd=config.ctlrMysqlPwd,
db=config.mysqlDB)
cursor = db.cursor()
# Query to retrieve id/time of registration
sql = "SELECT id, host, timestamp "\
"FROM agents WHERE (host, id) = "\
"('%s', %s) ORDER BY timestamp DESC LIMIT 1" % \
(agentHostName, connectionID)
# Get time of registration
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the rows in a list of lists
results = cursor.fetchall()
for row in results:
thisID = row[0]
thisTime = row[2]
thisTime = str(thisTime.isoformat())
log.debug("ID/TIME Recorded as: %d, %s" % (thisID, thisTime))
if thisTime == timestamp:
log.debug("TIMESTAMPS MATCH!!!")
log.debug("Removing from database...")
# Query to delete proper rows
sql = "DELETE FROM agents WHERE host='%s' "\
"AND id<=%s;" % \
(agentHostName, connectionID)
# Try delete operation
try:
# Execute the SQL command
cursor.execute(sql)
# Commit the changes
db.commit()
log.info("Records successfully deleted.")
                except Exception:
db.rollback()
log.exception("ERROR in db query>> %s" % sql)
else:
log.warning("Timestamps DO NOT match!!")
    except Exception:
log.exception("ERROR in db query>> %s" % sql)
# Disconnect from database
db.close()
return "Successful Disconnect"
# Register Agent with Controller so Agent can receive commands
def registerAgent(agentHostName, agentPortNum, agentAlias):
log = logging.getLogger(__name__)
log.info("Starting registerAgent function")
# Start child process to run function for
    # registering and communicating with Agent
tName = ''.join(["Controller_to_", agentHostName])
t = threading.Thread(name=tName,
target=controlAgent,
args=(agentHostName,
agentPortNum,
agentAlias
)
)
t.daemon = True
t.start()
# Connect to Agent running at hostName, listening on portNum
mymsg = ''.join(["Registering Agent '", agentHostName, "'..."])
log.debug(mymsg)
return mymsg
#########################################
# Main server function: An xml rpc server
# for responding to client requests.
#########################################
def runServer(ipAdd, portNum, serverCert, serverKey):
log = logging.getLogger(__name__)
log.info("Starting runServer Module")
log.debug("serverCert: %s" % (serverCert))
log.debug("serverKey: %s" % (serverKey))
# Create XMLRPC Server, based on ipAdd/port received
log.debug("Trying socket now...")
try:
server = SimpleXMLRPCServer((ipAdd, portNum))
# Create/Wrap server socket with ssl
server.socket = ssl.wrap_socket(server.socket,
certfile=serverCert,
keyfile=serverKey,
do_handshake_on_connect=True,
server_side=True)
# Register available functions
log.debug("Registering Functions")
server.register_multicall_functions()
server.register_function(add, 'add')
server.register_function(multiply, 'multiply')
server.register_function(disconnectAgent, 'disconnectAgent')
server.register_function(registerAgent, 'registerAgent')
# Start server listening [forever]
log.info("Server listening on port %d." % (portNum))
print("Server listening on port %d." % (portNum))
server.serve_forever()
except FileNotFoundError:
log.exception("ERROR creating socket... "
"CERT or KEY NOT Present.")
except OSError:
        log.exception("ERROR creating socket... "
                      "Verify port number [%d] is "
                      "available for controller." % portNum)
``` |
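The eleven `send*` helpers above differ only in the remote method they invoke and the label they log; a single dispatcher could replace them. The sketch below is illustrative only — `sendCommand` and the wrapper shown are hypothetical names, not part of the original module — but it relies solely on behaviour already present in the file (`config.CACERTFILE`, `xmlrpc.client.ServerProxy`, attribute-based method lookup).

```python
import logging
import ssl
import xmlrpc.client

import config  # same config module the original helpers use


def sendCommand(host, port, method, label, *args):
    """Hypothetical generic replacement for the send* helpers."""
    log = logging.getLogger(__name__)
    log.debug("Send %s Command executing..." % label)
    myContext = ssl.create_default_context()
    myContext.load_verify_locations(config.CACERTFILE)
    thisHost = ''.join(['https://', host, ':', str(port)])
    with xmlrpc.client.ServerProxy(thisHost, context=myContext) as proxy:
        try:
            log.info("Sending Command: '%s'" % label)
            # ServerProxy resolves remote methods by attribute name,
            # so getattr lets one function cover startVM, stopVM, etc.
            response = getattr(proxy, method)(*args)
            log.info(response)
        except ConnectionRefusedError:
            log.warning("Connection to Agent FAILED")
            response = "FAILED"
            print("Connection to Agent FAILED:")
            print("Is Agent listening? Confirm and try again.")
        except Exception:
            log.warning("Connection to Agent FAILED")
            response = "FAILED"
            print("Connection Failed. Suspected incorrect URL.")
            print("Settings used: '%s'" % thisHost)
    return response


# Each existing helper would then shrink to a one-line wrapper, e.g.:
def sendStart(host, port):
    return sendCommand(host, port, "startVM", "Start", "start")
```

Two smaller points, offered as sketches rather than drop-in fixes. First, the SQL in `controlAgent` and `disconnectAgent` is assembled with `%` string interpolation; pymysql's `cursor.execute` accepts a parameter tuple, which sidesteps quoting and injection issues:

```python
sql = ("INSERT INTO agents(timestamp, host, port, alias) "
       "VALUES (now(), %s, %s, %s)")
cursor.execute(sql, (host, port, agtAlias))
db.commit()
```

Second, `ssl.wrap_socket` used in `runServer` has been deprecated since Python 3.7 and removed in 3.12; the equivalent with an explicit context would be:

```python
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(certfile=serverCert, keyfile=serverKey)
server.socket = ctx.wrap_socket(server.socket, server_side=True)
```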